| Column | Type | Value statistics |
|---|---|---|
| `problem_id` | string | lengths 18–22 |
| `source` | string | 1 class |
| `task_type` | string | 1 class |
| `in_source_id` | string | lengths 13–58 |
| `prompt` | string | lengths 1.1k–25.4k |
| `golden_diff` | string | lengths 145–5.13k |
| `verification_info` | string | lengths 582–39.1k |
| `num_tokens` | int64 | 271–4.1k |
| `num_tokens_diff` | int64 | 47–1.02k |
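A minimal sketch of how one might load and inspect rows with the Hugging Face `datasets` library. The dataset id `rasdani/github-patches` is taken from the `source` field shown in the rows below, and the `"train"` split name is an assumption; adjust both if the dataset is published under a different path or configuration.

```python
# Sketch: load the dataset and inspect one row.
# Assumptions: the dataset lives on the Hugging Face Hub as "rasdani/github-patches"
# (taken from the `source` field) and exposes a "train" split.
import json
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["task_type"], row["in_source_id"])
print(row["golden_diff"][:200])  # reference patch, truncated for display

# `verification_info` is stored as a JSON string; in the rows shown below it
# carries the issue text, the golden diff, and the before/after file contents.
info = json.loads(row["verification_info"])
print(sorted(info.keys()))
```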
gh_patches_debug_37750
rasdani/github-patches
git_diff
ytdl-org__youtube-dl-2859
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- FunnyOrDie extractor not finding thumbnails python -m youtube_dl -v --skip-download --write-info-json --no-playlist -f mp4 http://www.funnyordie.com/videos/e402820827/please-use-this-song-jon-lajoie extracts the video properly but not a thumbnail. Here's the resulting JSON: {"display_id": "e402820827", "extractor": "FunnyOrDie", "description": "Please use this to sell something", "format": "0 - unknown", "format_id": "0", "playlist_index": null, "stitle": "Please Use This Song (Jon Lajoie)", "playlist": null, "title": "Please Use This Song (Jon Lajoie)", "url": "http://vo.fod4.com/v/e402820827/v600.mp4", "extractor_key": "FunnyOrDie", "id": "e402820827", "ext": "mp4", "webpage_url": "http://www.funnyordie.com/videos/e402820827/please-use-this-song-jon-lajoie", "fulltitle": "Please Use This Song (Jon Lajoie)", "thumbnail": null, "webpage_url_basename": "please-use-this-song-jon-lajoie"} FunnyorDie's RSS feed entry for this page does contain a thumbnail: media:thumbnail url="http://t.fod4.com/t/e402820827/c480x270_50.jpg" width="464" height="348" --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `youtube_dl/extractor/funnyordie.py` Content: ``` 1 from __future__ import unicode_literals 2 3 import json 4 import re 5 6 from .common import InfoExtractor 7 8 9 class FunnyOrDieIE(InfoExtractor): 10 _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])' 11 _TEST = { 12 'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version', 13 'file': '0732f586d7.mp4', 14 'md5': 'f647e9e90064b53b6e046e75d0241fbd', 15 'info_dict': { 16 'description': ('Lyrics changed to match the video. Spoken cameo ' 17 'by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a ' 18 'concept by Dustin McLean (DustFilms.com). Performed, edited, ' 19 'and written by David A. Scott.'), 20 'title': 'Heart-Shaped Box: Literal Video Version', 21 }, 22 } 23 24 def _real_extract(self, url): 25 mobj = re.match(self._VALID_URL, url) 26 27 video_id = mobj.group('id') 28 webpage = self._download_webpage(url, video_id) 29 30 video_url = self._search_regex( 31 [r'type="video/mp4" src="(.*?)"', r'src="([^>]*?)" type=\'video/mp4\''], 32 webpage, 'video URL', flags=re.DOTALL) 33 34 if mobj.group('type') == 'embed': 35 post_json = self._search_regex( 36 r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details') 37 post = json.loads(post_json) 38 title = post['name'] 39 description = post.get('description') 40 thumbnail = post.get('picture') 41 else: 42 title = self._og_search_title(webpage) 43 description = self._og_search_description(webpage) 44 thumbnail = None 45 46 return { 47 'id': video_id, 48 'url': video_url, 49 'ext': 'mp4', 50 'title': title, 51 'description': description, 52 'thumbnail': thumbnail, 53 } 54 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py --- a/youtube_dl/extractor/funnyordie.py +++ b/youtube_dl/extractor/funnyordie.py @@ -8,18 +8,27 @@ class FunnyOrDieIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])' - _TEST = { + _TESTS = [{ 'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version', - 'file': '0732f586d7.mp4', 'md5': 'f647e9e90064b53b6e046e75d0241fbd', 'info_dict': { - 'description': ('Lyrics changed to match the video. Spoken cameo ' - 'by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a ' - 'concept by Dustin McLean (DustFilms.com). Performed, edited, ' - 'and written by David A. Scott.'), + 'id': '0732f586d7', + 'ext': 'mp4', 'title': 'Heart-Shaped Box: Literal Video Version', + 'description': 'md5:ea09a01bc9a1c46d9ab696c01747c338', + 'thumbnail': 're:^http:.*\.jpg$', + }, + }, { + 'url': 'http://www.funnyordie.com/embed/e402820827', + 'md5': '0e0c5a7bf45c52b95cd16aa7f28be0b6', + 'info_dict': { + 'id': 'e402820827', + 'ext': 'mp4', + 'title': 'Please Use This Song (Jon Lajoie)', + 'description': 'md5:2ed27d364f5a805a6dba199faaf6681d', + 'thumbnail': 're:^http:.*\.jpg$', }, - } + }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) @@ -31,23 +40,15 @@ [r'type="video/mp4" src="(.*?)"', r'src="([^>]*?)" type=\'video/mp4\''], webpage, 'video URL', flags=re.DOTALL) - if mobj.group('type') == 'embed': - post_json = self._search_regex( - r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details') - post = json.loads(post_json) - title = post['name'] - description = post.get('description') - thumbnail = post.get('picture') - else: - title = self._og_search_title(webpage) - description = self._og_search_description(webpage) - thumbnail = None + post_json = self._search_regex( + r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details') + post = json.loads(post_json) return { 'id': video_id, 'url': video_url, 'ext': 'mp4', - 'title': title, - 'description': description, - 'thumbnail': thumbnail, + 'title': post['name'], + 'description': post.get('description'), + 'thumbnail': post.get('picture'), }
{"golden_diff": "diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py\n--- a/youtube_dl/extractor/funnyordie.py\n+++ b/youtube_dl/extractor/funnyordie.py\n@@ -8,18 +8,27 @@\n \n class FunnyOrDieIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?funnyordie\\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'\n- _TEST = {\n+ _TESTS = [{\n 'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',\n- 'file': '0732f586d7.mp4',\n 'md5': 'f647e9e90064b53b6e046e75d0241fbd',\n 'info_dict': {\n- 'description': ('Lyrics changed to match the video. Spoken cameo '\n- 'by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a '\n- 'concept by Dustin McLean (DustFilms.com). Performed, edited, '\n- 'and written by David A. Scott.'),\n+ 'id': '0732f586d7',\n+ 'ext': 'mp4',\n 'title': 'Heart-Shaped Box: Literal Video Version',\n+ 'description': 'md5:ea09a01bc9a1c46d9ab696c01747c338',\n+ 'thumbnail': 're:^http:.*\\.jpg$',\n+ },\n+ }, {\n+ 'url': 'http://www.funnyordie.com/embed/e402820827',\n+ 'md5': '0e0c5a7bf45c52b95cd16aa7f28be0b6',\n+ 'info_dict': {\n+ 'id': 'e402820827',\n+ 'ext': 'mp4',\n+ 'title': 'Please Use This Song (Jon Lajoie)',\n+ 'description': 'md5:2ed27d364f5a805a6dba199faaf6681d',\n+ 'thumbnail': 're:^http:.*\\.jpg$',\n },\n- }\n+ }]\n \n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n@@ -31,23 +40,15 @@\n [r'type=\"video/mp4\" src=\"(.*?)\"', r'src=\"([^>]*?)\" type=\\'video/mp4\\''],\n webpage, 'video URL', flags=re.DOTALL)\n \n- if mobj.group('type') == 'embed':\n- post_json = self._search_regex(\n- r'fb_post\\s*=\\s*(\\{.*?\\});', webpage, 'post details')\n- post = json.loads(post_json)\n- title = post['name']\n- description = post.get('description')\n- thumbnail = post.get('picture')\n- else:\n- title = self._og_search_title(webpage)\n- description = self._og_search_description(webpage)\n- thumbnail = None\n+ post_json = self._search_regex(\n+ r'fb_post\\s*=\\s*(\\{.*?\\});', webpage, 'post details')\n+ post = json.loads(post_json)\n \n return {\n 'id': video_id,\n 'url': video_url,\n 'ext': 'mp4',\n- 'title': title,\n- 'description': description,\n- 'thumbnail': thumbnail,\n+ 'title': post['name'],\n+ 'description': post.get('description'),\n+ 'thumbnail': post.get('picture'),\n }\n", "issue": "FunnyOrDie extractor not finding thumbnails\npython -m youtube_dl -v --skip-download --write-info-json --no-playlist -f mp4 http://www.funnyordie.com/videos/e402820827/please-use-this-song-jon-lajoie extracts the video properly but not a thumbnail. 
Here's the resulting JSON:\n{\"display_id\": \"e402820827\", \"extractor\": \"FunnyOrDie\", \"description\": \"Please use this to sell something\", \"format\": \"0 - unknown\", \"format_id\": \"0\", \"playlist_index\": null, \"stitle\": \"Please Use This Song (Jon Lajoie)\", \"playlist\": null, \"title\": \"Please Use This Song (Jon Lajoie)\", \"url\": \"http://vo.fod4.com/v/e402820827/v600.mp4\", \"extractor_key\": \"FunnyOrDie\", \"id\": \"e402820827\", \"ext\": \"mp4\", \"webpage_url\": \"http://www.funnyordie.com/videos/e402820827/please-use-this-song-jon-lajoie\", \"fulltitle\": \"Please Use This Song (Jon Lajoie)\", \"thumbnail\": null, \"webpage_url_basename\": \"please-use-this-song-jon-lajoie\"}\n\nFunnyorDie's RSS feed entry for this page does contain a thumbnail:\nmedia:thumbnail url=\"http://t.fod4.com/t/e402820827/c480x270_50.jpg\" width=\"464\" height=\"348\"\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom .common import InfoExtractor\n\n\nclass FunnyOrDieIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?funnyordie\\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'\n _TEST = {\n 'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',\n 'file': '0732f586d7.mp4',\n 'md5': 'f647e9e90064b53b6e046e75d0241fbd',\n 'info_dict': {\n 'description': ('Lyrics changed to match the video. Spoken cameo '\n 'by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a '\n 'concept by Dustin McLean (DustFilms.com). Performed, edited, '\n 'and written by David A. Scott.'),\n 'title': 'Heart-Shaped Box: Literal Video Version',\n },\n }\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n\n video_id = mobj.group('id')\n webpage = self._download_webpage(url, video_id)\n\n video_url = self._search_regex(\n [r'type=\"video/mp4\" src=\"(.*?)\"', r'src=\"([^>]*?)\" type=\\'video/mp4\\''],\n webpage, 'video URL', flags=re.DOTALL)\n\n if mobj.group('type') == 'embed':\n post_json = self._search_regex(\n r'fb_post\\s*=\\s*(\\{.*?\\});', webpage, 'post details')\n post = json.loads(post_json)\n title = post['name']\n description = post.get('description')\n thumbnail = post.get('picture')\n else:\n title = self._og_search_title(webpage)\n description = self._og_search_description(webpage)\n thumbnail = None\n\n return {\n 'id': video_id,\n 'url': video_url,\n 'ext': 'mp4',\n 'title': title,\n 'description': description,\n 'thumbnail': thumbnail,\n }\n", "path": "youtube_dl/extractor/funnyordie.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom .common import InfoExtractor\n\n\nclass FunnyOrDieIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?funnyordie\\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'\n _TESTS = [{\n 'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',\n 'md5': 'f647e9e90064b53b6e046e75d0241fbd',\n 'info_dict': {\n 'id': '0732f586d7',\n 'ext': 'mp4',\n 'title': 'Heart-Shaped Box: Literal Video Version',\n 'description': 'md5:ea09a01bc9a1c46d9ab696c01747c338',\n 'thumbnail': 're:^http:.*\\.jpg$',\n },\n }, {\n 'url': 'http://www.funnyordie.com/embed/e402820827',\n 'md5': '0e0c5a7bf45c52b95cd16aa7f28be0b6',\n 'info_dict': {\n 'id': 'e402820827',\n 'ext': 'mp4',\n 'title': 'Please Use This Song (Jon Lajoie)',\n 'description': 'md5:2ed27d364f5a805a6dba199faaf6681d',\n 'thumbnail': 're:^http:.*\\.jpg$',\n },\n }]\n\n def _real_extract(self, 
url):\n mobj = re.match(self._VALID_URL, url)\n\n video_id = mobj.group('id')\n webpage = self._download_webpage(url, video_id)\n\n video_url = self._search_regex(\n [r'type=\"video/mp4\" src=\"(.*?)\"', r'src=\"([^>]*?)\" type=\\'video/mp4\\''],\n webpage, 'video URL', flags=re.DOTALL)\n\n post_json = self._search_regex(\n r'fb_post\\s*=\\s*(\\{.*?\\});', webpage, 'post details')\n post = json.loads(post_json)\n\n return {\n 'id': video_id,\n 'url': video_url,\n 'ext': 'mp4',\n 'title': post['name'],\n 'description': post.get('description'),\n 'thumbnail': post.get('picture'),\n }\n", "path": "youtube_dl/extractor/funnyordie.py"}]}
1,248
899
gh_patches_debug_37748
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-1123
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add support for OTEL_PROPAGATORS The spec describes environment variables that should be supported to configure propagators, this feature request is to add support in the current implementation. https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/sdk-environment-variables.md --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opentelemetry-api/src/opentelemetry/propagators/__init__.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 API for propagation of context. 17 18 Example:: 19 20 import flask 21 import requests 22 from opentelemetry import propagators 23 24 25 PROPAGATOR = propagators.get_global_textmap() 26 27 28 def get_header_from_flask_request(request, key): 29 return request.headers.get_all(key) 30 31 def set_header_into_requests_request(request: requests.Request, 32 key: str, value: str): 33 request.headers[key] = value 34 35 def example_route(): 36 context = PROPAGATOR.extract( 37 get_header_from_flask_request, 38 flask.request 39 ) 40 request_to_downstream = requests.Request( 41 "GET", "http://httpbin.org/get" 42 ) 43 PROPAGATOR.inject( 44 set_header_into_requests_request, 45 request_to_downstream, 46 context=context 47 ) 48 session = requests.Session() 49 session.send(request_to_downstream.prepare()) 50 51 52 .. _Propagation API Specification: 53 https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/api-propagators.md 54 """ 55 56 import typing 57 58 from opentelemetry.baggage.propagation import BaggagePropagator 59 from opentelemetry.context.context import Context 60 from opentelemetry.propagators import composite 61 from opentelemetry.trace.propagation import textmap 62 from opentelemetry.trace.propagation.tracecontext import ( 63 TraceContextTextMapPropagator, 64 ) 65 66 67 def extract( 68 get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT], 69 carrier: textmap.TextMapPropagatorT, 70 context: typing.Optional[Context] = None, 71 ) -> Context: 72 """ Uses the configured propagator to extract a Context from the carrier. 73 74 Args: 75 get_from_carrier: a function that can retrieve zero 76 or more values from the carrier. In the case that 77 the value does not exist, return an empty list. 78 carrier: and object which contains values that are 79 used to construct a Context. This object 80 must be paired with an appropriate get_from_carrier 81 which understands how to extract a value from it. 82 context: an optional Context to use. Defaults to current 83 context if not set. 
84 """ 85 return get_global_textmap().extract(get_from_carrier, carrier, context) 86 87 88 def inject( 89 set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT], 90 carrier: textmap.TextMapPropagatorT, 91 context: typing.Optional[Context] = None, 92 ) -> None: 93 """ Uses the configured propagator to inject a Context into the carrier. 94 95 Args: 96 set_in_carrier: A setter function that can set values 97 on the carrier. 98 carrier: An object that contains a representation of HTTP 99 headers. Should be paired with set_in_carrier, which 100 should know how to set header values on the carrier. 101 context: an optional Context to use. Defaults to current 102 context if not set. 103 """ 104 get_global_textmap().inject(set_in_carrier, carrier, context) 105 106 107 _HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator( 108 [TraceContextTextMapPropagator(), BaggagePropagator()], 109 ) # type: textmap.TextMapPropagator 110 111 112 def get_global_textmap() -> textmap.TextMapPropagator: 113 return _HTTP_TEXT_FORMAT 114 115 116 def set_global_textmap(http_text_format: textmap.TextMapPropagator,) -> None: 117 global _HTTP_TEXT_FORMAT # pylint:disable=global-statement 118 _HTTP_TEXT_FORMAT = http_text_format 119 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opentelemetry-api/src/opentelemetry/propagators/__init__.py b/opentelemetry-api/src/opentelemetry/propagators/__init__.py --- a/opentelemetry-api/src/opentelemetry/propagators/__init__.py +++ b/opentelemetry-api/src/opentelemetry/propagators/__init__.py @@ -15,6 +15,21 @@ """ API for propagation of context. +The propagators for the +``opentelemetry.propagators.composite.CompositeHTTPPropagator`` can be defined +via configuration in the ``OTEL_PROPAGATORS`` environment variable. This +variable should be set to a comma-separated string of names of values for the +``opentelemetry_propagator`` entry point. For example, setting +``OTEL_PROPAGATORS`` to ``tracecontext,baggage`` (which is the default value) +would instantiate +``opentelemetry.propagators.composite.CompositeHTTPPropagator`` with 2 +propagators, one of type +``opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator`` +and other of type ``opentelemetry.baggage.propagation.BaggagePropagator``. +Notice that these propagator classes are defined as +``opentelemetry_propagator`` entry points in the ``setup.cfg`` file of +``opentelemetry``. + Example:: import flask @@ -54,14 +69,16 @@ """ import typing +from logging import getLogger + +from pkg_resources import iter_entry_points -from opentelemetry.baggage.propagation import BaggagePropagator +from opentelemetry.configuration import Configuration from opentelemetry.context.context import Context from opentelemetry.propagators import composite from opentelemetry.trace.propagation import textmap -from opentelemetry.trace.propagation.tracecontext import ( - TraceContextTextMapPropagator, -) + +logger = getLogger(__name__) def extract( @@ -104,9 +121,25 @@ get_global_textmap().inject(set_in_carrier, carrier, context) -_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator( - [TraceContextTextMapPropagator(), BaggagePropagator()], -) # type: textmap.TextMapPropagator +try: + + propagators = [] + + for propagator in ( # type: ignore + Configuration().get("PROPAGATORS", "tracecontext,baggage").split(",") # type: ignore + ): + + propagators.append( # type: ignore + next( # type: ignore + iter_entry_points("opentelemetry_propagator", propagator) # type: ignore + ).load()() + ) + +except Exception: # pylint: disable=broad-except + logger.exception("Failed to load configured propagators") + raise + +_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator(propagators) # type: ignore def get_global_textmap() -> textmap.TextMapPropagator: @@ -115,4 +148,4 @@ def set_global_textmap(http_text_format: textmap.TextMapPropagator,) -> None: global _HTTP_TEXT_FORMAT # pylint:disable=global-statement - _HTTP_TEXT_FORMAT = http_text_format + _HTTP_TEXT_FORMAT = http_text_format # type: ignore
{"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/propagators/__init__.py b/opentelemetry-api/src/opentelemetry/propagators/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/propagators/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/propagators/__init__.py\n@@ -15,6 +15,21 @@\n \"\"\"\n API for propagation of context.\n \n+The propagators for the\n+``opentelemetry.propagators.composite.CompositeHTTPPropagator`` can be defined\n+via configuration in the ``OTEL_PROPAGATORS`` environment variable. This\n+variable should be set to a comma-separated string of names of values for the\n+``opentelemetry_propagator`` entry point. For example, setting\n+``OTEL_PROPAGATORS`` to ``tracecontext,baggage`` (which is the default value)\n+would instantiate\n+``opentelemetry.propagators.composite.CompositeHTTPPropagator`` with 2\n+propagators, one of type\n+``opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator``\n+and other of type ``opentelemetry.baggage.propagation.BaggagePropagator``.\n+Notice that these propagator classes are defined as\n+``opentelemetry_propagator`` entry points in the ``setup.cfg`` file of\n+``opentelemetry``.\n+\n Example::\n \n import flask\n@@ -54,14 +69,16 @@\n \"\"\"\n \n import typing\n+from logging import getLogger\n+\n+from pkg_resources import iter_entry_points\n \n-from opentelemetry.baggage.propagation import BaggagePropagator\n+from opentelemetry.configuration import Configuration\n from opentelemetry.context.context import Context\n from opentelemetry.propagators import composite\n from opentelemetry.trace.propagation import textmap\n-from opentelemetry.trace.propagation.tracecontext import (\n- TraceContextTextMapPropagator,\n-)\n+\n+logger = getLogger(__name__)\n \n \n def extract(\n@@ -104,9 +121,25 @@\n get_global_textmap().inject(set_in_carrier, carrier, context)\n \n \n-_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator(\n- [TraceContextTextMapPropagator(), BaggagePropagator()],\n-) # type: textmap.TextMapPropagator\n+try:\n+\n+ propagators = []\n+\n+ for propagator in ( # type: ignore\n+ Configuration().get(\"PROPAGATORS\", \"tracecontext,baggage\").split(\",\") # type: ignore\n+ ):\n+\n+ propagators.append( # type: ignore\n+ next( # type: ignore\n+ iter_entry_points(\"opentelemetry_propagator\", propagator) # type: ignore\n+ ).load()()\n+ )\n+\n+except Exception: # pylint: disable=broad-except\n+ logger.exception(\"Failed to load configured propagators\")\n+ raise\n+\n+_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator(propagators) # type: ignore\n \n \n def get_global_textmap() -> textmap.TextMapPropagator:\n@@ -115,4 +148,4 @@\n \n def set_global_textmap(http_text_format: textmap.TextMapPropagator,) -> None:\n global _HTTP_TEXT_FORMAT # pylint:disable=global-statement\n- _HTTP_TEXT_FORMAT = http_text_format\n+ _HTTP_TEXT_FORMAT = http_text_format # type: ignore\n", "issue": "Add support for OTEL_PROPAGATORS\nThe spec describes environment variables that should be supported to configure propagators, this feature request is to add support in the current implementation.\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/sdk-environment-variables.md\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless 
required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAPI for propagation of context.\n\nExample::\n\n import flask\n import requests\n from opentelemetry import propagators\n\n\n PROPAGATOR = propagators.get_global_textmap()\n\n\n def get_header_from_flask_request(request, key):\n return request.headers.get_all(key)\n\n def set_header_into_requests_request(request: requests.Request,\n key: str, value: str):\n request.headers[key] = value\n\n def example_route():\n context = PROPAGATOR.extract(\n get_header_from_flask_request,\n flask.request\n )\n request_to_downstream = requests.Request(\n \"GET\", \"http://httpbin.org/get\"\n )\n PROPAGATOR.inject(\n set_header_into_requests_request,\n request_to_downstream,\n context=context\n )\n session = requests.Session()\n session.send(request_to_downstream.prepare())\n\n\n.. _Propagation API Specification:\n https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/api-propagators.md\n\"\"\"\n\nimport typing\n\nfrom opentelemetry.baggage.propagation import BaggagePropagator\nfrom opentelemetry.context.context import Context\nfrom opentelemetry.propagators import composite\nfrom opentelemetry.trace.propagation import textmap\nfrom opentelemetry.trace.propagation.tracecontext import (\n TraceContextTextMapPropagator,\n)\n\n\ndef extract(\n get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n) -> Context:\n \"\"\" Uses the configured propagator to extract a Context from the carrier.\n\n Args:\n get_from_carrier: a function that can retrieve zero\n or more values from the carrier. In the case that\n the value does not exist, return an empty list.\n carrier: and object which contains values that are\n used to construct a Context. This object\n must be paired with an appropriate get_from_carrier\n which understands how to extract a value from it.\n context: an optional Context to use. Defaults to current\n context if not set.\n \"\"\"\n return get_global_textmap().extract(get_from_carrier, carrier, context)\n\n\ndef inject(\n set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n) -> None:\n \"\"\" Uses the configured propagator to inject a Context into the carrier.\n\n Args:\n set_in_carrier: A setter function that can set values\n on the carrier.\n carrier: An object that contains a representation of HTTP\n headers. Should be paired with set_in_carrier, which\n should know how to set header values on the carrier.\n context: an optional Context to use. 
Defaults to current\n context if not set.\n \"\"\"\n get_global_textmap().inject(set_in_carrier, carrier, context)\n\n\n_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator(\n [TraceContextTextMapPropagator(), BaggagePropagator()],\n) # type: textmap.TextMapPropagator\n\n\ndef get_global_textmap() -> textmap.TextMapPropagator:\n return _HTTP_TEXT_FORMAT\n\n\ndef set_global_textmap(http_text_format: textmap.TextMapPropagator,) -> None:\n global _HTTP_TEXT_FORMAT # pylint:disable=global-statement\n _HTTP_TEXT_FORMAT = http_text_format\n", "path": "opentelemetry-api/src/opentelemetry/propagators/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAPI for propagation of context.\n\nThe propagators for the\n``opentelemetry.propagators.composite.CompositeHTTPPropagator`` can be defined\nvia configuration in the ``OTEL_PROPAGATORS`` environment variable. This\nvariable should be set to a comma-separated string of names of values for the\n``opentelemetry_propagator`` entry point. For example, setting\n``OTEL_PROPAGATORS`` to ``tracecontext,baggage`` (which is the default value)\nwould instantiate\n``opentelemetry.propagators.composite.CompositeHTTPPropagator`` with 2\npropagators, one of type\n``opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator``\nand other of type ``opentelemetry.baggage.propagation.BaggagePropagator``.\nNotice that these propagator classes are defined as\n``opentelemetry_propagator`` entry points in the ``setup.cfg`` file of\n``opentelemetry``.\n\nExample::\n\n import flask\n import requests\n from opentelemetry import propagators\n\n\n PROPAGATOR = propagators.get_global_textmap()\n\n\n def get_header_from_flask_request(request, key):\n return request.headers.get_all(key)\n\n def set_header_into_requests_request(request: requests.Request,\n key: str, value: str):\n request.headers[key] = value\n\n def example_route():\n context = PROPAGATOR.extract(\n get_header_from_flask_request,\n flask.request\n )\n request_to_downstream = requests.Request(\n \"GET\", \"http://httpbin.org/get\"\n )\n PROPAGATOR.inject(\n set_header_into_requests_request,\n request_to_downstream,\n context=context\n )\n session = requests.Session()\n session.send(request_to_downstream.prepare())\n\n\n.. 
_Propagation API Specification:\n https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/api-propagators.md\n\"\"\"\n\nimport typing\nfrom logging import getLogger\n\nfrom pkg_resources import iter_entry_points\n\nfrom opentelemetry.configuration import Configuration\nfrom opentelemetry.context.context import Context\nfrom opentelemetry.propagators import composite\nfrom opentelemetry.trace.propagation import textmap\n\nlogger = getLogger(__name__)\n\n\ndef extract(\n get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n) -> Context:\n \"\"\" Uses the configured propagator to extract a Context from the carrier.\n\n Args:\n get_from_carrier: a function that can retrieve zero\n or more values from the carrier. In the case that\n the value does not exist, return an empty list.\n carrier: and object which contains values that are\n used to construct a Context. This object\n must be paired with an appropriate get_from_carrier\n which understands how to extract a value from it.\n context: an optional Context to use. Defaults to current\n context if not set.\n \"\"\"\n return get_global_textmap().extract(get_from_carrier, carrier, context)\n\n\ndef inject(\n set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n) -> None:\n \"\"\" Uses the configured propagator to inject a Context into the carrier.\n\n Args:\n set_in_carrier: A setter function that can set values\n on the carrier.\n carrier: An object that contains a representation of HTTP\n headers. Should be paired with set_in_carrier, which\n should know how to set header values on the carrier.\n context: an optional Context to use. Defaults to current\n context if not set.\n \"\"\"\n get_global_textmap().inject(set_in_carrier, carrier, context)\n\n\ntry:\n\n propagators = []\n\n for propagator in ( # type: ignore\n Configuration().get(\"PROPAGATORS\", \"tracecontext,baggage\").split(\",\") # type: ignore\n ):\n\n propagators.append( # type: ignore\n next( # type: ignore\n iter_entry_points(\"opentelemetry_propagator\", propagator) # type: ignore\n ).load()()\n )\n\nexcept Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to load configured propagators\")\n raise\n\n_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator(propagators) # type: ignore\n\n\ndef get_global_textmap() -> textmap.TextMapPropagator:\n return _HTTP_TEXT_FORMAT\n\n\ndef set_global_textmap(http_text_format: textmap.TextMapPropagator,) -> None:\n global _HTTP_TEXT_FORMAT # pylint:disable=global-statement\n _HTTP_TEXT_FORMAT = http_text_format # type: ignore\n", "path": "opentelemetry-api/src/opentelemetry/propagators/__init__.py"}]}
1,462
773
gh_patches_debug_28935
rasdani/github-patches
git_diff
pyload__pyload-1659
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- premiumize.me hook is broken account says username and password is ok but the log always shows: 4 01.08.2015 19:50:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 5 01.08.2015 19:51:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 6 01.08.2015 19:51:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 7 01.08.2015 19:52:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 8 01.08.2015 19:52:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 9 01.08.2015 19:53:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 10 01.08.2015 19:53:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry so i guess the hook is broken premiumize.me hook is broken account says username and password is ok but the log always shows: 4 01.08.2015 19:50:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 5 01.08.2015 19:51:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 6 01.08.2015 19:51:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 7 01.08.2015 19:52:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 8 01.08.2015 19:52:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 9 01.08.2015 19:53:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry 10 01.08.2015 19:53:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry so i guess the hook is broken --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `module/plugins/hoster/PremiumizeMe.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 from module.common.json_layer import json_loads 4 from module.plugins.internal.MultiHoster import MultiHoster, create_getInfo 5 6 7 class PremiumizeMe(MultiHoster): 8 __name__ = "PremiumizeMe" 9 __type__ = "hoster" 10 __version__ = "0.19" 11 __status__ = "testing" 12 13 __pattern__ = r'^unmatchable$' #: Since we want to allow the user to specify the list of hoster to use we let MultiHoster.activate 14 __config__ = [("use_premium" , "bool", "Use premium account if available" , True), 15 ("revertfailed", "bool", "Revert to standard download if fails", True)] 16 17 __description__ = """Premiumize.me multi-hoster plugin""" 18 __license__ = "GPLv3" 19 __authors__ = [("Florian Franzen", "[email protected]")] 20 21 22 def handle_premium(self, pyfile): 23 #: In some cases hostsers do not supply us with a filename at download, so we 24 #: Are going to set a fall back filename (e.g. 
for freakshare or xfileshare) 25 pyfile.name = pyfile.name.split('/').pop() #: Remove everthing before last slash 26 27 #: Correction for automatic assigned filename: Removing html at end if needed 28 suffix_to_remove = ["html", "htm", "php", "php3", "asp", "shtm", "shtml", "cfml", "cfm"] 29 temp = pyfile.name.split('.') 30 if temp.pop() in suffix_to_remove: 31 pyfile.name = ".".join(temp) 32 33 #: Get account data 34 user, data = self.account.select() 35 36 #: Get rewritten link using the premiumize.me api v1 (see https://secure.premiumize.me/?show=api) 37 data = json_loads(self.load("http://api.premiumize.me/pm-api/v1.php", #@TODO: Revert to `https` in 0.4.10 38 get={'method' : "directdownloadlink", 39 'params[login]': user, 40 'params[pass]' : data['password'], 41 'params[link]' : pyfile.url})) 42 43 #: Check status and decide what to do 44 status = data['status'] 45 46 if status == 200: 47 self.link = data['result']['location'] 48 return 49 50 elif status == 400: 51 self.fail(_("Invalid link")) 52 53 elif status == 404: 54 self.offline() 55 56 elif status >= 500: 57 self.temp_offline() 58 59 else: 60 self.fail(data['statusmessage']) 61 62 63 getInfo = create_getInfo(PremiumizeMe) 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/module/plugins/hoster/PremiumizeMe.py b/module/plugins/hoster/PremiumizeMe.py --- a/module/plugins/hoster/PremiumizeMe.py +++ b/module/plugins/hoster/PremiumizeMe.py @@ -7,7 +7,7 @@ class PremiumizeMe(MultiHoster): __name__ = "PremiumizeMe" __type__ = "hoster" - __version__ = "0.19" + __version__ = "0.20" __status__ = "testing" __pattern__ = r'^unmatchable$' #: Since we want to allow the user to specify the list of hoster to use we let MultiHoster.activate @@ -31,13 +31,13 @@ pyfile.name = ".".join(temp) #: Get account data - user, data = self.account.select() + user, info = self.account.select() #: Get rewritten link using the premiumize.me api v1 (see https://secure.premiumize.me/?show=api) data = json_loads(self.load("http://api.premiumize.me/pm-api/v1.php", #@TODO: Revert to `https` in 0.4.10 get={'method' : "directdownloadlink", 'params[login]': user, - 'params[pass]' : data['password'], + 'params[pass]' : info['login']['password'], 'params[link]' : pyfile.url})) #: Check status and decide what to do
{"golden_diff": "diff --git a/module/plugins/hoster/PremiumizeMe.py b/module/plugins/hoster/PremiumizeMe.py\n--- a/module/plugins/hoster/PremiumizeMe.py\n+++ b/module/plugins/hoster/PremiumizeMe.py\n@@ -7,7 +7,7 @@\n class PremiumizeMe(MultiHoster):\n __name__ = \"PremiumizeMe\"\n __type__ = \"hoster\"\n- __version__ = \"0.19\"\n+ __version__ = \"0.20\"\n __status__ = \"testing\"\n \n __pattern__ = r'^unmatchable$' #: Since we want to allow the user to specify the list of hoster to use we let MultiHoster.activate\n@@ -31,13 +31,13 @@\n pyfile.name = \".\".join(temp)\n \n #: Get account data\n- user, data = self.account.select()\n+ user, info = self.account.select()\n \n #: Get rewritten link using the premiumize.me api v1 (see https://secure.premiumize.me/?show=api)\n data = json_loads(self.load(\"http://api.premiumize.me/pm-api/v1.php\", #@TODO: Revert to `https` in 0.4.10\n get={'method' : \"directdownloadlink\",\n 'params[login]': user,\n- 'params[pass]' : data['password'],\n+ 'params[pass]' : info['login']['password'],\n 'params[link]' : pyfile.url}))\n \n #: Check status and decide what to do\n", "issue": "premiumize.me hook is broken\naccount says username and password is ok\n\nbut the log always shows:\n\n4 01.08.2015 19:50:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n5 01.08.2015 19:51:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n6 01.08.2015 19:51:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n7 01.08.2015 19:52:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n8 01.08.2015 19:52:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n9 01.08.2015 19:53:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n10 01.08.2015 19:53:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n\nso i guess the hook is broken\n\npremiumize.me hook is broken\naccount says username and password is ok\n\nbut the log always shows:\n\n4 01.08.2015 19:50:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n5 01.08.2015 19:51:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n6 01.08.2015 19:51:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n7 01.08.2015 19:52:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n8 01.08.2015 19:52:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n9 01.08.2015 19:53:05 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n10 01.08.2015 19:53:13 WARNING HOOK PremiumizeMe: 'password' | Waiting 1 minute and retry\n\nso i guess the hook is broken\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom module.common.json_layer import json_loads\nfrom module.plugins.internal.MultiHoster import MultiHoster, create_getInfo\n\n\nclass PremiumizeMe(MultiHoster):\n __name__ = \"PremiumizeMe\"\n __type__ = \"hoster\"\n __version__ = \"0.19\"\n __status__ = \"testing\"\n\n __pattern__ = r'^unmatchable$' #: Since we want to allow the user to specify the list of hoster to use we let MultiHoster.activate\n __config__ = [(\"use_premium\" , \"bool\", \"Use premium account if available\" , True),\n (\"revertfailed\", \"bool\", \"Revert to standard download if fails\", True)]\n\n __description__ = \"\"\"Premiumize.me multi-hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Florian Franzen\", \"[email protected]\")]\n\n\n def handle_premium(self, pyfile):\n #: In some cases hostsers do not supply us with a 
filename at download, so we\n #: Are going to set a fall back filename (e.g. for freakshare or xfileshare)\n pyfile.name = pyfile.name.split('/').pop() #: Remove everthing before last slash\n\n #: Correction for automatic assigned filename: Removing html at end if needed\n suffix_to_remove = [\"html\", \"htm\", \"php\", \"php3\", \"asp\", \"shtm\", \"shtml\", \"cfml\", \"cfm\"]\n temp = pyfile.name.split('.')\n if temp.pop() in suffix_to_remove:\n pyfile.name = \".\".join(temp)\n\n #: Get account data\n user, data = self.account.select()\n\n #: Get rewritten link using the premiumize.me api v1 (see https://secure.premiumize.me/?show=api)\n data = json_loads(self.load(\"http://api.premiumize.me/pm-api/v1.php\", #@TODO: Revert to `https` in 0.4.10\n get={'method' : \"directdownloadlink\",\n 'params[login]': user,\n 'params[pass]' : data['password'],\n 'params[link]' : pyfile.url}))\n\n #: Check status and decide what to do\n status = data['status']\n\n if status == 200:\n self.link = data['result']['location']\n return\n\n elif status == 400:\n self.fail(_(\"Invalid link\"))\n\n elif status == 404:\n self.offline()\n\n elif status >= 500:\n self.temp_offline()\n\n else:\n self.fail(data['statusmessage'])\n\n\ngetInfo = create_getInfo(PremiumizeMe)\n", "path": "module/plugins/hoster/PremiumizeMe.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom module.common.json_layer import json_loads\nfrom module.plugins.internal.MultiHoster import MultiHoster, create_getInfo\n\n\nclass PremiumizeMe(MultiHoster):\n __name__ = \"PremiumizeMe\"\n __type__ = \"hoster\"\n __version__ = \"0.20\"\n __status__ = \"testing\"\n\n __pattern__ = r'^unmatchable$' #: Since we want to allow the user to specify the list of hoster to use we let MultiHoster.activate\n __config__ = [(\"use_premium\" , \"bool\", \"Use premium account if available\" , True),\n (\"revertfailed\", \"bool\", \"Revert to standard download if fails\", True)]\n\n __description__ = \"\"\"Premiumize.me multi-hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Florian Franzen\", \"[email protected]\")]\n\n\n def handle_premium(self, pyfile):\n #: In some cases hostsers do not supply us with a filename at download, so we\n #: Are going to set a fall back filename (e.g. for freakshare or xfileshare)\n pyfile.name = pyfile.name.split('/').pop() #: Remove everthing before last slash\n\n #: Correction for automatic assigned filename: Removing html at end if needed\n suffix_to_remove = [\"html\", \"htm\", \"php\", \"php3\", \"asp\", \"shtm\", \"shtml\", \"cfml\", \"cfm\"]\n temp = pyfile.name.split('.')\n if temp.pop() in suffix_to_remove:\n pyfile.name = \".\".join(temp)\n\n #: Get account data\n user, info = self.account.select()\n\n #: Get rewritten link using the premiumize.me api v1 (see https://secure.premiumize.me/?show=api)\n data = json_loads(self.load(\"http://api.premiumize.me/pm-api/v1.php\", #@TODO: Revert to `https` in 0.4.10\n get={'method' : \"directdownloadlink\",\n 'params[login]': user,\n 'params[pass]' : info['login']['password'],\n 'params[link]' : pyfile.url}))\n\n #: Check status and decide what to do\n status = data['status']\n\n if status == 200:\n self.link = data['result']['location']\n return\n\n elif status == 400:\n self.fail(_(\"Invalid link\"))\n\n elif status == 404:\n self.offline()\n\n elif status >= 500:\n self.temp_offline()\n\n else:\n self.fail(data['statusmessage'])\n\n\ngetInfo = create_getInfo(PremiumizeMe)\n", "path": "module/plugins/hoster/PremiumizeMe.py"}]}
1,617
348
gh_patches_debug_22076
rasdani/github-patches
git_diff
netbox-community__netbox-16229
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- User and group queries are not properly restricted via GraphQL API in v4.0.2 Re-Open ### Deployment Type Self-hosted ### NetBox Version v4.0.2 ### Python Version 3.10 ### Steps to Reproduce This is is to re-opent #7814 Create New Group netbox-graphql. Don't add any permission to the group. Add new user to the group Login as new user Access https://netbox/graphql query { user_list{ username password } } Username and hash in password returned. ### Expected Behavior Empty result retured because the user in a group without permission to Group/User view. ### Observed Behavior All Username and hash in Database returned. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `netbox/users/graphql/types.py` Content: ``` 1 from typing import List 2 3 import strawberry 4 import strawberry_django 5 from django.contrib.auth import get_user_model 6 from django.contrib.auth.models import Group 7 from strawberry import auto 8 from users import filtersets 9 from users.models import Group 10 from utilities.querysets import RestrictedQuerySet 11 from .filters import * 12 13 __all__ = ( 14 'GroupType', 15 'UserType', 16 ) 17 18 19 @strawberry_django.type( 20 Group, 21 fields=['id', 'name'], 22 filters=GroupFilter 23 ) 24 class GroupType: 25 pass 26 27 28 @strawberry_django.type( 29 get_user_model(), 30 fields=[ 31 'id', 'username', 'password', 'first_name', 'last_name', 'email', 'is_staff', 32 'is_active', 'date_joined', 'groups', 33 ], 34 filters=UserFilter 35 ) 36 class UserType: 37 groups: List[GroupType] 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/netbox/users/graphql/types.py b/netbox/users/graphql/types.py --- a/netbox/users/graphql/types.py +++ b/netbox/users/graphql/types.py @@ -1,13 +1,10 @@ from typing import List -import strawberry import strawberry_django from django.contrib.auth import get_user_model -from django.contrib.auth.models import Group -from strawberry import auto -from users import filtersets + +from netbox.graphql.types import BaseObjectType from users.models import Group -from utilities.querysets import RestrictedQuerySet from .filters import * __all__ = ( @@ -21,17 +18,16 @@ fields=['id', 'name'], filters=GroupFilter ) -class GroupType: +class GroupType(BaseObjectType): pass @strawberry_django.type( get_user_model(), fields=[ - 'id', 'username', 'password', 'first_name', 'last_name', 'email', 'is_staff', - 'is_active', 'date_joined', 'groups', + 'id', 'username', 'first_name', 'last_name', 'email', 'is_staff', 'is_active', 'date_joined', 'groups', ], filters=UserFilter ) -class UserType: +class UserType(BaseObjectType): groups: List[GroupType]
{"golden_diff": "diff --git a/netbox/users/graphql/types.py b/netbox/users/graphql/types.py\n--- a/netbox/users/graphql/types.py\n+++ b/netbox/users/graphql/types.py\n@@ -1,13 +1,10 @@\n from typing import List\n \n-import strawberry\n import strawberry_django\n from django.contrib.auth import get_user_model\n-from django.contrib.auth.models import Group\n-from strawberry import auto\n-from users import filtersets\n+\n+from netbox.graphql.types import BaseObjectType\n from users.models import Group\n-from utilities.querysets import RestrictedQuerySet\n from .filters import *\n \n __all__ = (\n@@ -21,17 +18,16 @@\n fields=['id', 'name'],\n filters=GroupFilter\n )\n-class GroupType:\n+class GroupType(BaseObjectType):\n pass\n \n \n @strawberry_django.type(\n get_user_model(),\n fields=[\n- 'id', 'username', 'password', 'first_name', 'last_name', 'email', 'is_staff',\n- 'is_active', 'date_joined', 'groups',\n+ 'id', 'username', 'first_name', 'last_name', 'email', 'is_staff', 'is_active', 'date_joined', 'groups',\n ],\n filters=UserFilter\n )\n-class UserType:\n+class UserType(BaseObjectType):\n groups: List[GroupType]\n", "issue": "User and group queries are not properly restricted via GraphQL API in v4.0.2 Re-Open\n### Deployment Type\n\nSelf-hosted\n\n### NetBox Version\n\nv4.0.2\n\n### Python Version\n\n3.10\n\n### Steps to Reproduce\n\nThis is is to re-opent #7814\r\n\r\nCreate New Group netbox-graphql. Don't add any permission to the group.\r\nAdd new user to the group\r\nLogin as new user\r\nAccess https://netbox/graphql\r\n\r\nquery {\r\n user_list{\r\n username\r\n password\r\n }\r\n }\r\n\r\nUsername and hash in password returned.\r\n\r\n\n\n### Expected Behavior\n\nEmpty result retured because the user in a group without permission to Group/User view.\n\n### Observed Behavior\n\nAll Username and hash in Database returned.\n", "before_files": [{"content": "from typing import List\n\nimport strawberry\nimport strawberry_django\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom strawberry import auto\nfrom users import filtersets\nfrom users.models import Group\nfrom utilities.querysets import RestrictedQuerySet\nfrom .filters import *\n\n__all__ = (\n 'GroupType',\n 'UserType',\n)\n\n\n@strawberry_django.type(\n Group,\n fields=['id', 'name'],\n filters=GroupFilter\n)\nclass GroupType:\n pass\n\n\n@strawberry_django.type(\n get_user_model(),\n fields=[\n 'id', 'username', 'password', 'first_name', 'last_name', 'email', 'is_staff',\n 'is_active', 'date_joined', 'groups',\n ],\n filters=UserFilter\n)\nclass UserType:\n groups: List[GroupType]\n", "path": "netbox/users/graphql/types.py"}], "after_files": [{"content": "from typing import List\n\nimport strawberry_django\nfrom django.contrib.auth import get_user_model\n\nfrom netbox.graphql.types import BaseObjectType\nfrom users.models import Group\nfrom .filters import *\n\n__all__ = (\n 'GroupType',\n 'UserType',\n)\n\n\n@strawberry_django.type(\n Group,\n fields=['id', 'name'],\n filters=GroupFilter\n)\nclass GroupType(BaseObjectType):\n pass\n\n\n@strawberry_django.type(\n get_user_model(),\n fields=[\n 'id', 'username', 'first_name', 'last_name', 'email', 'is_staff', 'is_active', 'date_joined', 'groups',\n ],\n filters=UserFilter\n)\nclass UserType(BaseObjectType):\n groups: List[GroupType]\n", "path": "netbox/users/graphql/types.py"}]}
681
287
gh_patches_debug_42938
rasdani/github-patches
git_diff
vas3k__vas3k.club-109
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Отображение моих комментариев в личном кабинете Хочу иметь возможность видеть всё, что я написал под своими и чужими постами, сейчас невозможно понять, что и где я писал, это не очень удобно. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `users/views.py` Content: ``` 1 from django.http import Http404 2 from django.shortcuts import render, redirect, get_object_or_404 3 from django_q.tasks import async_task 4 5 from auth.helpers import auth_required, moderator_role_required 6 from common.pagination import paginate 7 from common.request import ajax_request 8 from notifications.telegram.users import notify_profile_needs_review 9 from posts.models import Post 10 from search.models import SearchIndex 11 from users.admin import do_user_admin_actions 12 from users.forms.admin import UserAdminForm 13 from users.forms.intro import UserIntroForm 14 from users.forms.profile import UserEditForm, ExpertiseForm 15 from users.models import User, UserBadge, UserExpertise, UserTag, Tag 16 17 18 @auth_required 19 def intro(request): 20 if request.me.is_profile_complete \ 21 and request.me.is_profile_reviewed \ 22 and not request.me.is_profile_rejected: 23 return redirect("profile", request.me.slug) 24 25 if request.method == "POST": 26 form = UserIntroForm(request.POST, request.FILES, instance=request.me) 27 if form.is_valid(): 28 user = form.save(commit=False) 29 30 # send to moderation 31 user.is_profile_complete = True 32 user.is_profile_reviewed = False 33 user.is_profile_rejected = False 34 user.save() 35 36 # create intro post 37 intro_post = Post.upsert_user_intro(user, form.cleaned_data["intro"], is_visible=False) 38 39 # notify moderators to review profile 40 async_task(notify_profile_needs_review, user, intro_post) 41 42 return redirect("on_review") 43 else: 44 existing_intro = Post.get_user_intro(request.me) 45 form = UserIntroForm( 46 instance=request.me, 47 initial={"intro": existing_intro.text if existing_intro else ""}, 48 ) 49 50 return render(request, "users/intro.html", {"form": form}) 51 52 53 @auth_required 54 def profile(request, user_slug): 55 user = get_object_or_404(User, slug=user_slug) 56 57 if not request.me.is_moderator: 58 # hide unverified users 59 if not user.is_profile_complete or not user.is_profile_complete or user.is_profile_rejected: 60 raise Http404() 61 62 if user.id == request.me.id: 63 goto = request.GET.get("goto") 64 if goto: 65 return redirect(goto) 66 67 tags = Tag.objects.filter(is_visible=True).all() 68 69 intro = Post.get_user_intro(user) 70 projects = Post.objects.filter(author=user, type=Post.TYPE_PROJECT).all() 71 active_tags = {t.tag_id for t in UserTag.objects.filter(user=user).all()} 72 achievements = UserBadge.objects.filter(user=user)[:8] 73 expertises = UserExpertise.objects.filter(user=user).all() 74 posts = Post.objects_for_user(request.me)\ 75 .filter(author=user, is_visible=True)\ 76 .exclude(type__in=[Post.TYPE_INTRO, Post.TYPE_PROJECT]) 77 78 return render(request, "users/profile.html", { 79 "user": user, 80 "intro": intro, 81 "projects": projects, 82 "tags": tags, 83 "active_tags": active_tags, 84 "achievements": achievements, 85 "expertises": expertises, 86 "posts": paginate(request, posts), 87 }) 88 89 90 @auth_required 91 def edit_profile(request, user_slug): 92 user = get_object_or_404(User, slug=user_slug) 93 if user.id != 
request.me.id and not request.me.is_moderator: 94 raise Http404() 95 96 if request.method == "POST": 97 form = UserEditForm(request.POST, request.FILES, instance=user) 98 if form.is_valid(): 99 user = form.save(commit=False) 100 user.save() 101 102 SearchIndex.update_user_index(user) 103 104 return redirect("profile", user.slug) 105 else: 106 form = UserEditForm(instance=user) 107 108 return render(request, "users/edit.html", {"form": form}) 109 110 111 @auth_required 112 @moderator_role_required 113 def admin_profile(request, user_slug): 114 user = get_object_or_404(User, slug=user_slug) 115 116 if request.method == "POST": 117 form = UserAdminForm(request.POST, request.FILES) 118 if form.is_valid(): 119 return do_user_admin_actions(request, user, form.cleaned_data) 120 else: 121 form = UserAdminForm() 122 123 return render(request, "users/admin.html", {"user": user, "form": form}) 124 125 126 @auth_required 127 @ajax_request 128 def toggle_tag(request, tag_code): 129 if request.method != "POST": 130 raise Http404() 131 132 tag = get_object_or_404(Tag, code=tag_code) 133 134 user_tag, is_created = UserTag.objects.get_or_create( 135 user=request.me, 136 tag=tag, 137 defaults=dict( 138 name=tag.name 139 ) 140 ) 141 142 if not is_created: 143 user_tag.delete() 144 145 return { 146 "status": "created" if is_created else "deleted", 147 "tag": {"code": tag.code, "name": tag.name, "color": tag.color}, 148 } 149 150 151 @auth_required 152 @ajax_request 153 def add_expertise(request): 154 if request.method == "POST": 155 form = ExpertiseForm(request.POST) 156 if form.is_valid(): 157 user_expertise = form.save(commit=False) 158 user_expertise.user = request.me 159 UserExpertise.objects.filter(user=request.me, expertise=user_expertise.expertise).delete() 160 user_expertise.save() 161 return { 162 "status": "created", 163 "expertise": { 164 "name": user_expertise.name, 165 "expertise": user_expertise.expertise, 166 "value": user_expertise.value, 167 }, 168 } 169 170 return {"status": "tipidor"} 171 172 173 @auth_required 174 @ajax_request 175 def delete_expertise(request, expertise): 176 if request.method == "POST": 177 UserExpertise.objects.filter(user=request.me, expertise=expertise).delete() 178 return { 179 "status": "deleted", 180 "expertise": { 181 "expertise": expertise, 182 }, 183 } 184 185 return {"status": "tipidor"} 186 187 188 @auth_required 189 def on_review(request): 190 if request.me.is_profile_reviewed: 191 return redirect("profile", request.me.slug) 192 return render(request, "users/messages/on_review.html") 193 194 195 @auth_required 196 def rejected(request): 197 return render(request, "users/messages/rejected.html") 198 199 200 @auth_required 201 def banned(request): 202 return render(request, "users/messages/banned.html") 203 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/users/views.py b/users/views.py --- a/users/views.py +++ b/users/views.py @@ -7,6 +7,7 @@ from common.request import ajax_request from notifications.telegram.users import notify_profile_needs_review from posts.models import Post +from comments.models import Comment from search.models import SearchIndex from users.admin import do_user_admin_actions from users.forms.admin import UserAdminForm @@ -17,9 +18,11 @@ @auth_required def intro(request): - if request.me.is_profile_complete \ - and request.me.is_profile_reviewed \ - and not request.me.is_profile_rejected: + if ( + request.me.is_profile_complete + and request.me.is_profile_reviewed + and not request.me.is_profile_rejected + ): return redirect("profile", request.me.slug) if request.method == "POST": @@ -34,7 +37,9 @@ user.save() # create intro post - intro_post = Post.upsert_user_intro(user, form.cleaned_data["intro"], is_visible=False) + intro_post = Post.upsert_user_intro( + user, form.cleaned_data["intro"], is_visible=False + ) # notify moderators to review profile async_task(notify_profile_needs_review, user, intro_post) @@ -71,6 +76,7 @@ active_tags = {t.tag_id for t in UserTag.objects.filter(user=user).all()} achievements = UserBadge.objects.filter(user=user)[:8] expertises = UserExpertise.objects.filter(user=user).all() + comments = Comment.visible_objects().filter(author=user).order_by("-created_at")[:5] posts = Post.objects_for_user(request.me)\ .filter(author=user, is_visible=True)\ .exclude(type__in=[Post.TYPE_INTRO, Post.TYPE_PROJECT]) @@ -83,6 +89,7 @@ "active_tags": active_tags, "achievements": achievements, "expertises": expertises, + "comments": comments, "posts": paginate(request, posts), }) @@ -132,11 +139,7 @@ tag = get_object_or_404(Tag, code=tag_code) user_tag, is_created = UserTag.objects.get_or_create( - user=request.me, - tag=tag, - defaults=dict( - name=tag.name - ) + user=request.me, tag=tag, defaults=dict(name=tag.name) ) if not is_created: @@ -156,7 +159,9 @@ if form.is_valid(): user_expertise = form.save(commit=False) user_expertise.user = request.me - UserExpertise.objects.filter(user=request.me, expertise=user_expertise.expertise).delete() + UserExpertise.objects.filter( + user=request.me, expertise=user_expertise.expertise + ).delete() user_expertise.save() return { "status": "created", @@ -177,9 +182,7 @@ UserExpertise.objects.filter(user=request.me, expertise=expertise).delete() return { "status": "deleted", - "expertise": { - "expertise": expertise, - }, + "expertise": {"expertise": expertise,}, } return {"status": "tipidor"}
{"golden_diff": "diff --git a/users/views.py b/users/views.py\n--- a/users/views.py\n+++ b/users/views.py\n@@ -7,6 +7,7 @@\n from common.request import ajax_request\n from notifications.telegram.users import notify_profile_needs_review\n from posts.models import Post\n+from comments.models import Comment\n from search.models import SearchIndex\n from users.admin import do_user_admin_actions\n from users.forms.admin import UserAdminForm\n@@ -17,9 +18,11 @@\n \n @auth_required\n def intro(request):\n- if request.me.is_profile_complete \\\n- and request.me.is_profile_reviewed \\\n- and not request.me.is_profile_rejected:\n+ if (\n+ request.me.is_profile_complete\n+ and request.me.is_profile_reviewed\n+ and not request.me.is_profile_rejected\n+ ):\n return redirect(\"profile\", request.me.slug)\n \n if request.method == \"POST\":\n@@ -34,7 +37,9 @@\n user.save()\n \n # create intro post\n- intro_post = Post.upsert_user_intro(user, form.cleaned_data[\"intro\"], is_visible=False)\n+ intro_post = Post.upsert_user_intro(\n+ user, form.cleaned_data[\"intro\"], is_visible=False\n+ )\n \n # notify moderators to review profile\n async_task(notify_profile_needs_review, user, intro_post)\n@@ -71,6 +76,7 @@\n active_tags = {t.tag_id for t in UserTag.objects.filter(user=user).all()}\n achievements = UserBadge.objects.filter(user=user)[:8]\n expertises = UserExpertise.objects.filter(user=user).all()\n+ comments = Comment.visible_objects().filter(author=user).order_by(\"-created_at\")[:5]\n posts = Post.objects_for_user(request.me)\\\n .filter(author=user, is_visible=True)\\\n .exclude(type__in=[Post.TYPE_INTRO, Post.TYPE_PROJECT])\n@@ -83,6 +89,7 @@\n \"active_tags\": active_tags,\n \"achievements\": achievements,\n \"expertises\": expertises,\n+ \"comments\": comments,\n \"posts\": paginate(request, posts),\n })\n \n@@ -132,11 +139,7 @@\n tag = get_object_or_404(Tag, code=tag_code)\n \n user_tag, is_created = UserTag.objects.get_or_create(\n- user=request.me,\n- tag=tag,\n- defaults=dict(\n- name=tag.name\n- )\n+ user=request.me, tag=tag, defaults=dict(name=tag.name)\n )\n \n if not is_created:\n@@ -156,7 +159,9 @@\n if form.is_valid():\n user_expertise = form.save(commit=False)\n user_expertise.user = request.me\n- UserExpertise.objects.filter(user=request.me, expertise=user_expertise.expertise).delete()\n+ UserExpertise.objects.filter(\n+ user=request.me, expertise=user_expertise.expertise\n+ ).delete()\n user_expertise.save()\n return {\n \"status\": \"created\",\n@@ -177,9 +182,7 @@\n UserExpertise.objects.filter(user=request.me, expertise=expertise).delete()\n return {\n \"status\": \"deleted\",\n- \"expertise\": {\n- \"expertise\": expertise,\n- },\n+ \"expertise\": {\"expertise\": expertise,},\n }\n \n return {\"status\": \"tipidor\"}\n", "issue": "\u041e\u0442\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435 \u043c\u043e\u0438\u0445 \u043a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u0435\u0432 \u0432 \u043b\u0438\u0447\u043d\u043e\u043c \u043a\u0430\u0431\u0438\u043d\u0435\u0442\u0435\n\u0425\u043e\u0447\u0443 \u0438\u043c\u0435\u0442\u044c \u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e\u0441\u0442\u044c \u0432\u0438\u0434\u0435\u0442\u044c \u0432\u0441\u0451, \u0447\u0442\u043e \u044f \u043d\u0430\u043f\u0438\u0441\u0430\u043b \u043f\u043e\u0434 \u0441\u0432\u043e\u0438\u043c\u0438 \u0438 \u0447\u0443\u0436\u0438\u043c\u0438 \u043f\u043e\u0441\u0442\u0430\u043c\u0438, \u0441\u0435\u0439\u0447\u0430\u0441 \u043d\u0435\u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e 
\u043f\u043e\u043d\u044f\u0442\u044c, \u0447\u0442\u043e \u0438 \u0433\u0434\u0435 \u044f \u043f\u0438\u0441\u0430\u043b, \u044d\u0442\u043e \u043d\u0435 \u043e\u0447\u0435\u043d\u044c \u0443\u0434\u043e\u0431\u043d\u043e.\n", "before_files": [{"content": "from django.http import Http404\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django_q.tasks import async_task\n\nfrom auth.helpers import auth_required, moderator_role_required\nfrom common.pagination import paginate\nfrom common.request import ajax_request\nfrom notifications.telegram.users import notify_profile_needs_review\nfrom posts.models import Post\nfrom search.models import SearchIndex\nfrom users.admin import do_user_admin_actions\nfrom users.forms.admin import UserAdminForm\nfrom users.forms.intro import UserIntroForm\nfrom users.forms.profile import UserEditForm, ExpertiseForm\nfrom users.models import User, UserBadge, UserExpertise, UserTag, Tag\n\n\n@auth_required\ndef intro(request):\n if request.me.is_profile_complete \\\n and request.me.is_profile_reviewed \\\n and not request.me.is_profile_rejected:\n return redirect(\"profile\", request.me.slug)\n\n if request.method == \"POST\":\n form = UserIntroForm(request.POST, request.FILES, instance=request.me)\n if form.is_valid():\n user = form.save(commit=False)\n\n # send to moderation\n user.is_profile_complete = True\n user.is_profile_reviewed = False\n user.is_profile_rejected = False\n user.save()\n\n # create intro post\n intro_post = Post.upsert_user_intro(user, form.cleaned_data[\"intro\"], is_visible=False)\n\n # notify moderators to review profile\n async_task(notify_profile_needs_review, user, intro_post)\n\n return redirect(\"on_review\")\n else:\n existing_intro = Post.get_user_intro(request.me)\n form = UserIntroForm(\n instance=request.me,\n initial={\"intro\": existing_intro.text if existing_intro else \"\"},\n )\n\n return render(request, \"users/intro.html\", {\"form\": form})\n\n\n@auth_required\ndef profile(request, user_slug):\n user = get_object_or_404(User, slug=user_slug)\n\n if not request.me.is_moderator:\n # hide unverified users\n if not user.is_profile_complete or not user.is_profile_complete or user.is_profile_rejected:\n raise Http404()\n\n if user.id == request.me.id:\n goto = request.GET.get(\"goto\")\n if goto:\n return redirect(goto)\n\n tags = Tag.objects.filter(is_visible=True).all()\n\n intro = Post.get_user_intro(user)\n projects = Post.objects.filter(author=user, type=Post.TYPE_PROJECT).all()\n active_tags = {t.tag_id for t in UserTag.objects.filter(user=user).all()}\n achievements = UserBadge.objects.filter(user=user)[:8]\n expertises = UserExpertise.objects.filter(user=user).all()\n posts = Post.objects_for_user(request.me)\\\n .filter(author=user, is_visible=True)\\\n .exclude(type__in=[Post.TYPE_INTRO, Post.TYPE_PROJECT])\n\n return render(request, \"users/profile.html\", {\n \"user\": user,\n \"intro\": intro,\n \"projects\": projects,\n \"tags\": tags,\n \"active_tags\": active_tags,\n \"achievements\": achievements,\n \"expertises\": expertises,\n \"posts\": paginate(request, posts),\n })\n\n\n@auth_required\ndef edit_profile(request, user_slug):\n user = get_object_or_404(User, slug=user_slug)\n if user.id != request.me.id and not request.me.is_moderator:\n raise Http404()\n\n if request.method == \"POST\":\n form = UserEditForm(request.POST, request.FILES, instance=user)\n if form.is_valid():\n user = form.save(commit=False)\n user.save()\n\n SearchIndex.update_user_index(user)\n\n return 
redirect(\"profile\", user.slug)\n else:\n form = UserEditForm(instance=user)\n\n return render(request, \"users/edit.html\", {\"form\": form})\n\n\n@auth_required\n@moderator_role_required\ndef admin_profile(request, user_slug):\n user = get_object_or_404(User, slug=user_slug)\n\n if request.method == \"POST\":\n form = UserAdminForm(request.POST, request.FILES)\n if form.is_valid():\n return do_user_admin_actions(request, user, form.cleaned_data)\n else:\n form = UserAdminForm()\n\n return render(request, \"users/admin.html\", {\"user\": user, \"form\": form})\n\n\n@auth_required\n@ajax_request\ndef toggle_tag(request, tag_code):\n if request.method != \"POST\":\n raise Http404()\n\n tag = get_object_or_404(Tag, code=tag_code)\n\n user_tag, is_created = UserTag.objects.get_or_create(\n user=request.me,\n tag=tag,\n defaults=dict(\n name=tag.name\n )\n )\n\n if not is_created:\n user_tag.delete()\n\n return {\n \"status\": \"created\" if is_created else \"deleted\",\n \"tag\": {\"code\": tag.code, \"name\": tag.name, \"color\": tag.color},\n }\n\n\n@auth_required\n@ajax_request\ndef add_expertise(request):\n if request.method == \"POST\":\n form = ExpertiseForm(request.POST)\n if form.is_valid():\n user_expertise = form.save(commit=False)\n user_expertise.user = request.me\n UserExpertise.objects.filter(user=request.me, expertise=user_expertise.expertise).delete()\n user_expertise.save()\n return {\n \"status\": \"created\",\n \"expertise\": {\n \"name\": user_expertise.name,\n \"expertise\": user_expertise.expertise,\n \"value\": user_expertise.value,\n },\n }\n\n return {\"status\": \"tipidor\"}\n\n\n@auth_required\n@ajax_request\ndef delete_expertise(request, expertise):\n if request.method == \"POST\":\n UserExpertise.objects.filter(user=request.me, expertise=expertise).delete()\n return {\n \"status\": \"deleted\",\n \"expertise\": {\n \"expertise\": expertise,\n },\n }\n\n return {\"status\": \"tipidor\"}\n\n\n@auth_required\ndef on_review(request):\n if request.me.is_profile_reviewed:\n return redirect(\"profile\", request.me.slug)\n return render(request, \"users/messages/on_review.html\")\n\n\n@auth_required\ndef rejected(request):\n return render(request, \"users/messages/rejected.html\")\n\n\n@auth_required\ndef banned(request):\n return render(request, \"users/messages/banned.html\")\n", "path": "users/views.py"}], "after_files": [{"content": "from django.http import Http404\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django_q.tasks import async_task\n\nfrom auth.helpers import auth_required, moderator_role_required\nfrom common.pagination import paginate\nfrom common.request import ajax_request\nfrom notifications.telegram.users import notify_profile_needs_review\nfrom posts.models import Post\nfrom comments.models import Comment\nfrom search.models import SearchIndex\nfrom users.admin import do_user_admin_actions\nfrom users.forms.admin import UserAdminForm\nfrom users.forms.intro import UserIntroForm\nfrom users.forms.profile import UserEditForm, ExpertiseForm\nfrom users.models import User, UserBadge, UserExpertise, UserTag, Tag\n\n\n@auth_required\ndef intro(request):\n if (\n request.me.is_profile_complete\n and request.me.is_profile_reviewed\n and not request.me.is_profile_rejected\n ):\n return redirect(\"profile\", request.me.slug)\n\n if request.method == \"POST\":\n form = UserIntroForm(request.POST, request.FILES, instance=request.me)\n if form.is_valid():\n user = form.save(commit=False)\n\n # send to moderation\n 
user.is_profile_complete = True\n user.is_profile_reviewed = False\n user.is_profile_rejected = False\n user.save()\n\n # create intro post\n intro_post = Post.upsert_user_intro(\n user, form.cleaned_data[\"intro\"], is_visible=False\n )\n\n # notify moderators to review profile\n async_task(notify_profile_needs_review, user, intro_post)\n\n return redirect(\"on_review\")\n else:\n existing_intro = Post.get_user_intro(request.me)\n form = UserIntroForm(\n instance=request.me,\n initial={\"intro\": existing_intro.text if existing_intro else \"\"},\n )\n\n return render(request, \"users/intro.html\", {\"form\": form})\n\n\n@auth_required\ndef profile(request, user_slug):\n user = get_object_or_404(User, slug=user_slug)\n\n if not request.me.is_moderator:\n # hide unverified users\n if not user.is_profile_complete or not user.is_profile_complete or user.is_profile_rejected:\n raise Http404()\n\n if user.id == request.me.id:\n goto = request.GET.get(\"goto\")\n if goto:\n return redirect(goto)\n\n tags = Tag.objects.filter(is_visible=True).all()\n\n intro = Post.get_user_intro(user)\n projects = Post.objects.filter(author=user, type=Post.TYPE_PROJECT).all()\n active_tags = {t.tag_id for t in UserTag.objects.filter(user=user).all()}\n achievements = UserBadge.objects.filter(user=user)[:8]\n expertises = UserExpertise.objects.filter(user=user).all()\n comments = Comment.visible_objects().filter(author=user).order_by(\"-created_at\")[:5]\n posts = Post.objects_for_user(request.me)\\\n .filter(author=user, is_visible=True)\\\n .exclude(type__in=[Post.TYPE_INTRO, Post.TYPE_PROJECT])\n\n return render(request, \"users/profile.html\", {\n \"user\": user,\n \"intro\": intro,\n \"projects\": projects,\n \"tags\": tags,\n \"active_tags\": active_tags,\n \"achievements\": achievements,\n \"expertises\": expertises,\n \"comments\": comments,\n \"posts\": paginate(request, posts),\n })\n\n\n@auth_required\ndef edit_profile(request, user_slug):\n user = get_object_or_404(User, slug=user_slug)\n if user.id != request.me.id and not request.me.is_moderator:\n raise Http404()\n\n if request.method == \"POST\":\n form = UserEditForm(request.POST, request.FILES, instance=user)\n if form.is_valid():\n user = form.save(commit=False)\n user.save()\n\n SearchIndex.update_user_index(user)\n\n return redirect(\"profile\", user.slug)\n else:\n form = UserEditForm(instance=user)\n\n return render(request, \"users/edit.html\", {\"form\": form})\n\n\n@auth_required\n@moderator_role_required\ndef admin_profile(request, user_slug):\n user = get_object_or_404(User, slug=user_slug)\n\n if request.method == \"POST\":\n form = UserAdminForm(request.POST, request.FILES)\n if form.is_valid():\n return do_user_admin_actions(request, user, form.cleaned_data)\n else:\n form = UserAdminForm()\n\n return render(request, \"users/admin.html\", {\"user\": user, \"form\": form})\n\n\n@auth_required\n@ajax_request\ndef toggle_tag(request, tag_code):\n if request.method != \"POST\":\n raise Http404()\n\n tag = get_object_or_404(Tag, code=tag_code)\n\n user_tag, is_created = UserTag.objects.get_or_create(\n user=request.me, tag=tag, defaults=dict(name=tag.name)\n )\n\n if not is_created:\n user_tag.delete()\n\n return {\n \"status\": \"created\" if is_created else \"deleted\",\n \"tag\": {\"code\": tag.code, \"name\": tag.name, \"color\": tag.color},\n }\n\n\n@auth_required\n@ajax_request\ndef add_expertise(request):\n if request.method == \"POST\":\n form = ExpertiseForm(request.POST)\n if form.is_valid():\n user_expertise = 
form.save(commit=False)\n user_expertise.user = request.me\n UserExpertise.objects.filter(\n user=request.me, expertise=user_expertise.expertise\n ).delete()\n user_expertise.save()\n return {\n \"status\": \"created\",\n \"expertise\": {\n \"name\": user_expertise.name,\n \"expertise\": user_expertise.expertise,\n \"value\": user_expertise.value,\n },\n }\n\n return {\"status\": \"tipidor\"}\n\n\n@auth_required\n@ajax_request\ndef delete_expertise(request, expertise):\n if request.method == \"POST\":\n UserExpertise.objects.filter(user=request.me, expertise=expertise).delete()\n return {\n \"status\": \"deleted\",\n \"expertise\": {\"expertise\": expertise,},\n }\n\n return {\"status\": \"tipidor\"}\n\n\n@auth_required\ndef on_review(request):\n if request.me.is_profile_reviewed:\n return redirect(\"profile\", request.me.slug)\n return render(request, \"users/messages/on_review.html\")\n\n\n@auth_required\ndef rejected(request):\n return render(request, \"users/messages/rejected.html\")\n\n\n@auth_required\ndef banned(request):\n return render(request, \"users/messages/banned.html\")\n", "path": "users/views.py"}]}
2,209
754
gh_patches_debug_28429
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-542
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Choice prompts are disabled if there's a ~/.cookiecutterrc Seems I don't get the nice prompts from https://github.com/audreyr/cookiecutter/pull/455 if I have a `~/.cookiecutterrc` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cookiecutter/generate.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """ 5 cookiecutter.generate 6 --------------------- 7 8 Functions for generating a project from a project template. 9 """ 10 from __future__ import unicode_literals 11 from collections import OrderedDict 12 import fnmatch 13 import io 14 import json 15 import logging 16 import os 17 import shutil 18 19 from jinja2 import FileSystemLoader, Template 20 from jinja2.environment import Environment 21 from jinja2.exceptions import TemplateSyntaxError 22 from binaryornot.check import is_binary 23 24 from .exceptions import ( 25 NonTemplatedInputDirException, 26 ContextDecodingException, 27 OutputDirExistsException 28 ) 29 from .find import find_template 30 from .utils import make_sure_path_exists, work_in 31 from .hooks import run_hook 32 33 34 def copy_without_render(path, context): 35 """ 36 Returns True if `path` matches some pattern in the 37 `_copy_without_render` context setting. 38 39 :param path: A file-system path referring to a file or dir that 40 should be rendered or just copied. 41 :param context: cookiecutter context. 42 """ 43 try: 44 for dont_render in context['cookiecutter']['_copy_without_render']: 45 if fnmatch.fnmatch(path, dont_render): 46 return True 47 except KeyError: 48 return False 49 50 return False 51 52 53 def generate_context(context_file='cookiecutter.json', default_context=None, 54 extra_context=None): 55 """ 56 Generates the context for a Cookiecutter project template. 57 Loads the JSON file as a Python object, with key being the JSON filename. 58 59 :param context_file: JSON file containing key/value pairs for populating 60 the cookiecutter's variables. 61 :param default_context: Dictionary containing config to take into account. 62 :param extra_context: Dictionary containing configuration overrides 63 """ 64 65 context = {} 66 67 file_handle = open(context_file) 68 try: 69 obj = json.load(file_handle, object_pairs_hook=OrderedDict) 70 except ValueError as e: 71 # JSON decoding error. Let's throw a new exception that is more 72 # friendly for the developer or user. 73 full_fpath = os.path.abspath(context_file) 74 json_exc_message = str(e) 75 our_exc_message = ( 76 'JSON decoding error while loading "{0}". Decoding' 77 ' error details: "{1}"'.format(full_fpath, json_exc_message)) 78 raise ContextDecodingException(our_exc_message) 79 80 # Add the Python object to the context dictionary 81 file_name = os.path.split(context_file)[1] 82 file_stem = file_name.split('.')[0] 83 context[file_stem] = obj 84 85 # Overwrite context variable defaults with the default context from the 86 # user's global config, if available 87 if default_context: 88 obj.update(default_context) 89 if extra_context: 90 obj.update(extra_context) 91 92 logging.debug('Context generated is {0}'.format(context)) 93 return context 94 95 96 def generate_file(project_dir, infile, context, env): 97 """ 98 1. Render the filename of infile as the name of outfile. 99 2. Deal with infile appropriately: 100 101 a. If infile is a binary file, copy it over without rendering. 102 b. 
If infile is a text file, render its contents and write the 103 rendered infile to outfile. 104 105 Precondition: 106 107 When calling `generate_file()`, the root template dir must be the 108 current working directory. Using `utils.work_in()` is the recommended 109 way to perform this directory change. 110 111 :param project_dir: Absolute path to the resulting generated project. 112 :param infile: Input file to generate the file from. Relative to the root 113 template dir. 114 :param context: Dict for populating the cookiecutter's variables. 115 :param env: Jinja2 template execution environment. 116 """ 117 118 logging.debug('Generating file {0}'.format(infile)) 119 120 # Render the path to the output file (not including the root project dir) 121 outfile_tmpl = Template(infile) 122 outfile = os.path.join(project_dir, outfile_tmpl.render(**context)) 123 logging.debug('outfile is {0}'.format(outfile)) 124 125 # Just copy over binary files. Don't render. 126 logging.debug("Check {0} to see if it's a binary".format(infile)) 127 if is_binary(infile): 128 logging.debug('Copying binary {0} to {1} without rendering' 129 .format(infile, outfile)) 130 shutil.copyfile(infile, outfile) 131 else: 132 # Force fwd slashes on Windows for get_template 133 # This is a by-design Jinja issue 134 infile_fwd_slashes = infile.replace(os.path.sep, '/') 135 136 # Render the file 137 try: 138 tmpl = env.get_template(infile_fwd_slashes) 139 except TemplateSyntaxError as exception: 140 # Disable translated so that printed exception contains verbose 141 # information about syntax error location 142 exception.translated = False 143 raise 144 rendered_file = tmpl.render(**context) 145 146 logging.debug('Writing {0}'.format(outfile)) 147 148 with io.open(outfile, 'w', encoding='utf-8') as fh: 149 fh.write(rendered_file) 150 151 # Apply file permissions to output file 152 shutil.copymode(infile, outfile) 153 154 155 def render_and_create_dir(dirname, context, output_dir, 156 overwrite_if_exists=False): 157 """ 158 Renders the name of a directory, creates the directory, and 159 returns its path. 160 """ 161 162 name_tmpl = Template(dirname) 163 rendered_dirname = name_tmpl.render(**context) 164 logging.debug('Rendered dir {0} must exist in output_dir {1}'.format( 165 rendered_dirname, 166 output_dir 167 )) 168 dir_to_create = os.path.normpath( 169 os.path.join(output_dir, rendered_dirname) 170 ) 171 172 output_dir_exists = os.path.exists(dir_to_create) 173 174 if overwrite_if_exists: 175 if output_dir_exists: 176 logging.debug('Output directory {} already exists,' 177 'overwriting it'.format(dir_to_create)) 178 else: 179 if output_dir_exists: 180 msg = 'Error: "{}" directory already exists'.format(dir_to_create) 181 raise OutputDirExistsException(msg) 182 183 make_sure_path_exists(dir_to_create) 184 return dir_to_create 185 186 187 def ensure_dir_is_templated(dirname): 188 """ 189 Ensures that dirname is a templated directory name. 190 """ 191 if '{{' in dirname and '}}' in dirname: 192 return True 193 else: 194 raise NonTemplatedInputDirException 195 196 197 def generate_files(repo_dir, context=None, output_dir='.', 198 overwrite_if_exists=False): 199 """ 200 Renders the templates and saves them to files. 201 202 :param repo_dir: Project template input directory. 203 :param context: Dict for populating the template's variables. 204 :param output_dir: Where to output the generated project dir into. 
205 :param overwrite_if_exists: Overwrite the contents of the output directory 206 if it exists 207 """ 208 209 template_dir = find_template(repo_dir) 210 logging.debug('Generating project from {0}...'.format(template_dir)) 211 context = context or {} 212 213 unrendered_dir = os.path.split(template_dir)[1] 214 ensure_dir_is_templated(unrendered_dir) 215 project_dir = render_and_create_dir(unrendered_dir, 216 context, 217 output_dir, 218 overwrite_if_exists) 219 220 # We want the Jinja path and the OS paths to match. Consequently, we'll: 221 # + CD to the template folder 222 # + Set Jinja's path to '.' 223 # 224 # In order to build our files to the correct folder(s), we'll use an 225 # absolute path for the target folder (project_dir) 226 227 project_dir = os.path.abspath(project_dir) 228 logging.debug('project_dir is {0}'.format(project_dir)) 229 230 # run pre-gen hook from repo_dir 231 with work_in(repo_dir): 232 run_hook('pre_gen_project', project_dir, context) 233 234 with work_in(template_dir): 235 env = Environment(keep_trailing_newline=True) 236 env.loader = FileSystemLoader('.') 237 238 for root, dirs, files in os.walk('.'): 239 # We must separate the two types of dirs into different lists. 240 # The reason is that we don't want ``os.walk`` to go through the 241 # unrendered directories, since they will just be copied. 242 copy_dirs = [] 243 render_dirs = [] 244 245 for d in dirs: 246 d_ = os.path.normpath(os.path.join(root, d)) 247 # We check the full path, because that's how it can be 248 # specified in the ``_copy_without_render`` setting, but 249 # we store just the dir name 250 if copy_without_render(d_, context): 251 copy_dirs.append(d) 252 else: 253 render_dirs.append(d) 254 255 for copy_dir in copy_dirs: 256 indir = os.path.normpath(os.path.join(root, copy_dir)) 257 outdir = os.path.normpath(os.path.join(project_dir, indir)) 258 logging.debug( 259 'Copying dir {0} to {1} without rendering' 260 ''.format(indir, outdir) 261 ) 262 shutil.copytree(indir, outdir) 263 264 # We mutate ``dirs``, because we only want to go through these dirs 265 # recursively 266 dirs[:] = render_dirs 267 for d in dirs: 268 unrendered_dir = os.path.join(project_dir, root, d) 269 render_and_create_dir(unrendered_dir, context, output_dir, 270 overwrite_if_exists) 271 272 for f in files: 273 infile = os.path.normpath(os.path.join(root, f)) 274 if copy_without_render(infile, context): 275 outfile_tmpl = Template(infile) 276 outfile_rendered = outfile_tmpl.render(**context) 277 outfile = os.path.join(project_dir, outfile_rendered) 278 logging.debug( 279 'Copying file {0} to {1} without rendering' 280 ''.format(infile, outfile) 281 ) 282 shutil.copyfile(infile, outfile) 283 shutil.copymode(infile, outfile) 284 continue 285 logging.debug('f is {0}'.format(f)) 286 generate_file(project_dir, infile, context, env) 287 288 # run post-gen hook from repo_dir 289 with work_in(repo_dir): 290 run_hook('post_gen_project', project_dir, context) 291 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cookiecutter/generate.py b/cookiecutter/generate.py --- a/cookiecutter/generate.py +++ b/cookiecutter/generate.py @@ -50,6 +50,28 @@ return False +def apply_overwrites_to_context(context, overwrite_context): + """Modify the given context in place based on the overwrite_context.""" + for variable, overwrite in overwrite_context.items(): + if variable not in context: + # Do not include variables which are not used in the template + continue + + context_value = context[variable] + + if isinstance(context_value, list): + # We are dealing with a choice variable + if overwrite in context_value: + # This overwrite is actually valid for the given context + # Let's set it as default (by definition first item in list) + # see ``cookiecutter.prompt.prompt_choice_for_config`` + context_value.remove(overwrite) + context_value.insert(0, overwrite) + else: + # Simply overwrite the value for this variable + context[variable] = overwrite + + def generate_context(context_file='cookiecutter.json', default_context=None, extra_context=None): """ @@ -85,9 +107,9 @@ # Overwrite context variable defaults with the default context from the # user's global config, if available if default_context: - obj.update(default_context) + apply_overwrites_to_context(obj, default_context) if extra_context: - obj.update(extra_context) + apply_overwrites_to_context(obj, extra_context) logging.debug('Context generated is {0}'.format(context)) return context
{"golden_diff": "diff --git a/cookiecutter/generate.py b/cookiecutter/generate.py\n--- a/cookiecutter/generate.py\n+++ b/cookiecutter/generate.py\n@@ -50,6 +50,28 @@\n return False\n \n \n+def apply_overwrites_to_context(context, overwrite_context):\n+ \"\"\"Modify the given context in place based on the overwrite_context.\"\"\"\n+ for variable, overwrite in overwrite_context.items():\n+ if variable not in context:\n+ # Do not include variables which are not used in the template\n+ continue\n+\n+ context_value = context[variable]\n+\n+ if isinstance(context_value, list):\n+ # We are dealing with a choice variable\n+ if overwrite in context_value:\n+ # This overwrite is actually valid for the given context\n+ # Let's set it as default (by definition first item in list)\n+ # see ``cookiecutter.prompt.prompt_choice_for_config``\n+ context_value.remove(overwrite)\n+ context_value.insert(0, overwrite)\n+ else:\n+ # Simply overwrite the value for this variable\n+ context[variable] = overwrite\n+\n+\n def generate_context(context_file='cookiecutter.json', default_context=None,\n extra_context=None):\n \"\"\"\n@@ -85,9 +107,9 @@\n # Overwrite context variable defaults with the default context from the\n # user's global config, if available\n if default_context:\n- obj.update(default_context)\n+ apply_overwrites_to_context(obj, default_context)\n if extra_context:\n- obj.update(extra_context)\n+ apply_overwrites_to_context(obj, extra_context)\n \n logging.debug('Context generated is {0}'.format(context))\n return context\n", "issue": "Choice prompts are disabled if there's a ~/.cookiecutterrc\nSeems I don't get the nice prompts from https://github.com/audreyr/cookiecutter/pull/455 if I have a `~/.cookiecutterrc`\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.generate\n---------------------\n\nFunctions for generating a project from a project template.\n\"\"\"\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\nimport fnmatch\nimport io\nimport json\nimport logging\nimport os\nimport shutil\n\nfrom jinja2 import FileSystemLoader, Template\nfrom jinja2.environment import Environment\nfrom jinja2.exceptions import TemplateSyntaxError\nfrom binaryornot.check import is_binary\n\nfrom .exceptions import (\n NonTemplatedInputDirException,\n ContextDecodingException,\n OutputDirExistsException\n)\nfrom .find import find_template\nfrom .utils import make_sure_path_exists, work_in\nfrom .hooks import run_hook\n\n\ndef copy_without_render(path, context):\n \"\"\"\n Returns True if `path` matches some pattern in the\n `_copy_without_render` context setting.\n\n :param path: A file-system path referring to a file or dir that\n should be rendered or just copied.\n :param context: cookiecutter context.\n \"\"\"\n try:\n for dont_render in context['cookiecutter']['_copy_without_render']:\n if fnmatch.fnmatch(path, dont_render):\n return True\n except KeyError:\n return False\n\n return False\n\n\ndef generate_context(context_file='cookiecutter.json', default_context=None,\n extra_context=None):\n \"\"\"\n Generates the context for a Cookiecutter project template.\n Loads the JSON file as a Python object, with key being the JSON filename.\n\n :param context_file: JSON file containing key/value pairs for populating\n the cookiecutter's variables.\n :param default_context: Dictionary containing config to take into account.\n :param extra_context: Dictionary containing configuration overrides\n \"\"\"\n\n context = {}\n\n 
file_handle = open(context_file)\n try:\n obj = json.load(file_handle, object_pairs_hook=OrderedDict)\n except ValueError as e:\n # JSON decoding error. Let's throw a new exception that is more\n # friendly for the developer or user.\n full_fpath = os.path.abspath(context_file)\n json_exc_message = str(e)\n our_exc_message = (\n 'JSON decoding error while loading \"{0}\". Decoding'\n ' error details: \"{1}\"'.format(full_fpath, json_exc_message))\n raise ContextDecodingException(our_exc_message)\n\n # Add the Python object to the context dictionary\n file_name = os.path.split(context_file)[1]\n file_stem = file_name.split('.')[0]\n context[file_stem] = obj\n\n # Overwrite context variable defaults with the default context from the\n # user's global config, if available\n if default_context:\n obj.update(default_context)\n if extra_context:\n obj.update(extra_context)\n\n logging.debug('Context generated is {0}'.format(context))\n return context\n\n\ndef generate_file(project_dir, infile, context, env):\n \"\"\"\n 1. Render the filename of infile as the name of outfile.\n 2. Deal with infile appropriately:\n\n a. If infile is a binary file, copy it over without rendering.\n b. If infile is a text file, render its contents and write the\n rendered infile to outfile.\n\n Precondition:\n\n When calling `generate_file()`, the root template dir must be the\n current working directory. Using `utils.work_in()` is the recommended\n way to perform this directory change.\n\n :param project_dir: Absolute path to the resulting generated project.\n :param infile: Input file to generate the file from. Relative to the root\n template dir.\n :param context: Dict for populating the cookiecutter's variables.\n :param env: Jinja2 template execution environment.\n \"\"\"\n\n logging.debug('Generating file {0}'.format(infile))\n\n # Render the path to the output file (not including the root project dir)\n outfile_tmpl = Template(infile)\n outfile = os.path.join(project_dir, outfile_tmpl.render(**context))\n logging.debug('outfile is {0}'.format(outfile))\n\n # Just copy over binary files. 
Don't render.\n logging.debug(\"Check {0} to see if it's a binary\".format(infile))\n if is_binary(infile):\n logging.debug('Copying binary {0} to {1} without rendering'\n .format(infile, outfile))\n shutil.copyfile(infile, outfile)\n else:\n # Force fwd slashes on Windows for get_template\n # This is a by-design Jinja issue\n infile_fwd_slashes = infile.replace(os.path.sep, '/')\n\n # Render the file\n try:\n tmpl = env.get_template(infile_fwd_slashes)\n except TemplateSyntaxError as exception:\n # Disable translated so that printed exception contains verbose\n # information about syntax error location\n exception.translated = False\n raise\n rendered_file = tmpl.render(**context)\n\n logging.debug('Writing {0}'.format(outfile))\n\n with io.open(outfile, 'w', encoding='utf-8') as fh:\n fh.write(rendered_file)\n\n # Apply file permissions to output file\n shutil.copymode(infile, outfile)\n\n\ndef render_and_create_dir(dirname, context, output_dir,\n overwrite_if_exists=False):\n \"\"\"\n Renders the name of a directory, creates the directory, and\n returns its path.\n \"\"\"\n\n name_tmpl = Template(dirname)\n rendered_dirname = name_tmpl.render(**context)\n logging.debug('Rendered dir {0} must exist in output_dir {1}'.format(\n rendered_dirname,\n output_dir\n ))\n dir_to_create = os.path.normpath(\n os.path.join(output_dir, rendered_dirname)\n )\n\n output_dir_exists = os.path.exists(dir_to_create)\n\n if overwrite_if_exists:\n if output_dir_exists:\n logging.debug('Output directory {} already exists,'\n 'overwriting it'.format(dir_to_create))\n else:\n if output_dir_exists:\n msg = 'Error: \"{}\" directory already exists'.format(dir_to_create)\n raise OutputDirExistsException(msg)\n\n make_sure_path_exists(dir_to_create)\n return dir_to_create\n\n\ndef ensure_dir_is_templated(dirname):\n \"\"\"\n Ensures that dirname is a templated directory name.\n \"\"\"\n if '{{' in dirname and '}}' in dirname:\n return True\n else:\n raise NonTemplatedInputDirException\n\n\ndef generate_files(repo_dir, context=None, output_dir='.',\n overwrite_if_exists=False):\n \"\"\"\n Renders the templates and saves them to files.\n\n :param repo_dir: Project template input directory.\n :param context: Dict for populating the template's variables.\n :param output_dir: Where to output the generated project dir into.\n :param overwrite_if_exists: Overwrite the contents of the output directory\n if it exists\n \"\"\"\n\n template_dir = find_template(repo_dir)\n logging.debug('Generating project from {0}...'.format(template_dir))\n context = context or {}\n\n unrendered_dir = os.path.split(template_dir)[1]\n ensure_dir_is_templated(unrendered_dir)\n project_dir = render_and_create_dir(unrendered_dir,\n context,\n output_dir,\n overwrite_if_exists)\n\n # We want the Jinja path and the OS paths to match. 
Consequently, we'll:\n # + CD to the template folder\n # + Set Jinja's path to '.'\n #\n # In order to build our files to the correct folder(s), we'll use an\n # absolute path for the target folder (project_dir)\n\n project_dir = os.path.abspath(project_dir)\n logging.debug('project_dir is {0}'.format(project_dir))\n\n # run pre-gen hook from repo_dir\n with work_in(repo_dir):\n run_hook('pre_gen_project', project_dir, context)\n\n with work_in(template_dir):\n env = Environment(keep_trailing_newline=True)\n env.loader = FileSystemLoader('.')\n\n for root, dirs, files in os.walk('.'):\n # We must separate the two types of dirs into different lists.\n # The reason is that we don't want ``os.walk`` to go through the\n # unrendered directories, since they will just be copied.\n copy_dirs = []\n render_dirs = []\n\n for d in dirs:\n d_ = os.path.normpath(os.path.join(root, d))\n # We check the full path, because that's how it can be\n # specified in the ``_copy_without_render`` setting, but\n # we store just the dir name\n if copy_without_render(d_, context):\n copy_dirs.append(d)\n else:\n render_dirs.append(d)\n\n for copy_dir in copy_dirs:\n indir = os.path.normpath(os.path.join(root, copy_dir))\n outdir = os.path.normpath(os.path.join(project_dir, indir))\n logging.debug(\n 'Copying dir {0} to {1} without rendering'\n ''.format(indir, outdir)\n )\n shutil.copytree(indir, outdir)\n\n # We mutate ``dirs``, because we only want to go through these dirs\n # recursively\n dirs[:] = render_dirs\n for d in dirs:\n unrendered_dir = os.path.join(project_dir, root, d)\n render_and_create_dir(unrendered_dir, context, output_dir,\n overwrite_if_exists)\n\n for f in files:\n infile = os.path.normpath(os.path.join(root, f))\n if copy_without_render(infile, context):\n outfile_tmpl = Template(infile)\n outfile_rendered = outfile_tmpl.render(**context)\n outfile = os.path.join(project_dir, outfile_rendered)\n logging.debug(\n 'Copying file {0} to {1} without rendering'\n ''.format(infile, outfile)\n )\n shutil.copyfile(infile, outfile)\n shutil.copymode(infile, outfile)\n continue\n logging.debug('f is {0}'.format(f))\n generate_file(project_dir, infile, context, env)\n\n # run post-gen hook from repo_dir\n with work_in(repo_dir):\n run_hook('post_gen_project', project_dir, context)\n", "path": "cookiecutter/generate.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.generate\n---------------------\n\nFunctions for generating a project from a project template.\n\"\"\"\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\nimport fnmatch\nimport io\nimport json\nimport logging\nimport os\nimport shutil\n\nfrom jinja2 import FileSystemLoader, Template\nfrom jinja2.environment import Environment\nfrom jinja2.exceptions import TemplateSyntaxError\nfrom binaryornot.check import is_binary\n\nfrom .exceptions import (\n NonTemplatedInputDirException,\n ContextDecodingException,\n OutputDirExistsException\n)\nfrom .find import find_template\nfrom .utils import make_sure_path_exists, work_in\nfrom .hooks import run_hook\n\n\ndef copy_without_render(path, context):\n \"\"\"\n Returns True if `path` matches some pattern in the\n `_copy_without_render` context setting.\n\n :param path: A file-system path referring to a file or dir that\n should be rendered or just copied.\n :param context: cookiecutter context.\n \"\"\"\n try:\n for dont_render in context['cookiecutter']['_copy_without_render']:\n if fnmatch.fnmatch(path, dont_render):\n 
return True\n except KeyError:\n return False\n\n return False\n\n\ndef apply_overwrites_to_context(context, overwrite_context):\n \"\"\"Modify the given context in place based on the overwrite_context.\"\"\"\n for variable, overwrite in overwrite_context.items():\n if variable not in context:\n # Do not include variables which are not used in the template\n continue\n\n context_value = context[variable]\n\n if isinstance(context_value, list):\n # We are dealing with a choice variable\n if overwrite in context_value:\n # This overwrite is actually valid for the given context\n # Let's set it as default (by definition first item in list)\n # see ``cookiecutter.prompt.prompt_choice_for_config``\n context_value.remove(overwrite)\n context_value.insert(0, overwrite)\n else:\n # Simply overwrite the value for this variable\n context[variable] = overwrite\n\n\ndef generate_context(context_file='cookiecutter.json', default_context=None,\n extra_context=None):\n \"\"\"\n Generates the context for a Cookiecutter project template.\n Loads the JSON file as a Python object, with key being the JSON filename.\n\n :param context_file: JSON file containing key/value pairs for populating\n the cookiecutter's variables.\n :param default_context: Dictionary containing config to take into account.\n :param extra_context: Dictionary containing configuration overrides\n \"\"\"\n\n context = {}\n\n file_handle = open(context_file)\n try:\n obj = json.load(file_handle, object_pairs_hook=OrderedDict)\n except ValueError as e:\n # JSON decoding error. Let's throw a new exception that is more\n # friendly for the developer or user.\n full_fpath = os.path.abspath(context_file)\n json_exc_message = str(e)\n our_exc_message = (\n 'JSON decoding error while loading \"{0}\". Decoding'\n ' error details: \"{1}\"'.format(full_fpath, json_exc_message))\n raise ContextDecodingException(our_exc_message)\n\n # Add the Python object to the context dictionary\n file_name = os.path.split(context_file)[1]\n file_stem = file_name.split('.')[0]\n context[file_stem] = obj\n\n # Overwrite context variable defaults with the default context from the\n # user's global config, if available\n if default_context:\n apply_overwrites_to_context(obj, default_context)\n if extra_context:\n apply_overwrites_to_context(obj, extra_context)\n\n logging.debug('Context generated is {0}'.format(context))\n return context\n\n\ndef generate_file(project_dir, infile, context, env):\n \"\"\"\n 1. Render the filename of infile as the name of outfile.\n 2. Deal with infile appropriately:\n\n a. If infile is a binary file, copy it over without rendering.\n b. If infile is a text file, render its contents and write the\n rendered infile to outfile.\n\n Precondition:\n\n When calling `generate_file()`, the root template dir must be the\n current working directory. Using `utils.work_in()` is the recommended\n way to perform this directory change.\n\n :param project_dir: Absolute path to the resulting generated project.\n :param infile: Input file to generate the file from. Relative to the root\n template dir.\n :param context: Dict for populating the cookiecutter's variables.\n :param env: Jinja2 template execution environment.\n \"\"\"\n\n logging.debug('Generating file {0}'.format(infile))\n\n # Render the path to the output file (not including the root project dir)\n outfile_tmpl = Template(infile)\n outfile = os.path.join(project_dir, outfile_tmpl.render(**context))\n logging.debug('outfile is {0}'.format(outfile))\n\n # Just copy over binary files. 
Don't render.\n logging.debug(\"Check {0} to see if it's a binary\".format(infile))\n if is_binary(infile):\n logging.debug('Copying binary {0} to {1} without rendering'\n .format(infile, outfile))\n shutil.copyfile(infile, outfile)\n else:\n # Force fwd slashes on Windows for get_template\n # This is a by-design Jinja issue\n infile_fwd_slashes = infile.replace(os.path.sep, '/')\n\n # Render the file\n try:\n tmpl = env.get_template(infile_fwd_slashes)\n except TemplateSyntaxError as exception:\n # Disable translated so that printed exception contains verbose\n # information about syntax error location\n exception.translated = False\n raise\n rendered_file = tmpl.render(**context)\n\n logging.debug('Writing {0}'.format(outfile))\n\n with io.open(outfile, 'w', encoding='utf-8') as fh:\n fh.write(rendered_file)\n\n # Apply file permissions to output file\n shutil.copymode(infile, outfile)\n\n\ndef render_and_create_dir(dirname, context, output_dir,\n overwrite_if_exists=False):\n \"\"\"\n Renders the name of a directory, creates the directory, and\n returns its path.\n \"\"\"\n\n name_tmpl = Template(dirname)\n rendered_dirname = name_tmpl.render(**context)\n logging.debug('Rendered dir {0} must exist in output_dir {1}'.format(\n rendered_dirname,\n output_dir\n ))\n dir_to_create = os.path.normpath(\n os.path.join(output_dir, rendered_dirname)\n )\n\n output_dir_exists = os.path.exists(dir_to_create)\n\n if overwrite_if_exists:\n if output_dir_exists:\n logging.debug('Output directory {} already exists,'\n 'overwriting it'.format(dir_to_create))\n else:\n if output_dir_exists:\n msg = 'Error: \"{}\" directory already exists'.format(dir_to_create)\n raise OutputDirExistsException(msg)\n\n make_sure_path_exists(dir_to_create)\n return dir_to_create\n\n\ndef ensure_dir_is_templated(dirname):\n \"\"\"\n Ensures that dirname is a templated directory name.\n \"\"\"\n if '{{' in dirname and '}}' in dirname:\n return True\n else:\n raise NonTemplatedInputDirException\n\n\ndef generate_files(repo_dir, context=None, output_dir='.',\n overwrite_if_exists=False):\n \"\"\"\n Renders the templates and saves them to files.\n\n :param repo_dir: Project template input directory.\n :param context: Dict for populating the template's variables.\n :param output_dir: Where to output the generated project dir into.\n :param overwrite_if_exists: Overwrite the contents of the output directory\n if it exists\n \"\"\"\n\n template_dir = find_template(repo_dir)\n logging.debug('Generating project from {0}...'.format(template_dir))\n context = context or {}\n\n unrendered_dir = os.path.split(template_dir)[1]\n ensure_dir_is_templated(unrendered_dir)\n project_dir = render_and_create_dir(unrendered_dir,\n context,\n output_dir,\n overwrite_if_exists)\n\n # We want the Jinja path and the OS paths to match. 
Consequently, we'll:\n # + CD to the template folder\n # + Set Jinja's path to '.'\n #\n # In order to build our files to the correct folder(s), we'll use an\n # absolute path for the target folder (project_dir)\n\n project_dir = os.path.abspath(project_dir)\n logging.debug('project_dir is {0}'.format(project_dir))\n\n # run pre-gen hook from repo_dir\n with work_in(repo_dir):\n run_hook('pre_gen_project', project_dir, context)\n\n with work_in(template_dir):\n env = Environment(keep_trailing_newline=True)\n env.loader = FileSystemLoader('.')\n\n for root, dirs, files in os.walk('.'):\n # We must separate the two types of dirs into different lists.\n # The reason is that we don't want ``os.walk`` to go through the\n # unrendered directories, since they will just be copied.\n copy_dirs = []\n render_dirs = []\n\n for d in dirs:\n d_ = os.path.normpath(os.path.join(root, d))\n # We check the full path, because that's how it can be\n # specified in the ``_copy_without_render`` setting, but\n # we store just the dir name\n if copy_without_render(d_, context):\n copy_dirs.append(d)\n else:\n render_dirs.append(d)\n\n for copy_dir in copy_dirs:\n indir = os.path.normpath(os.path.join(root, copy_dir))\n outdir = os.path.normpath(os.path.join(project_dir, indir))\n logging.debug(\n 'Copying dir {0} to {1} without rendering'\n ''.format(indir, outdir)\n )\n shutil.copytree(indir, outdir)\n\n # We mutate ``dirs``, because we only want to go through these dirs\n # recursively\n dirs[:] = render_dirs\n for d in dirs:\n unrendered_dir = os.path.join(project_dir, root, d)\n render_and_create_dir(unrendered_dir, context, output_dir,\n overwrite_if_exists)\n\n for f in files:\n infile = os.path.normpath(os.path.join(root, f))\n if copy_without_render(infile, context):\n outfile_tmpl = Template(infile)\n outfile_rendered = outfile_tmpl.render(**context)\n outfile = os.path.join(project_dir, outfile_rendered)\n logging.debug(\n 'Copying file {0} to {1} without rendering'\n ''.format(infile, outfile)\n )\n shutil.copyfile(infile, outfile)\n shutil.copymode(infile, outfile)\n continue\n logging.debug('f is {0}'.format(f))\n generate_file(project_dir, infile, context, env)\n\n # run post-gen hook from repo_dir\n with work_in(repo_dir):\n run_hook('post_gen_project', project_dir, context)\n", "path": "cookiecutter/generate.py"}]}
3,282
375
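The cookiecutter fix in the row above replaces the plain `obj.update(...)` calls with an `apply_overwrites_to_context()` helper, so a value coming from `~/.cookiecutterrc` (or `extra_context`) no longer flattens a choice list into a single string; the chosen value is instead moved to the front of the list, keeping the interactive choice prompt alive with that value as its default. Below is a minimal standalone sketch of that behaviour, reusing the helper body from the golden diff; the example context values (`full_name`, `license`) are invented purely for illustration and are not taken from any particular template.

```python
from collections import OrderedDict


def apply_overwrites_to_context(context, overwrite_context):
    """Overwrite context defaults in place (body taken from the golden diff above)."""
    for variable, overwrite in overwrite_context.items():
        if variable not in context:
            # Ignore variables the template does not use
            continue
        context_value = context[variable]
        if isinstance(context_value, list):
            # Choice variable: keep the list, move the overwrite to the front
            # so it becomes the default offered by the choice prompt
            if overwrite in context_value:
                context_value.remove(overwrite)
                context_value.insert(0, overwrite)
        else:
            # Plain variable: simply replace the default value
            context[variable] = overwrite


# Hypothetical cookiecutter.json defaults plus a user-config overwrite
context = OrderedDict(
    [("full_name", "Your Name"), ("license", ["MIT", "BSD-3", "GPLv3"])]
)
apply_overwrites_to_context(context, {"full_name": "Jane Doe", "license": "GPLv3"})

print(context["full_name"])  # Jane Doe
print(context["license"])    # ['GPLv3', 'MIT', 'BSD-3'] -- still a list, so the prompt survives
```

With the previous `obj.update()` behaviour the `license` entry would have been replaced by the bare string `"GPLv3"`, so `cookiecutter.prompt.prompt_choice_for_config` (referenced in the diff's comment) would never see a list, which appears to be why the choice prompts disappeared whenever a `~/.cookiecutterrc` was present.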
gh_patches_debug_24489
rasdani/github-patches
git_diff
pytorch__rl-443
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Feature Request] Create a tensorspec for the R3M output ## Motivation R3M (torchrl.envs.transforms.r3m) is a generic transform layer that will embed an image in a tensor, easier to work with using a policy or value function. In TorchRL, each env has am `observation_spec` attribute that contains `tensor_spec` that describe what the observations are (they behave like dictionaries, and all of their keys must start with `"next_"`). Here is how the observation_spec should behave: 1. We should add one TensorSpec for the output of R3M (the vector). It should have the size that is expected given the resnet used (i.e. an unbounded continuous tensorspec of size 2048 IIRC for resnet50). 2. We should delete the tensorspec of the pixels if it disappears (which will happen unless `tensor_pixels_keys` is set). e.g. when doing `R3MTransform("resnet50", keys_in=["next_pixels"])`, we should not see the "next_pixels" key in the `observation_spec` of the TransformedEnv once R3M has been applied since this key-value pair has disappeared. Instead, we should see the spec of "next_r3m_vec". 3. If the value of "next_pixels" is kept somewhere (using `tensor_pixels_keys`) we should duplicate the value of the `"next_pixels"`'s TensorSpec onto the key(s) indicated by `tensor_pixels_keys`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchrl/envs/transforms/r3m.py` Content: ``` 1 from typing import List, Optional, Union 2 3 import torch 4 from torch.hub import load_state_dict_from_url 5 from torch.nn import Identity 6 7 from torchrl.data import TensorDict, DEVICE_TYPING 8 from torchrl.envs.transforms import ( 9 ToTensorImage, 10 Compose, 11 ObservationNorm, 12 Resize, 13 Transform, 14 CatTensors, 15 FlattenObservation, 16 UnsqueezeTransform, 17 ) 18 19 try: 20 from torchvision import models 21 22 _has_tv = True 23 except ImportError: 24 _has_tv = False 25 26 __all__ = ["R3MTransform"] 27 28 29 class _R3MNet(Transform): 30 31 inplace = False 32 33 def __init__(self, in_keys, out_keys, model_name, del_keys: bool = True): 34 if not _has_tv: 35 raise ImportError( 36 "Tried to instantiate R3M without torchvision. Make sure you have " 37 "torchvision installed in your environment." 
38 ) 39 if model_name == "resnet18": 40 self.model_name = "r3m_18" 41 self.outdim = 512 42 convnet = models.resnet18(pretrained=False) 43 elif model_name == "resnet34": 44 self.model_name = "r3m_34" 45 self.outdim = 512 46 convnet = models.resnet34(pretrained=False) 47 elif model_name == "resnet50": 48 self.model_name = "r3m_50" 49 self.outdim = 2048 50 convnet = models.resnet50(pretrained=False) 51 else: 52 raise NotImplementedError( 53 f"model {model_name} is currently not supported by R3M" 54 ) 55 convnet.fc = Identity() 56 super().__init__(keys_in=in_keys, keys_out=out_keys) 57 self.convnet = convnet 58 self.del_keys = del_keys 59 60 def _call(self, tensordict): 61 tensordict_view = tensordict.view(-1) 62 super()._call(tensordict_view) 63 if self.del_keys: 64 tensordict.exclude(*self.keys_in, inplace=True) 65 return tensordict 66 67 @torch.no_grad() 68 def _apply_transform(self, obs: torch.Tensor) -> None: 69 shape = None 70 if obs.ndimension() > 4: 71 shape = obs.shape[:-3] 72 obs = obs.flatten(0, -4) 73 out = self.convnet(obs) 74 if shape is not None: 75 out = out.view(*shape, *out.shape[1:]) 76 return out 77 78 @staticmethod 79 def _load_weights(model_name, r3m_instance, dir_prefix): 80 if model_name not in ("r3m_50", "r3m_34", "r3m_18"): 81 raise ValueError( 82 "model_name should be one of 'r3m_50', 'r3m_34' or 'r3m_18'" 83 ) 84 # url = "https://download.pytorch.org/models/rl/r3m/" + model_name 85 url = "https://pytorch.s3.amazonaws.com/models/rl/r3m/" + model_name + ".pt" 86 d = load_state_dict_from_url( 87 url, 88 progress=True, 89 map_location=next(r3m_instance.parameters()).device, 90 model_dir=dir_prefix, 91 ) 92 td = TensorDict(d["r3m"], []).unflatten_keys(".") 93 td_flatten = td["module"]["convnet"].flatten_keys(".") 94 state_dict = td_flatten.to_dict() 95 r3m_instance.convnet.load_state_dict(state_dict) 96 97 def load_weights(self, dir_prefix=None): 98 self._load_weights(self.model_name, self, dir_prefix) 99 100 101 def _init_first(fun): 102 def new_fun(self, *args, **kwargs): 103 if not self.initialized: 104 self._init() 105 return fun(self, *args, **kwargs) 106 107 return new_fun 108 109 110 class R3MTransform(Compose): 111 """R3M Transform class. 112 113 R3M provides pre-trained ResNet weights aimed at facilitating visual 114 embedding for robotic tasks. The models are trained using Ego4d. 115 See the paper: 116 R3M: A Universal Visual Representation for Robot Manipulation (Suraj Nair, 117 Aravind Rajeswaran, Vikash Kumar, Chelsea Finn, Abhinav Gupta) 118 https://arxiv.org/abs/2203.12601 119 120 Args: 121 model_name (str): one of resnet50, resnet34 or resnet18 122 keys_in (list of str, optional): list of input keys. If left empty, the 123 "next_pixels" key is assumed. 124 keys_out (list of str, optional): list of output keys. If left empty, 125 "next_r3m_vec" is assumed. 126 size (int, optional): Size of the image to feed to resnet. 127 Defaults to 244. 128 download (bool, optional): if True, the weights will be downloaded using 129 the torch.hub download API (i.e. weights will be cached for future use). 130 Defaults to False. 131 download_path (str, optional): path where to download the models. 132 Default is None (cache path determined by torch.hub utils). 133 tensor_pixels_keys (list of str, optional): Optionally, one can keep the 134 original images (as collected from the env) in the output tensordict. 135 If no value is provided, this won't be collected. 
136 """ 137 138 @classmethod 139 def __new__(cls, *args, **kwargs): 140 cls._is_3d = None 141 cls.initialized = False 142 cls._device = None 143 cls._dtype = None 144 return super().__new__(cls) 145 146 def __init__( 147 self, 148 model_name: str, 149 keys_in: List[str] = None, 150 keys_out: List[str] = None, 151 size: int = 244, 152 stack_images: bool = True, 153 download: bool = False, 154 download_path: Optional[str] = None, 155 tensor_pixels_keys: List[str] = None, 156 ): 157 super().__init__() 158 self.keys_in = keys_in 159 self.download = download 160 self.download_path = download_path 161 self.model_name = model_name 162 self.keys_out = keys_out 163 self.size = size 164 self.stack_images = stack_images 165 self.tensor_pixels_keys = tensor_pixels_keys 166 167 def _init(self): 168 keys_in = self.keys_in 169 model_name = self.model_name 170 keys_out = self.keys_out 171 size = self.size 172 stack_images = self.stack_images 173 tensor_pixels_keys = self.tensor_pixels_keys 174 175 # ToTensor 176 transforms = [] 177 if tensor_pixels_keys: 178 for i in range(len(keys_in)): 179 transforms.append( 180 CatTensors( 181 keys_in=[keys_in[i]], 182 out_key=tensor_pixels_keys[i], 183 del_keys=False, 184 ) 185 ) 186 187 totensor = ToTensorImage( 188 unsqueeze=False, 189 keys_in=keys_in, 190 ) 191 transforms.append(totensor) 192 193 # Normalize 194 mean = [0.485, 0.456, 0.406] 195 std = [0.229, 0.224, 0.225] 196 normalize = ObservationNorm( 197 keys_in=keys_in, 198 loc=torch.tensor(mean).view(3, 1, 1), 199 scale=torch.tensor(std).view(3, 1, 1), 200 standard_normal=True, 201 ) 202 transforms.append(normalize) 203 204 # Resize: note that resize is a no-op if the tensor has the desired size already 205 resize = Resize(size, size, keys_in=keys_in) 206 transforms.append(resize) 207 208 # R3M 209 if keys_out is None: 210 if stack_images: 211 keys_out = ["next_r3m_vec"] 212 else: 213 keys_out = [f"next_r3m_vec_{i}" for i in range(len(keys_in))] 214 elif stack_images and len(keys_out) != 1: 215 raise ValueError( 216 f"key_out must be of length 1 if stack_images is True. Got keys_out={keys_out}" 217 ) 218 elif not stack_images and len(keys_out) != len(keys_in): 219 raise ValueError( 220 "key_out must be of length equal to keys_in if stack_images is False." 
221 ) 222 223 if stack_images and len(keys_in) > 1: 224 if self.is_3d: 225 unsqueeze = UnsqueezeTransform( 226 keys_in=keys_in, 227 keys_out=keys_in, 228 unsqueeze_dim=-4, 229 ) 230 transforms.append(unsqueeze) 231 232 cattensors = CatTensors( 233 keys_in, 234 keys_out[0], 235 dim=-4, 236 ) 237 network = _R3MNet( 238 in_keys=keys_out, 239 out_keys=keys_out, 240 model_name=model_name, 241 del_keys=False, 242 ) 243 flatten = FlattenObservation(-2, -1, keys_out) 244 transforms = [*transforms, cattensors, network, flatten] 245 else: 246 network = _R3MNet( 247 in_keys=keys_in, 248 out_keys=keys_out, 249 model_name=model_name, 250 del_keys=True, 251 ) 252 transforms = [*transforms, normalize, network] 253 254 for transform in transforms: 255 self.append(transform) 256 if self.download: 257 self[-1].load_weights(dir_prefix=self.download_path) 258 self.initialized = True 259 260 if self._device is not None: 261 self.to(self._device) 262 if self._dtype is not None: 263 self.to(self._dtype) 264 265 @property 266 def is_3d(self): 267 if self._is_3d is None: 268 parent = self.parent 269 for key in parent.observation_spec.keys(): 270 self._is_3d = len(parent.observation_spec[key].shape) == 3 271 break 272 return self._is_3d 273 274 def to(self, dest: Union[DEVICE_TYPING, torch.dtype]): 275 if isinstance(dest, torch.dtype): 276 self._dtype = dest 277 else: 278 self._device = dest 279 return super().to(dest) 280 281 @property 282 def device(self): 283 return self._device 284 285 @property 286 def dtype(self): 287 return self._dtype 288 289 forward = _init_first(Compose.forward) 290 transform_action_spec = _init_first(Compose.transform_action_spec) 291 transform_observation_spec = _init_first(Compose.transform_observation_spec) 292 transform_input_spec = _init_first(Compose.transform_input_spec) 293 transform_reward_spec = _init_first(Compose.transform_reward_spec) 294 reset = _init_first(Compose.reset) 295 init = _init_first(Compose.init) 296 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchrl/envs/transforms/r3m.py b/torchrl/envs/transforms/r3m.py --- a/torchrl/envs/transforms/r3m.py +++ b/torchrl/envs/transforms/r3m.py @@ -5,6 +5,11 @@ from torch.nn import Identity from torchrl.data import TensorDict, DEVICE_TYPING +from torchrl.data.tensor_specs import ( + TensorSpec, + CompositeSpec, + NdUnboundedContinuousTensorSpec, +) from torchrl.envs.transforms import ( ToTensorImage, Compose, @@ -75,6 +80,24 @@ out = out.view(*shape, *out.shape[1:]) return out + def transform_observation_spec(self, observation_spec: TensorSpec) -> TensorSpec: + if not isinstance(observation_spec, CompositeSpec): + raise ValueError("_R3MNet can only infer CompositeSpec") + + keys = [key for key in observation_spec._specs.keys() if key in self.keys_in] + device = observation_spec[keys[0]].device + + if self.del_keys: + for key_in in keys: + del observation_spec[key_in] + + for key_out in self.keys_out: + observation_spec[key_out] = NdUnboundedContinuousTensorSpec( + shape=torch.Size([self.outdim]), device=device + ) + + return observation_spec + @staticmethod def _load_weights(model_name, r3m_instance, dir_prefix): if model_name not in ("r3m_50", "r3m_34", "r3m_18"):
{"golden_diff": "diff --git a/torchrl/envs/transforms/r3m.py b/torchrl/envs/transforms/r3m.py\n--- a/torchrl/envs/transforms/r3m.py\n+++ b/torchrl/envs/transforms/r3m.py\n@@ -5,6 +5,11 @@\n from torch.nn import Identity\n \n from torchrl.data import TensorDict, DEVICE_TYPING\n+from torchrl.data.tensor_specs import (\n+ TensorSpec,\n+ CompositeSpec,\n+ NdUnboundedContinuousTensorSpec,\n+)\n from torchrl.envs.transforms import (\n ToTensorImage,\n Compose,\n@@ -75,6 +80,24 @@\n out = out.view(*shape, *out.shape[1:])\n return out\n \n+ def transform_observation_spec(self, observation_spec: TensorSpec) -> TensorSpec:\n+ if not isinstance(observation_spec, CompositeSpec):\n+ raise ValueError(\"_R3MNet can only infer CompositeSpec\")\n+\n+ keys = [key for key in observation_spec._specs.keys() if key in self.keys_in]\n+ device = observation_spec[keys[0]].device\n+\n+ if self.del_keys:\n+ for key_in in keys:\n+ del observation_spec[key_in]\n+\n+ for key_out in self.keys_out:\n+ observation_spec[key_out] = NdUnboundedContinuousTensorSpec(\n+ shape=torch.Size([self.outdim]), device=device\n+ )\n+\n+ return observation_spec\n+\n @staticmethod\n def _load_weights(model_name, r3m_instance, dir_prefix):\n if model_name not in (\"r3m_50\", \"r3m_34\", \"r3m_18\"):\n", "issue": "[Feature Request] Create a tensorspec for the R3M output\n## Motivation\r\n\r\nR3M (torchrl.envs.transforms.r3m) is a generic transform layer that will embed an image in a tensor, easier to work with using a policy or value function.\r\nIn TorchRL, each env has am `observation_spec` attribute that contains `tensor_spec` that describe what the observations are (they behave like dictionaries, and all of their keys must start with `\"next_\"`).\r\n\r\nHere is how the observation_spec should behave:\r\n1. We should add one TensorSpec for the output of R3M (the vector). It should have the size that is expected given the resnet used (i.e. an unbounded continuous tensorspec of size 2048 IIRC for resnet50).\r\n2. We should delete the tensorspec of the pixels if it disappears (which will happen unless `tensor_pixels_keys` is set). e.g. when doing `R3MTransform(\"resnet50\", keys_in=[\"next_pixels\"])`, we should not see the \"next_pixels\" key in the `observation_spec` of the TransformedEnv once R3M has been applied since this key-value pair has disappeared. Instead, we should see the spec of \"next_r3m_vec\".\r\n3. If the value of \"next_pixels\" is kept somewhere (using `tensor_pixels_keys`) we should duplicate the value of the `\"next_pixels\"`'s TensorSpec onto the key(s) indicated by `tensor_pixels_keys`.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import List, Optional, Union\n\nimport torch\nfrom torch.hub import load_state_dict_from_url\nfrom torch.nn import Identity\n\nfrom torchrl.data import TensorDict, DEVICE_TYPING\nfrom torchrl.envs.transforms import (\n ToTensorImage,\n Compose,\n ObservationNorm,\n Resize,\n Transform,\n CatTensors,\n FlattenObservation,\n UnsqueezeTransform,\n)\n\ntry:\n from torchvision import models\n\n _has_tv = True\nexcept ImportError:\n _has_tv = False\n\n__all__ = [\"R3MTransform\"]\n\n\nclass _R3MNet(Transform):\n\n inplace = False\n\n def __init__(self, in_keys, out_keys, model_name, del_keys: bool = True):\n if not _has_tv:\n raise ImportError(\n \"Tried to instantiate R3M without torchvision. 
Make sure you have \"\n \"torchvision installed in your environment.\"\n )\n if model_name == \"resnet18\":\n self.model_name = \"r3m_18\"\n self.outdim = 512\n convnet = models.resnet18(pretrained=False)\n elif model_name == \"resnet34\":\n self.model_name = \"r3m_34\"\n self.outdim = 512\n convnet = models.resnet34(pretrained=False)\n elif model_name == \"resnet50\":\n self.model_name = \"r3m_50\"\n self.outdim = 2048\n convnet = models.resnet50(pretrained=False)\n else:\n raise NotImplementedError(\n f\"model {model_name} is currently not supported by R3M\"\n )\n convnet.fc = Identity()\n super().__init__(keys_in=in_keys, keys_out=out_keys)\n self.convnet = convnet\n self.del_keys = del_keys\n\n def _call(self, tensordict):\n tensordict_view = tensordict.view(-1)\n super()._call(tensordict_view)\n if self.del_keys:\n tensordict.exclude(*self.keys_in, inplace=True)\n return tensordict\n\n @torch.no_grad()\n def _apply_transform(self, obs: torch.Tensor) -> None:\n shape = None\n if obs.ndimension() > 4:\n shape = obs.shape[:-3]\n obs = obs.flatten(0, -4)\n out = self.convnet(obs)\n if shape is not None:\n out = out.view(*shape, *out.shape[1:])\n return out\n\n @staticmethod\n def _load_weights(model_name, r3m_instance, dir_prefix):\n if model_name not in (\"r3m_50\", \"r3m_34\", \"r3m_18\"):\n raise ValueError(\n \"model_name should be one of 'r3m_50', 'r3m_34' or 'r3m_18'\"\n )\n # url = \"https://download.pytorch.org/models/rl/r3m/\" + model_name\n url = \"https://pytorch.s3.amazonaws.com/models/rl/r3m/\" + model_name + \".pt\"\n d = load_state_dict_from_url(\n url,\n progress=True,\n map_location=next(r3m_instance.parameters()).device,\n model_dir=dir_prefix,\n )\n td = TensorDict(d[\"r3m\"], []).unflatten_keys(\".\")\n td_flatten = td[\"module\"][\"convnet\"].flatten_keys(\".\")\n state_dict = td_flatten.to_dict()\n r3m_instance.convnet.load_state_dict(state_dict)\n\n def load_weights(self, dir_prefix=None):\n self._load_weights(self.model_name, self, dir_prefix)\n\n\ndef _init_first(fun):\n def new_fun(self, *args, **kwargs):\n if not self.initialized:\n self._init()\n return fun(self, *args, **kwargs)\n\n return new_fun\n\n\nclass R3MTransform(Compose):\n \"\"\"R3M Transform class.\n\n R3M provides pre-trained ResNet weights aimed at facilitating visual\n embedding for robotic tasks. The models are trained using Ego4d.\n See the paper:\n R3M: A Universal Visual Representation for Robot Manipulation (Suraj Nair,\n Aravind Rajeswaran, Vikash Kumar, Chelsea Finn, Abhinav Gupta)\n https://arxiv.org/abs/2203.12601\n\n Args:\n model_name (str): one of resnet50, resnet34 or resnet18\n keys_in (list of str, optional): list of input keys. If left empty, the\n \"next_pixels\" key is assumed.\n keys_out (list of str, optional): list of output keys. If left empty,\n \"next_r3m_vec\" is assumed.\n size (int, optional): Size of the image to feed to resnet.\n Defaults to 244.\n download (bool, optional): if True, the weights will be downloaded using\n the torch.hub download API (i.e. 
weights will be cached for future use).\n Defaults to False.\n download_path (str, optional): path where to download the models.\n Default is None (cache path determined by torch.hub utils).\n tensor_pixels_keys (list of str, optional): Optionally, one can keep the\n original images (as collected from the env) in the output tensordict.\n If no value is provided, this won't be collected.\n \"\"\"\n\n @classmethod\n def __new__(cls, *args, **kwargs):\n cls._is_3d = None\n cls.initialized = False\n cls._device = None\n cls._dtype = None\n return super().__new__(cls)\n\n def __init__(\n self,\n model_name: str,\n keys_in: List[str] = None,\n keys_out: List[str] = None,\n size: int = 244,\n stack_images: bool = True,\n download: bool = False,\n download_path: Optional[str] = None,\n tensor_pixels_keys: List[str] = None,\n ):\n super().__init__()\n self.keys_in = keys_in\n self.download = download\n self.download_path = download_path\n self.model_name = model_name\n self.keys_out = keys_out\n self.size = size\n self.stack_images = stack_images\n self.tensor_pixels_keys = tensor_pixels_keys\n\n def _init(self):\n keys_in = self.keys_in\n model_name = self.model_name\n keys_out = self.keys_out\n size = self.size\n stack_images = self.stack_images\n tensor_pixels_keys = self.tensor_pixels_keys\n\n # ToTensor\n transforms = []\n if tensor_pixels_keys:\n for i in range(len(keys_in)):\n transforms.append(\n CatTensors(\n keys_in=[keys_in[i]],\n out_key=tensor_pixels_keys[i],\n del_keys=False,\n )\n )\n\n totensor = ToTensorImage(\n unsqueeze=False,\n keys_in=keys_in,\n )\n transforms.append(totensor)\n\n # Normalize\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n normalize = ObservationNorm(\n keys_in=keys_in,\n loc=torch.tensor(mean).view(3, 1, 1),\n scale=torch.tensor(std).view(3, 1, 1),\n standard_normal=True,\n )\n transforms.append(normalize)\n\n # Resize: note that resize is a no-op if the tensor has the desired size already\n resize = Resize(size, size, keys_in=keys_in)\n transforms.append(resize)\n\n # R3M\n if keys_out is None:\n if stack_images:\n keys_out = [\"next_r3m_vec\"]\n else:\n keys_out = [f\"next_r3m_vec_{i}\" for i in range(len(keys_in))]\n elif stack_images and len(keys_out) != 1:\n raise ValueError(\n f\"key_out must be of length 1 if stack_images is True. 
Got keys_out={keys_out}\"\n )\n elif not stack_images and len(keys_out) != len(keys_in):\n raise ValueError(\n \"key_out must be of length equal to keys_in if stack_images is False.\"\n )\n\n if stack_images and len(keys_in) > 1:\n if self.is_3d:\n unsqueeze = UnsqueezeTransform(\n keys_in=keys_in,\n keys_out=keys_in,\n unsqueeze_dim=-4,\n )\n transforms.append(unsqueeze)\n\n cattensors = CatTensors(\n keys_in,\n keys_out[0],\n dim=-4,\n )\n network = _R3MNet(\n in_keys=keys_out,\n out_keys=keys_out,\n model_name=model_name,\n del_keys=False,\n )\n flatten = FlattenObservation(-2, -1, keys_out)\n transforms = [*transforms, cattensors, network, flatten]\n else:\n network = _R3MNet(\n in_keys=keys_in,\n out_keys=keys_out,\n model_name=model_name,\n del_keys=True,\n )\n transforms = [*transforms, normalize, network]\n\n for transform in transforms:\n self.append(transform)\n if self.download:\n self[-1].load_weights(dir_prefix=self.download_path)\n self.initialized = True\n\n if self._device is not None:\n self.to(self._device)\n if self._dtype is not None:\n self.to(self._dtype)\n\n @property\n def is_3d(self):\n if self._is_3d is None:\n parent = self.parent\n for key in parent.observation_spec.keys():\n self._is_3d = len(parent.observation_spec[key].shape) == 3\n break\n return self._is_3d\n\n def to(self, dest: Union[DEVICE_TYPING, torch.dtype]):\n if isinstance(dest, torch.dtype):\n self._dtype = dest\n else:\n self._device = dest\n return super().to(dest)\n\n @property\n def device(self):\n return self._device\n\n @property\n def dtype(self):\n return self._dtype\n\n forward = _init_first(Compose.forward)\n transform_action_spec = _init_first(Compose.transform_action_spec)\n transform_observation_spec = _init_first(Compose.transform_observation_spec)\n transform_input_spec = _init_first(Compose.transform_input_spec)\n transform_reward_spec = _init_first(Compose.transform_reward_spec)\n reset = _init_first(Compose.reset)\n init = _init_first(Compose.init)\n", "path": "torchrl/envs/transforms/r3m.py"}], "after_files": [{"content": "from typing import List, Optional, Union\n\nimport torch\nfrom torch.hub import load_state_dict_from_url\nfrom torch.nn import Identity\n\nfrom torchrl.data import TensorDict, DEVICE_TYPING\nfrom torchrl.data.tensor_specs import (\n TensorSpec,\n CompositeSpec,\n NdUnboundedContinuousTensorSpec,\n)\nfrom torchrl.envs.transforms import (\n ToTensorImage,\n Compose,\n ObservationNorm,\n Resize,\n Transform,\n CatTensors,\n FlattenObservation,\n UnsqueezeTransform,\n)\n\ntry:\n from torchvision import models\n\n _has_tv = True\nexcept ImportError:\n _has_tv = False\n\n__all__ = [\"R3MTransform\"]\n\n\nclass _R3MNet(Transform):\n\n inplace = False\n\n def __init__(self, in_keys, out_keys, model_name, del_keys: bool = True):\n if not _has_tv:\n raise ImportError(\n \"Tried to instantiate R3M without torchvision. 
Make sure you have \"\n \"torchvision installed in your environment.\"\n )\n if model_name == \"resnet18\":\n self.model_name = \"r3m_18\"\n self.outdim = 512\n convnet = models.resnet18(pretrained=False)\n elif model_name == \"resnet34\":\n self.model_name = \"r3m_34\"\n self.outdim = 512\n convnet = models.resnet34(pretrained=False)\n elif model_name == \"resnet50\":\n self.model_name = \"r3m_50\"\n self.outdim = 2048\n convnet = models.resnet50(pretrained=False)\n else:\n raise NotImplementedError(\n f\"model {model_name} is currently not supported by R3M\"\n )\n convnet.fc = Identity()\n super().__init__(keys_in=in_keys, keys_out=out_keys)\n self.convnet = convnet\n self.del_keys = del_keys\n\n def _call(self, tensordict):\n tensordict_view = tensordict.view(-1)\n super()._call(tensordict_view)\n if self.del_keys:\n tensordict.exclude(*self.keys_in, inplace=True)\n return tensordict\n\n @torch.no_grad()\n def _apply_transform(self, obs: torch.Tensor) -> None:\n shape = None\n if obs.ndimension() > 4:\n shape = obs.shape[:-3]\n obs = obs.flatten(0, -4)\n out = self.convnet(obs)\n if shape is not None:\n out = out.view(*shape, *out.shape[1:])\n return out\n\n def transform_observation_spec(self, observation_spec: TensorSpec) -> TensorSpec:\n if not isinstance(observation_spec, CompositeSpec):\n raise ValueError(\"_R3MNet can only infer CompositeSpec\")\n\n keys = [key for key in observation_spec._specs.keys() if key in self.keys_in]\n device = observation_spec[keys[0]].device\n\n if self.del_keys:\n for key_in in keys:\n del observation_spec[key_in]\n\n for key_out in self.keys_out:\n observation_spec[key_out] = NdUnboundedContinuousTensorSpec(\n shape=torch.Size([self.outdim]), device=device\n )\n\n return observation_spec\n\n @staticmethod\n def _load_weights(model_name, r3m_instance, dir_prefix):\n if model_name not in (\"r3m_50\", \"r3m_34\", \"r3m_18\"):\n raise ValueError(\n \"model_name should be one of 'r3m_50', 'r3m_34' or 'r3m_18'\"\n )\n # url = \"https://download.pytorch.org/models/rl/r3m/\" + model_name\n url = \"https://pytorch.s3.amazonaws.com/models/rl/r3m/\" + model_name + \".pt\"\n d = load_state_dict_from_url(\n url,\n progress=True,\n map_location=next(r3m_instance.parameters()).device,\n model_dir=dir_prefix,\n )\n td = TensorDict(d[\"r3m\"], []).unflatten_keys(\".\")\n td_flatten = td[\"module\"][\"convnet\"].flatten_keys(\".\")\n state_dict = td_flatten.to_dict()\n r3m_instance.convnet.load_state_dict(state_dict)\n\n def load_weights(self, dir_prefix=None):\n self._load_weights(self.model_name, self, dir_prefix)\n\n\ndef _init_first(fun):\n def new_fun(self, *args, **kwargs):\n if not self.initialized:\n self._init()\n return fun(self, *args, **kwargs)\n\n return new_fun\n\n\nclass R3MTransform(Compose):\n \"\"\"R3M Transform class.\n\n R3M provides pre-trained ResNet weights aimed at facilitating visual\n embedding for robotic tasks. The models are trained using Ego4d.\n See the paper:\n R3M: A Universal Visual Representation for Robot Manipulation (Suraj Nair,\n Aravind Rajeswaran, Vikash Kumar, Chelsea Finn, Abhinav Gupta)\n https://arxiv.org/abs/2203.12601\n\n Args:\n model_name (str): one of resnet50, resnet34 or resnet18\n keys_in (list of str, optional): list of input keys. If left empty, the\n \"next_pixels\" key is assumed.\n keys_out (list of str, optional): list of output keys. 
If left empty,\n \"next_r3m_vec\" is assumed.\n size (int, optional): Size of the image to feed to resnet.\n Defaults to 244.\n download (bool, optional): if True, the weights will be downloaded using\n the torch.hub download API (i.e. weights will be cached for future use).\n Defaults to False.\n download_path (str, optional): path where to download the models.\n Default is None (cache path determined by torch.hub utils).\n tensor_pixels_keys (list of str, optional): Optionally, one can keep the\n original images (as collected from the env) in the output tensordict.\n If no value is provided, this won't be collected.\n \"\"\"\n\n @classmethod\n def __new__(cls, *args, **kwargs):\n cls._is_3d = None\n cls.initialized = False\n cls._device = None\n cls._dtype = None\n return super().__new__(cls)\n\n def __init__(\n self,\n model_name: str,\n keys_in: List[str] = None,\n keys_out: List[str] = None,\n size: int = 244,\n stack_images: bool = True,\n download: bool = False,\n download_path: Optional[str] = None,\n tensor_pixels_keys: List[str] = None,\n ):\n super().__init__()\n self.keys_in = keys_in\n self.download = download\n self.download_path = download_path\n self.model_name = model_name\n self.keys_out = keys_out\n self.size = size\n self.stack_images = stack_images\n self.tensor_pixels_keys = tensor_pixels_keys\n\n def _init(self):\n keys_in = self.keys_in\n model_name = self.model_name\n keys_out = self.keys_out\n size = self.size\n stack_images = self.stack_images\n tensor_pixels_keys = self.tensor_pixels_keys\n\n # ToTensor\n transforms = []\n if tensor_pixels_keys:\n for i in range(len(keys_in)):\n transforms.append(\n CatTensors(\n keys_in=[keys_in[i]],\n out_key=tensor_pixels_keys[i],\n del_keys=False,\n )\n )\n\n totensor = ToTensorImage(\n unsqueeze=False,\n keys_in=keys_in,\n )\n transforms.append(totensor)\n\n # Normalize\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n normalize = ObservationNorm(\n keys_in=keys_in,\n loc=torch.tensor(mean).view(3, 1, 1),\n scale=torch.tensor(std).view(3, 1, 1),\n standard_normal=True,\n )\n transforms.append(normalize)\n\n # Resize: note that resize is a no-op if the tensor has the desired size already\n resize = Resize(size, size, keys_in=keys_in)\n transforms.append(resize)\n\n # R3M\n if keys_out is None:\n if stack_images:\n keys_out = [\"next_r3m_vec\"]\n else:\n keys_out = [f\"next_r3m_vec_{i}\" for i in range(len(keys_in))]\n elif stack_images and len(keys_out) != 1:\n raise ValueError(\n f\"key_out must be of length 1 if stack_images is True. 
Got keys_out={keys_out}\"\n )\n elif not stack_images and len(keys_out) != len(keys_in):\n raise ValueError(\n \"key_out must be of length equal to keys_in if stack_images is False.\"\n )\n\n if stack_images and len(keys_in) > 1:\n if self.is_3d:\n unsqueeze = UnsqueezeTransform(\n keys_in=keys_in,\n keys_out=keys_in,\n unsqueeze_dim=-4,\n )\n transforms.append(unsqueeze)\n\n cattensors = CatTensors(\n keys_in,\n keys_out[0],\n dim=-4,\n )\n network = _R3MNet(\n in_keys=keys_out,\n out_keys=keys_out,\n model_name=model_name,\n del_keys=False,\n )\n flatten = FlattenObservation(-2, -1, keys_out)\n transforms = [*transforms, cattensors, network, flatten]\n else:\n network = _R3MNet(\n in_keys=keys_in,\n out_keys=keys_out,\n model_name=model_name,\n del_keys=True,\n )\n transforms = [*transforms, normalize, network]\n\n for transform in transforms:\n self.append(transform)\n if self.download:\n self[-1].load_weights(dir_prefix=self.download_path)\n self.initialized = True\n\n if self._device is not None:\n self.to(self._device)\n if self._dtype is not None:\n self.to(self._dtype)\n\n @property\n def is_3d(self):\n if self._is_3d is None:\n parent = self.parent\n for key in parent.observation_spec.keys():\n self._is_3d = len(parent.observation_spec[key].shape) == 3\n break\n return self._is_3d\n\n def to(self, dest: Union[DEVICE_TYPING, torch.dtype]):\n if isinstance(dest, torch.dtype):\n self._dtype = dest\n else:\n self._device = dest\n return super().to(dest)\n\n @property\n def device(self):\n return self._device\n\n @property\n def dtype(self):\n return self._dtype\n\n forward = _init_first(Compose.forward)\n transform_action_spec = _init_first(Compose.transform_action_spec)\n transform_observation_spec = _init_first(Compose.transform_observation_spec)\n transform_input_spec = _init_first(Compose.transform_input_spec)\n transform_reward_spec = _init_first(Compose.transform_reward_spec)\n reset = _init_first(Compose.reset)\n init = _init_first(Compose.init)\n", "path": "torchrl/envs/transforms/r3m.py"}]}
num_tokens: 3,728
num_tokens_diff: 368

problem_id: gh_patches_debug_32827
source: rasdani/github-patches
task_type: git_diff
in_source_id: python-gitlab__python-gitlab-623
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Handle missing config file ## Description of the problem, including code/CLI snippet When '~/.python-gitlab.cfg' is missing, an full traceback is printed ## Expected Behavior Create a default '~/.python-gitlab.cfg' file and print "The tool is not configured yet. Edit ~/.python-gitlab.cfg and run the command again" ## Actual Behavior See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=912537 ## Specifications - python-gitlab version: 1.6.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gitlab/cli.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]> 5 # 6 # This program is free software: you can redistribute it and/or modify 7 # it under the terms of the GNU Lesser General Public License as published by 8 # the Free Software Foundation, either version 3 of the License, or 9 # (at your option) any later version. 10 # 11 # This program is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU Lesser General Public License for more details. 15 # 16 # You should have received a copy of the GNU Lesser General Public License 17 # along with this program. If not, see <http://www.gnu.org/licenses/>. 18 19 from __future__ import print_function 20 import argparse 21 import functools 22 import importlib 23 import re 24 import sys 25 26 import gitlab.config 27 28 camel_re = re.compile('(.)([A-Z])') 29 30 # custom_actions = { 31 # cls: { 32 # action: (mandatory_args, optional_args, in_obj), 33 # }, 34 # } 35 custom_actions = {} 36 37 38 def register_custom_action(cls_names, mandatory=tuple(), optional=tuple()): 39 def wrap(f): 40 @functools.wraps(f) 41 def wrapped_f(*args, **kwargs): 42 return f(*args, **kwargs) 43 44 # in_obj defines whether the method belongs to the obj or the manager 45 in_obj = True 46 classes = cls_names 47 if type(cls_names) != tuple: 48 classes = (cls_names, ) 49 50 for cls_name in classes: 51 final_name = cls_name 52 if cls_name.endswith('Manager'): 53 final_name = cls_name.replace('Manager', '') 54 in_obj = False 55 if final_name not in custom_actions: 56 custom_actions[final_name] = {} 57 58 action = f.__name__.replace('_', '-') 59 custom_actions[final_name][action] = (mandatory, optional, in_obj) 60 61 return wrapped_f 62 return wrap 63 64 65 def die(msg, e=None): 66 if e: 67 msg = "%s (%s)" % (msg, e) 68 sys.stderr.write(msg + "\n") 69 sys.exit(1) 70 71 72 def what_to_cls(what): 73 return "".join([s.capitalize() for s in what.split("-")]) 74 75 76 def cls_to_what(cls): 77 return camel_re.sub(r'\1-\2', cls.__name__).lower() 78 79 80 def _get_base_parser(add_help=True): 81 parser = argparse.ArgumentParser( 82 add_help=add_help, 83 description="GitLab API Command Line Interface") 84 parser.add_argument("--version", help="Display the version.", 85 action="store_true") 86 parser.add_argument("-v", "--verbose", "--fancy", 87 help="Verbose mode (legacy format only)", 88 action="store_true") 89 parser.add_argument("-d", "--debug", 90 help="Debug mode (display HTTP requests)", 91 action="store_true") 92 parser.add_argument("-c", "--config-file", action='append', 93 help=("Configuration file to use. 
Can be used " 94 "multiple times.")) 95 parser.add_argument("-g", "--gitlab", 96 help=("Which configuration section should " 97 "be used. If not defined, the default selection " 98 "will be used."), 99 required=False) 100 parser.add_argument("-o", "--output", 101 help="Output format (v4 only): json|legacy|yaml", 102 required=False, 103 choices=['json', 'legacy', 'yaml'], 104 default="legacy") 105 parser.add_argument("-f", "--fields", 106 help=("Fields to display in the output (comma " 107 "separated). Not used with legacy output"), 108 required=False) 109 110 return parser 111 112 113 def _get_parser(cli_module): 114 parser = _get_base_parser() 115 return cli_module.extend_parser(parser) 116 117 118 def _parse_value(v): 119 if isinstance(v, str) and v.startswith('@'): 120 # If the user-provided value starts with @, we try to read the file 121 # path provided after @ as the real value. Exit on any error. 122 try: 123 with open(v[1:]) as fl: 124 return fl.read() 125 except Exception as e: 126 sys.stderr.write("%s\n" % e) 127 sys.exit(1) 128 129 return v 130 131 132 def main(): 133 if "--version" in sys.argv: 134 print(gitlab.__version__) 135 exit(0) 136 137 parser = _get_base_parser(add_help=False) 138 if "--help" in sys.argv or "-h" in sys.argv: 139 parser.print_help() 140 exit(0) 141 142 # This first parsing step is used to find the gitlab config to use, and 143 # load the propermodule (v3 or v4) accordingly. At that point we don't have 144 # any subparser setup 145 (options, args) = parser.parse_known_args(sys.argv) 146 147 config = gitlab.config.GitlabConfigParser(options.gitlab, 148 options.config_file) 149 cli_module = importlib.import_module('gitlab.v%s.cli' % config.api_version) 150 151 # Now we build the entire set of subcommands and do the complete parsing 152 parser = _get_parser(cli_module) 153 args = parser.parse_args(sys.argv[1:]) 154 155 config_files = args.config_file 156 gitlab_id = args.gitlab 157 verbose = args.verbose 158 output = args.output 159 fields = [] 160 if args.fields: 161 fields = [x.strip() for x in args.fields.split(',')] 162 debug = args.debug 163 action = args.action 164 what = args.what 165 166 args = args.__dict__ 167 # Remove CLI behavior-related args 168 for item in ('gitlab', 'config_file', 'verbose', 'debug', 'what', 'action', 169 'version', 'output'): 170 args.pop(item) 171 args = {k: _parse_value(v) for k, v in args.items() if v is not None} 172 173 try: 174 gl = gitlab.Gitlab.from_config(gitlab_id, config_files) 175 if gl.private_token or gl.oauth_token: 176 gl.auth() 177 except Exception as e: 178 die(str(e)) 179 180 if debug: 181 gl.enable_debug() 182 183 cli_module.run(gl, what, action, args, verbose, output, fields) 184 185 sys.exit(0) 186 ``` Path: `gitlab/config.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]> 4 # 5 # This program is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU Lesser General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # This program is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU Lesser General Public License for more details. 14 # 15 # You should have received a copy of the GNU Lesser General Public License 16 # along with this program. 
If not, see <http://www.gnu.org/licenses/>. 17 18 import os 19 20 from six.moves import configparser 21 22 _DEFAULT_FILES = [ 23 '/etc/python-gitlab.cfg', 24 os.path.expanduser('~/.python-gitlab.cfg') 25 ] 26 27 28 class ConfigError(Exception): 29 pass 30 31 32 class GitlabIDError(ConfigError): 33 pass 34 35 36 class GitlabDataError(ConfigError): 37 pass 38 39 40 class GitlabConfigParser(object): 41 def __init__(self, gitlab_id=None, config_files=None): 42 self.gitlab_id = gitlab_id 43 _files = config_files or _DEFAULT_FILES 44 self._config = configparser.ConfigParser() 45 self._config.read(_files) 46 47 if self.gitlab_id is None: 48 try: 49 self.gitlab_id = self._config.get('global', 'default') 50 except Exception: 51 raise GitlabIDError("Impossible to get the gitlab id " 52 "(not specified in config file)") 53 54 try: 55 self.url = self._config.get(self.gitlab_id, 'url') 56 except Exception: 57 raise GitlabDataError("Impossible to get gitlab informations from " 58 "configuration (%s)" % self.gitlab_id) 59 60 self.ssl_verify = True 61 try: 62 self.ssl_verify = self._config.getboolean('global', 'ssl_verify') 63 except ValueError: 64 # Value Error means the option exists but isn't a boolean. 65 # Get as a string instead as it should then be a local path to a 66 # CA bundle. 67 try: 68 self.ssl_verify = self._config.get('global', 'ssl_verify') 69 except Exception: 70 pass 71 except Exception: 72 pass 73 try: 74 self.ssl_verify = self._config.getboolean(self.gitlab_id, 75 'ssl_verify') 76 except ValueError: 77 # Value Error means the option exists but isn't a boolean. 78 # Get as a string instead as it should then be a local path to a 79 # CA bundle. 80 try: 81 self.ssl_verify = self._config.get(self.gitlab_id, 82 'ssl_verify') 83 except Exception: 84 pass 85 except Exception: 86 pass 87 88 self.timeout = 60 89 try: 90 self.timeout = self._config.getint('global', 'timeout') 91 except Exception: 92 pass 93 try: 94 self.timeout = self._config.getint(self.gitlab_id, 'timeout') 95 except Exception: 96 pass 97 98 self.private_token = None 99 try: 100 self.private_token = self._config.get(self.gitlab_id, 101 'private_token') 102 except Exception: 103 pass 104 105 self.oauth_token = None 106 try: 107 self.oauth_token = self._config.get(self.gitlab_id, 'oauth_token') 108 except Exception: 109 pass 110 111 self.http_username = None 112 self.http_password = None 113 try: 114 self.http_username = self._config.get(self.gitlab_id, 115 'http_username') 116 self.http_password = self._config.get(self.gitlab_id, 117 'http_password') 118 except Exception: 119 pass 120 121 self.http_username = None 122 self.http_password = None 123 try: 124 self.http_username = self._config.get(self.gitlab_id, 125 'http_username') 126 self.http_password = self._config.get(self.gitlab_id, 127 'http_password') 128 except Exception: 129 pass 130 131 self.api_version = '4' 132 try: 133 self.api_version = self._config.get('global', 'api_version') 134 except Exception: 135 pass 136 try: 137 self.api_version = self._config.get(self.gitlab_id, 'api_version') 138 except Exception: 139 pass 140 if self.api_version not in ('4',): 141 raise GitlabDataError("Unsupported API version: %s" % 142 self.api_version) 143 144 self.per_page = None 145 for section in ['global', self.gitlab_id]: 146 try: 147 self.per_page = self._config.getint(section, 'per_page') 148 except Exception: 149 pass 150 if self.per_page is not None and not 0 <= self.per_page <= 100: 151 raise GitlabDataError("Unsupported per_page number: %s" % 152 self.per_page) 153 ``` 
--- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gitlab/cli.py b/gitlab/cli.py --- a/gitlab/cli.py +++ b/gitlab/cli.py @@ -17,6 +17,7 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import print_function + import argparse import functools import importlib @@ -143,9 +144,13 @@ # load the propermodule (v3 or v4) accordingly. At that point we don't have # any subparser setup (options, args) = parser.parse_known_args(sys.argv) - - config = gitlab.config.GitlabConfigParser(options.gitlab, - options.config_file) + try: + config = gitlab.config.GitlabConfigParser( + options.gitlab, + options.config_file + ) + except gitlab.config.ConfigError as e: + sys.exit(e) cli_module = importlib.import_module('gitlab.v%s.cli' % config.api_version) # Now we build the entire set of subcommands and do the complete parsing diff --git a/gitlab/config.py b/gitlab/config.py --- a/gitlab/config.py +++ b/gitlab/config.py @@ -37,10 +37,27 @@ pass +class GitlabConfigMissingError(ConfigError): + pass + + class GitlabConfigParser(object): def __init__(self, gitlab_id=None, config_files=None): self.gitlab_id = gitlab_id _files = config_files or _DEFAULT_FILES + file_exist = False + for file in _files: + if os.path.exists(file): + file_exist = True + if not file_exist: + raise GitlabConfigMissingError( + "Config file not found. \nPlease create one in " + "one of the following locations: {} \nor " + "specify a config file using the '-c' parameter.".format( + ", ".join(_DEFAULT_FILES) + ) + ) + self._config = configparser.ConfigParser() self._config.read(_files)
{"golden_diff": "diff --git a/gitlab/cli.py b/gitlab/cli.py\n--- a/gitlab/cli.py\n+++ b/gitlab/cli.py\n@@ -17,6 +17,7 @@\n # along with this program. If not, see <http://www.gnu.org/licenses/>.\n \n from __future__ import print_function\n+\n import argparse\n import functools\n import importlib\n@@ -143,9 +144,13 @@\n # load the propermodule (v3 or v4) accordingly. At that point we don't have\n # any subparser setup\n (options, args) = parser.parse_known_args(sys.argv)\n-\n- config = gitlab.config.GitlabConfigParser(options.gitlab,\n- options.config_file)\n+ try:\n+ config = gitlab.config.GitlabConfigParser(\n+ options.gitlab,\n+ options.config_file\n+ )\n+ except gitlab.config.ConfigError as e:\n+ sys.exit(e)\n cli_module = importlib.import_module('gitlab.v%s.cli' % config.api_version)\n \n # Now we build the entire set of subcommands and do the complete parsing\ndiff --git a/gitlab/config.py b/gitlab/config.py\n--- a/gitlab/config.py\n+++ b/gitlab/config.py\n@@ -37,10 +37,27 @@\n pass\n \n \n+class GitlabConfigMissingError(ConfigError):\n+ pass\n+\n+\n class GitlabConfigParser(object):\n def __init__(self, gitlab_id=None, config_files=None):\n self.gitlab_id = gitlab_id\n _files = config_files or _DEFAULT_FILES\n+ file_exist = False\n+ for file in _files:\n+ if os.path.exists(file):\n+ file_exist = True\n+ if not file_exist:\n+ raise GitlabConfigMissingError(\n+ \"Config file not found. \\nPlease create one in \"\n+ \"one of the following locations: {} \\nor \"\n+ \"specify a config file using the '-c' parameter.\".format(\n+ \", \".join(_DEFAULT_FILES)\n+ )\n+ )\n+\n self._config = configparser.ConfigParser()\n self._config.read(_files)\n", "issue": "Handle missing config file\n## Description of the problem, including code/CLI snippet\r\n\r\nWhen '~/.python-gitlab.cfg' is missing, an full traceback is printed\r\n\r\n## Expected Behavior\r\n\r\nCreate a default '~/.python-gitlab.cfg' file and print\r\n\"The tool is not configured yet. Edit ~/.python-gitlab.cfg and run the command again\"\r\n\r\n## Actual Behavior\r\n\r\nSee https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=912537\r\n\r\n## Specifications\r\n\r\n - python-gitlab version: 1.6.0\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import print_function\nimport argparse\nimport functools\nimport importlib\nimport re\nimport sys\n\nimport gitlab.config\n\ncamel_re = re.compile('(.)([A-Z])')\n\n# custom_actions = {\n# cls: {\n# action: (mandatory_args, optional_args, in_obj),\n# },\n# }\ncustom_actions = {}\n\n\ndef register_custom_action(cls_names, mandatory=tuple(), optional=tuple()):\n def wrap(f):\n @functools.wraps(f)\n def wrapped_f(*args, **kwargs):\n return f(*args, **kwargs)\n\n # in_obj defines whether the method belongs to the obj or the manager\n in_obj = True\n classes = cls_names\n if type(cls_names) != tuple:\n classes = (cls_names, )\n\n for cls_name in classes:\n final_name = cls_name\n if cls_name.endswith('Manager'):\n final_name = cls_name.replace('Manager', '')\n in_obj = False\n if final_name not in custom_actions:\n custom_actions[final_name] = {}\n\n action = f.__name__.replace('_', '-')\n custom_actions[final_name][action] = (mandatory, optional, in_obj)\n\n return wrapped_f\n return wrap\n\n\ndef die(msg, e=None):\n if e:\n msg = \"%s (%s)\" % (msg, e)\n sys.stderr.write(msg + \"\\n\")\n sys.exit(1)\n\n\ndef what_to_cls(what):\n return \"\".join([s.capitalize() for s in what.split(\"-\")])\n\n\ndef cls_to_what(cls):\n return camel_re.sub(r'\\1-\\2', cls.__name__).lower()\n\n\ndef _get_base_parser(add_help=True):\n parser = argparse.ArgumentParser(\n add_help=add_help,\n description=\"GitLab API Command Line Interface\")\n parser.add_argument(\"--version\", help=\"Display the version.\",\n action=\"store_true\")\n parser.add_argument(\"-v\", \"--verbose\", \"--fancy\",\n help=\"Verbose mode (legacy format only)\",\n action=\"store_true\")\n parser.add_argument(\"-d\", \"--debug\",\n help=\"Debug mode (display HTTP requests)\",\n action=\"store_true\")\n parser.add_argument(\"-c\", \"--config-file\", action='append',\n help=(\"Configuration file to use. Can be used \"\n \"multiple times.\"))\n parser.add_argument(\"-g\", \"--gitlab\",\n help=(\"Which configuration section should \"\n \"be used. If not defined, the default selection \"\n \"will be used.\"),\n required=False)\n parser.add_argument(\"-o\", \"--output\",\n help=\"Output format (v4 only): json|legacy|yaml\",\n required=False,\n choices=['json', 'legacy', 'yaml'],\n default=\"legacy\")\n parser.add_argument(\"-f\", \"--fields\",\n help=(\"Fields to display in the output (comma \"\n \"separated). Not used with legacy output\"),\n required=False)\n\n return parser\n\n\ndef _get_parser(cli_module):\n parser = _get_base_parser()\n return cli_module.extend_parser(parser)\n\n\ndef _parse_value(v):\n if isinstance(v, str) and v.startswith('@'):\n # If the user-provided value starts with @, we try to read the file\n # path provided after @ as the real value. Exit on any error.\n try:\n with open(v[1:]) as fl:\n return fl.read()\n except Exception as e:\n sys.stderr.write(\"%s\\n\" % e)\n sys.exit(1)\n\n return v\n\n\ndef main():\n if \"--version\" in sys.argv:\n print(gitlab.__version__)\n exit(0)\n\n parser = _get_base_parser(add_help=False)\n if \"--help\" in sys.argv or \"-h\" in sys.argv:\n parser.print_help()\n exit(0)\n\n # This first parsing step is used to find the gitlab config to use, and\n # load the propermodule (v3 or v4) accordingly. 
At that point we don't have\n # any subparser setup\n (options, args) = parser.parse_known_args(sys.argv)\n\n config = gitlab.config.GitlabConfigParser(options.gitlab,\n options.config_file)\n cli_module = importlib.import_module('gitlab.v%s.cli' % config.api_version)\n\n # Now we build the entire set of subcommands and do the complete parsing\n parser = _get_parser(cli_module)\n args = parser.parse_args(sys.argv[1:])\n\n config_files = args.config_file\n gitlab_id = args.gitlab\n verbose = args.verbose\n output = args.output\n fields = []\n if args.fields:\n fields = [x.strip() for x in args.fields.split(',')]\n debug = args.debug\n action = args.action\n what = args.what\n\n args = args.__dict__\n # Remove CLI behavior-related args\n for item in ('gitlab', 'config_file', 'verbose', 'debug', 'what', 'action',\n 'version', 'output'):\n args.pop(item)\n args = {k: _parse_value(v) for k, v in args.items() if v is not None}\n\n try:\n gl = gitlab.Gitlab.from_config(gitlab_id, config_files)\n if gl.private_token or gl.oauth_token:\n gl.auth()\n except Exception as e:\n die(str(e))\n\n if debug:\n gl.enable_debug()\n\n cli_module.run(gl, what, action, args, verbose, output, fields)\n\n sys.exit(0)\n", "path": "gitlab/cli.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport os\n\nfrom six.moves import configparser\n\n_DEFAULT_FILES = [\n '/etc/python-gitlab.cfg',\n os.path.expanduser('~/.python-gitlab.cfg')\n]\n\n\nclass ConfigError(Exception):\n pass\n\n\nclass GitlabIDError(ConfigError):\n pass\n\n\nclass GitlabDataError(ConfigError):\n pass\n\n\nclass GitlabConfigParser(object):\n def __init__(self, gitlab_id=None, config_files=None):\n self.gitlab_id = gitlab_id\n _files = config_files or _DEFAULT_FILES\n self._config = configparser.ConfigParser()\n self._config.read(_files)\n\n if self.gitlab_id is None:\n try:\n self.gitlab_id = self._config.get('global', 'default')\n except Exception:\n raise GitlabIDError(\"Impossible to get the gitlab id \"\n \"(not specified in config file)\")\n\n try:\n self.url = self._config.get(self.gitlab_id, 'url')\n except Exception:\n raise GitlabDataError(\"Impossible to get gitlab informations from \"\n \"configuration (%s)\" % self.gitlab_id)\n\n self.ssl_verify = True\n try:\n self.ssl_verify = self._config.getboolean('global', 'ssl_verify')\n except ValueError:\n # Value Error means the option exists but isn't a boolean.\n # Get as a string instead as it should then be a local path to a\n # CA bundle.\n try:\n self.ssl_verify = self._config.get('global', 'ssl_verify')\n except Exception:\n pass\n except Exception:\n pass\n try:\n self.ssl_verify = self._config.getboolean(self.gitlab_id,\n 'ssl_verify')\n except ValueError:\n # Value Error means the option exists but isn't a boolean.\n # Get as a string instead as it should then be a local path to a\n # CA bundle.\n try:\n self.ssl_verify = self._config.get(self.gitlab_id,\n 'ssl_verify')\n except Exception:\n pass\n except Exception:\n pass\n\n self.timeout = 60\n try:\n self.timeout = self._config.getint('global', 'timeout')\n except Exception:\n pass\n try:\n self.timeout = self._config.getint(self.gitlab_id, 'timeout')\n except Exception:\n pass\n\n self.private_token = None\n try:\n self.private_token = self._config.get(self.gitlab_id,\n 'private_token')\n except Exception:\n pass\n\n self.oauth_token = None\n try:\n self.oauth_token = self._config.get(self.gitlab_id, 'oauth_token')\n except Exception:\n pass\n\n self.http_username = None\n self.http_password = None\n try:\n self.http_username = self._config.get(self.gitlab_id,\n 'http_username')\n self.http_password = self._config.get(self.gitlab_id,\n 'http_password')\n except Exception:\n pass\n\n self.http_username = None\n self.http_password = None\n try:\n self.http_username = self._config.get(self.gitlab_id,\n 'http_username')\n self.http_password = self._config.get(self.gitlab_id,\n 'http_password')\n except Exception:\n pass\n\n self.api_version = '4'\n try:\n self.api_version = self._config.get('global', 'api_version')\n except Exception:\n pass\n try:\n self.api_version = self._config.get(self.gitlab_id, 'api_version')\n except Exception:\n pass\n if self.api_version not in ('4',):\n raise GitlabDataError(\"Unsupported API version: %s\" %\n self.api_version)\n\n self.per_page = None\n for section in ['global', self.gitlab_id]:\n try:\n self.per_page = self._config.getint(section, 'per_page')\n except Exception:\n pass\n if self.per_page is not None and not 0 <= self.per_page <= 100:\n raise GitlabDataError(\"Unsupported per_page number: %s\" %\n self.per_page)\n", "path": "gitlab/config.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>\n#\n# This program 
is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import print_function\n\nimport argparse\nimport functools\nimport importlib\nimport re\nimport sys\n\nimport gitlab.config\n\ncamel_re = re.compile('(.)([A-Z])')\n\n# custom_actions = {\n# cls: {\n# action: (mandatory_args, optional_args, in_obj),\n# },\n# }\ncustom_actions = {}\n\n\ndef register_custom_action(cls_names, mandatory=tuple(), optional=tuple()):\n def wrap(f):\n @functools.wraps(f)\n def wrapped_f(*args, **kwargs):\n return f(*args, **kwargs)\n\n # in_obj defines whether the method belongs to the obj or the manager\n in_obj = True\n classes = cls_names\n if type(cls_names) != tuple:\n classes = (cls_names, )\n\n for cls_name in classes:\n final_name = cls_name\n if cls_name.endswith('Manager'):\n final_name = cls_name.replace('Manager', '')\n in_obj = False\n if final_name not in custom_actions:\n custom_actions[final_name] = {}\n\n action = f.__name__.replace('_', '-')\n custom_actions[final_name][action] = (mandatory, optional, in_obj)\n\n return wrapped_f\n return wrap\n\n\ndef die(msg, e=None):\n if e:\n msg = \"%s (%s)\" % (msg, e)\n sys.stderr.write(msg + \"\\n\")\n sys.exit(1)\n\n\ndef what_to_cls(what):\n return \"\".join([s.capitalize() for s in what.split(\"-\")])\n\n\ndef cls_to_what(cls):\n return camel_re.sub(r'\\1-\\2', cls.__name__).lower()\n\n\ndef _get_base_parser(add_help=True):\n parser = argparse.ArgumentParser(\n add_help=add_help,\n description=\"GitLab API Command Line Interface\")\n parser.add_argument(\"--version\", help=\"Display the version.\",\n action=\"store_true\")\n parser.add_argument(\"-v\", \"--verbose\", \"--fancy\",\n help=\"Verbose mode (legacy format only)\",\n action=\"store_true\")\n parser.add_argument(\"-d\", \"--debug\",\n help=\"Debug mode (display HTTP requests)\",\n action=\"store_true\")\n parser.add_argument(\"-c\", \"--config-file\", action='append',\n help=(\"Configuration file to use. Can be used \"\n \"multiple times.\"))\n parser.add_argument(\"-g\", \"--gitlab\",\n help=(\"Which configuration section should \"\n \"be used. If not defined, the default selection \"\n \"will be used.\"),\n required=False)\n parser.add_argument(\"-o\", \"--output\",\n help=\"Output format (v4 only): json|legacy|yaml\",\n required=False,\n choices=['json', 'legacy', 'yaml'],\n default=\"legacy\")\n parser.add_argument(\"-f\", \"--fields\",\n help=(\"Fields to display in the output (comma \"\n \"separated). Not used with legacy output\"),\n required=False)\n\n return parser\n\n\ndef _get_parser(cli_module):\n parser = _get_base_parser()\n return cli_module.extend_parser(parser)\n\n\ndef _parse_value(v):\n if isinstance(v, str) and v.startswith('@'):\n # If the user-provided value starts with @, we try to read the file\n # path provided after @ as the real value. 
Exit on any error.\n try:\n with open(v[1:]) as fl:\n return fl.read()\n except Exception as e:\n sys.stderr.write(\"%s\\n\" % e)\n sys.exit(1)\n\n return v\n\n\ndef main():\n if \"--version\" in sys.argv:\n print(gitlab.__version__)\n exit(0)\n\n parser = _get_base_parser(add_help=False)\n if \"--help\" in sys.argv or \"-h\" in sys.argv:\n parser.print_help()\n exit(0)\n\n # This first parsing step is used to find the gitlab config to use, and\n # load the propermodule (v3 or v4) accordingly. At that point we don't have\n # any subparser setup\n (options, args) = parser.parse_known_args(sys.argv)\n try:\n config = gitlab.config.GitlabConfigParser(\n options.gitlab,\n options.config_file\n )\n except gitlab.config.ConfigError as e:\n sys.exit(e)\n cli_module = importlib.import_module('gitlab.v%s.cli' % config.api_version)\n\n # Now we build the entire set of subcommands and do the complete parsing\n parser = _get_parser(cli_module)\n args = parser.parse_args(sys.argv[1:])\n\n config_files = args.config_file\n gitlab_id = args.gitlab\n verbose = args.verbose\n output = args.output\n fields = []\n if args.fields:\n fields = [x.strip() for x in args.fields.split(',')]\n debug = args.debug\n action = args.action\n what = args.what\n\n args = args.__dict__\n # Remove CLI behavior-related args\n for item in ('gitlab', 'config_file', 'verbose', 'debug', 'what', 'action',\n 'version', 'output'):\n args.pop(item)\n args = {k: _parse_value(v) for k, v in args.items() if v is not None}\n\n try:\n gl = gitlab.Gitlab.from_config(gitlab_id, config_files)\n if gl.private_token or gl.oauth_token:\n gl.auth()\n except Exception as e:\n die(str(e))\n\n if debug:\n gl.enable_debug()\n\n cli_module.run(gl, what, action, args, verbose, output, fields)\n\n sys.exit(0)\n", "path": "gitlab/cli.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport os\n\nfrom six.moves import configparser\n\n_DEFAULT_FILES = [\n '/etc/python-gitlab.cfg',\n os.path.expanduser('~/.python-gitlab.cfg')\n]\n\n\nclass ConfigError(Exception):\n pass\n\n\nclass GitlabIDError(ConfigError):\n pass\n\n\nclass GitlabDataError(ConfigError):\n pass\n\n\nclass GitlabConfigMissingError(ConfigError):\n pass\n\n\nclass GitlabConfigParser(object):\n def __init__(self, gitlab_id=None, config_files=None):\n self.gitlab_id = gitlab_id\n _files = config_files or _DEFAULT_FILES\n file_exist = False\n for file in _files:\n if os.path.exists(file):\n file_exist = True\n if not file_exist:\n raise GitlabConfigMissingError(\n \"Config file not found. 
\\nPlease create one in \"\n \"one of the following locations: {} \\nor \"\n \"specify a config file using the '-c' parameter.\".format(\n \", \".join(_DEFAULT_FILES)\n )\n )\n\n self._config = configparser.ConfigParser()\n self._config.read(_files)\n\n if self.gitlab_id is None:\n try:\n self.gitlab_id = self._config.get('global', 'default')\n except Exception:\n raise GitlabIDError(\"Impossible to get the gitlab id \"\n \"(not specified in config file)\")\n\n try:\n self.url = self._config.get(self.gitlab_id, 'url')\n except Exception:\n raise GitlabDataError(\"Impossible to get gitlab informations from \"\n \"configuration (%s)\" % self.gitlab_id)\n\n self.ssl_verify = True\n try:\n self.ssl_verify = self._config.getboolean('global', 'ssl_verify')\n except ValueError:\n # Value Error means the option exists but isn't a boolean.\n # Get as a string instead as it should then be a local path to a\n # CA bundle.\n try:\n self.ssl_verify = self._config.get('global', 'ssl_verify')\n except Exception:\n pass\n except Exception:\n pass\n try:\n self.ssl_verify = self._config.getboolean(self.gitlab_id,\n 'ssl_verify')\n except ValueError:\n # Value Error means the option exists but isn't a boolean.\n # Get as a string instead as it should then be a local path to a\n # CA bundle.\n try:\n self.ssl_verify = self._config.get(self.gitlab_id,\n 'ssl_verify')\n except Exception:\n pass\n except Exception:\n pass\n\n self.timeout = 60\n try:\n self.timeout = self._config.getint('global', 'timeout')\n except Exception:\n pass\n try:\n self.timeout = self._config.getint(self.gitlab_id, 'timeout')\n except Exception:\n pass\n\n self.private_token = None\n try:\n self.private_token = self._config.get(self.gitlab_id,\n 'private_token')\n except Exception:\n pass\n\n self.oauth_token = None\n try:\n self.oauth_token = self._config.get(self.gitlab_id, 'oauth_token')\n except Exception:\n pass\n\n self.http_username = None\n self.http_password = None\n try:\n self.http_username = self._config.get(self.gitlab_id,\n 'http_username')\n self.http_password = self._config.get(self.gitlab_id,\n 'http_password')\n except Exception:\n pass\n\n self.http_username = None\n self.http_password = None\n try:\n self.http_username = self._config.get(self.gitlab_id,\n 'http_username')\n self.http_password = self._config.get(self.gitlab_id,\n 'http_password')\n except Exception:\n pass\n\n self.api_version = '4'\n try:\n self.api_version = self._config.get('global', 'api_version')\n except Exception:\n pass\n try:\n self.api_version = self._config.get(self.gitlab_id, 'api_version')\n except Exception:\n pass\n if self.api_version not in ('4',):\n raise GitlabDataError(\"Unsupported API version: %s\" %\n self.api_version)\n\n self.per_page = None\n for section in ['global', self.gitlab_id]:\n try:\n self.per_page = self._config.getint(section, 'per_page')\n except Exception:\n pass\n if self.per_page is not None and not 0 <= self.per_page <= 100:\n raise GitlabDataError(\"Unsupported per_page number: %s\" %\n self.per_page)\n", "path": "gitlab/config.py"}]}
num_tokens: 3,664
num_tokens_diff: 478

problem_id: gh_patches_debug_7311
source: rasdani/github-patches
task_type: git_diff
in_source_id: freedomofpress__securedrop-4644
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- replace "hidden service" occurrences ## Status ready for review ## Description of Changes Changes Proposed: - no longer refer to [Onion Services](https://2019.www.torproject.org/docs/onion-services.html.en) as hidden services; - there are NO new images I added, it's just text; - all changed content here is either just a comment (playbook, or shell script); - changelog was kept as is. ## Testing I followed the _(slightly outdated)_ [Documentation Guidelines](https://docs.securedrop.org/en/latest/development/documentation_guidelines.html), and all looked fine: ``` # make docs ``` Gave me the following: ``` ... | copying static files... done | copying extra files... done | dumping search index in English (code: en) ... done | dumping object inventory... done | build succeeded. +-------------------------------------------------------------------------------- [I 190725 16:16:16 server:296] Serving on http://127.0.0.1:8000 [I 190725 16:16:16 handlers:62] Start watching changes [I 190725 16:16:16 handlers:64] Start detecting changes ``` `make docs-linkcheck` returned an error, but that's not related to the changes made here. `docs-lint` ran just fine. ## Deployment Any special considerations for deployment? - AFAIK, no. ## Checklist ### If you made changes to the server application code: - [ ] Linting (`make lint`) and tests (`make -C securedrop test`) pass in the development container ### If you made changes to `securedrop-admin`: - [ ] Linting and tests (`make -C admin test`) pass in the admin development container ### If you made changes to the system configuration: - [ ] [Configuration tests](https://docs.securedrop.org/en/latest/development/testing_configuration_tests.html) pass ### If you made non-trivial code changes: - [ ] I have written a test plan and validated it for this PR ### If you made changes to documentation: - [x] Doc linting (`make docs-lint`) passed locally --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `install_files/ansible-base/roles/backup/files/0.3_collect.py` Content: ``` 1 #!/usr/bin/python2.7 2 """ 3 4 This script should be copied to the App server and ran by the anisble 5 plabook. When run (as root), it collects all of the necessary information 6 to backup the 0.3 system and stores it in /tmp/sd-backup-0.3-TIME_STAMP.zip.gpg 7 8 """ 9 10 import sys 11 import os 12 import io 13 import zipfile 14 from datetime import datetime 15 # Import the application config.py file 16 sys.path.append("/var/www/securedrop") 17 import config # noqa: F403 18 import gnupg # noqa: F403 19 20 TOR_SERVICES = "/var/lib/tor/services" 21 TOR_CONFIG = "/etc/tor/torrc" 22 23 24 def collect_config_file(zf): 25 config_file_path = os.path.join(config.SECUREDROP_ROOT, "config.py") 26 zf.write(config_file_path) 27 28 29 def collect_securedrop_data_root(zf): 30 # The store and key dirs are shared between both interfaces 31 for root, dirs, files in os.walk(config.SECUREDROP_DATA_ROOT): 32 for name in files: 33 zf.write(os.path.join(root, name)) 34 35 36 def collect_custom_header_image(zf): 37 # The custom header image is copied over the deafult `static/i/logo.png`. 
38 zf.write(os.path.join(config.SECUREDROP_ROOT, "static/i/logo.png")) 39 40 41 def collect_tor_files(zf): 42 # All of the tor hidden service private keys are stored in the THS specific 43 # subdirectory `/var/lib/tor/services` backing up this directory will back 44 # up all of the THS and ATHS required keys needed to restore all the hidden 45 # services on that system. 46 for root, dirs, files in os.walk(TOR_SERVICES): 47 for name in files: 48 zf.write(os.path.join(root, name)) 49 50 # The tor config file has the ATHS client names required to restore 51 # the ATHS info. These names are also in the the specific client_key file 52 # but backing up this file makes it easier than parsing the files during a 53 # restore. 54 zf.write(TOR_CONFIG) 55 56 57 def encrypt_zip_file(zf_fn): 58 # Encrypt the backup zip file with the application's gpg public key 59 gpg = gnupg.GPG(binary='gpg2', homedir=config.GPG_KEY_DIR) 60 e_fn = '{}.gpg'.format(zf_fn) 61 62 stream = io.open(zf_fn, "rb") 63 gpg.encrypt_file(stream, config.JOURNALIST_KEY, always_trust='True', 64 output=e_fn) 65 66 67 def main(): 68 # name append a timestamp to the sd-backup zip filename 69 dt = str(datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S")) 70 zf_fn = 'sd-backup-{}.zip'.format(dt) 71 with zipfile.ZipFile(zf_fn, 'w') as zf: 72 collect_config_file(zf) 73 collect_securedrop_data_root(zf) 74 collect_custom_header_image(zf) 75 collect_tor_files(zf) 76 encrypt_zip_file(zf_fn) 77 print(zf_fn) 78 79 80 if __name__ == "__main__": 81 main() 82 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/install_files/ansible-base/roles/backup/files/0.3_collect.py b/install_files/ansible-base/roles/backup/files/0.3_collect.py --- a/install_files/ansible-base/roles/backup/files/0.3_collect.py +++ b/install_files/ansible-base/roles/backup/files/0.3_collect.py @@ -39,7 +39,7 @@ def collect_tor_files(zf): - # All of the tor hidden service private keys are stored in the THS specific + # All of the tor Onion Service private keys are stored in the THS specific # subdirectory `/var/lib/tor/services` backing up this directory will back # up all of the THS and ATHS required keys needed to restore all the hidden # services on that system.
{"golden_diff": "diff --git a/install_files/ansible-base/roles/backup/files/0.3_collect.py b/install_files/ansible-base/roles/backup/files/0.3_collect.py\n--- a/install_files/ansible-base/roles/backup/files/0.3_collect.py\n+++ b/install_files/ansible-base/roles/backup/files/0.3_collect.py\n@@ -39,7 +39,7 @@\n \n \n def collect_tor_files(zf):\n- # All of the tor hidden service private keys are stored in the THS specific\n+ # All of the tor Onion Service private keys are stored in the THS specific\n # subdirectory `/var/lib/tor/services` backing up this directory will back\n # up all of the THS and ATHS required keys needed to restore all the hidden\n # services on that system.\n", "issue": "replace \"hidden service\" occurrences\n## Status\r\n\r\nready for review\r\n\r\n## Description of Changes\r\n\r\nChanges Proposed:\r\n\r\n - no longer refer to [Onion Services](https://2019.www.torproject.org/docs/onion-services.html.en) as hidden services;\r\n - there are NO new images I added, it's just text;\r\n - all changed content here is either just a comment (playbook, or shell script);\r\n - changelog was kept as is.\r\n\r\n## Testing\r\n\r\nI followed the _(slightly outdated)_ [Documentation Guidelines](https://docs.securedrop.org/en/latest/development/documentation_guidelines.html), and all looked fine:\r\n\r\n```\r\n# make docs\r\n```\r\n\r\nGave me the following:\r\n\r\n```\r\n ...\r\n\r\n| copying static files... done\r\n| copying extra files... done\r\n| dumping search index in English (code: en) ... done\r\n| dumping object inventory... done\r\n| build succeeded.\r\n+--------------------------------------------------------------------------------\r\n\r\n[I 190725 16:16:16 server:296] Serving on http://127.0.0.1:8000\r\n[I 190725 16:16:16 handlers:62] Start watching changes\r\n[I 190725 16:16:16 handlers:64] Start detecting changes\r\n```\r\n`make docs-linkcheck` returned an error, but that's not related to the changes made here. `docs-lint` ran just fine.\r\n\r\n## Deployment\r\n\r\nAny special considerations for deployment?\r\n\r\n - AFAIK, no.\r\n\r\n## Checklist\r\n\r\n### If you made changes to the server application code:\r\n\r\n- [ ] Linting (`make lint`) and tests (`make -C securedrop test`) pass in the development container\r\n\r\n### If you made changes to `securedrop-admin`:\r\n\r\n- [ ] Linting and tests (`make -C admin test`) pass in the admin development container\r\n\r\n### If you made changes to the system configuration:\r\n\r\n- [ ] [Configuration tests](https://docs.securedrop.org/en/latest/development/testing_configuration_tests.html) pass\r\n\r\n### If you made non-trivial code changes:\r\n\r\n- [ ] I have written a test plan and validated it for this PR\r\n\r\n### If you made changes to documentation:\r\n\r\n- [x] Doc linting (`make docs-lint`) passed locally\r\n\n", "before_files": [{"content": "#!/usr/bin/python2.7\n\"\"\"\n\nThis script should be copied to the App server and ran by the anisble\nplabook. 
When run (as root), it collects all of the necessary information\nto backup the 0.3 system and stores it in /tmp/sd-backup-0.3-TIME_STAMP.zip.gpg\n\n\"\"\"\n\nimport sys\nimport os\nimport io\nimport zipfile\nfrom datetime import datetime\n# Import the application config.py file\nsys.path.append(\"/var/www/securedrop\")\nimport config # noqa: F403\nimport gnupg # noqa: F403\n\nTOR_SERVICES = \"/var/lib/tor/services\"\nTOR_CONFIG = \"/etc/tor/torrc\"\n\n\ndef collect_config_file(zf):\n config_file_path = os.path.join(config.SECUREDROP_ROOT, \"config.py\")\n zf.write(config_file_path)\n\n\ndef collect_securedrop_data_root(zf):\n # The store and key dirs are shared between both interfaces\n for root, dirs, files in os.walk(config.SECUREDROP_DATA_ROOT):\n for name in files:\n zf.write(os.path.join(root, name))\n\n\ndef collect_custom_header_image(zf):\n # The custom header image is copied over the deafult `static/i/logo.png`.\n zf.write(os.path.join(config.SECUREDROP_ROOT, \"static/i/logo.png\"))\n\n\ndef collect_tor_files(zf):\n # All of the tor hidden service private keys are stored in the THS specific\n # subdirectory `/var/lib/tor/services` backing up this directory will back\n # up all of the THS and ATHS required keys needed to restore all the hidden\n # services on that system.\n for root, dirs, files in os.walk(TOR_SERVICES):\n for name in files:\n zf.write(os.path.join(root, name))\n\n # The tor config file has the ATHS client names required to restore\n # the ATHS info. These names are also in the the specific client_key file\n # but backing up this file makes it easier than parsing the files during a\n # restore.\n zf.write(TOR_CONFIG)\n\n\ndef encrypt_zip_file(zf_fn):\n # Encrypt the backup zip file with the application's gpg public key\n gpg = gnupg.GPG(binary='gpg2', homedir=config.GPG_KEY_DIR)\n e_fn = '{}.gpg'.format(zf_fn)\n\n stream = io.open(zf_fn, \"rb\")\n gpg.encrypt_file(stream, config.JOURNALIST_KEY, always_trust='True',\n output=e_fn)\n\n\ndef main():\n # name append a timestamp to the sd-backup zip filename\n dt = str(datetime.utcnow().strftime(\"%Y-%m-%d--%H-%M-%S\"))\n zf_fn = 'sd-backup-{}.zip'.format(dt)\n with zipfile.ZipFile(zf_fn, 'w') as zf:\n collect_config_file(zf)\n collect_securedrop_data_root(zf)\n collect_custom_header_image(zf)\n collect_tor_files(zf)\n encrypt_zip_file(zf_fn)\n print(zf_fn)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install_files/ansible-base/roles/backup/files/0.3_collect.py"}], "after_files": [{"content": "#!/usr/bin/python2.7\n\"\"\"\n\nThis script should be copied to the App server and ran by the anisble\nplabook. 
When run (as root), it collects all of the necessary information\nto backup the 0.3 system and stores it in /tmp/sd-backup-0.3-TIME_STAMP.zip.gpg\n\n\"\"\"\n\nimport sys\nimport os\nimport io\nimport zipfile\nfrom datetime import datetime\n# Import the application config.py file\nsys.path.append(\"/var/www/securedrop\")\nimport config # noqa: F403\nimport gnupg # noqa: F403\n\nTOR_SERVICES = \"/var/lib/tor/services\"\nTOR_CONFIG = \"/etc/tor/torrc\"\n\n\ndef collect_config_file(zf):\n config_file_path = os.path.join(config.SECUREDROP_ROOT, \"config.py\")\n zf.write(config_file_path)\n\n\ndef collect_securedrop_data_root(zf):\n # The store and key dirs are shared between both interfaces\n for root, dirs, files in os.walk(config.SECUREDROP_DATA_ROOT):\n for name in files:\n zf.write(os.path.join(root, name))\n\n\ndef collect_custom_header_image(zf):\n # The custom header image is copied over the deafult `static/i/logo.png`.\n zf.write(os.path.join(config.SECUREDROP_ROOT, \"static/i/logo.png\"))\n\n\ndef collect_tor_files(zf):\n # All of the tor Onion Service private keys are stored in the THS specific\n # subdirectory `/var/lib/tor/services` backing up this directory will back\n # up all of the THS and ATHS required keys needed to restore all the hidden\n # services on that system.\n for root, dirs, files in os.walk(TOR_SERVICES):\n for name in files:\n zf.write(os.path.join(root, name))\n\n # The tor config file has the ATHS client names required to restore\n # the ATHS info. These names are also in the the specific client_key file\n # but backing up this file makes it easier than parsing the files during a\n # restore.\n zf.write(TOR_CONFIG)\n\n\ndef encrypt_zip_file(zf_fn):\n # Encrypt the backup zip file with the application's gpg public key\n gpg = gnupg.GPG(binary='gpg2', homedir=config.GPG_KEY_DIR)\n e_fn = '{}.gpg'.format(zf_fn)\n\n stream = io.open(zf_fn, \"rb\")\n gpg.encrypt_file(stream, config.JOURNALIST_KEY, always_trust='True',\n output=e_fn)\n\n\ndef main():\n # name append a timestamp to the sd-backup zip filename\n dt = str(datetime.utcnow().strftime(\"%Y-%m-%d--%H-%M-%S\"))\n zf_fn = 'sd-backup-{}.zip'.format(dt)\n with zipfile.ZipFile(zf_fn, 'w') as zf:\n collect_config_file(zf)\n collect_securedrop_data_root(zf)\n collect_custom_header_image(zf)\n collect_tor_files(zf)\n encrypt_zip_file(zf_fn)\n print(zf_fn)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install_files/ansible-base/roles/backup/files/0.3_collect.py"}]}
num_tokens: 1,626
num_tokens_diff: 180
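
The SecureDrop record above is purely a wording change: a code comment that read "tor hidden service" now reads "tor Onion Service". As a side illustration of how remaining occurrences could be located before rewording them, here is a small hypothetical helper. It is not part of SecureDrop or of the dataset record; the root path and the case-insensitive pattern are assumptions.

```python
# Hypothetical helper, not part of SecureDrop: list files and lines that still say
# "hidden service" so they can be reworded to "Onion Service" like the patch above.
import pathlib
import re

def find_occurrences(root, pattern=r"hidden service"):
    hits = []
    for path in pathlib.Path(root).rglob("*"):
        if not path.is_file():
            continue
        try:
            text = path.read_text(encoding="utf-8", errors="ignore")
        except OSError:
            continue
        for lineno, line in enumerate(text.splitlines(), start=1):
            if re.search(pattern, line, flags=re.IGNORECASE):
                hits.append((str(path), lineno, line.strip()))
    return hits

if __name__ == "__main__":
    for path, lineno, line in find_occurrences("."):
        print(f"{path}:{lineno}: {line}")
```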

problem_id: gh_patches_debug_27620
source: rasdani/github-patches
task_type: git_diff
in_source_id: ros2__ros2_documentation-1098
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add "Edit on Github" links to the pages Currently there's a link at the bottom to the RST in the generated site. It would be valuable to have the link to the github sources to facilitate opening a pull request. There are ways to add this to the theme or side bar. There may be plugins to add this too. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conf.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # rosindex documentation build configuration file, created by 4 # sphinx-quickstart on Tue Oct 2 16:34:57 2018. 5 # 6 # This file is execfile()d with the current directory set to its 7 # containing dir. 8 # 9 # Note that not all possible configuration values are present in this 10 # autogenerated file. 11 # 12 # All configuration values have a default; values that are commented out 13 # serve to show the default. 14 15 # If extensions (or modules to document with autodoc) are in another directory, 16 # add these directories to sys.path here. If the directory is relative to the 17 # documentation root, use os.path.abspath to make it absolute, like shown here. 18 # 19 import sys, os 20 import itertools 21 from docutils.parsers.rst import Directive 22 23 sys.path.append(os.path.abspath('./sphinx-multiversion')) 24 25 26 # The suffix(es) of source filenames. 27 # You can specify multiple suffix as a list of string: 28 # 29 # The master toctree document. 30 master_doc = 'index' 31 32 # The default role 33 default_role = 'any' 34 35 # The set of warnings to suppress. 36 suppress_warnings = ['image.nonlocal_uri'] 37 38 # General information about the project. 39 project = u'ros2 documentation' 40 copyright = u'2018-2021, Open Robotics' 41 author = u'Open Robotics' 42 43 # The version info for the project you're documenting, acts as replacement for 44 # |version| and |release|, also used in various other places throughout the 45 # built documents. 46 # 47 # The short X.Y version. 48 version = u'' 49 # The full version, including alpha/beta/rc tags. 50 release = u'' 51 52 # Define the default role to use for links 53 default_role = 'any' 54 55 # The language for content autogenerated by Sphinx. Refer to documentation 56 # for a list of supported languages. 57 # 58 # This is also used if you do content translation via gettext catalogs. 59 # Usually you set "language" from the command line for these cases. 60 language = None 61 62 # List of patterns, relative to source directory, that match files and 63 # directories to ignore when looking for source files. 64 # This patterns also effect to html_static_path and html_extra_path 65 exclude_patterns = ['**/_*.rst'] 66 67 # The name of the Pygments (syntax highlighting) style to use. 68 pygments_style = 'sphinx' 69 70 # Add any Sphinx extension module names here, as strings. 
They can be 71 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 72 extensions = ['sphinx.ext.intersphinx', 'sphinx_tabs.tabs', 'sphinx_multiversion'] 73 74 # Intersphinx mapping 75 76 intersphinx_mapping = { 77 'catkin_pkg': ('http://docs.ros.org/en/independent/api/catkin_pkg/html', None), 78 'jenkins_tools': ('http://docs.ros.org/en/independent/api/jenkins_tools/html', None), 79 'rosdep': ('http://docs.ros.org/en/independent/api/rosdep/html', None), 80 'rosdistro': ('http://docs.ros.org/en/independent/api/rosdistro/html', None), 81 'rosinstall': ('http://docs.ros.org/en/independent/api/rosinstall/html', None), 82 'rospkg': ('http://docs.ros.org/en/independent/api/rospkg/html', None), 83 'vcstools': ('http://docs.ros.org/en/independent/api/vcstools/html', None) 84 } 85 86 # -- Options for HTML output ---------------------------------------------- 87 88 # The theme to use for HTML and HTML Help pages. See the documentation for 89 # a list of builtin themes. 90 # 91 #html_theme = 'alabaster' 92 93 templates_path = [ 94 "source/_templates", 95 ] 96 97 html_sidebars = { 98 '**': ['navigation.html', 'versioning.html'], 99 } 100 101 # smv_tag_whitelist = None 102 103 smv_branch_whitelist = r'^(rolling|foxy|eloquent|dashing)$' 104 105 106 smv_released_pattern = r'^refs/(heads|remotes/[^/]+)/(foxy|eloquent|dashing).*$' 107 smv_remote_whitelist = r'^(origin)$' 108 smv_latest_version = 'foxy' 109 110 111 112 html_favicon = 'favicon.ico' 113 114 # Add any paths that contain custom static files (such as style sheets) here, 115 # relative to this directory. They are copied after the builtin static files, 116 # so a file named "default.css" will overwrite the builtin "default.css". 117 # html_static_path = ['_static'] 118 119 # Drop any source link suffix 120 html_sourcelink_suffix = '' 121 122 # -- Options for HTMLHelp output ------------------------------------------ 123 124 # Output file base name for HTML help builder. 
125 htmlhelp_basename = 'ros2_docsdoc' 126 127 html_baseurl = 'https://docs.ros.org/en' 128 129 class RedirectFrom(Directive): 130 131 has_content = True 132 template_name = 'layout.html' 133 redirections = {} 134 135 @classmethod 136 def register(cls, app): 137 app.connect('html-collect-pages', cls.generate) 138 app.add_directive('redirect-from', cls) 139 return app 140 141 @classmethod 142 def generate(cls, app): 143 from sphinx.builders.html import StandaloneHTMLBuilder 144 if not isinstance(app.builder, StandaloneHTMLBuilder): 145 return 146 147 redirect_html_fragment = """ 148 <link rel="canonical" href="{base_url}/{url}" /> 149 <meta http-equiv="refresh" content="0; url={url}" /> 150 <script> 151 window.location.href = '{url}'; 152 </script> 153 """ 154 redirections = { 155 os.path.splitext(os.path.relpath( 156 document_path, app.srcdir 157 ))[0]: redirect_urls 158 for document_path, redirect_urls in cls.redirections.items() 159 } 160 redirection_conflict = next(( 161 (canon_1, canon_2, redirs_1.intersection(redirs_2)) 162 for (canon_1, redirs_1), (canon_2, redirs_2) 163 in itertools.combinations(redirections.items(), 2) 164 if redirs_1.intersection(redirs_2) 165 ), None) 166 if redirection_conflict: 167 canonical_url_1, canonical_url_2 = redirection_conflict[:2] 168 conflicting_redirect_urls = redirection_conflict[-1] 169 raise RuntimeError( 170 'Documents {} and {} define conflicting redirects: {}'.format( 171 canonical_url_1, canonical_url_2, conflicting_redirect_urls 172 ) 173 ) 174 all_canonical_urls = set(redirections.keys()) 175 all_redirect_urls = { 176 redirect_url 177 for redirect_urls in redirections.values() 178 for redirect_url in redirect_urls 179 } 180 conflicting_urls = all_canonical_urls.intersection(all_redirect_urls) 181 if conflicting_urls: 182 raise RuntimeError( 183 'Some redirects conflict with existing documents: {}'.format( 184 conflicting_urls 185 ) 186 ) 187 188 for canonical_url, redirect_urls in redirections.items(): 189 for redirect_url in redirect_urls: 190 context = { 191 'canonical_url': os.path.relpath( 192 canonical_url, redirect_url 193 ), 194 'title': os.path.basename(redirect_url), 195 'metatags': redirect_html_fragment.format( 196 base_url=app.config.html_baseurl, 197 url=app.builder.get_relative_uri( 198 redirect_url, canonical_url 199 ) 200 ) 201 } 202 yield (redirect_url, context, cls.template_name) 203 204 def run(self): 205 document_path = self.state.document.current_source 206 if document_path not in RedirectFrom.redirections: 207 RedirectFrom.redirections[document_path] = set() 208 RedirectFrom.redirections[document_path].update(self.content) 209 return [] 210 211 212 def make_router(origin, destination): 213 def _missing_reference(app, env, node, contnode): 214 from docutils import nodes 215 from docutils.utils import relative_path 216 from sphinx.util import docname_join 217 218 doctarget = docname_join(node['refdoc'], node['reftarget']) 219 if doctarget.startswith(origin): 220 routed_doctarget = doctarget.replace(origin, destination) 221 if routed_doctarget in env.all_docs: 222 newnode = nodes.reference( 223 '', contnode.astext(), internal=True 224 ) 225 newnode['refuri'] = app.builder.get_relative_uri( 226 node['refdoc'], routed_doctarget 227 ) 228 return newnode 229 return _missing_reference 230 231 def smv_rewrite_baseurl(app, config): 232 # When using Sphinx multiversion, there is no way at initial configuration time 233 # to determine the distribution we are currently targeting (conf.py is read before 234 # external defines are 
setup, and environment variables aren't passed through to 235 # conf.py). Instead, hook into the 'config-inited' event which is late enough 236 # to rewrite the html_baseurl with the current version. 237 if app.config.smv_current_version != '': 238 app.config.html_baseurl = app.config.html_baseurl + '/' + app.config.smv_current_version 239 240 def setup(app): 241 app.connect('config-inited', smv_rewrite_baseurl) 242 RedirectFrom.register(app) 243 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conf.py b/conf.py --- a/conf.py +++ b/conf.py @@ -69,7 +69,7 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -extensions = ['sphinx.ext.intersphinx', 'sphinx_tabs.tabs', 'sphinx_multiversion'] +extensions = ['sphinx.ext.intersphinx', 'sphinx_tabs.tabs', 'sphinx_multiversion', 'sphinxcontrib.srclinks'] # Intersphinx mapping @@ -90,12 +90,16 @@ # #html_theme = 'alabaster' +srclink_project = 'https://github.com/ros2/ros2_documentation' +srclink_src_path = 'source/' +srclink_branch = 'rolling' + templates_path = [ "source/_templates", ] html_sidebars = { - '**': ['navigation.html', 'versioning.html'], + '**': ['navigation.html', 'srclinks.html', 'versioning.html'], } # smv_tag_whitelist = None @@ -237,6 +241,11 @@ if app.config.smv_current_version != '': app.config.html_baseurl = app.config.html_baseurl + '/' + app.config.smv_current_version +def srclink_rewrite_branch(app, config): + if app.config.smv_current_version != '': + app.config.srclink_branch = app.config.smv_current_version + def setup(app): app.connect('config-inited', smv_rewrite_baseurl) + app.connect('config-inited', srclink_rewrite_branch) RedirectFrom.register(app)
{"golden_diff": "diff --git a/conf.py b/conf.py\n--- a/conf.py\n+++ b/conf.py\n@@ -69,7 +69,7 @@\n \n # Add any Sphinx extension module names here, as strings. They can be\n # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n-extensions = ['sphinx.ext.intersphinx', 'sphinx_tabs.tabs', 'sphinx_multiversion']\n+extensions = ['sphinx.ext.intersphinx', 'sphinx_tabs.tabs', 'sphinx_multiversion', 'sphinxcontrib.srclinks']\n \n # Intersphinx mapping\n \n@@ -90,12 +90,16 @@\n #\n #html_theme = 'alabaster'\n \n+srclink_project = 'https://github.com/ros2/ros2_documentation'\n+srclink_src_path = 'source/'\n+srclink_branch = 'rolling'\n+\n templates_path = [\n \"source/_templates\",\n ]\n \n html_sidebars = {\n- '**': ['navigation.html', 'versioning.html'],\n+ '**': ['navigation.html', 'srclinks.html', 'versioning.html'],\n }\n \n # smv_tag_whitelist = None\n@@ -237,6 +241,11 @@\n if app.config.smv_current_version != '':\n app.config.html_baseurl = app.config.html_baseurl + '/' + app.config.smv_current_version\n \n+def srclink_rewrite_branch(app, config):\n+ if app.config.smv_current_version != '':\n+ app.config.srclink_branch = app.config.smv_current_version\n+\n def setup(app):\n app.connect('config-inited', smv_rewrite_baseurl)\n+ app.connect('config-inited', srclink_rewrite_branch)\n RedirectFrom.register(app)\n", "issue": "Add \"Edit on Github\" links to the pages\nCurrently there's a link at the bottom to the RST in the generated site. It would be valuable to have the link to the github sources to facilitate opening a pull request. \r\n\r\nThere are ways to add this to the theme or side bar. There may be plugins to add this too. \r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# rosindex documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 2 16:34:57 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport sys, os\nimport itertools\nfrom docutils.parsers.rst import Directive\n\nsys.path.append(os.path.abspath('./sphinx-multiversion'))\n\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# The master toctree document.\nmaster_doc = 'index'\n\n# The default role\ndefault_role = 'any'\n\n# The set of warnings to suppress.\nsuppress_warnings = ['image.nonlocal_uri']\n\n# General information about the project.\nproject = u'ros2 documentation'\ncopyright = u'2018-2021, Open Robotics'\nauthor = u'Open Robotics'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = u''\n# The full version, including alpha/beta/rc tags.\nrelease = u''\n\n# Define the default role to use for links\ndefault_role = 'any'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['**/_*.rst']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\nextensions = ['sphinx.ext.intersphinx', 'sphinx_tabs.tabs', 'sphinx_multiversion']\n\n# Intersphinx mapping\n\nintersphinx_mapping = {\n 'catkin_pkg': ('http://docs.ros.org/en/independent/api/catkin_pkg/html', None),\n 'jenkins_tools': ('http://docs.ros.org/en/independent/api/jenkins_tools/html', None),\n 'rosdep': ('http://docs.ros.org/en/independent/api/rosdep/html', None),\n 'rosdistro': ('http://docs.ros.org/en/independent/api/rosdistro/html', None),\n 'rosinstall': ('http://docs.ros.org/en/independent/api/rosinstall/html', None),\n 'rospkg': ('http://docs.ros.org/en/independent/api/rospkg/html', None),\n 'vcstools': ('http://docs.ros.org/en/independent/api/vcstools/html', None)\n}\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n#html_theme = 'alabaster'\n\ntemplates_path = [\n \"source/_templates\",\n]\n\nhtml_sidebars = {\n '**': ['navigation.html', 'versioning.html'],\n}\n\n# smv_tag_whitelist = None\n\nsmv_branch_whitelist = r'^(rolling|foxy|eloquent|dashing)$'\n\n\nsmv_released_pattern = r'^refs/(heads|remotes/[^/]+)/(foxy|eloquent|dashing).*$'\nsmv_remote_whitelist = r'^(origin)$'\nsmv_latest_version = 'foxy'\n\n\n\nhtml_favicon = 'favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Drop any source link suffix\nhtml_sourcelink_suffix = ''\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ros2_docsdoc'\n\nhtml_baseurl = 'https://docs.ros.org/en'\n\nclass RedirectFrom(Directive):\n\n has_content = True\n template_name = 'layout.html'\n redirections = {}\n\n @classmethod\n def register(cls, app):\n app.connect('html-collect-pages', cls.generate)\n app.add_directive('redirect-from', cls)\n return app\n\n @classmethod\n def generate(cls, app):\n from sphinx.builders.html import StandaloneHTMLBuilder\n if not isinstance(app.builder, StandaloneHTMLBuilder):\n return\n\n redirect_html_fragment = \"\"\"\n <link rel=\"canonical\" href=\"{base_url}/{url}\" />\n <meta http-equiv=\"refresh\" content=\"0; url={url}\" />\n <script>\n window.location.href = '{url}';\n </script>\n \"\"\"\n redirections = {\n os.path.splitext(os.path.relpath(\n document_path, app.srcdir\n ))[0]: redirect_urls\n for document_path, redirect_urls in cls.redirections.items()\n }\n redirection_conflict = next((\n (canon_1, canon_2, redirs_1.intersection(redirs_2))\n for (canon_1, redirs_1), (canon_2, redirs_2)\n in itertools.combinations(redirections.items(), 2)\n if redirs_1.intersection(redirs_2)\n ), None)\n if redirection_conflict:\n canonical_url_1, canonical_url_2 = redirection_conflict[:2]\n conflicting_redirect_urls = redirection_conflict[-1]\n raise RuntimeError(\n 'Documents {} and {} define conflicting redirects: {}'.format(\n canonical_url_1, canonical_url_2, conflicting_redirect_urls\n )\n )\n all_canonical_urls = set(redirections.keys())\n all_redirect_urls = {\n redirect_url\n for redirect_urls in redirections.values()\n for redirect_url in redirect_urls\n }\n conflicting_urls = all_canonical_urls.intersection(all_redirect_urls)\n if conflicting_urls:\n raise RuntimeError(\n 'Some redirects conflict with existing documents: {}'.format(\n conflicting_urls\n )\n )\n\n for canonical_url, redirect_urls in redirections.items():\n for redirect_url in redirect_urls:\n context = {\n 'canonical_url': os.path.relpath(\n canonical_url, redirect_url\n ),\n 'title': os.path.basename(redirect_url),\n 'metatags': redirect_html_fragment.format(\n base_url=app.config.html_baseurl,\n url=app.builder.get_relative_uri(\n redirect_url, canonical_url\n )\n )\n }\n yield (redirect_url, context, cls.template_name)\n\n def run(self):\n document_path = self.state.document.current_source\n if document_path not in RedirectFrom.redirections:\n RedirectFrom.redirections[document_path] = set()\n RedirectFrom.redirections[document_path].update(self.content)\n return []\n\n\ndef make_router(origin, destination):\n def _missing_reference(app, env, node, contnode):\n from docutils import nodes\n from docutils.utils import relative_path\n from sphinx.util import docname_join\n\n doctarget = docname_join(node['refdoc'], node['reftarget'])\n if doctarget.startswith(origin):\n routed_doctarget = doctarget.replace(origin, destination)\n if routed_doctarget in env.all_docs:\n newnode = nodes.reference(\n '', contnode.astext(), internal=True\n )\n newnode['refuri'] = app.builder.get_relative_uri(\n node['refdoc'], routed_doctarget\n )\n return newnode\n return _missing_reference\n\ndef smv_rewrite_baseurl(app, config):\n # When using Sphinx multiversion, there is no way 
at initial configuration time\n # to determine the distribution we are currently targeting (conf.py is read before\n # external defines are setup, and environment variables aren't passed through to\n # conf.py). Instead, hook into the 'config-inited' event which is late enough\n # to rewrite the html_baseurl with the current version.\n if app.config.smv_current_version != '':\n app.config.html_baseurl = app.config.html_baseurl + '/' + app.config.smv_current_version\n\ndef setup(app):\n app.connect('config-inited', smv_rewrite_baseurl)\n RedirectFrom.register(app)\n", "path": "conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# rosindex documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 2 16:34:57 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport sys, os\nimport itertools\nfrom docutils.parsers.rst import Directive\n\nsys.path.append(os.path.abspath('./sphinx-multiversion'))\n\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# The master toctree document.\nmaster_doc = 'index'\n\n# The default role\ndefault_role = 'any'\n\n# The set of warnings to suppress.\nsuppress_warnings = ['image.nonlocal_uri']\n\n# General information about the project.\nproject = u'ros2 documentation'\ncopyright = u'2018-2021, Open Robotics'\nauthor = u'Open Robotics'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = u''\n# The full version, including alpha/beta/rc tags.\nrelease = u''\n\n# Define the default role to use for links\ndefault_role = 'any'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['**/_*.rst']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\nextensions = ['sphinx.ext.intersphinx', 'sphinx_tabs.tabs', 'sphinx_multiversion', 'sphinxcontrib.srclinks']\n\n# Intersphinx mapping\n\nintersphinx_mapping = {\n 'catkin_pkg': ('http://docs.ros.org/en/independent/api/catkin_pkg/html', None),\n 'jenkins_tools': ('http://docs.ros.org/en/independent/api/jenkins_tools/html', None),\n 'rosdep': ('http://docs.ros.org/en/independent/api/rosdep/html', None),\n 'rosdistro': ('http://docs.ros.org/en/independent/api/rosdistro/html', None),\n 'rosinstall': ('http://docs.ros.org/en/independent/api/rosinstall/html', None),\n 'rospkg': ('http://docs.ros.org/en/independent/api/rospkg/html', None),\n 'vcstools': ('http://docs.ros.org/en/independent/api/vcstools/html', None)\n}\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n#html_theme = 'alabaster'\n\nsrclink_project = 'https://github.com/ros2/ros2_documentation'\nsrclink_src_path = 'source/'\nsrclink_branch = 'rolling'\n\ntemplates_path = [\n \"source/_templates\",\n]\n\nhtml_sidebars = {\n '**': ['navigation.html', 'srclinks.html', 'versioning.html'],\n}\n\n# smv_tag_whitelist = None\n\nsmv_branch_whitelist = r'^(rolling|foxy|eloquent|dashing)$'\n\n\nsmv_released_pattern = r'^refs/(heads|remotes/[^/]+)/(foxy|eloquent|dashing).*$'\nsmv_remote_whitelist = r'^(origin)$'\nsmv_latest_version = 'foxy'\n\n\n\nhtml_favicon = 'favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Drop any source link suffix\nhtml_sourcelink_suffix = ''\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ros2_docsdoc'\n\nhtml_baseurl = 'https://docs.ros.org/en'\n\nclass RedirectFrom(Directive):\n\n has_content = True\n template_name = 'layout.html'\n redirections = {}\n\n @classmethod\n def register(cls, app):\n app.connect('html-collect-pages', cls.generate)\n app.add_directive('redirect-from', cls)\n return app\n\n @classmethod\n def generate(cls, app):\n from sphinx.builders.html import StandaloneHTMLBuilder\n if not isinstance(app.builder, StandaloneHTMLBuilder):\n return\n\n redirect_html_fragment = \"\"\"\n <link rel=\"canonical\" href=\"{base_url}/{url}\" />\n <meta http-equiv=\"refresh\" content=\"0; url={url}\" />\n <script>\n window.location.href = '{url}';\n </script>\n \"\"\"\n redirections = {\n os.path.splitext(os.path.relpath(\n document_path, app.srcdir\n ))[0]: redirect_urls\n for document_path, redirect_urls in cls.redirections.items()\n }\n redirection_conflict = next((\n (canon_1, canon_2, redirs_1.intersection(redirs_2))\n for (canon_1, redirs_1), (canon_2, redirs_2)\n in itertools.combinations(redirections.items(), 2)\n if redirs_1.intersection(redirs_2)\n ), None)\n if redirection_conflict:\n canonical_url_1, canonical_url_2 = redirection_conflict[:2]\n conflicting_redirect_urls = redirection_conflict[-1]\n raise RuntimeError(\n 'Documents {} and {} define conflicting redirects: {}'.format(\n canonical_url_1, canonical_url_2, conflicting_redirect_urls\n )\n )\n all_canonical_urls = set(redirections.keys())\n all_redirect_urls = {\n redirect_url\n for 
redirect_urls in redirections.values()\n for redirect_url in redirect_urls\n }\n conflicting_urls = all_canonical_urls.intersection(all_redirect_urls)\n if conflicting_urls:\n raise RuntimeError(\n 'Some redirects conflict with existing documents: {}'.format(\n conflicting_urls\n )\n )\n\n for canonical_url, redirect_urls in redirections.items():\n for redirect_url in redirect_urls:\n context = {\n 'canonical_url': os.path.relpath(\n canonical_url, redirect_url\n ),\n 'title': os.path.basename(redirect_url),\n 'metatags': redirect_html_fragment.format(\n base_url=app.config.html_baseurl,\n url=app.builder.get_relative_uri(\n redirect_url, canonical_url\n )\n )\n }\n yield (redirect_url, context, cls.template_name)\n\n def run(self):\n document_path = self.state.document.current_source\n if document_path not in RedirectFrom.redirections:\n RedirectFrom.redirections[document_path] = set()\n RedirectFrom.redirections[document_path].update(self.content)\n return []\n\n\ndef make_router(origin, destination):\n def _missing_reference(app, env, node, contnode):\n from docutils import nodes\n from docutils.utils import relative_path\n from sphinx.util import docname_join\n\n doctarget = docname_join(node['refdoc'], node['reftarget'])\n if doctarget.startswith(origin):\n routed_doctarget = doctarget.replace(origin, destination)\n if routed_doctarget in env.all_docs:\n newnode = nodes.reference(\n '', contnode.astext(), internal=True\n )\n newnode['refuri'] = app.builder.get_relative_uri(\n node['refdoc'], routed_doctarget\n )\n return newnode\n return _missing_reference\n\ndef smv_rewrite_baseurl(app, config):\n # When using Sphinx multiversion, there is no way at initial configuration time\n # to determine the distribution we are currently targeting (conf.py is read before\n # external defines are setup, and environment variables aren't passed through to\n # conf.py). Instead, hook into the 'config-inited' event which is late enough\n # to rewrite the html_baseurl with the current version.\n if app.config.smv_current_version != '':\n app.config.html_baseurl = app.config.html_baseurl + '/' + app.config.smv_current_version\n\ndef srclink_rewrite_branch(app, config):\n if app.config.smv_current_version != '':\n app.config.srclink_branch = app.config.smv_current_version\n\ndef setup(app):\n app.connect('config-inited', smv_rewrite_baseurl)\n app.connect('config-inited', srclink_rewrite_branch)\n RedirectFrom.register(app)\n", "path": "conf.py"}]}
2,923
379
gh_patches_debug_7166
rasdani/github-patches
git_diff
pytorch__vision-7665
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- bug when using PIL backend in classification references ### 🐛 Describe the bug When I try to train a model using the train.py script under references/classification with the PIL backend, I encounter an error: ```bash ValueError: backend can be 'tensor' or 'pil', but got pil ``` To reproduce this issue, you can write: ```bash git clone https://github.com/pytorch/vision && cd vision conda create -n vision_env python=3.9 conda activate vision_env pip install torch==1.13.1 torchvision cd references/classification/ python train.py --data-path "path-to-dataset" --test-only --backend pil ``` ### Versions [pip3] mypy-extensions==1.0.0 [pip3] numpy==1.24.3 [pip3] torch==1.13.1 [pip3] torchvision==0.14.1 [conda] numpy 1.24.3 pypi_0 pypi [conda] torch 1.13.1 pypi_0 pypi [conda] torchvision 0.14.1 pypi_0 pypi --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `references/classification/presets.py` Content: ``` 1 import torch 2 from torchvision.transforms import autoaugment, transforms 3 from torchvision.transforms.functional import InterpolationMode 4 5 6 class ClassificationPresetTrain: 7 def __init__( 8 self, 9 *, 10 crop_size, 11 mean=(0.485, 0.456, 0.406), 12 std=(0.229, 0.224, 0.225), 13 interpolation=InterpolationMode.BILINEAR, 14 hflip_prob=0.5, 15 auto_augment_policy=None, 16 ra_magnitude=9, 17 augmix_severity=3, 18 random_erase_prob=0.0, 19 backend="pil", 20 ): 21 trans = [] 22 backend = backend.lower() 23 if backend == "tensor": 24 trans.append(transforms.PILToTensor()) 25 elif backend != "pil": 26 raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}") 27 28 trans.append(transforms.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True)) 29 if hflip_prob > 0: 30 trans.append(transforms.RandomHorizontalFlip(hflip_prob)) 31 if auto_augment_policy is not None: 32 if auto_augment_policy == "ra": 33 trans.append(autoaugment.RandAugment(interpolation=interpolation, magnitude=ra_magnitude)) 34 elif auto_augment_policy == "ta_wide": 35 trans.append(autoaugment.TrivialAugmentWide(interpolation=interpolation)) 36 elif auto_augment_policy == "augmix": 37 trans.append(autoaugment.AugMix(interpolation=interpolation, severity=augmix_severity)) 38 else: 39 aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy) 40 trans.append(autoaugment.AutoAugment(policy=aa_policy, interpolation=interpolation)) 41 42 if backend == "pil": 43 trans.append(transforms.PILToTensor()) 44 45 trans.extend( 46 [ 47 transforms.ConvertImageDtype(torch.float), 48 transforms.Normalize(mean=mean, std=std), 49 ] 50 ) 51 if random_erase_prob > 0: 52 trans.append(transforms.RandomErasing(p=random_erase_prob)) 53 54 self.transforms = transforms.Compose(trans) 55 56 def __call__(self, img): 57 return self.transforms(img) 58 59 60 class ClassificationPresetEval: 61 def __init__( 62 self, 63 *, 64 crop_size, 65 resize_size=256, 66 mean=(0.485, 0.456, 0.406), 67 std=(0.229, 0.224, 0.225), 68 interpolation=InterpolationMode.BILINEAR, 69 backend="pil", 70 ): 71 trans = [] 72 73 backend = backend.lower() 74 if backend == "tensor": 75 trans.append(transforms.PILToTensor()) 76 else: 77 raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}") 78 79 trans += [ 80 transforms.Resize(resize_size, interpolation=interpolation, antialias=True), 81 transforms.CenterCrop(crop_size), 82 ] 83 84 
if backend == "pil": 85 trans.append(transforms.PILToTensor()) 86 87 trans += [ 88 transforms.ConvertImageDtype(torch.float), 89 transforms.Normalize(mean=mean, std=std), 90 ] 91 92 self.transforms = transforms.Compose(trans) 93 94 def __call__(self, img): 95 return self.transforms(img) 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/references/classification/presets.py b/references/classification/presets.py --- a/references/classification/presets.py +++ b/references/classification/presets.py @@ -69,11 +69,10 @@ backend="pil", ): trans = [] - backend = backend.lower() if backend == "tensor": trans.append(transforms.PILToTensor()) - else: + elif backend != "pil": raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}") trans += [
{"golden_diff": "diff --git a/references/classification/presets.py b/references/classification/presets.py\n--- a/references/classification/presets.py\n+++ b/references/classification/presets.py\n@@ -69,11 +69,10 @@\n backend=\"pil\",\n ):\n trans = []\n-\n backend = backend.lower()\n if backend == \"tensor\":\n trans.append(transforms.PILToTensor())\n- else:\n+ elif backend != \"pil\":\n raise ValueError(f\"backend can be 'tensor' or 'pil', but got {backend}\")\n \n trans += [\n", "issue": "bug when using PIL backend in classification references\n### \ud83d\udc1b Describe the bug\n\nWhen I try to train a model using the train.py script under references/classification with the PIL backend, I encounter an error:\r\n```bash\r\nValueError: backend can be 'tensor' or 'pil', but got pil\r\n```\r\n\r\nTo reproduce this issue, you can write:\r\n```bash\r\ngit clone https://github.com/pytorch/vision && cd vision\r\nconda create -n vision_env python=3.9\r\nconda activate vision_env\r\npip install torch==1.13.1 torchvision\r\ncd references/classification/\r\npython train.py --data-path \"path-to-dataset\" --test-only --backend pil\r\n```\n\n### Versions\n\n[pip3] mypy-extensions==1.0.0\r\n[pip3] numpy==1.24.3\r\n[pip3] torch==1.13.1\r\n[pip3] torchvision==0.14.1\r\n[conda] numpy 1.24.3 pypi_0 pypi\r\n[conda] torch 1.13.1 pypi_0 pypi\r\n[conda] torchvision 0.14.1 pypi_0 pypi\n", "before_files": [{"content": "import torch\nfrom torchvision.transforms import autoaugment, transforms\nfrom torchvision.transforms.functional import InterpolationMode\n\n\nclass ClassificationPresetTrain:\n def __init__(\n self,\n *,\n crop_size,\n mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225),\n interpolation=InterpolationMode.BILINEAR,\n hflip_prob=0.5,\n auto_augment_policy=None,\n ra_magnitude=9,\n augmix_severity=3,\n random_erase_prob=0.0,\n backend=\"pil\",\n ):\n trans = []\n backend = backend.lower()\n if backend == \"tensor\":\n trans.append(transforms.PILToTensor())\n elif backend != \"pil\":\n raise ValueError(f\"backend can be 'tensor' or 'pil', but got {backend}\")\n\n trans.append(transforms.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True))\n if hflip_prob > 0:\n trans.append(transforms.RandomHorizontalFlip(hflip_prob))\n if auto_augment_policy is not None:\n if auto_augment_policy == \"ra\":\n trans.append(autoaugment.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))\n elif auto_augment_policy == \"ta_wide\":\n trans.append(autoaugment.TrivialAugmentWide(interpolation=interpolation))\n elif auto_augment_policy == \"augmix\":\n trans.append(autoaugment.AugMix(interpolation=interpolation, severity=augmix_severity))\n else:\n aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy)\n trans.append(autoaugment.AutoAugment(policy=aa_policy, interpolation=interpolation))\n\n if backend == \"pil\":\n trans.append(transforms.PILToTensor())\n\n trans.extend(\n [\n transforms.ConvertImageDtype(torch.float),\n transforms.Normalize(mean=mean, std=std),\n ]\n )\n if random_erase_prob > 0:\n trans.append(transforms.RandomErasing(p=random_erase_prob))\n\n self.transforms = transforms.Compose(trans)\n\n def __call__(self, img):\n return self.transforms(img)\n\n\nclass ClassificationPresetEval:\n def __init__(\n self,\n *,\n crop_size,\n resize_size=256,\n mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225),\n interpolation=InterpolationMode.BILINEAR,\n backend=\"pil\",\n ):\n trans = []\n\n backend = backend.lower()\n if backend == \"tensor\":\n 
trans.append(transforms.PILToTensor())\n else:\n raise ValueError(f\"backend can be 'tensor' or 'pil', but got {backend}\")\n\n trans += [\n transforms.Resize(resize_size, interpolation=interpolation, antialias=True),\n transforms.CenterCrop(crop_size),\n ]\n\n if backend == \"pil\":\n trans.append(transforms.PILToTensor())\n\n trans += [\n transforms.ConvertImageDtype(torch.float),\n transforms.Normalize(mean=mean, std=std),\n ]\n\n self.transforms = transforms.Compose(trans)\n\n def __call__(self, img):\n return self.transforms(img)\n", "path": "references/classification/presets.py"}], "after_files": [{"content": "import torch\nfrom torchvision.transforms import autoaugment, transforms\nfrom torchvision.transforms.functional import InterpolationMode\n\n\nclass ClassificationPresetTrain:\n def __init__(\n self,\n *,\n crop_size,\n mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225),\n interpolation=InterpolationMode.BILINEAR,\n hflip_prob=0.5,\n auto_augment_policy=None,\n ra_magnitude=9,\n augmix_severity=3,\n random_erase_prob=0.0,\n backend=\"pil\",\n ):\n trans = []\n backend = backend.lower()\n if backend == \"tensor\":\n trans.append(transforms.PILToTensor())\n elif backend != \"pil\":\n raise ValueError(f\"backend can be 'tensor' or 'pil', but got {backend}\")\n\n trans.append(transforms.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True))\n if hflip_prob > 0:\n trans.append(transforms.RandomHorizontalFlip(hflip_prob))\n if auto_augment_policy is not None:\n if auto_augment_policy == \"ra\":\n trans.append(autoaugment.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))\n elif auto_augment_policy == \"ta_wide\":\n trans.append(autoaugment.TrivialAugmentWide(interpolation=interpolation))\n elif auto_augment_policy == \"augmix\":\n trans.append(autoaugment.AugMix(interpolation=interpolation, severity=augmix_severity))\n else:\n aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy)\n trans.append(autoaugment.AutoAugment(policy=aa_policy, interpolation=interpolation))\n\n if backend == \"pil\":\n trans.append(transforms.PILToTensor())\n\n trans.extend(\n [\n transforms.ConvertImageDtype(torch.float),\n transforms.Normalize(mean=mean, std=std),\n ]\n )\n if random_erase_prob > 0:\n trans.append(transforms.RandomErasing(p=random_erase_prob))\n\n self.transforms = transforms.Compose(trans)\n\n def __call__(self, img):\n return self.transforms(img)\n\n\nclass ClassificationPresetEval:\n def __init__(\n self,\n *,\n crop_size,\n resize_size=256,\n mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225),\n interpolation=InterpolationMode.BILINEAR,\n backend=\"pil\",\n ):\n trans = []\n backend = backend.lower()\n if backend == \"tensor\":\n trans.append(transforms.PILToTensor())\n elif backend != \"pil\":\n raise ValueError(f\"backend can be 'tensor' or 'pil', but got {backend}\")\n\n trans += [\n transforms.Resize(resize_size, interpolation=interpolation, antialias=True),\n transforms.CenterCrop(crop_size),\n ]\n\n if backend == \"pil\":\n trans.append(transforms.PILToTensor())\n\n trans += [\n transforms.ConvertImageDtype(torch.float),\n transforms.Normalize(mean=mean, std=std),\n ]\n\n self.transforms = transforms.Compose(trans)\n\n def __call__(self, img):\n return self.transforms(img)\n", "path": "references/classification/presets.py"}]}
num_tokens: 1,446
num_tokens_diff: 133
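
The torchvision fix above is a one-branch change: ClassificationPresetEval lower-cased the backend and then raised for anything that was not "tensor", so the default "pil" could never pass. Below is a minimal, self-contained sketch of the corrected control flow with hypothetical names; it is not the torchvision code itself.

```python
# Minimal sketch of the branching fixed above (hypothetical helper, not torchvision):
# "tensor" adds an up-front conversion step, "pil" passes through unchanged, and
# only genuinely unknown backends are rejected. The buggy version used a bare
# `else: raise ...`, which also rejected "pil".
def build_eval_steps(backend="pil"):
    steps = []
    backend = backend.lower()
    if backend == "tensor":
        steps.append("pil_to_tensor")
    elif backend != "pil":
        raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
    steps.extend(["resize", "center_crop", "to_float", "normalize"])
    return steps

assert build_eval_steps("pil") == ["resize", "center_crop", "to_float", "normalize"]
assert build_eval_steps("tensor")[0] == "pil_to_tensor"
```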

problem_id: gh_patches_debug_13460
source: rasdani/github-patches
task_type: git_diff
in_source_id: hedyorg__hedy-1739
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Teacher's link copied incompletely **Describe the bug** Link for teachers only copies second half of the link (f.e. /hedy/l/123456) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/teacher.py` Content: ``` 1 import json 2 3 from website.auth import requires_login, is_teacher, current_user 4 import utils 5 import uuid 6 from flask import g, request, jsonify, redirect 7 from flask_helpers import render_template 8 import os 9 import hedyweb 10 import hedy_content 11 TRANSLATIONS = hedyweb.Translations () 12 from config import config 13 cookie_name = config ['session'] ['cookie_name'] 14 15 16 def routes (app, database, achievements): 17 global DATABASE 18 global ACHIEVEMENTS 19 DATABASE = database 20 ACHIEVEMENTS = achievements 21 22 @app.route('/classes', methods=['GET']) 23 @requires_login 24 def get_classes (user): 25 if not is_teacher(user): 26 return utils.error_page_403(error=403, ui_message='retrieve_class') 27 return jsonify (DATABASE.get_teacher_classes (user ['username'], True)) 28 29 @app.route('/for-teachers/class/<class_id>', methods=['GET']) 30 @requires_login 31 def get_class (user, class_id): 32 app.logger.info('This is info output') 33 if not is_teacher(user): 34 return utils.error_page_403(error=403, ui_message='retrieve_class') 35 Class = DATABASE.get_class (class_id) 36 if not Class or Class ['teacher'] != user ['username']: 37 return utils.error_page(error=404, ui_message='no_such_class') 38 students = [] 39 for student_username in Class.get ('students', []): 40 student = DATABASE.user_by_username (student_username) 41 programs = DATABASE.programs_for_user(student_username) 42 highest_level = max(program['level'] for program in programs) if len(programs) else 0 43 sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date'])) 44 if sorted_public_programs: 45 latest_shared = sorted_public_programs[-1] 46 latest_shared['link'] = f"/hedy/{latest_shared['id']}/view" 47 else: 48 latest_shared = None 49 students.append ({'username': student_username, 'last_login': utils.datetotimeordate (utils.mstoisostring (student ['last_login'])), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared}) 50 51 if utils.is_testing_request (request): 52 return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']}) 53 54 achievement = None 55 if len(students) > 20: 56 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "full_house") 57 if achievement: 58 achievement = json.dumps(achievement) 59 60 teachers = os.getenv('BETA_TEACHERS', '').split(',') 61 is_beta_teacher = user['username'] in teachers 62 63 return render_template ('class-overview.html', current_page='for-teachers', 64 page_title=hedyweb.get_page_title('class overview'), 65 achievement=achievement, 66 is_beta_teacher=is_beta_teacher, 67 class_info={'students': students, 'link': '/hedy/l/' + Class ['link'], 68 'name': Class ['name'], 'id': Class ['id']}) 69 70 @app.route('/class', methods=['POST']) 71 @requires_login 72 def create_class (user): 73 if not is_teacher(user): 74 return 'Only teachers can create classes', 403 75 76 body = request.json 77 # Validations 78 if not isinstance(body, dict): 79 return 'body must be an object', 400 80 if not 
isinstance(body.get('name'), str): 81 return 'name must be a string', 400 82 83 # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate 84 Classes = DATABASE.get_teacher_classes(user['username'], True) 85 for Class in Classes: 86 if Class['name'] == body['name']: 87 return "duplicate", 200 88 89 Class = { 90 'id': uuid.uuid4().hex, 91 'date': utils.timems (), 92 'teacher': user ['username'], 93 'link': utils.random_id_generator (7), 94 'name': body ['name'] 95 } 96 97 DATABASE.store_class (Class) 98 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "ready_set_education") 99 if achievement: 100 return {'id': Class['id'], 'achievement': achievement}, 200 101 return {'id': Class['id']}, 200 102 103 @app.route('/class/<class_id>', methods=['PUT']) 104 @requires_login 105 def update_class (user, class_id): 106 if not is_teacher(user): 107 return 'Only teachers can update classes', 403 108 109 body = request.json 110 # Validations 111 if not isinstance(body, dict): 112 return 'body must be an object', 400 113 if not isinstance(body.get('name'), str): 114 return 'name must be a string', 400 115 116 Class = DATABASE.get_class (class_id) 117 if not Class or Class ['teacher'] != user ['username']: 118 return 'No such class', 404 119 120 # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate 121 Classes = DATABASE.get_teacher_classes(user ['username'], True) 122 for Class in Classes: 123 if Class['name'] == body['name']: 124 return "duplicate", 200 125 126 Class = DATABASE.update_class (class_id, body ['name']) 127 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "on_second_thoughts") 128 if achievement: 129 return {'achievement': achievement}, 200 130 return {}, 200 131 132 @app.route('/class/<class_id>', methods=['DELETE']) 133 @requires_login 134 def delete_class (user, class_id): 135 Class = DATABASE.get_class (class_id) 136 if not Class or Class ['teacher'] != user ['username']: 137 return 'No such class', 404 138 139 DATABASE.delete_class (Class) 140 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "end_of_semester") 141 if achievement: 142 return {'achievement': achievement}, 200 143 return {}, 200 144 145 @app.route('/class/<class_id>/prejoin/<link>', methods=['GET']) 146 def prejoin_class (class_id, link): 147 Class = DATABASE.get_class (class_id) 148 if not Class or Class ['link'] != link: 149 return utils.error_page(error=404, ui_message='invalid_class_link') 150 user = {} 151 if request.cookies.get (cookie_name): 152 token = DATABASE.get_token(request.cookies.get (cookie_name)) 153 if token: 154 if token ['username'] in Class.get ('students', []): 155 return render_template ('class-already-joined.html', page_title=hedyweb.get_page_title('join class'), 156 current_page='my-profile', class_info={'name': Class ['name']}) 157 user = DATABASE.user_by_username(token ['username']) 158 159 return render_template ('class-prejoin.html', page_title=hedyweb.get_page_title('join class'), 160 current_page='my-profile', 161 class_info={ 162 'id': Class ['id'], 163 'name': Class ['name'], 164 }) 165 166 @app.route('/class/join', methods=['POST']) 167 @requires_login 168 def join_class(user): 169 body = request.json 170 if 'id' in body: 171 Class = DATABASE.get_class(body['id']) 172 if not Class or Class ['id'] != body['id']: 173 return utils.error_page(error=404, ui_message='invalid_class_link') 174 175 DATABASE.add_student_to_class(Class['id'], user['username']) 
176 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "epic_education") 177 if achievement: 178 return {'achievement': achievement}, 200 179 return {}, 200 180 181 @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE']) 182 @requires_login 183 def leave_class (user, class_id, student_id): 184 Class = DATABASE.get_class (class_id) 185 if not Class or Class ['teacher'] != user ['username'] or student_id != user ['username']: 186 return 'No such class', 404 187 188 DATABASE.remove_student_from_class (Class ['id'], student_id) 189 if Class['teacher'] == user['username']: 190 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "detention") 191 if achievement: 192 return {'achievement': achievement}, 200 193 return {}, 200 194 195 @app.route('/for-teachers/customize-class/<class_id>', methods=['GET']) 196 @requires_login 197 def get_class_info(user, class_id): 198 if not is_teacher(user): 199 return utils.error_page_403(error=403, ui_message='retrieve_class') 200 Class = DATABASE.get_class(class_id) 201 if not Class or Class['teacher'] != user['username']: 202 return utils.error_page(error=404, ui_message='no_such_class') 203 204 if hedy_content.Adventures(g.lang).has_adventures(): 205 adventures = hedy_content.Adventures(g.lang).get_adventure_keyname_name_levels() 206 else: 207 adventures = hedy_content.Adventures("en").get_adventure_keyname_name_levels() 208 levels = hedy_content.LevelDefaults(g.lang).levels 209 preferences = DATABASE.get_customizations_class(class_id) 210 211 return render_template('customize-class.html', page_title=hedyweb.get_page_title('customize class'), 212 class_info={'name': Class['name'], 'id': Class['id']}, levels=levels, 213 adventures=adventures, preferences=preferences, current_page='for-teachers') 214 215 @app.route('/customize-class/<class_id>', methods=['PUT']) 216 @requires_login 217 def update_level_preferences(user, class_id): 218 if not is_teacher(user): 219 return 'Only teachers can update class preferences', 403 220 221 body = request.json 222 print(body) 223 # Validations 224 if not isinstance(body, dict): 225 return 'body must be an object', 400 226 if not isinstance(body.get('example_programs'), bool): 227 return 'amount of example programs must be an integer', 400 228 if not isinstance(body.get('hide_level'), bool): 229 return 'level switch must be a boolean', 400 230 if not isinstance(body.get('hide_prev_level'), bool): 231 return 'level switch must be a boolean', 400 232 if not isinstance(body.get('hide_next_level'), bool): 233 return 'level switch must be a boolean', 400 234 if not isinstance(int(body.get('level')), int): 235 return 'level must ben an integer', 400 236 237 Class = DATABASE.get_class(class_id) 238 if not Class or Class['teacher'] != user['username']: 239 return 'No such class', 404 240 241 customizations = {} 242 customizations['id'] = class_id 243 customizations['level'] = int(body.get('level')) 244 customizations['adventures'] = body.get('adventures') 245 customizations['example_programs'] = body.get('example_programs') 246 customizations['hide'] = body.get('hide_level') 247 customizations['hide_prev_level'] = body.get('hide_prev_level') 248 customizations['hide_next_level'] = body.get('hide_next_level') 249 250 DATABASE.update_customizations_class(customizations) 251 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "my_class_my_rules") 252 if achievement: 253 return {'achievement': achievement}, 200 254 return {}, 200 255 256 @app.route('/hedy/l/<link_id>', 
methods=['GET']) 257 def resolve_class_link (link_id): 258 Class = DATABASE.resolve_class_link (link_id) 259 if not Class: 260 return utils.error_page(error=404, ui_message='invalid_class_link') 261 return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302) 262 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/teacher.py b/website/teacher.py --- a/website/teacher.py +++ b/website/teacher.py @@ -64,7 +64,7 @@ page_title=hedyweb.get_page_title('class overview'), achievement=achievement, is_beta_teacher=is_beta_teacher, - class_info={'students': students, 'link': '/hedy/l/' + Class ['link'], + class_info={'students': students, 'link': os.getenv('BASE_URL') + '/hedy/l/' + Class ['link'], 'name': Class ['name'], 'id': Class ['id']}) @app.route('/class', methods=['POST'])
{"golden_diff": "diff --git a/website/teacher.py b/website/teacher.py\n--- a/website/teacher.py\n+++ b/website/teacher.py\n@@ -64,7 +64,7 @@\n page_title=hedyweb.get_page_title('class overview'),\n achievement=achievement,\n is_beta_teacher=is_beta_teacher,\n- class_info={'students': students, 'link': '/hedy/l/' + Class ['link'],\n+ class_info={'students': students, 'link': os.getenv('BASE_URL') + '/hedy/l/' + Class ['link'],\n 'name': Class ['name'], 'id': Class ['id']})\n \n @app.route('/class', methods=['POST'])\n", "issue": "[BUG] Teacher's link copied incompletely\n**Describe the bug**\r\nLink for teachers only copies second half of the link (f.e. /hedy/l/123456)\r\n\r\n\r\n\n", "before_files": [{"content": "import json\n\nfrom website.auth import requires_login, is_teacher, current_user\nimport utils\nimport uuid\nfrom flask import g, request, jsonify, redirect\nfrom flask_helpers import render_template\nimport os\nimport hedyweb\nimport hedy_content\nTRANSLATIONS = hedyweb.Translations ()\nfrom config import config\ncookie_name = config ['session'] ['cookie_name']\n\n\ndef routes (app, database, achievements):\n global DATABASE\n global ACHIEVEMENTS\n DATABASE = database\n ACHIEVEMENTS = achievements\n\n @app.route('/classes', methods=['GET'])\n @requires_login\n def get_classes (user):\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n return jsonify (DATABASE.get_teacher_classes (user ['username'], True))\n\n @app.route('/for-teachers/class/<class_id>', methods=['GET'])\n @requires_login\n def get_class (user, class_id):\n app.logger.info('This is info output')\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return utils.error_page(error=404, ui_message='no_such_class')\n students = []\n for student_username in Class.get ('students', []):\n student = DATABASE.user_by_username (student_username)\n programs = DATABASE.programs_for_user(student_username)\n highest_level = max(program['level'] for program in programs) if len(programs) else 0\n sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))\n if sorted_public_programs:\n latest_shared = sorted_public_programs[-1]\n latest_shared['link'] = f\"/hedy/{latest_shared['id']}/view\"\n else:\n latest_shared = None\n students.append ({'username': student_username, 'last_login': utils.datetotimeordate (utils.mstoisostring (student ['last_login'])), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})\n\n if utils.is_testing_request (request):\n return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n\n achievement = None\n if len(students) > 20:\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"full_house\")\n if achievement:\n achievement = json.dumps(achievement)\n\n teachers = os.getenv('BETA_TEACHERS', '').split(',')\n is_beta_teacher = user['username'] in teachers\n\n return render_template ('class-overview.html', current_page='for-teachers',\n page_title=hedyweb.get_page_title('class overview'),\n achievement=achievement,\n is_beta_teacher=is_beta_teacher,\n class_info={'students': students, 'link': '/hedy/l/' + Class ['link'],\n 'name': Class ['name'], 'id': Class ['id']})\n\n @app.route('/class', methods=['POST'])\n @requires_login\n def create_class 
(user):\n if not is_teacher(user):\n return 'Only teachers can create classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = {\n 'id': uuid.uuid4().hex,\n 'date': utils.timems (),\n 'teacher': user ['username'],\n 'link': utils.random_id_generator (7),\n 'name': body ['name']\n }\n\n DATABASE.store_class (Class)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"ready_set_education\")\n if achievement:\n return {'id': Class['id'], 'achievement': achievement}, 200\n return {'id': Class['id']}, 200\n\n @app.route('/class/<class_id>', methods=['PUT'])\n @requires_login\n def update_class (user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user ['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = DATABASE.update_class (class_id, body ['name'])\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"on_second_thoughts\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['DELETE'])\n @requires_login\n def delete_class (user, class_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.delete_class (Class)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"end_of_semester\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])\n def prejoin_class (class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n user = {}\n if request.cookies.get (cookie_name):\n token = DATABASE.get_token(request.cookies.get (cookie_name))\n if token:\n if token ['username'] in Class.get ('students', []):\n return render_template ('class-already-joined.html', page_title=hedyweb.get_page_title('join class'),\n current_page='my-profile', class_info={'name': Class ['name']})\n user = DATABASE.user_by_username(token ['username'])\n\n return render_template ('class-prejoin.html', page_title=hedyweb.get_page_title('join class'),\n current_page='my-profile',\n class_info={\n 'id': Class ['id'],\n 'name': Class ['name'],\n })\n\n @app.route('/class/join', methods=['POST'])\n @requires_login\n def join_class(user):\n body = request.json\n if 'id' in body:\n Class = DATABASE.get_class(body['id'])\n if not Class or Class ['id'] != body['id']:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n\n 
DATABASE.add_student_to_class(Class['id'], user['username'])\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"epic_education\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])\n @requires_login\n def leave_class (user, class_id, student_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username'] or student_id != user ['username']:\n return 'No such class', 404\n\n DATABASE.remove_student_from_class (Class ['id'], student_id)\n if Class['teacher'] == user['username']:\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"detention\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/for-teachers/customize-class/<class_id>', methods=['GET'])\n @requires_login\n def get_class_info(user, class_id):\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n Class = DATABASE.get_class(class_id)\n if not Class or Class['teacher'] != user['username']:\n return utils.error_page(error=404, ui_message='no_such_class')\n\n if hedy_content.Adventures(g.lang).has_adventures():\n adventures = hedy_content.Adventures(g.lang).get_adventure_keyname_name_levels()\n else:\n adventures = hedy_content.Adventures(\"en\").get_adventure_keyname_name_levels()\n levels = hedy_content.LevelDefaults(g.lang).levels\n preferences = DATABASE.get_customizations_class(class_id)\n\n return render_template('customize-class.html', page_title=hedyweb.get_page_title('customize class'),\n class_info={'name': Class['name'], 'id': Class['id']}, levels=levels,\n adventures=adventures, preferences=preferences, current_page='for-teachers')\n\n @app.route('/customize-class/<class_id>', methods=['PUT'])\n @requires_login\n def update_level_preferences(user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update class preferences', 403\n\n body = request.json\n print(body)\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('example_programs'), bool):\n return 'amount of example programs must be an integer', 400\n if not isinstance(body.get('hide_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(body.get('hide_prev_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(body.get('hide_next_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(int(body.get('level')), int):\n return 'level must ben an integer', 400\n\n Class = DATABASE.get_class(class_id)\n if not Class or Class['teacher'] != user['username']:\n return 'No such class', 404\n\n customizations = {}\n customizations['id'] = class_id\n customizations['level'] = int(body.get('level'))\n customizations['adventures'] = body.get('adventures')\n customizations['example_programs'] = body.get('example_programs')\n customizations['hide'] = body.get('hide_level')\n customizations['hide_prev_level'] = body.get('hide_prev_level')\n customizations['hide_next_level'] = body.get('hide_next_level')\n\n DATABASE.update_customizations_class(customizations)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"my_class_my_rules\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/hedy/l/<link_id>', methods=['GET'])\n def resolve_class_link (link_id):\n Class = DATABASE.resolve_class_link 
(link_id)\n if not Class:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)\n", "path": "website/teacher.py"}], "after_files": [{"content": "import json\n\nfrom website.auth import requires_login, is_teacher, current_user\nimport utils\nimport uuid\nfrom flask import g, request, jsonify, redirect\nfrom flask_helpers import render_template\nimport os\nimport hedyweb\nimport hedy_content\nTRANSLATIONS = hedyweb.Translations ()\nfrom config import config\ncookie_name = config ['session'] ['cookie_name']\n\n\ndef routes (app, database, achievements):\n global DATABASE\n global ACHIEVEMENTS\n DATABASE = database\n ACHIEVEMENTS = achievements\n\n @app.route('/classes', methods=['GET'])\n @requires_login\n def get_classes (user):\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n return jsonify (DATABASE.get_teacher_classes (user ['username'], True))\n\n @app.route('/for-teachers/class/<class_id>', methods=['GET'])\n @requires_login\n def get_class (user, class_id):\n app.logger.info('This is info output')\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return utils.error_page(error=404, ui_message='no_such_class')\n students = []\n for student_username in Class.get ('students', []):\n student = DATABASE.user_by_username (student_username)\n programs = DATABASE.programs_for_user(student_username)\n highest_level = max(program['level'] for program in programs) if len(programs) else 0\n sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))\n if sorted_public_programs:\n latest_shared = sorted_public_programs[-1]\n latest_shared['link'] = f\"/hedy/{latest_shared['id']}/view\"\n else:\n latest_shared = None\n students.append ({'username': student_username, 'last_login': utils.datetotimeordate (utils.mstoisostring (student ['last_login'])), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})\n\n if utils.is_testing_request (request):\n return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n\n achievement = None\n if len(students) > 20:\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"full_house\")\n if achievement:\n achievement = json.dumps(achievement)\n\n teachers = os.getenv('BETA_TEACHERS', '').split(',')\n is_beta_teacher = user['username'] in teachers\n\n return render_template ('class-overview.html', current_page='for-teachers',\n page_title=hedyweb.get_page_title('class overview'),\n achievement=achievement,\n is_beta_teacher=is_beta_teacher,\n class_info={'students': students, 'link': os.getenv('BASE_URL') + '/hedy/l/' + Class ['link'],\n 'name': Class ['name'], 'id': Class ['id']})\n\n @app.route('/class', methods=['POST'])\n @requires_login\n def create_class (user):\n if not is_teacher(user):\n return 'Only teachers can create classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = 
DATABASE.get_teacher_classes(user['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = {\n 'id': uuid.uuid4().hex,\n 'date': utils.timems (),\n 'teacher': user ['username'],\n 'link': utils.random_id_generator (7),\n 'name': body ['name']\n }\n\n DATABASE.store_class (Class)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"ready_set_education\")\n if achievement:\n return {'id': Class['id'], 'achievement': achievement}, 200\n return {'id': Class['id']}, 200\n\n @app.route('/class/<class_id>', methods=['PUT'])\n @requires_login\n def update_class (user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user ['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = DATABASE.update_class (class_id, body ['name'])\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"on_second_thoughts\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['DELETE'])\n @requires_login\n def delete_class (user, class_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.delete_class (Class)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"end_of_semester\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])\n def prejoin_class (class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n user = {}\n if request.cookies.get (cookie_name):\n token = DATABASE.get_token(request.cookies.get (cookie_name))\n if token:\n if token ['username'] in Class.get ('students', []):\n return render_template ('class-already-joined.html', page_title=hedyweb.get_page_title('join class'),\n current_page='my-profile', class_info={'name': Class ['name']})\n user = DATABASE.user_by_username(token ['username'])\n\n return render_template ('class-prejoin.html', page_title=hedyweb.get_page_title('join class'),\n current_page='my-profile',\n class_info={\n 'id': Class ['id'],\n 'name': Class ['name'],\n })\n\n @app.route('/class/join', methods=['POST'])\n @requires_login\n def join_class(user):\n body = request.json\n if 'id' in body:\n Class = DATABASE.get_class(body['id'])\n if not Class or Class ['id'] != body['id']:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n\n DATABASE.add_student_to_class(Class['id'], user['username'])\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"epic_education\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])\n @requires_login\n def leave_class (user, class_id, student_id):\n Class = DATABASE.get_class 
(class_id)\n if not Class or Class ['teacher'] != user ['username'] or student_id != user ['username']:\n return 'No such class', 404\n\n DATABASE.remove_student_from_class (Class ['id'], student_id)\n if Class['teacher'] == user['username']:\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"detention\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/for-teachers/customize-class/<class_id>', methods=['GET'])\n @requires_login\n def get_class_info(user, class_id):\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n Class = DATABASE.get_class(class_id)\n if not Class or Class['teacher'] != user['username']:\n return utils.error_page(error=404, ui_message='no_such_class')\n\n if hedy_content.Adventures(g.lang).has_adventures():\n adventures = hedy_content.Adventures(g.lang).get_adventure_keyname_name_levels()\n else:\n adventures = hedy_content.Adventures(\"en\").get_adventure_keyname_name_levels()\n levels = hedy_content.LevelDefaults(g.lang).levels\n preferences = DATABASE.get_customizations_class(class_id)\n\n return render_template('customize-class.html', page_title=hedyweb.get_page_title('customize class'),\n class_info={'name': Class['name'], 'id': Class['id']}, levels=levels,\n adventures=adventures, preferences=preferences, current_page='for-teachers')\n\n @app.route('/customize-class/<class_id>', methods=['PUT'])\n @requires_login\n def update_level_preferences(user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update class preferences', 403\n\n body = request.json\n print(body)\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('example_programs'), bool):\n return 'amount of example programs must be an integer', 400\n if not isinstance(body.get('hide_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(body.get('hide_prev_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(body.get('hide_next_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(int(body.get('level')), int):\n return 'level must ben an integer', 400\n\n Class = DATABASE.get_class(class_id)\n if not Class or Class['teacher'] != user['username']:\n return 'No such class', 404\n\n customizations = {}\n customizations['id'] = class_id\n customizations['level'] = int(body.get('level'))\n customizations['adventures'] = body.get('adventures')\n customizations['example_programs'] = body.get('example_programs')\n customizations['hide'] = body.get('hide_level')\n customizations['hide_prev_level'] = body.get('hide_prev_level')\n customizations['hide_next_level'] = body.get('hide_next_level')\n\n DATABASE.update_customizations_class(customizations)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"my_class_my_rules\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/hedy/l/<link_id>', methods=['GET'])\n def resolve_class_link (link_id):\n Class = DATABASE.resolve_class_link (link_id)\n if not Class:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)\n", "path": "website/teacher.py"}]}
3,657
149
gh_patches_debug_9707
rasdani/github-patches
git_diff
zulip__zulip-1112
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Commit c74a74d causes docker dev image build to fail. Hi, I had 1.3.13 successfully install in a docker dev environment, which fully built and installed. I decided to try the latest source in master today and this error occurred: ``` running install running build running build_py creating build creating build/lib.linux-x86_64-3.4 creating build/lib.linux-x86_64-3.4/typed_ast copying typed_ast/ast27.py -> build/lib.linux-x86_64-3.4/typed_ast copying typed_ast/ast35.py -> build/lib.linux-x86_64-3.4/typed_ast copying typed_ast/conversions.py -> build/lib.linux-x86_64-3.4/typed_ast copying typed_ast/__init__.py -> build/lib.linux-x86_64-3.4/typed_ast running build_ext building '_ast27' extension creating build/temp.linux-x86_64-3.4 creating build/temp.linux-x86_64-3.4/ast27 creating build/temp.linux-x86_64-3.4/ast27/Parser creating build/temp.linux-x86_64-3.4/ast27/Python creating build/temp.linux-x86_64-3.4/ast27/Custom x86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security -D_FORTIFY_SOURCE=2 -fPIC -Iast27/Include -I/usr/include/python3.4m -I/srv/zulip-venv-cache/2d0d4429f07d3367f31c904bcf110e00407cafa8/zulip-py3-venv/include/python3.4m -c ast27/Parser/acceler.c -o build/temp.linux-x86_64-3.4/ast27/Parser/acceler.o unable to execute 'x86_64-linux-gnu-gcc': No such file or directory error: command 'x86_64-linux-gnu-gcc' failed with exit status 1 ---------------------------------------- Command "/srv/zulip-venv-cache/2d0d4429f07d3367f31c904bcf110e00407cafa8/zulip-py3-venv/bin/python3 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-u0_6ev0b/typed-ast/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-eq6ql577-record/install-record.txt --single-version-externally-managed --compile --install-headers /srv/zulip-venv-cache/2d0d4429f07d3367f31c904bcf110e00407cafa8/zulip-py3-venv/include/site/python3.4/typed-ast" failed with error code 1 in /tmp/pip-build-u0_6ev0b/typed-ast/ Traceback (most recent call last): File "/srv/zulip/provision.py", line 202, in <module> sys.exit(main()) File "/srv/zulip/provision.py", line 153, in main virtualenv_args=['-p', 'python3']) File "/srv/zulip/scripts/lib/setup_venv.py", line 44, in setup_virtualenv do_setup_virtualenv(cached_venv_path, requirements_file, virtualenv_args or []) File "/srv/zulip/scripts/lib/setup_venv.py", line 68, in do_setup_virtualenv run(["pip", "install", "--no-deps", "--requirement", requirements_file]) File "/srv/zulip/zulip_tools.py", line 78, in run raise subprocess.CalledProcessError(rc, args) subprocess.CalledProcessError: Command '['pip', 'install', '--no-deps', '--requirement', '/srv/zulip/requirements/mypy.txt']' returned non-zero exit status 1 ``` At the time of this writing commit https://github.com/zulip/zulip/commit/8cecb3774321adf638b93781a175e74e68a6564b is the latest version I used, so the error must have been between this commit and last commit for release 1.3.13. When I reverted this commit https://github.com/zulip/zulip/commit/c74a74dc7427549eba4ececcf91c483ca2876796 everything compiled as normal. There must be a dependency that is pulled in that makes it work. Hope this helps. Jon --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `scripts/lib/setup_venv.py` Content: ``` 1 from __future__ import print_function 2 3 import os 4 import sys 5 from os.path import dirname, abspath 6 import subprocess 7 from zulip_tools import run 8 9 ZULIP_PATH = dirname(dirname(dirname(abspath(__file__)))) 10 VENV_CACHE_PATH = "/srv/zulip-venv-cache" 11 12 if '--travis' in sys.argv: 13 # In Travis CI, we don't have root access 14 VENV_CACHE_PATH = os.path.join(os.environ['HOME'], "zulip-venv-cache") 15 16 if False: 17 # Don't add a runtime dependency on typing 18 from typing import List, Optional 19 20 VENV_DEPENDENCIES = [ 21 "libffi-dev", 22 "libfreetype6-dev", 23 "libldap2-dev", 24 "libmemcached-dev", 25 "postgresql-server-dev-all", 26 "python3-dev", # Needed to install typed-ast dependency of mypy 27 "python-dev", 28 "python-virtualenv", 29 ] 30 31 def setup_virtualenv(target_venv_path, requirements_file, virtualenv_args=None): 32 # type: (Optional[str], str, Optional[List[str]]) -> str 33 34 # Check if a cached version already exists 35 path = os.path.join(ZULIP_PATH, 'scripts', 'lib', 'hash_reqs.py') 36 output = subprocess.check_output([path, requirements_file]) 37 sha1sum = output.split()[0] 38 if target_venv_path is None: 39 cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, 'venv') 40 else: 41 cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, os.path.basename(target_venv_path)) 42 success_stamp = os.path.join(cached_venv_path, "success-stamp") 43 if not os.path.exists(success_stamp): 44 do_setup_virtualenv(cached_venv_path, requirements_file, virtualenv_args or []) 45 run(["touch", success_stamp]) 46 47 print("Using cached Python venv from %s" % (cached_venv_path,)) 48 if target_venv_path is not None: 49 run(["sudo", "ln", "-nsf", cached_venv_path, target_venv_path]) 50 activate_this = os.path.join(cached_venv_path, "bin", "activate_this.py") 51 exec(open(activate_this).read(), {}, dict(__file__=activate_this)) # type: ignore # https://github.com/python/mypy/issues/1577 52 return cached_venv_path 53 54 def do_setup_virtualenv(venv_path, requirements_file, virtualenv_args): 55 # type: (str, str, List[str]) -> None 56 57 # Setup Python virtualenv 58 run(["sudo", "rm", "-rf", venv_path]) 59 run(["sudo", "mkdir", "-p", venv_path]) 60 run(["sudo", "chown", "{}:{}".format(os.getuid(), os.getgid()), venv_path]) 61 run(["virtualenv"] + virtualenv_args + [venv_path]) 62 63 # Switch current Python context to the virtualenv. 64 activate_this = os.path.join(venv_path, "bin", "activate_this.py") 65 exec(open(activate_this).read(), {}, dict(__file__=activate_this)) # type: ignore # https://github.com/python/mypy/issues/1577 66 67 run(["pip", "install", "--upgrade", "pip"]) 68 run(["pip", "install", "--no-deps", "--requirement", requirements_file]) 69 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/lib/setup_venv.py b/scripts/lib/setup_venv.py --- a/scripts/lib/setup_venv.py +++ b/scripts/lib/setup_venv.py @@ -18,6 +18,7 @@ from typing import List, Optional VENV_DEPENDENCIES = [ + "build-essential", "libffi-dev", "libfreetype6-dev", "libldap2-dev", @@ -25,6 +26,7 @@ "postgresql-server-dev-all", "python3-dev", # Needed to install typed-ast dependency of mypy "python-dev", + "python-pip", "python-virtualenv", ]
{"golden_diff": "diff --git a/scripts/lib/setup_venv.py b/scripts/lib/setup_venv.py\n--- a/scripts/lib/setup_venv.py\n+++ b/scripts/lib/setup_venv.py\n@@ -18,6 +18,7 @@\n from typing import List, Optional\n \n VENV_DEPENDENCIES = [\n+ \"build-essential\",\n \"libffi-dev\",\n \"libfreetype6-dev\",\n \"libldap2-dev\",\n@@ -25,6 +26,7 @@\n \"postgresql-server-dev-all\",\n \"python3-dev\", # Needed to install typed-ast dependency of mypy\n \"python-dev\",\n+ \"python-pip\",\n \"python-virtualenv\",\n ]\n", "issue": "Commit c74a74d causes docker dev image build to fail.\nHi,\n\nI had 1.3.13 successfully install in a docker dev environment, which fully built and installed. I decided to try the latest source in master today and this error occurred:\n\n```\n running install\n running build\n running build_py\n creating build\n creating build/lib.linux-x86_64-3.4\n creating build/lib.linux-x86_64-3.4/typed_ast\n copying typed_ast/ast27.py -> build/lib.linux-x86_64-3.4/typed_ast\n copying typed_ast/ast35.py -> build/lib.linux-x86_64-3.4/typed_ast\n copying typed_ast/conversions.py -> build/lib.linux-x86_64-3.4/typed_ast\n copying typed_ast/__init__.py -> build/lib.linux-x86_64-3.4/typed_ast\n running build_ext\n building '_ast27' extension\n creating build/temp.linux-x86_64-3.4\n creating build/temp.linux-x86_64-3.4/ast27\n creating build/temp.linux-x86_64-3.4/ast27/Parser\n creating build/temp.linux-x86_64-3.4/ast27/Python\n creating build/temp.linux-x86_64-3.4/ast27/Custom\n x86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security -D_FORTIFY_SOURCE=2 -fPIC -Iast27/Include -I/usr/include/python3.4m -I/srv/zulip-venv-cache/2d0d4429f07d3367f31c904bcf110e00407cafa8/zulip-py3-venv/include/python3.4m -c ast27/Parser/acceler.c -o build/temp.linux-x86_64-3.4/ast27/Parser/acceler.o\n unable to execute 'x86_64-linux-gnu-gcc': No such file or directory\n error: command 'x86_64-linux-gnu-gcc' failed with exit status 1\n\n ----------------------------------------\nCommand \"/srv/zulip-venv-cache/2d0d4429f07d3367f31c904bcf110e00407cafa8/zulip-py3-venv/bin/python3 -u -c \"import setuptools, tokenize;__file__='/tmp/pip-build-u0_6ev0b/typed-ast/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))\" install --record /tmp/pip-eq6ql577-record/install-record.txt --single-version-externally-managed --compile --install-headers /srv/zulip-venv-cache/2d0d4429f07d3367f31c904bcf110e00407cafa8/zulip-py3-venv/include/site/python3.4/typed-ast\" failed with error code 1 in /tmp/pip-build-u0_6ev0b/typed-ast/ \nTraceback (most recent call last):\n File \"/srv/zulip/provision.py\", line 202, in <module>\n sys.exit(main())\n File \"/srv/zulip/provision.py\", line 153, in main\n virtualenv_args=['-p', 'python3'])\n File \"/srv/zulip/scripts/lib/setup_venv.py\", line 44, in setup_virtualenv\n do_setup_virtualenv(cached_venv_path, requirements_file, virtualenv_args or [])\n File \"/srv/zulip/scripts/lib/setup_venv.py\", line 68, in do_setup_virtualenv\n run([\"pip\", \"install\", \"--no-deps\", \"--requirement\", requirements_file])\n File \"/srv/zulip/zulip_tools.py\", line 78, in run\n raise subprocess.CalledProcessError(rc, args)\nsubprocess.CalledProcessError: Command '['pip', 'install', '--no-deps', '--requirement', '/srv/zulip/requirements/mypy.txt']' returned non-zero exit status 1\n```\n\nAt the time of this writing commit 
https://github.com/zulip/zulip/commit/8cecb3774321adf638b93781a175e74e68a6564b is the latest version I used, so the error must have been between this commit and last commit for release 1.3.13.\n\nWhen I reverted this commit https://github.com/zulip/zulip/commit/c74a74dc7427549eba4ececcf91c483ca2876796 everything compiled as normal. There must be a dependency that is pulled in that makes it work.\n\nHope this helps.\n\nJon\n\n", "before_files": [{"content": "from __future__ import print_function\n\nimport os\nimport sys\nfrom os.path import dirname, abspath\nimport subprocess\nfrom zulip_tools import run\n\nZULIP_PATH = dirname(dirname(dirname(abspath(__file__))))\nVENV_CACHE_PATH = \"/srv/zulip-venv-cache\"\n\nif '--travis' in sys.argv:\n # In Travis CI, we don't have root access\n VENV_CACHE_PATH = os.path.join(os.environ['HOME'], \"zulip-venv-cache\")\n\nif False:\n # Don't add a runtime dependency on typing\n from typing import List, Optional\n\nVENV_DEPENDENCIES = [\n \"libffi-dev\",\n \"libfreetype6-dev\",\n \"libldap2-dev\",\n \"libmemcached-dev\",\n \"postgresql-server-dev-all\",\n \"python3-dev\", # Needed to install typed-ast dependency of mypy\n \"python-dev\",\n \"python-virtualenv\",\n]\n\ndef setup_virtualenv(target_venv_path, requirements_file, virtualenv_args=None):\n # type: (Optional[str], str, Optional[List[str]]) -> str\n\n # Check if a cached version already exists\n path = os.path.join(ZULIP_PATH, 'scripts', 'lib', 'hash_reqs.py')\n output = subprocess.check_output([path, requirements_file])\n sha1sum = output.split()[0]\n if target_venv_path is None:\n cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, 'venv')\n else:\n cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, os.path.basename(target_venv_path))\n success_stamp = os.path.join(cached_venv_path, \"success-stamp\")\n if not os.path.exists(success_stamp):\n do_setup_virtualenv(cached_venv_path, requirements_file, virtualenv_args or [])\n run([\"touch\", success_stamp])\n\n print(\"Using cached Python venv from %s\" % (cached_venv_path,))\n if target_venv_path is not None:\n run([\"sudo\", \"ln\", \"-nsf\", cached_venv_path, target_venv_path])\n activate_this = os.path.join(cached_venv_path, \"bin\", \"activate_this.py\")\n exec(open(activate_this).read(), {}, dict(__file__=activate_this)) # type: ignore # https://github.com/python/mypy/issues/1577\n return cached_venv_path\n\ndef do_setup_virtualenv(venv_path, requirements_file, virtualenv_args):\n # type: (str, str, List[str]) -> None\n\n # Setup Python virtualenv\n run([\"sudo\", \"rm\", \"-rf\", venv_path])\n run([\"sudo\", \"mkdir\", \"-p\", venv_path])\n run([\"sudo\", \"chown\", \"{}:{}\".format(os.getuid(), os.getgid()), venv_path])\n run([\"virtualenv\"] + virtualenv_args + [venv_path])\n\n # Switch current Python context to the virtualenv.\n activate_this = os.path.join(venv_path, \"bin\", \"activate_this.py\")\n exec(open(activate_this).read(), {}, dict(__file__=activate_this)) # type: ignore # https://github.com/python/mypy/issues/1577\n\n run([\"pip\", \"install\", \"--upgrade\", \"pip\"])\n run([\"pip\", \"install\", \"--no-deps\", \"--requirement\", requirements_file])\n", "path": "scripts/lib/setup_venv.py"}], "after_files": [{"content": "from __future__ import print_function\n\nimport os\nimport sys\nfrom os.path import dirname, abspath\nimport subprocess\nfrom zulip_tools import run\n\nZULIP_PATH = dirname(dirname(dirname(abspath(__file__))))\nVENV_CACHE_PATH = \"/srv/zulip-venv-cache\"\n\nif '--travis' in sys.argv:\n # In Travis 
CI, we don't have root access\n VENV_CACHE_PATH = os.path.join(os.environ['HOME'], \"zulip-venv-cache\")\n\nif False:\n # Don't add a runtime dependency on typing\n from typing import List, Optional\n\nVENV_DEPENDENCIES = [\n \"build-essential\",\n \"libffi-dev\",\n \"libfreetype6-dev\",\n \"libldap2-dev\",\n \"libmemcached-dev\",\n \"postgresql-server-dev-all\",\n \"python3-dev\", # Needed to install typed-ast dependency of mypy\n \"python-dev\",\n \"python-pip\",\n \"python-virtualenv\",\n]\n\ndef setup_virtualenv(target_venv_path, requirements_file, virtualenv_args=None):\n # type: (Optional[str], str, Optional[List[str]]) -> str\n\n # Check if a cached version already exists\n path = os.path.join(ZULIP_PATH, 'scripts', 'lib', 'hash_reqs.py')\n output = subprocess.check_output([path, requirements_file])\n sha1sum = output.split()[0]\n if target_venv_path is None:\n cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, 'venv')\n else:\n cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, os.path.basename(target_venv_path))\n success_stamp = os.path.join(cached_venv_path, \"success-stamp\")\n if not os.path.exists(success_stamp):\n do_setup_virtualenv(cached_venv_path, requirements_file, virtualenv_args or [])\n run([\"touch\", success_stamp])\n\n print(\"Using cached Python venv from %s\" % (cached_venv_path,))\n if target_venv_path is not None:\n run([\"sudo\", \"ln\", \"-nsf\", cached_venv_path, target_venv_path])\n activate_this = os.path.join(cached_venv_path, \"bin\", \"activate_this.py\")\n exec(open(activate_this).read(), {}, dict(__file__=activate_this)) # type: ignore # https://github.com/python/mypy/issues/1577\n return cached_venv_path\n\ndef do_setup_virtualenv(venv_path, requirements_file, virtualenv_args):\n # type: (str, str, List[str]) -> None\n\n # Setup Python virtualenv\n run([\"sudo\", \"rm\", \"-rf\", venv_path])\n run([\"sudo\", \"mkdir\", \"-p\", venv_path])\n run([\"sudo\", \"chown\", \"{}:{}\".format(os.getuid(), os.getgid()), venv_path])\n run([\"virtualenv\"] + virtualenv_args + [venv_path])\n\n # Switch current Python context to the virtualenv.\n activate_this = os.path.join(venv_path, \"bin\", \"activate_this.py\")\n exec(open(activate_this).read(), {}, dict(__file__=activate_this)) # type: ignore # https://github.com/python/mypy/issues/1577\n\n run([\"pip\", \"install\", \"--upgrade\", \"pip\"])\n run([\"pip\", \"install\", \"--no-deps\", \"--requirement\", requirements_file])\n", "path": "scripts/lib/setup_venv.py"}]}
2,369
153
gh_patches_debug_6577
rasdani/github-patches
git_diff
rlworkgroup__garage-1927
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- On policy algos stop learning midway Avg return either drops dramatically or the run stops completely due to NaN errors. Could affect off policy as well. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/garage/envs/normalized_env.py` Content: ``` 1 """An environment wrapper that normalizes action, observation and reward.""" 2 import akro 3 import numpy as np 4 5 from garage import EnvStep, Wrapper 6 7 8 class NormalizedEnv(Wrapper): 9 """An environment wrapper for normalization. 10 11 This wrapper normalizes action, and optionally observation and reward. 12 13 Args: 14 env (Environment): An environment instance. 15 scale_reward (float): Scale of environment reward. 16 normalize_obs (bool): If True, normalize observation. 17 normalize_reward (bool): If True, normalize reward. scale_reward is 18 applied after normalization. 19 expected_action_scale (float): Assuming action falls in the range of 20 [-expected_action_scale, expected_action_scale] when normalize it. 21 flatten_obs (bool): Flatten observation if True. 22 obs_alpha (float): Update rate of moving average when estimating the 23 mean and variance of observations. 24 reward_alpha (float): Update rate of moving average when estimating the 25 mean and variance of rewards. 26 27 """ 28 29 def __init__( 30 self, 31 env, 32 scale_reward=1., 33 normalize_obs=False, 34 normalize_reward=False, 35 expected_action_scale=1., 36 flatten_obs=True, 37 obs_alpha=0.001, 38 reward_alpha=0.001, 39 ): 40 super().__init__(env) 41 42 self._scale_reward = scale_reward 43 self._normalize_obs = normalize_obs 44 self._normalize_reward = normalize_reward 45 self._expected_action_scale = expected_action_scale 46 self._flatten_obs = flatten_obs 47 48 self._obs_alpha = obs_alpha 49 flat_obs_dim = self._env.observation_space.flat_dim 50 self._obs_mean = np.zeros(flat_obs_dim) 51 self._obs_var = np.ones(flat_obs_dim) 52 53 self._reward_alpha = reward_alpha 54 self._reward_mean = 0. 55 self._reward_var = 1. 56 57 def reset(self): 58 """Call reset on wrapped env. 59 60 Returns: 61 numpy.ndarray: The first observation conforming to 62 `observation_space`. 63 dict: The episode-level information. 64 Note that this is not part of `env_info` provided in `step()`. 65 It contains information of he entire episode, which could be 66 needed to determine the first action (e.g. in the case of 67 goal-conditioned or MTRL.) 68 69 """ 70 first_obs, episode_info = self._env.reset() 71 if self._normalize_obs: 72 return self._apply_normalize_obs(first_obs), episode_info 73 else: 74 return first_obs, episode_info 75 76 def step(self, action): 77 """Call step on wrapped env. 78 79 Args: 80 action (np.ndarray): An action provided by the agent. 81 82 Returns: 83 EnvStep: The environment step resulting from the action. 84 85 Raises: 86 RuntimeError: if `step()` is called after the environment has been 87 constructed and `reset()` has not been called. 
88 89 """ 90 if isinstance(self.action_space, akro.Box): 91 # rescale the action when the bounds are not inf 92 lb, ub = self.action_space.low, self.action_space.high 93 if np.all(lb != -np.inf) and np.all(ub != -np.inf): 94 scaled_action = lb + (action + self._expected_action_scale) * ( 95 0.5 * (ub - lb) / self._expected_action_scale) 96 scaled_action = np.clip(scaled_action, lb, ub) 97 else: 98 scaled_action = action 99 else: 100 scaled_action = action 101 102 es = self._env.step(scaled_action) 103 next_obs = es.observation 104 reward = es.reward 105 106 if self._normalize_obs: 107 next_obs = self._apply_normalize_obs(next_obs) 108 if self._normalize_reward: 109 reward = self._apply_normalize_reward(reward) 110 111 return EnvStep(env_spec=es.env_spec, 112 action=es.action, 113 reward=reward * self._scale_reward, 114 observation=next_obs, 115 env_info=es.env_info, 116 step_type=es.step_type) 117 118 def _update_obs_estimate(self, obs): 119 flat_obs = self._env.observation_space.flatten(obs) 120 self._obs_mean = ( 121 1 - self._obs_alpha) * self._obs_mean + self._obs_alpha * flat_obs 122 self._obs_var = ( 123 1 - self._obs_alpha) * self._obs_var + self._obs_alpha * np.square( 124 flat_obs - self._obs_mean) 125 126 def _update_reward_estimate(self, reward): 127 self._reward_mean = (1 - self._reward_alpha) * \ 128 self._reward_mean + self._reward_alpha * reward 129 self._reward_var = ( 130 1 - self._reward_alpha 131 ) * self._reward_var + self._reward_alpha * np.square( 132 reward - self._reward_mean) 133 134 def _apply_normalize_obs(self, obs): 135 """Compute normalized observation. 136 137 Args: 138 obs (np.ndarray): Observation. 139 140 Returns: 141 np.ndarray: Normalized observation. 142 143 """ 144 self._update_obs_estimate(obs) 145 flat_obs = self._env.observation_space.flatten(obs) 146 normalized_obs = (flat_obs - 147 self._obs_mean) / (np.sqrt(self._obs_var) + 1e-8) 148 if not self._flatten_obs: 149 normalized_obs = self._env.observation_space.unflatten( 150 self._env.observation_space, normalized_obs) 151 return normalized_obs 152 153 def _apply_normalize_reward(self, reward): 154 """Compute normalized reward. 155 156 Args: 157 reward (float): Reward. 158 159 Returns: 160 float: Normalized reward. 161 162 """ 163 self._update_reward_estimate(reward) 164 return reward / (np.sqrt(self._reward_var) + 1e-8) 165 166 167 normalize = NormalizedEnv 168 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/garage/envs/normalized_env.py b/src/garage/envs/normalized_env.py --- a/src/garage/envs/normalized_env.py +++ b/src/garage/envs/normalized_env.py @@ -109,7 +109,7 @@ reward = self._apply_normalize_reward(reward) return EnvStep(env_spec=es.env_spec, - action=es.action, + action=action, reward=reward * self._scale_reward, observation=next_obs, env_info=es.env_info,
{"golden_diff": "diff --git a/src/garage/envs/normalized_env.py b/src/garage/envs/normalized_env.py\n--- a/src/garage/envs/normalized_env.py\n+++ b/src/garage/envs/normalized_env.py\n@@ -109,7 +109,7 @@\n reward = self._apply_normalize_reward(reward)\n \n return EnvStep(env_spec=es.env_spec,\n- action=es.action,\n+ action=action,\n reward=reward * self._scale_reward,\n observation=next_obs,\n env_info=es.env_info,\n", "issue": "On policy algos stop learning midway\nAvg return either drops dramatically or the run stops completely due to NaN errors. Could affect off policy as well.\n", "before_files": [{"content": "\"\"\"An environment wrapper that normalizes action, observation and reward.\"\"\"\nimport akro\nimport numpy as np\n\nfrom garage import EnvStep, Wrapper\n\n\nclass NormalizedEnv(Wrapper):\n \"\"\"An environment wrapper for normalization.\n\n This wrapper normalizes action, and optionally observation and reward.\n\n Args:\n env (Environment): An environment instance.\n scale_reward (float): Scale of environment reward.\n normalize_obs (bool): If True, normalize observation.\n normalize_reward (bool): If True, normalize reward. scale_reward is\n applied after normalization.\n expected_action_scale (float): Assuming action falls in the range of\n [-expected_action_scale, expected_action_scale] when normalize it.\n flatten_obs (bool): Flatten observation if True.\n obs_alpha (float): Update rate of moving average when estimating the\n mean and variance of observations.\n reward_alpha (float): Update rate of moving average when estimating the\n mean and variance of rewards.\n\n \"\"\"\n\n def __init__(\n self,\n env,\n scale_reward=1.,\n normalize_obs=False,\n normalize_reward=False,\n expected_action_scale=1.,\n flatten_obs=True,\n obs_alpha=0.001,\n reward_alpha=0.001,\n ):\n super().__init__(env)\n\n self._scale_reward = scale_reward\n self._normalize_obs = normalize_obs\n self._normalize_reward = normalize_reward\n self._expected_action_scale = expected_action_scale\n self._flatten_obs = flatten_obs\n\n self._obs_alpha = obs_alpha\n flat_obs_dim = self._env.observation_space.flat_dim\n self._obs_mean = np.zeros(flat_obs_dim)\n self._obs_var = np.ones(flat_obs_dim)\n\n self._reward_alpha = reward_alpha\n self._reward_mean = 0.\n self._reward_var = 1.\n\n def reset(self):\n \"\"\"Call reset on wrapped env.\n\n Returns:\n numpy.ndarray: The first observation conforming to\n `observation_space`.\n dict: The episode-level information.\n Note that this is not part of `env_info` provided in `step()`.\n It contains information of he entire episode\uff0c which could be\n needed to determine the first action (e.g. 
in the case of\n goal-conditioned or MTRL.)\n\n \"\"\"\n first_obs, episode_info = self._env.reset()\n if self._normalize_obs:\n return self._apply_normalize_obs(first_obs), episode_info\n else:\n return first_obs, episode_info\n\n def step(self, action):\n \"\"\"Call step on wrapped env.\n\n Args:\n action (np.ndarray): An action provided by the agent.\n\n Returns:\n EnvStep: The environment step resulting from the action.\n\n Raises:\n RuntimeError: if `step()` is called after the environment has been\n constructed and `reset()` has not been called.\n\n \"\"\"\n if isinstance(self.action_space, akro.Box):\n # rescale the action when the bounds are not inf\n lb, ub = self.action_space.low, self.action_space.high\n if np.all(lb != -np.inf) and np.all(ub != -np.inf):\n scaled_action = lb + (action + self._expected_action_scale) * (\n 0.5 * (ub - lb) / self._expected_action_scale)\n scaled_action = np.clip(scaled_action, lb, ub)\n else:\n scaled_action = action\n else:\n scaled_action = action\n\n es = self._env.step(scaled_action)\n next_obs = es.observation\n reward = es.reward\n\n if self._normalize_obs:\n next_obs = self._apply_normalize_obs(next_obs)\n if self._normalize_reward:\n reward = self._apply_normalize_reward(reward)\n\n return EnvStep(env_spec=es.env_spec,\n action=es.action,\n reward=reward * self._scale_reward,\n observation=next_obs,\n env_info=es.env_info,\n step_type=es.step_type)\n\n def _update_obs_estimate(self, obs):\n flat_obs = self._env.observation_space.flatten(obs)\n self._obs_mean = (\n 1 - self._obs_alpha) * self._obs_mean + self._obs_alpha * flat_obs\n self._obs_var = (\n 1 - self._obs_alpha) * self._obs_var + self._obs_alpha * np.square(\n flat_obs - self._obs_mean)\n\n def _update_reward_estimate(self, reward):\n self._reward_mean = (1 - self._reward_alpha) * \\\n self._reward_mean + self._reward_alpha * reward\n self._reward_var = (\n 1 - self._reward_alpha\n ) * self._reward_var + self._reward_alpha * np.square(\n reward - self._reward_mean)\n\n def _apply_normalize_obs(self, obs):\n \"\"\"Compute normalized observation.\n\n Args:\n obs (np.ndarray): Observation.\n\n Returns:\n np.ndarray: Normalized observation.\n\n \"\"\"\n self._update_obs_estimate(obs)\n flat_obs = self._env.observation_space.flatten(obs)\n normalized_obs = (flat_obs -\n self._obs_mean) / (np.sqrt(self._obs_var) + 1e-8)\n if not self._flatten_obs:\n normalized_obs = self._env.observation_space.unflatten(\n self._env.observation_space, normalized_obs)\n return normalized_obs\n\n def _apply_normalize_reward(self, reward):\n \"\"\"Compute normalized reward.\n\n Args:\n reward (float): Reward.\n\n Returns:\n float: Normalized reward.\n\n \"\"\"\n self._update_reward_estimate(reward)\n return reward / (np.sqrt(self._reward_var) + 1e-8)\n\n\nnormalize = NormalizedEnv\n", "path": "src/garage/envs/normalized_env.py"}], "after_files": [{"content": "\"\"\"An environment wrapper that normalizes action, observation and reward.\"\"\"\nimport akro\nimport numpy as np\n\nfrom garage import EnvStep, Wrapper\n\n\nclass NormalizedEnv(Wrapper):\n \"\"\"An environment wrapper for normalization.\n\n This wrapper normalizes action, and optionally observation and reward.\n\n Args:\n env (Environment): An environment instance.\n scale_reward (float): Scale of environment reward.\n normalize_obs (bool): If True, normalize observation.\n normalize_reward (bool): If True, normalize reward. 
scale_reward is\n applied after normalization.\n expected_action_scale (float): Assuming action falls in the range of\n [-expected_action_scale, expected_action_scale] when normalize it.\n flatten_obs (bool): Flatten observation if True.\n obs_alpha (float): Update rate of moving average when estimating the\n mean and variance of observations.\n reward_alpha (float): Update rate of moving average when estimating the\n mean and variance of rewards.\n\n \"\"\"\n\n def __init__(\n self,\n env,\n scale_reward=1.,\n normalize_obs=False,\n normalize_reward=False,\n expected_action_scale=1.,\n flatten_obs=True,\n obs_alpha=0.001,\n reward_alpha=0.001,\n ):\n super().__init__(env)\n\n self._scale_reward = scale_reward\n self._normalize_obs = normalize_obs\n self._normalize_reward = normalize_reward\n self._expected_action_scale = expected_action_scale\n self._flatten_obs = flatten_obs\n\n self._obs_alpha = obs_alpha\n flat_obs_dim = self._env.observation_space.flat_dim\n self._obs_mean = np.zeros(flat_obs_dim)\n self._obs_var = np.ones(flat_obs_dim)\n\n self._reward_alpha = reward_alpha\n self._reward_mean = 0.\n self._reward_var = 1.\n\n def reset(self):\n \"\"\"Call reset on wrapped env.\n\n Returns:\n numpy.ndarray: The first observation conforming to\n `observation_space`.\n dict: The episode-level information.\n Note that this is not part of `env_info` provided in `step()`.\n It contains information of he entire episode\uff0c which could be\n needed to determine the first action (e.g. in the case of\n goal-conditioned or MTRL.)\n\n \"\"\"\n first_obs, episode_info = self._env.reset()\n if self._normalize_obs:\n return self._apply_normalize_obs(first_obs), episode_info\n else:\n return first_obs, episode_info\n\n def step(self, action):\n \"\"\"Call step on wrapped env.\n\n Args:\n action (np.ndarray): An action provided by the agent.\n\n Returns:\n EnvStep: The environment step resulting from the action.\n\n Raises:\n RuntimeError: if `step()` is called after the environment has been\n constructed and `reset()` has not been called.\n\n \"\"\"\n if isinstance(self.action_space, akro.Box):\n # rescale the action when the bounds are not inf\n lb, ub = self.action_space.low, self.action_space.high\n if np.all(lb != -np.inf) and np.all(ub != -np.inf):\n scaled_action = lb + (action + self._expected_action_scale) * (\n 0.5 * (ub - lb) / self._expected_action_scale)\n scaled_action = np.clip(scaled_action, lb, ub)\n else:\n scaled_action = action\n else:\n scaled_action = action\n\n es = self._env.step(scaled_action)\n next_obs = es.observation\n reward = es.reward\n\n if self._normalize_obs:\n next_obs = self._apply_normalize_obs(next_obs)\n if self._normalize_reward:\n reward = self._apply_normalize_reward(reward)\n\n return EnvStep(env_spec=es.env_spec,\n action=action,\n reward=reward * self._scale_reward,\n observation=next_obs,\n env_info=es.env_info,\n step_type=es.step_type)\n\n def _update_obs_estimate(self, obs):\n flat_obs = self._env.observation_space.flatten(obs)\n self._obs_mean = (\n 1 - self._obs_alpha) * self._obs_mean + self._obs_alpha * flat_obs\n self._obs_var = (\n 1 - self._obs_alpha) * self._obs_var + self._obs_alpha * np.square(\n flat_obs - self._obs_mean)\n\n def _update_reward_estimate(self, reward):\n self._reward_mean = (1 - self._reward_alpha) * \\\n self._reward_mean + self._reward_alpha * reward\n self._reward_var = (\n 1 - self._reward_alpha\n ) * self._reward_var + self._reward_alpha * np.square(\n reward - self._reward_mean)\n\n def _apply_normalize_obs(self, 
obs):\n \"\"\"Compute normalized observation.\n\n Args:\n obs (np.ndarray): Observation.\n\n Returns:\n np.ndarray: Normalized observation.\n\n \"\"\"\n self._update_obs_estimate(obs)\n flat_obs = self._env.observation_space.flatten(obs)\n normalized_obs = (flat_obs -\n self._obs_mean) / (np.sqrt(self._obs_var) + 1e-8)\n if not self._flatten_obs:\n normalized_obs = self._env.observation_space.unflatten(\n self._env.observation_space, normalized_obs)\n return normalized_obs\n\n def _apply_normalize_reward(self, reward):\n \"\"\"Compute normalized reward.\n\n Args:\n reward (float): Reward.\n\n Returns:\n float: Normalized reward.\n\n \"\"\"\n self._update_reward_estimate(reward)\n return reward / (np.sqrt(self._reward_var) + 1e-8)\n\n\nnormalize = NormalizedEnv\n", "path": "src/garage/envs/normalized_env.py"}]}
1,940
126
gh_patches_debug_20997
rasdani/github-patches
git_diff
microsoft__presidio-259
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- crypto_recognizer throws an exception When calling the engine analyze API like ``` response = engine.analyze(correlation_id=0, text=text_to_analyze, language='en', entities=[], all_fields=True, score_threshold=0.5) ``` and the value of 'text_to_analyze' is "/boardingPass/v1/devices/34e7b5e1a0aa1d6f3d862b52a289cdb7/registrations/pass.apoc.wallet/" The exception below is thrown ` File "/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/analyzer_engine.py", line 204, in analyze current_results = recognizer.analyze(text, entities, nlp_artifacts) File "/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/pattern_recognizer.py", line 61, in analyze pattern_result = self.__analyze_patterns(text) File "/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/pattern_recognizer.py", line 144, in __analyze_patterns validation_result = self.validate_result(current_match) File "/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/predefined_recognizers/crypto_recognizer.py", line 23, in validate_result bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25) File "/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/predefined_recognizers/crypto_recognizer.py", line 33, in __decode_base58 n = n * 58 + digits58.index(char)` ValueError: substring not found --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py` Content: ``` 1 from hashlib import sha256 2 from analyzer import Pattern 3 from analyzer import PatternRecognizer 4 5 # Copied from: 6 # http://rosettacode.org/wiki/Bitcoin/address_validation#Python 7 REGEX = r'\b[13][a-km-zA-HJ-NP-Z0-9]{26,33}\b' 8 CONTEXT = ["wallet", "btc", "bitcoin", "crypto"] 9 10 11 class CryptoRecognizer(PatternRecognizer): 12 """ 13 Recognizes common crypto account numbers using regex + checksum 14 """ 15 16 def __init__(self): 17 patterns = [Pattern('Crypto (Medium)', REGEX, 0.5)] 18 super().__init__(supported_entity="CRYPTO", patterns=patterns, 19 context=CONTEXT) 20 21 def validate_result(self, pattern_text): 22 # try: 23 bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25) 24 result = bcbytes[-4:] == sha256(sha256(bcbytes[:-4]) 25 .digest()).digest()[:4] 26 return result 27 28 @staticmethod 29 def __decode_base58(bc, length): 30 digits58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' 31 n = 0 32 for char in bc: 33 n = n * 58 + digits58.index(char) 34 return n.to_bytes(length, 'big') 35 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py b/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py --- a/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py +++ b/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py @@ -4,7 +4,7 @@ # Copied from: # http://rosettacode.org/wiki/Bitcoin/address_validation#Python -REGEX = r'\b[13][a-km-zA-HJ-NP-Z0-9]{26,33}\b' +REGEX = r'\b[13][a-km-zA-HJ-NP-Z1-9]{26,33}\b' CONTEXT = ["wallet", "btc", "bitcoin", "crypto"] @@ -19,11 +19,12 @@ context=CONTEXT) def validate_result(self, pattern_text): - # try: - bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25) - result = bcbytes[-4:] == sha256(sha256(bcbytes[:-4]) - .digest()).digest()[:4] - return result + try: + bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25) + return bcbytes[-4:] == sha256(sha256(bcbytes[:-4]) + .digest()).digest()[:4] + except ValueError: + return False @staticmethod def __decode_base58(bc, length):
{"golden_diff": "diff --git a/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py b/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py\n--- a/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py\n+++ b/presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py\n@@ -4,7 +4,7 @@\n \n # Copied from:\n # http://rosettacode.org/wiki/Bitcoin/address_validation#Python\n-REGEX = r'\\b[13][a-km-zA-HJ-NP-Z0-9]{26,33}\\b'\n+REGEX = r'\\b[13][a-km-zA-HJ-NP-Z1-9]{26,33}\\b'\n CONTEXT = [\"wallet\", \"btc\", \"bitcoin\", \"crypto\"]\n \n \n@@ -19,11 +19,12 @@\n context=CONTEXT)\n \n def validate_result(self, pattern_text):\n- # try:\n- bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25)\n- result = bcbytes[-4:] == sha256(sha256(bcbytes[:-4])\n- .digest()).digest()[:4]\n- return result\n+ try:\n+ bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25)\n+ return bcbytes[-4:] == sha256(sha256(bcbytes[:-4])\n+ .digest()).digest()[:4]\n+ except ValueError:\n+ return False\n \n @staticmethod\n def __decode_base58(bc, length):\n", "issue": "crypto_recognizer throws an exception\n\r\nWhen calling the engine analyze API like\r\n\r\n```\r\n response = engine.analyze(correlation_id=0,\r\n text=text_to_analyze,\r\n language='en',\r\n entities=[],\r\n all_fields=True,\r\n score_threshold=0.5)\r\n```\r\n\r\nand the value of 'text_to_analyze' is \r\n\r\n\"/boardingPass/v1/devices/34e7b5e1a0aa1d6f3d862b52a289cdb7/registrations/pass.apoc.wallet/\"\r\n\r\nThe exception below is thrown\r\n\r\n\r\n` File \"/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/analyzer_engine.py\", line 204, in analyze\r\n current_results = recognizer.analyze(text, entities, nlp_artifacts)\r\n File \"/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/pattern_recognizer.py\", line 61, in analyze\r\n pattern_result = self.__analyze_patterns(text)\r\n File \"/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/pattern_recognizer.py\", line 144, in __analyze_patterns\r\n validation_result = self.validate_result(current_match)\r\n File \"/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/predefined_recognizers/crypto_recognizer.py\", line 23, in validate_result\r\n bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25)\r\n File \"/home/folder_name/presidio_testing/my_venv/lib/python3.6/site-packages/analyzer/predefined_recognizers/crypto_recognizer.py\", line 33, in __decode_base58\r\n n = n * 58 + digits58.index(char)`\r\n\r\nValueError: substring not found\n", "before_files": [{"content": "from hashlib import sha256\nfrom analyzer import Pattern\nfrom analyzer import PatternRecognizer\n\n# Copied from:\n# http://rosettacode.org/wiki/Bitcoin/address_validation#Python\nREGEX = r'\\b[13][a-km-zA-HJ-NP-Z0-9]{26,33}\\b'\nCONTEXT = [\"wallet\", \"btc\", \"bitcoin\", \"crypto\"]\n\n\nclass CryptoRecognizer(PatternRecognizer):\n \"\"\"\n Recognizes common crypto account numbers using regex + checksum\n \"\"\"\n\n def __init__(self):\n patterns = [Pattern('Crypto (Medium)', REGEX, 0.5)]\n super().__init__(supported_entity=\"CRYPTO\", patterns=patterns,\n context=CONTEXT)\n\n def validate_result(self, pattern_text):\n # try:\n bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25)\n result = bcbytes[-4:] == sha256(sha256(bcbytes[:-4])\n .digest()).digest()[:4]\n return result\n\n @staticmethod\n def __decode_base58(bc, length):\n digits58 = 
'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n n = 0\n for char in bc:\n n = n * 58 + digits58.index(char)\n return n.to_bytes(length, 'big')\n", "path": "presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py"}], "after_files": [{"content": "from hashlib import sha256\nfrom analyzer import Pattern\nfrom analyzer import PatternRecognizer\n\n# Copied from:\n# http://rosettacode.org/wiki/Bitcoin/address_validation#Python\nREGEX = r'\\b[13][a-km-zA-HJ-NP-Z1-9]{26,33}\\b'\nCONTEXT = [\"wallet\", \"btc\", \"bitcoin\", \"crypto\"]\n\n\nclass CryptoRecognizer(PatternRecognizer):\n \"\"\"\n Recognizes common crypto account numbers using regex + checksum\n \"\"\"\n\n def __init__(self):\n patterns = [Pattern('Crypto (Medium)', REGEX, 0.5)]\n super().__init__(supported_entity=\"CRYPTO\", patterns=patterns,\n context=CONTEXT)\n\n def validate_result(self, pattern_text):\n try:\n bcbytes = CryptoRecognizer.__decode_base58(pattern_text, 25)\n return bcbytes[-4:] == sha256(sha256(bcbytes[:-4])\n .digest()).digest()[:4]\n except ValueError:\n return False\n\n @staticmethod\n def __decode_base58(bc, length):\n digits58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n n = 0\n for char in bc:\n n = n * 58 + digits58.index(char)\n return n.to_bytes(length, 'big')\n", "path": "presidio-analyzer/analyzer/predefined_recognizers/crypto_recognizer.py"}]}
1,054
368
gh_patches_debug_25560
rasdani/github-patches
git_diff
avocado-framework__avocado-4381
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Better output message on cloudint when missing pycdlib pycdlib is necessary when running `utils/cloudinit.py` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `avocado/utils/cloudinit.py` Content: ``` 1 # This program is free software; you can redistribute it and/or modify 2 # it under the terms of the GNU General Public License as published by 3 # the Free Software Foundation; either version 2 of the License, or 4 # (at your option) any later version. 5 # 6 # This program is distributed in the hope that it will be useful, 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 9 # 10 # See LICENSE for more details. 11 # 12 # Copyright: Red Hat Inc. 2018 13 # Author: Cleber Rosa <[email protected]> 14 15 """ 16 cloudinit configuration support 17 18 This module can be easily used with :mod:`avocado.utils.vmimage`, 19 to configure operating system images via the cloudinit tooling. 20 21 :see: http://cloudinit.readthedocs.io. 22 """ 23 24 from http.server import BaseHTTPRequestHandler, HTTPServer 25 26 from . import astring, iso9660 27 28 #: The meta-data file template 29 #: 30 #: Positional template variables are: instance-id, hostname 31 METADATA_TEMPLATE = """instance-id: {0} 32 hostname: {1} 33 """ 34 35 #: The header expected to be found at the beginning of the user-data file 36 USERDATA_HEADER = "#cloud-config" 37 38 #: A username configuration as per cloudinit/config/cc_set_passwords.py 39 #: 40 #: Positional template variables : username 41 USERNAME_TEMPLATE = """ 42 ssh_pwauth: True 43 44 system_info: 45 default_user: 46 name: {0} 47 """ 48 49 #: A username configuration as per cloudinit/config/cc_set_passwords.py 50 #: 51 #: Positional template variables are: password 52 PASSWORD_TEMPLATE = """ 53 password: {0} 54 chpasswd: 55 expire: False 56 """ 57 58 #: An authorized key configuration for the default user 59 #: 60 #: Positional template variables are: ssh_authorized_keys 61 AUTHORIZED_KEY_TEMPLATE = """ 62 ssh_authorized_keys: 63 - {0} 64 """ 65 66 #: A phone home configuration that will post just the instance id 67 #: 68 #: Positional template variables are: address, port 69 PHONE_HOME_TEMPLATE = """ 70 phone_home: 71 url: http://{0}:{1}/$INSTANCE_ID/ 72 post: [ instance_id ] 73 """ 74 75 76 def iso(output_path, instance_id, username=None, password=None, 77 phone_home_host=None, phone_home_port=None, authorized_key=None): 78 """ 79 Generates an ISO image with cloudinit configuration 80 81 The content always include the cloudinit metadata, and optionally 82 the userdata content. On the userdata file, it may contain a 83 username/password section (if both parameters are given) and/or a 84 phone home section (if both host and port are given). 
85 86 :param output_path: the location of the resulting (to be created) ISO 87 image containing the cloudinit configuration 88 :param instance_id: the ID of the cloud instance, a form of identification 89 for the dynamically created executing instances 90 :param username: the username to be used when logging interactively on the 91 instance 92 :param password: the password to be used along with username when 93 authenticating with the login services on the instance 94 :param phone_home_host: the address of the host the instance 95 should contact once it has finished 96 booting 97 :param phone_home_port: the port acting as an HTTP phone home 98 server that the instance should contact 99 once it has finished booting 100 :param authorized_key: a SSH public key to be added as an authorized key 101 for the default user, similar to "ssh-rsa ..." 102 :type authorized_key: str 103 :raises: RuntimeError if the system can not create ISO images. On such 104 a case, user is expected to install supporting packages, such as 105 pycdlib. 106 """ 107 out = iso9660.iso9660(output_path, ["create", "write"]) 108 if out is None: 109 raise RuntimeError("The system lacks support for creating ISO images") 110 out.create(flags={"interchange_level": 3, "joliet": 3, "vol_ident": 'cidata'}) 111 metadata = METADATA_TEMPLATE.format(instance_id, 112 instance_id).encode(astring.ENCODING) 113 out.write("/meta-data", metadata) 114 userdata = USERDATA_HEADER 115 if username: 116 userdata += USERNAME_TEMPLATE.format(username) 117 if username == "root": 118 userdata += "\ndisable_root: False\n" 119 if password: 120 userdata += PASSWORD_TEMPLATE.format(password) 121 if authorized_key: 122 userdata += AUTHORIZED_KEY_TEMPLATE.format(authorized_key) 123 if phone_home_host and phone_home_port: 124 userdata += PHONE_HOME_TEMPLATE.format(phone_home_host, phone_home_port) 125 out.write("/user-data", userdata.encode(astring.ENCODING)) 126 out.close() 127 128 129 class PhoneHomeServerHandler(BaseHTTPRequestHandler): 130 """Handles HTTP requests to the phone home server.""" 131 132 def do_POST(self): 133 """Handles an HTTP POST request. 134 135 Respond with status 200 if the instance phoned back. 136 """ 137 path = self.path[1:] 138 if path[-1] == '/': 139 path = path[:-1] 140 if path == self.server.instance_id: 141 self.server.instance_phoned_back = True 142 self.send_response(200) 143 144 def log_message(self, format_, *args): # pylint: disable=W0221 145 """Logs an arbitrary message. 146 147 :note: It currently disables any message logging. 148 """ 149 150 151 class PhoneHomeServer(HTTPServer): 152 """Implements the phone home HTTP server. 153 154 Wait the phone home from a given instance. 155 """ 156 157 def __init__(self, address, instance_id): 158 """Initialize the server. 159 160 :param address: a hostname or IP address and port, in the same format 161 given to socket and other servers 162 :type address: tuple 163 :param instance_id: the identification for the instance that should be 164 calling back, and the condition for the wait to end 165 :type instance_id: str 166 """ 167 HTTPServer.__init__(self, address, PhoneHomeServerHandler) 168 self.instance_id = instance_id 169 self.instance_phoned_back = False 170 171 172 def wait_for_phone_home(address, instance_id): 173 """ 174 Sets up a phone home server and waits for the given instance to call 175 176 This is a shorthand for setting up a server that will keep handling 177 requests, until it has heard from the specific instance requested. 
178 179 :param address: a hostname or IP address and port, in the same format 180 given to socket and other servers 181 :type address: tuple 182 :param instance_id: the identification for the instance that should be 183 calling back, and the condition for the wait to end 184 :type instance_id: str 185 """ 186 s = PhoneHomeServer(address, instance_id) 187 while not s.instance_phoned_back: 188 s.handle_request() 189 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/avocado/utils/cloudinit.py b/avocado/utils/cloudinit.py --- a/avocado/utils/cloudinit.py +++ b/avocado/utils/cloudinit.py @@ -18,6 +18,9 @@ This module can be easily used with :mod:`avocado.utils.vmimage`, to configure operating system images via the cloudinit tooling. +Please, keep in mind that if you would like to create/write in ISO images, you +need pycdlib module installed in your environment. + :see: http://cloudinit.readthedocs.io. """ @@ -104,9 +107,12 @@ a case, user is expected to install supporting packages, such as pycdlib. """ + # The only supported method to create/write in an ISO today is via pycdlib out = iso9660.iso9660(output_path, ["create", "write"]) if out is None: - raise RuntimeError("The system lacks support for creating ISO images") + msg = ("The system lacks support for creating ISO images. ", + "Please install pycdlib dependency and run again.") + raise RuntimeError(msg) out.create(flags={"interchange_level": 3, "joliet": 3, "vol_ident": 'cidata'}) metadata = METADATA_TEMPLATE.format(instance_id, instance_id).encode(astring.ENCODING)
{"golden_diff": "diff --git a/avocado/utils/cloudinit.py b/avocado/utils/cloudinit.py\n--- a/avocado/utils/cloudinit.py\n+++ b/avocado/utils/cloudinit.py\n@@ -18,6 +18,9 @@\n This module can be easily used with :mod:`avocado.utils.vmimage`,\n to configure operating system images via the cloudinit tooling.\n \n+Please, keep in mind that if you would like to create/write in ISO images, you\n+need pycdlib module installed in your environment.\n+\n :see: http://cloudinit.readthedocs.io.\n \"\"\"\n \n@@ -104,9 +107,12 @@\n a case, user is expected to install supporting packages, such as\n pycdlib.\n \"\"\"\n+ # The only supported method to create/write in an ISO today is via pycdlib\n out = iso9660.iso9660(output_path, [\"create\", \"write\"])\n if out is None:\n- raise RuntimeError(\"The system lacks support for creating ISO images\")\n+ msg = (\"The system lacks support for creating ISO images. \",\n+ \"Please install pycdlib dependency and run again.\")\n+ raise RuntimeError(msg)\n out.create(flags={\"interchange_level\": 3, \"joliet\": 3, \"vol_ident\": 'cidata'})\n metadata = METADATA_TEMPLATE.format(instance_id,\n instance_id).encode(astring.ENCODING)\n", "issue": "Better output message on cloudint when missing pycdlib\npycdlib is necessary when running `utils/cloudinit.py`\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 2018\n# Author: Cleber Rosa <[email protected]>\n\n\"\"\"\ncloudinit configuration support\n\nThis module can be easily used with :mod:`avocado.utils.vmimage`,\nto configure operating system images via the cloudinit tooling.\n\n:see: http://cloudinit.readthedocs.io.\n\"\"\"\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\n\nfrom . 
import astring, iso9660\n\n#: The meta-data file template\n#:\n#: Positional template variables are: instance-id, hostname\nMETADATA_TEMPLATE = \"\"\"instance-id: {0}\nhostname: {1}\n\"\"\"\n\n#: The header expected to be found at the beginning of the user-data file\nUSERDATA_HEADER = \"#cloud-config\"\n\n#: A username configuration as per cloudinit/config/cc_set_passwords.py\n#:\n#: Positional template variables : username\nUSERNAME_TEMPLATE = \"\"\"\nssh_pwauth: True\n\nsystem_info:\n default_user:\n name: {0}\n\"\"\"\n\n#: A username configuration as per cloudinit/config/cc_set_passwords.py\n#:\n#: Positional template variables are: password\nPASSWORD_TEMPLATE = \"\"\"\npassword: {0}\nchpasswd:\n expire: False\n\"\"\"\n\n#: An authorized key configuration for the default user\n#:\n#: Positional template variables are: ssh_authorized_keys\nAUTHORIZED_KEY_TEMPLATE = \"\"\"\nssh_authorized_keys:\n - {0}\n\"\"\"\n\n#: A phone home configuration that will post just the instance id\n#:\n#: Positional template variables are: address, port\nPHONE_HOME_TEMPLATE = \"\"\"\nphone_home:\n url: http://{0}:{1}/$INSTANCE_ID/\n post: [ instance_id ]\n\"\"\"\n\n\ndef iso(output_path, instance_id, username=None, password=None,\n phone_home_host=None, phone_home_port=None, authorized_key=None):\n \"\"\"\n Generates an ISO image with cloudinit configuration\n\n The content always include the cloudinit metadata, and optionally\n the userdata content. On the userdata file, it may contain a\n username/password section (if both parameters are given) and/or a\n phone home section (if both host and port are given).\n\n :param output_path: the location of the resulting (to be created) ISO\n image containing the cloudinit configuration\n :param instance_id: the ID of the cloud instance, a form of identification\n for the dynamically created executing instances\n :param username: the username to be used when logging interactively on the\n instance\n :param password: the password to be used along with username when\n authenticating with the login services on the instance\n :param phone_home_host: the address of the host the instance\n should contact once it has finished\n booting\n :param phone_home_port: the port acting as an HTTP phone home\n server that the instance should contact\n once it has finished booting\n :param authorized_key: a SSH public key to be added as an authorized key\n for the default user, similar to \"ssh-rsa ...\"\n :type authorized_key: str\n :raises: RuntimeError if the system can not create ISO images. 
On such\n a case, user is expected to install supporting packages, such as\n pycdlib.\n \"\"\"\n out = iso9660.iso9660(output_path, [\"create\", \"write\"])\n if out is None:\n raise RuntimeError(\"The system lacks support for creating ISO images\")\n out.create(flags={\"interchange_level\": 3, \"joliet\": 3, \"vol_ident\": 'cidata'})\n metadata = METADATA_TEMPLATE.format(instance_id,\n instance_id).encode(astring.ENCODING)\n out.write(\"/meta-data\", metadata)\n userdata = USERDATA_HEADER\n if username:\n userdata += USERNAME_TEMPLATE.format(username)\n if username == \"root\":\n userdata += \"\\ndisable_root: False\\n\"\n if password:\n userdata += PASSWORD_TEMPLATE.format(password)\n if authorized_key:\n userdata += AUTHORIZED_KEY_TEMPLATE.format(authorized_key)\n if phone_home_host and phone_home_port:\n userdata += PHONE_HOME_TEMPLATE.format(phone_home_host, phone_home_port)\n out.write(\"/user-data\", userdata.encode(astring.ENCODING))\n out.close()\n\n\nclass PhoneHomeServerHandler(BaseHTTPRequestHandler):\n \"\"\"Handles HTTP requests to the phone home server.\"\"\"\n\n def do_POST(self):\n \"\"\"Handles an HTTP POST request.\n\n Respond with status 200 if the instance phoned back.\n \"\"\"\n path = self.path[1:]\n if path[-1] == '/':\n path = path[:-1]\n if path == self.server.instance_id:\n self.server.instance_phoned_back = True\n self.send_response(200)\n\n def log_message(self, format_, *args): # pylint: disable=W0221\n \"\"\"Logs an arbitrary message.\n\n :note: It currently disables any message logging.\n \"\"\"\n\n\nclass PhoneHomeServer(HTTPServer):\n \"\"\"Implements the phone home HTTP server.\n\n Wait the phone home from a given instance.\n \"\"\"\n\n def __init__(self, address, instance_id):\n \"\"\"Initialize the server.\n\n :param address: a hostname or IP address and port, in the same format\n given to socket and other servers\n :type address: tuple\n :param instance_id: the identification for the instance that should be\n calling back, and the condition for the wait to end\n :type instance_id: str\n \"\"\"\n HTTPServer.__init__(self, address, PhoneHomeServerHandler)\n self.instance_id = instance_id\n self.instance_phoned_back = False\n\n\ndef wait_for_phone_home(address, instance_id):\n \"\"\"\n Sets up a phone home server and waits for the given instance to call\n\n This is a shorthand for setting up a server that will keep handling\n requests, until it has heard from the specific instance requested.\n\n :param address: a hostname or IP address and port, in the same format\n given to socket and other servers\n :type address: tuple\n :param instance_id: the identification for the instance that should be\n calling back, and the condition for the wait to end\n :type instance_id: str\n \"\"\"\n s = PhoneHomeServer(address, instance_id)\n while not s.instance_phoned_back:\n s.handle_request()\n", "path": "avocado/utils/cloudinit.py"}], "after_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 
2018\n# Author: Cleber Rosa <[email protected]>\n\n\"\"\"\ncloudinit configuration support\n\nThis module can be easily used with :mod:`avocado.utils.vmimage`,\nto configure operating system images via the cloudinit tooling.\n\nPlease, keep in mind that if you would like to create/write in ISO images, you\nneed pycdlib module installed in your environment.\n\n:see: http://cloudinit.readthedocs.io.\n\"\"\"\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\n\nfrom . import astring, iso9660\n\n#: The meta-data file template\n#:\n#: Positional template variables are: instance-id, hostname\nMETADATA_TEMPLATE = \"\"\"instance-id: {0}\nhostname: {1}\n\"\"\"\n\n#: The header expected to be found at the beginning of the user-data file\nUSERDATA_HEADER = \"#cloud-config\"\n\n#: A username configuration as per cloudinit/config/cc_set_passwords.py\n#:\n#: Positional template variables : username\nUSERNAME_TEMPLATE = \"\"\"\nssh_pwauth: True\n\nsystem_info:\n default_user:\n name: {0}\n\"\"\"\n\n#: A username configuration as per cloudinit/config/cc_set_passwords.py\n#:\n#: Positional template variables are: password\nPASSWORD_TEMPLATE = \"\"\"\npassword: {0}\nchpasswd:\n expire: False\n\"\"\"\n\n#: An authorized key configuration for the default user\n#:\n#: Positional template variables are: ssh_authorized_keys\nAUTHORIZED_KEY_TEMPLATE = \"\"\"\nssh_authorized_keys:\n - {0}\n\"\"\"\n\n#: A phone home configuration that will post just the instance id\n#:\n#: Positional template variables are: address, port\nPHONE_HOME_TEMPLATE = \"\"\"\nphone_home:\n url: http://{0}:{1}/$INSTANCE_ID/\n post: [ instance_id ]\n\"\"\"\n\n\ndef iso(output_path, instance_id, username=None, password=None,\n phone_home_host=None, phone_home_port=None, authorized_key=None):\n \"\"\"\n Generates an ISO image with cloudinit configuration\n\n The content always include the cloudinit metadata, and optionally\n the userdata content. On the userdata file, it may contain a\n username/password section (if both parameters are given) and/or a\n phone home section (if both host and port are given).\n\n :param output_path: the location of the resulting (to be created) ISO\n image containing the cloudinit configuration\n :param instance_id: the ID of the cloud instance, a form of identification\n for the dynamically created executing instances\n :param username: the username to be used when logging interactively on the\n instance\n :param password: the password to be used along with username when\n authenticating with the login services on the instance\n :param phone_home_host: the address of the host the instance\n should contact once it has finished\n booting\n :param phone_home_port: the port acting as an HTTP phone home\n server that the instance should contact\n once it has finished booting\n :param authorized_key: a SSH public key to be added as an authorized key\n for the default user, similar to \"ssh-rsa ...\"\n :type authorized_key: str\n :raises: RuntimeError if the system can not create ISO images. On such\n a case, user is expected to install supporting packages, such as\n pycdlib.\n \"\"\"\n # The only supported method to create/write in an ISO today is via pycdlib\n out = iso9660.iso9660(output_path, [\"create\", \"write\"])\n if out is None:\n msg = (\"The system lacks support for creating ISO images. 
\",\n \"Please install pycdlib dependency and run again.\")\n raise RuntimeError(msg)\n out.create(flags={\"interchange_level\": 3, \"joliet\": 3, \"vol_ident\": 'cidata'})\n metadata = METADATA_TEMPLATE.format(instance_id,\n instance_id).encode(astring.ENCODING)\n out.write(\"/meta-data\", metadata)\n userdata = USERDATA_HEADER\n if username:\n userdata += USERNAME_TEMPLATE.format(username)\n if username == \"root\":\n userdata += \"\\ndisable_root: False\\n\"\n if password:\n userdata += PASSWORD_TEMPLATE.format(password)\n if authorized_key:\n userdata += AUTHORIZED_KEY_TEMPLATE.format(authorized_key)\n if phone_home_host and phone_home_port:\n userdata += PHONE_HOME_TEMPLATE.format(phone_home_host, phone_home_port)\n out.write(\"/user-data\", userdata.encode(astring.ENCODING))\n out.close()\n\n\nclass PhoneHomeServerHandler(BaseHTTPRequestHandler):\n \"\"\"Handles HTTP requests to the phone home server.\"\"\"\n\n def do_POST(self):\n \"\"\"Handles an HTTP POST request.\n\n Respond with status 200 if the instance phoned back.\n \"\"\"\n path = self.path[1:]\n if path[-1] == '/':\n path = path[:-1]\n if path == self.server.instance_id:\n self.server.instance_phoned_back = True\n self.send_response(200)\n\n def log_message(self, format_, *args): # pylint: disable=W0221\n \"\"\"Logs an arbitrary message.\n\n :note: It currently disables any message logging.\n \"\"\"\n\n\nclass PhoneHomeServer(HTTPServer):\n \"\"\"Implements the phone home HTTP server.\n\n Wait the phone home from a given instance.\n \"\"\"\n\n def __init__(self, address, instance_id):\n \"\"\"Initialize the server.\n\n :param address: a hostname or IP address and port, in the same format\n given to socket and other servers\n :type address: tuple\n :param instance_id: the identification for the instance that should be\n calling back, and the condition for the wait to end\n :type instance_id: str\n \"\"\"\n HTTPServer.__init__(self, address, PhoneHomeServerHandler)\n self.instance_id = instance_id\n self.instance_phoned_back = False\n\n\ndef wait_for_phone_home(address, instance_id):\n \"\"\"\n Sets up a phone home server and waits for the given instance to call\n\n This is a shorthand for setting up a server that will keep handling\n requests, until it has heard from the specific instance requested.\n\n :param address: a hostname or IP address and port, in the same format\n given to socket and other servers\n :type address: tuple\n :param instance_id: the identification for the instance that should be\n calling back, and the condition for the wait to end\n :type instance_id: str\n \"\"\"\n s = PhoneHomeServer(address, instance_id)\n while not s.instance_phoned_back:\n s.handle_request()\n", "path": "avocado/utils/cloudinit.py"}]}
2,220
314
gh_patches_debug_6327
rasdani/github-patches
git_diff
pretix__pretix-1525
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- External authentication without second login backend enabled not possible When using multiple authentication backends, `nav-pills` are shown shown to select the authentication method. These buttons will replace the `backend` parameter for form based authentication methods. For `authentication_url`-based backends clicking the button will redirect to an external webpage. We exclusively use an `authentication_url`-based backend, which results in the following login screen: ![image](https://user-images.githubusercontent.com/904824/70807661-495c5100-1dbe-11ea-87b8-609770c85497.png) Since the `nav-pills` are not rendered, we are unable to redirect to the login page. My suggestion would be to: 1. Hide the `Log In` button when no backends are used which are submitted using form fields. 2. Always show the `nav-pills` when the only available backend uses `authentication_url` based authentication. External authentication without second login backend enabled not possible When using multiple authentication backends, `nav-pills` are shown shown to select the authentication method. These buttons will replace the `backend` parameter for form based authentication methods. For `authentication_url`-based backends clicking the button will redirect to an external webpage. We exclusively use an `authentication_url`-based backend, which results in the following login screen: ![image](https://user-images.githubusercontent.com/904824/70807661-495c5100-1dbe-11ea-87b8-609770c85497.png) Since the `nav-pills` are not rendered, we are unable to redirect to the login page. My suggestion would be to: 1. Hide the `Log In` button when no backends are used which are submitted using form fields. 2. Always show the `nav-pills` when the only available backend uses `authentication_url` based authentication. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pretix/base/forms/auth.py` Content: ``` 1 from django import forms 2 from django.conf import settings 3 from django.contrib.auth.password_validation import ( 4 password_validators_help_texts, validate_password, 5 ) 6 from django.utils.translation import ugettext_lazy as _ 7 8 from pretix.base.models import User 9 10 11 class LoginForm(forms.Form): 12 """ 13 Base class for authenticating users. Extend this to get a form that accepts 14 username/password logins. 15 """ 16 keep_logged_in = forms.BooleanField(label=_("Keep me logged in"), required=False) 17 18 error_messages = { 19 'invalid_login': _("This combination of credentials is not known to our system."), 20 'inactive': _("This account is inactive.") 21 } 22 23 def __init__(self, backend, request=None, *args, **kwargs): 24 """ 25 The 'request' parameter is set for custom auth use by subclasses. 26 The form data comes in via the standard 'data' kwarg. 
27 """ 28 self.request = request 29 self.user_cache = None 30 self.backend = backend 31 super().__init__(*args, **kwargs) 32 for k, f in backend.login_form_fields.items(): 33 self.fields[k] = f 34 35 if not settings.PRETIX_LONG_SESSIONS: 36 del self.fields['keep_logged_in'] 37 else: 38 self.fields.move_to_end('keep_logged_in') 39 40 def clean(self): 41 if all(k in self.cleaned_data for k, f in self.fields.items() if f.required): 42 self.user_cache = self.backend.form_authenticate(self.request, self.cleaned_data) 43 if self.user_cache is None: 44 raise forms.ValidationError( 45 self.error_messages['invalid_login'], 46 code='invalid_login' 47 ) 48 else: 49 self.confirm_login_allowed(self.user_cache) 50 51 return self.cleaned_data 52 53 def confirm_login_allowed(self, user: User): 54 """ 55 Controls whether the given User may log in. This is a policy setting, 56 independent of end-user authentication. This default behavior is to 57 allow login by active users, and reject login by inactive users. 58 59 If the given user cannot log in, this method should raise a 60 ``forms.ValidationError``. 61 62 If the given user may log in, this method should return None. 63 """ 64 if not user.is_active: 65 raise forms.ValidationError( 66 self.error_messages['inactive'], 67 code='inactive', 68 ) 69 70 def get_user(self): 71 return self.user_cache 72 73 74 class RegistrationForm(forms.Form): 75 error_messages = { 76 'duplicate_email': _("You already registered with that email address, please use the login form."), 77 'pw_mismatch': _("Please enter the same password twice"), 78 } 79 email = forms.EmailField( 80 label=_('Email address'), 81 required=True 82 ) 83 password = forms.CharField( 84 label=_('Password'), 85 widget=forms.PasswordInput(attrs={ 86 'autocomplete': 'new-password' # see https://bugs.chromium.org/p/chromium/issues/detail?id=370363#c7 87 }), 88 required=True 89 ) 90 password_repeat = forms.CharField( 91 label=_('Repeat password'), 92 widget=forms.PasswordInput(attrs={ 93 'autocomplete': 'new-password' # see https://bugs.chromium.org/p/chromium/issues/detail?id=370363#c7 94 }), 95 required=True 96 ) 97 keep_logged_in = forms.BooleanField(label=_("Keep me logged in"), required=False) 98 99 def __init__(self, *args, **kwargs): 100 super().__init__(*args, **kwargs) 101 if not settings.PRETIX_LONG_SESSIONS: 102 del self.fields['keep_logged_in'] 103 104 def clean(self): 105 password1 = self.cleaned_data.get('password', '') 106 password2 = self.cleaned_data.get('password_repeat') 107 108 if password1 and password1 != password2: 109 raise forms.ValidationError({ 110 'password_repeat': self.error_messages['pw_mismatch'], 111 }, code='pw_mismatch') 112 return self.cleaned_data 113 114 def clean_password(self): 115 password1 = self.cleaned_data.get('password', '') 116 user = User(email=self.cleaned_data.get('email')) 117 if validate_password(password1, user=user) is not None: 118 raise forms.ValidationError(_(password_validators_help_texts()), code='pw_invalid') 119 return password1 120 121 def clean_email(self): 122 email = self.cleaned_data['email'] 123 if User.objects.filter(email__iexact=email).exists(): 124 raise forms.ValidationError( 125 self.error_messages['duplicate_email'], 126 code='duplicate_email' 127 ) 128 return email 129 130 131 class PasswordRecoverForm(forms.Form): 132 error_messages = { 133 'pw_mismatch': _("Please enter the same password twice"), 134 } 135 password = forms.CharField( 136 label=_('Password'), 137 widget=forms.PasswordInput, 138 required=True 139 ) 140 password_repeat = 
forms.CharField( 141 label=_('Repeat password'), 142 widget=forms.PasswordInput 143 ) 144 145 def __init__(self, user_id=None, *args, **kwargs): 146 self.user_id = user_id 147 super().__init__(*args, **kwargs) 148 149 def clean(self): 150 password1 = self.cleaned_data.get('password', '') 151 password2 = self.cleaned_data.get('password_repeat') 152 153 if password1 and password1 != password2: 154 raise forms.ValidationError({ 155 'password_repeat': self.error_messages['pw_mismatch'], 156 }, code='pw_mismatch') 157 158 return self.cleaned_data 159 160 def clean_password(self): 161 password1 = self.cleaned_data.get('password', '') 162 try: 163 user = User.objects.get(id=self.user_id) 164 except User.DoesNotExist: 165 user = None 166 if validate_password(password1, user=user) is not None: 167 raise forms.ValidationError(_(password_validators_help_texts()), code='pw_invalid') 168 return password1 169 170 171 class PasswordForgotForm(forms.Form): 172 email = forms.EmailField( 173 label=_('E-mail'), 174 ) 175 176 def __init__(self, *args, **kwargs): 177 if 'event' in kwargs: 178 # Backwards compatibility 179 del kwargs['event'] 180 super().__init__(*args, **kwargs) 181 182 def clean_email(self): 183 return self.cleaned_data['email'] 184 185 186 class ReauthForm(forms.Form): 187 error_messages = { 188 'invalid_login': _("This combination of credentials is not known to our system."), 189 'inactive': _("This account is inactive.") 190 } 191 192 def __init__(self, backend, user, request=None, *args, **kwargs): 193 """ 194 The 'request' parameter is set for custom auth use by subclasses. 195 The form data comes in via the standard 'data' kwarg. 196 """ 197 self.request = request 198 self.user = user 199 self.backend = backend 200 super().__init__(*args, **kwargs) 201 for k, f in backend.login_form_fields.items(): 202 self.fields[k] = f 203 if 'email' in self.fields: 204 self.fields['email'].disabled = True 205 206 def clean(self): 207 self.cleaned_data['email'] = self.user.email 208 user_cache = self.backend.form_authenticate(self.request, self.cleaned_data) 209 if user_cache != self.user: 210 raise forms.ValidationError( 211 self.error_messages['invalid_login'], 212 code='invalid_login' 213 ) 214 else: 215 self.confirm_login_allowed(user_cache) 216 217 return self.cleaned_data 218 219 def confirm_login_allowed(self, user: User): 220 if not user.is_active: 221 raise forms.ValidationError( 222 self.error_messages['inactive'], 223 code='inactive', 224 ) 225 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pretix/base/forms/auth.py b/src/pretix/base/forms/auth.py --- a/src/pretix/base/forms/auth.py +++ b/src/pretix/base/forms/auth.py @@ -32,7 +32,8 @@ for k, f in backend.login_form_fields.items(): self.fields[k] = f - if not settings.PRETIX_LONG_SESSIONS: + # Authentication backends which use urls cannot have long sessions. + if not settings.PRETIX_LONG_SESSIONS or backend.url: del self.fields['keep_logged_in'] else: self.fields.move_to_end('keep_logged_in')
{"golden_diff": "diff --git a/src/pretix/base/forms/auth.py b/src/pretix/base/forms/auth.py\n--- a/src/pretix/base/forms/auth.py\n+++ b/src/pretix/base/forms/auth.py\n@@ -32,7 +32,8 @@\n for k, f in backend.login_form_fields.items():\n self.fields[k] = f\n \n- if not settings.PRETIX_LONG_SESSIONS:\n+ # Authentication backends which use urls cannot have long sessions.\n+ if not settings.PRETIX_LONG_SESSIONS or backend.url:\n del self.fields['keep_logged_in']\n else:\n self.fields.move_to_end('keep_logged_in')\n", "issue": "External authentication without second login backend enabled not possible\nWhen using multiple authentication backends, `nav-pills` are shown shown to select the authentication method. These buttons will replace the `backend` parameter for form based authentication methods. For `authentication_url`-based backends clicking the button will redirect to an external webpage.\r\n\r\nWe exclusively use an `authentication_url`-based backend, which results in the following login screen:\r\n\r\n![image](https://user-images.githubusercontent.com/904824/70807661-495c5100-1dbe-11ea-87b8-609770c85497.png)\r\n\r\nSince the `nav-pills` are not rendered, we are unable to redirect to the login page.\r\n\r\nMy suggestion would be to:\r\n1. Hide the `Log In` button when no backends are used which are submitted using form fields.\r\n2. Always show the `nav-pills` when the only available backend uses `authentication_url` based authentication.\r\n\nExternal authentication without second login backend enabled not possible\nWhen using multiple authentication backends, `nav-pills` are shown shown to select the authentication method. These buttons will replace the `backend` parameter for form based authentication methods. For `authentication_url`-based backends clicking the button will redirect to an external webpage.\r\n\r\nWe exclusively use an `authentication_url`-based backend, which results in the following login screen:\r\n\r\n![image](https://user-images.githubusercontent.com/904824/70807661-495c5100-1dbe-11ea-87b8-609770c85497.png)\r\n\r\nSince the `nav-pills` are not rendered, we are unable to redirect to the login page.\r\n\r\nMy suggestion would be to:\r\n1. Hide the `Log In` button when no backends are used which are submitted using form fields.\r\n2. Always show the `nav-pills` when the only available backend uses `authentication_url` based authentication.\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.contrib.auth.password_validation import (\n password_validators_help_texts, validate_password,\n)\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom pretix.base.models import User\n\n\nclass LoginForm(forms.Form):\n \"\"\"\n Base class for authenticating users. 
Extend this to get a form that accepts\n username/password logins.\n \"\"\"\n keep_logged_in = forms.BooleanField(label=_(\"Keep me logged in\"), required=False)\n\n error_messages = {\n 'invalid_login': _(\"This combination of credentials is not known to our system.\"),\n 'inactive': _(\"This account is inactive.\")\n }\n\n def __init__(self, backend, request=None, *args, **kwargs):\n \"\"\"\n The 'request' parameter is set for custom auth use by subclasses.\n The form data comes in via the standard 'data' kwarg.\n \"\"\"\n self.request = request\n self.user_cache = None\n self.backend = backend\n super().__init__(*args, **kwargs)\n for k, f in backend.login_form_fields.items():\n self.fields[k] = f\n\n if not settings.PRETIX_LONG_SESSIONS:\n del self.fields['keep_logged_in']\n else:\n self.fields.move_to_end('keep_logged_in')\n\n def clean(self):\n if all(k in self.cleaned_data for k, f in self.fields.items() if f.required):\n self.user_cache = self.backend.form_authenticate(self.request, self.cleaned_data)\n if self.user_cache is None:\n raise forms.ValidationError(\n self.error_messages['invalid_login'],\n code='invalid_login'\n )\n else:\n self.confirm_login_allowed(self.user_cache)\n\n return self.cleaned_data\n\n def confirm_login_allowed(self, user: User):\n \"\"\"\n Controls whether the given User may log in. This is a policy setting,\n independent of end-user authentication. This default behavior is to\n allow login by active users, and reject login by inactive users.\n\n If the given user cannot log in, this method should raise a\n ``forms.ValidationError``.\n\n If the given user may log in, this method should return None.\n \"\"\"\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )\n\n def get_user(self):\n return self.user_cache\n\n\nclass RegistrationForm(forms.Form):\n error_messages = {\n 'duplicate_email': _(\"You already registered with that email address, please use the login form.\"),\n 'pw_mismatch': _(\"Please enter the same password twice\"),\n }\n email = forms.EmailField(\n label=_('Email address'),\n required=True\n )\n password = forms.CharField(\n label=_('Password'),\n widget=forms.PasswordInput(attrs={\n 'autocomplete': 'new-password' # see https://bugs.chromium.org/p/chromium/issues/detail?id=370363#c7\n }),\n required=True\n )\n password_repeat = forms.CharField(\n label=_('Repeat password'),\n widget=forms.PasswordInput(attrs={\n 'autocomplete': 'new-password' # see https://bugs.chromium.org/p/chromium/issues/detail?id=370363#c7\n }),\n required=True\n )\n keep_logged_in = forms.BooleanField(label=_(\"Keep me logged in\"), required=False)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not settings.PRETIX_LONG_SESSIONS:\n del self.fields['keep_logged_in']\n\n def clean(self):\n password1 = self.cleaned_data.get('password', '')\n password2 = self.cleaned_data.get('password_repeat')\n\n if password1 and password1 != password2:\n raise forms.ValidationError({\n 'password_repeat': self.error_messages['pw_mismatch'],\n }, code='pw_mismatch')\n return self.cleaned_data\n\n def clean_password(self):\n password1 = self.cleaned_data.get('password', '')\n user = User(email=self.cleaned_data.get('email'))\n if validate_password(password1, user=user) is not None:\n raise forms.ValidationError(_(password_validators_help_texts()), code='pw_invalid')\n return password1\n\n def clean_email(self):\n email = self.cleaned_data['email']\n if 
User.objects.filter(email__iexact=email).exists():\n raise forms.ValidationError(\n self.error_messages['duplicate_email'],\n code='duplicate_email'\n )\n return email\n\n\nclass PasswordRecoverForm(forms.Form):\n error_messages = {\n 'pw_mismatch': _(\"Please enter the same password twice\"),\n }\n password = forms.CharField(\n label=_('Password'),\n widget=forms.PasswordInput,\n required=True\n )\n password_repeat = forms.CharField(\n label=_('Repeat password'),\n widget=forms.PasswordInput\n )\n\n def __init__(self, user_id=None, *args, **kwargs):\n self.user_id = user_id\n super().__init__(*args, **kwargs)\n\n def clean(self):\n password1 = self.cleaned_data.get('password', '')\n password2 = self.cleaned_data.get('password_repeat')\n\n if password1 and password1 != password2:\n raise forms.ValidationError({\n 'password_repeat': self.error_messages['pw_mismatch'],\n }, code='pw_mismatch')\n\n return self.cleaned_data\n\n def clean_password(self):\n password1 = self.cleaned_data.get('password', '')\n try:\n user = User.objects.get(id=self.user_id)\n except User.DoesNotExist:\n user = None\n if validate_password(password1, user=user) is not None:\n raise forms.ValidationError(_(password_validators_help_texts()), code='pw_invalid')\n return password1\n\n\nclass PasswordForgotForm(forms.Form):\n email = forms.EmailField(\n label=_('E-mail'),\n )\n\n def __init__(self, *args, **kwargs):\n if 'event' in kwargs:\n # Backwards compatibility\n del kwargs['event']\n super().__init__(*args, **kwargs)\n\n def clean_email(self):\n return self.cleaned_data['email']\n\n\nclass ReauthForm(forms.Form):\n error_messages = {\n 'invalid_login': _(\"This combination of credentials is not known to our system.\"),\n 'inactive': _(\"This account is inactive.\")\n }\n\n def __init__(self, backend, user, request=None, *args, **kwargs):\n \"\"\"\n The 'request' parameter is set for custom auth use by subclasses.\n The form data comes in via the standard 'data' kwarg.\n \"\"\"\n self.request = request\n self.user = user\n self.backend = backend\n super().__init__(*args, **kwargs)\n for k, f in backend.login_form_fields.items():\n self.fields[k] = f\n if 'email' in self.fields:\n self.fields['email'].disabled = True\n\n def clean(self):\n self.cleaned_data['email'] = self.user.email\n user_cache = self.backend.form_authenticate(self.request, self.cleaned_data)\n if user_cache != self.user:\n raise forms.ValidationError(\n self.error_messages['invalid_login'],\n code='invalid_login'\n )\n else:\n self.confirm_login_allowed(user_cache)\n\n return self.cleaned_data\n\n def confirm_login_allowed(self, user: User):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )\n", "path": "src/pretix/base/forms/auth.py"}], "after_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.contrib.auth.password_validation import (\n password_validators_help_texts, validate_password,\n)\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom pretix.base.models import User\n\n\nclass LoginForm(forms.Form):\n \"\"\"\n Base class for authenticating users. 
Extend this to get a form that accepts\n username/password logins.\n \"\"\"\n keep_logged_in = forms.BooleanField(label=_(\"Keep me logged in\"), required=False)\n\n error_messages = {\n 'invalid_login': _(\"This combination of credentials is not known to our system.\"),\n 'inactive': _(\"This account is inactive.\")\n }\n\n def __init__(self, backend, request=None, *args, **kwargs):\n \"\"\"\n The 'request' parameter is set for custom auth use by subclasses.\n The form data comes in via the standard 'data' kwarg.\n \"\"\"\n self.request = request\n self.user_cache = None\n self.backend = backend\n super().__init__(*args, **kwargs)\n for k, f in backend.login_form_fields.items():\n self.fields[k] = f\n\n # Authentication backends which use urls cannot have long sessions.\n if not settings.PRETIX_LONG_SESSIONS or backend.url:\n del self.fields['keep_logged_in']\n else:\n self.fields.move_to_end('keep_logged_in')\n\n def clean(self):\n if all(k in self.cleaned_data for k, f in self.fields.items() if f.required):\n self.user_cache = self.backend.form_authenticate(self.request, self.cleaned_data)\n if self.user_cache is None:\n raise forms.ValidationError(\n self.error_messages['invalid_login'],\n code='invalid_login'\n )\n else:\n self.confirm_login_allowed(self.user_cache)\n\n return self.cleaned_data\n\n def confirm_login_allowed(self, user: User):\n \"\"\"\n Controls whether the given User may log in. This is a policy setting,\n independent of end-user authentication. This default behavior is to\n allow login by active users, and reject login by inactive users.\n\n If the given user cannot log in, this method should raise a\n ``forms.ValidationError``.\n\n If the given user may log in, this method should return None.\n \"\"\"\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )\n\n def get_user(self):\n return self.user_cache\n\n\nclass RegistrationForm(forms.Form):\n error_messages = {\n 'duplicate_email': _(\"You already registered with that email address, please use the login form.\"),\n 'pw_mismatch': _(\"Please enter the same password twice\"),\n }\n email = forms.EmailField(\n label=_('Email address'),\n required=True\n )\n password = forms.CharField(\n label=_('Password'),\n widget=forms.PasswordInput(attrs={\n 'autocomplete': 'new-password' # see https://bugs.chromium.org/p/chromium/issues/detail?id=370363#c7\n }),\n required=True\n )\n password_repeat = forms.CharField(\n label=_('Repeat password'),\n widget=forms.PasswordInput(attrs={\n 'autocomplete': 'new-password' # see https://bugs.chromium.org/p/chromium/issues/detail?id=370363#c7\n }),\n required=True\n )\n keep_logged_in = forms.BooleanField(label=_(\"Keep me logged in\"), required=False)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not settings.PRETIX_LONG_SESSIONS:\n del self.fields['keep_logged_in']\n\n def clean(self):\n password1 = self.cleaned_data.get('password', '')\n password2 = self.cleaned_data.get('password_repeat')\n\n if password1 and password1 != password2:\n raise forms.ValidationError({\n 'password_repeat': self.error_messages['pw_mismatch'],\n }, code='pw_mismatch')\n return self.cleaned_data\n\n def clean_password(self):\n password1 = self.cleaned_data.get('password', '')\n user = User(email=self.cleaned_data.get('email'))\n if validate_password(password1, user=user) is not None:\n raise forms.ValidationError(_(password_validators_help_texts()), code='pw_invalid')\n return password1\n\n def 
clean_email(self):\n email = self.cleaned_data['email']\n if User.objects.filter(email__iexact=email).exists():\n raise forms.ValidationError(\n self.error_messages['duplicate_email'],\n code='duplicate_email'\n )\n return email\n\n\nclass PasswordRecoverForm(forms.Form):\n error_messages = {\n 'pw_mismatch': _(\"Please enter the same password twice\"),\n }\n password = forms.CharField(\n label=_('Password'),\n widget=forms.PasswordInput,\n required=True\n )\n password_repeat = forms.CharField(\n label=_('Repeat password'),\n widget=forms.PasswordInput\n )\n\n def __init__(self, user_id=None, *args, **kwargs):\n self.user_id = user_id\n super().__init__(*args, **kwargs)\n\n def clean(self):\n password1 = self.cleaned_data.get('password', '')\n password2 = self.cleaned_data.get('password_repeat')\n\n if password1 and password1 != password2:\n raise forms.ValidationError({\n 'password_repeat': self.error_messages['pw_mismatch'],\n }, code='pw_mismatch')\n\n return self.cleaned_data\n\n def clean_password(self):\n password1 = self.cleaned_data.get('password', '')\n try:\n user = User.objects.get(id=self.user_id)\n except User.DoesNotExist:\n user = None\n if validate_password(password1, user=user) is not None:\n raise forms.ValidationError(_(password_validators_help_texts()), code='pw_invalid')\n return password1\n\n\nclass PasswordForgotForm(forms.Form):\n email = forms.EmailField(\n label=_('E-mail'),\n )\n\n def __init__(self, *args, **kwargs):\n if 'event' in kwargs:\n # Backwards compatibility\n del kwargs['event']\n super().__init__(*args, **kwargs)\n\n def clean_email(self):\n return self.cleaned_data['email']\n\n\nclass ReauthForm(forms.Form):\n error_messages = {\n 'invalid_login': _(\"This combination of credentials is not known to our system.\"),\n 'inactive': _(\"This account is inactive.\")\n }\n\n def __init__(self, backend, user, request=None, *args, **kwargs):\n \"\"\"\n The 'request' parameter is set for custom auth use by subclasses.\n The form data comes in via the standard 'data' kwarg.\n \"\"\"\n self.request = request\n self.user = user\n self.backend = backend\n super().__init__(*args, **kwargs)\n for k, f in backend.login_form_fields.items():\n self.fields[k] = f\n if 'email' in self.fields:\n self.fields['email'].disabled = True\n\n def clean(self):\n self.cleaned_data['email'] = self.user.email\n user_cache = self.backend.form_authenticate(self.request, self.cleaned_data)\n if user_cache != self.user:\n raise forms.ValidationError(\n self.error_messages['invalid_login'],\n code='invalid_login'\n )\n else:\n self.confirm_login_allowed(user_cache)\n\n return self.cleaned_data\n\n def confirm_login_allowed(self, user: User):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )\n", "path": "src/pretix/base/forms/auth.py"}]}
2,838
140
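A minimal standalone sketch of the password-widget pattern visible in the after_files of the record above (plain Django form; the class and field names here are illustrative and not taken from pretix):

```python
from django import forms


class SignupForm(forms.Form):
    # Hint browsers not to autofill a saved password into a registration field;
    # see https://bugs.chromium.org/p/chromium/issues/detail?id=370363#c7
    password = forms.CharField(
        label="Password",
        widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}),
    )
    password_repeat = forms.CharField(
        label="Repeat password",
        widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}),
    )

    def clean(self):
        data = super().clean()
        if data.get("password") and data.get("password") != data.get("password_repeat"):
            self.add_error("password_repeat", "Please enter the same password twice")
        return data
```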
gh_patches_debug_10157
rasdani/github-patches
git_diff
huggingface__transformers-193
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py error attributeError: 'BertForPreTraining' object has no attribute 'global_step' --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py` Content: ``` 1 # coding=utf-8 2 # Copyright 2018 The HugginFace Inc. team. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 """Convert BERT checkpoint.""" 16 17 from __future__ import absolute_import 18 from __future__ import division 19 from __future__ import print_function 20 21 import os 22 import re 23 import argparse 24 import tensorflow as tf 25 import torch 26 import numpy as np 27 28 from .modeling import BertConfig, BertForPreTraining 29 30 def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path): 31 config_path = os.path.abspath(bert_config_file) 32 tf_path = os.path.abspath(tf_checkpoint_path) 33 print("Converting TensorFlow checkpoint from {} with config at {}".format(tf_path, config_path)) 34 # Load weights from TF model 35 init_vars = tf.train.list_variables(tf_path) 36 names = [] 37 arrays = [] 38 for name, shape in init_vars: 39 print("Loading TF weight {} with shape {}".format(name, shape)) 40 array = tf.train.load_variable(tf_path, name) 41 names.append(name) 42 arrays.append(array) 43 44 # Initialise PyTorch model 45 config = BertConfig.from_json_file(bert_config_file) 46 print("Building PyTorch model from configuration: {}".format(str(config))) 47 model = BertForPreTraining(config) 48 49 for name, array in zip(names, arrays): 50 name = name.split('/') 51 # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v 52 # which are not required for using pretrained model 53 if any(n in ["adam_v", "adam_m"] for n in name): 54 print("Skipping {}".format("/".join(name))) 55 continue 56 pointer = model 57 for m_name in name: 58 if re.fullmatch(r'[A-Za-z]+_\d+', m_name): 59 l = re.split(r'_(\d+)', m_name) 60 else: 61 l = [m_name] 62 if l[0] == 'kernel' or l[0] == 'gamma': 63 pointer = getattr(pointer, 'weight') 64 elif l[0] == 'output_bias' or l[0] == 'beta': 65 pointer = getattr(pointer, 'bias') 66 elif l[0] == 'output_weights': 67 pointer = getattr(pointer, 'weight') 68 else: 69 pointer = getattr(pointer, l[0]) 70 if len(l) >= 2: 71 num = int(l[1]) 72 pointer = pointer[num] 73 if m_name[-11:] == '_embeddings': 74 pointer = getattr(pointer, 'weight') 75 elif m_name == 'kernel': 76 array = np.transpose(array) 77 try: 78 assert pointer.shape == array.shape 79 except AssertionError as e: 80 e.args += (pointer.shape, array.shape) 81 raise 82 print("Initialize PyTorch weight {}".format(name)) 83 pointer.data = torch.from_numpy(array) 84 85 # Save pytorch-model 86 print("Save PyTorch model to {}".format(pytorch_dump_path)) 87 
torch.save(model.state_dict(), pytorch_dump_path) 88 89 90 if __name__ == "__main__": 91 parser = argparse.ArgumentParser() 92 ## Required parameters 93 parser.add_argument("--tf_checkpoint_path", 94 default = None, 95 type = str, 96 required = True, 97 help = "Path the TensorFlow checkpoint path.") 98 parser.add_argument("--bert_config_file", 99 default = None, 100 type = str, 101 required = True, 102 help = "The config json file corresponding to the pre-trained BERT model. \n" 103 "This specifies the model architecture.") 104 parser.add_argument("--pytorch_dump_path", 105 default = None, 106 type = str, 107 required = True, 108 help = "Path to the output PyTorch model.") 109 args = parser.parse_args() 110 convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, 111 args.bert_config_file, 112 args.pytorch_dump_path) 113 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py b/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py --- a/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py +++ b/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py @@ -50,7 +50,7 @@ name = name.split('/') # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model - if any(n in ["adam_v", "adam_m"] for n in name): + if any(n in ["adam_v", "adam_m", "global_step"] for n in name): print("Skipping {}".format("/".join(name))) continue pointer = model
{"golden_diff": "diff --git a/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py b/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py\n--- a/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py\n+++ b/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py\n@@ -50,7 +50,7 @@\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n- if any(n in [\"adam_v\", \"adam_m\"] for n in name):\n+ if any(n in [\"adam_v\", \"adam_m\", \"global_step\"] for n in name):\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n", "issue": "pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py error\nattributeError: 'BertForPreTraining' object has no attribute 'global_step'\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2018 The HugginFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Convert BERT checkpoint.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport argparse\nimport tensorflow as tf\nimport torch\nimport numpy as np\n\nfrom .modeling import BertConfig, BertForPreTraining\n\ndef convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):\n config_path = os.path.abspath(bert_config_file)\n tf_path = os.path.abspath(tf_checkpoint_path)\n print(\"Converting TensorFlow checkpoint from {} with config at {}\".format(tf_path, config_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n print(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n # Initialise PyTorch model\n config = BertConfig.from_json_file(bert_config_file)\n print(\"Building PyTorch model from configuration: {}\".format(str(config)))\n model = BertForPreTraining(config)\n\n for name, array in zip(names, arrays):\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\"] for n in name):\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n else:\n pointer = getattr(pointer, l[0])\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n 
assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n\n # Save pytorch-model\n print(\"Save PyTorch model to {}\".format(pytorch_dump_path))\n torch.save(model.state_dict(), pytorch_dump_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n ## Required parameters\n parser.add_argument(\"--tf_checkpoint_path\",\n default = None,\n type = str,\n required = True,\n help = \"Path the TensorFlow checkpoint path.\")\n parser.add_argument(\"--bert_config_file\",\n default = None,\n type = str,\n required = True,\n help = \"The config json file corresponding to the pre-trained BERT model. \\n\"\n \"This specifies the model architecture.\")\n parser.add_argument(\"--pytorch_dump_path\",\n default = None,\n type = str,\n required = True,\n help = \"Path to the output PyTorch model.\")\n args = parser.parse_args()\n convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,\n args.bert_config_file,\n args.pytorch_dump_path)\n", "path": "pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2018 The HugginFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Convert BERT checkpoint.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport argparse\nimport tensorflow as tf\nimport torch\nimport numpy as np\n\nfrom .modeling import BertConfig, BertForPreTraining\n\ndef convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):\n config_path = os.path.abspath(bert_config_file)\n tf_path = os.path.abspath(tf_checkpoint_path)\n print(\"Converting TensorFlow checkpoint from {} with config at {}\".format(tf_path, config_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n print(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n # Initialise PyTorch model\n config = BertConfig.from_json_file(bert_config_file)\n print(\"Building PyTorch model from configuration: {}\".format(str(config)))\n model = BertForPreTraining(config)\n\n for name, array in zip(names, arrays):\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\", \"global_step\"] for n in name):\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 
'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n else:\n pointer = getattr(pointer, l[0])\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n\n # Save pytorch-model\n print(\"Save PyTorch model to {}\".format(pytorch_dump_path))\n torch.save(model.state_dict(), pytorch_dump_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n ## Required parameters\n parser.add_argument(\"--tf_checkpoint_path\",\n default = None,\n type = str,\n required = True,\n help = \"Path the TensorFlow checkpoint path.\")\n parser.add_argument(\"--bert_config_file\",\n default = None,\n type = str,\n required = True,\n help = \"The config json file corresponding to the pre-trained BERT model. \\n\"\n \"This specifies the model architecture.\")\n parser.add_argument(\"--pytorch_dump_path\",\n default = None,\n type = str,\n required = True,\n help = \"Path to the output PyTorch model.\")\n args = parser.parse_args()\n convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,\n args.bert_config_file,\n args.pytorch_dump_path)\n", "path": "pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py"}]}
1,480
180
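A minimal standalone sketch of the skip rule added by the golden diff in the record above: TF optimizer/bookkeeping variables ("adam_v", "adam_m", "global_step") have no counterpart in the PyTorch module tree, so they must be filtered out before the converter walks the slash-separated name into model attributes. The helper name and assertions are illustrative, not part of the repository:

```python
# Variables that exist only in the TensorFlow checkpoint, never in the PyTorch model.
SKIP_VARS = {"adam_v", "adam_m", "global_step"}


def should_skip(tf_variable_name: str) -> bool:
    # Mirrors `any(n in [...] for n in name)` where name = tf_variable_name.split('/')
    return any(part in SKIP_VARS for part in tf_variable_name.split("/"))


assert should_skip("global_step")
assert should_skip("bert/encoder/layer_0/attention/self/query/kernel/adam_m")
assert not should_skip("bert/embeddings/word_embeddings")
```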
gh_patches_debug_8111
rasdani/github-patches
git_diff
scrapy__scrapy-5253
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ints in Cookies are not getting parsed correctly <!-- Thanks for taking an interest in Scrapy! If you have a question that starts with "How to...", please see the Scrapy Community page: https://scrapy.org/community/. The GitHub issue tracker's purpose is to deal with bug reports and feature requests for the project itself. Keep in mind that by filing an issue, you are expected to comply with Scrapy's Code of Conduct, including treating everyone with respect: https://github.com/scrapy/scrapy/blob/master/CODE_OF_CONDUCT.md The following is a suggested template to structure your issue, you can find more guidelines at https://doc.scrapy.org/en/latest/contributing.html#reporting-bugs --> ### Description Using an int in a cookie causes the spider to not work. Replacing the int with a str fixes the problem. ### Steps to Reproduce Failing Code Example: ``` request = scrapy.Request( url = url, callback = callback ) request.cookies['some_cookie'] = 1 ``` Successful Code Example: ``` request = scrapy.Request( url = url, callback = callback ) request.cookies['some_cookie'] = '1' ``` **Expected behavior:** I expect that an int should be parsed to a str without me needing to do it myself. **Actual behavior:** Get following error: ``` Traceback (most recent call last): File "C:\Users\Michael\AppData\Local\Programs\Python\Python39\lib\site-packages\twisted\internet\defer.py", line 1661, in _inlineCallbacks result = current_context.run(gen.send, result) File "C:\Users\Michael\AppData\Local\Programs\Python\Python39\lib\site-packages\scrapy\core\downloader\middleware.py", line 36, in process_request response = yield deferred_from_coro(method(request=request, spider=spider)) File "C:\Users\Michael\AppData\Local\Programs\Python\Python39\lib\site-packages\scrapy\downloadermiddlewares\cookies.py", line 32, in process_request for cookie in self._get_request_cookies(jar, request): File "C:\Users\Michael\AppData\Local\Programs\Python\Python39\lib\site-packages\scrapy\downloadermiddlewares\cookies.py", line 109, in _get_request_cookies response = Response(request.url, headers={"Set-Cookie": formatted}) File "C:\Users\Michael\AppData\Local\Programs\Python\Python39\lib\site-packages\scrapy\http\response\__init__.py", line 32, in __init__ self.headers = Headers(headers or {}) File "C:\Users\Michael\AppData\Local\Programs\Python\Python39\lib\site-packages\scrapy\http\headers.py", line 11, in __init__ super().__init__(seq) File "C:\Users\Michael\AppData\Local\Programs\Python\Python39\lib\site-packages\scrapy\utils\datatypes.py", line 20, in __init__ self.update(seq) File "C:\Users\Michael\AppData\Local\Programs\Python\Python39\lib\site-packages\scrapy\utils\datatypes.py", line 56, in update super().update(iseq) File "C:\Users\Michael\AppData\Local\Programs\Python\Python39\lib\site-packages\scrapy\utils\datatypes.py", line 55, in <genexpr> iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq) File "C:\Users\Michael\AppData\Local\Programs\Python\Python39\lib\site-packages\scrapy\http\headers.py", line 26, in normvalue return [self._tobytes(x) for x in value] File "C:\Users\Michael\AppData\Local\Programs\Python\Python39\lib\site-packages\scrapy\http\headers.py", line 26, in <listcomp> File "C:\Users\Michael\AppData\Local\Programs\Python\Python39\lib\site-packages\scrapy\downloadermiddlewares\cookies.py", line 108, in <genexpr> formatted = filter(None, (self._format_cookie(c, request) for c in cookies)) 
File "C:\Users\Michael\AppData\Local\Programs\Python\Python39\lib\site-packages\scrapy\downloadermiddlewares\cookies.py", line 87, in _format_cookie decoded[key] = cookie[key].decode("utf8") AttributeError: 'int' object has no attribute 'decode' ``` **Reproduces how often:** [What percentage of the time does it reproduce?] ### Versions Scrapy : 2.5.0 lxml : 4.6.3.0 libxml2 : 2.9.5 cssselect : 1.1.0 parsel : 1.6.0 w3lib : 1.22.0 Twisted : 21.7.0 Python : 3.9.7 (tags/v3.9.7:1016ef3, Aug 30 2021, 20:19:38) [MSC v.1929 64 bit (AMD64)] pyOpenSSL : 21.0.0 (OpenSSL 1.1.1l 24 Aug 2021) cryptography : 35.0.0 Platform : Windows-10-10.0.19043-SP0 ### Additional context from scrapy.http import Request Failing Code Example: ``` request = scrapy.Request( url = url, callback = callback ) request.cookies['some_cookie'] = 1 ``` Successful Code Example: ``` request = scrapy.Request( url = url, callback = callback ) request.cookies['some_cookie'] = '1' ``` (edited to add code formatting to the traceback) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/downloadermiddlewares/cookies.py` Content: ``` 1 import logging 2 from collections import defaultdict 3 4 from scrapy.exceptions import NotConfigured 5 from scrapy.http import Response 6 from scrapy.http.cookies import CookieJar 7 from scrapy.utils.python import to_unicode 8 9 10 logger = logging.getLogger(__name__) 11 12 13 class CookiesMiddleware: 14 """This middleware enables working with sites that need cookies""" 15 16 def __init__(self, debug=False): 17 self.jars = defaultdict(CookieJar) 18 self.debug = debug 19 20 @classmethod 21 def from_crawler(cls, crawler): 22 if not crawler.settings.getbool('COOKIES_ENABLED'): 23 raise NotConfigured 24 return cls(crawler.settings.getbool('COOKIES_DEBUG')) 25 26 def process_request(self, request, spider): 27 if request.meta.get('dont_merge_cookies', False): 28 return 29 30 cookiejarkey = request.meta.get("cookiejar") 31 jar = self.jars[cookiejarkey] 32 for cookie in self._get_request_cookies(jar, request): 33 jar.set_cookie_if_ok(cookie, request) 34 35 # set Cookie header 36 request.headers.pop('Cookie', None) 37 jar.add_cookie_header(request) 38 self._debug_cookie(request, spider) 39 40 def process_response(self, request, response, spider): 41 if request.meta.get('dont_merge_cookies', False): 42 return response 43 44 # extract cookies from Set-Cookie and drop invalid/expired cookies 45 cookiejarkey = request.meta.get("cookiejar") 46 jar = self.jars[cookiejarkey] 47 jar.extract_cookies(response, request) 48 self._debug_set_cookie(response, spider) 49 50 return response 51 52 def _debug_cookie(self, request, spider): 53 if self.debug: 54 cl = [to_unicode(c, errors='replace') 55 for c in request.headers.getlist('Cookie')] 56 if cl: 57 cookies = "\n".join(f"Cookie: {c}\n" for c in cl) 58 msg = f"Sending cookies to: {request}\n{cookies}" 59 logger.debug(msg, extra={'spider': spider}) 60 61 def _debug_set_cookie(self, response, spider): 62 if self.debug: 63 cl = [to_unicode(c, errors='replace') 64 for c in response.headers.getlist('Set-Cookie')] 65 if cl: 66 cookies = "\n".join(f"Set-Cookie: {c}\n" for c in cl) 67 msg = f"Received cookies from: {response}\n{cookies}" 68 logger.debug(msg, extra={'spider': spider}) 69 70 def _format_cookie(self, cookie, request): 71 """ 72 Given a dict consisting of cookie components, return its string representation. 73 Decode from bytes if necessary. 
74 """ 75 decoded = {} 76 for key in ("name", "value", "path", "domain"): 77 if cookie.get(key) is None: 78 if key in ("name", "value"): 79 msg = "Invalid cookie found in request {}: {} ('{}' is missing)" 80 logger.warning(msg.format(request, cookie, key)) 81 return 82 continue 83 if isinstance(cookie[key], str): 84 decoded[key] = cookie[key] 85 else: 86 try: 87 decoded[key] = cookie[key].decode("utf8") 88 except UnicodeDecodeError: 89 logger.warning("Non UTF-8 encoded cookie found in request %s: %s", 90 request, cookie) 91 decoded[key] = cookie[key].decode("latin1", errors="replace") 92 93 cookie_str = f"{decoded.pop('name')}={decoded.pop('value')}" 94 for key, value in decoded.items(): # path, domain 95 cookie_str += f"; {key.capitalize()}={value}" 96 return cookie_str 97 98 def _get_request_cookies(self, jar, request): 99 """ 100 Extract cookies from the Request.cookies attribute 101 """ 102 if not request.cookies: 103 return [] 104 elif isinstance(request.cookies, dict): 105 cookies = ({"name": k, "value": v} for k, v in request.cookies.items()) 106 else: 107 cookies = request.cookies 108 formatted = filter(None, (self._format_cookie(c, request) for c in cookies)) 109 response = Response(request.url, headers={"Set-Cookie": formatted}) 110 return jar.make_cookies(response, request) 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/downloadermiddlewares/cookies.py b/scrapy/downloadermiddlewares/cookies.py --- a/scrapy/downloadermiddlewares/cookies.py +++ b/scrapy/downloadermiddlewares/cookies.py @@ -80,8 +80,8 @@ logger.warning(msg.format(request, cookie, key)) return continue - if isinstance(cookie[key], str): - decoded[key] = cookie[key] + if isinstance(cookie[key], (bool, float, int, str)): + decoded[key] = str(cookie[key]) else: try: decoded[key] = cookie[key].decode("utf8")
{"golden_diff": "diff --git a/scrapy/downloadermiddlewares/cookies.py b/scrapy/downloadermiddlewares/cookies.py\n--- a/scrapy/downloadermiddlewares/cookies.py\n+++ b/scrapy/downloadermiddlewares/cookies.py\n@@ -80,8 +80,8 @@\n logger.warning(msg.format(request, cookie, key))\n return\n continue\n- if isinstance(cookie[key], str):\n- decoded[key] = cookie[key]\n+ if isinstance(cookie[key], (bool, float, int, str)):\n+ decoded[key] = str(cookie[key])\n else:\n try:\n decoded[key] = cookie[key].decode(\"utf8\")\n", "issue": "ints in Cookies are not getting parsed correctly\n<!--\r\n\r\nThanks for taking an interest in Scrapy!\r\n\r\nIf you have a question that starts with \"How to...\", please see the Scrapy Community page: https://scrapy.org/community/.\r\nThe GitHub issue tracker's purpose is to deal with bug reports and feature requests for the project itself.\r\n\r\nKeep in mind that by filing an issue, you are expected to comply with Scrapy's Code of Conduct, including treating everyone with respect: https://github.com/scrapy/scrapy/blob/master/CODE_OF_CONDUCT.md\r\n\r\nThe following is a suggested template to structure your issue, you can find more guidelines at https://doc.scrapy.org/en/latest/contributing.html#reporting-bugs\r\n\r\n-->\r\n\r\n### Description\r\n\r\nUsing an int in a cookie causes the spider to not work. Replacing the int with a str fixes the problem.\r\n\r\n### Steps to Reproduce\r\n\r\nFailing Code Example:\r\n```\r\nrequest = scrapy.Request(\r\n url = url, \r\n callback = callback\r\n)\r\nrequest.cookies['some_cookie'] = 1\r\n```\r\n\r\nSuccessful Code Example:\r\n```\r\nrequest = scrapy.Request(\r\n url = url, \r\n callback = callback\r\n)\r\nrequest.cookies['some_cookie'] = '1'\r\n```\r\n\r\n**Expected behavior:** I expect that an int should be parsed to a str without me needing to do it myself.\r\n\r\n**Actual behavior:** Get following error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\twisted\\internet\\defer.py\", line 1661, in _inlineCallbacks\r\n result = current_context.run(gen.send, result)\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\scrapy\\core\\downloader\\middleware.py\", line 36, in process_request\r\n response = yield deferred_from_coro(method(request=request, spider=spider))\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\scrapy\\downloadermiddlewares\\cookies.py\", line 32, in process_request\r\n for cookie in self._get_request_cookies(jar, request):\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\scrapy\\downloadermiddlewares\\cookies.py\", line 109, in _get_request_cookies\r\n response = Response(request.url, headers={\"Set-Cookie\": formatted})\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\scrapy\\http\\response\\__init__.py\", line 32, in __init__\r\n self.headers = Headers(headers or {})\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\scrapy\\http\\headers.py\", line 11, in __init__\r\n super().__init__(seq)\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\scrapy\\utils\\datatypes.py\", line 20, in __init__\r\n self.update(seq)\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\scrapy\\utils\\datatypes.py\", line 56, in 
update\r\n super().update(iseq)\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\scrapy\\utils\\datatypes.py\", line 55, in <genexpr>\r\n iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq)\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\scrapy\\http\\headers.py\", line 26, in normvalue\r\n return [self._tobytes(x) for x in value]\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\scrapy\\http\\headers.py\", line 26, in <listcomp>\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\scrapy\\downloadermiddlewares\\cookies.py\", line 108, in <genexpr>\r\n formatted = filter(None, (self._format_cookie(c, request) for c in cookies))\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\scrapy\\downloadermiddlewares\\cookies.py\", line 87, in _format_cookie\r\n decoded[key] = cookie[key].decode(\"utf8\")\r\nAttributeError: 'int' object has no attribute 'decode'\r\n```\r\n\r\n**Reproduces how often:** [What percentage of the time does it reproduce?]\r\n\r\n### Versions\r\n\r\nScrapy : 2.5.0\r\nlxml : 4.6.3.0\r\nlibxml2 : 2.9.5\r\ncssselect : 1.1.0\r\nparsel : 1.6.0\r\nw3lib : 1.22.0\r\nTwisted : 21.7.0\r\nPython : 3.9.7 (tags/v3.9.7:1016ef3, Aug 30 2021, 20:19:38) [MSC v.1929 64 bit (AMD64)]\r\npyOpenSSL : 21.0.0 (OpenSSL 1.1.1l 24 Aug 2021)\r\ncryptography : 35.0.0\r\nPlatform : Windows-10-10.0.19043-SP0\r\n\r\n### Additional context\r\n\r\nfrom scrapy.http import Request\r\n\r\nFailing Code Example:\r\n```\r\nrequest = scrapy.Request(\r\n url = url, \r\n callback = callback\r\n)\r\nrequest.cookies['some_cookie'] = 1\r\n```\r\n\r\nSuccessful Code Example:\r\n```\r\nrequest = scrapy.Request(\r\n url = url, \r\n callback = callback\r\n)\r\nrequest.cookies['some_cookie'] = '1'\r\n```\r\n\r\n(edited to add code formatting to the traceback)\n", "before_files": [{"content": "import logging\nfrom collections import defaultdict\n\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.http import Response\nfrom scrapy.http.cookies import CookieJar\nfrom scrapy.utils.python import to_unicode\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CookiesMiddleware:\n \"\"\"This middleware enables working with sites that need cookies\"\"\"\n\n def __init__(self, debug=False):\n self.jars = defaultdict(CookieJar)\n self.debug = debug\n\n @classmethod\n def from_crawler(cls, crawler):\n if not crawler.settings.getbool('COOKIES_ENABLED'):\n raise NotConfigured\n return cls(crawler.settings.getbool('COOKIES_DEBUG'))\n\n def process_request(self, request, spider):\n if request.meta.get('dont_merge_cookies', False):\n return\n\n cookiejarkey = request.meta.get(\"cookiejar\")\n jar = self.jars[cookiejarkey]\n for cookie in self._get_request_cookies(jar, request):\n jar.set_cookie_if_ok(cookie, request)\n\n # set Cookie header\n request.headers.pop('Cookie', None)\n jar.add_cookie_header(request)\n self._debug_cookie(request, spider)\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_merge_cookies', False):\n return response\n\n # extract cookies from Set-Cookie and drop invalid/expired cookies\n cookiejarkey = request.meta.get(\"cookiejar\")\n jar = self.jars[cookiejarkey]\n jar.extract_cookies(response, request)\n self._debug_set_cookie(response, spider)\n\n return response\n\n def _debug_cookie(self, request, spider):\n if self.debug:\n cl = 
[to_unicode(c, errors='replace')\n for c in request.headers.getlist('Cookie')]\n if cl:\n cookies = \"\\n\".join(f\"Cookie: {c}\\n\" for c in cl)\n msg = f\"Sending cookies to: {request}\\n{cookies}\"\n logger.debug(msg, extra={'spider': spider})\n\n def _debug_set_cookie(self, response, spider):\n if self.debug:\n cl = [to_unicode(c, errors='replace')\n for c in response.headers.getlist('Set-Cookie')]\n if cl:\n cookies = \"\\n\".join(f\"Set-Cookie: {c}\\n\" for c in cl)\n msg = f\"Received cookies from: {response}\\n{cookies}\"\n logger.debug(msg, extra={'spider': spider})\n\n def _format_cookie(self, cookie, request):\n \"\"\"\n Given a dict consisting of cookie components, return its string representation.\n Decode from bytes if necessary.\n \"\"\"\n decoded = {}\n for key in (\"name\", \"value\", \"path\", \"domain\"):\n if cookie.get(key) is None:\n if key in (\"name\", \"value\"):\n msg = \"Invalid cookie found in request {}: {} ('{}' is missing)\"\n logger.warning(msg.format(request, cookie, key))\n return\n continue\n if isinstance(cookie[key], str):\n decoded[key] = cookie[key]\n else:\n try:\n decoded[key] = cookie[key].decode(\"utf8\")\n except UnicodeDecodeError:\n logger.warning(\"Non UTF-8 encoded cookie found in request %s: %s\",\n request, cookie)\n decoded[key] = cookie[key].decode(\"latin1\", errors=\"replace\")\n\n cookie_str = f\"{decoded.pop('name')}={decoded.pop('value')}\"\n for key, value in decoded.items(): # path, domain\n cookie_str += f\"; {key.capitalize()}={value}\"\n return cookie_str\n\n def _get_request_cookies(self, jar, request):\n \"\"\"\n Extract cookies from the Request.cookies attribute\n \"\"\"\n if not request.cookies:\n return []\n elif isinstance(request.cookies, dict):\n cookies = ({\"name\": k, \"value\": v} for k, v in request.cookies.items())\n else:\n cookies = request.cookies\n formatted = filter(None, (self._format_cookie(c, request) for c in cookies))\n response = Response(request.url, headers={\"Set-Cookie\": formatted})\n return jar.make_cookies(response, request)\n", "path": "scrapy/downloadermiddlewares/cookies.py"}], "after_files": [{"content": "import logging\nfrom collections import defaultdict\n\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.http import Response\nfrom scrapy.http.cookies import CookieJar\nfrom scrapy.utils.python import to_unicode\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CookiesMiddleware:\n \"\"\"This middleware enables working with sites that need cookies\"\"\"\n\n def __init__(self, debug=False):\n self.jars = defaultdict(CookieJar)\n self.debug = debug\n\n @classmethod\n def from_crawler(cls, crawler):\n if not crawler.settings.getbool('COOKIES_ENABLED'):\n raise NotConfigured\n return cls(crawler.settings.getbool('COOKIES_DEBUG'))\n\n def process_request(self, request, spider):\n if request.meta.get('dont_merge_cookies', False):\n return\n\n cookiejarkey = request.meta.get(\"cookiejar\")\n jar = self.jars[cookiejarkey]\n for cookie in self._get_request_cookies(jar, request):\n jar.set_cookie_if_ok(cookie, request)\n\n # set Cookie header\n request.headers.pop('Cookie', None)\n jar.add_cookie_header(request)\n self._debug_cookie(request, spider)\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_merge_cookies', False):\n return response\n\n # extract cookies from Set-Cookie and drop invalid/expired cookies\n cookiejarkey = request.meta.get(\"cookiejar\")\n jar = self.jars[cookiejarkey]\n jar.extract_cookies(response, request)\n 
self._debug_set_cookie(response, spider)\n\n return response\n\n def _debug_cookie(self, request, spider):\n if self.debug:\n cl = [to_unicode(c, errors='replace')\n for c in request.headers.getlist('Cookie')]\n if cl:\n cookies = \"\\n\".join(f\"Cookie: {c}\\n\" for c in cl)\n msg = f\"Sending cookies to: {request}\\n{cookies}\"\n logger.debug(msg, extra={'spider': spider})\n\n def _debug_set_cookie(self, response, spider):\n if self.debug:\n cl = [to_unicode(c, errors='replace')\n for c in response.headers.getlist('Set-Cookie')]\n if cl:\n cookies = \"\\n\".join(f\"Set-Cookie: {c}\\n\" for c in cl)\n msg = f\"Received cookies from: {response}\\n{cookies}\"\n logger.debug(msg, extra={'spider': spider})\n\n def _format_cookie(self, cookie, request):\n \"\"\"\n Given a dict consisting of cookie components, return its string representation.\n Decode from bytes if necessary.\n \"\"\"\n decoded = {}\n for key in (\"name\", \"value\", \"path\", \"domain\"):\n if cookie.get(key) is None:\n if key in (\"name\", \"value\"):\n msg = \"Invalid cookie found in request {}: {} ('{}' is missing)\"\n logger.warning(msg.format(request, cookie, key))\n return\n continue\n if isinstance(cookie[key], (bool, float, int, str)):\n decoded[key] = str(cookie[key])\n else:\n try:\n decoded[key] = cookie[key].decode(\"utf8\")\n except UnicodeDecodeError:\n logger.warning(\"Non UTF-8 encoded cookie found in request %s: %s\",\n request, cookie)\n decoded[key] = cookie[key].decode(\"latin1\", errors=\"replace\")\n\n cookie_str = f\"{decoded.pop('name')}={decoded.pop('value')}\"\n for key, value in decoded.items(): # path, domain\n cookie_str += f\"; {key.capitalize()}={value}\"\n return cookie_str\n\n def _get_request_cookies(self, jar, request):\n \"\"\"\n Extract cookies from the Request.cookies attribute\n \"\"\"\n if not request.cookies:\n return []\n elif isinstance(request.cookies, dict):\n cookies = ({\"name\": k, \"value\": v} for k, v in request.cookies.items())\n else:\n cookies = request.cookies\n formatted = filter(None, (self._format_cookie(c, request) for c in cookies))\n response = Response(request.url, headers={\"Set-Cookie\": formatted})\n return jar.make_cookies(response, request)\n", "path": "scrapy/downloadermiddlewares/cookies.py"}]}
2,754
136
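A minimal standalone sketch of the value normalization introduced by the golden diff in the record above: primitive cookie components are stringified before use, and only byte strings go through `.decode()`, so an int cookie value no longer raises `AttributeError`. The function name and assertions are illustrative, not Scrapy API:

```python
def normalize_cookie_value(value):
    # Primitive values (as in the diff: bool, float, int, str) become plain strings.
    if isinstance(value, (bool, float, int, str)):
        return str(value)
    # Byte strings are decoded, falling back to latin1 with replacement.
    try:
        return value.decode("utf8")
    except UnicodeDecodeError:
        return value.decode("latin1", errors="replace")


assert normalize_cookie_value(1) == "1"
assert normalize_cookie_value("1") == "1"
assert normalize_cookie_value(b"session") == "session"
```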
gh_patches_debug_14405
rasdani/github-patches
git_diff
RedHatInsights__insights-core-2165
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- lscpu datasource missing for sos reports If we add a new spec, let's check if it exists in sos report and include a spec for it if it does. lscpu is in `sos_commands/processor/lscpu`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `insights/specs/sos_archive.py` Content: ``` 1 from functools import partial 2 from insights.specs import Specs 3 from insights.core.context import SosArchiveContext 4 from insights.core.spec_factory import simple_file, first_of, first_file, glob_file 5 6 first_file = partial(first_file, context=SosArchiveContext) 7 glob_file = partial(glob_file, context=SosArchiveContext) 8 simple_file = partial(simple_file, context=SosArchiveContext) 9 10 11 class SosSpecs(Specs): 12 auditctl_status = simple_file("sos_commands/auditd/auditctl_-s") 13 blkid = simple_file("sos_commands/block/blkid_-c_.dev.null") 14 candlepin_log = first_of([ 15 simple_file("/var/log/candlepin/candlepin.log"), 16 simple_file("sos_commands/foreman/foreman-debug/var/log/candlepin/candlepin.log") 17 ]) 18 candlepin_error_log = first_of([ 19 simple_file("var/log/candlepin/error.log"), 20 simple_file(r"sos_commands/foreman/foreman-debug/var/log/candlepin/error.log") 21 ]) 22 catalina_out = glob_file("var/log/tomcat*/catalina.out") 23 catalina_server_log = glob_file("var/log/tomcat*/catalina*.log") 24 ceph_osd_tree_text = simple_file("sos_commands/ceph/ceph_osd_tree") 25 ceph_report = simple_file("sos_commands/ceph/ceph_report") 26 ceph_health_detail = simple_file("sos_commands/ceph/ceph_health_detail_--format_json-pretty") 27 chkconfig = first_file(["sos_commands/startup/chkconfig_--list", "sos_commands/services/chkconfig_--list"]) 28 date = first_of([simple_file("sos_commands/general/date"), simple_file("sos_commands/date/date")]) 29 df__al = first_file(["sos_commands/filesys/df_-al", "sos_commands/filesys/df_-al_-x_autofs"]) 30 display_java = simple_file("sos_commands/java/alternatives_--display_java") 31 docker_info = simple_file("sos_commands/docker/docker_info") 32 docker_list_containers = first_file(["sos_commands/docker/docker_ps_-a", "sos_commands/docker/docker_ps"]) 33 dmesg = first_file(["sos_commands/kernel/dmesg", "var/log/dmesg"]) 34 dmidecode = simple_file("sos_commands/hardware/dmidecode") 35 dmsetup_info = simple_file("sos_commands/devicemapper/dmsetup_info_-c") 36 dumpe2fs_h = glob_file("sos_commands/filesys/dumpe2fs_-h_*") 37 ethtool = glob_file("sos_commands/networking/ethtool_*", ignore="ethtool_-.*") 38 ethtool_S = glob_file("sos_commands/networking/ethtool_-S_*") 39 ethtool_T = glob_file("sos_commands/networking/ethtool_-T_*") 40 ethtool_a = glob_file("sos_commands/networking/ethtool_-a_*") 41 ethtool_c = glob_file("sos_commands/networking/ethtool_-c_*") 42 ethtool_g = glob_file("sos_commands/networking/ethtool_-g_*") 43 ethtool_i = glob_file("sos_commands/networking/ethtool_-i_*") 44 ethtool_k = glob_file("sos_commands/networking/ethtool_-k_*") 45 fdisk_l_sos = first_of([glob_file(r"sos_commands/filesys/fdisk_-l_*"), glob_file(r"sos_commands/block/fdisk_-l_*")]) 46 foreman_production_log = first_of([simple_file("/var/log/foreman/production.log"), simple_file("sos_commands/foreman/foreman-debug/var/log/foreman/production.log")]) 47 foreman_proxy_conf = first_of([simple_file("/etc/foreman-proxy/settings.yml"), 
simple_file("sos_commands/foreman/foreman-debug/etc/foreman-proxy/settings.yml")]) 48 foreman_proxy_log = first_of([simple_file("/var/log/foreman-proxy/proxy.log"), simple_file("sos_commands/foreman/foreman-debug/var/log/foreman-proxy/proxy.log")]) 49 foreman_satellite_log = first_of([simple_file("/var/log/foreman-installer/satellite.log"), simple_file("sos_commands/foreman/foreman-debug/var/log/foreman-installer/satellite.log")]) 50 foreman_ssl_access_ssl_log = first_file(["var/log/httpd/foreman-ssl_access_ssl.log", r"sos_commands/foreman/foreman-debug/var/log/httpd/foreman-ssl_access_ssl.log"]) 51 getcert_list = first_file(["sos_commands/ipa/ipa-getcert_list", "sos_commands/ipa/getcert_list"]) 52 gluster_v_info = simple_file("sos_commands/gluster/gluster_volume_info") 53 gluster_v_status = simple_file("sos_commands/gluster/gluster_volume_status") 54 hostname = first_file(["sos_commands/general/hostname_-f", "sos_commands/general/hostname", "sos_commands/host/hostname_-f", "sos_commands/host/hostname", "/etc/hostname", "hostname"]) 55 installed_rpms = simple_file("installed-rpms") 56 ip_addr = first_of([simple_file("sos_commands/networking/ip_-d_address"), simple_file("sos_commands/networking/ip_address")]) 57 ip_route_show_table_all = simple_file("sos_commands/networking/ip_route_show_table_all") 58 ip_s_link = first_of([simple_file("sos_commands/networking/ip_-s_-d_link"), simple_file("sos_commands/networking/ip_-s_link"), simple_file("sos_commands/networking/ip_link")]) 59 iptables = first_file(["/etc/sysconfig/iptables", "/etc/sysconfig/iptables.save"]) 60 journal_since_boot = first_of([simple_file("sos_commands/logs/journalctl_--no-pager_--boot"), simple_file("sos_commands/logs/journalctl_--no-pager_--catalog_--boot")]) 61 locale = simple_file("sos_commands/i18n/locale") 62 lsblk = simple_file("sos_commands/block/lsblk") 63 lsinitrd = simple_file("sos_commands/boot/lsinitrd") 64 lsof = simple_file("sos_commands/process/lsof_-b_M_-n_-l") 65 lsmod = simple_file("sos_commands/kernel/lsmod") 66 lspci = first_of([ 67 simple_file("sos_commands/pci/lspci_-nnvv"), 68 simple_file("sos_commands/pci/lspci"), 69 simple_file("sos_commands/pci/lspci_-nvv") 70 ]) 71 lsscsi = simple_file("sos_commands/scsi/lsscsi") 72 ls_dev = first_file(["sos_commands/block/ls_-lanR_.dev", "sos_commands/devicemapper/ls_-lanR_.dev"]) 73 lvs = first_file(["sos_commands/lvm2/lvs_-a_-o_lv_tags_devices_--config_global_locking_type_0", "sos_commands/lvm2/lvs_-a_-o_devices"]) 74 mount = simple_file("sos_commands/filesys/mount_-l") 75 multipath__v4__ll = first_file(["sos_commands/multipath/multipath_-v4_-ll", "sos_commands/devicemapper/multipath_-v4_-ll"]) 76 netstat = first_file(["sos_commands/networking/netstat_-neopa", "sos_commands/networking/netstat_-W_-neopa", "sos_commands/networking/netstat_-T_-neopa"]) 77 netstat_agn = first_of([simple_file("sos_commands/networking/netstat_-agn"), simple_file("sos_commands/networking/netstat_-W_-agn"), simple_file("sos_commands/networking/netstat_-T_-agn")]) 78 netstat_s = simple_file("sos_commands/networking/netstat_-s") 79 nmcli_dev_show = simple_file("sos_commands/networking/nmcli_device_show") 80 nmcli_dev_show_sos = glob_file(["sos_commands/networking/nmcli_dev_show_*", "sos_commands/networkmanager/nmcli_dev_show_*"]) 81 ntptime = simple_file("sos_commands/ntp/ntptime") 82 pcs_config = simple_file("sos_commands/pacemaker/pcs_config") 83 pcs_status = simple_file("sos_commands/pacemaker/pcs_status") 84 ps_alxwww = simple_file("sos_commands/process/ps_alxwww") 85 ps_aux = 
first_file(["sos_commands/process/ps_aux", "sos_commands/process/ps_auxwww", "sos_commands/process/ps_auxcww"]) 86 ps_auxcww = first_file(["sos_commands/process/ps_auxcww", "sos_commands/process/ps_auxwww", "sos_commands/process/ps_aux"]) 87 ps_auxww = first_file(["sos_commands/process/ps_auxww", "sos_commands/process/ps_auxwww", "sos_commands/process/ps_aux", "sos_commands/process/ps_auxcww"]) 88 puppet_ssl_cert_ca_pem = simple_file("sos_commands/foreman/foreman-debug/var/lib/puppet/ssl/certs/ca.pem") 89 pvs = first_file(["sos_commands/lvm2/pvs_-a_-v_-o_pv_mda_free_pv_mda_size_pv_mda_count_pv_mda_used_count_pe_start_--config_global_locking_type_0", "sos_commands/lvm2/pvs_-a_-v"]) 90 qpid_stat_q = first_of([ 91 simple_file("qpid_stat_queues"), 92 simple_file("qpid-stat-q"), 93 simple_file("sos_commands/foreman/foreman-debug/qpid_stat_queues"), 94 simple_file("sos_commands/foreman/foreman-debug/qpid-stat-q") 95 ]) 96 qpid_stat_u = first_of([ 97 simple_file("qpid_stat_subscriptions"), 98 simple_file("qpid-stat-u"), 99 simple_file("sos_commands/foreman/foreman-debug/qpid_stat_subscriptions"), 100 simple_file("sos_commands/foreman/foreman-debug/qpid-stat-u") 101 ]) 102 rabbitmq_report = simple_file("sos_commands/rabbitmq/rabbitmqctl_report") 103 rabbitmq_report_of_containers = glob_file("sos_commands/rabbitmq/docker_exec_-t_rabbitmq-bundle-docker-*_rabbitmqctl_report") 104 rhn_charsets = first_file(["sos_commands/satellite/rhn-charsets", "sos_commands/rhn/rhn-charsets"]) 105 root_crontab = first_file(["sos_commands/crontab/root_crontab", "sos_commands/cron/root_crontab"]) 106 route = simple_file("sos_commands/networking/route_-n") 107 sestatus = simple_file("sos_commands/selinux/sestatus_-b") 108 subscription_manager_list_consumed = first_file([ 109 'sos_commands/yum/subscription-manager_list_--consumed', 110 'sos_commands/subscription_manager/subscription-manager_list_--consumed', 111 'sos_commands/general/subscription-manager_list_--consumed'] 112 ) 113 subscription_manager_list_installed = first_file([ 114 'sos_commands/yum/subscription-manager_list_--installed', 115 'sos_commands/subscription_manager/subscription-manager_list_--installed', 116 'sos_commands/general/subscription-manager_list_--installed'] 117 ) 118 sysctl = simple_file("sos_commands/kernel/sysctl_-a") 119 systemctl_list_unit_files = simple_file("sos_commands/systemd/systemctl_list-unit-files") 120 systemctl_list_units = first_file(["sos_commands/systemd/systemctl_list-units", "sos_commands/systemd/systemctl_list-units_--all"]) 121 teamdctl_config_dump = glob_file("sos_commands/teamd/teamdctl_*_config_dump") 122 teamdctl_state_dump = glob_file("sos_commands/teamd/teamdctl_*_state_dump") 123 uname = simple_file("sos_commands/kernel/uname_-a") 124 uptime = first_of([simple_file("sos_commands/general/uptime"), simple_file("sos_commands/host/uptime")]) 125 var_qemu_xml = glob_file(r"var/run/libvirt/qemu/*.xml") 126 vdsm_import_log = glob_file("var/log/vdsm/import/import-*.log") 127 vgdisplay = first_file(["sos_commands/lvm2/vgdisplay_-vv_--config_global_locking_type_0", "sos_commands/lvm2/vgdisplay_-vv"]) 128 vgs = first_file(["sos_commands/lvm2/vgs_-v_-o_vg_mda_count_vg_mda_free_vg_mda_size_vg_mda_used_count_vg_tags_--config_global_locking_type_0", "sos_commands/lvm2/vgs_-v"]) 129 xfs_info = glob_file("sos_commands/xfs/xfs_info*") 130 yum_repolist = simple_file("sos_commands/yum/yum_-C_repolist") 131 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git 
diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -60,6 +60,7 @@ journal_since_boot = first_of([simple_file("sos_commands/logs/journalctl_--no-pager_--boot"), simple_file("sos_commands/logs/journalctl_--no-pager_--catalog_--boot")]) locale = simple_file("sos_commands/i18n/locale") lsblk = simple_file("sos_commands/block/lsblk") + lscpu = simple_file("sos_commands/processor/lscpu") lsinitrd = simple_file("sos_commands/boot/lsinitrd") lsof = simple_file("sos_commands/process/lsof_-b_M_-n_-l") lsmod = simple_file("sos_commands/kernel/lsmod")
{"golden_diff": "diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py\n--- a/insights/specs/sos_archive.py\n+++ b/insights/specs/sos_archive.py\n@@ -60,6 +60,7 @@\n journal_since_boot = first_of([simple_file(\"sos_commands/logs/journalctl_--no-pager_--boot\"), simple_file(\"sos_commands/logs/journalctl_--no-pager_--catalog_--boot\")])\n locale = simple_file(\"sos_commands/i18n/locale\")\n lsblk = simple_file(\"sos_commands/block/lsblk\")\n+ lscpu = simple_file(\"sos_commands/processor/lscpu\")\n lsinitrd = simple_file(\"sos_commands/boot/lsinitrd\")\n lsof = simple_file(\"sos_commands/process/lsof_-b_M_-n_-l\")\n lsmod = simple_file(\"sos_commands/kernel/lsmod\")\n", "issue": "lscpu datasource missing for sos reports\nIf we add a new spec, let's check if it exists in sos report and include a spec for it if it does. lscpu is in `sos_commands/processor/lscpu`.\n", "before_files": [{"content": "from functools import partial\nfrom insights.specs import Specs\nfrom insights.core.context import SosArchiveContext\nfrom insights.core.spec_factory import simple_file, first_of, first_file, glob_file\n\nfirst_file = partial(first_file, context=SosArchiveContext)\nglob_file = partial(glob_file, context=SosArchiveContext)\nsimple_file = partial(simple_file, context=SosArchiveContext)\n\n\nclass SosSpecs(Specs):\n auditctl_status = simple_file(\"sos_commands/auditd/auditctl_-s\")\n blkid = simple_file(\"sos_commands/block/blkid_-c_.dev.null\")\n candlepin_log = first_of([\n simple_file(\"/var/log/candlepin/candlepin.log\"),\n simple_file(\"sos_commands/foreman/foreman-debug/var/log/candlepin/candlepin.log\")\n ])\n candlepin_error_log = first_of([\n simple_file(\"var/log/candlepin/error.log\"),\n simple_file(r\"sos_commands/foreman/foreman-debug/var/log/candlepin/error.log\")\n ])\n catalina_out = glob_file(\"var/log/tomcat*/catalina.out\")\n catalina_server_log = glob_file(\"var/log/tomcat*/catalina*.log\")\n ceph_osd_tree_text = simple_file(\"sos_commands/ceph/ceph_osd_tree\")\n ceph_report = simple_file(\"sos_commands/ceph/ceph_report\")\n ceph_health_detail = simple_file(\"sos_commands/ceph/ceph_health_detail_--format_json-pretty\")\n chkconfig = first_file([\"sos_commands/startup/chkconfig_--list\", \"sos_commands/services/chkconfig_--list\"])\n date = first_of([simple_file(\"sos_commands/general/date\"), simple_file(\"sos_commands/date/date\")])\n df__al = first_file([\"sos_commands/filesys/df_-al\", \"sos_commands/filesys/df_-al_-x_autofs\"])\n display_java = simple_file(\"sos_commands/java/alternatives_--display_java\")\n docker_info = simple_file(\"sos_commands/docker/docker_info\")\n docker_list_containers = first_file([\"sos_commands/docker/docker_ps_-a\", \"sos_commands/docker/docker_ps\"])\n dmesg = first_file([\"sos_commands/kernel/dmesg\", \"var/log/dmesg\"])\n dmidecode = simple_file(\"sos_commands/hardware/dmidecode\")\n dmsetup_info = simple_file(\"sos_commands/devicemapper/dmsetup_info_-c\")\n dumpe2fs_h = glob_file(\"sos_commands/filesys/dumpe2fs_-h_*\")\n ethtool = glob_file(\"sos_commands/networking/ethtool_*\", ignore=\"ethtool_-.*\")\n ethtool_S = glob_file(\"sos_commands/networking/ethtool_-S_*\")\n ethtool_T = glob_file(\"sos_commands/networking/ethtool_-T_*\")\n ethtool_a = glob_file(\"sos_commands/networking/ethtool_-a_*\")\n ethtool_c = glob_file(\"sos_commands/networking/ethtool_-c_*\")\n ethtool_g = glob_file(\"sos_commands/networking/ethtool_-g_*\")\n ethtool_i = glob_file(\"sos_commands/networking/ethtool_-i_*\")\n ethtool_k = 
glob_file(\"sos_commands/networking/ethtool_-k_*\")\n fdisk_l_sos = first_of([glob_file(r\"sos_commands/filesys/fdisk_-l_*\"), glob_file(r\"sos_commands/block/fdisk_-l_*\")])\n foreman_production_log = first_of([simple_file(\"/var/log/foreman/production.log\"), simple_file(\"sos_commands/foreman/foreman-debug/var/log/foreman/production.log\")])\n foreman_proxy_conf = first_of([simple_file(\"/etc/foreman-proxy/settings.yml\"), simple_file(\"sos_commands/foreman/foreman-debug/etc/foreman-proxy/settings.yml\")])\n foreman_proxy_log = first_of([simple_file(\"/var/log/foreman-proxy/proxy.log\"), simple_file(\"sos_commands/foreman/foreman-debug/var/log/foreman-proxy/proxy.log\")])\n foreman_satellite_log = first_of([simple_file(\"/var/log/foreman-installer/satellite.log\"), simple_file(\"sos_commands/foreman/foreman-debug/var/log/foreman-installer/satellite.log\")])\n foreman_ssl_access_ssl_log = first_file([\"var/log/httpd/foreman-ssl_access_ssl.log\", r\"sos_commands/foreman/foreman-debug/var/log/httpd/foreman-ssl_access_ssl.log\"])\n getcert_list = first_file([\"sos_commands/ipa/ipa-getcert_list\", \"sos_commands/ipa/getcert_list\"])\n gluster_v_info = simple_file(\"sos_commands/gluster/gluster_volume_info\")\n gluster_v_status = simple_file(\"sos_commands/gluster/gluster_volume_status\")\n hostname = first_file([\"sos_commands/general/hostname_-f\", \"sos_commands/general/hostname\", \"sos_commands/host/hostname_-f\", \"sos_commands/host/hostname\", \"/etc/hostname\", \"hostname\"])\n installed_rpms = simple_file(\"installed-rpms\")\n ip_addr = first_of([simple_file(\"sos_commands/networking/ip_-d_address\"), simple_file(\"sos_commands/networking/ip_address\")])\n ip_route_show_table_all = simple_file(\"sos_commands/networking/ip_route_show_table_all\")\n ip_s_link = first_of([simple_file(\"sos_commands/networking/ip_-s_-d_link\"), simple_file(\"sos_commands/networking/ip_-s_link\"), simple_file(\"sos_commands/networking/ip_link\")])\n iptables = first_file([\"/etc/sysconfig/iptables\", \"/etc/sysconfig/iptables.save\"])\n journal_since_boot = first_of([simple_file(\"sos_commands/logs/journalctl_--no-pager_--boot\"), simple_file(\"sos_commands/logs/journalctl_--no-pager_--catalog_--boot\")])\n locale = simple_file(\"sos_commands/i18n/locale\")\n lsblk = simple_file(\"sos_commands/block/lsblk\")\n lsinitrd = simple_file(\"sos_commands/boot/lsinitrd\")\n lsof = simple_file(\"sos_commands/process/lsof_-b_M_-n_-l\")\n lsmod = simple_file(\"sos_commands/kernel/lsmod\")\n lspci = first_of([\n simple_file(\"sos_commands/pci/lspci_-nnvv\"),\n simple_file(\"sos_commands/pci/lspci\"),\n simple_file(\"sos_commands/pci/lspci_-nvv\")\n ])\n lsscsi = simple_file(\"sos_commands/scsi/lsscsi\")\n ls_dev = first_file([\"sos_commands/block/ls_-lanR_.dev\", \"sos_commands/devicemapper/ls_-lanR_.dev\"])\n lvs = first_file([\"sos_commands/lvm2/lvs_-a_-o_lv_tags_devices_--config_global_locking_type_0\", \"sos_commands/lvm2/lvs_-a_-o_devices\"])\n mount = simple_file(\"sos_commands/filesys/mount_-l\")\n multipath__v4__ll = first_file([\"sos_commands/multipath/multipath_-v4_-ll\", \"sos_commands/devicemapper/multipath_-v4_-ll\"])\n netstat = first_file([\"sos_commands/networking/netstat_-neopa\", \"sos_commands/networking/netstat_-W_-neopa\", \"sos_commands/networking/netstat_-T_-neopa\"])\n netstat_agn = first_of([simple_file(\"sos_commands/networking/netstat_-agn\"), simple_file(\"sos_commands/networking/netstat_-W_-agn\"), simple_file(\"sos_commands/networking/netstat_-T_-agn\")])\n netstat_s = 
simple_file(\"sos_commands/networking/netstat_-s\")\n nmcli_dev_show = simple_file(\"sos_commands/networking/nmcli_device_show\")\n nmcli_dev_show_sos = glob_file([\"sos_commands/networking/nmcli_dev_show_*\", \"sos_commands/networkmanager/nmcli_dev_show_*\"])\n ntptime = simple_file(\"sos_commands/ntp/ntptime\")\n pcs_config = simple_file(\"sos_commands/pacemaker/pcs_config\")\n pcs_status = simple_file(\"sos_commands/pacemaker/pcs_status\")\n ps_alxwww = simple_file(\"sos_commands/process/ps_alxwww\")\n ps_aux = first_file([\"sos_commands/process/ps_aux\", \"sos_commands/process/ps_auxwww\", \"sos_commands/process/ps_auxcww\"])\n ps_auxcww = first_file([\"sos_commands/process/ps_auxcww\", \"sos_commands/process/ps_auxwww\", \"sos_commands/process/ps_aux\"])\n ps_auxww = first_file([\"sos_commands/process/ps_auxww\", \"sos_commands/process/ps_auxwww\", \"sos_commands/process/ps_aux\", \"sos_commands/process/ps_auxcww\"])\n puppet_ssl_cert_ca_pem = simple_file(\"sos_commands/foreman/foreman-debug/var/lib/puppet/ssl/certs/ca.pem\")\n pvs = first_file([\"sos_commands/lvm2/pvs_-a_-v_-o_pv_mda_free_pv_mda_size_pv_mda_count_pv_mda_used_count_pe_start_--config_global_locking_type_0\", \"sos_commands/lvm2/pvs_-a_-v\"])\n qpid_stat_q = first_of([\n simple_file(\"qpid_stat_queues\"),\n simple_file(\"qpid-stat-q\"),\n simple_file(\"sos_commands/foreman/foreman-debug/qpid_stat_queues\"),\n simple_file(\"sos_commands/foreman/foreman-debug/qpid-stat-q\")\n ])\n qpid_stat_u = first_of([\n simple_file(\"qpid_stat_subscriptions\"),\n simple_file(\"qpid-stat-u\"),\n simple_file(\"sos_commands/foreman/foreman-debug/qpid_stat_subscriptions\"),\n simple_file(\"sos_commands/foreman/foreman-debug/qpid-stat-u\")\n ])\n rabbitmq_report = simple_file(\"sos_commands/rabbitmq/rabbitmqctl_report\")\n rabbitmq_report_of_containers = glob_file(\"sos_commands/rabbitmq/docker_exec_-t_rabbitmq-bundle-docker-*_rabbitmqctl_report\")\n rhn_charsets = first_file([\"sos_commands/satellite/rhn-charsets\", \"sos_commands/rhn/rhn-charsets\"])\n root_crontab = first_file([\"sos_commands/crontab/root_crontab\", \"sos_commands/cron/root_crontab\"])\n route = simple_file(\"sos_commands/networking/route_-n\")\n sestatus = simple_file(\"sos_commands/selinux/sestatus_-b\")\n subscription_manager_list_consumed = first_file([\n 'sos_commands/yum/subscription-manager_list_--consumed',\n 'sos_commands/subscription_manager/subscription-manager_list_--consumed',\n 'sos_commands/general/subscription-manager_list_--consumed']\n )\n subscription_manager_list_installed = first_file([\n 'sos_commands/yum/subscription-manager_list_--installed',\n 'sos_commands/subscription_manager/subscription-manager_list_--installed',\n 'sos_commands/general/subscription-manager_list_--installed']\n )\n sysctl = simple_file(\"sos_commands/kernel/sysctl_-a\")\n systemctl_list_unit_files = simple_file(\"sos_commands/systemd/systemctl_list-unit-files\")\n systemctl_list_units = first_file([\"sos_commands/systemd/systemctl_list-units\", \"sos_commands/systemd/systemctl_list-units_--all\"])\n teamdctl_config_dump = glob_file(\"sos_commands/teamd/teamdctl_*_config_dump\")\n teamdctl_state_dump = glob_file(\"sos_commands/teamd/teamdctl_*_state_dump\")\n uname = simple_file(\"sos_commands/kernel/uname_-a\")\n uptime = first_of([simple_file(\"sos_commands/general/uptime\"), simple_file(\"sos_commands/host/uptime\")])\n var_qemu_xml = glob_file(r\"var/run/libvirt/qemu/*.xml\")\n vdsm_import_log = glob_file(\"var/log/vdsm/import/import-*.log\")\n vgdisplay = 
first_file([\"sos_commands/lvm2/vgdisplay_-vv_--config_global_locking_type_0\", \"sos_commands/lvm2/vgdisplay_-vv\"])\n vgs = first_file([\"sos_commands/lvm2/vgs_-v_-o_vg_mda_count_vg_mda_free_vg_mda_size_vg_mda_used_count_vg_tags_--config_global_locking_type_0\", \"sos_commands/lvm2/vgs_-v\"])\n xfs_info = glob_file(\"sos_commands/xfs/xfs_info*\")\n yum_repolist = simple_file(\"sos_commands/yum/yum_-C_repolist\")\n", "path": "insights/specs/sos_archive.py"}], "after_files": [{"content": "from functools import partial\nfrom insights.specs import Specs\nfrom insights.core.context import SosArchiveContext\nfrom insights.core.spec_factory import simple_file, first_of, first_file, glob_file\n\nfirst_file = partial(first_file, context=SosArchiveContext)\nglob_file = partial(glob_file, context=SosArchiveContext)\nsimple_file = partial(simple_file, context=SosArchiveContext)\n\n\nclass SosSpecs(Specs):\n auditctl_status = simple_file(\"sos_commands/auditd/auditctl_-s\")\n blkid = simple_file(\"sos_commands/block/blkid_-c_.dev.null\")\n candlepin_log = first_of([\n simple_file(\"/var/log/candlepin/candlepin.log\"),\n simple_file(\"sos_commands/foreman/foreman-debug/var/log/candlepin/candlepin.log\")\n ])\n candlepin_error_log = first_of([\n simple_file(\"var/log/candlepin/error.log\"),\n simple_file(r\"sos_commands/foreman/foreman-debug/var/log/candlepin/error.log\")\n ])\n catalina_out = glob_file(\"var/log/tomcat*/catalina.out\")\n catalina_server_log = glob_file(\"var/log/tomcat*/catalina*.log\")\n ceph_osd_tree_text = simple_file(\"sos_commands/ceph/ceph_osd_tree\")\n ceph_report = simple_file(\"sos_commands/ceph/ceph_report\")\n ceph_health_detail = simple_file(\"sos_commands/ceph/ceph_health_detail_--format_json-pretty\")\n chkconfig = first_file([\"sos_commands/startup/chkconfig_--list\", \"sos_commands/services/chkconfig_--list\"])\n date = first_of([simple_file(\"sos_commands/general/date\"), simple_file(\"sos_commands/date/date\")])\n df__al = first_file([\"sos_commands/filesys/df_-al\", \"sos_commands/filesys/df_-al_-x_autofs\"])\n display_java = simple_file(\"sos_commands/java/alternatives_--display_java\")\n docker_info = simple_file(\"sos_commands/docker/docker_info\")\n docker_list_containers = first_file([\"sos_commands/docker/docker_ps_-a\", \"sos_commands/docker/docker_ps\"])\n dmesg = first_file([\"sos_commands/kernel/dmesg\", \"var/log/dmesg\"])\n dmidecode = simple_file(\"sos_commands/hardware/dmidecode\")\n dmsetup_info = simple_file(\"sos_commands/devicemapper/dmsetup_info_-c\")\n dumpe2fs_h = glob_file(\"sos_commands/filesys/dumpe2fs_-h_*\")\n ethtool = glob_file(\"sos_commands/networking/ethtool_*\", ignore=\"ethtool_-.*\")\n ethtool_S = glob_file(\"sos_commands/networking/ethtool_-S_*\")\n ethtool_T = glob_file(\"sos_commands/networking/ethtool_-T_*\")\n ethtool_a = glob_file(\"sos_commands/networking/ethtool_-a_*\")\n ethtool_c = glob_file(\"sos_commands/networking/ethtool_-c_*\")\n ethtool_g = glob_file(\"sos_commands/networking/ethtool_-g_*\")\n ethtool_i = glob_file(\"sos_commands/networking/ethtool_-i_*\")\n ethtool_k = glob_file(\"sos_commands/networking/ethtool_-k_*\")\n fdisk_l_sos = first_of([glob_file(r\"sos_commands/filesys/fdisk_-l_*\"), glob_file(r\"sos_commands/block/fdisk_-l_*\")])\n foreman_production_log = first_of([simple_file(\"/var/log/foreman/production.log\"), simple_file(\"sos_commands/foreman/foreman-debug/var/log/foreman/production.log\")])\n foreman_proxy_conf = first_of([simple_file(\"/etc/foreman-proxy/settings.yml\"), 
simple_file(\"sos_commands/foreman/foreman-debug/etc/foreman-proxy/settings.yml\")])\n foreman_proxy_log = first_of([simple_file(\"/var/log/foreman-proxy/proxy.log\"), simple_file(\"sos_commands/foreman/foreman-debug/var/log/foreman-proxy/proxy.log\")])\n foreman_satellite_log = first_of([simple_file(\"/var/log/foreman-installer/satellite.log\"), simple_file(\"sos_commands/foreman/foreman-debug/var/log/foreman-installer/satellite.log\")])\n foreman_ssl_access_ssl_log = first_file([\"var/log/httpd/foreman-ssl_access_ssl.log\", r\"sos_commands/foreman/foreman-debug/var/log/httpd/foreman-ssl_access_ssl.log\"])\n getcert_list = first_file([\"sos_commands/ipa/ipa-getcert_list\", \"sos_commands/ipa/getcert_list\"])\n gluster_v_info = simple_file(\"sos_commands/gluster/gluster_volume_info\")\n gluster_v_status = simple_file(\"sos_commands/gluster/gluster_volume_status\")\n hostname = first_file([\"sos_commands/general/hostname_-f\", \"sos_commands/general/hostname\", \"sos_commands/host/hostname_-f\", \"sos_commands/host/hostname\", \"/etc/hostname\", \"hostname\"])\n installed_rpms = simple_file(\"installed-rpms\")\n ip_addr = first_of([simple_file(\"sos_commands/networking/ip_-d_address\"), simple_file(\"sos_commands/networking/ip_address\")])\n ip_route_show_table_all = simple_file(\"sos_commands/networking/ip_route_show_table_all\")\n ip_s_link = first_of([simple_file(\"sos_commands/networking/ip_-s_-d_link\"), simple_file(\"sos_commands/networking/ip_-s_link\"), simple_file(\"sos_commands/networking/ip_link\")])\n iptables = first_file([\"/etc/sysconfig/iptables\", \"/etc/sysconfig/iptables.save\"])\n journal_since_boot = first_of([simple_file(\"sos_commands/logs/journalctl_--no-pager_--boot\"), simple_file(\"sos_commands/logs/journalctl_--no-pager_--catalog_--boot\")])\n locale = simple_file(\"sos_commands/i18n/locale\")\n lsblk = simple_file(\"sos_commands/block/lsblk\")\n lscpu = simple_file(\"sos_commands/processor/lscpu\")\n lsinitrd = simple_file(\"sos_commands/boot/lsinitrd\")\n lsof = simple_file(\"sos_commands/process/lsof_-b_M_-n_-l\")\n lsmod = simple_file(\"sos_commands/kernel/lsmod\")\n lspci = first_of([\n simple_file(\"sos_commands/pci/lspci_-nnvv\"),\n simple_file(\"sos_commands/pci/lspci\"),\n simple_file(\"sos_commands/pci/lspci_-nvv\")\n ])\n lsscsi = simple_file(\"sos_commands/scsi/lsscsi\")\n ls_dev = first_file([\"sos_commands/block/ls_-lanR_.dev\", \"sos_commands/devicemapper/ls_-lanR_.dev\"])\n lvs = first_file([\"sos_commands/lvm2/lvs_-a_-o_lv_tags_devices_--config_global_locking_type_0\", \"sos_commands/lvm2/lvs_-a_-o_devices\"])\n mount = simple_file(\"sos_commands/filesys/mount_-l\")\n multipath__v4__ll = first_file([\"sos_commands/multipath/multipath_-v4_-ll\", \"sos_commands/devicemapper/multipath_-v4_-ll\"])\n netstat = first_file([\"sos_commands/networking/netstat_-neopa\", \"sos_commands/networking/netstat_-W_-neopa\", \"sos_commands/networking/netstat_-T_-neopa\"])\n netstat_agn = first_of([simple_file(\"sos_commands/networking/netstat_-agn\"), simple_file(\"sos_commands/networking/netstat_-W_-agn\"), simple_file(\"sos_commands/networking/netstat_-T_-agn\")])\n netstat_s = simple_file(\"sos_commands/networking/netstat_-s\")\n nmcli_dev_show = simple_file(\"sos_commands/networking/nmcli_device_show\")\n nmcli_dev_show_sos = glob_file([\"sos_commands/networking/nmcli_dev_show_*\", \"sos_commands/networkmanager/nmcli_dev_show_*\"])\n ntptime = simple_file(\"sos_commands/ntp/ntptime\")\n pcs_config = simple_file(\"sos_commands/pacemaker/pcs_config\")\n 
pcs_status = simple_file(\"sos_commands/pacemaker/pcs_status\")\n ps_alxwww = simple_file(\"sos_commands/process/ps_alxwww\")\n ps_aux = first_file([\"sos_commands/process/ps_aux\", \"sos_commands/process/ps_auxwww\", \"sos_commands/process/ps_auxcww\"])\n ps_auxcww = first_file([\"sos_commands/process/ps_auxcww\", \"sos_commands/process/ps_auxwww\", \"sos_commands/process/ps_aux\"])\n ps_auxww = first_file([\"sos_commands/process/ps_auxww\", \"sos_commands/process/ps_auxwww\", \"sos_commands/process/ps_aux\", \"sos_commands/process/ps_auxcww\"])\n puppet_ssl_cert_ca_pem = simple_file(\"sos_commands/foreman/foreman-debug/var/lib/puppet/ssl/certs/ca.pem\")\n pvs = first_file([\"sos_commands/lvm2/pvs_-a_-v_-o_pv_mda_free_pv_mda_size_pv_mda_count_pv_mda_used_count_pe_start_--config_global_locking_type_0\", \"sos_commands/lvm2/pvs_-a_-v\"])\n qpid_stat_q = first_of([\n simple_file(\"qpid_stat_queues\"),\n simple_file(\"qpid-stat-q\"),\n simple_file(\"sos_commands/foreman/foreman-debug/qpid_stat_queues\"),\n simple_file(\"sos_commands/foreman/foreman-debug/qpid-stat-q\")\n ])\n qpid_stat_u = first_of([\n simple_file(\"qpid_stat_subscriptions\"),\n simple_file(\"qpid-stat-u\"),\n simple_file(\"sos_commands/foreman/foreman-debug/qpid_stat_subscriptions\"),\n simple_file(\"sos_commands/foreman/foreman-debug/qpid-stat-u\")\n ])\n rabbitmq_report = simple_file(\"sos_commands/rabbitmq/rabbitmqctl_report\")\n rabbitmq_report_of_containers = glob_file(\"sos_commands/rabbitmq/docker_exec_-t_rabbitmq-bundle-docker-*_rabbitmqctl_report\")\n rhn_charsets = first_file([\"sos_commands/satellite/rhn-charsets\", \"sos_commands/rhn/rhn-charsets\"])\n root_crontab = first_file([\"sos_commands/crontab/root_crontab\", \"sos_commands/cron/root_crontab\"])\n route = simple_file(\"sos_commands/networking/route_-n\")\n sestatus = simple_file(\"sos_commands/selinux/sestatus_-b\")\n subscription_manager_list_consumed = first_file([\n 'sos_commands/yum/subscription-manager_list_--consumed',\n 'sos_commands/subscription_manager/subscription-manager_list_--consumed',\n 'sos_commands/general/subscription-manager_list_--consumed']\n )\n subscription_manager_list_installed = first_file([\n 'sos_commands/yum/subscription-manager_list_--installed',\n 'sos_commands/subscription_manager/subscription-manager_list_--installed',\n 'sos_commands/general/subscription-manager_list_--installed']\n )\n sysctl = simple_file(\"sos_commands/kernel/sysctl_-a\")\n systemctl_list_unit_files = simple_file(\"sos_commands/systemd/systemctl_list-unit-files\")\n systemctl_list_units = first_file([\"sos_commands/systemd/systemctl_list-units\", \"sos_commands/systemd/systemctl_list-units_--all\"])\n teamdctl_config_dump = glob_file(\"sos_commands/teamd/teamdctl_*_config_dump\")\n teamdctl_state_dump = glob_file(\"sos_commands/teamd/teamdctl_*_state_dump\")\n uname = simple_file(\"sos_commands/kernel/uname_-a\")\n uptime = first_of([simple_file(\"sos_commands/general/uptime\"), simple_file(\"sos_commands/host/uptime\")])\n var_qemu_xml = glob_file(r\"var/run/libvirt/qemu/*.xml\")\n vdsm_import_log = glob_file(\"var/log/vdsm/import/import-*.log\")\n vgdisplay = first_file([\"sos_commands/lvm2/vgdisplay_-vv_--config_global_locking_type_0\", \"sos_commands/lvm2/vgdisplay_-vv\"])\n vgs = first_file([\"sos_commands/lvm2/vgs_-v_-o_vg_mda_count_vg_mda_free_vg_mda_size_vg_mda_used_count_vg_tags_--config_global_locking_type_0\", \"sos_commands/lvm2/vgs_-v\"])\n xfs_info = glob_file(\"sos_commands/xfs/xfs_info*\")\n yum_repolist = 
simple_file(\"sos_commands/yum/yum_-C_repolist\")\n", "path": "insights/specs/sos_archive.py"}]}
3,417
213
gh_patches_debug_8109
rasdani/github-patches
git_diff
pre-commit__pre-commit-204
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Crash when /tmp is on a different device ``` Traceback (most recent call last): File "/home/cameron/Workspace/hack16-llvm-lang/venv/bin/pre-commit", line 9, in <module> load_entry_point('pre-commit==0.4.0', 'console_scripts', 'pre-commit')() File "/home/cameron/Workspace/hack16-llvm-lang/venv/lib/python3.4/site-packages/pre_commit/main.py", line 136, in main 'Command {0} failed to exit with a returncode'.format(args.command) File "/usr/lib64/python3.4/contextlib.py", line 77, in __exit__ self.gen.throw(type, value, traceback) File "/home/cameron/Workspace/hack16-llvm-lang/venv/lib/python3.4/site-packages/pre_commit/error_handler.py", line 41, in error_handler traceback.format_exc(), File "/home/cameron/Workspace/hack16-llvm-lang/venv/lib/python3.4/site-packages/pre_commit/error_handler.py", line 24, in _log_and_exit store.require_created() File "/home/cameron/Workspace/hack16-llvm-lang/venv/lib/python3.4/site-packages/pre_commit/store.py", line 97, in require_created self._create() File "/home/cameron/Workspace/hack16-llvm-lang/venv/lib/python3.4/site-packages/pre_commit/store.py", line 90, in _create self._write_sqlite_db() File "/home/cameron/Workspace/hack16-llvm-lang/venv/lib/python3.4/site-packages/pre_commit/store.py", line 82, in _write_sqlite_db os.rename(tmpfile, self.db_path) OSError: [Errno 18] Invalid cross-device link: '/tmp/tmpz1pkyqsm' -> '/home/cameron/.pre-commit/db.db' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/store.py` Content: ``` 1 from __future__ import unicode_literals 2 3 import contextlib 4 import io 5 import logging 6 import os 7 import os.path 8 import sqlite3 9 import tempfile 10 11 from cached_property import cached_property 12 13 from pre_commit.prefixed_command_runner import PrefixedCommandRunner 14 from pre_commit.util import clean_path_on_failure 15 from pre_commit.util import cmd_output 16 from pre_commit.util import cwd 17 18 19 logger = logging.getLogger('pre_commit') 20 21 22 def _get_default_directory(): 23 """Returns the default directory for the Store. This is intentionally 24 underscored to indicate that `Store.get_default_directory` is the intended 25 way to get this information. This is also done so 26 `Store.get_default_directory` can be mocked in tests and 27 `_get_default_directory` can be tested. 
28 """ 29 return os.environ.get( 30 'PRE_COMMIT_HOME', 31 os.path.join(os.path.expanduser('~'), '.pre-commit'), 32 ) 33 34 35 class Store(object): 36 get_default_directory = staticmethod(_get_default_directory) 37 38 class RepoPathGetter(object): 39 def __init__(self, repo, sha, store): 40 self._repo = repo 41 self._sha = sha 42 self._store = store 43 44 @cached_property 45 def repo_path(self): 46 return self._store.clone(self._repo, self._sha) 47 48 def __init__(self, directory=None): 49 if directory is None: 50 directory = self.get_default_directory() 51 52 self.directory = directory 53 self.__created = False 54 55 def _write_readme(self): 56 with io.open(os.path.join(self.directory, 'README'), 'w') as readme: 57 readme.write( 58 'This directory is maintained by the pre-commit project.\n' 59 'Learn more: https://github.com/pre-commit/pre-commit\n' 60 ) 61 62 def _write_sqlite_db(self): 63 # To avoid a race where someone ^Cs between db creation and execution 64 # of the CREATE TABLE statement 65 fd, tmpfile = tempfile.mkstemp() 66 # We'll be managing this file ourselves 67 os.close(fd) 68 # sqlite doesn't close its fd with its contextmanager >.< 69 # contextlib.closing fixes this. 70 # See: http://stackoverflow.com/a/28032829/812183 71 with contextlib.closing(sqlite3.connect(tmpfile)) as db: 72 db.executescript( 73 'CREATE TABLE repos (' 74 ' repo CHAR(255) NOT NULL,' 75 ' ref CHAR(255) NOT NULL,' 76 ' path CHAR(255) NOT NULL,' 77 ' PRIMARY KEY (repo, ref)' 78 ');' 79 ) 80 81 # Atomic file move 82 os.rename(tmpfile, self.db_path) 83 84 def _create(self): 85 if os.path.exists(self.db_path): 86 return 87 if not os.path.exists(self.directory): 88 os.makedirs(self.directory) 89 self._write_readme() 90 self._write_sqlite_db() 91 92 def require_created(self): 93 """Require the pre-commit file store to be created.""" 94 if self.__created: 95 return 96 97 self._create() 98 self.__created = True 99 100 def clone(self, url, sha): 101 """Clone the given url and checkout the specific sha.""" 102 self.require_created() 103 104 # Check if we already exist 105 with sqlite3.connect(self.db_path) as db: 106 result = db.execute( 107 'SELECT path FROM repos WHERE repo = ? AND ref = ?', 108 [url, sha], 109 ).fetchone() 110 if result: 111 return result[0] 112 113 logger.info('Initializing environment for {0}.'.format(url)) 114 115 dir = tempfile.mkdtemp(prefix='repo', dir=self.directory) 116 with clean_path_on_failure(dir): 117 cmd_output('git', 'clone', '--no-checkout', url, dir) 118 with cwd(dir): 119 cmd_output('git', 'checkout', sha) 120 121 # Update our db with the created repo 122 with sqlite3.connect(self.db_path) as db: 123 db.execute( 124 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)', 125 [url, sha, dir], 126 ) 127 return dir 128 129 def get_repo_path_getter(self, repo, sha): 130 return self.RepoPathGetter(repo, sha, self) 131 132 @cached_property 133 def cmd_runner(self): 134 return PrefixedCommandRunner(self.directory) 135 136 @cached_property 137 def db_path(self): 138 return os.path.join(self.directory, 'db.db') 139 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/store.py b/pre_commit/store.py --- a/pre_commit/store.py +++ b/pre_commit/store.py @@ -62,7 +62,7 @@ def _write_sqlite_db(self): # To avoid a race where someone ^Cs between db creation and execution # of the CREATE TABLE statement - fd, tmpfile = tempfile.mkstemp() + fd, tmpfile = tempfile.mkstemp(dir=self.directory) # We'll be managing this file ourselves os.close(fd) # sqlite doesn't close its fd with its contextmanager >.<
{"golden_diff": "diff --git a/pre_commit/store.py b/pre_commit/store.py\n--- a/pre_commit/store.py\n+++ b/pre_commit/store.py\n@@ -62,7 +62,7 @@\n def _write_sqlite_db(self):\n # To avoid a race where someone ^Cs between db creation and execution\n # of the CREATE TABLE statement\n- fd, tmpfile = tempfile.mkstemp()\n+ fd, tmpfile = tempfile.mkstemp(dir=self.directory)\n # We'll be managing this file ourselves\n os.close(fd)\n # sqlite doesn't close its fd with its contextmanager >.<\n", "issue": "Crash when /tmp is on a different device\n```\nTraceback (most recent call last):\n File \"/home/cameron/Workspace/hack16-llvm-lang/venv/bin/pre-commit\", line 9, in <module>\n load_entry_point('pre-commit==0.4.0', 'console_scripts', 'pre-commit')()\n File \"/home/cameron/Workspace/hack16-llvm-lang/venv/lib/python3.4/site-packages/pre_commit/main.py\", line 136, in main\n 'Command {0} failed to exit with a returncode'.format(args.command)\n File \"/usr/lib64/python3.4/contextlib.py\", line 77, in __exit__\n self.gen.throw(type, value, traceback)\n File \"/home/cameron/Workspace/hack16-llvm-lang/venv/lib/python3.4/site-packages/pre_commit/error_handler.py\", line 41, in error_handler\n traceback.format_exc(),\n File \"/home/cameron/Workspace/hack16-llvm-lang/venv/lib/python3.4/site-packages/pre_commit/error_handler.py\", line 24, in _log_and_exit\n store.require_created()\n File \"/home/cameron/Workspace/hack16-llvm-lang/venv/lib/python3.4/site-packages/pre_commit/store.py\", line 97, in require_created\n self._create()\n File \"/home/cameron/Workspace/hack16-llvm-lang/venv/lib/python3.4/site-packages/pre_commit/store.py\", line 90, in _create\n self._write_sqlite_db()\n File \"/home/cameron/Workspace/hack16-llvm-lang/venv/lib/python3.4/site-packages/pre_commit/store.py\", line 82, in _write_sqlite_db\n os.rename(tmpfile, self.db_path)\nOSError: [Errno 18] Invalid cross-device link: '/tmp/tmpz1pkyqsm' -> '/home/cameron/.pre-commit/db.db'\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport os\nimport os.path\nimport sqlite3\nimport tempfile\n\nfrom cached_property import cached_property\n\nfrom pre_commit.prefixed_command_runner import PrefixedCommandRunner\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_default_directory():\n \"\"\"Returns the default directory for the Store. This is intentionally\n underscored to indicate that `Store.get_default_directory` is the intended\n way to get this information. 
This is also done so\n `Store.get_default_directory` can be mocked in tests and\n `_get_default_directory` can be tested.\n \"\"\"\n return os.environ.get(\n 'PRE_COMMIT_HOME',\n os.path.join(os.path.expanduser('~'), '.pre-commit'),\n )\n\n\nclass Store(object):\n get_default_directory = staticmethod(_get_default_directory)\n\n class RepoPathGetter(object):\n def __init__(self, repo, sha, store):\n self._repo = repo\n self._sha = sha\n self._store = store\n\n @cached_property\n def repo_path(self):\n return self._store.clone(self._repo, self._sha)\n\n def __init__(self, directory=None):\n if directory is None:\n directory = self.get_default_directory()\n\n self.directory = directory\n self.__created = False\n\n def _write_readme(self):\n with io.open(os.path.join(self.directory, 'README'), 'w') as readme:\n readme.write(\n 'This directory is maintained by the pre-commit project.\\n'\n 'Learn more: https://github.com/pre-commit/pre-commit\\n'\n )\n\n def _write_sqlite_db(self):\n # To avoid a race where someone ^Cs between db creation and execution\n # of the CREATE TABLE statement\n fd, tmpfile = tempfile.mkstemp()\n # We'll be managing this file ourselves\n os.close(fd)\n # sqlite doesn't close its fd with its contextmanager >.<\n # contextlib.closing fixes this.\n # See: http://stackoverflow.com/a/28032829/812183\n with contextlib.closing(sqlite3.connect(tmpfile)) as db:\n db.executescript(\n 'CREATE TABLE repos ('\n ' repo CHAR(255) NOT NULL,'\n ' ref CHAR(255) NOT NULL,'\n ' path CHAR(255) NOT NULL,'\n ' PRIMARY KEY (repo, ref)'\n ');'\n )\n\n # Atomic file move\n os.rename(tmpfile, self.db_path)\n\n def _create(self):\n if os.path.exists(self.db_path):\n return\n if not os.path.exists(self.directory):\n os.makedirs(self.directory)\n self._write_readme()\n self._write_sqlite_db()\n\n def require_created(self):\n \"\"\"Require the pre-commit file store to be created.\"\"\"\n if self.__created:\n return\n\n self._create()\n self.__created = True\n\n def clone(self, url, sha):\n \"\"\"Clone the given url and checkout the specific sha.\"\"\"\n self.require_created()\n\n # Check if we already exist\n with sqlite3.connect(self.db_path) as db:\n result = db.execute(\n 'SELECT path FROM repos WHERE repo = ? 
AND ref = ?',\n [url, sha],\n ).fetchone()\n if result:\n return result[0]\n\n logger.info('Initializing environment for {0}.'.format(url))\n\n dir = tempfile.mkdtemp(prefix='repo', dir=self.directory)\n with clean_path_on_failure(dir):\n cmd_output('git', 'clone', '--no-checkout', url, dir)\n with cwd(dir):\n cmd_output('git', 'checkout', sha)\n\n # Update our db with the created repo\n with sqlite3.connect(self.db_path) as db:\n db.execute(\n 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',\n [url, sha, dir],\n )\n return dir\n\n def get_repo_path_getter(self, repo, sha):\n return self.RepoPathGetter(repo, sha, self)\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(self.directory)\n\n @cached_property\n def db_path(self):\n return os.path.join(self.directory, 'db.db')\n", "path": "pre_commit/store.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport os\nimport os.path\nimport sqlite3\nimport tempfile\n\nfrom cached_property import cached_property\n\nfrom pre_commit.prefixed_command_runner import PrefixedCommandRunner\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_default_directory():\n \"\"\"Returns the default directory for the Store. This is intentionally\n underscored to indicate that `Store.get_default_directory` is the intended\n way to get this information. This is also done so\n `Store.get_default_directory` can be mocked in tests and\n `_get_default_directory` can be tested.\n \"\"\"\n return os.environ.get(\n 'PRE_COMMIT_HOME',\n os.path.join(os.path.expanduser('~'), '.pre-commit'),\n )\n\n\nclass Store(object):\n get_default_directory = staticmethod(_get_default_directory)\n\n class RepoPathGetter(object):\n def __init__(self, repo, sha, store):\n self._repo = repo\n self._sha = sha\n self._store = store\n\n @cached_property\n def repo_path(self):\n return self._store.clone(self._repo, self._sha)\n\n def __init__(self, directory=None):\n if directory is None:\n directory = self.get_default_directory()\n\n self.directory = directory\n self.__created = False\n\n def _write_readme(self):\n with io.open(os.path.join(self.directory, 'README'), 'w') as readme:\n readme.write(\n 'This directory is maintained by the pre-commit project.\\n'\n 'Learn more: https://github.com/pre-commit/pre-commit\\n'\n )\n\n def _write_sqlite_db(self):\n # To avoid a race where someone ^Cs between db creation and execution\n # of the CREATE TABLE statement\n fd, tmpfile = tempfile.mkstemp(dir=self.directory)\n # We'll be managing this file ourselves\n os.close(fd)\n # sqlite doesn't close its fd with its contextmanager >.<\n # contextlib.closing fixes this.\n # See: http://stackoverflow.com/a/28032829/812183\n with contextlib.closing(sqlite3.connect(tmpfile)) as db:\n db.executescript(\n 'CREATE TABLE repos ('\n ' repo CHAR(255) NOT NULL,'\n ' ref CHAR(255) NOT NULL,'\n ' path CHAR(255) NOT NULL,'\n ' PRIMARY KEY (repo, ref)'\n ');'\n )\n\n # Atomic file move\n os.rename(tmpfile, self.db_path)\n\n def _create(self):\n if os.path.exists(self.db_path):\n return\n if not os.path.exists(self.directory):\n os.makedirs(self.directory)\n self._write_readme()\n self._write_sqlite_db()\n\n def require_created(self):\n \"\"\"Require the pre-commit file store to be created.\"\"\"\n if self.__created:\n return\n\n self._create()\n self.__created = True\n\n def 
clone(self, url, sha):\n \"\"\"Clone the given url and checkout the specific sha.\"\"\"\n self.require_created()\n\n # Check if we already exist\n with sqlite3.connect(self.db_path) as db:\n result = db.execute(\n 'SELECT path FROM repos WHERE repo = ? AND ref = ?',\n [url, sha],\n ).fetchone()\n if result:\n return result[0]\n\n logger.info('Initializing environment for {0}.'.format(url))\n\n dir = tempfile.mkdtemp(prefix='repo', dir=self.directory)\n with clean_path_on_failure(dir):\n cmd_output('git', 'clone', '--no-checkout', url, dir)\n with cwd(dir):\n cmd_output('git', 'checkout', sha)\n\n # Update our db with the created repo\n with sqlite3.connect(self.db_path) as db:\n db.execute(\n 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',\n [url, sha, dir],\n )\n return dir\n\n def get_repo_path_getter(self, repo, sha):\n return self.RepoPathGetter(repo, sha, self)\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(self.directory)\n\n @cached_property\n def db_path(self):\n return os.path.join(self.directory, 'db.db')\n", "path": "pre_commit/store.py"}]}
1,986
130
gh_patches_debug_57079
rasdani/github-patches
git_diff
searx__searx-672
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Infinite scroll: answer are repeated on each page How to reproduce : search for ["user agent"](https://searx.me/?q=user+agent) with Infinite scroll activated. Should the answer be disabled except the first page ? or should Infinite Scroll hide the answer ? I vote for the first option : disabled answers except on the first page on the server side. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `searx/plugins/self_info.py` Content: ``` 1 ''' 2 searx is free software: you can redistribute it and/or modify 3 it under the terms of the GNU Affero General Public License as published by 4 the Free Software Foundation, either version 3 of the License, or 5 (at your option) any later version. 6 7 searx is distributed in the hope that it will be useful, 8 but WITHOUT ANY WARRANTY; without even the implied warranty of 9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 GNU Affero General Public License for more details. 11 12 You should have received a copy of the GNU Affero General Public License 13 along with searx. If not, see < http://www.gnu.org/licenses/ >. 14 15 (C) 2015 by Adam Tauber, <[email protected]> 16 ''' 17 from flask_babel import gettext 18 import re 19 name = "Self Informations" 20 description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".') 21 default_on = True 22 23 24 # Self User Agent regex 25 p = re.compile('.*user[ -]agent.*', re.IGNORECASE) 26 27 28 # attach callback to the post search hook 29 # request: flask request object 30 # ctx: the whole local context of the pre search hook 31 def post_search(request, ctx): 32 if ctx['search'].query == 'ip': 33 x_forwarded_for = request.headers.getlist("X-Forwarded-For") 34 if x_forwarded_for: 35 ip = x_forwarded_for[0] 36 else: 37 ip = request.remote_addr 38 ctx['search'].result_container.answers.clear() 39 ctx['search'].result_container.answers.add(ip) 40 elif p.match(ctx['search'].query): 41 ua = request.user_agent 42 ctx['search'].result_container.answers.clear() 43 ctx['search'].result_container.answers.add(ua) 44 return True 45 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py --- a/searx/plugins/self_info.py +++ b/searx/plugins/self_info.py @@ -29,6 +29,8 @@ # request: flask request object # ctx: the whole local context of the pre search hook def post_search(request, ctx): + if ctx['search'].pageno > 1: + return True if ctx['search'].query == 'ip': x_forwarded_for = request.headers.getlist("X-Forwarded-For") if x_forwarded_for:
{"golden_diff": "diff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py\n--- a/searx/plugins/self_info.py\n+++ b/searx/plugins/self_info.py\n@@ -29,6 +29,8 @@\n # request: flask request object\n # ctx: the whole local context of the pre search hook\n def post_search(request, ctx):\n+ if ctx['search'].pageno > 1:\n+ return True\n if ctx['search'].query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n", "issue": "Infinite scroll: answer are repeated on each page\nHow to reproduce : search for [\"user agent\"](https://searx.me/?q=user+agent) with Infinite scroll activated.\n\nShould the answer be disabled except the first page ? or should Infinite Scroll hide the answer ?\n\nI vote for the first option : disabled answers except on the first page on the server side. \n\n", "before_files": [{"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n'''\nfrom flask_babel import gettext\nimport re\nname = \"Self Informations\"\ndescription = gettext('Displays your IP if the query is \"ip\" and your user agent if the query contains \"user agent\".')\ndefault_on = True\n\n\n# Self User Agent regex\np = re.compile('.*user[ -]agent.*', re.IGNORECASE)\n\n\n# attach callback to the post search hook\n# request: flask request object\n# ctx: the whole local context of the pre search hook\ndef post_search(request, ctx):\n if ctx['search'].query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n ip = x_forwarded_for[0]\n else:\n ip = request.remote_addr\n ctx['search'].result_container.answers.clear()\n ctx['search'].result_container.answers.add(ip)\n elif p.match(ctx['search'].query):\n ua = request.user_agent\n ctx['search'].result_container.answers.clear()\n ctx['search'].result_container.answers.add(ua)\n return True\n", "path": "searx/plugins/self_info.py"}], "after_files": [{"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. 
If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n'''\nfrom flask_babel import gettext\nimport re\nname = \"Self Informations\"\ndescription = gettext('Displays your IP if the query is \"ip\" and your user agent if the query contains \"user agent\".')\ndefault_on = True\n\n\n# Self User Agent regex\np = re.compile('.*user[ -]agent.*', re.IGNORECASE)\n\n\n# attach callback to the post search hook\n# request: flask request object\n# ctx: the whole local context of the pre search hook\ndef post_search(request, ctx):\n if ctx['search'].pageno > 1:\n return True\n if ctx['search'].query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n ip = x_forwarded_for[0]\n else:\n ip = request.remote_addr\n ctx['search'].result_container.answers.clear()\n ctx['search'].result_container.answers.add(ip)\n elif p.match(ctx['search'].query):\n ua = request.user_agent\n ctx['search'].result_container.answers.clear()\n ctx['search'].result_container.answers.add(ua)\n return True\n", "path": "searx/plugins/self_info.py"}]}
813
135
gh_patches_debug_29251
rasdani/github-patches
git_diff
litestar-org__litestar-472
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Enhancement: Add more types to the default `Response` serializer. It looks like most of the project templates we have floating around implement a custom serializer for Responses. We should consider enhancing the built in to reduce the need for this. For instance, here is the current `Response.serializer`: ```python @staticmethod def serializer(value: Any) -> Dict[str, Any]: """Serializer hook for orjson to handle pydantic models. This method can be overridden to extend json serialization. Args: value: The value to be serialized Returns: A string keyed dictionary of json compatible values """ if isinstance(value, BaseModel): return value.dict() raise TypeError # pragma: no cover ``` and here is one that's used on another project: ```python @staticmethod def serializer(value: Any) -> Dict[str, Any]: """Serializer hook for orjson to handle pydantic models. Args: value: The value to be serialized Returns: A string keyed dictionary of json compatible values """ if isinstance(value, Enum): return value.value if isinstance(value, EnumMeta): return None if isinstance(value, SecretStr): return value.get_secret_value() if isinstance(value, UUID): return str(value) return starlite.Response.serializer(value) ``` Thoughts? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `starlite/response.py` Content: ``` 1 from typing import ( 2 TYPE_CHECKING, 3 Any, 4 Dict, 5 Generic, 6 NoReturn, 7 Optional, 8 TypeVar, 9 Union, 10 cast, 11 ) 12 13 import yaml 14 from orjson import OPT_INDENT_2, OPT_OMIT_MICROSECONDS, OPT_SERIALIZE_NUMPY, dumps 15 from pydantic import BaseModel 16 from pydantic_openapi_schema.v3_1_0.open_api import OpenAPI 17 from starlette.responses import Response as StarletteResponse 18 from starlette.status import HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED 19 20 from starlite.enums import MediaType, OpenAPIMediaType 21 from starlite.exceptions import ImproperlyConfiguredException 22 23 T = TypeVar("T") 24 25 if TYPE_CHECKING: 26 from starlite.datastructures import BackgroundTask, BackgroundTasks 27 from starlite.template import TemplateEngineProtocol 28 from starlite.types import ResponseCookies 29 30 31 class Response(StarletteResponse, Generic[T]): 32 def __init__( 33 self, 34 content: T, 35 *, 36 status_code: int, 37 media_type: Union["MediaType", "OpenAPIMediaType", str], 38 background: Optional[Union["BackgroundTask", "BackgroundTasks"]] = None, 39 headers: Optional[Dict[str, Any]] = None, 40 cookies: Optional["ResponseCookies"] = None, 41 ): 42 """The response class is used to return an HTTP response. 43 44 Args: 45 content: A value for the response body that will be rendered into bytes string. 46 status_code: A value for the response HTTP status code. 47 media_type: A value for the response 'Content-Type' header. 48 background: A [BackgroundTask][starlite.datastructures.BackgroundTask] instance or 49 [BackgroundTasks][starlite.datastructures.BackgroundTasks] to execute after the response is finished. 50 Defaults to None. 51 headers: A string keyed dictionary of response headers. Header keys are insensitive. 52 cookies: A list of [Cookie][starlite.datastructures.Cookie] instances to be set under the response 'Set-Cookie' header. 
53 """ 54 super().__init__( 55 content=content, 56 status_code=status_code, 57 headers=headers or {}, 58 media_type=media_type, 59 background=cast("BackgroundTask", background), 60 ) 61 self.cookies = cookies or [] 62 63 @staticmethod 64 def serializer(value: Any) -> Dict[str, Any]: 65 """Serializer hook for orjson to handle pydantic models. 66 67 This method can be overridden to extend json serialization. 68 69 Args: 70 value: The value to be serialized 71 72 Returns: 73 A string keyed dictionary of json compatible values 74 """ 75 if isinstance(value, BaseModel): 76 return value.dict() 77 raise TypeError # pragma: no cover 78 79 def render(self, content: Any) -> bytes: 80 """ 81 Handles the rendering of content T into a bytes string. 82 Args: 83 content: An arbitrary value of type T 84 85 Returns: 86 An encoded bytes string 87 """ 88 try: 89 if ( 90 content is None 91 or content is NoReturn 92 and (self.status_code < 100 or self.status_code in {HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED}) 93 ): 94 return b"" 95 if self.media_type == MediaType.JSON: 96 return dumps(content, default=self.serializer, option=OPT_SERIALIZE_NUMPY | OPT_OMIT_MICROSECONDS) 97 if isinstance(content, OpenAPI): 98 content_dict = content.dict(by_alias=True, exclude_none=True) 99 if self.media_type == OpenAPIMediaType.OPENAPI_YAML: 100 encoded = yaml.dump(content_dict, default_flow_style=False).encode("utf-8") 101 return cast("bytes", encoded) 102 return dumps(content_dict, option=OPT_INDENT_2 | OPT_OMIT_MICROSECONDS) 103 return super().render(content) 104 except (AttributeError, ValueError, TypeError) as e: 105 raise ImproperlyConfiguredException("Unable to serialize response content") from e 106 107 108 class TemplateResponse(Response): 109 def __init__( 110 self, 111 template_name: str, 112 template_engine: "TemplateEngineProtocol", 113 status_code: int, 114 context: Optional[Dict[str, Any]] = None, 115 background: Optional[Union["BackgroundTask", "BackgroundTasks"]] = None, 116 headers: Optional[Dict[str, Any]] = None, 117 cookies: Optional["ResponseCookies"] = None, 118 ): 119 """Handles the rendering of a given template into a bytes string. 120 121 Args: 122 template_name: Path-like name for the template to be rendered, e.g. "index.html". 123 template_engine: The template engine class to use to render the response. 124 status_code: A value for the response HTTP status code. 125 context: A dictionary of key/value pairs to be passed to the temple engine's render method. Defaults to None. 126 background: A [BackgroundTask][starlite.datastructures.BackgroundTask] instance or 127 [BackgroundTasks][starlite.datastructures.BackgroundTasks] to execute after the response is finished. 128 Defaults to None. 129 headers: A string keyed dictionary of response headers. Header keys are insensitive. 130 cookies: A list of [Cookie][starlite.datastructures.Cookie] instances to be set under the response 'Set-Cookie' header. 131 """ 132 context = context or {} 133 template = template_engine.get_template(template_name) 134 content = template.render(**context or {}) 135 super().__init__( 136 content=content, 137 status_code=status_code, 138 headers=headers, 139 media_type=MediaType.HTML, 140 background=background, 141 cookies=cookies, 142 ) 143 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/starlite/response.py b/starlite/response.py --- a/starlite/response.py +++ b/starlite/response.py @@ -1,3 +1,4 @@ +from pathlib import PurePath, PurePosixPath from typing import ( TYPE_CHECKING, Any, @@ -12,7 +13,7 @@ import yaml from orjson import OPT_INDENT_2, OPT_OMIT_MICROSECONDS, OPT_SERIALIZE_NUMPY, dumps -from pydantic import BaseModel +from pydantic import BaseModel, SecretStr from pydantic_openapi_schema.v3_1_0.open_api import OpenAPI from starlette.responses import Response as StarletteResponse from starlette.status import HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED @@ -61,7 +62,7 @@ self.cookies = cookies or [] @staticmethod - def serializer(value: Any) -> Dict[str, Any]: + def serializer(value: Any) -> Union[Dict[str, Any], str]: """Serializer hook for orjson to handle pydantic models. This method can be overridden to extend json serialization. @@ -74,6 +75,10 @@ """ if isinstance(value, BaseModel): return value.dict() + if isinstance(value, SecretStr): + return value.get_secret_value() + if isinstance(value, (PurePath, PurePosixPath)): + return str(value) raise TypeError # pragma: no cover def render(self, content: Any) -> bytes:
{"golden_diff": "diff --git a/starlite/response.py b/starlite/response.py\n--- a/starlite/response.py\n+++ b/starlite/response.py\n@@ -1,3 +1,4 @@\n+from pathlib import PurePath, PurePosixPath\n from typing import (\n TYPE_CHECKING,\n Any,\n@@ -12,7 +13,7 @@\n \n import yaml\n from orjson import OPT_INDENT_2, OPT_OMIT_MICROSECONDS, OPT_SERIALIZE_NUMPY, dumps\n-from pydantic import BaseModel\n+from pydantic import BaseModel, SecretStr\n from pydantic_openapi_schema.v3_1_0.open_api import OpenAPI\n from starlette.responses import Response as StarletteResponse\n from starlette.status import HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED\n@@ -61,7 +62,7 @@\n self.cookies = cookies or []\n \n @staticmethod\n- def serializer(value: Any) -> Dict[str, Any]:\n+ def serializer(value: Any) -> Union[Dict[str, Any], str]:\n \"\"\"Serializer hook for orjson to handle pydantic models.\n \n This method can be overridden to extend json serialization.\n@@ -74,6 +75,10 @@\n \"\"\"\n if isinstance(value, BaseModel):\n return value.dict()\n+ if isinstance(value, SecretStr):\n+ return value.get_secret_value()\n+ if isinstance(value, (PurePath, PurePosixPath)):\n+ return str(value)\n raise TypeError # pragma: no cover\n \n def render(self, content: Any) -> bytes:\n", "issue": "Enhancement: Add more types to the default `Response` serializer.\nIt looks like most of the project templates we have floating around implement a custom serializer for Responses. We should consider enhancing the built in to reduce the need for this.\r\n\r\nFor instance, here is the current `Response.serializer`:\r\n```python\r\n @staticmethod\r\n def serializer(value: Any) -> Dict[str, Any]:\r\n \"\"\"Serializer hook for orjson to handle pydantic models.\r\n\r\n This method can be overridden to extend json serialization.\r\n\r\n Args:\r\n value: The value to be serialized\r\n\r\n Returns:\r\n A string keyed dictionary of json compatible values\r\n \"\"\"\r\n if isinstance(value, BaseModel):\r\n return value.dict()\r\n raise TypeError # pragma: no cover\r\n```\r\n\r\nand here is one that's used on another project:\r\n```python\r\n@staticmethod\r\n def serializer(value: Any) -> Dict[str, Any]:\r\n \"\"\"Serializer hook for orjson to handle pydantic models.\r\n\r\n Args:\r\n value: The value to be serialized\r\n\r\n Returns:\r\n A string keyed dictionary of json compatible values\r\n \"\"\"\r\n if isinstance(value, Enum):\r\n return value.value\r\n if isinstance(value, EnumMeta):\r\n return None\r\n if isinstance(value, SecretStr):\r\n return value.get_secret_value()\r\n if isinstance(value, UUID):\r\n return str(value)\r\n return starlite.Response.serializer(value)\r\n```\r\n\r\nThoughts?\r\n\r\n\n", "before_files": [{"content": "from typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n Generic,\n NoReturn,\n Optional,\n TypeVar,\n Union,\n cast,\n)\n\nimport yaml\nfrom orjson import OPT_INDENT_2, OPT_OMIT_MICROSECONDS, OPT_SERIALIZE_NUMPY, dumps\nfrom pydantic import BaseModel\nfrom pydantic_openapi_schema.v3_1_0.open_api import OpenAPI\nfrom starlette.responses import Response as StarletteResponse\nfrom starlette.status import HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED\n\nfrom starlite.enums import MediaType, OpenAPIMediaType\nfrom starlite.exceptions import ImproperlyConfiguredException\n\nT = TypeVar(\"T\")\n\nif TYPE_CHECKING:\n from starlite.datastructures import BackgroundTask, BackgroundTasks\n from starlite.template import TemplateEngineProtocol\n from starlite.types import ResponseCookies\n\n\nclass Response(StarletteResponse, 
Generic[T]):\n def __init__(\n self,\n content: T,\n *,\n status_code: int,\n media_type: Union[\"MediaType\", \"OpenAPIMediaType\", str],\n background: Optional[Union[\"BackgroundTask\", \"BackgroundTasks\"]] = None,\n headers: Optional[Dict[str, Any]] = None,\n cookies: Optional[\"ResponseCookies\"] = None,\n ):\n \"\"\"The response class is used to return an HTTP response.\n\n Args:\n content: A value for the response body that will be rendered into bytes string.\n status_code: A value for the response HTTP status code.\n media_type: A value for the response 'Content-Type' header.\n background: A [BackgroundTask][starlite.datastructures.BackgroundTask] instance or\n [BackgroundTasks][starlite.datastructures.BackgroundTasks] to execute after the response is finished.\n Defaults to None.\n headers: A string keyed dictionary of response headers. Header keys are insensitive.\n cookies: A list of [Cookie][starlite.datastructures.Cookie] instances to be set under the response 'Set-Cookie' header.\n \"\"\"\n super().__init__(\n content=content,\n status_code=status_code,\n headers=headers or {},\n media_type=media_type,\n background=cast(\"BackgroundTask\", background),\n )\n self.cookies = cookies or []\n\n @staticmethod\n def serializer(value: Any) -> Dict[str, Any]:\n \"\"\"Serializer hook for orjson to handle pydantic models.\n\n This method can be overridden to extend json serialization.\n\n Args:\n value: The value to be serialized\n\n Returns:\n A string keyed dictionary of json compatible values\n \"\"\"\n if isinstance(value, BaseModel):\n return value.dict()\n raise TypeError # pragma: no cover\n\n def render(self, content: Any) -> bytes:\n \"\"\"\n Handles the rendering of content T into a bytes string.\n Args:\n content: An arbitrary value of type T\n\n Returns:\n An encoded bytes string\n \"\"\"\n try:\n if (\n content is None\n or content is NoReturn\n and (self.status_code < 100 or self.status_code in {HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED})\n ):\n return b\"\"\n if self.media_type == MediaType.JSON:\n return dumps(content, default=self.serializer, option=OPT_SERIALIZE_NUMPY | OPT_OMIT_MICROSECONDS)\n if isinstance(content, OpenAPI):\n content_dict = content.dict(by_alias=True, exclude_none=True)\n if self.media_type == OpenAPIMediaType.OPENAPI_YAML:\n encoded = yaml.dump(content_dict, default_flow_style=False).encode(\"utf-8\")\n return cast(\"bytes\", encoded)\n return dumps(content_dict, option=OPT_INDENT_2 | OPT_OMIT_MICROSECONDS)\n return super().render(content)\n except (AttributeError, ValueError, TypeError) as e:\n raise ImproperlyConfiguredException(\"Unable to serialize response content\") from e\n\n\nclass TemplateResponse(Response):\n def __init__(\n self,\n template_name: str,\n template_engine: \"TemplateEngineProtocol\",\n status_code: int,\n context: Optional[Dict[str, Any]] = None,\n background: Optional[Union[\"BackgroundTask\", \"BackgroundTasks\"]] = None,\n headers: Optional[Dict[str, Any]] = None,\n cookies: Optional[\"ResponseCookies\"] = None,\n ):\n \"\"\"Handles the rendering of a given template into a bytes string.\n\n Args:\n template_name: Path-like name for the template to be rendered, e.g. \"index.html\".\n template_engine: The template engine class to use to render the response.\n status_code: A value for the response HTTP status code.\n context: A dictionary of key/value pairs to be passed to the temple engine's render method. 
Defaults to None.\n background: A [BackgroundTask][starlite.datastructures.BackgroundTask] instance or\n [BackgroundTasks][starlite.datastructures.BackgroundTasks] to execute after the response is finished.\n Defaults to None.\n headers: A string keyed dictionary of response headers. Header keys are insensitive.\n cookies: A list of [Cookie][starlite.datastructures.Cookie] instances to be set under the response 'Set-Cookie' header.\n \"\"\"\n context = context or {}\n template = template_engine.get_template(template_name)\n content = template.render(**context or {})\n super().__init__(\n content=content,\n status_code=status_code,\n headers=headers,\n media_type=MediaType.HTML,\n background=background,\n cookies=cookies,\n )\n", "path": "starlite/response.py"}], "after_files": [{"content": "from pathlib import PurePath, PurePosixPath\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n Generic,\n NoReturn,\n Optional,\n TypeVar,\n Union,\n cast,\n)\n\nimport yaml\nfrom orjson import OPT_INDENT_2, OPT_OMIT_MICROSECONDS, OPT_SERIALIZE_NUMPY, dumps\nfrom pydantic import BaseModel, SecretStr\nfrom pydantic_openapi_schema.v3_1_0.open_api import OpenAPI\nfrom starlette.responses import Response as StarletteResponse\nfrom starlette.status import HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED\n\nfrom starlite.enums import MediaType, OpenAPIMediaType\nfrom starlite.exceptions import ImproperlyConfiguredException\n\nT = TypeVar(\"T\")\n\nif TYPE_CHECKING:\n from starlite.datastructures import BackgroundTask, BackgroundTasks\n from starlite.template import TemplateEngineProtocol\n from starlite.types import ResponseCookies\n\n\nclass Response(StarletteResponse, Generic[T]):\n def __init__(\n self,\n content: T,\n *,\n status_code: int,\n media_type: Union[\"MediaType\", \"OpenAPIMediaType\", str],\n background: Optional[Union[\"BackgroundTask\", \"BackgroundTasks\"]] = None,\n headers: Optional[Dict[str, Any]] = None,\n cookies: Optional[\"ResponseCookies\"] = None,\n ):\n \"\"\"The response class is used to return an HTTP response.\n\n Args:\n content: A value for the response body that will be rendered into bytes string.\n status_code: A value for the response HTTP status code.\n media_type: A value for the response 'Content-Type' header.\n background: A [BackgroundTask][starlite.datastructures.BackgroundTask] instance or\n [BackgroundTasks][starlite.datastructures.BackgroundTasks] to execute after the response is finished.\n Defaults to None.\n headers: A string keyed dictionary of response headers. 
Header keys are insensitive.\n cookies: A list of [Cookie][starlite.datastructures.Cookie] instances to be set under the response 'Set-Cookie' header.\n \"\"\"\n super().__init__(\n content=content,\n status_code=status_code,\n headers=headers or {},\n media_type=media_type,\n background=cast(\"BackgroundTask\", background),\n )\n self.cookies = cookies or []\n\n @staticmethod\n def serializer(value: Any) -> Union[Dict[str, Any], str]:\n \"\"\"Serializer hook for orjson to handle pydantic models.\n\n This method can be overridden to extend json serialization.\n\n Args:\n value: The value to be serialized\n\n Returns:\n A string keyed dictionary of json compatible values\n \"\"\"\n if isinstance(value, BaseModel):\n return value.dict()\n if isinstance(value, SecretStr):\n return value.get_secret_value()\n if isinstance(value, (PurePath, PurePosixPath)):\n return str(value)\n raise TypeError # pragma: no cover\n\n def render(self, content: Any) -> bytes:\n \"\"\"\n Handles the rendering of content T into a bytes string.\n Args:\n content: An arbitrary value of type T\n\n Returns:\n An encoded bytes string\n \"\"\"\n try:\n if (\n content is None\n or content is NoReturn\n and (self.status_code < 100 or self.status_code in {HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED})\n ):\n return b\"\"\n if self.media_type == MediaType.JSON:\n return dumps(content, default=self.serializer, option=OPT_SERIALIZE_NUMPY | OPT_OMIT_MICROSECONDS)\n if isinstance(content, OpenAPI):\n content_dict = content.dict(by_alias=True, exclude_none=True)\n if self.media_type == OpenAPIMediaType.OPENAPI_YAML:\n encoded = yaml.dump(content_dict, default_flow_style=False).encode(\"utf-8\")\n return cast(\"bytes\", encoded)\n return dumps(content_dict, option=OPT_INDENT_2 | OPT_OMIT_MICROSECONDS)\n return super().render(content)\n except (AttributeError, ValueError, TypeError) as e:\n raise ImproperlyConfiguredException(\"Unable to serialize response content\") from e\n\n\nclass TemplateResponse(Response):\n def __init__(\n self,\n template_name: str,\n template_engine: \"TemplateEngineProtocol\",\n status_code: int,\n context: Optional[Dict[str, Any]] = None,\n background: Optional[Union[\"BackgroundTask\", \"BackgroundTasks\"]] = None,\n headers: Optional[Dict[str, Any]] = None,\n cookies: Optional[\"ResponseCookies\"] = None,\n ):\n \"\"\"Handles the rendering of a given template into a bytes string.\n\n Args:\n template_name: Path-like name for the template to be rendered, e.g. \"index.html\".\n template_engine: The template engine class to use to render the response.\n status_code: A value for the response HTTP status code.\n context: A dictionary of key/value pairs to be passed to the temple engine's render method. Defaults to None.\n background: A [BackgroundTask][starlite.datastructures.BackgroundTask] instance or\n [BackgroundTasks][starlite.datastructures.BackgroundTasks] to execute after the response is finished.\n Defaults to None.\n headers: A string keyed dictionary of response headers. Header keys are insensitive.\n cookies: A list of [Cookie][starlite.datastructures.Cookie] instances to be set under the response 'Set-Cookie' header.\n \"\"\"\n context = context or {}\n template = template_engine.get_template(template_name)\n content = template.render(**context or {})\n super().__init__(\n content=content,\n status_code=status_code,\n headers=headers,\n media_type=MediaType.HTML,\n background=background,\n cookies=cookies,\n )\n", "path": "starlite/response.py"}]}
2,060
342
gh_patches_debug_6064
rasdani/github-patches
git_diff
benoitc__gunicorn-1441
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Supporting newest version of python Gunicorn currently doesn't run tests with python3.6. Since 3.6 is release and some of us are preparing to use it in production it would be great if gunicorn had confirmed support. Also `setup.py` classifiers doesn't include 3.5 or 3.6. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # -*- coding: utf-8 - 2 # 3 # This file is part of gunicorn released under the MIT license. 4 # See the NOTICE for more information. 5 6 import os 7 import sys 8 9 from setuptools import setup, find_packages 10 from setuptools.command.test import test as TestCommand 11 12 from gunicorn import __version__ 13 14 15 CLASSIFIERS = [ 16 'Development Status :: 4 - Beta', 17 'Environment :: Other Environment', 18 'Intended Audience :: Developers', 19 'License :: OSI Approved :: MIT License', 20 'Operating System :: MacOS :: MacOS X', 21 'Operating System :: POSIX', 22 'Programming Language :: Python', 23 'Programming Language :: Python :: 2', 24 'Programming Language :: Python :: 2.6', 25 'Programming Language :: Python :: 2.7', 26 'Programming Language :: Python :: 3', 27 'Programming Language :: Python :: 3.2', 28 'Programming Language :: Python :: 3.3', 29 'Programming Language :: Python :: 3.4', 30 'Topic :: Internet', 31 'Topic :: Utilities', 32 'Topic :: Software Development :: Libraries :: Python Modules', 33 'Topic :: Internet :: WWW/HTTP', 34 'Topic :: Internet :: WWW/HTTP :: WSGI', 35 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server', 36 'Topic :: Internet :: WWW/HTTP :: Dynamic Content'] 37 38 # read long description 39 with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f: 40 long_description = f.read() 41 42 # read dev requirements 43 fname = os.path.join(os.path.dirname(__file__), 'requirements_test.txt') 44 with open(fname) as f: 45 tests_require = [l.strip() for l in f.readlines()] 46 47 if sys.version_info[:2] < (3, 3): 48 tests_require.append('mock') 49 if sys.version_info[:2] < (2, 7): 50 tests_require.append('unittest2') 51 52 class PyTestCommand(TestCommand): 53 user_options = [ 54 ("cov", None, "measure coverage") 55 ] 56 57 def initialize_options(self): 58 TestCommand.initialize_options(self) 59 self.cov = None 60 61 def finalize_options(self): 62 TestCommand.finalize_options(self) 63 self.test_args = ['tests'] 64 if self.cov: 65 self.test_args += ['--cov', 'gunicorn'] 66 self.test_suite = True 67 68 def run_tests(self): 69 import pytest 70 errno = pytest.main(self.test_args) 71 sys.exit(errno) 72 73 setup( 74 name='gunicorn', 75 version=__version__, 76 77 description='WSGI HTTP Server for UNIX', 78 long_description=long_description, 79 author='Benoit Chesneau', 80 author_email='[email protected]', 81 license='MIT', 82 url='http://gunicorn.org', 83 84 classifiers=CLASSIFIERS, 85 zip_safe=False, 86 packages=find_packages(exclude=['examples', 'tests']), 87 include_package_data=True, 88 89 tests_require=tests_require, 90 cmdclass={'test': PyTestCommand}, 91 92 entry_points=""" 93 [console_scripts] 94 gunicorn=gunicorn.app.wsgiapp:run 95 gunicorn_django=gunicorn.app.djangoapp:run 96 gunicorn_paster=gunicorn.app.pasterapp:run 97 98 [paste.server_runner] 99 main=gunicorn.app.pasterapp:paste_server 100 """ 101 ) 102 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a 
patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -27,6 +27,8 @@ 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', 'Topic :: Internet', 'Topic :: Utilities', 'Topic :: Software Development :: Libraries :: Python Modules',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,6 +27,8 @@\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n+ 'Programming Language :: Python :: 3.5',\n+ 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet',\n 'Topic :: Utilities',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n", "issue": "Supporting newest version of python\nGunicorn currently doesn't run tests with python3.6.\r\n\r\nSince 3.6 is release and some of us are preparing to use it in production it would be great if gunicorn had confirmed support.\r\n\r\nAlso `setup.py` classifiers doesn't include 3.5 or 3.6.\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\nfrom gunicorn import __version__\n\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Internet',\n 'Topic :: Utilities',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content']\n\n# read long description\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:\n long_description = f.read()\n\n# read dev requirements\nfname = os.path.join(os.path.dirname(__file__), 'requirements_test.txt')\nwith open(fname) as f:\n tests_require = [l.strip() for l in f.readlines()]\n\nif sys.version_info[:2] < (3, 3):\n tests_require.append('mock')\nif sys.version_info[:2] < (2, 7):\n tests_require.append('unittest2')\n\nclass PyTestCommand(TestCommand):\n user_options = [\n (\"cov\", None, \"measure coverage\")\n ]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.cov = None\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['tests']\n if self.cov:\n self.test_args += ['--cov', 'gunicorn']\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\nsetup(\n name='gunicorn',\n version=__version__,\n\n description='WSGI HTTP Server for UNIX',\n long_description=long_description,\n author='Benoit Chesneau',\n author_email='[email protected]',\n license='MIT',\n url='http://gunicorn.org',\n\n classifiers=CLASSIFIERS,\n zip_safe=False,\n packages=find_packages(exclude=['examples', 'tests']),\n include_package_data=True,\n\n tests_require=tests_require,\n cmdclass={'test': PyTestCommand},\n\n entry_points=\"\"\"\n [console_scripts]\n gunicorn=gunicorn.app.wsgiapp:run\n gunicorn_django=gunicorn.app.djangoapp:run\n gunicorn_paster=gunicorn.app.pasterapp:run\n\n [paste.server_runner]\n main=gunicorn.app.pasterapp:paste_server\n 
\"\"\"\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\nfrom gunicorn import __version__\n\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet',\n 'Topic :: Utilities',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content']\n\n# read long description\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:\n long_description = f.read()\n\n# read dev requirements\nfname = os.path.join(os.path.dirname(__file__), 'requirements_test.txt')\nwith open(fname) as f:\n tests_require = [l.strip() for l in f.readlines()]\n\nif sys.version_info[:2] < (3, 3):\n tests_require.append('mock')\nif sys.version_info[:2] < (2, 7):\n tests_require.append('unittest2')\n\nclass PyTestCommand(TestCommand):\n user_options = [\n (\"cov\", None, \"measure coverage\")\n ]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.cov = None\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['tests']\n if self.cov:\n self.test_args += ['--cov', 'gunicorn']\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\nsetup(\n name='gunicorn',\n version=__version__,\n\n description='WSGI HTTP Server for UNIX',\n long_description=long_description,\n author='Benoit Chesneau',\n author_email='[email protected]',\n license='MIT',\n url='http://gunicorn.org',\n\n classifiers=CLASSIFIERS,\n zip_safe=False,\n packages=find_packages(exclude=['examples', 'tests']),\n include_package_data=True,\n\n tests_require=tests_require,\n cmdclass={'test': PyTestCommand},\n\n entry_points=\"\"\"\n [console_scripts]\n gunicorn=gunicorn.app.wsgiapp:run\n gunicorn_django=gunicorn.app.djangoapp:run\n gunicorn_paster=gunicorn.app.pasterapp:run\n\n [paste.server_runner]\n main=gunicorn.app.pasterapp:paste_server\n \"\"\"\n)\n", "path": "setup.py"}]}
1,239
118
gh_patches_debug_20410
rasdani/github-patches
git_diff
PlasmaPy__PlasmaPy-1692
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use `importlib.metadata` to get package version instead of `pkg_resources` Now that we're using Python 3.8+, we should switch to using `importlib.metadata` to get our version at runtime in `plasmapy/__init__.py`. We're using `pkg_resources` right now, but that has a "[significant runtime cost](https://github.com/pypa/setuptools_scm/#retrieving-package-version-at-runtime)". --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plasmapy/__init__.py` Content: ``` 1 """ 2 Welcome to the `plasmapy` package, an open source community-developed Python 3 package for the plasma community. Documentation is available in the docstrings 4 and online at https://docs.plasmapy.org (accessible also using the 5 :func:`~plasmapy.online_help` function). 6 """ 7 __all__ = [ 8 "online_help", 9 "analysis", 10 "diagnostics", 11 "dispersion", 12 "formulary", 13 "particles", 14 "plasma", 15 "simulation", 16 "utils", 17 "__version__", 18 "__citation__", 19 ] 20 21 # Enforce Python version check during package import. 22 # This is the same check as the one at the top of setup.py 23 import sys 24 25 if sys.version_info < (3, 8): # coverage: ignore 26 raise ImportError("PlasmaPy does not support Python < 3.8") 27 28 # Packages may add whatever they like to this file, but 29 # should keep this content at the top. 30 # ---------------------------------------------------------------------------- 31 import pkg_resources 32 33 from plasmapy import ( 34 analysis, 35 diagnostics, 36 dispersion, 37 formulary, 38 particles, 39 plasma, 40 simulation, 41 utils, 42 ) 43 44 # define version 45 try: 46 # this places a runtime dependency on setuptools 47 # 48 # note: if there's any distribution metadata in your source files, then this 49 # will find a version based on those files. Keep distribution metadata 50 # out of your repository unless you've intentionally installed the package 51 # as editable (e.g. `pip install -e {plasmapy_directory_root}`), 52 # but then __version__ will not be updated with each commit, it is 53 # frozen to the version at time of install. 
54 # 55 #: PlasmaPy version string 56 __version__ = pkg_resources.get_distribution("plasmapy").version 57 except pkg_resources.DistributionNotFound: 58 # package is not installed 59 fallback_version = "unknown" 60 try: 61 # code most likely being used from source 62 # if setuptools_scm is installed then generate a version 63 from setuptools_scm import get_version 64 65 __version__ = get_version( 66 root="..", relative_to=__file__, fallback_version=fallback_version 67 ) 68 del get_version 69 warn_add = "setuptools_scm failed to detect the version" 70 except ModuleNotFoundError: 71 # setuptools_scm is not installed 72 __version__ = fallback_version 73 warn_add = "setuptools_scm is not installed" 74 75 if __version__ == fallback_version: 76 from warnings import warn 77 78 warn( 79 f"plasmapy.__version__ not generated (set to 'unknown'), PlasmaPy is " 80 f"not an installed package and {warn_add}.", 81 RuntimeWarning, 82 ) 83 84 del warn 85 del fallback_version, warn_add 86 87 # ---------------------------------------------------------------------------- 88 #: PlasmaPy citation instructions 89 __citation__ = ( 90 "Instructions on how to cite and acknowledge PlasmaPy are provided in the " 91 "online documentation at: http://docs.plasmapy.org/en/stable/about/citation.html" 92 ) 93 94 95 def online_help(query: str): 96 """ 97 Open a webpage containing a search page in `PlasmaPy's documentation`_, 98 or another page that contains relevant online help. 99 100 This function requires an active internet connection, and will open 101 the page in the default web browser. 102 103 Parameters 104 ---------- 105 query : str 106 The search query. 107 """ 108 import webbrowser 109 110 from urllib.parse import urlencode 111 112 url = ( 113 "http://docs.plasmapy.org/en/stable/search.html?" 114 "{}&check_keywords=yes&area=default" 115 ).format(urlencode({"q": query})) 116 117 if query.lower() in ("unit", "units", "quantity", "quantities"): 118 url = "http://docs.astropy.org/en/stable/units/" 119 120 webbrowser.open(url) 121 122 123 del pkg_resources, sys 124 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py --- a/plasmapy/__init__.py +++ b/plasmapy/__init__.py @@ -28,7 +28,7 @@ # Packages may add whatever they like to this file, but # should keep this content at the top. # ---------------------------------------------------------------------------- -import pkg_resources +from importlib.metadata import PackageNotFoundError, version from plasmapy import ( analysis, @@ -53,8 +53,8 @@ # frozen to the version at time of install. # #: PlasmaPy version string - __version__ = pkg_resources.get_distribution("plasmapy").version -except pkg_resources.DistributionNotFound: + __version__ = version("plasmapy") +except PackageNotFoundError: # package is not installed fallback_version = "unknown" try: @@ -120,4 +120,4 @@ webbrowser.open(url) -del pkg_resources, sys +del sys
{"golden_diff": "diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py\n--- a/plasmapy/__init__.py\n+++ b/plasmapy/__init__.py\n@@ -28,7 +28,7 @@\n # Packages may add whatever they like to this file, but\n # should keep this content at the top.\n # ----------------------------------------------------------------------------\n-import pkg_resources\n+from importlib.metadata import PackageNotFoundError, version\n \n from plasmapy import (\n analysis,\n@@ -53,8 +53,8 @@\n # frozen to the version at time of install.\n #\n #: PlasmaPy version string\n- __version__ = pkg_resources.get_distribution(\"plasmapy\").version\n-except pkg_resources.DistributionNotFound:\n+ __version__ = version(\"plasmapy\")\n+except PackageNotFoundError:\n # package is not installed\n fallback_version = \"unknown\"\n try:\n@@ -120,4 +120,4 @@\n webbrowser.open(url)\n \n \n-del pkg_resources, sys\n+del sys\n", "issue": "Use `importlib.metadata` to get package version instead of `pkg_resources`\nNow that we're using Python 3.8+, we should switch to using `importlib.metadata` to get our version at runtime in `plasmapy/__init__.py`. We're using `pkg_resources` right now, but that has a \"[significant runtime cost](https://github.com/pypa/setuptools_scm/#retrieving-package-version-at-runtime)\".\n", "before_files": [{"content": "\"\"\"\nWelcome to the `plasmapy` package, an open source community-developed Python\npackage for the plasma community. Documentation is available in the docstrings\nand online at https://docs.plasmapy.org (accessible also using the\n:func:`~plasmapy.online_help` function).\n\"\"\"\n__all__ = [\n \"online_help\",\n \"analysis\",\n \"diagnostics\",\n \"dispersion\",\n \"formulary\",\n \"particles\",\n \"plasma\",\n \"simulation\",\n \"utils\",\n \"__version__\",\n \"__citation__\",\n]\n\n# Enforce Python version check during package import.\n# This is the same check as the one at the top of setup.py\nimport sys\n\nif sys.version_info < (3, 8): # coverage: ignore\n raise ImportError(\"PlasmaPy does not support Python < 3.8\")\n\n# Packages may add whatever they like to this file, but\n# should keep this content at the top.\n# ----------------------------------------------------------------------------\nimport pkg_resources\n\nfrom plasmapy import (\n analysis,\n diagnostics,\n dispersion,\n formulary,\n particles,\n plasma,\n simulation,\n utils,\n)\n\n# define version\ntry:\n # this places a runtime dependency on setuptools\n #\n # note: if there's any distribution metadata in your source files, then this\n # will find a version based on those files. Keep distribution metadata\n # out of your repository unless you've intentionally installed the package\n # as editable (e.g. 
`pip install -e {plasmapy_directory_root}`),\n # but then __version__ will not be updated with each commit, it is\n # frozen to the version at time of install.\n #\n #: PlasmaPy version string\n __version__ = pkg_resources.get_distribution(\"plasmapy\").version\nexcept pkg_resources.DistributionNotFound:\n # package is not installed\n fallback_version = \"unknown\"\n try:\n # code most likely being used from source\n # if setuptools_scm is installed then generate a version\n from setuptools_scm import get_version\n\n __version__ = get_version(\n root=\"..\", relative_to=__file__, fallback_version=fallback_version\n )\n del get_version\n warn_add = \"setuptools_scm failed to detect the version\"\n except ModuleNotFoundError:\n # setuptools_scm is not installed\n __version__ = fallback_version\n warn_add = \"setuptools_scm is not installed\"\n\n if __version__ == fallback_version:\n from warnings import warn\n\n warn(\n f\"plasmapy.__version__ not generated (set to 'unknown'), PlasmaPy is \"\n f\"not an installed package and {warn_add}.\",\n RuntimeWarning,\n )\n\n del warn\n del fallback_version, warn_add\n\n# ----------------------------------------------------------------------------\n#: PlasmaPy citation instructions\n__citation__ = (\n \"Instructions on how to cite and acknowledge PlasmaPy are provided in the \"\n \"online documentation at: http://docs.plasmapy.org/en/stable/about/citation.html\"\n)\n\n\ndef online_help(query: str):\n \"\"\"\n Open a webpage containing a search page in `PlasmaPy's documentation`_,\n or another page that contains relevant online help.\n\n This function requires an active internet connection, and will open\n the page in the default web browser.\n\n Parameters\n ----------\n query : str\n The search query.\n \"\"\"\n import webbrowser\n\n from urllib.parse import urlencode\n\n url = (\n \"http://docs.plasmapy.org/en/stable/search.html?\"\n \"{}&check_keywords=yes&area=default\"\n ).format(urlencode({\"q\": query}))\n\n if query.lower() in (\"unit\", \"units\", \"quantity\", \"quantities\"):\n url = \"http://docs.astropy.org/en/stable/units/\"\n\n webbrowser.open(url)\n\n\ndel pkg_resources, sys\n", "path": "plasmapy/__init__.py"}], "after_files": [{"content": "\"\"\"\nWelcome to the `plasmapy` package, an open source community-developed Python\npackage for the plasma community. Documentation is available in the docstrings\nand online at https://docs.plasmapy.org (accessible also using the\n:func:`~plasmapy.online_help` function).\n\"\"\"\n__all__ = [\n \"online_help\",\n \"analysis\",\n \"diagnostics\",\n \"dispersion\",\n \"formulary\",\n \"particles\",\n \"plasma\",\n \"simulation\",\n \"utils\",\n \"__version__\",\n \"__citation__\",\n]\n\n# Enforce Python version check during package import.\n# This is the same check as the one at the top of setup.py\nimport sys\n\nif sys.version_info < (3, 8): # coverage: ignore\n raise ImportError(\"PlasmaPy does not support Python < 3.8\")\n\n# Packages may add whatever they like to this file, but\n# should keep this content at the top.\n# ----------------------------------------------------------------------------\nfrom importlib.metadata import PackageNotFoundError, version\n\nfrom plasmapy import (\n analysis,\n diagnostics,\n dispersion,\n formulary,\n particles,\n plasma,\n simulation,\n utils,\n)\n\n# define version\ntry:\n # this places a runtime dependency on setuptools\n #\n # note: if there's any distribution metadata in your source files, then this\n # will find a version based on those files. 
Keep distribution metadata\n # out of your repository unless you've intentionally installed the package\n # as editable (e.g. `pip install -e {plasmapy_directory_root}`),\n # but then __version__ will not be updated with each commit, it is\n # frozen to the version at time of install.\n #\n #: PlasmaPy version string\n __version__ = version(\"plasmapy\")\nexcept PackageNotFoundError:\n # package is not installed\n fallback_version = \"unknown\"\n try:\n # code most likely being used from source\n # if setuptools_scm is installed then generate a version\n from setuptools_scm import get_version\n\n __version__ = get_version(\n root=\"..\", relative_to=__file__, fallback_version=fallback_version\n )\n del get_version\n warn_add = \"setuptools_scm failed to detect the version\"\n except ModuleNotFoundError:\n # setuptools_scm is not installed\n __version__ = fallback_version\n warn_add = \"setuptools_scm is not installed\"\n\n if __version__ == fallback_version:\n from warnings import warn\n\n warn(\n f\"plasmapy.__version__ not generated (set to 'unknown'), PlasmaPy is \"\n f\"not an installed package and {warn_add}.\",\n RuntimeWarning,\n )\n\n del warn\n del fallback_version, warn_add\n\n# ----------------------------------------------------------------------------\n#: PlasmaPy citation instructions\n__citation__ = (\n \"Instructions on how to cite and acknowledge PlasmaPy are provided in the \"\n \"online documentation at: http://docs.plasmapy.org/en/stable/about/citation.html\"\n)\n\n\ndef online_help(query: str):\n \"\"\"\n Open a webpage containing a search page in `PlasmaPy's documentation`_,\n or another page that contains relevant online help.\n\n This function requires an active internet connection, and will open\n the page in the default web browser.\n\n Parameters\n ----------\n query : str\n The search query.\n \"\"\"\n import webbrowser\n\n from urllib.parse import urlencode\n\n url = (\n \"http://docs.plasmapy.org/en/stable/search.html?\"\n \"{}&check_keywords=yes&area=default\"\n ).format(urlencode({\"q\": query}))\n\n if query.lower() in (\"unit\", \"units\", \"quantity\", \"quantities\"):\n url = \"http://docs.astropy.org/en/stable/units/\"\n\n webbrowser.open(url)\n\n\ndel sys\n", "path": "plasmapy/__init__.py"}]}
1,469
230
gh_patches_debug_37016
rasdani/github-patches
git_diff
wger-project__wger-543
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Feature Request: remove or edit weight entry - [ ] Remove or edit weight entry --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wger/weight/views.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # This file is part of wger Workout Manager. 4 # 5 # wger Workout Manager is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU Affero General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # wger Workout Manager is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU Affero General Public License 16 17 # Standard Library 18 import csv 19 import datetime 20 import logging 21 22 # Django 23 from django.contrib.auth.decorators import login_required 24 from django.db.models import ( 25 Max, 26 Min 27 ) 28 from django.http import ( 29 HttpResponse, 30 HttpResponseRedirect 31 ) 32 from django.shortcuts import render 33 from django.urls import reverse 34 from django.utils.translation import ( 35 ugettext as _, 36 ugettext_lazy 37 ) 38 from django.views.generic import ( 39 CreateView, 40 UpdateView 41 ) 42 43 # Third Party 44 from formtools.preview import FormPreview 45 from rest_framework.decorators import api_view 46 from rest_framework.response import Response 47 48 # wger 49 from wger.utils.generic_views import WgerFormMixin 50 from wger.utils.helpers import check_access 51 from wger.weight import helpers 52 from wger.weight.forms import WeightForm 53 from wger.weight.models import WeightEntry 54 55 56 logger = logging.getLogger(__name__) 57 58 59 class WeightAddView(WgerFormMixin, CreateView): 60 """ 61 Generic view to add a new weight entry 62 """ 63 model = WeightEntry 64 form_class = WeightForm 65 title = ugettext_lazy('Add weight entry') 66 67 def get_initial(self): 68 """ 69 Set the initial data for the form. 70 71 Read the comment on weight/models.py WeightEntry about why we need 72 to pass the user here. 
73 """ 74 return {'user': self.request.user, 75 'date': datetime.date.today()} 76 77 def form_valid(self, form): 78 """ 79 Set the owner of the entry here 80 """ 81 form.instance.user = self.request.user 82 return super(WeightAddView, self).form_valid(form) 83 84 def get_success_url(self): 85 """ 86 Return to overview with username 87 """ 88 return reverse('weight:overview', kwargs={'username': self.object.user.username}) 89 90 91 class WeightUpdateView(WgerFormMixin, UpdateView): 92 """ 93 Generic view to edit an existing weight entry 94 """ 95 model = WeightEntry 96 form_class = WeightForm 97 98 def get_context_data(self, **kwargs): 99 context = super(WeightUpdateView, self).get_context_data(**kwargs) 100 context['title'] = _('Edit weight entry for the %s') % self.object.date 101 102 return context 103 104 def get_success_url(self): 105 """ 106 Return to overview with username 107 """ 108 return reverse('weight:overview', kwargs={'username': self.object.user.username}) 109 110 111 @login_required 112 def export_csv(request): 113 """ 114 Exports the saved weight data as a CSV file 115 """ 116 117 # Prepare the response headers 118 response = HttpResponse(content_type='text/csv') 119 120 # Convert all weight data to CSV 121 writer = csv.writer(response) 122 123 weights = WeightEntry.objects.filter(user=request.user) 124 writer.writerow([_('Weight'), _('Date')]) 125 126 for entry in weights: 127 writer.writerow([entry.weight, entry.date]) 128 129 # Send the data to the browser 130 response['Content-Disposition'] = 'attachment; filename=Weightdata.csv' 131 response['Content-Length'] = len(response.content) 132 return response 133 134 135 def overview(request, username=None): 136 """ 137 Shows a plot with the weight data 138 139 More info about the D3 library can be found here: 140 * https://github.com/mbostock/d3 141 * http://d3js.org/ 142 """ 143 is_owner, user = check_access(request.user, username) 144 145 template_data = {} 146 147 min_date = WeightEntry.objects.filter(user=user).\ 148 aggregate(Min('date'))['date__min'] 149 max_date = WeightEntry.objects.filter(user=user).\ 150 aggregate(Max('date'))['date__max'] 151 if min_date: 152 template_data['min_date'] = 'new Date(%(year)s, %(month)s, %(day)s)' % \ 153 {'year': min_date.year, 154 'month': min_date.month, 155 'day': min_date.day} 156 if max_date: 157 template_data['max_date'] = 'new Date(%(year)s, %(month)s, %(day)s)' % \ 158 {'year': max_date.year, 159 'month': max_date.month, 160 'day': max_date.day} 161 162 last_weight_entries = helpers.get_last_entries(user) 163 164 template_data['is_owner'] = is_owner 165 template_data['owner_user'] = user 166 template_data['show_shariff'] = is_owner 167 template_data['last_five_weight_entries_details'] = last_weight_entries 168 return render(request, 'overview.html', template_data) 169 170 171 @api_view(['GET']) 172 def get_weight_data(request, username=None): 173 """ 174 Process the data to pass it to the JS libraries to generate an SVG image 175 """ 176 177 is_owner, user = check_access(request.user, username) 178 179 date_min = request.GET.get('date_min', False) 180 date_max = request.GET.get('date_max', True) 181 182 if date_min and date_max: 183 weights = WeightEntry.objects.filter(user=user, 184 date__range=(date_min, date_max)) 185 else: 186 weights = WeightEntry.objects.filter(user=user) 187 188 chart_data = [] 189 190 for i in weights: 191 chart_data.append({'date': i.date, 192 'weight': i.weight}) 193 194 # Return the results to the client 195 return Response(chart_data) 196 197 198 
class WeightCsvImportFormPreview(FormPreview): 199 preview_template = 'import_csv_preview.html' 200 form_template = 'import_csv_form.html' 201 202 def get_context(self, request, form): 203 """ 204 Context for template rendering. 205 """ 206 207 return {'form': form, 208 'stage_field': self.unused_name('stage'), 209 'state': self.state} 210 211 def process_preview(self, request, form, context): 212 context['weight_list'], context['error_list'] = helpers.parse_weight_csv(request, 213 form.cleaned_data) 214 return context 215 216 def done(self, request, cleaned_data): 217 weight_list, error_list = helpers.parse_weight_csv(request, cleaned_data) 218 WeightEntry.objects.bulk_create(weight_list) 219 return HttpResponseRedirect(reverse('weight:overview', 220 kwargs={'username': request.user.username})) 221 ``` Path: `wger/weight/urls.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # This file is part of wger Workout Manager. 4 # 5 # wger Workout Manager is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU Affero General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # wger Workout Manager is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU Affero General Public License 16 # along with Workout Manager. If not, see <http://www.gnu.org/licenses/>. 17 18 # Django 19 from django.conf.urls import url 20 from django.contrib.auth.decorators import login_required 21 22 # wger 23 from wger.weight import views 24 from wger.weight.forms import WeightCsvImportForm 25 26 27 urlpatterns = [ 28 url(r'^add/$', 29 login_required(views.WeightAddView.as_view()), 30 name='add'), 31 32 url(r'^(?P<pk>\d+)/edit/$', 33 login_required(views.WeightUpdateView.as_view()), 34 name='edit'), 35 36 url(r'^export-csv/$', 37 views.export_csv, 38 name='export-csv'), 39 url(r'^import-csv/$', 40 login_required(views.WeightCsvImportFormPreview(WeightCsvImportForm)), 41 name='import-csv'), 42 43 url(r'^overview/(?P<username>[\w.@+-]+)$', 44 views.overview, 45 name='overview'), 46 # url(r'^overview/$', 47 # views.overview, 48 # name='overview'), 49 url(r'^api/get_weight_data/(?P<username>[\w.@+-]+)$', # JS 50 views.get_weight_data, 51 name='weight-data'), 52 url(r'^api/get_weight_data/$', # JS 53 views.get_weight_data, 54 name='weight-data'), 55 ] 56 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wger/weight/urls.py b/wger/weight/urls.py --- a/wger/weight/urls.py +++ b/wger/weight/urls.py @@ -33,6 +33,10 @@ login_required(views.WeightUpdateView.as_view()), name='edit'), + url(r'^(?P<pk>\d+)/delete/$', + views.WeightDeleteView.as_view(), + name='delete'), + url(r'^export-csv/$', views.export_csv, name='export-csv'), diff --git a/wger/weight/views.py b/wger/weight/views.py --- a/wger/weight/views.py +++ b/wger/weight/views.py @@ -21,6 +21,7 @@ # Django from django.contrib.auth.decorators import login_required +from django.contrib.auth.mixins import LoginRequiredMixin from django.db.models import ( Max, Min @@ -37,7 +38,8 @@ ) from django.views.generic import ( CreateView, - UpdateView + UpdateView, + DeleteView ) # Third Party @@ -46,7 +48,10 @@ from rest_framework.response import Response # wger -from wger.utils.generic_views import WgerFormMixin +from wger.utils.generic_views import ( + WgerFormMixin, + WgerDeleteMixin +) from wger.utils.helpers import check_access from wger.weight import helpers from wger.weight.forms import WeightForm @@ -88,7 +93,7 @@ return reverse('weight:overview', kwargs={'username': self.object.user.username}) -class WeightUpdateView(WgerFormMixin, UpdateView): +class WeightUpdateView(WgerFormMixin, LoginRequiredMixin, UpdateView): """ Generic view to edit an existing weight entry """ @@ -108,6 +113,28 @@ return reverse('weight:overview', kwargs={'username': self.object.user.username}) +class WeightDeleteView(WgerDeleteMixin, LoginRequiredMixin, DeleteView): + """ + Generic view to delete a weight entry + """ + + model = WeightEntry + fields = ('weight',) + + messages = ugettext_lazy('Successfully deleted.') + + def get_context_data(self, **kwargs): + context = super(WeightDeleteView, self).get_context_data(**kwargs) + context['title'] = _('Delete weight entry for the %s') % self.object.date + return context + + def get_success_url(self): + """ + Return to overview with username + """ + return reverse('weight:overview', kwargs={'username': self.object.user.username}) + + @login_required def export_csv(request): """
{"golden_diff": "diff --git a/wger/weight/urls.py b/wger/weight/urls.py\n--- a/wger/weight/urls.py\n+++ b/wger/weight/urls.py\n@@ -33,6 +33,10 @@\n login_required(views.WeightUpdateView.as_view()),\n name='edit'),\n \n+ url(r'^(?P<pk>\\d+)/delete/$',\n+ views.WeightDeleteView.as_view(),\n+ name='delete'),\n+\n url(r'^export-csv/$',\n views.export_csv,\n name='export-csv'),\ndiff --git a/wger/weight/views.py b/wger/weight/views.py\n--- a/wger/weight/views.py\n+++ b/wger/weight/views.py\n@@ -21,6 +21,7 @@\n \n # Django\n from django.contrib.auth.decorators import login_required\n+from django.contrib.auth.mixins import LoginRequiredMixin\n from django.db.models import (\n Max,\n Min\n@@ -37,7 +38,8 @@\n )\n from django.views.generic import (\n CreateView,\n- UpdateView\n+ UpdateView,\n+ DeleteView\n )\n \n # Third Party\n@@ -46,7 +48,10 @@\n from rest_framework.response import Response\n \n # wger\n-from wger.utils.generic_views import WgerFormMixin\n+from wger.utils.generic_views import (\n+ WgerFormMixin,\n+ WgerDeleteMixin\n+)\n from wger.utils.helpers import check_access\n from wger.weight import helpers\n from wger.weight.forms import WeightForm\n@@ -88,7 +93,7 @@\n return reverse('weight:overview', kwargs={'username': self.object.user.username})\n \n \n-class WeightUpdateView(WgerFormMixin, UpdateView):\n+class WeightUpdateView(WgerFormMixin, LoginRequiredMixin, UpdateView):\n \"\"\"\n Generic view to edit an existing weight entry\n \"\"\"\n@@ -108,6 +113,28 @@\n return reverse('weight:overview', kwargs={'username': self.object.user.username})\n \n \n+class WeightDeleteView(WgerDeleteMixin, LoginRequiredMixin, DeleteView):\n+ \"\"\"\n+ Generic view to delete a weight entry\n+ \"\"\"\n+\n+ model = WeightEntry\n+ fields = ('weight',)\n+\n+ messages = ugettext_lazy('Successfully deleted.')\n+\n+ def get_context_data(self, **kwargs):\n+ context = super(WeightDeleteView, self).get_context_data(**kwargs)\n+ context['title'] = _('Delete weight entry for the %s') % self.object.date\n+ return context\n+\n+ def get_success_url(self):\n+ \"\"\"\n+ Return to overview with username\n+ \"\"\"\n+ return reverse('weight:overview', kwargs={'username': self.object.user.username})\n+\n+\n @login_required\n def export_csv(request):\n \"\"\"\n", "issue": "Feature Request: remove or edit weight entry\n- [ ] Remove or edit weight entry\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This file is part of wger Workout Manager.\n#\n# wger Workout Manager is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# wger Workout Manager is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n\n# Standard Library\nimport csv\nimport datetime\nimport logging\n\n# Django\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import (\n Max,\n Min\n)\nfrom django.http import (\n HttpResponse,\n HttpResponseRedirect\n)\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.utils.translation import (\n ugettext as _,\n ugettext_lazy\n)\nfrom django.views.generic import (\n CreateView,\n UpdateView\n)\n\n# Third Party\nfrom formtools.preview import FormPreview\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n# wger\nfrom wger.utils.generic_views import WgerFormMixin\nfrom wger.utils.helpers import check_access\nfrom wger.weight import helpers\nfrom wger.weight.forms import WeightForm\nfrom wger.weight.models import WeightEntry\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass WeightAddView(WgerFormMixin, CreateView):\n \"\"\"\n Generic view to add a new weight entry\n \"\"\"\n model = WeightEntry\n form_class = WeightForm\n title = ugettext_lazy('Add weight entry')\n\n def get_initial(self):\n \"\"\"\n Set the initial data for the form.\n\n Read the comment on weight/models.py WeightEntry about why we need\n to pass the user here.\n \"\"\"\n return {'user': self.request.user,\n 'date': datetime.date.today()}\n\n def form_valid(self, form):\n \"\"\"\n Set the owner of the entry here\n \"\"\"\n form.instance.user = self.request.user\n return super(WeightAddView, self).form_valid(form)\n\n def get_success_url(self):\n \"\"\"\n Return to overview with username\n \"\"\"\n return reverse('weight:overview', kwargs={'username': self.object.user.username})\n\n\nclass WeightUpdateView(WgerFormMixin, UpdateView):\n \"\"\"\n Generic view to edit an existing weight entry\n \"\"\"\n model = WeightEntry\n form_class = WeightForm\n\n def get_context_data(self, **kwargs):\n context = super(WeightUpdateView, self).get_context_data(**kwargs)\n context['title'] = _('Edit weight entry for the %s') % self.object.date\n\n return context\n\n def get_success_url(self):\n \"\"\"\n Return to overview with username\n \"\"\"\n return reverse('weight:overview', kwargs={'username': self.object.user.username})\n\n\n@login_required\ndef export_csv(request):\n \"\"\"\n Exports the saved weight data as a CSV file\n \"\"\"\n\n # Prepare the response headers\n response = HttpResponse(content_type='text/csv')\n\n # Convert all weight data to CSV\n writer = csv.writer(response)\n\n weights = WeightEntry.objects.filter(user=request.user)\n writer.writerow([_('Weight'), _('Date')])\n\n for entry in weights:\n writer.writerow([entry.weight, entry.date])\n\n # Send the data to the browser\n response['Content-Disposition'] = 'attachment; filename=Weightdata.csv'\n response['Content-Length'] = len(response.content)\n return response\n\n\ndef overview(request, username=None):\n \"\"\"\n Shows a plot with the weight data\n\n More info about the D3 library can be found here:\n * https://github.com/mbostock/d3\n * http://d3js.org/\n \"\"\"\n is_owner, user = check_access(request.user, username)\n\n template_data = {}\n\n min_date = WeightEntry.objects.filter(user=user).\\\n aggregate(Min('date'))['date__min']\n max_date = WeightEntry.objects.filter(user=user).\\\n aggregate(Max('date'))['date__max']\n if min_date:\n template_data['min_date'] = 'new Date(%(year)s, %(month)s, %(day)s)' % \\\n 
{'year': min_date.year,\n 'month': min_date.month,\n 'day': min_date.day}\n if max_date:\n template_data['max_date'] = 'new Date(%(year)s, %(month)s, %(day)s)' % \\\n {'year': max_date.year,\n 'month': max_date.month,\n 'day': max_date.day}\n\n last_weight_entries = helpers.get_last_entries(user)\n\n template_data['is_owner'] = is_owner\n template_data['owner_user'] = user\n template_data['show_shariff'] = is_owner\n template_data['last_five_weight_entries_details'] = last_weight_entries\n return render(request, 'overview.html', template_data)\n\n\n@api_view(['GET'])\ndef get_weight_data(request, username=None):\n \"\"\"\n Process the data to pass it to the JS libraries to generate an SVG image\n \"\"\"\n\n is_owner, user = check_access(request.user, username)\n\n date_min = request.GET.get('date_min', False)\n date_max = request.GET.get('date_max', True)\n\n if date_min and date_max:\n weights = WeightEntry.objects.filter(user=user,\n date__range=(date_min, date_max))\n else:\n weights = WeightEntry.objects.filter(user=user)\n\n chart_data = []\n\n for i in weights:\n chart_data.append({'date': i.date,\n 'weight': i.weight})\n\n # Return the results to the client\n return Response(chart_data)\n\n\nclass WeightCsvImportFormPreview(FormPreview):\n preview_template = 'import_csv_preview.html'\n form_template = 'import_csv_form.html'\n\n def get_context(self, request, form):\n \"\"\"\n Context for template rendering.\n \"\"\"\n\n return {'form': form,\n 'stage_field': self.unused_name('stage'),\n 'state': self.state}\n\n def process_preview(self, request, form, context):\n context['weight_list'], context['error_list'] = helpers.parse_weight_csv(request,\n form.cleaned_data)\n return context\n\n def done(self, request, cleaned_data):\n weight_list, error_list = helpers.parse_weight_csv(request, cleaned_data)\n WeightEntry.objects.bulk_create(weight_list)\n return HttpResponseRedirect(reverse('weight:overview',\n kwargs={'username': request.user.username}))\n", "path": "wger/weight/views.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# This file is part of wger Workout Manager.\n#\n# wger Workout Manager is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# wger Workout Manager is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with Workout Manager. 
If not, see <http://www.gnu.org/licenses/>.\n\n# Django\nfrom django.conf.urls import url\nfrom django.contrib.auth.decorators import login_required\n\n# wger\nfrom wger.weight import views\nfrom wger.weight.forms import WeightCsvImportForm\n\n\nurlpatterns = [\n url(r'^add/$',\n login_required(views.WeightAddView.as_view()),\n name='add'),\n\n url(r'^(?P<pk>\\d+)/edit/$',\n login_required(views.WeightUpdateView.as_view()),\n name='edit'),\n\n url(r'^export-csv/$',\n views.export_csv,\n name='export-csv'),\n url(r'^import-csv/$',\n login_required(views.WeightCsvImportFormPreview(WeightCsvImportForm)),\n name='import-csv'),\n\n url(r'^overview/(?P<username>[\\w.@+-]+)$',\n views.overview,\n name='overview'),\n # url(r'^overview/$',\n # views.overview,\n # name='overview'),\n url(r'^api/get_weight_data/(?P<username>[\\w.@+-]+)$', # JS\n views.get_weight_data,\n name='weight-data'),\n url(r'^api/get_weight_data/$', # JS\n views.get_weight_data,\n name='weight-data'),\n]\n", "path": "wger/weight/urls.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This file is part of wger Workout Manager.\n#\n# wger Workout Manager is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# wger Workout Manager is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n\n# Standard Library\nimport csv\nimport datetime\nimport logging\n\n# Django\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import (\n Max,\n Min\n)\nfrom django.http import (\n HttpResponse,\n HttpResponseRedirect\n)\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.utils.translation import (\n ugettext as _,\n ugettext_lazy\n)\nfrom django.views.generic import (\n CreateView,\n UpdateView,\n DeleteView\n)\n\n# Third Party\nfrom formtools.preview import FormPreview\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n# wger\nfrom wger.utils.generic_views import (\n WgerFormMixin,\n WgerDeleteMixin\n)\nfrom wger.utils.helpers import check_access\nfrom wger.weight import helpers\nfrom wger.weight.forms import WeightForm\nfrom wger.weight.models import WeightEntry\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass WeightAddView(WgerFormMixin, CreateView):\n \"\"\"\n Generic view to add a new weight entry\n \"\"\"\n model = WeightEntry\n form_class = WeightForm\n title = ugettext_lazy('Add weight entry')\n\n def get_initial(self):\n \"\"\"\n Set the initial data for the form.\n\n Read the comment on weight/models.py WeightEntry about why we need\n to pass the user here.\n \"\"\"\n return {'user': self.request.user,\n 'date': datetime.date.today()}\n\n def form_valid(self, form):\n \"\"\"\n Set the owner of the entry here\n \"\"\"\n form.instance.user = self.request.user\n return super(WeightAddView, self).form_valid(form)\n\n def get_success_url(self):\n \"\"\"\n Return to overview with username\n \"\"\"\n return reverse('weight:overview', kwargs={'username': self.object.user.username})\n\n\nclass WeightUpdateView(WgerFormMixin, 
LoginRequiredMixin, UpdateView):\n \"\"\"\n Generic view to edit an existing weight entry\n \"\"\"\n model = WeightEntry\n form_class = WeightForm\n\n def get_context_data(self, **kwargs):\n context = super(WeightUpdateView, self).get_context_data(**kwargs)\n context['title'] = _('Edit weight entry for the %s') % self.object.date\n\n return context\n\n def get_success_url(self):\n \"\"\"\n Return to overview with username\n \"\"\"\n return reverse('weight:overview', kwargs={'username': self.object.user.username})\n\n\nclass WeightDeleteView(WgerDeleteMixin, LoginRequiredMixin, DeleteView):\n \"\"\"\n Generic view to delete a weight entry\n \"\"\"\n\n model = WeightEntry\n fields = ('weight',)\n\n messages = ugettext_lazy('Successfully deleted.')\n\n def get_context_data(self, **kwargs):\n context = super(WeightDeleteView, self).get_context_data(**kwargs)\n context['title'] = _('Delete weight entry for the %s') % self.object.date\n return context\n\n def get_success_url(self):\n \"\"\"\n Return to overview with username\n \"\"\"\n return reverse('weight:overview', kwargs={'username': self.object.user.username})\n\n\n@login_required\ndef export_csv(request):\n \"\"\"\n Exports the saved weight data as a CSV file\n \"\"\"\n\n # Prepare the response headers\n response = HttpResponse(content_type='text/csv')\n\n # Convert all weight data to CSV\n writer = csv.writer(response)\n\n weights = WeightEntry.objects.filter(user=request.user)\n writer.writerow([_('Weight'), _('Date')])\n\n for entry in weights:\n writer.writerow([entry.weight, entry.date])\n\n # Send the data to the browser\n response['Content-Disposition'] = 'attachment; filename=Weightdata.csv'\n response['Content-Length'] = len(response.content)\n return response\n\n\ndef overview(request, username=None):\n \"\"\"\n Shows a plot with the weight data\n\n More info about the D3 library can be found here:\n * https://github.com/mbostock/d3\n * http://d3js.org/\n \"\"\"\n is_owner, user = check_access(request.user, username)\n\n template_data = {}\n\n min_date = WeightEntry.objects.filter(user=user).\\\n aggregate(Min('date'))['date__min']\n max_date = WeightEntry.objects.filter(user=user).\\\n aggregate(Max('date'))['date__max']\n if min_date:\n template_data['min_date'] = 'new Date(%(year)s, %(month)s, %(day)s)' % \\\n {'year': min_date.year,\n 'month': min_date.month,\n 'day': min_date.day}\n if max_date:\n template_data['max_date'] = 'new Date(%(year)s, %(month)s, %(day)s)' % \\\n {'year': max_date.year,\n 'month': max_date.month,\n 'day': max_date.day}\n\n last_weight_entries = helpers.get_last_entries(user)\n\n template_data['is_owner'] = is_owner\n template_data['owner_user'] = user\n template_data['show_shariff'] = is_owner\n template_data['last_five_weight_entries_details'] = last_weight_entries\n return render(request, 'overview.html', template_data)\n\n\n@api_view(['GET'])\ndef get_weight_data(request, username=None):\n \"\"\"\n Process the data to pass it to the JS libraries to generate an SVG image\n \"\"\"\n\n is_owner, user = check_access(request.user, username)\n\n date_min = request.GET.get('date_min', False)\n date_max = request.GET.get('date_max', True)\n\n if date_min and date_max:\n weights = WeightEntry.objects.filter(user=user,\n date__range=(date_min, date_max))\n else:\n weights = WeightEntry.objects.filter(user=user)\n\n chart_data = []\n\n for i in weights:\n chart_data.append({'date': i.date,\n 'weight': i.weight})\n\n # Return the results to the client\n return Response(chart_data)\n\n\nclass 
WeightCsvImportFormPreview(FormPreview):\n preview_template = 'import_csv_preview.html'\n form_template = 'import_csv_form.html'\n\n def get_context(self, request, form):\n \"\"\"\n Context for template rendering.\n \"\"\"\n\n return {'form': form,\n 'stage_field': self.unused_name('stage'),\n 'state': self.state}\n\n def process_preview(self, request, form, context):\n context['weight_list'], context['error_list'] = helpers.parse_weight_csv(request,\n form.cleaned_data)\n return context\n\n def done(self, request, cleaned_data):\n weight_list, error_list = helpers.parse_weight_csv(request, cleaned_data)\n WeightEntry.objects.bulk_create(weight_list)\n return HttpResponseRedirect(reverse('weight:overview',\n kwargs={'username': request.user.username}))\n", "path": "wger/weight/views.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# This file is part of wger Workout Manager.\n#\n# wger Workout Manager is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# wger Workout Manager is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.\n\n# Django\nfrom django.conf.urls import url\nfrom django.contrib.auth.decorators import login_required\n\n# wger\nfrom wger.weight import views\nfrom wger.weight.forms import WeightCsvImportForm\n\n\nurlpatterns = [\n url(r'^add/$',\n login_required(views.WeightAddView.as_view()),\n name='add'),\n\n url(r'^(?P<pk>\\d+)/edit/$',\n login_required(views.WeightUpdateView.as_view()),\n name='edit'),\n\n url(r'^(?P<pk>\\d+)/delete/$',\n views.WeightDeleteView.as_view(),\n name='delete'),\n\n url(r'^export-csv/$',\n views.export_csv,\n name='export-csv'),\n url(r'^import-csv/$',\n login_required(views.WeightCsvImportFormPreview(WeightCsvImportForm)),\n name='import-csv'),\n\n url(r'^overview/(?P<username>[\\w.@+-]+)$',\n views.overview,\n name='overview'),\n # url(r'^overview/$',\n # views.overview,\n # name='overview'),\n url(r'^api/get_weight_data/(?P<username>[\\w.@+-]+)$', # JS\n views.get_weight_data,\n name='weight-data'),\n url(r'^api/get_weight_data/$', # JS\n views.get_weight_data,\n name='weight-data'),\n]\n", "path": "wger/weight/urls.py"}]}
2,839
611
gh_patches_debug_10067
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-2529
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Handle 404 pages throughout the app ## Description * Client side routing: A non existent url results in a blank screen, or has obscure errors. * Server side routing: A non existent url entered on the browser results in a 404, which returns a django 404 page. ## Expected behavior * 404s should be clearly presented to the user in all valid cases. * The 404 page should be consistent in appearance in both server side & client side routing. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mathesar/views.py` Content: ``` 1 from django.conf import settings 2 from django.contrib.auth.decorators import login_required 3 from django.shortcuts import render, redirect, get_object_or_404 4 from rest_framework import status 5 from rest_framework.decorators import api_view 6 from rest_framework.response import Response 7 8 from mathesar.api.db.permissions.database import DatabaseAccessPolicy 9 from mathesar.api.db.permissions.query import QueryAccessPolicy 10 from mathesar.api.db.permissions.schema import SchemaAccessPolicy 11 from mathesar.api.db.permissions.table import TableAccessPolicy 12 from mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer 13 from mathesar.api.serializers.schemas import SchemaSerializer 14 from mathesar.api.serializers.tables import TableSerializer 15 from mathesar.api.serializers.queries import QuerySerializer 16 from mathesar.api.ui.serializers.users import UserSerializer 17 from mathesar.database.types import UIType 18 from mathesar.models.base import Database, Schema, Table 19 from mathesar.models.query import UIQuery 20 from mathesar.state import reset_reflection 21 from mathesar import __version__ 22 23 24 def get_schema_list(request, database): 25 qs = Schema.objects.filter(database=database) 26 permission_restricted_qs = SchemaAccessPolicy.scope_queryset(request, qs) 27 schema_serializer = SchemaSerializer( 28 permission_restricted_qs, 29 many=True, 30 context={'request': request} 31 ) 32 return schema_serializer.data 33 34 35 def _get_permissible_db_queryset(request): 36 qs = Database.objects.all() 37 permission_restricted_qs = DatabaseAccessPolicy.scope_queryset(request, qs) 38 schema_qs = Schema.objects.all() 39 permitted_schemas = SchemaAccessPolicy.scope_queryset(request, schema_qs) 40 databases_from_permitted_schema = Database.objects.filter(schemas__in=permitted_schemas) 41 permission_restricted_qs = permission_restricted_qs | databases_from_permitted_schema 42 return permission_restricted_qs.distinct() 43 44 45 def get_database_list(request): 46 permission_restricted_db_qs = _get_permissible_db_queryset(request) 47 database_serializer = DatabaseSerializer( 48 permission_restricted_db_qs, 49 many=True, 50 context={'request': request} 51 ) 52 return database_serializer.data 53 54 55 def get_table_list(request, schema): 56 if schema is None: 57 return [] 58 qs = Table.objects.filter(schema=schema) 59 permission_restricted_qs = TableAccessPolicy.scope_queryset(request, qs) 60 table_serializer = TableSerializer( 61 permission_restricted_qs, 62 many=True, 63 context={'request': request} 64 ) 65 return table_serializer.data 66 67 68 def get_queries_list(request, schema): 69 if schema is None: 70 return [] 71 qs = UIQuery.objects.filter(base_table__schema=schema) 72 permission_restricted_qs = QueryAccessPolicy.scope_queryset(request, qs) 73 
74 query_serializer = QuerySerializer( 75 permission_restricted_qs, 76 many=True, 77 context={'request': request} 78 ) 79 return query_serializer.data 80 81 82 def get_ui_type_list(request, database): 83 if database is None: 84 return [] 85 type_serializer = TypeSerializer( 86 UIType, 87 many=True, 88 context={'request': request} 89 ) 90 return type_serializer.data 91 92 93 def get_user_data(request): 94 user_serializer = UserSerializer( 95 request.user, 96 many=False, 97 context={'request': request} 98 ) 99 return user_serializer.data 100 101 102 def get_common_data(request, database=None, schema=None): 103 return { 104 'current_db': database.name if database else None, 105 'current_schema': schema.id if schema else None, 106 'schemas': get_schema_list(request, database), 107 'databases': get_database_list(request), 108 'tables': get_table_list(request, schema), 109 'queries': get_queries_list(request, schema), 110 'abstract_types': get_ui_type_list(request, database), 111 'user': get_user_data(request), 112 'live_demo_mode': getattr(settings, 'MATHESAR_LIVE_DEMO', False), 113 'current_release_tag_name': __version__, 114 } 115 116 117 def get_current_database(request, db_name): 118 """Get database from passed name, with fall back behavior.""" 119 permitted_databases = _get_permissible_db_queryset(request) 120 if db_name is not None: 121 current_database = get_object_or_404(permitted_databases, name=db_name) 122 else: 123 request_database_name = request.GET.get('database') 124 try: 125 if request_database_name is not None: 126 # Try to get the database named specified in the request 127 current_database = permitted_databases.get(name=request_database_name) 128 else: 129 # Try to get the first database available 130 current_database = permitted_databases.order_by('id').first() 131 except Database.DoesNotExist: 132 current_database = None 133 return current_database 134 135 136 def get_current_schema(request, schema_id, database): 137 # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error. 138 if schema_id is not None: 139 permitted_schemas = SchemaAccessPolicy.scope_queryset(request, Schema.objects.all()) 140 return get_object_or_404(permitted_schemas, id=schema_id) 141 else: 142 try: 143 # Try to get the first schema in the DB 144 return Schema.objects.filter(database=database).order_by('id').first() 145 except Schema.DoesNotExist: 146 return None 147 148 149 def render_schema(request, database, schema): 150 # if there's no schema available, redirect to the schemas page. 151 if not schema: 152 return redirect('schemas', db_name=database.name) 153 else: 154 # We are redirecting so that the correct URL is passed to the frontend. 
155 return redirect('schema_home', db_name=database.name, schema_id=schema.id) 156 157 158 @login_required 159 @api_view(['POST']) 160 def reflect_all(_): 161 reset_reflection() 162 return Response(status=status.HTTP_200_OK) 163 164 165 @login_required 166 def home(request): 167 database = get_current_database(request, None) 168 if database is None: 169 return render(request, 'mathesar/index.html', { 170 'common_data': get_common_data(request, database) 171 }) 172 return redirect('schemas', db_name=database.name) 173 174 175 @login_required 176 def profile(request): 177 return render(request, 'mathesar/index.html', { 178 'common_data': get_common_data(request) 179 }) 180 181 182 @login_required 183 def admin_home(request, **kwargs): 184 return render(request, 'mathesar/index.html', { 185 'common_data': get_common_data(request) 186 }) 187 188 189 @login_required 190 def schema_home(request, db_name, schema_id, **kwargs): 191 database = get_current_database(request, db_name) 192 schema = get_current_schema(request, schema_id, database) 193 return render(request, 'mathesar/index.html', { 194 'common_data': get_common_data(request, database, schema) 195 }) 196 197 198 @login_required 199 def schemas(request, db_name): 200 database = get_current_database(request, db_name) 201 return render(request, 'mathesar/index.html', { 202 'common_data': get_common_data(request, database, None) 203 }) 204 ``` Path: `config/urls.py` Content: ``` 1 """config URL Configuration 2 3 The `urlpatterns` list routes URLs to views. For more information please see: 4 https://docs.djangoproject.com/en/3.1/topics/http/urls/ 5 Examples: 6 Function views 7 1. Add an import: from my_app import views 8 2. Add a URL to urlpatterns: path('', views.home, name='home') 9 Class-based views 10 1. Add an import: from other_app.views import Home 11 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 Including another URLconf 13 1. Import the include() function: from django.urls import include, path 14 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 """ 16 from django.contrib import admin 17 from django.urls import include, path 18 19 from mathesar import urls as mathesar_urls 20 21 22 urlpatterns = [ 23 path('admin/', admin.site.urls), 24 path('', include(mathesar_urls)), 25 path('api-docs/', include('rest_framework.urls')), 26 ] 27 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/config/urls.py b/config/urls.py --- a/config/urls.py +++ b/config/urls.py @@ -24,3 +24,5 @@ path('', include(mathesar_urls)), path('api-docs/', include('rest_framework.urls')), ] + +handler404 = 'mathesar.views.page_not_found_view' diff --git a/mathesar/views.py b/mathesar/views.py --- a/mathesar/views.py +++ b/mathesar/views.py @@ -201,3 +201,9 @@ return render(request, 'mathesar/index.html', { 'common_data': get_common_data(request, database, None) }) + + +def page_not_found_view(request, exception): + return render(request, 'mathesar/index.html', { + 'common_data': get_common_data(request), + }, status=404)
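For readers skimming the patch above: Django dispatches any URL that matches no route (or any view that raises `Http404`) to the view named by the module-level `handler404` in the root URLconf, as long as `DEBUG` is off, and that is the hook this fix wires up. Below is a compact sketch of the same pattern in isolation; the project, app, and template names are illustrative placeholders rather than Mathesar's own.

```python
# --- myproject/urls.py (root URLconf; project/app names are placeholders) ---
from django.urls import path
from myapp import views

urlpatterns = [
    path("hello/", views.hello),  # an ordinary route
]

# Django resolves this dotted path for any request that matches no route
# (or that raises Http404) whenever DEBUG is False.
handler404 = "myapp.views.page_not_found_view"

# --- myapp/views.py (hypothetical app; index.html is assumed to exist) ------
from django.http import HttpResponse
from django.shortcuts import render

def hello(request):
    return HttpResponse("hello")

def page_not_found_view(request, exception):
    # Serve the same single-page-app shell the regular routes use, but with a
    # 404 status, so server-side and client-side "not found" pages stay consistent.
    return render(request, "myapp/index.html", {}, status=404)
```

Returning the SPA's index template rather than a separate error template is what keeps the 404 experience identical whether the bad URL is typed into the browser or reached through client-side navigation, which is exactly the consistency the issue asks for.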
{"golden_diff": "diff --git a/config/urls.py b/config/urls.py\n--- a/config/urls.py\n+++ b/config/urls.py\n@@ -24,3 +24,5 @@\n path('', include(mathesar_urls)),\n path('api-docs/', include('rest_framework.urls')),\n ]\n+\n+handler404 = 'mathesar.views.page_not_found_view'\ndiff --git a/mathesar/views.py b/mathesar/views.py\n--- a/mathesar/views.py\n+++ b/mathesar/views.py\n@@ -201,3 +201,9 @@\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, None)\n })\n+\n+\n+def page_not_found_view(request, exception):\n+ return render(request, 'mathesar/index.html', {\n+ 'common_data': get_common_data(request),\n+ }, status=404)\n", "issue": "Handle 404 pages throughout the app\n## Description\r\n* Client side routing: A non existent url results in a blank screen, or has obscure errors.\r\n* Server side routing: A non existent url entered on the browser results in a 404, which returns a django 404 page.\r\n\r\n## Expected behavior\r\n* 404s should be clearly presented to the user in all valid cases.\r\n* The 404 page should be consistent in appearance in both server side & client side routing. \n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom mathesar.api.db.permissions.database import DatabaseAccessPolicy\nfrom mathesar.api.db.permissions.query import QueryAccessPolicy\nfrom mathesar.api.db.permissions.schema import SchemaAccessPolicy\nfrom mathesar.api.db.permissions.table import TableAccessPolicy\nfrom mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.api.serializers.tables import TableSerializer\nfrom mathesar.api.serializers.queries import QuerySerializer\nfrom mathesar.api.ui.serializers.users import UserSerializer\nfrom mathesar.database.types import UIType\nfrom mathesar.models.base import Database, Schema, Table\nfrom mathesar.models.query import UIQuery\nfrom mathesar.state import reset_reflection\nfrom mathesar import __version__\n\n\ndef get_schema_list(request, database):\n qs = Schema.objects.filter(database=database)\n permission_restricted_qs = SchemaAccessPolicy.scope_queryset(request, qs)\n schema_serializer = SchemaSerializer(\n permission_restricted_qs,\n many=True,\n context={'request': request}\n )\n return schema_serializer.data\n\n\ndef _get_permissible_db_queryset(request):\n qs = Database.objects.all()\n permission_restricted_qs = DatabaseAccessPolicy.scope_queryset(request, qs)\n schema_qs = Schema.objects.all()\n permitted_schemas = SchemaAccessPolicy.scope_queryset(request, schema_qs)\n databases_from_permitted_schema = Database.objects.filter(schemas__in=permitted_schemas)\n permission_restricted_qs = permission_restricted_qs | databases_from_permitted_schema\n return permission_restricted_qs.distinct()\n\n\ndef get_database_list(request):\n permission_restricted_db_qs = _get_permissible_db_queryset(request)\n database_serializer = DatabaseSerializer(\n permission_restricted_db_qs,\n many=True,\n context={'request': request}\n )\n return database_serializer.data\n\n\ndef get_table_list(request, schema):\n if schema is None:\n return []\n qs = Table.objects.filter(schema=schema)\n permission_restricted_qs = 
TableAccessPolicy.scope_queryset(request, qs)\n table_serializer = TableSerializer(\n permission_restricted_qs,\n many=True,\n context={'request': request}\n )\n return table_serializer.data\n\n\ndef get_queries_list(request, schema):\n if schema is None:\n return []\n qs = UIQuery.objects.filter(base_table__schema=schema)\n permission_restricted_qs = QueryAccessPolicy.scope_queryset(request, qs)\n\n query_serializer = QuerySerializer(\n permission_restricted_qs,\n many=True,\n context={'request': request}\n )\n return query_serializer.data\n\n\ndef get_ui_type_list(request, database):\n if database is None:\n return []\n type_serializer = TypeSerializer(\n UIType,\n many=True,\n context={'request': request}\n )\n return type_serializer.data\n\n\ndef get_user_data(request):\n user_serializer = UserSerializer(\n request.user,\n many=False,\n context={'request': request}\n )\n return user_serializer.data\n\n\ndef get_common_data(request, database=None, schema=None):\n return {\n 'current_db': database.name if database else None,\n 'current_schema': schema.id if schema else None,\n 'schemas': get_schema_list(request, database),\n 'databases': get_database_list(request),\n 'tables': get_table_list(request, schema),\n 'queries': get_queries_list(request, schema),\n 'abstract_types': get_ui_type_list(request, database),\n 'user': get_user_data(request),\n 'live_demo_mode': getattr(settings, 'MATHESAR_LIVE_DEMO', False),\n 'current_release_tag_name': __version__,\n }\n\n\ndef get_current_database(request, db_name):\n \"\"\"Get database from passed name, with fall back behavior.\"\"\"\n permitted_databases = _get_permissible_db_queryset(request)\n if db_name is not None:\n current_database = get_object_or_404(permitted_databases, name=db_name)\n else:\n request_database_name = request.GET.get('database')\n try:\n if request_database_name is not None:\n # Try to get the database named specified in the request\n current_database = permitted_databases.get(name=request_database_name)\n else:\n # Try to get the first database available\n current_database = permitted_databases.order_by('id').first()\n except Database.DoesNotExist:\n current_database = None\n return current_database\n\n\ndef get_current_schema(request, schema_id, database):\n # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.\n if schema_id is not None:\n permitted_schemas = SchemaAccessPolicy.scope_queryset(request, Schema.objects.all())\n return get_object_or_404(permitted_schemas, id=schema_id)\n else:\n try:\n # Try to get the first schema in the DB\n return Schema.objects.filter(database=database).order_by('id').first()\n except Schema.DoesNotExist:\n return None\n\n\ndef render_schema(request, database, schema):\n # if there's no schema available, redirect to the schemas page.\n if not schema:\n return redirect('schemas', db_name=database.name)\n else:\n # We are redirecting so that the correct URL is passed to the frontend.\n return redirect('schema_home', db_name=database.name, schema_id=schema.id)\n\n\n@login_required\n@api_view(['POST'])\ndef reflect_all(_):\n reset_reflection()\n return Response(status=status.HTTP_200_OK)\n\n\n@login_required\ndef home(request):\n database = get_current_database(request, None)\n if database is None:\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database)\n })\n return redirect('schemas', db_name=database.name)\n\n\n@login_required\ndef profile(request):\n return render(request, 'mathesar/index.html', {\n 
'common_data': get_common_data(request)\n })\n\n\n@login_required\ndef admin_home(request, **kwargs):\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request)\n })\n\n\n@login_required\ndef schema_home(request, db_name, schema_id, **kwargs):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, schema_id, database)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, schema)\n })\n\n\n@login_required\ndef schemas(request, db_name):\n database = get_current_database(request, db_name)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, None)\n })\n", "path": "mathesar/views.py"}, {"content": "\"\"\"config URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, path\n\nfrom mathesar import urls as mathesar_urls\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include(mathesar_urls)),\n path('api-docs/', include('rest_framework.urls')),\n]\n", "path": "config/urls.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom mathesar.api.db.permissions.database import DatabaseAccessPolicy\nfrom mathesar.api.db.permissions.query import QueryAccessPolicy\nfrom mathesar.api.db.permissions.schema import SchemaAccessPolicy\nfrom mathesar.api.db.permissions.table import TableAccessPolicy\nfrom mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.api.serializers.tables import TableSerializer\nfrom mathesar.api.serializers.queries import QuerySerializer\nfrom mathesar.api.ui.serializers.users import UserSerializer\nfrom mathesar.database.types import UIType\nfrom mathesar.models.base import Database, Schema, Table\nfrom mathesar.models.query import UIQuery\nfrom mathesar.state import reset_reflection\nfrom mathesar import __version__\n\n\ndef get_schema_list(request, database):\n qs = Schema.objects.filter(database=database)\n permission_restricted_qs = SchemaAccessPolicy.scope_queryset(request, qs)\n schema_serializer = SchemaSerializer(\n permission_restricted_qs,\n many=True,\n context={'request': request}\n )\n return schema_serializer.data\n\n\ndef _get_permissible_db_queryset(request):\n qs = Database.objects.all()\n permission_restricted_qs = DatabaseAccessPolicy.scope_queryset(request, qs)\n schema_qs = Schema.objects.all()\n permitted_schemas = SchemaAccessPolicy.scope_queryset(request, schema_qs)\n databases_from_permitted_schema = Database.objects.filter(schemas__in=permitted_schemas)\n permission_restricted_qs = 
permission_restricted_qs | databases_from_permitted_schema\n return permission_restricted_qs.distinct()\n\n\ndef get_database_list(request):\n permission_restricted_db_qs = _get_permissible_db_queryset(request)\n database_serializer = DatabaseSerializer(\n permission_restricted_db_qs,\n many=True,\n context={'request': request}\n )\n return database_serializer.data\n\n\ndef get_table_list(request, schema):\n if schema is None:\n return []\n qs = Table.objects.filter(schema=schema)\n permission_restricted_qs = TableAccessPolicy.scope_queryset(request, qs)\n table_serializer = TableSerializer(\n permission_restricted_qs,\n many=True,\n context={'request': request}\n )\n return table_serializer.data\n\n\ndef get_queries_list(request, schema):\n if schema is None:\n return []\n qs = UIQuery.objects.filter(base_table__schema=schema)\n permission_restricted_qs = QueryAccessPolicy.scope_queryset(request, qs)\n\n query_serializer = QuerySerializer(\n permission_restricted_qs,\n many=True,\n context={'request': request}\n )\n return query_serializer.data\n\n\ndef get_ui_type_list(request, database):\n if database is None:\n return []\n type_serializer = TypeSerializer(\n UIType,\n many=True,\n context={'request': request}\n )\n return type_serializer.data\n\n\ndef get_user_data(request):\n user_serializer = UserSerializer(\n request.user,\n many=False,\n context={'request': request}\n )\n return user_serializer.data\n\n\ndef get_common_data(request, database=None, schema=None):\n return {\n 'current_db': database.name if database else None,\n 'current_schema': schema.id if schema else None,\n 'schemas': get_schema_list(request, database),\n 'databases': get_database_list(request),\n 'tables': get_table_list(request, schema),\n 'queries': get_queries_list(request, schema),\n 'abstract_types': get_ui_type_list(request, database),\n 'user': get_user_data(request),\n 'live_demo_mode': getattr(settings, 'MATHESAR_LIVE_DEMO', False),\n 'current_release_tag_name': __version__,\n }\n\n\ndef get_current_database(request, db_name):\n \"\"\"Get database from passed name, with fall back behavior.\"\"\"\n permitted_databases = _get_permissible_db_queryset(request)\n if db_name is not None:\n current_database = get_object_or_404(permitted_databases, name=db_name)\n else:\n request_database_name = request.GET.get('database')\n try:\n if request_database_name is not None:\n # Try to get the database named specified in the request\n current_database = permitted_databases.get(name=request_database_name)\n else:\n # Try to get the first database available\n current_database = permitted_databases.order_by('id').first()\n except Database.DoesNotExist:\n current_database = None\n return current_database\n\n\ndef get_current_schema(request, schema_id, database):\n # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.\n if schema_id is not None:\n permitted_schemas = SchemaAccessPolicy.scope_queryset(request, Schema.objects.all())\n return get_object_or_404(permitted_schemas, id=schema_id)\n else:\n try:\n # Try to get the first schema in the DB\n return Schema.objects.filter(database=database).order_by('id').first()\n except Schema.DoesNotExist:\n return None\n\n\ndef render_schema(request, database, schema):\n # if there's no schema available, redirect to the schemas page.\n if not schema:\n return redirect('schemas', db_name=database.name)\n else:\n # We are redirecting so that the correct URL is passed to the frontend.\n return redirect('schema_home', db_name=database.name, 
schema_id=schema.id)\n\n\n@login_required\n@api_view(['POST'])\ndef reflect_all(_):\n reset_reflection()\n return Response(status=status.HTTP_200_OK)\n\n\n@login_required\ndef home(request):\n database = get_current_database(request, None)\n if database is None:\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database)\n })\n return redirect('schemas', db_name=database.name)\n\n\n@login_required\ndef profile(request):\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request)\n })\n\n\n@login_required\ndef admin_home(request, **kwargs):\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request)\n })\n\n\n@login_required\ndef schema_home(request, db_name, schema_id, **kwargs):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, schema_id, database)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, schema)\n })\n\n\n@login_required\ndef schemas(request, db_name):\n database = get_current_database(request, db_name)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, None)\n })\n\n\ndef page_not_found_view(request, exception):\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request),\n }, status=404)\n", "path": "mathesar/views.py"}, {"content": "\"\"\"config URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, path\n\nfrom mathesar import urls as mathesar_urls\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include(mathesar_urls)),\n path('api-docs/', include('rest_framework.urls')),\n]\n\nhandler404 = 'mathesar.views.page_not_found_view'\n", "path": "config/urls.py"}]}
2,635
193
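The JSON object that closes each record carries the same material in machine-readable form: the `golden_diff`, the `issue` text, and full `before_files` / `after_files` listings keyed by `path`. A minimal sketch of pulling those pieces apart follows; the function name and the commented sanity check are assumptions made for illustration, not an official loader for this dump.

```python
import json

def unpack_verification(payload: str) -> dict:
    """Split the per-record JSON payload into its useful pieces."""
    info = json.loads(payload)
    return {
        "golden_diff": info["golden_diff"],
        "issue": info["issue"],
        "touched_paths": [f["path"] for f in info["before_files"]],
        "after_files": {f["path"]: f["content"] for f in info["after_files"]},
    }

# Hypothetical usage: every touched path should be named in the golden diff.
# parsed = unpack_verification(payload)
# assert all(p in parsed["golden_diff"] for p in parsed["touched_paths"])
```

Keeping `before_files` and `after_files` as path-keyed maps makes it straightforward to re-apply the golden diff to the originals and compare the result against the expected file contents.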
gh_patches_debug_16708
rasdani/github-patches
git_diff
pytorch__ignite-1290
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix backward compatibilty ## 🐛 Bug description Recent PR on the change of `mp.spawn` by `mp.start_process` brakes compatibility with PyTorch 1.3.1 and 1.4.0 versions. https://github.com/pytorch/ignite/actions?query=workflow%3A.github%2Fworkflows%2Fpytorch-version-tests.yml We have to fix that. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ignite/distributed/comp_models/native.py` Content: ``` 1 import os 2 import subprocess 3 import warnings 4 from distutils.version import LooseVersion 5 from typing import Callable, Mapping, Optional, Tuple 6 7 import torch 8 import torch.distributed as dist 9 import torch.multiprocessing as mp 10 11 from ignite.distributed.comp_models.base import ComputationModel 12 13 has_native_dist_support = dist.is_available() 14 15 16 if has_native_dist_support: 17 18 NCCL = dist.Backend.NCCL 19 GLOO = dist.Backend.GLOO 20 MPI = dist.Backend.MPI 21 22 class _NativeDistModel(ComputationModel): 23 """Private class for PyTorch native distributed computation model. 24 25 Supported `backends <https://pytorch.org/docs/stable/distributed.html#backends>`_: 26 27 - NCCL 28 - GLOO 29 - MPI 30 31 In this implementation we assume the following mapping between backend and devices: 32 33 - NCCL <-> GPU 34 - GLOO <-> CPU 35 - MPI <-> CPU 36 37 """ 38 39 name = "native-dist" 40 available_backends = tuple( 41 name for name in [NCCL, GLOO, MPI] if getattr(dist, "is_{}_available".format(name))() 42 ) 43 44 @staticmethod 45 def create_from_context() -> Optional["_NativeDistModel"]: 46 if not (dist.is_available() and dist.is_initialized()): 47 return None 48 return _NativeDistModel() 49 50 @staticmethod 51 def create_from_backend(backend: str, **kwargs) -> "_NativeDistModel": 52 if dist.is_available() and dist.is_initialized(): 53 raise RuntimeError("Can not create new distributed process group if default one is already initialized") 54 return _NativeDistModel(backend=backend, **kwargs) 55 56 def __init__(self, backend=None, timeout=None, **kwargs): 57 """This is a private method. 
Please, use `create_from_backend` or `create_from_context` 58 """ 59 super(_NativeDistModel, self).__init__() 60 self._env_backup = None 61 if backend is not None: 62 self._create_from_backend(backend, timeout=timeout, **kwargs) 63 else: 64 self._init_from_context() 65 66 def _create_from_backend(self, backend, timeout=None, **kwargs): 67 if backend == dist.Backend.NCCL and not torch.cuda.is_available(): 68 raise RuntimeError("Nccl backend is required but no cuda capable devices") 69 70 self.setup_env_vars() 71 72 self._local_rank = int(os.environ["LOCAL_RANK"]) 73 # for debug purposes 74 self._master_port = int(os.environ["MASTER_PORT"]) 75 self._master_addr = os.environ["MASTER_ADDR"] 76 77 init_pg_kwargs = {} 78 if timeout is not None: 79 init_pg_kwargs["timeout"] = timeout 80 81 dist.init_process_group(backend, init_method="env://", **init_pg_kwargs) 82 # https://github.com/facebookresearch/maskrcnn-benchmark/issues/172 83 dist.barrier() 84 85 if backend == dist.Backend.NCCL: 86 torch.cuda.set_device(self._local_rank) 87 88 self._setup_attrs() 89 90 def _init_from_context(self): 91 92 self._identify_local_rank() 93 94 # for debug purposes 95 self._master_port = None 96 self._master_addr = None 97 self._setup_attrs() 98 99 def _compute_nproc_per_node(self): 100 tensor = torch.tensor([self.get_local_rank() + 1]).to(self.device()) 101 dist.all_reduce(tensor, op=dist.ReduceOp.MAX) 102 return tensor.item() 103 104 def _get_all_hostnames(self): 105 import socket 106 107 device = "cpu" 108 if self.backend() == dist.Backend.NCCL: 109 index = torch.cuda.current_device() 110 device = "cuda:{}".format(index) 111 name = socket.gethostname() 112 name = torch.tensor(bytearray(name, "utf-8")).to(device) 113 padded_t_name = torch.zeros(256, device=device, dtype=torch.long) 114 padded_t_name[: len(name)] = name 115 out_t_names = [torch.zeros_like(padded_t_name) for _ in range(self.get_world_size())] 116 dist.all_gather(out_t_names, padded_t_name) 117 out_t_names = [tuple(t.cpu().tolist()) for t in out_t_names] 118 return out_t_names 119 120 @staticmethod 121 def _compute_node_and_local_ranks(rank, hostnames): 122 from collections import Counter 123 124 c = Counter(hostnames) 125 sizes = torch.tensor([0,] + list(c.values())) 126 cumsum_sizes = torch.cumsum(sizes, dim=0) 127 node_rank = (rank // cumsum_sizes[1:]).clamp(0, 1).sum().item() 128 local_rank = rank - cumsum_sizes[node_rank].item() 129 return local_rank, node_rank 130 131 def _compute_local_rank_via_hostname(self): 132 # get all hostnames 133 hostnames = self._get_all_hostnames() 134 local_rank, self._node = self._compute_node_and_local_ranks(self.get_rank(), hostnames) 135 136 if local_rank < 0 or self._node < 0: 137 raise ValueError( 138 "Failed to correctly estimate local rank. " 139 "Debugging info: local rank: {}, node rank: {}, hostnames: {}".format( 140 local_rank, self._node, hostnames 141 ) 142 ) 143 return local_rank 144 145 def _identify_local_rank(self): 146 147 if "SLURM_JOBID" in os.environ: 148 os.environ["LOCAL_RANK"] = os.environ["SLURM_LOCALID"] 149 150 if "LOCAL_RANK" in os.environ: 151 self._local_rank = int(os.environ["LOCAL_RANK"]) 152 elif self._ext_local_rank is not None: 153 self._local_rank = self._ext_local_rank 154 else: 155 warnings.warn( 156 "Local rank information for native distributed setting will be initialized using " 157 "heuristic approach based on hostname which can be different of real setup. 
Please, " 158 "either set `os.environ['LOCAL_RANK']` " 159 "or use `idist.set_local_rank(local_rank)` with correct local rank index." 160 ) 161 # use socket gethostname heuristic to determine number of nodes => local rank 162 self._local_rank = self._compute_local_rank_via_hostname() 163 164 def setup_env_vars(self): 165 166 self._env_backup = os.environ.copy() 167 168 if "SLURM_JOBID" in os.environ: 169 self._setup_env_in_slurm() 170 return 171 172 # check if all necessary env vars are set 173 # if partially defined raise an error 174 necessary_env_vars = ["RANK", "LOCAL_RANK", "WORLD_SIZE"] 175 all_env_vars_defined = [k in os.environ for k in necessary_env_vars] 176 if any(all_env_vars_defined) and not all(all_env_vars_defined): 177 raise RuntimeError( 178 "PyTorch distributed configuration should define env variables '{}'".format(necessary_env_vars) 179 ) 180 181 os.environ["RANK"] = os.environ.get("RANK", "0") 182 os.environ["LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0") 183 os.environ["WORLD_SIZE"] = os.environ.get("WORLD_SIZE", "1") 184 os.environ["MASTER_ADDR"] = os.environ.get("MASTER_ADDR", "127.0.0.1") 185 os.environ["MASTER_PORT"] = os.environ.get("MASTER_PORT", "15000") 186 187 def _setup_env_in_slurm(self): 188 for k in ["SLURM_PROCID", "SLURM_LOCALID", "SLURM_NTASKS", "SLURM_JOB_NODELIST"]: 189 if k not in os.environ: 190 raise RuntimeError("SLURM distributed configuration is missing '{}' in env variables".format(k)) 191 192 os.environ["RANK"] = os.environ["SLURM_PROCID"] 193 os.environ["LOCAL_RANK"] = os.environ["SLURM_LOCALID"] 194 os.environ["WORLD_SIZE"] = os.environ["SLURM_NTASKS"] 195 # port should be the same over all process 196 slurm_port = os.environ["SLURM_JOB_ID"] 197 slurm_port = slurm_port[-4:] 198 os.environ["MASTER_PORT"] = str(int(slurm_port) + 15000) 199 # master address is the first hostname of nodes list 200 hostnames = subprocess.check_output(["scontrol", "show", "hostnames", os.environ["SLURM_JOB_NODELIST"]]) 201 os.environ["MASTER_ADDR"] = hostnames.split()[0].decode("utf-8") 202 203 def get_local_rank(self) -> int: 204 return self._local_rank 205 206 def get_rank(self) -> int: 207 return dist.get_rank() 208 209 def get_world_size(self) -> int: 210 return dist.get_world_size() 211 212 def get_nproc_per_node(self) -> int: 213 return self._nproc_per_node 214 215 def get_nnodes(self) -> int: 216 return self._nnodes 217 218 def get_node_rank(self) -> int: 219 return self._node 220 221 def device(self) -> torch.device: 222 if self.backend() == dist.Backend.NCCL: 223 index = torch.cuda.current_device() 224 return torch.device("cuda:{}".format(index)) 225 return torch.device("cpu") 226 227 def backend(self) -> str: 228 return dist.get_backend() 229 230 def finalize(self): 231 dist.destroy_process_group() 232 # restore backed-up env 233 if self._env_backup is not None: 234 os.environ.clear() 235 os.environ.update(self._env_backup) 236 237 @staticmethod 238 def _dist_worker_task_fn( 239 local_rank, backend, fn, args, kw_dict, world_size, nprocs_per_node, node_rank, master_addr, master_port, kw 240 ): 241 from ignite.distributed.utils import _set_model, finalize 242 243 copy_env_vars = os.environ.copy() 244 245 os.environ["LOCAL_RANK"] = str(local_rank) 246 os.environ["RANK"] = str(node_rank * nprocs_per_node + local_rank) 247 os.environ["WORLD_SIZE"] = str(world_size) 248 os.environ["MASTER_ADDR"] = str(master_addr) 249 os.environ["MASTER_PORT"] = str(master_port) 250 251 model = _NativeDistModel.create_from_backend(backend, **kw) 252 _set_model(model) 253 
fn(local_rank, *args, **kw_dict) 254 finalize() 255 256 os.environ.clear() 257 os.environ.update(copy_env_vars) 258 259 @staticmethod 260 def spawn( 261 fn: Callable, 262 args: Tuple, 263 kwargs_dict: Optional[Mapping] = None, 264 nproc_per_node: int = 1, 265 nnodes: int = 1, 266 node_rank: int = 0, 267 master_addr: str = "127.0.0.1", 268 master_port: int = 2222, 269 backend: str = "nccl", 270 **kwargs 271 ): 272 world_size = nnodes * nproc_per_node 273 274 spawn_kwargs = { 275 "join": kwargs.get("join", True), 276 "daemon": kwargs.get("daemon", False), 277 } 278 # start_method in pytorch >= 1.5 279 if LooseVersion(torch.__version__) >= LooseVersion("1.5.0"): 280 spawn_kwargs["start_method"] = kwargs.get("start_method", "spawn") 281 282 mp.start_processes( 283 _NativeDistModel._dist_worker_task_fn, 284 nprocs=nproc_per_node, 285 args=( 286 backend, 287 fn, 288 args, 289 kwargs_dict, 290 world_size, 291 nproc_per_node, 292 node_rank, 293 master_addr, 294 master_port, 295 kwargs, 296 ), 297 **spawn_kwargs, 298 ) 299 300 _reduce_op_map = { 301 "SUM": dist.ReduceOp.SUM, 302 "PRODUCT": dist.ReduceOp.PRODUCT, 303 "MIN": dist.ReduceOp.MIN, 304 "MAX": dist.ReduceOp.MAX, 305 "AND": dist.ReduceOp.BAND, 306 "OR": dist.ReduceOp.BOR, 307 } 308 309 def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM") -> torch.Tensor: 310 if op not in self._reduce_op_map: 311 raise ValueError("Unsupported reduction operation: '{}'".format(op)) 312 op = self._reduce_op_map[op] 313 dist.all_reduce(tensor, op) 314 return tensor 315 316 def _do_all_gather(self, tensor: torch.Tensor) -> torch.Tensor: 317 if tensor.ndimension() == 0: 318 tensor = tensor.unsqueeze(0) 319 output = [torch.zeros_like(tensor) for _ in range(self.get_world_size())] 320 dist.all_gather(output, tensor) 321 return torch.cat(output, dim=0) 322 323 def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor: 324 dist.broadcast(tensor, src=src) 325 return tensor 326 327 def barrier(self): 328 dist.barrier() 329 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ignite/distributed/comp_models/native.py b/ignite/distributed/comp_models/native.py --- a/ignite/distributed/comp_models/native.py +++ b/ignite/distributed/comp_models/native.py @@ -275,11 +275,14 @@ "join": kwargs.get("join", True), "daemon": kwargs.get("daemon", False), } - # start_method in pytorch >= 1.5 + + start_processes = mp.spawn + # start_method and start_processes in pytorch >= 1.5 if LooseVersion(torch.__version__) >= LooseVersion("1.5.0"): spawn_kwargs["start_method"] = kwargs.get("start_method", "spawn") + start_processes = mp.start_processes - mp.start_processes( + start_processes( _NativeDistModel._dist_worker_task_fn, nprocs=nproc_per_node, args=(
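The patch above keeps PyTorch 1.3.x and 1.4.x working by defaulting to `torch.multiprocessing.spawn` and only switching to `mp.start_processes` (plus its `start_method` keyword) once the installed version is 1.5.0 or newer. Here is the same version gate stripped down to a runnable sketch; the worker function and its arguments are placeholders, not ignite's real ones.

```python
from distutils.version import LooseVersion

import torch
import torch.multiprocessing as mp


def _worker(local_rank, greeting):
    # Placeholder worker; ignite's real one sets up the process group instead.
    print(f"rank {local_rank}: {greeting}")


def launch(nproc: int = 2):
    spawn_kwargs = {"join": True, "daemon": False}

    # mp.start_processes and the start_method kwarg only exist in torch >= 1.5,
    # so fall back to mp.spawn on 1.3.x / 1.4.x (the versions the issue names).
    start_processes = mp.spawn
    if LooseVersion(torch.__version__) >= LooseVersion("1.5.0"):
        spawn_kwargs["start_method"] = "spawn"
        start_processes = mp.start_processes

    start_processes(_worker, nprocs=nproc, args=("hello",), **spawn_kwargs)


if __name__ == "__main__":
    launch()
```

Gating on `LooseVersion(torch.__version__)` means neither the missing `mp.start_processes` attribute nor the `start_method` keyword ever reaches the older `mp.spawn` signature, which is what broke the 1.3.1 and 1.4.0 builds.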
{"golden_diff": "diff --git a/ignite/distributed/comp_models/native.py b/ignite/distributed/comp_models/native.py\n--- a/ignite/distributed/comp_models/native.py\n+++ b/ignite/distributed/comp_models/native.py\n@@ -275,11 +275,14 @@\n \"join\": kwargs.get(\"join\", True),\n \"daemon\": kwargs.get(\"daemon\", False),\n }\n- # start_method in pytorch >= 1.5\n+\n+ start_processes = mp.spawn\n+ # start_method and start_processes in pytorch >= 1.5\n if LooseVersion(torch.__version__) >= LooseVersion(\"1.5.0\"):\n spawn_kwargs[\"start_method\"] = kwargs.get(\"start_method\", \"spawn\")\n+ start_processes = mp.start_processes\n \n- mp.start_processes(\n+ start_processes(\n _NativeDistModel._dist_worker_task_fn,\n nprocs=nproc_per_node,\n args=(\n", "issue": "Fix backward compatibilty\n## \ud83d\udc1b Bug description\r\n\r\nRecent PR on the change of `mp.spawn` by `mp.start_process` brakes compatibility with PyTorch 1.3.1 and 1.4.0 versions.\r\n\r\nhttps://github.com/pytorch/ignite/actions?query=workflow%3A.github%2Fworkflows%2Fpytorch-version-tests.yml\r\n\r\nWe have to fix that.\n", "before_files": [{"content": "import os\nimport subprocess\nimport warnings\nfrom distutils.version import LooseVersion\nfrom typing import Callable, Mapping, Optional, Tuple\n\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nfrom ignite.distributed.comp_models.base import ComputationModel\n\nhas_native_dist_support = dist.is_available()\n\n\nif has_native_dist_support:\n\n NCCL = dist.Backend.NCCL\n GLOO = dist.Backend.GLOO\n MPI = dist.Backend.MPI\n\n class _NativeDistModel(ComputationModel):\n \"\"\"Private class for PyTorch native distributed computation model.\n\n Supported `backends <https://pytorch.org/docs/stable/distributed.html#backends>`_:\n\n - NCCL\n - GLOO\n - MPI\n\n In this implementation we assume the following mapping between backend and devices:\n\n - NCCL <-> GPU\n - GLOO <-> CPU\n - MPI <-> CPU\n\n \"\"\"\n\n name = \"native-dist\"\n available_backends = tuple(\n name for name in [NCCL, GLOO, MPI] if getattr(dist, \"is_{}_available\".format(name))()\n )\n\n @staticmethod\n def create_from_context() -> Optional[\"_NativeDistModel\"]:\n if not (dist.is_available() and dist.is_initialized()):\n return None\n return _NativeDistModel()\n\n @staticmethod\n def create_from_backend(backend: str, **kwargs) -> \"_NativeDistModel\":\n if dist.is_available() and dist.is_initialized():\n raise RuntimeError(\"Can not create new distributed process group if default one is already initialized\")\n return _NativeDistModel(backend=backend, **kwargs)\n\n def __init__(self, backend=None, timeout=None, **kwargs):\n \"\"\"This is a private method. 
Please, use `create_from_backend` or `create_from_context`\n \"\"\"\n super(_NativeDistModel, self).__init__()\n self._env_backup = None\n if backend is not None:\n self._create_from_backend(backend, timeout=timeout, **kwargs)\n else:\n self._init_from_context()\n\n def _create_from_backend(self, backend, timeout=None, **kwargs):\n if backend == dist.Backend.NCCL and not torch.cuda.is_available():\n raise RuntimeError(\"Nccl backend is required but no cuda capable devices\")\n\n self.setup_env_vars()\n\n self._local_rank = int(os.environ[\"LOCAL_RANK\"])\n # for debug purposes\n self._master_port = int(os.environ[\"MASTER_PORT\"])\n self._master_addr = os.environ[\"MASTER_ADDR\"]\n\n init_pg_kwargs = {}\n if timeout is not None:\n init_pg_kwargs[\"timeout\"] = timeout\n\n dist.init_process_group(backend, init_method=\"env://\", **init_pg_kwargs)\n # https://github.com/facebookresearch/maskrcnn-benchmark/issues/172\n dist.barrier()\n\n if backend == dist.Backend.NCCL:\n torch.cuda.set_device(self._local_rank)\n\n self._setup_attrs()\n\n def _init_from_context(self):\n\n self._identify_local_rank()\n\n # for debug purposes\n self._master_port = None\n self._master_addr = None\n self._setup_attrs()\n\n def _compute_nproc_per_node(self):\n tensor = torch.tensor([self.get_local_rank() + 1]).to(self.device())\n dist.all_reduce(tensor, op=dist.ReduceOp.MAX)\n return tensor.item()\n\n def _get_all_hostnames(self):\n import socket\n\n device = \"cpu\"\n if self.backend() == dist.Backend.NCCL:\n index = torch.cuda.current_device()\n device = \"cuda:{}\".format(index)\n name = socket.gethostname()\n name = torch.tensor(bytearray(name, \"utf-8\")).to(device)\n padded_t_name = torch.zeros(256, device=device, dtype=torch.long)\n padded_t_name[: len(name)] = name\n out_t_names = [torch.zeros_like(padded_t_name) for _ in range(self.get_world_size())]\n dist.all_gather(out_t_names, padded_t_name)\n out_t_names = [tuple(t.cpu().tolist()) for t in out_t_names]\n return out_t_names\n\n @staticmethod\n def _compute_node_and_local_ranks(rank, hostnames):\n from collections import Counter\n\n c = Counter(hostnames)\n sizes = torch.tensor([0,] + list(c.values()))\n cumsum_sizes = torch.cumsum(sizes, dim=0)\n node_rank = (rank // cumsum_sizes[1:]).clamp(0, 1).sum().item()\n local_rank = rank - cumsum_sizes[node_rank].item()\n return local_rank, node_rank\n\n def _compute_local_rank_via_hostname(self):\n # get all hostnames\n hostnames = self._get_all_hostnames()\n local_rank, self._node = self._compute_node_and_local_ranks(self.get_rank(), hostnames)\n\n if local_rank < 0 or self._node < 0:\n raise ValueError(\n \"Failed to correctly estimate local rank. \"\n \"Debugging info: local rank: {}, node rank: {}, hostnames: {}\".format(\n local_rank, self._node, hostnames\n )\n )\n return local_rank\n\n def _identify_local_rank(self):\n\n if \"SLURM_JOBID\" in os.environ:\n os.environ[\"LOCAL_RANK\"] = os.environ[\"SLURM_LOCALID\"]\n\n if \"LOCAL_RANK\" in os.environ:\n self._local_rank = int(os.environ[\"LOCAL_RANK\"])\n elif self._ext_local_rank is not None:\n self._local_rank = self._ext_local_rank\n else:\n warnings.warn(\n \"Local rank information for native distributed setting will be initialized using \"\n \"heuristic approach based on hostname which can be different of real setup. 
Please, \"\n \"either set `os.environ['LOCAL_RANK']` \"\n \"or use `idist.set_local_rank(local_rank)` with correct local rank index.\"\n )\n # use socket gethostname heuristic to determine number of nodes => local rank\n self._local_rank = self._compute_local_rank_via_hostname()\n\n def setup_env_vars(self):\n\n self._env_backup = os.environ.copy()\n\n if \"SLURM_JOBID\" in os.environ:\n self._setup_env_in_slurm()\n return\n\n # check if all necessary env vars are set\n # if partially defined raise an error\n necessary_env_vars = [\"RANK\", \"LOCAL_RANK\", \"WORLD_SIZE\"]\n all_env_vars_defined = [k in os.environ for k in necessary_env_vars]\n if any(all_env_vars_defined) and not all(all_env_vars_defined):\n raise RuntimeError(\n \"PyTorch distributed configuration should define env variables '{}'\".format(necessary_env_vars)\n )\n\n os.environ[\"RANK\"] = os.environ.get(\"RANK\", \"0\")\n os.environ[\"LOCAL_RANK\"] = os.environ.get(\"LOCAL_RANK\", \"0\")\n os.environ[\"WORLD_SIZE\"] = os.environ.get(\"WORLD_SIZE\", \"1\")\n os.environ[\"MASTER_ADDR\"] = os.environ.get(\"MASTER_ADDR\", \"127.0.0.1\")\n os.environ[\"MASTER_PORT\"] = os.environ.get(\"MASTER_PORT\", \"15000\")\n\n def _setup_env_in_slurm(self):\n for k in [\"SLURM_PROCID\", \"SLURM_LOCALID\", \"SLURM_NTASKS\", \"SLURM_JOB_NODELIST\"]:\n if k not in os.environ:\n raise RuntimeError(\"SLURM distributed configuration is missing '{}' in env variables\".format(k))\n\n os.environ[\"RANK\"] = os.environ[\"SLURM_PROCID\"]\n os.environ[\"LOCAL_RANK\"] = os.environ[\"SLURM_LOCALID\"]\n os.environ[\"WORLD_SIZE\"] = os.environ[\"SLURM_NTASKS\"]\n # port should be the same over all process\n slurm_port = os.environ[\"SLURM_JOB_ID\"]\n slurm_port = slurm_port[-4:]\n os.environ[\"MASTER_PORT\"] = str(int(slurm_port) + 15000)\n # master address is the first hostname of nodes list\n hostnames = subprocess.check_output([\"scontrol\", \"show\", \"hostnames\", os.environ[\"SLURM_JOB_NODELIST\"]])\n os.environ[\"MASTER_ADDR\"] = hostnames.split()[0].decode(\"utf-8\")\n\n def get_local_rank(self) -> int:\n return self._local_rank\n\n def get_rank(self) -> int:\n return dist.get_rank()\n\n def get_world_size(self) -> int:\n return dist.get_world_size()\n\n def get_nproc_per_node(self) -> int:\n return self._nproc_per_node\n\n def get_nnodes(self) -> int:\n return self._nnodes\n\n def get_node_rank(self) -> int:\n return self._node\n\n def device(self) -> torch.device:\n if self.backend() == dist.Backend.NCCL:\n index = torch.cuda.current_device()\n return torch.device(\"cuda:{}\".format(index))\n return torch.device(\"cpu\")\n\n def backend(self) -> str:\n return dist.get_backend()\n\n def finalize(self):\n dist.destroy_process_group()\n # restore backed-up env\n if self._env_backup is not None:\n os.environ.clear()\n os.environ.update(self._env_backup)\n\n @staticmethod\n def _dist_worker_task_fn(\n local_rank, backend, fn, args, kw_dict, world_size, nprocs_per_node, node_rank, master_addr, master_port, kw\n ):\n from ignite.distributed.utils import _set_model, finalize\n\n copy_env_vars = os.environ.copy()\n\n os.environ[\"LOCAL_RANK\"] = str(local_rank)\n os.environ[\"RANK\"] = str(node_rank * nprocs_per_node + local_rank)\n os.environ[\"WORLD_SIZE\"] = str(world_size)\n os.environ[\"MASTER_ADDR\"] = str(master_addr)\n os.environ[\"MASTER_PORT\"] = str(master_port)\n\n model = _NativeDistModel.create_from_backend(backend, **kw)\n _set_model(model)\n fn(local_rank, *args, **kw_dict)\n finalize()\n\n os.environ.clear()\n 
os.environ.update(copy_env_vars)\n\n @staticmethod\n def spawn(\n fn: Callable,\n args: Tuple,\n kwargs_dict: Optional[Mapping] = None,\n nproc_per_node: int = 1,\n nnodes: int = 1,\n node_rank: int = 0,\n master_addr: str = \"127.0.0.1\",\n master_port: int = 2222,\n backend: str = \"nccl\",\n **kwargs\n ):\n world_size = nnodes * nproc_per_node\n\n spawn_kwargs = {\n \"join\": kwargs.get(\"join\", True),\n \"daemon\": kwargs.get(\"daemon\", False),\n }\n # start_method in pytorch >= 1.5\n if LooseVersion(torch.__version__) >= LooseVersion(\"1.5.0\"):\n spawn_kwargs[\"start_method\"] = kwargs.get(\"start_method\", \"spawn\")\n\n mp.start_processes(\n _NativeDistModel._dist_worker_task_fn,\n nprocs=nproc_per_node,\n args=(\n backend,\n fn,\n args,\n kwargs_dict,\n world_size,\n nproc_per_node,\n node_rank,\n master_addr,\n master_port,\n kwargs,\n ),\n **spawn_kwargs,\n )\n\n _reduce_op_map = {\n \"SUM\": dist.ReduceOp.SUM,\n \"PRODUCT\": dist.ReduceOp.PRODUCT,\n \"MIN\": dist.ReduceOp.MIN,\n \"MAX\": dist.ReduceOp.MAX,\n \"AND\": dist.ReduceOp.BAND,\n \"OR\": dist.ReduceOp.BOR,\n }\n\n def _do_all_reduce(self, tensor: torch.Tensor, op: str = \"SUM\") -> torch.Tensor:\n if op not in self._reduce_op_map:\n raise ValueError(\"Unsupported reduction operation: '{}'\".format(op))\n op = self._reduce_op_map[op]\n dist.all_reduce(tensor, op)\n return tensor\n\n def _do_all_gather(self, tensor: torch.Tensor) -> torch.Tensor:\n if tensor.ndimension() == 0:\n tensor = tensor.unsqueeze(0)\n output = [torch.zeros_like(tensor) for _ in range(self.get_world_size())]\n dist.all_gather(output, tensor)\n return torch.cat(output, dim=0)\n\n def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:\n dist.broadcast(tensor, src=src)\n return tensor\n\n def barrier(self):\n dist.barrier()\n", "path": "ignite/distributed/comp_models/native.py"}], "after_files": [{"content": "import os\nimport subprocess\nimport warnings\nfrom distutils.version import LooseVersion\nfrom typing import Callable, Mapping, Optional, Tuple\n\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nfrom ignite.distributed.comp_models.base import ComputationModel\n\nhas_native_dist_support = dist.is_available()\n\n\nif has_native_dist_support:\n\n NCCL = dist.Backend.NCCL\n GLOO = dist.Backend.GLOO\n MPI = dist.Backend.MPI\n\n class _NativeDistModel(ComputationModel):\n \"\"\"Private class for PyTorch native distributed computation model.\n\n Supported `backends <https://pytorch.org/docs/stable/distributed.html#backends>`_:\n\n - NCCL\n - GLOO\n - MPI\n\n In this implementation we assume the following mapping between backend and devices:\n\n - NCCL <-> GPU\n - GLOO <-> CPU\n - MPI <-> CPU\n\n \"\"\"\n\n name = \"native-dist\"\n available_backends = tuple(\n name for name in [NCCL, GLOO, MPI] if getattr(dist, \"is_{}_available\".format(name))()\n )\n\n @staticmethod\n def create_from_context() -> Optional[\"_NativeDistModel\"]:\n if not (dist.is_available() and dist.is_initialized()):\n return None\n return _NativeDistModel()\n\n @staticmethod\n def create_from_backend(backend: str, **kwargs) -> \"_NativeDistModel\":\n if dist.is_available() and dist.is_initialized():\n raise RuntimeError(\"Can not create new distributed process group if default one is already initialized\")\n return _NativeDistModel(backend=backend, **kwargs)\n\n def __init__(self, backend=None, timeout=None, **kwargs):\n \"\"\"This is a private method. 
Please, use `create_from_backend` or `create_from_context`\n \"\"\"\n super(_NativeDistModel, self).__init__()\n self._env_backup = None\n if backend is not None:\n self._create_from_backend(backend, timeout=timeout, **kwargs)\n else:\n self._init_from_context()\n\n def _create_from_backend(self, backend, timeout=None, **kwargs):\n if backend == dist.Backend.NCCL and not torch.cuda.is_available():\n raise RuntimeError(\"Nccl backend is required but no cuda capable devices\")\n\n self.setup_env_vars()\n\n self._local_rank = int(os.environ[\"LOCAL_RANK\"])\n # for debug purposes\n self._master_port = int(os.environ[\"MASTER_PORT\"])\n self._master_addr = os.environ[\"MASTER_ADDR\"]\n\n init_pg_kwargs = {}\n if timeout is not None:\n init_pg_kwargs[\"timeout\"] = timeout\n\n dist.init_process_group(backend, init_method=\"env://\", **init_pg_kwargs)\n # https://github.com/facebookresearch/maskrcnn-benchmark/issues/172\n dist.barrier()\n\n if backend == dist.Backend.NCCL:\n torch.cuda.set_device(self._local_rank)\n\n self._setup_attrs()\n\n def _init_from_context(self):\n\n self._identify_local_rank()\n\n # for debug purposes\n self._master_port = None\n self._master_addr = None\n self._setup_attrs()\n\n def _compute_nproc_per_node(self):\n tensor = torch.tensor([self.get_local_rank() + 1]).to(self.device())\n dist.all_reduce(tensor, op=dist.ReduceOp.MAX)\n return tensor.item()\n\n def _get_all_hostnames(self):\n import socket\n\n device = \"cpu\"\n if self.backend() == dist.Backend.NCCL:\n index = torch.cuda.current_device()\n device = \"cuda:{}\".format(index)\n name = socket.gethostname()\n name = torch.tensor(bytearray(name, \"utf-8\")).to(device)\n padded_t_name = torch.zeros(256, device=device, dtype=torch.long)\n padded_t_name[: len(name)] = name\n out_t_names = [torch.zeros_like(padded_t_name) for _ in range(self.get_world_size())]\n dist.all_gather(out_t_names, padded_t_name)\n out_t_names = [tuple(t.cpu().tolist()) for t in out_t_names]\n return out_t_names\n\n @staticmethod\n def _compute_node_and_local_ranks(rank, hostnames):\n from collections import Counter\n\n c = Counter(hostnames)\n sizes = torch.tensor([0,] + list(c.values()))\n cumsum_sizes = torch.cumsum(sizes, dim=0)\n node_rank = (rank // cumsum_sizes[1:]).clamp(0, 1).sum().item()\n local_rank = rank - cumsum_sizes[node_rank].item()\n return local_rank, node_rank\n\n def _compute_local_rank_via_hostname(self):\n # get all hostnames\n hostnames = self._get_all_hostnames()\n local_rank, self._node = self._compute_node_and_local_ranks(self.get_rank(), hostnames)\n\n if local_rank < 0 or self._node < 0:\n raise ValueError(\n \"Failed to correctly estimate local rank. \"\n \"Debugging info: local rank: {}, node rank: {}, hostnames: {}\".format(\n local_rank, self._node, hostnames\n )\n )\n return local_rank\n\n def _identify_local_rank(self):\n\n if \"SLURM_JOBID\" in os.environ:\n os.environ[\"LOCAL_RANK\"] = os.environ[\"SLURM_LOCALID\"]\n\n if \"LOCAL_RANK\" in os.environ:\n self._local_rank = int(os.environ[\"LOCAL_RANK\"])\n elif self._ext_local_rank is not None:\n self._local_rank = self._ext_local_rank\n else:\n warnings.warn(\n \"Local rank information for native distributed setting will be initialized using \"\n \"heuristic approach based on hostname which can be different of real setup. 
Please, \"\n \"either set `os.environ['LOCAL_RANK']` \"\n \"or use `idist.set_local_rank(local_rank)` with correct local rank index.\"\n )\n # use socket gethostname heuristic to determine number of nodes => local rank\n self._local_rank = self._compute_local_rank_via_hostname()\n\n def setup_env_vars(self):\n\n self._env_backup = os.environ.copy()\n\n if \"SLURM_JOBID\" in os.environ:\n self._setup_env_in_slurm()\n return\n\n # check if all necessary env vars are set\n # if partially defined raise an error\n necessary_env_vars = [\"RANK\", \"LOCAL_RANK\", \"WORLD_SIZE\"]\n all_env_vars_defined = [k in os.environ for k in necessary_env_vars]\n if any(all_env_vars_defined) and not all(all_env_vars_defined):\n raise RuntimeError(\n \"PyTorch distributed configuration should define env variables '{}'\".format(necessary_env_vars)\n )\n\n os.environ[\"RANK\"] = os.environ.get(\"RANK\", \"0\")\n os.environ[\"LOCAL_RANK\"] = os.environ.get(\"LOCAL_RANK\", \"0\")\n os.environ[\"WORLD_SIZE\"] = os.environ.get(\"WORLD_SIZE\", \"1\")\n os.environ[\"MASTER_ADDR\"] = os.environ.get(\"MASTER_ADDR\", \"127.0.0.1\")\n os.environ[\"MASTER_PORT\"] = os.environ.get(\"MASTER_PORT\", \"15000\")\n\n def _setup_env_in_slurm(self):\n for k in [\"SLURM_PROCID\", \"SLURM_LOCALID\", \"SLURM_NTASKS\", \"SLURM_JOB_NODELIST\"]:\n if k not in os.environ:\n raise RuntimeError(\"SLURM distributed configuration is missing '{}' in env variables\".format(k))\n\n os.environ[\"RANK\"] = os.environ[\"SLURM_PROCID\"]\n os.environ[\"LOCAL_RANK\"] = os.environ[\"SLURM_LOCALID\"]\n os.environ[\"WORLD_SIZE\"] = os.environ[\"SLURM_NTASKS\"]\n # port should be the same over all process\n slurm_port = os.environ[\"SLURM_JOB_ID\"]\n slurm_port = slurm_port[-4:]\n os.environ[\"MASTER_PORT\"] = str(int(slurm_port) + 15000)\n # master address is the first hostname of nodes list\n hostnames = subprocess.check_output([\"scontrol\", \"show\", \"hostnames\", os.environ[\"SLURM_JOB_NODELIST\"]])\n os.environ[\"MASTER_ADDR\"] = hostnames.split()[0].decode(\"utf-8\")\n\n def get_local_rank(self) -> int:\n return self._local_rank\n\n def get_rank(self) -> int:\n return dist.get_rank()\n\n def get_world_size(self) -> int:\n return dist.get_world_size()\n\n def get_nproc_per_node(self) -> int:\n return self._nproc_per_node\n\n def get_nnodes(self) -> int:\n return self._nnodes\n\n def get_node_rank(self) -> int:\n return self._node\n\n def device(self) -> torch.device:\n if self.backend() == dist.Backend.NCCL:\n index = torch.cuda.current_device()\n return torch.device(\"cuda:{}\".format(index))\n return torch.device(\"cpu\")\n\n def backend(self) -> str:\n return dist.get_backend()\n\n def finalize(self):\n dist.destroy_process_group()\n # restore backed-up env\n if self._env_backup is not None:\n os.environ.clear()\n os.environ.update(self._env_backup)\n\n @staticmethod\n def _dist_worker_task_fn(\n local_rank, backend, fn, args, kw_dict, world_size, nprocs_per_node, node_rank, master_addr, master_port, kw\n ):\n from ignite.distributed.utils import _set_model, finalize\n\n copy_env_vars = os.environ.copy()\n\n os.environ[\"LOCAL_RANK\"] = str(local_rank)\n os.environ[\"RANK\"] = str(node_rank * nprocs_per_node + local_rank)\n os.environ[\"WORLD_SIZE\"] = str(world_size)\n os.environ[\"MASTER_ADDR\"] = str(master_addr)\n os.environ[\"MASTER_PORT\"] = str(master_port)\n\n model = _NativeDistModel.create_from_backend(backend, **kw)\n _set_model(model)\n fn(local_rank, *args, **kw_dict)\n finalize()\n\n os.environ.clear()\n 
os.environ.update(copy_env_vars)\n\n @staticmethod\n def spawn(\n fn: Callable,\n args: Tuple,\n kwargs_dict: Optional[Mapping] = None,\n nproc_per_node: int = 1,\n nnodes: int = 1,\n node_rank: int = 0,\n master_addr: str = \"127.0.0.1\",\n master_port: int = 2222,\n backend: str = \"nccl\",\n **kwargs\n ):\n world_size = nnodes * nproc_per_node\n\n spawn_kwargs = {\n \"join\": kwargs.get(\"join\", True),\n \"daemon\": kwargs.get(\"daemon\", False),\n }\n\n start_processes = mp.spawn\n # start_method and start_processes in pytorch >= 1.5\n if LooseVersion(torch.__version__) >= LooseVersion(\"1.5.0\"):\n spawn_kwargs[\"start_method\"] = kwargs.get(\"start_method\", \"spawn\")\n start_processes = mp.start_processes\n\n start_processes(\n _NativeDistModel._dist_worker_task_fn,\n nprocs=nproc_per_node,\n args=(\n backend,\n fn,\n args,\n kwargs_dict,\n world_size,\n nproc_per_node,\n node_rank,\n master_addr,\n master_port,\n kwargs,\n ),\n **spawn_kwargs,\n )\n\n _reduce_op_map = {\n \"SUM\": dist.ReduceOp.SUM,\n \"PRODUCT\": dist.ReduceOp.PRODUCT,\n \"MIN\": dist.ReduceOp.MIN,\n \"MAX\": dist.ReduceOp.MAX,\n \"AND\": dist.ReduceOp.BAND,\n \"OR\": dist.ReduceOp.BOR,\n }\n\n def _do_all_reduce(self, tensor: torch.Tensor, op: str = \"SUM\") -> torch.Tensor:\n if op not in self._reduce_op_map:\n raise ValueError(\"Unsupported reduction operation: '{}'\".format(op))\n op = self._reduce_op_map[op]\n dist.all_reduce(tensor, op)\n return tensor\n\n def _do_all_gather(self, tensor: torch.Tensor) -> torch.Tensor:\n if tensor.ndimension() == 0:\n tensor = tensor.unsqueeze(0)\n output = [torch.zeros_like(tensor) for _ in range(self.get_world_size())]\n dist.all_gather(output, tensor)\n return torch.cat(output, dim=0)\n\n def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:\n dist.broadcast(tensor, src=src)\n return tensor\n\n def barrier(self):\n dist.barrier()\n", "path": "ignite/distributed/comp_models/native.py"}]}
4012
205
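The record above centres on how `_NativeDistModel` derives each worker's distributed rank and environment before `torch.distributed` is initialised. As a hedged illustration of just that arithmetic, the helper below reproduces the environment-variable setup from `_dist_worker_task_fn` in isolation; `configure_worker_env` is an invented name for this sketch, not part of ignite, and the example job sizes are illustrative.

```python
import os


def configure_worker_env(local_rank, node_rank, nproc_per_node, world_size,
                         master_addr="127.0.0.1", master_port=2222):
    # Global rank = node index * processes per node + local process index,
    # the same arithmetic used in _dist_worker_task_fn above.
    os.environ["LOCAL_RANK"] = str(local_rank)
    os.environ["RANK"] = str(node_rank * nproc_per_node + local_rank)
    os.environ["WORLD_SIZE"] = str(world_size)
    os.environ["MASTER_ADDR"] = str(master_addr)
    os.environ["MASTER_PORT"] = str(master_port)


# Process 1 on node 2 (zero-based) of a 4-node x 4-GPU job gets global rank 9.
configure_worker_env(local_rank=1, node_rank=2, nproc_per_node=4, world_size=16)
assert os.environ["RANK"] == "9"
```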
gh_patches_debug_35513
rasdani/github-patches
git_diff
cornellius-gp__gpytorch-1566
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Bug] Performance varies according to the batch size # 🐛 Bug <!-- A clear and concise description of what the bug is. --> The accuracy of the classifier trained with the codes (https://github.com/cornellius-gp/gpytorch/tree/master/examples/06_PyTorch_NN_Integration_DKL) varies according to the batch size. It should be consistent regardless of the batch size. Am I missing something? ## To reproduce ** Code snippet to reproduce ** ```python from torch.optim import SGD, Adam from torch.optim.lr_scheduler import MultiStepLR import torch.nn.functional as F from torch import nn import torch import os import torchvision.datasets as dset import torchvision.transforms as transforms import gpytorch import math import tqdm normalize = transforms.Normalize(mean=[0.5071, 0.4867, 0.4408], std=[0.2675, 0.2565, 0.2761]) aug_trans = [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip()] common_trans = [transforms.ToTensor(), normalize] train_compose = transforms.Compose(aug_trans + common_trans) test_compose = transforms.Compose(common_trans) dataset = "cifar10" if ('CI' in os.environ): # this is for running the notebook in our testing framework train_set = torch.utils.data.TensorDataset(torch.randn(8, 3, 32, 32), torch.rand(8).round().long()) test_set = torch.utils.data.TensorDataset(torch.randn(4, 3, 32, 32), torch.rand(4).round().long()) train_loader = torch.utils.data.DataLoader(train_set, batch_size=4, shuffle=True) test_loader = torch.utils.data.DataLoader(test_set, batch_size=2, shuffle=False) num_classes = 2 elif dataset == 'cifar10': train_set = dset.CIFAR10('data', train=True, transform=train_compose, download=True) test_set = dset.CIFAR10('data', train=False, transform=test_compose) train_loader = torch.utils.data.DataLoader(train_set, batch_size=256, shuffle=True) test_loader = torch.utils.data.DataLoader(test_set, batch_size=16, shuffle=False) num_classes = 10 elif dataset == 'cifar100': train_set = dset.CIFAR100('data', train=True, transform=train_compose, download=True) test_set = dset.CIFAR100('data', train=False, transform=test_compose) train_loader = torch.utils.data.DataLoader(train_set, batch_size=256, shuffle=True) test_loader = torch.utils.data.DataLoader(test_set, batch_size=256, shuffle=False) num_classes = 100 else: raise RuntimeError('dataset must be one of "cifar100" or "cifar10"') from densenet import DenseNet class DenseNetFeatureExtractor(DenseNet): def forward(self, x): features = self.features(x) out = F.relu(features, inplace=True) out = F.avg_pool2d(out, kernel_size=self.avgpool_size).view(features.size(0), -1) return out feature_extractor = DenseNetFeatureExtractor(block_config=(6, 6, 6), num_classes=num_classes) num_features = feature_extractor.classifier.in_features class GaussianProcessLayer(gpytorch.models.ApproximateGP): def __init__(self, num_dim, grid_bounds=(-10., 10.), grid_size=64): variational_distribution = gpytorch.variational.CholeskyVariationalDistribution( num_inducing_points=grid_size, batch_shape=torch.Size([num_dim]) ) # Our base variational strategy is a GridInterpolationVariationalStrategy, # which places variational inducing points on a Grid # We wrap it with a IndependentMultitaskVariationalStrategy so that our output is a vector-valued GP variational_strategy = gpytorch.variational.IndependentMultitaskVariationalStrategy( gpytorch.variational.GridInterpolationVariationalStrategy( self, grid_size=grid_size, 
grid_bounds=[grid_bounds], variational_distribution=variational_distribution, ), num_tasks=num_dim, ) super().__init__(variational_strategy) self.covar_module = gpytorch.kernels.ScaleKernel( gpytorch.kernels.RBFKernel( lengthscale_prior=gpytorch.priors.SmoothedBoxPrior( math.exp(-1), math.exp(1), sigma=0.1, transform=torch.exp ) ) ) self.mean_module = gpytorch.means.ConstantMean() self.grid_bounds = grid_bounds def forward(self, x): mean = self.mean_module(x) covar = self.covar_module(x) return gpytorch.distributions.MultivariateNormal(mean, covar) class DKLModel(gpytorch.Module): def __init__(self, feature_extractor, num_dim, grid_bounds=(-10., 10.)): super(DKLModel, self).__init__() self.feature_extractor = feature_extractor self.gp_layer = GaussianProcessLayer(num_dim=num_dim, grid_bounds=grid_bounds) self.grid_bounds = grid_bounds self.num_dim = num_dim def forward(self, x): features = self.feature_extractor(x) features = gpytorch.utils.grid.scale_to_bounds(features, self.grid_bounds[0], self.grid_bounds[1]) # This next line makes it so that we learn a GP for each feature features = features.transpose(-1, -2).unsqueeze(-1) res = self.gp_layer(features) return res model = DKLModel(feature_extractor, num_dim=num_features) likelihood = gpytorch.likelihoods.SoftmaxLikelihood(num_features=model.num_dim, num_classes=num_classes) # If you run this example without CUDA, I hope you like waiting! if torch.cuda.is_available(): model = model.cuda() likelihood = likelihood.cuda() # load trained model state_dict = torch.load('dkl_cifar_checkpoint.dat') model_state_dict = state_dict['model'] likelihood_state_dict = state_dict['likelihood'] model.load_state_dict(model_state_dict) likelihood.load_state_dict(likelihood_state_dict) def test(): model.eval() likelihood.eval() correct = 0 with torch.no_grad(), gpytorch.settings.num_likelihood_samples(16): for data, target in test_loader: if torch.cuda.is_available(): data, target = data.cuda(), target.cuda() output = likelihood(model(data)) # This gives us 16 samples from the predictive distribution pred = output.probs.mean(0).argmax(-1) # Taking the mean over all of the sample we've drawn correct += pred.eq(target.view_as(pred)).cpu().sum() print('Test set: Accuracy: {}/{} ({}%)'.format( correct, len(test_loader.dataset), 100. * correct / float(len(test_loader.dataset)) )) with gpytorch.settings.use_toeplitz(False): test() ``` ## Expected Behavior <!-- A clear and concise description of what you expected to happen. --> When the batch size is set to 256, the accuracy on the test set is 90.9% as expected. However, if the batch size is set to 16, the accuracy drops to 74.8%. ## System information **Please complete the following information:** - GPyTorch Version: 1.3.0 - PyTorch Version: 1.8.0.dev20201117 - Ubuntu 18.04 ## Additional context Add any other context about the problem here. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gpytorch/utils/grid.py` Content: ``` 1 #!/usr/bin/env python3 2 3 import math 4 from typing import List, Tuple 5 6 import torch 7 8 9 def scale_to_bounds(x, lower_bound, upper_bound): 10 """ 11 Scale the input data so that it lies in between the lower and upper bounds. 12 13 :param x: the input data 14 :type x: torch.Tensor (... x n x d) 15 :param float lower_bound: lower bound of scaled data 16 :param float upper_bound: upper bound of scaled data 17 :return: scaled data 18 :rtype: torch.Tensor (... 
x n x d) 19 """ 20 # Scale features so they fit inside grid bounds 21 min_val = x.min() 22 max_val = x.max() 23 diff = max_val - min_val 24 x = (x - min_val) * (0.95 * (upper_bound - lower_bound) / diff) + 0.95 * lower_bound 25 return x 26 27 28 def choose_grid_size(train_inputs, ratio=1.0, kronecker_structure=True): 29 """ 30 Given some training inputs, determine a good grid size for KISS-GP. 31 32 :param x: the input data 33 :type x: torch.Tensor (... x n x d) 34 :param ratio: Amount of grid points per data point (default: 1.) 35 :type ratio: float, optional 36 :param kronecker_structure: Whether or not the model will use Kronecker structure in the grid 37 (set to True unless there is an additive or product decomposition in the prior) 38 :type kronecker_structure: bool, optional 39 :return: Grid size 40 :rtype: int 41 """ 42 # Scale features so they fit inside grid bounds 43 num_data = train_inputs.numel() if train_inputs.dim() == 1 else train_inputs.size(-2) 44 num_dim = 1 if train_inputs.dim() == 1 else train_inputs.size(-1) 45 if kronecker_structure: 46 return int(ratio * math.pow(num_data, 1.0 / num_dim)) 47 else: 48 return ratio * num_data 49 50 51 def convert_legacy_grid(grid: torch.Tensor) -> List[torch.Tensor]: 52 return [grid[:, i] for i in range(grid.size(-1))] 53 54 55 def create_data_from_grid(grid: List[torch.Tensor]) -> torch.Tensor: 56 """ 57 :param grid: Each Tensor is a 1D set of increments for the grid in that dimension 58 :type grid: List[torch.Tensor] 59 :return: The set of points on the grid going by column-major order 60 :rtype: torch.Tensor 61 """ 62 if torch.is_tensor(grid): 63 grid = convert_legacy_grid(grid) 64 ndims = len(grid) 65 assert all(axis.dim() == 1 for axis in grid) 66 projections = torch.meshgrid(*grid) 67 grid_tensor = torch.stack(projections, axis=-1) 68 # Note that if we did 69 # grid_data = grid_tensor.reshape(-1, ndims) 70 # instead, we would be iterating through the points of our grid from the 71 # last data dimension to the first data dimension. However, due to legacy 72 # reasons, we need to iterate from the first data dimension to the last data 73 # dimension when creating grid_data 74 grid_data = grid_tensor.permute(*(reversed(range(ndims + 1)))).reshape(ndims, -1).transpose(0, 1) 75 return grid_data 76 77 78 def create_grid( 79 grid_sizes: List[int], grid_bounds: List[Tuple[float, float]], extend: bool = True, device="cpu", dtype=torch.float, 80 ) -> List[torch.Tensor]: 81 """ 82 Creates a grid represented by a list of 1D Tensors representing the 83 projections of the grid into each dimension 84 85 If `extend`, we extend the grid by two points past the specified boundary 86 which can be important for getting good grid interpolations. 87 88 :param grid_sizes: Sizes of each grid dimension 89 :type grid_sizes: List[int] 90 :param grid_bounds: Lower and upper bounds of each grid dimension 91 :type grid_sizes: List[Tuple[float, float]] 92 :param device: target device for output (default: cpu) 93 :type device: torch.device, optional 94 :param dtype: target dtype for output (default: torch.float) 95 :type dtype: torch.dtype, optional 96 :return: Grid points for each dimension. Grid points are stored in a :obj:`torch.Tensor` with shape `grid_sizes[i]`. 
97 :rtype: List[torch.Tensor] 98 """ 99 grid = [] 100 for i in range(len(grid_bounds)): 101 grid_diff = float(grid_bounds[i][1] - grid_bounds[i][0]) / (grid_sizes[i] - 2) 102 if extend: 103 proj = torch.linspace( 104 grid_bounds[i][0] - grid_diff, grid_bounds[i][1] + grid_diff, grid_sizes[i], device=device, dtype=dtype, 105 ) 106 else: 107 proj = torch.linspace(grid_bounds[i][0], grid_bounds[i][1], grid_sizes[i], device=device, dtype=dtype,) 108 grid.append(proj) 109 return grid 110 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gpytorch/utils/grid.py b/gpytorch/utils/grid.py --- a/gpytorch/utils/grid.py +++ b/gpytorch/utils/grid.py @@ -1,15 +1,62 @@ #!/usr/bin/env python3 import math +import warnings from typing import List, Tuple import torch -def scale_to_bounds(x, lower_bound, upper_bound): +class ScaleToBounds(torch.nn.Module): """ Scale the input data so that it lies in between the lower and upper bounds. + In training (`self.train()`), this module adjusts the scaling factor to the minibatch of data. + During evaluation (`self.eval()`), this module uses the scaling factor from the previous minibatch of data. + + :param float lower_bound: lower bound of scaled data + :param float upper_bound: upper bound of scaled data + + Example: + >>> train_x = torch.randn(10, 5) + >>> module = gpytorch.utils.grid.ScaleToBounds(lower_bound=-1., upper_bound=1.) + >>> + >>> module.train() + >>> scaled_train_x = module(train_x) # Data should be between -0.95 and 0.95 + >>> + >>> module.eval() + >>> test_x = torch.randn(10, 5) + >>> scaled_test_x = module(test_x) # Scaling is based on train_x + """ + + def __init__(self, lower_bound, upper_bound): + super().__init__() + self.lower_bound = float(lower_bound) + self.upper_bound = float(upper_bound) + self.register_buffer("min_val", torch.tensor(lower_bound)) + self.register_buffer("max_val", torch.tensor(upper_bound)) + + def forward(self, x): + if self.training: + min_val = x.min() + max_val = x.max() + self.min_val.data = min_val + self.max_val.data = max_val + else: + min_val = self.min_val + max_val = self.max_val + # Clamp extreme values + x = x.clamp(min_val, max_val) + + diff = max_val - min_val + x = (x - min_val) * (0.95 * (self.upper_bound - self.lower_bound) / diff) + 0.95 * self.lower_bound + return x + + +def scale_to_bounds(x, lower_bound, upper_bound): + """ + DEPRECATRED: Use :obj:`~gpytorch.utils.grid.ScaleToBounds` instead. + :param x: the input data :type x: torch.Tensor (... x n x d) :param float lower_bound: lower bound of scaled data @@ -17,6 +64,10 @@ :return: scaled data :rtype: torch.Tensor (... x n x d) """ + warnings.warn( + "The `scale_to_bounds` method is deprecated. Use the `gpytorch.utils.grid.ScaleToBounds` module instead.", + DeprecationWarning, + ) # Scale features so they fit inside grid bounds min_val = x.min() max_val = x.max()
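The diff above fixes the batch-size dependence by making the feature scaling stateful: the range is recorded on training batches and reused, with clamping, at evaluation time. The following is a minimal standalone sketch of that pattern, assuming only `torch` is installed; it mirrors the `ScaleToBounds` module introduced in the diff rather than importing gpytorch, and the tensor shapes are illustrative.

```python
import torch


class ScaleToBounds(torch.nn.Module):
    """Stateful scaler: fit the range in train(), reuse it in eval()."""

    def __init__(self, lower_bound=-10.0, upper_bound=10.0):
        super().__init__()
        self.lower_bound = float(lower_bound)
        self.upper_bound = float(upper_bound)
        self.register_buffer("min_val", torch.tensor(float(lower_bound)))
        self.register_buffer("max_val", torch.tensor(float(upper_bound)))

    def forward(self, x):
        if self.training:
            # Record the range of the current (training) batch.
            self.min_val.data = x.min()
            self.max_val.data = x.max()
        else:
            # Reuse the stored range and clamp outliers, so a small test
            # batch cannot change the scaling.
            x = x.clamp(self.min_val, self.max_val)
        diff = self.max_val - self.min_val
        scale = 0.95 * (self.upper_bound - self.lower_bound) / diff
        return (x - self.min_val) * scale + 0.95 * self.lower_bound


scaler = ScaleToBounds(-10.0, 10.0)
scaler.train()
_ = scaler(torch.randn(256, 5))    # range fitted on a training batch
scaler.eval()
out = scaler(torch.randn(16, 5))   # scaled with the stored range, not batch statistics
```

Because `eval()` reuses the stored range, a test batch of 16 and a test batch of 256 are scaled identically, which is what removes the accuracy gap described in the issue.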
{"golden_diff": "diff --git a/gpytorch/utils/grid.py b/gpytorch/utils/grid.py\n--- a/gpytorch/utils/grid.py\n+++ b/gpytorch/utils/grid.py\n@@ -1,15 +1,62 @@\n #!/usr/bin/env python3\n \n import math\n+import warnings\n from typing import List, Tuple\n \n import torch\n \n \n-def scale_to_bounds(x, lower_bound, upper_bound):\n+class ScaleToBounds(torch.nn.Module):\n \"\"\"\n Scale the input data so that it lies in between the lower and upper bounds.\n \n+ In training (`self.train()`), this module adjusts the scaling factor to the minibatch of data.\n+ During evaluation (`self.eval()`), this module uses the scaling factor from the previous minibatch of data.\n+\n+ :param float lower_bound: lower bound of scaled data\n+ :param float upper_bound: upper bound of scaled data\n+\n+ Example:\n+ >>> train_x = torch.randn(10, 5)\n+ >>> module = gpytorch.utils.grid.ScaleToBounds(lower_bound=-1., upper_bound=1.)\n+ >>>\n+ >>> module.train()\n+ >>> scaled_train_x = module(train_x) # Data should be between -0.95 and 0.95\n+ >>>\n+ >>> module.eval()\n+ >>> test_x = torch.randn(10, 5)\n+ >>> scaled_test_x = module(test_x) # Scaling is based on train_x\n+ \"\"\"\n+\n+ def __init__(self, lower_bound, upper_bound):\n+ super().__init__()\n+ self.lower_bound = float(lower_bound)\n+ self.upper_bound = float(upper_bound)\n+ self.register_buffer(\"min_val\", torch.tensor(lower_bound))\n+ self.register_buffer(\"max_val\", torch.tensor(upper_bound))\n+\n+ def forward(self, x):\n+ if self.training:\n+ min_val = x.min()\n+ max_val = x.max()\n+ self.min_val.data = min_val\n+ self.max_val.data = max_val\n+ else:\n+ min_val = self.min_val\n+ max_val = self.max_val\n+ # Clamp extreme values\n+ x = x.clamp(min_val, max_val)\n+\n+ diff = max_val - min_val\n+ x = (x - min_val) * (0.95 * (self.upper_bound - self.lower_bound) / diff) + 0.95 * self.lower_bound\n+ return x\n+\n+\n+def scale_to_bounds(x, lower_bound, upper_bound):\n+ \"\"\"\n+ DEPRECATRED: Use :obj:`~gpytorch.utils.grid.ScaleToBounds` instead.\n+\n :param x: the input data\n :type x: torch.Tensor (... x n x d)\n :param float lower_bound: lower bound of scaled data\n@@ -17,6 +64,10 @@\n :return: scaled data\n :rtype: torch.Tensor (... x n x d)\n \"\"\"\n+ warnings.warn(\n+ \"The `scale_to_bounds` method is deprecated. Use the `gpytorch.utils.grid.ScaleToBounds` module instead.\",\n+ DeprecationWarning,\n+ )\n # Scale features so they fit inside grid bounds\n min_val = x.min()\n max_val = x.max()\n", "issue": "[Bug] Performance varies according to the batch size\n# \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nThe accuracy of the classifier trained with the codes (https://github.com/cornellius-gp/gpytorch/tree/master/examples/06_PyTorch_NN_Integration_DKL) varies according to the batch size. It should be consistent regardless of the batch size. 
Am I missing something?\r\n\r\n## To reproduce\r\n\r\n** Code snippet to reproduce **\r\n```python\r\nfrom torch.optim import SGD, Adam\r\nfrom torch.optim.lr_scheduler import MultiStepLR\r\nimport torch.nn.functional as F\r\nfrom torch import nn\r\nimport torch\r\nimport os\r\nimport torchvision.datasets as dset\r\nimport torchvision.transforms as transforms\r\nimport gpytorch\r\nimport math\r\nimport tqdm\r\n\r\n\r\nnormalize = transforms.Normalize(mean=[0.5071, 0.4867, 0.4408], std=[0.2675, 0.2565, 0.2761])\r\naug_trans = [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip()]\r\ncommon_trans = [transforms.ToTensor(), normalize]\r\ntrain_compose = transforms.Compose(aug_trans + common_trans)\r\ntest_compose = transforms.Compose(common_trans)\r\n\r\n\r\ndataset = \"cifar10\"\r\n\r\n\r\nif ('CI' in os.environ): # this is for running the notebook in our testing framework\r\n train_set = torch.utils.data.TensorDataset(torch.randn(8, 3, 32, 32), torch.rand(8).round().long())\r\n test_set = torch.utils.data.TensorDataset(torch.randn(4, 3, 32, 32), torch.rand(4).round().long())\r\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=4, shuffle=True)\r\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=2, shuffle=False)\r\n num_classes = 2\r\nelif dataset == 'cifar10':\r\n train_set = dset.CIFAR10('data', train=True, transform=train_compose, download=True)\r\n test_set = dset.CIFAR10('data', train=False, transform=test_compose)\r\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=256, shuffle=True)\r\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=16, shuffle=False)\r\n num_classes = 10\r\nelif dataset == 'cifar100':\r\n train_set = dset.CIFAR100('data', train=True, transform=train_compose, download=True)\r\n test_set = dset.CIFAR100('data', train=False, transform=test_compose)\r\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=256, shuffle=True)\r\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=256, shuffle=False)\r\n num_classes = 100\r\nelse:\r\n raise RuntimeError('dataset must be one of \"cifar100\" or \"cifar10\"')\r\n\r\nfrom densenet import DenseNet\r\n\r\nclass DenseNetFeatureExtractor(DenseNet):\r\n def forward(self, x):\r\n features = self.features(x)\r\n out = F.relu(features, inplace=True)\r\n out = F.avg_pool2d(out, kernel_size=self.avgpool_size).view(features.size(0), -1)\r\n return out\r\n\r\nfeature_extractor = DenseNetFeatureExtractor(block_config=(6, 6, 6), num_classes=num_classes)\r\nnum_features = feature_extractor.classifier.in_features\r\n\r\nclass GaussianProcessLayer(gpytorch.models.ApproximateGP):\r\n def __init__(self, num_dim, grid_bounds=(-10., 10.), grid_size=64):\r\n variational_distribution = gpytorch.variational.CholeskyVariationalDistribution(\r\n num_inducing_points=grid_size, batch_shape=torch.Size([num_dim])\r\n )\r\n\r\n # Our base variational strategy is a GridInterpolationVariationalStrategy,\r\n # which places variational inducing points on a Grid\r\n # We wrap it with a IndependentMultitaskVariationalStrategy so that our output is a vector-valued GP\r\n variational_strategy = gpytorch.variational.IndependentMultitaskVariationalStrategy(\r\n gpytorch.variational.GridInterpolationVariationalStrategy(\r\n self, grid_size=grid_size, grid_bounds=[grid_bounds],\r\n variational_distribution=variational_distribution,\r\n ), num_tasks=num_dim,\r\n )\r\n super().__init__(variational_strategy)\r\n\r\n self.covar_module = 
gpytorch.kernels.ScaleKernel(\r\n gpytorch.kernels.RBFKernel(\r\n lengthscale_prior=gpytorch.priors.SmoothedBoxPrior(\r\n math.exp(-1), math.exp(1), sigma=0.1, transform=torch.exp\r\n )\r\n )\r\n )\r\n self.mean_module = gpytorch.means.ConstantMean()\r\n self.grid_bounds = grid_bounds\r\n\r\n def forward(self, x):\r\n mean = self.mean_module(x)\r\n covar = self.covar_module(x)\r\n return gpytorch.distributions.MultivariateNormal(mean, covar)\r\n\r\n\r\nclass DKLModel(gpytorch.Module):\r\n def __init__(self, feature_extractor, num_dim, grid_bounds=(-10., 10.)):\r\n super(DKLModel, self).__init__()\r\n self.feature_extractor = feature_extractor\r\n self.gp_layer = GaussianProcessLayer(num_dim=num_dim, grid_bounds=grid_bounds)\r\n self.grid_bounds = grid_bounds\r\n self.num_dim = num_dim\r\n\r\n def forward(self, x):\r\n features = self.feature_extractor(x)\r\n features = gpytorch.utils.grid.scale_to_bounds(features, self.grid_bounds[0], self.grid_bounds[1])\r\n # This next line makes it so that we learn a GP for each feature\r\n features = features.transpose(-1, -2).unsqueeze(-1)\r\n res = self.gp_layer(features)\r\n return res\r\n\r\nmodel = DKLModel(feature_extractor, num_dim=num_features)\r\nlikelihood = gpytorch.likelihoods.SoftmaxLikelihood(num_features=model.num_dim, num_classes=num_classes)\r\n\r\n\r\n# If you run this example without CUDA, I hope you like waiting!\r\nif torch.cuda.is_available():\r\n model = model.cuda()\r\n likelihood = likelihood.cuda()\r\n\r\n# load trained model\r\nstate_dict = torch.load('dkl_cifar_checkpoint.dat')\r\nmodel_state_dict = state_dict['model']\r\nlikelihood_state_dict = state_dict['likelihood']\r\n\r\nmodel.load_state_dict(model_state_dict)\r\nlikelihood.load_state_dict(likelihood_state_dict)\r\n\r\ndef test():\r\n model.eval()\r\n likelihood.eval()\r\n\r\n correct = 0\r\n with torch.no_grad(), gpytorch.settings.num_likelihood_samples(16):\r\n for data, target in test_loader:\r\n if torch.cuda.is_available():\r\n data, target = data.cuda(), target.cuda()\r\n output = likelihood(model(data)) # This gives us 16 samples from the predictive distribution\r\n pred = output.probs.mean(0).argmax(-1) # Taking the mean over all of the sample we've drawn\r\n correct += pred.eq(target.view_as(pred)).cpu().sum()\r\n print('Test set: Accuracy: {}/{} ({}%)'.format(\r\n correct, len(test_loader.dataset), 100. * correct / float(len(test_loader.dataset))\r\n ))\r\n\r\nwith gpytorch.settings.use_toeplitz(False):\r\n test()\r\n\r\n```\r\n\r\n## Expected Behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nWhen the batch size is set to 256, the accuracy on the test set is 90.9% as expected. However, if the batch size is set to 16, the accuracy drops to 74.8%.\r\n\r\n\r\n## System information\r\n\r\n**Please complete the following information:**\r\n- GPyTorch Version: 1.3.0\r\n- PyTorch Version: 1.8.0.dev20201117\r\n- Ubuntu 18.04\r\n\r\n## Additional context\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport math\nfrom typing import List, Tuple\n\nimport torch\n\n\ndef scale_to_bounds(x, lower_bound, upper_bound):\n \"\"\"\n Scale the input data so that it lies in between the lower and upper bounds.\n\n :param x: the input data\n :type x: torch.Tensor (... x n x d)\n :param float lower_bound: lower bound of scaled data\n :param float upper_bound: upper bound of scaled data\n :return: scaled data\n :rtype: torch.Tensor (... 
x n x d)\n \"\"\"\n # Scale features so they fit inside grid bounds\n min_val = x.min()\n max_val = x.max()\n diff = max_val - min_val\n x = (x - min_val) * (0.95 * (upper_bound - lower_bound) / diff) + 0.95 * lower_bound\n return x\n\n\ndef choose_grid_size(train_inputs, ratio=1.0, kronecker_structure=True):\n \"\"\"\n Given some training inputs, determine a good grid size for KISS-GP.\n\n :param x: the input data\n :type x: torch.Tensor (... x n x d)\n :param ratio: Amount of grid points per data point (default: 1.)\n :type ratio: float, optional\n :param kronecker_structure: Whether or not the model will use Kronecker structure in the grid\n (set to True unless there is an additive or product decomposition in the prior)\n :type kronecker_structure: bool, optional\n :return: Grid size\n :rtype: int\n \"\"\"\n # Scale features so they fit inside grid bounds\n num_data = train_inputs.numel() if train_inputs.dim() == 1 else train_inputs.size(-2)\n num_dim = 1 if train_inputs.dim() == 1 else train_inputs.size(-1)\n if kronecker_structure:\n return int(ratio * math.pow(num_data, 1.0 / num_dim))\n else:\n return ratio * num_data\n\n\ndef convert_legacy_grid(grid: torch.Tensor) -> List[torch.Tensor]:\n return [grid[:, i] for i in range(grid.size(-1))]\n\n\ndef create_data_from_grid(grid: List[torch.Tensor]) -> torch.Tensor:\n \"\"\"\n :param grid: Each Tensor is a 1D set of increments for the grid in that dimension\n :type grid: List[torch.Tensor]\n :return: The set of points on the grid going by column-major order\n :rtype: torch.Tensor\n \"\"\"\n if torch.is_tensor(grid):\n grid = convert_legacy_grid(grid)\n ndims = len(grid)\n assert all(axis.dim() == 1 for axis in grid)\n projections = torch.meshgrid(*grid)\n grid_tensor = torch.stack(projections, axis=-1)\n # Note that if we did\n # grid_data = grid_tensor.reshape(-1, ndims)\n # instead, we would be iterating through the points of our grid from the\n # last data dimension to the first data dimension. However, due to legacy\n # reasons, we need to iterate from the first data dimension to the last data\n # dimension when creating grid_data\n grid_data = grid_tensor.permute(*(reversed(range(ndims + 1)))).reshape(ndims, -1).transpose(0, 1)\n return grid_data\n\n\ndef create_grid(\n grid_sizes: List[int], grid_bounds: List[Tuple[float, float]], extend: bool = True, device=\"cpu\", dtype=torch.float,\n) -> List[torch.Tensor]:\n \"\"\"\n Creates a grid represented by a list of 1D Tensors representing the\n projections of the grid into each dimension\n\n If `extend`, we extend the grid by two points past the specified boundary\n which can be important for getting good grid interpolations.\n\n :param grid_sizes: Sizes of each grid dimension\n :type grid_sizes: List[int]\n :param grid_bounds: Lower and upper bounds of each grid dimension\n :type grid_sizes: List[Tuple[float, float]]\n :param device: target device for output (default: cpu)\n :type device: torch.device, optional\n :param dtype: target dtype for output (default: torch.float)\n :type dtype: torch.dtype, optional\n :return: Grid points for each dimension. 
Grid points are stored in a :obj:`torch.Tensor` with shape `grid_sizes[i]`.\n :rtype: List[torch.Tensor]\n \"\"\"\n grid = []\n for i in range(len(grid_bounds)):\n grid_diff = float(grid_bounds[i][1] - grid_bounds[i][0]) / (grid_sizes[i] - 2)\n if extend:\n proj = torch.linspace(\n grid_bounds[i][0] - grid_diff, grid_bounds[i][1] + grid_diff, grid_sizes[i], device=device, dtype=dtype,\n )\n else:\n proj = torch.linspace(grid_bounds[i][0], grid_bounds[i][1], grid_sizes[i], device=device, dtype=dtype,)\n grid.append(proj)\n return grid\n", "path": "gpytorch/utils/grid.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport math\nimport warnings\nfrom typing import List, Tuple\n\nimport torch\n\n\nclass ScaleToBounds(torch.nn.Module):\n \"\"\"\n Scale the input data so that it lies in between the lower and upper bounds.\n\n In training (`self.train()`), this module adjusts the scaling factor to the minibatch of data.\n During evaluation (`self.eval()`), this module uses the scaling factor from the previous minibatch of data.\n\n :param float lower_bound: lower bound of scaled data\n :param float upper_bound: upper bound of scaled data\n\n Example:\n >>> train_x = torch.randn(10, 5)\n >>> module = gpytorch.utils.grid.ScaleToBounds(lower_bound=-1., upper_bound=1.)\n >>>\n >>> module.train()\n >>> scaled_train_x = module(train_x) # Data should be between -0.95 and 0.95\n >>>\n >>> module.eval()\n >>> test_x = torch.randn(10, 5)\n >>> scaled_test_x = module(test_x) # Scaling is based on train_x\n \"\"\"\n\n def __init__(self, lower_bound, upper_bound):\n super().__init__()\n self.lower_bound = float(lower_bound)\n self.upper_bound = float(upper_bound)\n self.register_buffer(\"min_val\", torch.tensor(lower_bound))\n self.register_buffer(\"max_val\", torch.tensor(upper_bound))\n\n def forward(self, x):\n if self.training:\n min_val = x.min()\n max_val = x.max()\n self.min_val.data = min_val\n self.max_val.data = max_val\n else:\n min_val = self.min_val\n max_val = self.max_val\n # Clamp extreme values\n x = x.clamp(min_val, max_val)\n\n diff = max_val - min_val\n x = (x - min_val) * (0.95 * (self.upper_bound - self.lower_bound) / diff) + 0.95 * self.lower_bound\n return x\n\n\ndef scale_to_bounds(x, lower_bound, upper_bound):\n \"\"\"\n DEPRECATRED: Use :obj:`~gpytorch.utils.grid.ScaleToBounds` instead.\n\n :param x: the input data\n :type x: torch.Tensor (... x n x d)\n :param float lower_bound: lower bound of scaled data\n :param float upper_bound: upper bound of scaled data\n :return: scaled data\n :rtype: torch.Tensor (... x n x d)\n \"\"\"\n warnings.warn(\n \"The `scale_to_bounds` method is deprecated. Use the `gpytorch.utils.grid.ScaleToBounds` module instead.\",\n DeprecationWarning,\n )\n # Scale features so they fit inside grid bounds\n min_val = x.min()\n max_val = x.max()\n diff = max_val - min_val\n x = (x - min_val) * (0.95 * (upper_bound - lower_bound) / diff) + 0.95 * lower_bound\n return x\n\n\ndef choose_grid_size(train_inputs, ratio=1.0, kronecker_structure=True):\n \"\"\"\n Given some training inputs, determine a good grid size for KISS-GP.\n\n :param x: the input data\n :type x: torch.Tensor (... 
x n x d)\n :param ratio: Amount of grid points per data point (default: 1.)\n :type ratio: float, optional\n :param kronecker_structure: Whether or not the model will use Kronecker structure in the grid\n (set to True unless there is an additive or product decomposition in the prior)\n :type kronecker_structure: bool, optional\n :return: Grid size\n :rtype: int\n \"\"\"\n # Scale features so they fit inside grid bounds\n num_data = train_inputs.numel() if train_inputs.dim() == 1 else train_inputs.size(-2)\n num_dim = 1 if train_inputs.dim() == 1 else train_inputs.size(-1)\n if kronecker_structure:\n return int(ratio * math.pow(num_data, 1.0 / num_dim))\n else:\n return ratio * num_data\n\n\ndef convert_legacy_grid(grid: torch.Tensor) -> List[torch.Tensor]:\n return [grid[:, i] for i in range(grid.size(-1))]\n\n\ndef create_data_from_grid(grid: List[torch.Tensor]) -> torch.Tensor:\n \"\"\"\n :param grid: Each Tensor is a 1D set of increments for the grid in that dimension\n :type grid: List[torch.Tensor]\n :return: The set of points on the grid going by column-major order\n :rtype: torch.Tensor\n \"\"\"\n if torch.is_tensor(grid):\n grid = convert_legacy_grid(grid)\n ndims = len(grid)\n assert all(axis.dim() == 1 for axis in grid)\n projections = torch.meshgrid(*grid)\n grid_tensor = torch.stack(projections, axis=-1)\n # Note that if we did\n # grid_data = grid_tensor.reshape(-1, ndims)\n # instead, we would be iterating through the points of our grid from the\n # last data dimension to the first data dimension. However, due to legacy\n # reasons, we need to iterate from the first data dimension to the last data\n # dimension when creating grid_data\n grid_data = grid_tensor.permute(*(reversed(range(ndims + 1)))).reshape(ndims, -1).transpose(0, 1)\n return grid_data\n\n\ndef create_grid(\n grid_sizes: List[int], grid_bounds: List[Tuple[float, float]], extend: bool = True, device=\"cpu\", dtype=torch.float,\n) -> List[torch.Tensor]:\n \"\"\"\n Creates a grid represented by a list of 1D Tensors representing the\n projections of the grid into each dimension\n\n If `extend`, we extend the grid by two points past the specified boundary\n which can be important for getting good grid interpolations.\n\n :param grid_sizes: Sizes of each grid dimension\n :type grid_sizes: List[int]\n :param grid_bounds: Lower and upper bounds of each grid dimension\n :type grid_sizes: List[Tuple[float, float]]\n :param device: target device for output (default: cpu)\n :type device: torch.device, optional\n :param dtype: target dtype for output (default: torch.float)\n :type dtype: torch.dtype, optional\n :return: Grid points for each dimension. Grid points are stored in a :obj:`torch.Tensor` with shape `grid_sizes[i]`.\n :rtype: List[torch.Tensor]\n \"\"\"\n grid = []\n for i in range(len(grid_bounds)):\n grid_diff = float(grid_bounds[i][1] - grid_bounds[i][0]) / (grid_sizes[i] - 2)\n if extend:\n proj = torch.linspace(\n grid_bounds[i][0] - grid_diff, grid_bounds[i][1] + grid_diff, grid_sizes[i], device=device, dtype=dtype,\n )\n else:\n proj = torch.linspace(grid_bounds[i][0], grid_bounds[i][1], grid_sizes[i], device=device, dtype=dtype,)\n grid.append(proj)\n return grid\n", "path": "gpytorch/utils/grid.py"}]}
3310
713
gh_patches_debug_30142
rasdani/github-patches
git_diff
helmholtz-analytics__heat-876
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Clean up Lasso file, make examples work --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/lasso/demo.py` Content: ``` 1 import numpy as np 2 import torch 3 import sys 4 5 sys.path.append("../../") 6 7 import heat as ht 8 from matplotlib import pyplot as plt 9 from sklearn import datasets 10 import heat.regression.lasso as lasso 11 import plotfkt 12 13 # read scikit diabetes data set 14 diabetes = datasets.load_diabetes() 15 16 # load diabetes dataset from hdf5 file 17 X = ht.load_hdf5("../../heat/datasets/data/diabetes.h5", dataset="x", split=0) 18 y = ht.load_hdf5("../../heat/datasets/data/diabetes.h5", dataset="y", split=0) 19 20 # normalize dataset #DoTO this goes into the lasso fit routine soon as issue #106 is solved 21 X = X / ht.sqrt((ht.mean(X ** 2, axis=0))) 22 23 # HeAT lasso instance 24 estimator = lasso.Lasso(max_iter=100) 25 26 # List lasso model parameters 27 theta_list = list() 28 29 # Range of lambda values 30 lamda = np.logspace(0, 4, 10) / 10 31 32 # compute the lasso path 33 for la in lamda: 34 estimator.lam = la 35 estimator.fit(X, y) 36 theta_list.append(estimator.theta.numpy().flatten()) 37 38 # Stack estimated model parameters into one numpy array 39 theta_lasso = np.stack(theta_list).T 40 41 # Stack into numpy array 42 theta_lasso = np.stack(theta_list).T[1:, :] 43 44 # plot lasso paths 45 plotfkt.plot_lasso_path( 46 lamda, theta_lasso, diabetes.feature_names, title="Lasso Paths - HeAT implementation" 47 ) 48 49 if X.is_distributed(): 50 distributed = X.comm.rank 51 else: 52 distributed = False 53 54 # plot only with first rank 55 if distributed is False: 56 plt.show() 57 elif distributed == 0: 58 plt.show() 59 ``` Path: `setup.py` Content: ``` 1 from setuptools import setup, find_packages 2 import codecs 3 4 5 with codecs.open("README.md", "r", "utf-8") as handle: 6 long_description = handle.read() 7 8 __version__ = None # appeases flake, assignment in exec() below 9 with open("./heat/core/version.py") as handle: 10 exec(handle.read()) 11 12 setup( 13 name="heat", 14 packages=find_packages(exclude=("*tests*", "*benchmarks*")), 15 data_files=["README.md", "LICENSE"], 16 version=__version__, 17 description="A framework for high-performance data analytics and machine learning.", 18 long_description=long_description, 19 long_description_content_type="text/markdown", 20 author="Helmholtz Association", 21 author_email="[email protected]", 22 url="https://github.com/helmholtz-analytics/heat", 23 keywords=["data", "analytics", "tensors", "distributed", "gpu"], 24 python_requires=">=3.7", 25 classifiers=[ 26 "Development Status :: 4 - Beta", 27 "Programming Language :: Python :: 3.7", 28 "Programming Language :: Python :: 3.8", 29 "License :: OSI Approved :: MIT License", 30 "Intended Audience :: Science/Research", 31 "Topic :: Scientific/Engineering", 32 ], 33 install_requires=[ 34 "mpi4py>=3.0.0", 35 "numpy>=1.13.0", 36 "torch>=1.7.0", 37 "scipy>=0.14.0", 38 "pillow>=6.0.0", 39 "torchvision>=0.8.0", 40 ], 41 extras_require={ 42 "docutils": ["docutils>=0.16"], 43 "hdf5": ["h5py>=2.8.0"], 44 "netcdf": ["netCDF4>=1.5.6"], 45 "dev": ["pre-commit>=1.18.3"], 46 }, 47 ) 48 ``` Path: `examples/classification/demo_knn.py` Content: ``` 1 import sys 2 import os 3 import random 4 5 # Fix python path if run from terminal 6 curdir = 
os.path.dirname(os.path.abspath(__file__)) 7 sys.path.insert(0, os.path.abspath(os.path.join(curdir, "../../"))) 8 9 import heat as ht 10 from heat.classification.kneighborsclassifier import KNeighborsClassifier 11 12 # Load dataset from hdf5 file 13 X = ht.load_hdf5("../../heat/datasets/data/iris.h5", dataset="data", split=0) 14 15 # Generate keys for the iris.h5 dataset 16 keys = [] 17 for i in range(50): 18 keys.append(0) 19 for i in range(50, 100): 20 keys.append(1) 21 for i in range(100, 150): 22 keys.append(2) 23 Y = ht.array(keys, split=0) 24 25 26 def calculate_accuracy(new_y, verification_y): 27 """ 28 Calculates the accuracy of classification/clustering-algorithms. 29 Note this only works with integer/discrete classes. For algorithms that give approximations an error function is 30 required. 31 32 Parameters 33 ---------- 34 new_y : ht.tensor of shape (n_samples, n_features), required 35 The new labels that where generated 36 verification_y : ht.tensor of shape (n_samples, n_features), required 37 Known labels 38 39 Returns 40 ---------- 41 float 42 the accuracy, number of properly labeled samples divided by amount of labels. 43 """ 44 45 if new_y.gshape != verification_y.gshape: 46 raise ValueError( 47 "Expecting results of same length, got {}, {}".format( 48 new_y.gshape, verification_y.gshape 49 ) 50 ) 51 52 count = ht.sum(ht.where(new_y == verification_y, 1, 0)) 53 54 return count / new_y.gshape[0] 55 56 57 def create_fold(dataset_x, dataset_y, size, seed=None): 58 """ 59 Randomly splits the dataset into two parts for cross-validation. 60 61 Parameters 62 ---------- 63 dataset_x : ht.DNDarray 64 data vectors, required 65 dataset_y : ht.DNDarray 66 labels for dataset_x, required 67 size : int 68 the size of the split to create 69 seed: int, optional 70 seed for the random generator, allows deterministic testing 71 72 Returns 73 ---------- 74 fold_x : ht.DNDarray 75 DNDarray of shape (size,) containing data vectors from dataset_x 76 fold_y : ht.DNDarray 77 DNDarray of shape(size,) containing labels from dataset_y 78 verification_x : ht.DNDarray 79 DNDarray of shape(len(dataset_x - size),) containing all items from dataset_x not in fold_x 80 verification_y : ht.DNDarray 81 DNDarray of shape(len(dataset_y - size),) containing all items from dataset_y not in fold_y 82 """ 83 assert len(dataset_y) == len(dataset_x) 84 assert size < len(dataset_x) 85 86 data_length = len(dataset_x) 87 88 if seed: 89 random.seed(seed) 90 indices = [i for i in range(data_length)] 91 random.shuffle(indices) 92 93 data_indices = ht.array(indices[0:size], split=0) 94 verification_indices = ht.array(indices[size:], split=0) 95 96 fold_x = ht.array(dataset_x[data_indices], is_split=0) 97 fold_y = ht.array(dataset_y[data_indices], is_split=0) 98 verification_y = ht.array(dataset_y[verification_indices], is_split=0) 99 verification_x = ht.array(dataset_x[verification_indices], is_split=0) 100 101 # Balance arrays 102 fold_x.balance_() 103 fold_y.balance_() 104 verification_y.balance_() 105 verification_x.balance_() 106 107 return fold_x, fold_y, verification_x, verification_y 108 109 110 def verify_algorithm(x, y, split_number, split_size, k, seed=None): 111 """ 112 Parameters 113 ---------- 114 x : ht.DNDarray 115 array containing data vectors 116 y : ht.DNDarray 117 array containing the labels for x (must be in same order) 118 split_number: int 119 the number of test iterations 120 split_size : int 121 the number of vectors used by the KNeighborsClassifier-Algorithm 122 k : int 123 The number of neighbours 
for KNeighborsClassifier-Algorithm 124 seed : int 125 Seed for the random generator used in creating folds. Used for deterministic testing purposes. 126 Returns 127 ------- 128 accuracies : ht.DNDarray 129 array of shape (split_number,) containing the accuracy per run 130 """ 131 assert len(x) == len(y) 132 assert split_size < len(x) 133 assert k < len(x) 134 135 accuracies = [] 136 137 for split_index in range(split_number): 138 fold_x, fold_y, verification_x, verification_y = create_fold(x, y, split_size, seed) 139 classifier = KNeighborsClassifier(fold_x, fold_y, k) 140 result_y = classifier.predict(verification_x) 141 accuracies.append(calculate_accuracy(result_y, verification_y).item()) 142 return accuracies 143 144 145 print(verify_algorithm(X, Y, 1, 30, 5, 1)) 146 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/classification/demo_knn.py b/examples/classification/demo_knn.py --- a/examples/classification/demo_knn.py +++ b/examples/classification/demo_knn.py @@ -10,7 +10,7 @@ from heat.classification.kneighborsclassifier import KNeighborsClassifier # Load dataset from hdf5 file -X = ht.load_hdf5("../../heat/datasets/data/iris.h5", dataset="data", split=0) +X = ht.load_hdf5("../../heat/datasets/iris.h5", dataset="data", split=0) # Generate keys for the iris.h5 dataset keys = [] diff --git a/examples/lasso/demo.py b/examples/lasso/demo.py --- a/examples/lasso/demo.py +++ b/examples/lasso/demo.py @@ -1,8 +1,11 @@ import numpy as np import torch import sys +import os -sys.path.append("../../") +# Fix python path if run from terminal +curdir = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, os.path.abspath(os.path.join(curdir, "../../"))) import heat as ht from matplotlib import pyplot as plt @@ -14,8 +17,8 @@ diabetes = datasets.load_diabetes() # load diabetes dataset from hdf5 file -X = ht.load_hdf5("../../heat/datasets/data/diabetes.h5", dataset="x", split=0) -y = ht.load_hdf5("../../heat/datasets/data/diabetes.h5", dataset="y", split=0) +X = ht.load_hdf5("../../heat/datasets/diabetes.h5", dataset="x", split=0) +y = ht.load_hdf5("../../heat/datasets/diabetes.h5", dataset="y", split=0) # normalize dataset #DoTO this goes into the lasso fit routine soon as issue #106 is solved X = X / ht.sqrt((ht.mean(X ** 2, axis=0))) diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -43,5 +43,6 @@ "hdf5": ["h5py>=2.8.0"], "netcdf": ["netCDF4>=1.5.6"], "dev": ["pre-commit>=1.18.3"], + "examples": ["scikit-learn>=0.24.0", "matplotlib>=3.1.0"], }, )
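The diff above makes the Heat examples runnable from any working directory by anchoring imports to the script location and by pointing at `heat/datasets/` instead of the removed `heat/datasets/data/` folder. Below is a hedged sketch of the same pattern; building the dataset path with `os.path.join` from `curdir` is an illustrative variation on the diff's relative string, and it assumes the script still lives under `examples/lasso/` in a Heat source checkout with `h5py` available.

```python
import os
import sys

# Anchor imports and data paths to the script location so the example also
# works when launched from outside examples/lasso (same pattern as the diff).
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(curdir, "../../")))

import heat as ht  # noqa: E402

# Datasets now live directly under heat/datasets/ (no data/ subfolder).
diabetes_path = os.path.join(curdir, "..", "..", "heat", "datasets", "diabetes.h5")
X = ht.load_hdf5(diabetes_path, dataset="x", split=0)
y = ht.load_hdf5(diabetes_path, dataset="y", split=0)
```

With the new `examples` extra added to `setup.py`, the scikit-learn and matplotlib dependencies used by these scripts could then be pulled in via `pip install 'heat[examples]'`.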
{"golden_diff": "diff --git a/examples/classification/demo_knn.py b/examples/classification/demo_knn.py\n--- a/examples/classification/demo_knn.py\n+++ b/examples/classification/demo_knn.py\n@@ -10,7 +10,7 @@\n from heat.classification.kneighborsclassifier import KNeighborsClassifier\n \n # Load dataset from hdf5 file\n-X = ht.load_hdf5(\"../../heat/datasets/data/iris.h5\", dataset=\"data\", split=0)\n+X = ht.load_hdf5(\"../../heat/datasets/iris.h5\", dataset=\"data\", split=0)\n \n # Generate keys for the iris.h5 dataset\n keys = []\ndiff --git a/examples/lasso/demo.py b/examples/lasso/demo.py\n--- a/examples/lasso/demo.py\n+++ b/examples/lasso/demo.py\n@@ -1,8 +1,11 @@\n import numpy as np\n import torch\n import sys\n+import os\n \n-sys.path.append(\"../../\")\n+# Fix python path if run from terminal\n+curdir = os.path.dirname(os.path.abspath(__file__))\n+sys.path.insert(0, os.path.abspath(os.path.join(curdir, \"../../\")))\n \n import heat as ht\n from matplotlib import pyplot as plt\n@@ -14,8 +17,8 @@\n diabetes = datasets.load_diabetes()\n \n # load diabetes dataset from hdf5 file\n-X = ht.load_hdf5(\"../../heat/datasets/data/diabetes.h5\", dataset=\"x\", split=0)\n-y = ht.load_hdf5(\"../../heat/datasets/data/diabetes.h5\", dataset=\"y\", split=0)\n+X = ht.load_hdf5(\"../../heat/datasets/diabetes.h5\", dataset=\"x\", split=0)\n+y = ht.load_hdf5(\"../../heat/datasets/diabetes.h5\", dataset=\"y\", split=0)\n \n # normalize dataset #DoTO this goes into the lasso fit routine soon as issue #106 is solved\n X = X / ht.sqrt((ht.mean(X ** 2, axis=0)))\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -43,5 +43,6 @@\n \"hdf5\": [\"h5py>=2.8.0\"],\n \"netcdf\": [\"netCDF4>=1.5.6\"],\n \"dev\": [\"pre-commit>=1.18.3\"],\n+ \"examples\": [\"scikit-learn>=0.24.0\", \"matplotlib>=3.1.0\"],\n },\n )\n", "issue": "Clean up Lasso file, make examples work\n\r\n\n", "before_files": [{"content": "import numpy as np\nimport torch\nimport sys\n\nsys.path.append(\"../../\")\n\nimport heat as ht\nfrom matplotlib import pyplot as plt\nfrom sklearn import datasets\nimport heat.regression.lasso as lasso\nimport plotfkt\n\n# read scikit diabetes data set\ndiabetes = datasets.load_diabetes()\n\n# load diabetes dataset from hdf5 file\nX = ht.load_hdf5(\"../../heat/datasets/data/diabetes.h5\", dataset=\"x\", split=0)\ny = ht.load_hdf5(\"../../heat/datasets/data/diabetes.h5\", dataset=\"y\", split=0)\n\n# normalize dataset #DoTO this goes into the lasso fit routine soon as issue #106 is solved\nX = X / ht.sqrt((ht.mean(X ** 2, axis=0)))\n\n# HeAT lasso instance\nestimator = lasso.Lasso(max_iter=100)\n\n# List lasso model parameters\ntheta_list = list()\n\n# Range of lambda values\nlamda = np.logspace(0, 4, 10) / 10\n\n# compute the lasso path\nfor la in lamda:\n estimator.lam = la\n estimator.fit(X, y)\n theta_list.append(estimator.theta.numpy().flatten())\n\n# Stack estimated model parameters into one numpy array\ntheta_lasso = np.stack(theta_list).T\n\n# Stack into numpy array\ntheta_lasso = np.stack(theta_list).T[1:, :]\n\n# plot lasso paths\nplotfkt.plot_lasso_path(\n lamda, theta_lasso, diabetes.feature_names, title=\"Lasso Paths - HeAT implementation\"\n)\n\nif X.is_distributed():\n distributed = X.comm.rank\nelse:\n distributed = False\n\n# plot only with first rank\nif distributed is False:\n plt.show()\nelif distributed == 0:\n plt.show()\n", "path": "examples/lasso/demo.py"}, {"content": "from setuptools import setup, find_packages\nimport codecs\n\n\nwith codecs.open(\"README.md\", 
\"r\", \"utf-8\") as handle:\n long_description = handle.read()\n\n__version__ = None # appeases flake, assignment in exec() below\nwith open(\"./heat/core/version.py\") as handle:\n exec(handle.read())\n\nsetup(\n name=\"heat\",\n packages=find_packages(exclude=(\"*tests*\", \"*benchmarks*\")),\n data_files=[\"README.md\", \"LICENSE\"],\n version=__version__,\n description=\"A framework for high-performance data analytics and machine learning.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Helmholtz Association\",\n author_email=\"[email protected]\",\n url=\"https://github.com/helmholtz-analytics/heat\",\n keywords=[\"data\", \"analytics\", \"tensors\", \"distributed\", \"gpu\"],\n python_requires=\">=3.7\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: MIT License\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n ],\n install_requires=[\n \"mpi4py>=3.0.0\",\n \"numpy>=1.13.0\",\n \"torch>=1.7.0\",\n \"scipy>=0.14.0\",\n \"pillow>=6.0.0\",\n \"torchvision>=0.8.0\",\n ],\n extras_require={\n \"docutils\": [\"docutils>=0.16\"],\n \"hdf5\": [\"h5py>=2.8.0\"],\n \"netcdf\": [\"netCDF4>=1.5.6\"],\n \"dev\": [\"pre-commit>=1.18.3\"],\n },\n)\n", "path": "setup.py"}, {"content": "import sys\nimport os\nimport random\n\n# Fix python path if run from terminal\ncurdir = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, os.path.abspath(os.path.join(curdir, \"../../\")))\n\nimport heat as ht\nfrom heat.classification.kneighborsclassifier import KNeighborsClassifier\n\n# Load dataset from hdf5 file\nX = ht.load_hdf5(\"../../heat/datasets/data/iris.h5\", dataset=\"data\", split=0)\n\n# Generate keys for the iris.h5 dataset\nkeys = []\nfor i in range(50):\n keys.append(0)\nfor i in range(50, 100):\n keys.append(1)\nfor i in range(100, 150):\n keys.append(2)\nY = ht.array(keys, split=0)\n\n\ndef calculate_accuracy(new_y, verification_y):\n \"\"\"\n Calculates the accuracy of classification/clustering-algorithms.\n Note this only works with integer/discrete classes. 
For algorithms that give approximations an error function is\n required.\n\n Parameters\n ----------\n new_y : ht.tensor of shape (n_samples, n_features), required\n The new labels that where generated\n verification_y : ht.tensor of shape (n_samples, n_features), required\n Known labels\n\n Returns\n ----------\n float\n the accuracy, number of properly labeled samples divided by amount of labels.\n \"\"\"\n\n if new_y.gshape != verification_y.gshape:\n raise ValueError(\n \"Expecting results of same length, got {}, {}\".format(\n new_y.gshape, verification_y.gshape\n )\n )\n\n count = ht.sum(ht.where(new_y == verification_y, 1, 0))\n\n return count / new_y.gshape[0]\n\n\ndef create_fold(dataset_x, dataset_y, size, seed=None):\n \"\"\"\n Randomly splits the dataset into two parts for cross-validation.\n\n Parameters\n ----------\n dataset_x : ht.DNDarray\n data vectors, required\n dataset_y : ht.DNDarray\n labels for dataset_x, required\n size : int\n the size of the split to create\n seed: int, optional\n seed for the random generator, allows deterministic testing\n\n Returns\n ----------\n fold_x : ht.DNDarray\n DNDarray of shape (size,) containing data vectors from dataset_x\n fold_y : ht.DNDarray\n DNDarray of shape(size,) containing labels from dataset_y\n verification_x : ht.DNDarray\n DNDarray of shape(len(dataset_x - size),) containing all items from dataset_x not in fold_x\n verification_y : ht.DNDarray\n DNDarray of shape(len(dataset_y - size),) containing all items from dataset_y not in fold_y\n \"\"\"\n assert len(dataset_y) == len(dataset_x)\n assert size < len(dataset_x)\n\n data_length = len(dataset_x)\n\n if seed:\n random.seed(seed)\n indices = [i for i in range(data_length)]\n random.shuffle(indices)\n\n data_indices = ht.array(indices[0:size], split=0)\n verification_indices = ht.array(indices[size:], split=0)\n\n fold_x = ht.array(dataset_x[data_indices], is_split=0)\n fold_y = ht.array(dataset_y[data_indices], is_split=0)\n verification_y = ht.array(dataset_y[verification_indices], is_split=0)\n verification_x = ht.array(dataset_x[verification_indices], is_split=0)\n\n # Balance arrays\n fold_x.balance_()\n fold_y.balance_()\n verification_y.balance_()\n verification_x.balance_()\n\n return fold_x, fold_y, verification_x, verification_y\n\n\ndef verify_algorithm(x, y, split_number, split_size, k, seed=None):\n \"\"\"\n Parameters\n ----------\n x : ht.DNDarray\n array containing data vectors\n y : ht.DNDarray\n array containing the labels for x (must be in same order)\n split_number: int\n the number of test iterations\n split_size : int\n the number of vectors used by the KNeighborsClassifier-Algorithm\n k : int\n The number of neighbours for KNeighborsClassifier-Algorithm\n seed : int\n Seed for the random generator used in creating folds. 
Used for deterministic testing purposes.\n Returns\n -------\n accuracies : ht.DNDarray\n array of shape (split_number,) containing the accuracy per run\n \"\"\"\n assert len(x) == len(y)\n assert split_size < len(x)\n assert k < len(x)\n\n accuracies = []\n\n for split_index in range(split_number):\n fold_x, fold_y, verification_x, verification_y = create_fold(x, y, split_size, seed)\n classifier = KNeighborsClassifier(fold_x, fold_y, k)\n result_y = classifier.predict(verification_x)\n accuracies.append(calculate_accuracy(result_y, verification_y).item())\n return accuracies\n\n\nprint(verify_algorithm(X, Y, 1, 30, 5, 1))\n", "path": "examples/classification/demo_knn.py"}], "after_files": [{"content": "import numpy as np\nimport torch\nimport sys\nimport os\n\n# Fix python path if run from terminal\ncurdir = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, os.path.abspath(os.path.join(curdir, \"../../\")))\n\nimport heat as ht\nfrom matplotlib import pyplot as plt\nfrom sklearn import datasets\nimport heat.regression.lasso as lasso\nimport plotfkt\n\n# read scikit diabetes data set\ndiabetes = datasets.load_diabetes()\n\n# load diabetes dataset from hdf5 file\nX = ht.load_hdf5(\"../../heat/datasets/diabetes.h5\", dataset=\"x\", split=0)\ny = ht.load_hdf5(\"../../heat/datasets/diabetes.h5\", dataset=\"y\", split=0)\n\n# normalize dataset #DoTO this goes into the lasso fit routine soon as issue #106 is solved\nX = X / ht.sqrt((ht.mean(X ** 2, axis=0)))\n\n# HeAT lasso instance\nestimator = lasso.Lasso(max_iter=100)\n\n# List lasso model parameters\ntheta_list = list()\n\n# Range of lambda values\nlamda = np.logspace(0, 4, 10) / 10\n\n# compute the lasso path\nfor la in lamda:\n estimator.lam = la\n estimator.fit(X, y)\n theta_list.append(estimator.theta.numpy().flatten())\n\n# Stack estimated model parameters into one numpy array\ntheta_lasso = np.stack(theta_list).T\n\n# Stack into numpy array\ntheta_lasso = np.stack(theta_list).T[1:, :]\n\n# plot lasso paths\nplotfkt.plot_lasso_path(\n lamda, theta_lasso, diabetes.feature_names, title=\"Lasso Paths - HeAT implementation\"\n)\n\nif X.is_distributed():\n distributed = X.comm.rank\nelse:\n distributed = False\n\n# plot only with first rank\nif distributed is False:\n plt.show()\nelif distributed == 0:\n plt.show()\n", "path": "examples/lasso/demo.py"}, {"content": "from setuptools import setup, find_packages\nimport codecs\n\n\nwith codecs.open(\"README.md\", \"r\", \"utf-8\") as handle:\n long_description = handle.read()\n\n__version__ = None # appeases flake, assignment in exec() below\nwith open(\"./heat/core/version.py\") as handle:\n exec(handle.read())\n\nsetup(\n name=\"heat\",\n packages=find_packages(exclude=(\"*tests*\", \"*benchmarks*\")),\n data_files=[\"README.md\", \"LICENSE\"],\n version=__version__,\n description=\"A framework for high-performance data analytics and machine learning.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Helmholtz Association\",\n author_email=\"[email protected]\",\n url=\"https://github.com/helmholtz-analytics/heat\",\n keywords=[\"data\", \"analytics\", \"tensors\", \"distributed\", \"gpu\"],\n python_requires=\">=3.7\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: MIT License\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n ],\n install_requires=[\n 
\"mpi4py>=3.0.0\",\n \"numpy>=1.13.0\",\n \"torch>=1.7.0\",\n \"scipy>=0.14.0\",\n \"pillow>=6.0.0\",\n \"torchvision>=0.8.0\",\n ],\n extras_require={\n \"docutils\": [\"docutils>=0.16\"],\n \"hdf5\": [\"h5py>=2.8.0\"],\n \"netcdf\": [\"netCDF4>=1.5.6\"],\n \"dev\": [\"pre-commit>=1.18.3\"],\n \"examples\": [\"scikit-learn>=0.24.0\", \"matplotlib>=3.1.0\"],\n },\n)\n", "path": "setup.py"}, {"content": "import sys\nimport os\nimport random\n\n# Fix python path if run from terminal\ncurdir = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, os.path.abspath(os.path.join(curdir, \"../../\")))\n\nimport heat as ht\nfrom heat.classification.kneighborsclassifier import KNeighborsClassifier\n\n# Load dataset from hdf5 file\nX = ht.load_hdf5(\"../../heat/datasets/iris.h5\", dataset=\"data\", split=0)\n\n# Generate keys for the iris.h5 dataset\nkeys = []\nfor i in range(50):\n keys.append(0)\nfor i in range(50, 100):\n keys.append(1)\nfor i in range(100, 150):\n keys.append(2)\nY = ht.array(keys, split=0)\n\n\ndef calculate_accuracy(new_y, verification_y):\n \"\"\"\n Calculates the accuracy of classification/clustering-algorithms.\n Note this only works with integer/discrete classes. For algorithms that give approximations an error function is\n required.\n\n Parameters\n ----------\n new_y : ht.tensor of shape (n_samples, n_features), required\n The new labels that where generated\n verification_y : ht.tensor of shape (n_samples, n_features), required\n Known labels\n\n Returns\n ----------\n float\n the accuracy, number of properly labeled samples divided by amount of labels.\n \"\"\"\n\n if new_y.gshape != verification_y.gshape:\n raise ValueError(\n \"Expecting results of same length, got {}, {}\".format(\n new_y.gshape, verification_y.gshape\n )\n )\n\n count = ht.sum(ht.where(new_y == verification_y, 1, 0))\n\n return count / new_y.gshape[0]\n\n\ndef create_fold(dataset_x, dataset_y, size, seed=None):\n \"\"\"\n Randomly splits the dataset into two parts for cross-validation.\n\n Parameters\n ----------\n dataset_x : ht.DNDarray\n data vectors, required\n dataset_y : ht.DNDarray\n labels for dataset_x, required\n size : int\n the size of the split to create\n seed: int, optional\n seed for the random generator, allows deterministic testing\n\n Returns\n ----------\n fold_x : ht.DNDarray\n DNDarray of shape (size,) containing data vectors from dataset_x\n fold_y : ht.DNDarray\n DNDarray of shape(size,) containing labels from dataset_y\n verification_x : ht.DNDarray\n DNDarray of shape(len(dataset_x - size),) containing all items from dataset_x not in fold_x\n verification_y : ht.DNDarray\n DNDarray of shape(len(dataset_y - size),) containing all items from dataset_y not in fold_y\n \"\"\"\n assert len(dataset_y) == len(dataset_x)\n assert size < len(dataset_x)\n\n data_length = len(dataset_x)\n\n if seed:\n random.seed(seed)\n indices = [i for i in range(data_length)]\n random.shuffle(indices)\n\n data_indices = ht.array(indices[0:size], split=0)\n verification_indices = ht.array(indices[size:], split=0)\n\n fold_x = ht.array(dataset_x[data_indices], is_split=0)\n fold_y = ht.array(dataset_y[data_indices], is_split=0)\n verification_y = ht.array(dataset_y[verification_indices], is_split=0)\n verification_x = ht.array(dataset_x[verification_indices], is_split=0)\n\n # Balance arrays\n fold_x.balance_()\n fold_y.balance_()\n verification_y.balance_()\n verification_x.balance_()\n\n return fold_x, fold_y, verification_x, verification_y\n\n\ndef verify_algorithm(x, y, split_number, 
split_size, k, seed=None):\n \"\"\"\n Parameters\n ----------\n x : ht.DNDarray\n array containing data vectors\n y : ht.DNDarray\n array containing the labels for x (must be in same order)\n split_number: int\n the number of test iterations\n split_size : int\n the number of vectors used by the KNeighborsClassifier-Algorithm\n k : int\n The number of neighbours for KNeighborsClassifier-Algorithm\n seed : int\n Seed for the random generator used in creating folds. Used for deterministic testing purposes.\n Returns\n -------\n accuracies : ht.DNDarray\n array of shape (split_number,) containing the accuracy per run\n \"\"\"\n assert len(x) == len(y)\n assert split_size < len(x)\n assert k < len(x)\n\n accuracies = []\n\n for split_index in range(split_number):\n fold_x, fold_y, verification_x, verification_y = create_fold(x, y, split_size, seed)\n classifier = KNeighborsClassifier(fold_x, fold_y, k)\n result_y = classifier.predict(verification_x)\n accuracies.append(calculate_accuracy(result_y, verification_y).item())\n return accuracies\n\n\nprint(verify_algorithm(X, Y, 1, 30, 5, 1))\n", "path": "examples/classification/demo_knn.py"}]}
2,748
542
gh_patches_debug_22857
rasdani/github-patches
git_diff
nilearn__nilearn-1802
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Super verbose CI logs The CI logs are brimming with noise, making it a chore to debug a real problem. Moreover TravisCI has a log length beyond which it terminates the testing and we have started bumping against it. We should systematically check all the warnings being generated and update the codebase so they are no longer generated. This should be made a priority, these spurious failures are costing time and enthusiasm. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/02_decoding/plot_haxby_different_estimators.py` Content: ``` 1 """ 2 Different classifiers in decoding the Haxby dataset 3 ===================================================== 4 5 Here we compare different classifiers on a visual object recognition 6 decoding task. 7 """ 8 9 ############################################################################# 10 # We start by loading the data and applying simple transformations to it 11 # ----------------------------------------------------------------------- 12 13 # Fetch data using nilearn dataset fetcher 14 from nilearn import datasets 15 # by default 2nd subject data will be fetched 16 haxby_dataset = datasets.fetch_haxby() 17 18 # print basic information on the dataset 19 print('First subject anatomical nifti image (3D) located is at: %s' % 20 haxby_dataset.anat[0]) 21 print('First subject functional nifti image (4D) is located at: %s' % 22 haxby_dataset.func[0]) 23 24 # load labels 25 import numpy as np 26 import pandas as pd 27 labels = pd.read_csv(haxby_dataset.session_target[0], sep=" ") 28 stimuli = labels['labels'] 29 # identify resting state labels in order to be able to remove them 30 task_mask = (stimuli != 'rest') 31 32 # find names of remaining active labels 33 categories = stimuli[task_mask].unique() 34 35 # extract tags indicating to which acquisition run a tag belongs 36 session_labels = labels['chunks'][task_mask] 37 38 # Load the fMRI data 39 from nilearn.input_data import NiftiMasker 40 41 # For decoding, standardizing is often very important 42 mask_filename = haxby_dataset.mask_vt[0] 43 masker = NiftiMasker(mask_img=mask_filename, standardize=True) 44 func_filename = haxby_dataset.func[0] 45 masked_timecourses = masker.fit_transform( 46 func_filename)[task_mask] 47 48 49 ############################################################################# 50 # Then we define the various classifiers that we use 51 # --------------------------------------------------- 52 # A support vector classifier 53 from sklearn.svm import SVC 54 svm = SVC(C=1., kernel="linear") 55 56 # The logistic regression 57 from sklearn.linear_model import (LogisticRegression, 58 RidgeClassifier, 59 RidgeClassifierCV, 60 ) 61 logistic = LogisticRegression(C=1., penalty="l1", solver='liblinear') 62 logistic_50 = LogisticRegression(C=50., penalty="l1", solver='liblinear') 63 logistic_l2 = LogisticRegression(C=1., penalty="l2", solver='liblinear') 64 65 # Cross-validated versions of these classifiers 66 from sklearn.model_selection import GridSearchCV 67 # GridSearchCV is slow, but note that it takes an 'n_jobs' parameter that 68 # can significantly speed up the fitting process on computers with 69 # multiple cores 70 svm_cv = GridSearchCV(SVC(C=1., kernel="linear"), 71 param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]}, 72 scoring='f1', n_jobs=1, cv=3, iid=False) 73 74 
logistic_cv = GridSearchCV(LogisticRegression(C=1., penalty="l1"), 75 param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]}, 76 scoring='f1', cv=3, iid=False) 77 logistic_l2_cv = GridSearchCV(LogisticRegression(C=1., penalty="l2"), 78 param_grid={ 79 'C': [.1, .5, 1., 5., 10., 50., 100.] 80 }, 81 scoring='f1', cv=3, iid=False) 82 83 # The ridge classifier has a specific 'CV' object that can set it's 84 # parameters faster than using a GridSearchCV 85 ridge = RidgeClassifier() 86 ridge_cv = RidgeClassifierCV() 87 88 # A dictionary, to hold all our classifiers 89 classifiers = {'SVC': svm, 90 'SVC cv': svm_cv, 91 'log l1': logistic, 92 'log l1 50': logistic_50, 93 'log l1 cv': logistic_cv, 94 'log l2': logistic_l2, 95 'log l2 cv': logistic_l2_cv, 96 'ridge': ridge, 97 'ridge cv': ridge_cv 98 } 99 100 ############################################################################# 101 # Here we compute prediction scores 102 # ---------------------------------- 103 # Run time for all these classifiers 104 105 # Make a data splitting object for cross validation 106 from sklearn.model_selection import LeaveOneGroupOut, cross_val_score 107 cv = LeaveOneGroupOut() 108 109 import time 110 111 classifiers_scores = {} 112 113 for classifier_name, classifier in sorted(classifiers.items()): 114 classifiers_scores[classifier_name] = {} 115 print(70 * '_') 116 117 for category in categories: 118 classification_target = stimuli[task_mask].isin([category]) 119 t0 = time.time() 120 classifiers_scores[classifier_name][category] = cross_val_score( 121 classifier, 122 masked_timecourses, 123 classification_target, 124 cv=cv, 125 groups=session_labels, 126 scoring="f1", 127 ) 128 129 print( 130 "%10s: %14s -- scores: %1.2f +- %1.2f, time %.2fs" % 131 ( 132 classifier_name, 133 category, 134 classifiers_scores[classifier_name][category].mean(), 135 classifiers_scores[classifier_name][category].std(), 136 time.time() - t0, 137 ), 138 ) 139 140 ############################################################################### 141 # Then we make a rudimentary diagram 142 import matplotlib.pyplot as plt 143 plt.figure() 144 145 tick_position = np.arange(len(categories)) 146 plt.xticks(tick_position, categories, rotation=45) 147 148 for color, classifier_name in zip( 149 ['b', 'c', 'm', 'g', 'y', 'k', '.5', 'r', '#ffaaaa'], 150 sorted(classifiers)): 151 score_means = [classifiers_scores[classifier_name][category].mean() 152 for category in categories] 153 plt.bar(tick_position, score_means, label=classifier_name, 154 width=.11, color=color) 155 tick_position = tick_position + .09 156 157 plt.ylabel('Classification accurancy (f1 score)') 158 plt.xlabel('Visual stimuli category') 159 plt.ylim(ymin=0) 160 plt.legend(loc='lower center', ncol=3) 161 plt.title( 162 'Category-specific classification accuracy for different classifiers') 163 plt.tight_layout() 164 165 ############################################################################### 166 # Finally, w plot the face vs house map for the different classifiers 167 168 # Use the average EPI as a background 169 from nilearn import image 170 mean_epi_img = image.mean_img(func_filename) 171 172 # Restrict the decoding to face vs house 173 condition_mask = stimuli.isin(['face', 'house']) 174 masked_timecourses = masked_timecourses[ 175 condition_mask[task_mask]] 176 stimuli = (stimuli[condition_mask] == 'face') 177 # Transform the stimuli to binary values 178 stimuli.astype(np.int) 179 180 from nilearn.plotting import plot_stat_map, show 181 182 for classifier_name, classifier in 
sorted(classifiers.items()): 183 classifier.fit(masked_timecourses, stimuli) 184 185 if hasattr(classifier, 'coef_'): 186 weights = classifier.coef_[0] 187 elif hasattr(classifier, 'best_estimator_'): 188 weights = classifier.best_estimator_.coef_[0] 189 else: 190 continue 191 weight_img = masker.inverse_transform(weights) 192 weight_map = weight_img.get_data() 193 threshold = np.max(np.abs(weight_map)) * 1e-3 194 plot_stat_map(weight_img, bg_img=mean_epi_img, 195 display_mode='z', cut_coords=[-15], 196 threshold=threshold, 197 title='%s: face vs house' % classifier_name) 198 199 show() 200 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/02_decoding/plot_haxby_different_estimators.py b/examples/02_decoding/plot_haxby_different_estimators.py --- a/examples/02_decoding/plot_haxby_different_estimators.py +++ b/examples/02_decoding/plot_haxby_different_estimators.py @@ -71,14 +71,18 @@ param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]}, scoring='f1', n_jobs=1, cv=3, iid=False) -logistic_cv = GridSearchCV(LogisticRegression(C=1., penalty="l1"), - param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]}, - scoring='f1', cv=3, iid=False) -logistic_l2_cv = GridSearchCV(LogisticRegression(C=1., penalty="l2"), - param_grid={ - 'C': [.1, .5, 1., 5., 10., 50., 100.] - }, - scoring='f1', cv=3, iid=False) +logistic_cv = GridSearchCV( + LogisticRegression(C=1., penalty="l1", solver='liblinear'), + param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]}, + scoring='f1', cv=3, iid=False, + ) +logistic_l2_cv = GridSearchCV( + LogisticRegression(C=1., penalty="l2", solver='liblinear'), + param_grid={ + 'C': [.1, .5, 1., 5., 10., 50., 100.] + }, + scoring='f1', cv=3, iid=False, + ) # The ridge classifier has a specific 'CV' object that can set it's # parameters faster than using a GridSearchCV
{"golden_diff": "diff --git a/examples/02_decoding/plot_haxby_different_estimators.py b/examples/02_decoding/plot_haxby_different_estimators.py\n--- a/examples/02_decoding/plot_haxby_different_estimators.py\n+++ b/examples/02_decoding/plot_haxby_different_estimators.py\n@@ -71,14 +71,18 @@\n param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},\n scoring='f1', n_jobs=1, cv=3, iid=False)\n \n-logistic_cv = GridSearchCV(LogisticRegression(C=1., penalty=\"l1\"),\n- param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},\n- scoring='f1', cv=3, iid=False)\n-logistic_l2_cv = GridSearchCV(LogisticRegression(C=1., penalty=\"l2\"),\n- param_grid={\n- 'C': [.1, .5, 1., 5., 10., 50., 100.]\n- },\n- scoring='f1', cv=3, iid=False)\n+logistic_cv = GridSearchCV(\n+ LogisticRegression(C=1., penalty=\"l1\", solver='liblinear'),\n+ param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},\n+ scoring='f1', cv=3, iid=False,\n+ )\n+logistic_l2_cv = GridSearchCV(\n+ LogisticRegression(C=1., penalty=\"l2\", solver='liblinear'),\n+ param_grid={\n+ 'C': [.1, .5, 1., 5., 10., 50., 100.]\n+ },\n+ scoring='f1', cv=3, iid=False,\n+ )\n \n # The ridge classifier has a specific 'CV' object that can set it's\n # parameters faster than using a GridSearchCV\n", "issue": "Super verbose CI logs\nThe CI logs are brimming with noise, making it a chore to debug a real problem.\r\n\r\nMoreover TravisCI has a log length beyond which it terminates the testing and we have started bumping against it.\r\n\r\nWe should systematically check all the warnings being generated and update the codebase so they are no longer generated.\r\n\r\nThis should be made a priority, these spurious failures are costing time and enthusiasm.\n", "before_files": [{"content": "\"\"\"\nDifferent classifiers in decoding the Haxby dataset\n=====================================================\n\nHere we compare different classifiers on a visual object recognition\ndecoding task.\n\"\"\"\n\n#############################################################################\n# We start by loading the data and applying simple transformations to it\n# -----------------------------------------------------------------------\n\n# Fetch data using nilearn dataset fetcher\nfrom nilearn import datasets\n# by default 2nd subject data will be fetched\nhaxby_dataset = datasets.fetch_haxby()\n\n# print basic information on the dataset\nprint('First subject anatomical nifti image (3D) located is at: %s' %\n haxby_dataset.anat[0])\nprint('First subject functional nifti image (4D) is located at: %s' %\n haxby_dataset.func[0])\n\n# load labels\nimport numpy as np\nimport pandas as pd\nlabels = pd.read_csv(haxby_dataset.session_target[0], sep=\" \")\nstimuli = labels['labels']\n# identify resting state labels in order to be able to remove them\ntask_mask = (stimuli != 'rest')\n\n# find names of remaining active labels\ncategories = stimuli[task_mask].unique()\n\n# extract tags indicating to which acquisition run a tag belongs\nsession_labels = labels['chunks'][task_mask]\n\n# Load the fMRI data\nfrom nilearn.input_data import NiftiMasker\n\n# For decoding, standardizing is often very important\nmask_filename = haxby_dataset.mask_vt[0]\nmasker = NiftiMasker(mask_img=mask_filename, standardize=True)\nfunc_filename = haxby_dataset.func[0]\nmasked_timecourses = masker.fit_transform(\n func_filename)[task_mask]\n\n\n#############################################################################\n# Then we define the various classifiers that we use\n# 
---------------------------------------------------\n# A support vector classifier\nfrom sklearn.svm import SVC\nsvm = SVC(C=1., kernel=\"linear\")\n\n# The logistic regression\nfrom sklearn.linear_model import (LogisticRegression,\n RidgeClassifier,\n RidgeClassifierCV,\n )\nlogistic = LogisticRegression(C=1., penalty=\"l1\", solver='liblinear')\nlogistic_50 = LogisticRegression(C=50., penalty=\"l1\", solver='liblinear')\nlogistic_l2 = LogisticRegression(C=1., penalty=\"l2\", solver='liblinear')\n\n# Cross-validated versions of these classifiers\nfrom sklearn.model_selection import GridSearchCV\n# GridSearchCV is slow, but note that it takes an 'n_jobs' parameter that\n# can significantly speed up the fitting process on computers with\n# multiple cores\nsvm_cv = GridSearchCV(SVC(C=1., kernel=\"linear\"),\n param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},\n scoring='f1', n_jobs=1, cv=3, iid=False)\n\nlogistic_cv = GridSearchCV(LogisticRegression(C=1., penalty=\"l1\"),\n param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},\n scoring='f1', cv=3, iid=False)\nlogistic_l2_cv = GridSearchCV(LogisticRegression(C=1., penalty=\"l2\"),\n param_grid={\n 'C': [.1, .5, 1., 5., 10., 50., 100.]\n },\n scoring='f1', cv=3, iid=False)\n\n# The ridge classifier has a specific 'CV' object that can set it's\n# parameters faster than using a GridSearchCV\nridge = RidgeClassifier()\nridge_cv = RidgeClassifierCV()\n\n# A dictionary, to hold all our classifiers\nclassifiers = {'SVC': svm,\n 'SVC cv': svm_cv,\n 'log l1': logistic,\n 'log l1 50': logistic_50,\n 'log l1 cv': logistic_cv,\n 'log l2': logistic_l2,\n 'log l2 cv': logistic_l2_cv,\n 'ridge': ridge,\n 'ridge cv': ridge_cv\n }\n\n#############################################################################\n# Here we compute prediction scores\n# ----------------------------------\n# Run time for all these classifiers\n\n# Make a data splitting object for cross validation\nfrom sklearn.model_selection import LeaveOneGroupOut, cross_val_score\ncv = LeaveOneGroupOut()\n\nimport time\n\nclassifiers_scores = {}\n\nfor classifier_name, classifier in sorted(classifiers.items()):\n classifiers_scores[classifier_name] = {}\n print(70 * '_')\n\n for category in categories:\n classification_target = stimuli[task_mask].isin([category])\n t0 = time.time()\n classifiers_scores[classifier_name][category] = cross_val_score(\n classifier,\n masked_timecourses,\n classification_target,\n cv=cv,\n groups=session_labels,\n scoring=\"f1\",\n )\n\n print(\n \"%10s: %14s -- scores: %1.2f +- %1.2f, time %.2fs\" %\n (\n classifier_name,\n category,\n classifiers_scores[classifier_name][category].mean(),\n classifiers_scores[classifier_name][category].std(),\n time.time() - t0,\n ),\n )\n\n###############################################################################\n# Then we make a rudimentary diagram\nimport matplotlib.pyplot as plt\nplt.figure()\n\ntick_position = np.arange(len(categories))\nplt.xticks(tick_position, categories, rotation=45)\n\nfor color, classifier_name in zip(\n ['b', 'c', 'm', 'g', 'y', 'k', '.5', 'r', '#ffaaaa'],\n sorted(classifiers)):\n score_means = [classifiers_scores[classifier_name][category].mean()\n for category in categories]\n plt.bar(tick_position, score_means, label=classifier_name,\n width=.11, color=color)\n tick_position = tick_position + .09\n\nplt.ylabel('Classification accurancy (f1 score)')\nplt.xlabel('Visual stimuli category')\nplt.ylim(ymin=0)\nplt.legend(loc='lower center', ncol=3)\nplt.title(\n 'Category-specific classification 
accuracy for different classifiers')\nplt.tight_layout()\n\n###############################################################################\n# Finally, w plot the face vs house map for the different classifiers\n\n# Use the average EPI as a background\nfrom nilearn import image\nmean_epi_img = image.mean_img(func_filename)\n\n# Restrict the decoding to face vs house\ncondition_mask = stimuli.isin(['face', 'house'])\nmasked_timecourses = masked_timecourses[\n condition_mask[task_mask]]\nstimuli = (stimuli[condition_mask] == 'face')\n# Transform the stimuli to binary values\nstimuli.astype(np.int)\n\nfrom nilearn.plotting import plot_stat_map, show\n\nfor classifier_name, classifier in sorted(classifiers.items()):\n classifier.fit(masked_timecourses, stimuli)\n\n if hasattr(classifier, 'coef_'):\n weights = classifier.coef_[0]\n elif hasattr(classifier, 'best_estimator_'):\n weights = classifier.best_estimator_.coef_[0]\n else:\n continue\n weight_img = masker.inverse_transform(weights)\n weight_map = weight_img.get_data()\n threshold = np.max(np.abs(weight_map)) * 1e-3\n plot_stat_map(weight_img, bg_img=mean_epi_img,\n display_mode='z', cut_coords=[-15],\n threshold=threshold,\n title='%s: face vs house' % classifier_name)\n\nshow()\n", "path": "examples/02_decoding/plot_haxby_different_estimators.py"}], "after_files": [{"content": "\"\"\"\nDifferent classifiers in decoding the Haxby dataset\n=====================================================\n\nHere we compare different classifiers on a visual object recognition\ndecoding task.\n\"\"\"\n\n#############################################################################\n# We start by loading the data and applying simple transformations to it\n# -----------------------------------------------------------------------\n\n# Fetch data using nilearn dataset fetcher\nfrom nilearn import datasets\n# by default 2nd subject data will be fetched\nhaxby_dataset = datasets.fetch_haxby()\n\n# print basic information on the dataset\nprint('First subject anatomical nifti image (3D) located is at: %s' %\n haxby_dataset.anat[0])\nprint('First subject functional nifti image (4D) is located at: %s' %\n haxby_dataset.func[0])\n\n# load labels\nimport numpy as np\nimport pandas as pd\nlabels = pd.read_csv(haxby_dataset.session_target[0], sep=\" \")\nstimuli = labels['labels']\n# identify resting state labels in order to be able to remove them\ntask_mask = (stimuli != 'rest')\n\n# find names of remaining active labels\ncategories = stimuli[task_mask].unique()\n\n# extract tags indicating to which acquisition run a tag belongs\nsession_labels = labels['chunks'][task_mask]\n\n# Load the fMRI data\nfrom nilearn.input_data import NiftiMasker\n\n# For decoding, standardizing is often very important\nmask_filename = haxby_dataset.mask_vt[0]\nmasker = NiftiMasker(mask_img=mask_filename, standardize=True)\nfunc_filename = haxby_dataset.func[0]\nmasked_timecourses = masker.fit_transform(\n func_filename)[task_mask]\n\n\n#############################################################################\n# Then we define the various classifiers that we use\n# ---------------------------------------------------\n# A support vector classifier\nfrom sklearn.svm import SVC\nsvm = SVC(C=1., kernel=\"linear\")\n\n# The logistic regression\nfrom sklearn.linear_model import (LogisticRegression,\n RidgeClassifier,\n RidgeClassifierCV,\n )\nlogistic = LogisticRegression(C=1., penalty=\"l1\", solver='liblinear')\nlogistic_50 = LogisticRegression(C=50., penalty=\"l1\", 
solver='liblinear')\nlogistic_l2 = LogisticRegression(C=1., penalty=\"l2\", solver='liblinear')\n\n# Cross-validated versions of these classifiers\nfrom sklearn.model_selection import GridSearchCV\n# GridSearchCV is slow, but note that it takes an 'n_jobs' parameter that\n# can significantly speed up the fitting process on computers with\n# multiple cores\nsvm_cv = GridSearchCV(SVC(C=1., kernel=\"linear\"),\n param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},\n scoring='f1', n_jobs=1, cv=3, iid=False)\n\nlogistic_cv = GridSearchCV(\n LogisticRegression(C=1., penalty=\"l1\", solver='liblinear'),\n param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},\n scoring='f1', cv=3, iid=False,\n )\nlogistic_l2_cv = GridSearchCV(\n LogisticRegression(C=1., penalty=\"l2\", solver='liblinear'),\n param_grid={\n 'C': [.1, .5, 1., 5., 10., 50., 100.]\n },\n scoring='f1', cv=3, iid=False,\n )\n\n# The ridge classifier has a specific 'CV' object that can set it's\n# parameters faster than using a GridSearchCV\nridge = RidgeClassifier()\nridge_cv = RidgeClassifierCV()\n\n# A dictionary, to hold all our classifiers\nclassifiers = {'SVC': svm,\n 'SVC cv': svm_cv,\n 'log l1': logistic,\n 'log l1 50': logistic_50,\n 'log l1 cv': logistic_cv,\n 'log l2': logistic_l2,\n 'log l2 cv': logistic_l2_cv,\n 'ridge': ridge,\n 'ridge cv': ridge_cv\n }\n\n#############################################################################\n# Here we compute prediction scores\n# ----------------------------------\n# Run time for all these classifiers\n\n# Make a data splitting object for cross validation\nfrom sklearn.model_selection import LeaveOneGroupOut, cross_val_score\ncv = LeaveOneGroupOut()\n\nimport time\n\nclassifiers_scores = {}\n\nfor classifier_name, classifier in sorted(classifiers.items()):\n classifiers_scores[classifier_name] = {}\n print(70 * '_')\n\n for category in categories:\n classification_target = stimuli[task_mask].isin([category])\n t0 = time.time()\n classifiers_scores[classifier_name][category] = cross_val_score(\n classifier,\n masked_timecourses,\n classification_target,\n cv=cv,\n groups=session_labels,\n scoring=\"f1\",\n )\n\n print(\n \"%10s: %14s -- scores: %1.2f +- %1.2f, time %.2fs\" %\n (\n classifier_name,\n category,\n classifiers_scores[classifier_name][category].mean(),\n classifiers_scores[classifier_name][category].std(),\n time.time() - t0,\n ),\n )\n\n###############################################################################\n# Then we make a rudimentary diagram\nimport matplotlib.pyplot as plt\nplt.figure()\n\ntick_position = np.arange(len(categories))\nplt.xticks(tick_position, categories, rotation=45)\n\nfor color, classifier_name in zip(\n ['b', 'c', 'm', 'g', 'y', 'k', '.5', 'r', '#ffaaaa'],\n sorted(classifiers)):\n score_means = [classifiers_scores[classifier_name][category].mean()\n for category in categories]\n plt.bar(tick_position, score_means, label=classifier_name,\n width=.11, color=color)\n tick_position = tick_position + .09\n\nplt.ylabel('Classification accurancy (f1 score)')\nplt.xlabel('Visual stimuli category')\nplt.ylim(ymin=0)\nplt.legend(loc='lower center', ncol=3)\nplt.title(\n 'Category-specific classification accuracy for different classifiers')\nplt.tight_layout()\n\n###############################################################################\n# Finally, w plot the face vs house map for the different classifiers\n\n# Use the average EPI as a background\nfrom nilearn import image\nmean_epi_img = image.mean_img(func_filename)\n\n# Restrict the decoding to 
face vs house\ncondition_mask = stimuli.isin(['face', 'house'])\nmasked_timecourses = masked_timecourses[\n condition_mask[task_mask]]\nstimuli = (stimuli[condition_mask] == 'face')\n# Transform the stimuli to binary values\nstimuli.astype(np.int)\n\nfrom nilearn.plotting import plot_stat_map, show\n\nfor classifier_name, classifier in sorted(classifiers.items()):\n classifier.fit(masked_timecourses, stimuli)\n\n if hasattr(classifier, 'coef_'):\n weights = classifier.coef_[0]\n elif hasattr(classifier, 'best_estimator_'):\n weights = classifier.best_estimator_.coef_[0]\n else:\n continue\n weight_img = masker.inverse_transform(weights)\n weight_map = weight_img.get_data()\n threshold = np.max(np.abs(weight_map)) * 1e-3\n plot_stat_map(weight_img, bg_img=mean_epi_img,\n display_mode='z', cut_coords=[-15],\n threshold=threshold,\n title='%s: face vs house' % classifier_name)\n\nshow()\n", "path": "examples/02_decoding/plot_haxby_different_estimators.py"}]}
2,436
463
gh_patches_debug_19954
rasdani/github-patches
git_diff
mindsdb__lightwood-324
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Stop using pickle Apparently torch 1.5 will no longer support pickling, thus we'll have to switch to `torch.save` instead. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lightwood/api/predictor.py` Content: ``` 1 import traceback 2 import time 3 4 import dill 5 import pickle 6 import pandas 7 import numpy as np 8 import torch 9 10 from lightwood.api.data_source import DataSource 11 from lightwood.data_schemas.predictor_config import predictor_config_schema 12 from lightwood.config.config import CONFIG 13 from lightwood.constants.lightwood import COLUMN_DATA_TYPES 14 from lightwood.helpers.device import get_devices 15 from lightwood.logger import log 16 17 18 class Predictor: 19 def __init__(self, config=None, output=None, load_from_path=None): 20 """ 21 :param config: dict 22 :param output: list, the columns you want to predict, ludwig will try to generate a config 23 :param load_from_path: str, the path to load the predictor from 24 """ 25 if load_from_path is not None: 26 with open(load_from_path, 'rb') as pickle_in: 27 self_dict = pickle.load(pickle_in) 28 self.__dict__ = self_dict 29 self.convert_to_device() 30 return 31 32 if output is None and config is None: 33 raise ValueError('You must provide either `output` or `config`') 34 35 if config is not None and output is None: 36 try: 37 self.config = predictor_config_schema.validate(config) 38 except Exception: 39 error = traceback.format_exc(1) 40 raise ValueError('[BAD DEFINITION] argument has errors: {err}'.format(err=error)) 41 42 # this is if we need to automatically generate a configuration variable 43 self._generate_config = True if output is not None or self.config is None else False 44 45 self._output_columns = output 46 self._input_columns = None 47 self.train_accuracy = None 48 49 self._mixer = None 50 51 def convert_to_device(self, device_str=None): 52 if hasattr(self._mixer, 'to') and callable(self._mixer.to): 53 if device_str is not None: 54 device = torch.device(device_str) 55 available_devices = 1 56 if device_str == 'cuda': 57 available_devices = torch.cuda.device_count() 58 else: 59 device, available_devices = get_devices() 60 61 self._mixer.to(device, available_devices) 62 63 def _type_map(self, from_data, col_name): 64 """ 65 This is a helper function that will help us auto-determine roughly what data types are in each column 66 NOTE: That this assumes the data is clean and will only return types for 'CATEGORICAL', 'NUMERIC' and 'TEXT' 67 """ 68 69 col_pd_type = from_data[col_name].dtype 70 col_pd_type = str(col_pd_type) 71 72 if col_pd_type in ['int64', 'float64', 'timedelta']: 73 return COLUMN_DATA_TYPES.NUMERIC 74 elif col_pd_type in ['bool', 'category']: 75 return COLUMN_DATA_TYPES.CATEGORICAL 76 else: 77 # if the number of uniques is less than 100 or less 78 # than 10% of the total number of rows then keep it as categorical 79 unique = from_data[col_name].nunique() 80 if unique < 100 or unique < len(from_data[col_name]) / 10: 81 return COLUMN_DATA_TYPES.CATEGORICAL 82 else: 83 return COLUMN_DATA_TYPES.TEXT 84 85 def learn(self, from_data, test_data=None): 86 """ 87 Train and save a model (you can use this to retrain model from data). 
88 89 :param from_data: DataFrame or DataSource 90 The data to learn from 91 92 :param test_data: DataFrame or DataSource 93 The data to test accuracy and learn_error from 94 """ 95 device, _available_devices = get_devices() 96 log.info(f'Computing device used: {device}') 97 # generate the configuration and set the order for the input and output columns 98 if self._generate_config is True: 99 self._input_columns = [col for col in from_data if col not in self._output_columns] 100 self.config = { 101 'input_features': [{'name': col, 'type': self._type_map(from_data, col)} for col in self._input_columns], 102 'output_features': [{'name': col, 'type': self._type_map(from_data, col)} for col in self._output_columns] 103 } 104 self.config = predictor_config_schema.validate(self.config) 105 log.info('Automatically generated a configuration') 106 log.info(self.config) 107 else: 108 self._output_columns = [col['name'] for col in self.config['output_features']] 109 self._input_columns = [col['name'] for col in self.config['input_features']] 110 111 if isinstance(from_data, pandas.DataFrame): 112 train_ds = DataSource(from_data, self.config) 113 elif isinstance(from_data, DataSource): 114 train_ds = from_data 115 else: 116 raise TypeError(':from_data: must be either DataFrame or DataSource') 117 118 nr_subsets = 3 if len(train_ds) > 100 else 1 119 120 if test_data is None: 121 test_ds = train_ds.subset(0.1) 122 elif isinstance(test_data, pandas.DataFrame): 123 test_ds = train_ds.make_child(test_data) 124 elif isinstance(test_data, DataSource): 125 test_ds = test_data 126 else: 127 raise TypeError(':test_data: must be either DataFrame or DataSource') 128 129 train_ds.create_subsets(nr_subsets) 130 test_ds.create_subsets(nr_subsets) 131 132 train_ds.train() 133 test_ds.train() 134 135 mixer_class = self.config['mixer']['class'] 136 mixer_kwargs = self.config['mixer']['kwargs'] 137 self._mixer = mixer_class(**mixer_kwargs) 138 self._mixer.fit(train_ds=train_ds, test_ds=test_ds) 139 self.train_accuracy = self._mixer.calculate_accuracy(test_ds) 140 141 return self 142 143 def predict(self, when_data=None, when=None): 144 """ 145 Predict given when conditions. 
146 147 :param when_data: pandas.DataFrame 148 :param when: dict 149 150 :return: pandas.DataFrame 151 """ 152 device, _available_devices = get_devices() 153 log.info(f'Computing device used: {device}') 154 if when is not None: 155 when_dict = {key: [when[key]] for key in when} 156 when_data = pandas.DataFrame(when_dict) 157 158 when_data_ds = DataSource(when_data, self.config, prepare_encoders=False) 159 160 when_data_ds.eval() 161 162 kwargs = {'include_extra_data': self.config.get('include_extra_data', False)} 163 164 return self._mixer.predict(when_data_ds, **kwargs) 165 166 def calculate_accuracy(self, from_data): 167 """ 168 calculates the accuracy of the model 169 :param from_data:a dataframe 170 :return accuracies: dictionaries of accuracies 171 """ 172 173 if self._mixer is None: 174 log.error("Please train the model before calculating accuracy") 175 return 176 177 ds = from_data if isinstance(from_data, DataSource) else DataSource(from_data, self.config, prepare_encoders=False) 178 predictions = self._mixer.predict(ds, include_extra_data=True) 179 accuracies = {} 180 181 for output_column in self._output_columns: 182 183 col_type = ds.get_column_config(output_column)['type'] 184 185 if col_type == COLUMN_DATA_TYPES.MULTIPLE_CATEGORICAL: 186 real = list(map(tuple, ds.get_column_original_data(output_column))) 187 predicted = list(map(tuple, predictions[output_column]['predictions'])) 188 else: 189 real = list(map(str,ds.get_column_original_data(output_column))) 190 predicted = list(map(str,predictions[output_column]['predictions'])) 191 192 weight_map = None 193 if 'weights' in ds.get_column_config(output_column): 194 weight_map = ds.get_column_config(output_column)['weights'] 195 196 accuracy = self.apply_accuracy_function(ds.get_column_config(output_column)['type'], 197 real, 198 predicted, 199 weight_map=weight_map, 200 encoder=ds.encoders[output_column]) 201 202 if ds.get_column_config(output_column)['type'] == COLUMN_DATA_TYPES.NUMERIC: 203 ds.encoders[output_column].decode_log = True 204 predicted = ds.get_decoded_column_data(output_column, predictions[output_column]['encoded_predictions']) 205 206 alternative_accuracy = self.apply_accuracy_function(ds.get_column_config(output_column)['type'], real, predicted,weight_map=weight_map) 207 208 if alternative_accuracy['value'] > accuracy['value']: 209 accuracy = alternative_accuracy 210 else: 211 ds.encoders[output_column].decode_log = False 212 213 accuracies[output_column] = accuracy 214 215 return accuracies 216 217 def save(self, path_to): 218 """ 219 Save trained model to a file. 220 221 :param path_to: str, full path of file, where we store results 222 """ 223 with open(path_to, 'wb') as f: 224 # Null out certain object we don't want to store 225 if hasattr(self._mixer, '_nonpersistent'): 226 self._mixer._nonpersistent = {} 227 self.config['mixer']['kwargs']['callback_on_iter'] = None 228 229 230 # Dump everything relevant to cpu before saving 231 self.convert_to_device("cpu") 232 pickle.dump(self.__dict__, f) 233 self.convert_to_device() 234 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lightwood/api/predictor.py b/lightwood/api/predictor.py --- a/lightwood/api/predictor.py +++ b/lightwood/api/predictor.py @@ -1,8 +1,6 @@ import traceback import time -import dill -import pickle import pandas import numpy as np import torch @@ -24,7 +22,7 @@ """ if load_from_path is not None: with open(load_from_path, 'rb') as pickle_in: - self_dict = pickle.load(pickle_in) + self_dict = torch.load(pickle_in) self.__dict__ = self_dict self.convert_to_device() return @@ -229,5 +227,5 @@ # Dump everything relevant to cpu before saving self.convert_to_device("cpu") - pickle.dump(self.__dict__, f) + torch.save(self.__dict__, f) self.convert_to_device()
{"golden_diff": "diff --git a/lightwood/api/predictor.py b/lightwood/api/predictor.py\n--- a/lightwood/api/predictor.py\n+++ b/lightwood/api/predictor.py\n@@ -1,8 +1,6 @@\n import traceback\n import time\n \n-import dill\n-import pickle\n import pandas\n import numpy as np\n import torch\n@@ -24,7 +22,7 @@\n \"\"\"\n if load_from_path is not None:\n with open(load_from_path, 'rb') as pickle_in:\n- self_dict = pickle.load(pickle_in)\n+ self_dict = torch.load(pickle_in)\n self.__dict__ = self_dict\n self.convert_to_device()\n return\n@@ -229,5 +227,5 @@\n \n # Dump everything relevant to cpu before saving\n self.convert_to_device(\"cpu\")\n- pickle.dump(self.__dict__, f)\n+ torch.save(self.__dict__, f)\n self.convert_to_device()\n", "issue": "Stop using pickle\nApparently torch 1.5 will no longer support pickling, thus we'll have to switch to `torch.save` instead.\n", "before_files": [{"content": "import traceback\nimport time\n\nimport dill\nimport pickle\nimport pandas\nimport numpy as np\nimport torch\n\nfrom lightwood.api.data_source import DataSource\nfrom lightwood.data_schemas.predictor_config import predictor_config_schema\nfrom lightwood.config.config import CONFIG\nfrom lightwood.constants.lightwood import COLUMN_DATA_TYPES\nfrom lightwood.helpers.device import get_devices\nfrom lightwood.logger import log\n\n\nclass Predictor:\n def __init__(self, config=None, output=None, load_from_path=None):\n \"\"\"\n :param config: dict\n :param output: list, the columns you want to predict, ludwig will try to generate a config\n :param load_from_path: str, the path to load the predictor from\n \"\"\"\n if load_from_path is not None:\n with open(load_from_path, 'rb') as pickle_in:\n self_dict = pickle.load(pickle_in)\n self.__dict__ = self_dict\n self.convert_to_device()\n return\n\n if output is None and config is None:\n raise ValueError('You must provide either `output` or `config`')\n\n if config is not None and output is None:\n try:\n self.config = predictor_config_schema.validate(config)\n except Exception:\n error = traceback.format_exc(1)\n raise ValueError('[BAD DEFINITION] argument has errors: {err}'.format(err=error))\n\n # this is if we need to automatically generate a configuration variable\n self._generate_config = True if output is not None or self.config is None else False\n\n self._output_columns = output\n self._input_columns = None\n self.train_accuracy = None\n\n self._mixer = None\n\n def convert_to_device(self, device_str=None):\n if hasattr(self._mixer, 'to') and callable(self._mixer.to):\n if device_str is not None:\n device = torch.device(device_str)\n available_devices = 1\n if device_str == 'cuda':\n available_devices = torch.cuda.device_count()\n else:\n device, available_devices = get_devices()\n\n self._mixer.to(device, available_devices)\n\n def _type_map(self, from_data, col_name):\n \"\"\"\n This is a helper function that will help us auto-determine roughly what data types are in each column\n NOTE: That this assumes the data is clean and will only return types for 'CATEGORICAL', 'NUMERIC' and 'TEXT'\n \"\"\"\n\n col_pd_type = from_data[col_name].dtype\n col_pd_type = str(col_pd_type)\n\n if col_pd_type in ['int64', 'float64', 'timedelta']:\n return COLUMN_DATA_TYPES.NUMERIC\n elif col_pd_type in ['bool', 'category']:\n return COLUMN_DATA_TYPES.CATEGORICAL\n else:\n # if the number of uniques is less than 100 or less\n # than 10% of the total number of rows then keep it as categorical\n unique = from_data[col_name].nunique()\n if unique < 100 or unique < 
len(from_data[col_name]) / 10:\n return COLUMN_DATA_TYPES.CATEGORICAL\n else:\n return COLUMN_DATA_TYPES.TEXT\n\n def learn(self, from_data, test_data=None):\n \"\"\"\n Train and save a model (you can use this to retrain model from data).\n\n :param from_data: DataFrame or DataSource\n The data to learn from\n\n :param test_data: DataFrame or DataSource\n The data to test accuracy and learn_error from\n \"\"\"\n device, _available_devices = get_devices()\n log.info(f'Computing device used: {device}')\n # generate the configuration and set the order for the input and output columns\n if self._generate_config is True:\n self._input_columns = [col for col in from_data if col not in self._output_columns]\n self.config = {\n 'input_features': [{'name': col, 'type': self._type_map(from_data, col)} for col in self._input_columns],\n 'output_features': [{'name': col, 'type': self._type_map(from_data, col)} for col in self._output_columns]\n }\n self.config = predictor_config_schema.validate(self.config)\n log.info('Automatically generated a configuration')\n log.info(self.config)\n else:\n self._output_columns = [col['name'] for col in self.config['output_features']]\n self._input_columns = [col['name'] for col in self.config['input_features']]\n\n if isinstance(from_data, pandas.DataFrame):\n train_ds = DataSource(from_data, self.config)\n elif isinstance(from_data, DataSource):\n train_ds = from_data\n else:\n raise TypeError(':from_data: must be either DataFrame or DataSource')\n\n nr_subsets = 3 if len(train_ds) > 100 else 1\n\n if test_data is None:\n test_ds = train_ds.subset(0.1)\n elif isinstance(test_data, pandas.DataFrame):\n test_ds = train_ds.make_child(test_data)\n elif isinstance(test_data, DataSource):\n test_ds = test_data\n else:\n raise TypeError(':test_data: must be either DataFrame or DataSource')\n\n train_ds.create_subsets(nr_subsets)\n test_ds.create_subsets(nr_subsets)\n\n train_ds.train()\n test_ds.train()\n\n mixer_class = self.config['mixer']['class']\n mixer_kwargs = self.config['mixer']['kwargs']\n self._mixer = mixer_class(**mixer_kwargs)\n self._mixer.fit(train_ds=train_ds, test_ds=test_ds)\n self.train_accuracy = self._mixer.calculate_accuracy(test_ds)\n\n return self\n\n def predict(self, when_data=None, when=None):\n \"\"\"\n Predict given when conditions.\n\n :param when_data: pandas.DataFrame\n :param when: dict\n\n :return: pandas.DataFrame\n \"\"\"\n device, _available_devices = get_devices()\n log.info(f'Computing device used: {device}')\n if when is not None:\n when_dict = {key: [when[key]] for key in when}\n when_data = pandas.DataFrame(when_dict)\n\n when_data_ds = DataSource(when_data, self.config, prepare_encoders=False)\n\n when_data_ds.eval()\n\n kwargs = {'include_extra_data': self.config.get('include_extra_data', False)}\n\n return self._mixer.predict(when_data_ds, **kwargs)\n\n def calculate_accuracy(self, from_data):\n \"\"\"\n calculates the accuracy of the model\n :param from_data:a dataframe\n :return accuracies: dictionaries of accuracies\n \"\"\"\n\n if self._mixer is None:\n log.error(\"Please train the model before calculating accuracy\")\n return\n\n ds = from_data if isinstance(from_data, DataSource) else DataSource(from_data, self.config, prepare_encoders=False)\n predictions = self._mixer.predict(ds, include_extra_data=True)\n accuracies = {}\n\n for output_column in self._output_columns:\n\n col_type = ds.get_column_config(output_column)['type']\n\n if col_type == COLUMN_DATA_TYPES.MULTIPLE_CATEGORICAL:\n real = list(map(tuple, 
ds.get_column_original_data(output_column)))\n predicted = list(map(tuple, predictions[output_column]['predictions']))\n else:\n real = list(map(str,ds.get_column_original_data(output_column)))\n predicted = list(map(str,predictions[output_column]['predictions']))\n\n weight_map = None\n if 'weights' in ds.get_column_config(output_column):\n weight_map = ds.get_column_config(output_column)['weights']\n\n accuracy = self.apply_accuracy_function(ds.get_column_config(output_column)['type'],\n real,\n predicted,\n weight_map=weight_map,\n encoder=ds.encoders[output_column])\n\n if ds.get_column_config(output_column)['type'] == COLUMN_DATA_TYPES.NUMERIC:\n ds.encoders[output_column].decode_log = True\n predicted = ds.get_decoded_column_data(output_column, predictions[output_column]['encoded_predictions'])\n\n alternative_accuracy = self.apply_accuracy_function(ds.get_column_config(output_column)['type'], real, predicted,weight_map=weight_map)\n\n if alternative_accuracy['value'] > accuracy['value']:\n accuracy = alternative_accuracy\n else:\n ds.encoders[output_column].decode_log = False\n\n accuracies[output_column] = accuracy\n\n return accuracies\n\n def save(self, path_to):\n \"\"\"\n Save trained model to a file.\n\n :param path_to: str, full path of file, where we store results\n \"\"\"\n with open(path_to, 'wb') as f:\n # Null out certain object we don't want to store\n if hasattr(self._mixer, '_nonpersistent'):\n self._mixer._nonpersistent = {}\n self.config['mixer']['kwargs']['callback_on_iter'] = None\n\n\n # Dump everything relevant to cpu before saving\n self.convert_to_device(\"cpu\")\n pickle.dump(self.__dict__, f)\n self.convert_to_device()\n", "path": "lightwood/api/predictor.py"}], "after_files": [{"content": "import traceback\nimport time\n\nimport pandas\nimport numpy as np\nimport torch\n\nfrom lightwood.api.data_source import DataSource\nfrom lightwood.data_schemas.predictor_config import predictor_config_schema\nfrom lightwood.config.config import CONFIG\nfrom lightwood.constants.lightwood import COLUMN_DATA_TYPES\nfrom lightwood.helpers.device import get_devices\nfrom lightwood.logger import log\n\n\nclass Predictor:\n def __init__(self, config=None, output=None, load_from_path=None):\n \"\"\"\n :param config: dict\n :param output: list, the columns you want to predict, ludwig will try to generate a config\n :param load_from_path: str, the path to load the predictor from\n \"\"\"\n if load_from_path is not None:\n with open(load_from_path, 'rb') as pickle_in:\n self_dict = torch.load(pickle_in)\n self.__dict__ = self_dict\n self.convert_to_device()\n return\n\n if output is None and config is None:\n raise ValueError('You must provide either `output` or `config`')\n\n if config is not None and output is None:\n try:\n self.config = predictor_config_schema.validate(config)\n except Exception:\n error = traceback.format_exc(1)\n raise ValueError('[BAD DEFINITION] argument has errors: {err}'.format(err=error))\n\n # this is if we need to automatically generate a configuration variable\n self._generate_config = True if output is not None or self.config is None else False\n\n self._output_columns = output\n self._input_columns = None\n self.train_accuracy = None\n\n self._mixer = None\n\n def convert_to_device(self, device_str=None):\n if hasattr(self._mixer, 'to') and callable(self._mixer.to):\n if device_str is not None:\n device = torch.device(device_str)\n available_devices = 1\n if device_str == 'cuda':\n available_devices = torch.cuda.device_count()\n else:\n device, 
available_devices = get_devices()\n\n self._mixer.to(device, available_devices)\n\n def _type_map(self, from_data, col_name):\n \"\"\"\n This is a helper function that will help us auto-determine roughly what data types are in each column\n NOTE: That this assumes the data is clean and will only return types for 'CATEGORICAL', 'NUMERIC' and 'TEXT'\n \"\"\"\n\n col_pd_type = from_data[col_name].dtype\n col_pd_type = str(col_pd_type)\n\n if col_pd_type in ['int64', 'float64', 'timedelta']:\n return COLUMN_DATA_TYPES.NUMERIC\n elif col_pd_type in ['bool', 'category']:\n return COLUMN_DATA_TYPES.CATEGORICAL\n else:\n # if the number of uniques is less than 100 or less\n # than 10% of the total number of rows then keep it as categorical\n unique = from_data[col_name].nunique()\n if unique < 100 or unique < len(from_data[col_name]) / 10:\n return COLUMN_DATA_TYPES.CATEGORICAL\n else:\n return COLUMN_DATA_TYPES.TEXT\n\n def learn(self, from_data, test_data=None):\n \"\"\"\n Train and save a model (you can use this to retrain model from data).\n\n :param from_data: DataFrame or DataSource\n The data to learn from\n\n :param test_data: DataFrame or DataSource\n The data to test accuracy and learn_error from\n \"\"\"\n device, _available_devices = get_devices()\n log.info(f'Computing device used: {device}')\n # generate the configuration and set the order for the input and output columns\n if self._generate_config is True:\n self._input_columns = [col for col in from_data if col not in self._output_columns]\n self.config = {\n 'input_features': [{'name': col, 'type': self._type_map(from_data, col)} for col in self._input_columns],\n 'output_features': [{'name': col, 'type': self._type_map(from_data, col)} for col in self._output_columns]\n }\n self.config = predictor_config_schema.validate(self.config)\n log.info('Automatically generated a configuration')\n log.info(self.config)\n else:\n self._output_columns = [col['name'] for col in self.config['output_features']]\n self._input_columns = [col['name'] for col in self.config['input_features']]\n\n if isinstance(from_data, pandas.DataFrame):\n train_ds = DataSource(from_data, self.config)\n elif isinstance(from_data, DataSource):\n train_ds = from_data\n else:\n raise TypeError(':from_data: must be either DataFrame or DataSource')\n\n nr_subsets = 3 if len(train_ds) > 100 else 1\n\n if test_data is None:\n test_ds = train_ds.subset(0.1)\n elif isinstance(test_data, pandas.DataFrame):\n test_ds = train_ds.make_child(test_data)\n elif isinstance(test_data, DataSource):\n test_ds = test_data\n else:\n raise TypeError(':test_data: must be either DataFrame or DataSource')\n\n train_ds.create_subsets(nr_subsets)\n test_ds.create_subsets(nr_subsets)\n\n train_ds.train()\n test_ds.train()\n\n mixer_class = self.config['mixer']['class']\n mixer_kwargs = self.config['mixer']['kwargs']\n self._mixer = mixer_class(**mixer_kwargs)\n self._mixer.fit(train_ds=train_ds, test_ds=test_ds)\n self.train_accuracy = self._mixer.calculate_accuracy(test_ds)\n\n return self\n\n def predict(self, when_data=None, when=None):\n \"\"\"\n Predict given when conditions.\n\n :param when_data: pandas.DataFrame\n :param when: dict\n\n :return: pandas.DataFrame\n \"\"\"\n device, _available_devices = get_devices()\n log.info(f'Computing device used: {device}')\n if when is not None:\n when_dict = {key: [when[key]] for key in when}\n when_data = pandas.DataFrame(when_dict)\n\n when_data_ds = DataSource(when_data, self.config, prepare_encoders=False)\n\n when_data_ds.eval()\n\n kwargs = 
{'include_extra_data': self.config.get('include_extra_data', False)}\n\n return self._mixer.predict(when_data_ds, **kwargs)\n\n def calculate_accuracy(self, from_data):\n \"\"\"\n calculates the accuracy of the model\n :param from_data:a dataframe\n :return accuracies: dictionaries of accuracies\n \"\"\"\n\n if self._mixer is None:\n log.error(\"Please train the model before calculating accuracy\")\n return\n\n ds = from_data if isinstance(from_data, DataSource) else DataSource(from_data, self.config, prepare_encoders=False)\n predictions = self._mixer.predict(ds, include_extra_data=True)\n accuracies = {}\n\n for output_column in self._output_columns:\n\n col_type = ds.get_column_config(output_column)['type']\n\n if col_type == COLUMN_DATA_TYPES.MULTIPLE_CATEGORICAL:\n real = list(map(tuple, ds.get_column_original_data(output_column)))\n predicted = list(map(tuple, predictions[output_column]['predictions']))\n else:\n real = list(map(str,ds.get_column_original_data(output_column)))\n predicted = list(map(str,predictions[output_column]['predictions']))\n\n weight_map = None\n if 'weights' in ds.get_column_config(output_column):\n weight_map = ds.get_column_config(output_column)['weights']\n\n accuracy = self.apply_accuracy_function(ds.get_column_config(output_column)['type'],\n real,\n predicted,\n weight_map=weight_map,\n encoder=ds.encoders[output_column])\n\n if ds.get_column_config(output_column)['type'] == COLUMN_DATA_TYPES.NUMERIC:\n ds.encoders[output_column].decode_log = True\n predicted = ds.get_decoded_column_data(output_column, predictions[output_column]['encoded_predictions'])\n\n alternative_accuracy = self.apply_accuracy_function(ds.get_column_config(output_column)['type'], real, predicted,weight_map=weight_map)\n\n if alternative_accuracy['value'] > accuracy['value']:\n accuracy = alternative_accuracy\n else:\n ds.encoders[output_column].decode_log = False\n\n accuracies[output_column] = accuracy\n\n return accuracies\n\n def save(self, path_to):\n \"\"\"\n Save trained model to a file.\n\n :param path_to: str, full path of file, where we store results\n \"\"\"\n with open(path_to, 'wb') as f:\n # Null out certain object we don't want to store\n if hasattr(self._mixer, '_nonpersistent'):\n self._mixer._nonpersistent = {}\n self.config['mixer']['kwargs']['callback_on_iter'] = None\n\n\n # Dump everything relevant to cpu before saving\n self.convert_to_device(\"cpu\")\n torch.save(self.__dict__, f)\n self.convert_to_device()\n", "path": "lightwood/api/predictor.py"}]}
2,837
210
gh_patches_debug_37167
rasdani/github-patches
git_diff
beeware__toga-873
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Menubar / Appmenu inconsistencies X11 vs Wayland, Gnome shell ## Expected Behavior All menu items are visible, on a menubar (or hamburger menu equivalent) - something like: ![Screenshot from 2020-04-27 12-51-16](https://user-images.githubusercontent.com/421383/80324622-d8599980-8885-11ea-8f8f-e2133a783779.png) ## Current Behavior <!--- If you're describing a bug, what currently happens? --> The Appmenu is deprecated in latest GTK style guide. Currently it is not consistently shown. Its visibility can be turned off, and apps need to be able to show it some other way. 859a80226b8dfa7529e41ebb21e29df2ff61e2b4 The other application menus also sometimes don't show up. Behaviour varies depending on whether X11 or Wayland is being used. ## Steps to reproduce <!--- Provide a set of steps describing how to reproduce this bug. If you have a live example, provide the link below --> Gnome appmenu enabled `GDK_BACKEND=wayland python -m box` ![Screenshot from 2020-04-27 12-43-45](https://user-images.githubusercontent.com/421383/80324391-c7f4ef00-8884-11ea-961d-2ca09f74b90e.png) Note that with wayland, the name and icon in the appmenu are also wrong (x11 OK) `GDK_BACKEND=x11 python -m box` ![Screenshot from 2020-04-27 12-43-27](https://user-images.githubusercontent.com/421383/80324392-c9261c00-8884-11ea-9bb4-adbc5300f84e.png) Gnome appmenu disabled `GDK_BACKEND=wayland python -m box` ![Screenshot from 2020-04-27 12-42-45](https://user-images.githubusercontent.com/421383/80324393-c9261c00-8884-11ea-92cd-f7b58dd77938.png) `GDK_BACKEND=x11 python -m box` ![Screenshot from 2020-04-27 12-42-14](https://user-images.githubusercontent.com/421383/80324394-c9beb280-8884-11ea-8fbb-0787f4403a8a.png) ## Your Environment <!--- Provide details on your current environment you found the bug in --> * Python Version (list the specific version number) 3.7.3 * Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well) - [ ] macOS - version: - [x] Linux - Debian 10 - [ ] Windows - version: - [ ] Other - name: - version: * Toga Version (list the specific version number or git hash) 2020-04-26 859a80226b8dfa7529e41ebb21e29df2ff61e2b4 * Toga Target (the type of app you are trying to generate) - [ ] android - [ ] cocoa - [ ] django - [x] gtk - [ ] iOS - [ ] tvOS - [ ] watchOS - [ ] winforms - [ ] win32 - [ ] Other (please specify) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `src/gtk/toga_gtk/app.py` Content: ``` 1 import asyncio 2 import os 3 import os.path 4 import signal 5 import sys 6 from urllib.parse import unquote, urlparse 7 8 import gbulb 9 import toga 10 from toga import App as toga_App 11 from toga.command import GROUP_BREAK, SECTION_BREAK, Command 12 13 from .keys import gtk_accel 14 from .libs import Gio, GLib, Gtk 15 from .window import Window 16 17 18 def gtk_menu_item_activate(cmd): 19 """Convert a GTK menu item activation into a command invocation""" 20 def _handler(action, data): 21 cmd.action(cmd) 22 return _handler 23 24 25 class MainWindow(Window): 26 _IMPL_CLASS = Gtk.ApplicationWindow 27 28 def create(self): 29 super().create() 30 self.native.set_role("MainWindow") 31 self.native.set_icon(toga_App.app.icon._impl.native_72.get_pixbuf()) 32 33 def set_app(self, app): 34 super().set_app(app) 35 36 # The GTK docs list set_wmclass() as deprecated (and "pointless") 37 # but it's the only way I've found that actually sets the 38 # Application name to something other than '__main__.py'. 39 self.native.set_wmclass(app.interface.name, app.interface.name) 40 41 def on_close(self, *args): 42 pass 43 44 45 class App: 46 """ 47 Todo: 48 * Creation of Menus is not working. 49 * Disabling of menu items is not working. 50 * App Icon is not showing up 51 """ 52 def __init__(self, interface): 53 self.interface = interface 54 self.interface._impl = self 55 56 gbulb.install(gtk=True) 57 self.loop = asyncio.get_event_loop() 58 59 self.create() 60 61 def create(self): 62 # Stimulate the build of the app 63 self.native = Gtk.Application( 64 application_id=self.interface.app_id, 65 flags=Gio.ApplicationFlags.FLAGS_NONE 66 ) 67 68 # Connect the GTK signal that will cause app startup to occur 69 self.native.connect('startup', self.gtk_startup) 70 self.native.connect('activate', self.gtk_activate) 71 # self.native.connect('shutdown', self.shutdown) 72 73 self.actions = None 74 75 def gtk_startup(self, data=None): 76 self.interface.commands.add( 77 Command(None, 'About ' + self.interface.name, group=toga.Group.APP), 78 Command(None, 'Preferences', group=toga.Group.APP), 79 # Quit should always be the last item, in a section on it's own 80 Command( 81 lambda widget, data: self.exit(), 82 'Quit ' + self.interface.name, 83 shortcut=toga.Key.MOD_1 + 'q', 84 group=toga.Group.APP, 85 section=sys.maxsize 86 ), 87 Command(None, 'Visit homepage', group=toga.Group.HELP) 88 ) 89 self._create_app_commands() 90 91 self.interface.startup() 92 93 # Create the lookup table of menu items, 94 # then force the creation of the menus. 95 self._actions = {} 96 self.create_menus() 97 # self.interface.main_window._impl.create_toolbar() 98 99 def _create_app_commands(self): 100 # No extra menus 101 pass 102 103 def gtk_activate(self, data=None): 104 pass 105 106 def create_menus(self): 107 # Only create the menu if the menu item index has been created. 
108 if hasattr(self, '_actions'): 109 self._actions = {} 110 menubar = Gio.Menu() 111 label = None 112 submenu = None 113 section = None 114 for cmd in self.interface.commands: 115 if cmd == GROUP_BREAK: 116 if section: 117 submenu.append_section(None, section) 118 119 if label == '*': 120 self.native.set_app_menu(submenu) 121 else: 122 menubar.append_submenu(label, submenu) 123 124 label = None 125 submenu = None 126 section = None 127 elif cmd == SECTION_BREAK: 128 submenu.append_section(None, section) 129 section = None 130 131 else: 132 if submenu is None: 133 label = cmd.group.label 134 submenu = Gio.Menu() 135 136 if section is None: 137 section = Gio.Menu() 138 139 try: 140 action = self._actions[cmd] 141 except KeyError: 142 cmd_id = "command-%s" % id(cmd) 143 action = Gio.SimpleAction.new(cmd_id, None) 144 if cmd.action: 145 action.connect("activate", gtk_menu_item_activate(cmd)) 146 147 cmd._impl.native.append(action) 148 cmd._impl.set_enabled(cmd.enabled) 149 self._actions[cmd] = action 150 self.native.add_action(action) 151 152 item = Gio.MenuItem.new(cmd.label, 'app.' + cmd_id) 153 if cmd.shortcut: 154 item.set_attribute_value('accel', GLib.Variant('s', gtk_accel(cmd.shortcut))) 155 156 section.append_item(item) 157 158 if section: 159 submenu.append_section(None, section) 160 161 if submenu: 162 if label == '*': 163 self.native.set_app_menu(submenu) 164 else: 165 menubar.append_submenu(label, submenu) 166 167 # Set the menu for the app. 168 self.native.set_menubar(menubar) 169 170 def main_loop(self): 171 # Modify signal handlers to make sure Ctrl-C is caught and handled. 172 signal.signal(signal.SIGINT, signal.SIG_DFL) 173 174 self.loop.run_forever(application=self.native) 175 176 def set_main_window(self, window): 177 pass 178 179 def exit(self): 180 self.native.quit() 181 182 def set_on_exit(self, value): 183 pass 184 185 def current_window(self): 186 return self.native.get_active_window()._impl 187 188 def enter_full_screen(self, windows): 189 for window in windows: 190 window._impl.set_full_screen(True) 191 192 def exit_full_screen(self, windows): 193 for window in windows: 194 window._impl.set_full_screen(False) 195 196 def show_cursor(self): 197 self.interface.factory.not_implemented('App.show_cursor()') 198 199 def hide_cursor(self): 200 self.interface.factory.not_implemented('App.hide_cursor()') 201 202 def add_background_task(self, handler): 203 self.interface.factory.not_implemented('App.add_background_task()') 204 205 206 class DocumentApp(App): 207 def _create_app_commands(self): 208 self.interface.commands.add( 209 toga.Command( 210 self.open_file, 211 label='Open...', 212 shortcut=toga.Key.MOD_1 + 'o', 213 group=toga.Group.FILE, 214 section=0 215 ), 216 ) 217 218 def gtk_startup(self, data=None): 219 super().gtk_startup(data=data) 220 221 try: 222 # Look for a filename specified on the command line 223 file_name = os.path.abspath(sys.argv[1]) 224 except IndexError: 225 # Nothing on the command line; open a file dialog instead. 226 # TODO: This causes a blank window to be shown. 227 # Is there a way to open a file dialog without having a window? 228 m = toga.Window() 229 file_name = m.select_folder_dialog(self.interface.name, None, False)[0] 230 231 self.open_document(file_name) 232 233 def open_file(self, widget, **kwargs): 234 # TODO: This causes a blank window to be shown. 235 # Is there a way to open a file dialog without having a window? 
236 m = toga.Window() 237 file_name = m.select_folder_dialog(self.interface.name, None, False)[0] 238 239 self.open_document(file_name) 240 241 def open_document(self, fileURL): 242 """Open a new document in this app. 243 244 Args: 245 fileURL (str): The URL/path to the file to add as a document. 246 """ 247 # Convert the fileURL to a file path. 248 fileURL = fileURL.rstrip('/') 249 path = unquote(urlparse(fileURL).path) 250 extension = os.path.splitext(path)[1][1:] 251 252 # Create the document instance 253 DocType = self.interface.document_types[extension] 254 document = DocType(fileURL, self.interface) 255 self.interface._documents.append(document) 256 257 document.show() 258 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/gtk/toga_gtk/app.py b/src/gtk/toga_gtk/app.py --- a/src/gtk/toga_gtk/app.py +++ b/src/gtk/toga_gtk/app.py @@ -73,12 +73,13 @@ self.actions = None def gtk_startup(self, data=None): + # Set up the default commands for the interface. self.interface.commands.add( - Command(None, 'About ' + self.interface.name, group=toga.Group.APP), + Command(None, 'About ' + self.interface.name, group=toga.Group.HELP), Command(None, 'Preferences', group=toga.Group.APP), # Quit should always be the last item, in a section on it's own Command( - lambda widget, data: self.exit(), + lambda widget: self.exit(), 'Quit ' + self.interface.name, shortcut=toga.Key.MOD_1 + 'q', group=toga.Group.APP, @@ -94,7 +95,14 @@ # then force the creation of the menus. self._actions = {} self.create_menus() - # self.interface.main_window._impl.create_toolbar() + + # Now that we have menus, make the app take responsibility for + # showing the menubar. + # This is required because of inconsistencies in how the Gnome + # shell operates on different windowing environments; + # see #872 for details. + settings = Gtk.Settings.get_default() + settings.set_property("gtk-shell-shows-menubar", False) def _create_app_commands(self): # No extra menus @@ -117,9 +125,8 @@ submenu.append_section(None, section) if label == '*': - self.native.set_app_menu(submenu) - else: - menubar.append_submenu(label, submenu) + label = self.interface.name + menubar.append_submenu(label, submenu) label = None submenu = None @@ -160,9 +167,8 @@ if submenu: if label == '*': - self.native.set_app_menu(submenu) - else: - menubar.append_submenu(label, submenu) + label = self.interface.name + menubar.append_submenu(label, submenu) # Set the menu for the app. self.native.set_menubar(menubar)
{"golden_diff": "diff --git a/src/gtk/toga_gtk/app.py b/src/gtk/toga_gtk/app.py\n--- a/src/gtk/toga_gtk/app.py\n+++ b/src/gtk/toga_gtk/app.py\n@@ -73,12 +73,13 @@\n self.actions = None\n \n def gtk_startup(self, data=None):\n+ # Set up the default commands for the interface.\n self.interface.commands.add(\n- Command(None, 'About ' + self.interface.name, group=toga.Group.APP),\n+ Command(None, 'About ' + self.interface.name, group=toga.Group.HELP),\n Command(None, 'Preferences', group=toga.Group.APP),\n # Quit should always be the last item, in a section on it's own\n Command(\n- lambda widget, data: self.exit(),\n+ lambda widget: self.exit(),\n 'Quit ' + self.interface.name,\n shortcut=toga.Key.MOD_1 + 'q',\n group=toga.Group.APP,\n@@ -94,7 +95,14 @@\n # then force the creation of the menus.\n self._actions = {}\n self.create_menus()\n- # self.interface.main_window._impl.create_toolbar()\n+\n+ # Now that we have menus, make the app take responsibility for\n+ # showing the menubar.\n+ # This is required because of inconsistencies in how the Gnome\n+ # shell operates on different windowing environments;\n+ # see #872 for details.\n+ settings = Gtk.Settings.get_default()\n+ settings.set_property(\"gtk-shell-shows-menubar\", False)\n \n def _create_app_commands(self):\n # No extra menus\n@@ -117,9 +125,8 @@\n submenu.append_section(None, section)\n \n if label == '*':\n- self.native.set_app_menu(submenu)\n- else:\n- menubar.append_submenu(label, submenu)\n+ label = self.interface.name\n+ menubar.append_submenu(label, submenu)\n \n label = None\n submenu = None\n@@ -160,9 +167,8 @@\n \n if submenu:\n if label == '*':\n- self.native.set_app_menu(submenu)\n- else:\n- menubar.append_submenu(label, submenu)\n+ label = self.interface.name\n+ menubar.append_submenu(label, submenu)\n \n # Set the menu for the app.\n self.native.set_menubar(menubar)\n", "issue": "Menubar / Appmenu inconsistencies X11 vs Wayland, Gnome shell\n## Expected Behavior\r\nAll menu items are visible, on a menubar (or hamburger menu equivalent) - something like:\r\n![Screenshot from 2020-04-27 12-51-16](https://user-images.githubusercontent.com/421383/80324622-d8599980-8885-11ea-8f8f-e2133a783779.png)\r\n\r\n## Current Behavior\r\n<!--- If you're describing a bug, what currently happens? -->\r\nThe Appmenu is deprecated in latest GTK style guide. \r\nCurrently it is not consistently shown. Its visibility can be turned off, and apps need to be\r\nable to show it some other way.\r\n\r\n\r\n859a80226b8dfa7529e41ebb21e29df2ff61e2b4\r\n\r\nThe other application menus also sometimes don't show up.\r\n\r\nBehaviour varies depending on whether X11 or Wayland is being used.\r\n\r\n\r\n## Steps to reproduce\r\n<!--- Provide a set of steps describing how to reproduce this bug. 
If you have a live example, provide the link below -->\r\nGnome appmenu enabled\r\n\r\n`GDK_BACKEND=wayland python -m box`\r\n![Screenshot from 2020-04-27 12-43-45](https://user-images.githubusercontent.com/421383/80324391-c7f4ef00-8884-11ea-961d-2ca09f74b90e.png)\r\nNote that with wayland, the name and icon in the appmenu are also wrong (x11 OK)\r\n\r\n`GDK_BACKEND=x11 python -m box`\r\n![Screenshot from 2020-04-27 12-43-27](https://user-images.githubusercontent.com/421383/80324392-c9261c00-8884-11ea-9bb4-adbc5300f84e.png)\r\n\r\nGnome appmenu disabled\r\n`GDK_BACKEND=wayland python -m box`\r\n![Screenshot from 2020-04-27 12-42-45](https://user-images.githubusercontent.com/421383/80324393-c9261c00-8884-11ea-92cd-f7b58dd77938.png)\r\n\r\n`GDK_BACKEND=x11 python -m box`\r\n![Screenshot from 2020-04-27 12-42-14](https://user-images.githubusercontent.com/421383/80324394-c9beb280-8884-11ea-8fbb-0787f4403a8a.png)\r\n\r\n\r\n## Your Environment\r\n<!--- Provide details on your current environment you found the bug in -->\r\n\r\n* Python Version (list the specific version number)\r\n3.7.3\r\n* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)\r\n\r\n - [ ] macOS - version:\r\n - [x] Linux - Debian 10\r\n - [ ] Windows - version:\r\n - [ ] Other - name: - version:\r\n\r\n* Toga Version (list the specific version number or git hash)\r\n2020-04-26\r\n859a80226b8dfa7529e41ebb21e29df2ff61e2b4\r\n\r\n* Toga Target (the type of app you are trying to generate)\r\n\r\n - [ ] android\r\n - [ ] cocoa\r\n - [ ] django\r\n - [x] gtk\r\n - [ ] iOS\r\n - [ ] tvOS\r\n - [ ] watchOS\r\n - [ ] winforms\r\n - [ ] win32\r\n - [ ] Other (please specify)\r\n\n", "before_files": [{"content": "import asyncio\nimport os\nimport os.path\nimport signal\nimport sys\nfrom urllib.parse import unquote, urlparse\n\nimport gbulb\nimport toga\nfrom toga import App as toga_App\nfrom toga.command import GROUP_BREAK, SECTION_BREAK, Command\n\nfrom .keys import gtk_accel\nfrom .libs import Gio, GLib, Gtk\nfrom .window import Window\n\n\ndef gtk_menu_item_activate(cmd):\n \"\"\"Convert a GTK menu item activation into a command invocation\"\"\"\n def _handler(action, data):\n cmd.action(cmd)\n return _handler\n\n\nclass MainWindow(Window):\n _IMPL_CLASS = Gtk.ApplicationWindow\n\n def create(self):\n super().create()\n self.native.set_role(\"MainWindow\")\n self.native.set_icon(toga_App.app.icon._impl.native_72.get_pixbuf())\n\n def set_app(self, app):\n super().set_app(app)\n\n # The GTK docs list set_wmclass() as deprecated (and \"pointless\")\n # but it's the only way I've found that actually sets the\n # Application name to something other than '__main__.py'.\n self.native.set_wmclass(app.interface.name, app.interface.name)\n\n def on_close(self, *args):\n pass\n\n\nclass App:\n \"\"\"\n Todo:\n * Creation of Menus is not working.\n * Disabling of menu items is not working.\n * App Icon is not showing up\n \"\"\"\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n gbulb.install(gtk=True)\n self.loop = asyncio.get_event_loop()\n\n self.create()\n\n def create(self):\n # Stimulate the build of the app\n self.native = Gtk.Application(\n application_id=self.interface.app_id,\n flags=Gio.ApplicationFlags.FLAGS_NONE\n )\n\n # Connect the GTK signal that will cause app startup to occur\n self.native.connect('startup', self.gtk_startup)\n self.native.connect('activate', self.gtk_activate)\n # 
self.native.connect('shutdown', self.shutdown)\n\n self.actions = None\n\n def gtk_startup(self, data=None):\n self.interface.commands.add(\n Command(None, 'About ' + self.interface.name, group=toga.Group.APP),\n Command(None, 'Preferences', group=toga.Group.APP),\n # Quit should always be the last item, in a section on it's own\n Command(\n lambda widget, data: self.exit(),\n 'Quit ' + self.interface.name,\n shortcut=toga.Key.MOD_1 + 'q',\n group=toga.Group.APP,\n section=sys.maxsize\n ),\n Command(None, 'Visit homepage', group=toga.Group.HELP)\n )\n self._create_app_commands()\n\n self.interface.startup()\n\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self._actions = {}\n self.create_menus()\n # self.interface.main_window._impl.create_toolbar()\n\n def _create_app_commands(self):\n # No extra menus\n pass\n\n def gtk_activate(self, data=None):\n pass\n\n def create_menus(self):\n # Only create the menu if the menu item index has been created.\n if hasattr(self, '_actions'):\n self._actions = {}\n menubar = Gio.Menu()\n label = None\n submenu = None\n section = None\n for cmd in self.interface.commands:\n if cmd == GROUP_BREAK:\n if section:\n submenu.append_section(None, section)\n\n if label == '*':\n self.native.set_app_menu(submenu)\n else:\n menubar.append_submenu(label, submenu)\n\n label = None\n submenu = None\n section = None\n elif cmd == SECTION_BREAK:\n submenu.append_section(None, section)\n section = None\n\n else:\n if submenu is None:\n label = cmd.group.label\n submenu = Gio.Menu()\n\n if section is None:\n section = Gio.Menu()\n\n try:\n action = self._actions[cmd]\n except KeyError:\n cmd_id = \"command-%s\" % id(cmd)\n action = Gio.SimpleAction.new(cmd_id, None)\n if cmd.action:\n action.connect(\"activate\", gtk_menu_item_activate(cmd))\n\n cmd._impl.native.append(action)\n cmd._impl.set_enabled(cmd.enabled)\n self._actions[cmd] = action\n self.native.add_action(action)\n\n item = Gio.MenuItem.new(cmd.label, 'app.' 
+ cmd_id)\n if cmd.shortcut:\n item.set_attribute_value('accel', GLib.Variant('s', gtk_accel(cmd.shortcut)))\n\n section.append_item(item)\n\n if section:\n submenu.append_section(None, section)\n\n if submenu:\n if label == '*':\n self.native.set_app_menu(submenu)\n else:\n menubar.append_submenu(label, submenu)\n\n # Set the menu for the app.\n self.native.set_menubar(menubar)\n\n def main_loop(self):\n # Modify signal handlers to make sure Ctrl-C is caught and handled.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n self.loop.run_forever(application=self.native)\n\n def set_main_window(self, window):\n pass\n\n def exit(self):\n self.native.quit()\n\n def set_on_exit(self, value):\n pass\n\n def current_window(self):\n return self.native.get_active_window()._impl\n\n def enter_full_screen(self, windows):\n for window in windows:\n window._impl.set_full_screen(True)\n\n def exit_full_screen(self, windows):\n for window in windows:\n window._impl.set_full_screen(False)\n\n def show_cursor(self):\n self.interface.factory.not_implemented('App.show_cursor()')\n\n def hide_cursor(self):\n self.interface.factory.not_implemented('App.hide_cursor()')\n\n def add_background_task(self, handler):\n self.interface.factory.not_implemented('App.add_background_task()')\n\n\nclass DocumentApp(App):\n def _create_app_commands(self):\n self.interface.commands.add(\n toga.Command(\n self.open_file,\n label='Open...',\n shortcut=toga.Key.MOD_1 + 'o',\n group=toga.Group.FILE,\n section=0\n ),\n )\n\n def gtk_startup(self, data=None):\n super().gtk_startup(data=data)\n\n try:\n # Look for a filename specified on the command line\n file_name = os.path.abspath(sys.argv[1])\n except IndexError:\n # Nothing on the command line; open a file dialog instead.\n # TODO: This causes a blank window to be shown.\n # Is there a way to open a file dialog without having a window?\n m = toga.Window()\n file_name = m.select_folder_dialog(self.interface.name, None, False)[0]\n\n self.open_document(file_name)\n\n def open_file(self, widget, **kwargs):\n # TODO: This causes a blank window to be shown.\n # Is there a way to open a file dialog without having a window?\n m = toga.Window()\n file_name = m.select_folder_dialog(self.interface.name, None, False)[0]\n\n self.open_document(file_name)\n\n def open_document(self, fileURL):\n \"\"\"Open a new document in this app.\n\n Args:\n fileURL (str): The URL/path to the file to add as a document.\n \"\"\"\n # Convert the fileURL to a file path.\n fileURL = fileURL.rstrip('/')\n path = unquote(urlparse(fileURL).path)\n extension = os.path.splitext(path)[1][1:]\n\n # Create the document instance\n DocType = self.interface.document_types[extension]\n document = DocType(fileURL, self.interface)\n self.interface._documents.append(document)\n\n document.show()\n", "path": "src/gtk/toga_gtk/app.py"}], "after_files": [{"content": "import asyncio\nimport os\nimport os.path\nimport signal\nimport sys\nfrom urllib.parse import unquote, urlparse\n\nimport gbulb\nimport toga\nfrom toga import App as toga_App\nfrom toga.command import GROUP_BREAK, SECTION_BREAK, Command\n\nfrom .keys import gtk_accel\nfrom .libs import Gio, GLib, Gtk\nfrom .window import Window\n\n\ndef gtk_menu_item_activate(cmd):\n \"\"\"Convert a GTK menu item activation into a command invocation\"\"\"\n def _handler(action, data):\n cmd.action(cmd)\n return _handler\n\n\nclass MainWindow(Window):\n _IMPL_CLASS = Gtk.ApplicationWindow\n\n def create(self):\n super().create()\n self.native.set_role(\"MainWindow\")\n 
self.native.set_icon(toga_App.app.icon._impl.native_72.get_pixbuf())\n\n def set_app(self, app):\n super().set_app(app)\n\n # The GTK docs list set_wmclass() as deprecated (and \"pointless\")\n # but it's the only way I've found that actually sets the\n # Application name to something other than '__main__.py'.\n self.native.set_wmclass(app.interface.name, app.interface.name)\n\n def on_close(self, *args):\n pass\n\n\nclass App:\n \"\"\"\n Todo:\n * Creation of Menus is not working.\n * Disabling of menu items is not working.\n * App Icon is not showing up\n \"\"\"\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n gbulb.install(gtk=True)\n self.loop = asyncio.get_event_loop()\n\n self.create()\n\n def create(self):\n # Stimulate the build of the app\n self.native = Gtk.Application(\n application_id=self.interface.app_id,\n flags=Gio.ApplicationFlags.FLAGS_NONE\n )\n\n # Connect the GTK signal that will cause app startup to occur\n self.native.connect('startup', self.gtk_startup)\n self.native.connect('activate', self.gtk_activate)\n # self.native.connect('shutdown', self.shutdown)\n\n self.actions = None\n\n def gtk_startup(self, data=None):\n # Set up the default commands for the interface.\n self.interface.commands.add(\n Command(None, 'About ' + self.interface.name, group=toga.Group.HELP),\n Command(None, 'Preferences', group=toga.Group.APP),\n # Quit should always be the last item, in a section on it's own\n Command(\n lambda widget: self.exit(),\n 'Quit ' + self.interface.name,\n shortcut=toga.Key.MOD_1 + 'q',\n group=toga.Group.APP,\n section=sys.maxsize\n ),\n Command(None, 'Visit homepage', group=toga.Group.HELP)\n )\n self._create_app_commands()\n\n self.interface.startup()\n\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self._actions = {}\n self.create_menus()\n\n # Now that we have menus, make the app take responsibility for\n # showing the menubar.\n # This is required because of inconsistencies in how the Gnome\n # shell operates on different windowing environments;\n # see #872 for details.\n settings = Gtk.Settings.get_default()\n settings.set_property(\"gtk-shell-shows-menubar\", False)\n\n def _create_app_commands(self):\n # No extra menus\n pass\n\n def gtk_activate(self, data=None):\n pass\n\n def create_menus(self):\n # Only create the menu if the menu item index has been created.\n if hasattr(self, '_actions'):\n self._actions = {}\n menubar = Gio.Menu()\n label = None\n submenu = None\n section = None\n for cmd in self.interface.commands:\n if cmd == GROUP_BREAK:\n if section:\n submenu.append_section(None, section)\n\n if label == '*':\n label = self.interface.name\n menubar.append_submenu(label, submenu)\n\n label = None\n submenu = None\n section = None\n elif cmd == SECTION_BREAK:\n submenu.append_section(None, section)\n section = None\n\n else:\n if submenu is None:\n label = cmd.group.label\n submenu = Gio.Menu()\n\n if section is None:\n section = Gio.Menu()\n\n try:\n action = self._actions[cmd]\n except KeyError:\n cmd_id = \"command-%s\" % id(cmd)\n action = Gio.SimpleAction.new(cmd_id, None)\n if cmd.action:\n action.connect(\"activate\", gtk_menu_item_activate(cmd))\n\n cmd._impl.native.append(action)\n cmd._impl.set_enabled(cmd.enabled)\n self._actions[cmd] = action\n self.native.add_action(action)\n\n item = Gio.MenuItem.new(cmd.label, 'app.' 
+ cmd_id)\n if cmd.shortcut:\n item.set_attribute_value('accel', GLib.Variant('s', gtk_accel(cmd.shortcut)))\n\n section.append_item(item)\n\n if section:\n submenu.append_section(None, section)\n\n if submenu:\n if label == '*':\n label = self.interface.name\n menubar.append_submenu(label, submenu)\n\n # Set the menu for the app.\n self.native.set_menubar(menubar)\n\n def main_loop(self):\n # Modify signal handlers to make sure Ctrl-C is caught and handled.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n self.loop.run_forever(application=self.native)\n\n def set_main_window(self, window):\n pass\n\n def exit(self):\n self.native.quit()\n\n def set_on_exit(self, value):\n pass\n\n def current_window(self):\n return self.native.get_active_window()._impl\n\n def enter_full_screen(self, windows):\n for window in windows:\n window._impl.set_full_screen(True)\n\n def exit_full_screen(self, windows):\n for window in windows:\n window._impl.set_full_screen(False)\n\n def show_cursor(self):\n self.interface.factory.not_implemented('App.show_cursor()')\n\n def hide_cursor(self):\n self.interface.factory.not_implemented('App.hide_cursor()')\n\n def add_background_task(self, handler):\n self.interface.factory.not_implemented('App.add_background_task()')\n\n\nclass DocumentApp(App):\n def _create_app_commands(self):\n self.interface.commands.add(\n toga.Command(\n self.open_file,\n label='Open...',\n shortcut=toga.Key.MOD_1 + 'o',\n group=toga.Group.FILE,\n section=0\n ),\n )\n\n def gtk_startup(self, data=None):\n super().gtk_startup(data=data)\n\n try:\n # Look for a filename specified on the command line\n file_name = os.path.abspath(sys.argv[1])\n except IndexError:\n # Nothing on the command line; open a file dialog instead.\n # TODO: This causes a blank window to be shown.\n # Is there a way to open a file dialog without having a window?\n m = toga.Window()\n file_name = m.select_folder_dialog(self.interface.name, None, False)[0]\n\n self.open_document(file_name)\n\n def open_file(self, widget, **kwargs):\n # TODO: This causes a blank window to be shown.\n # Is there a way to open a file dialog without having a window?\n m = toga.Window()\n file_name = m.select_folder_dialog(self.interface.name, None, False)[0]\n\n self.open_document(file_name)\n\n def open_document(self, fileURL):\n \"\"\"Open a new document in this app.\n\n Args:\n fileURL (str): The URL/path to the file to add as a document.\n \"\"\"\n # Convert the fileURL to a file path.\n fileURL = fileURL.rstrip('/')\n path = unquote(urlparse(fileURL).path)\n extension = os.path.splitext(path)[1][1:]\n\n # Create the document instance\n DocType = self.interface.document_types[extension]\n document = DocType(fileURL, self.interface)\n self.interface._documents.append(document)\n\n document.show()\n", "path": "src/gtk/toga_gtk/app.py"}]}
3,538
531
gh_patches_debug_41922
rasdani/github-patches
git_diff
spack__spack-851
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- spack can't bootstrap from release tarball Spack release tarballs don't include `.git` in the top directory like a clone of the repo would. The bootstrap relies on this to bootrstrap a copy from github: ``` [jawestlu@master4-centos71 spack-0.8.17]$ ./bin/spack bootstrap /tmp/ ==> Error: command '/bin/git --git-dir=/mnt/lustre/jawestlu/rpmbuild/BUILD/spack-0.8.17/.git config --get remote.origin.url' returned error code 1 [jawestlu@master4-centos71 spack-0.8.17]$ ls -la /mnt/lustre/jawestlu/rpmbuild/BUILD/spack-0.8.17/ total 52 drwxr-xr-x 6 jawestlu jawestlu 4096 Jan 13 15:21 . drwxr-xr-x 14 jawestlu jawestlu 4096 Jan 13 15:16 .. -rw-r--r-- 1 jawestlu jawestlu 106 Mar 24 2015 .gitignore -rw-r--r-- 1 jawestlu jawestlu 20309 Mar 24 2015 LICENSE -rw-r--r-- 1 jawestlu jawestlu 2894 Mar 24 2015 README.md drwxr-xr-x 2 jawestlu jawestlu 4096 Mar 24 2015 bin drwxr-xr-x 3 jawestlu jawestlu 4096 Mar 24 2015 lib drwxr-xr-x 3 jawestlu jawestlu 4096 Mar 24 2015 share drwxr-xr-x 3 jawestlu jawestlu 4096 Mar 24 2015 var ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/spack/spack/cmd/bootstrap.py` Content: ``` 1 ############################################################################## 2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. 3 # Produced at the Lawrence Livermore National Laboratory. 4 # 5 # This file is part of Spack. 6 # Created by Todd Gamblin, [email protected], All rights reserved. 7 # LLNL-CODE-647188 8 # 9 # For details, see https://github.com/llnl/spack 10 # Please also see the LICENSE file for our notice and the LGPL. 11 # 12 # This program is free software; you can redistribute it and/or modify 13 # it under the terms of the GNU Lesser General Public License (as 14 # published by the Free Software Foundation) version 2.1, February 1999. 15 # 16 # This program is distributed in the hope that it will be useful, but 17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF 18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and 19 # conditions of the GNU Lesser General Public License for more details. 
20 # 21 # You should have received a copy of the GNU Lesser General Public 22 # License along with this program; if not, write to the Free Software 23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 24 ############################################################################## 25 import os 26 from subprocess import check_call 27 28 import llnl.util.tty as tty 29 from llnl.util.filesystem import join_path, mkdirp 30 31 import spack 32 from spack.util.executable import which 33 34 description = "Create a new installation of spack in another prefix" 35 36 def setup_parser(subparser): 37 subparser.add_argument('prefix', help="names of prefix where we should install spack") 38 39 40 def get_origin_url(): 41 git_dir = join_path(spack.prefix, '.git') 42 git = which('git', required=True) 43 origin_url = git( 44 '--git-dir=%s' % git_dir, 'config', '--get', 'remote.origin.url', 45 output=str) 46 return origin_url.strip() 47 48 49 def bootstrap(parser, args): 50 origin_url = get_origin_url() 51 prefix = args.prefix 52 53 tty.msg("Fetching spack from origin: %s" % origin_url) 54 55 if os.path.isfile(prefix): 56 tty.die("There is already a file at %s" % prefix) 57 58 mkdirp(prefix) 59 60 if os.path.exists(join_path(prefix, '.git')): 61 tty.die("There already seems to be a git repository in %s" % prefix) 62 63 files_in_the_way = os.listdir(prefix) 64 if files_in_the_way: 65 tty.die("There are already files there! Delete these files before boostrapping spack.", 66 *files_in_the_way) 67 68 tty.msg("Installing:", 69 "%s/bin/spack" % prefix, 70 "%s/lib/spack/..." % prefix) 71 72 os.chdir(prefix) 73 git = which('git', required=True) 74 git('init', '--shared', '-q') 75 git('remote', 'add', 'origin', origin_url) 76 git('fetch', 'origin', 'master:refs/remotes/origin/master', '-n', '-q') 77 git('reset', '--hard', 'origin/master', '-q') 78 79 tty.msg("Successfully created a new spack in %s" % prefix, 80 "Run %s/bin/spack to use this installation." % prefix) 81 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/spack/spack/cmd/bootstrap.py b/lib/spack/spack/cmd/bootstrap.py --- a/lib/spack/spack/cmd/bootstrap.py +++ b/lib/spack/spack/cmd/bootstrap.py @@ -23,7 +23,6 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import os -from subprocess import check_call import llnl.util.tty as tty from llnl.util.filesystem import join_path, mkdirp @@ -31,26 +30,49 @@ import spack from spack.util.executable import which +_SPACK_UPSTREAM = 'https://github.com/llnl/spack' + description = "Create a new installation of spack in another prefix" + def setup_parser(subparser): - subparser.add_argument('prefix', help="names of prefix where we should install spack") + subparser.add_argument( + '-r', '--remote', action='store', dest='remote', + help="name of the remote to bootstrap from", default='origin') + subparser.add_argument( + 'prefix', + help="names of prefix where we should install spack") -def get_origin_url(): +def get_origin_info(remote): git_dir = join_path(spack.prefix, '.git') git = which('git', required=True) - origin_url = git( - '--git-dir=%s' % git_dir, 'config', '--get', 'remote.origin.url', - output=str) - return origin_url.strip() + try: + branch = git('symbolic-ref', '--short', 'HEAD', output=str) + except ProcessError: + branch = 'develop' + tty.warn('No branch found; using default branch: %s' % branch) + if remote == 'origin' and \ + branch not in ('master', 'develop'): + branch = 'develop' + tty.warn('Unknown branch found; using default branch: %s' % branch) + try: + origin_url = git( + '--git-dir=%s' % git_dir, + 'config', '--get', 'remote.%s.url' % remote, + output=str) + except ProcessError: + origin_url = _SPACK_UPSTREAM + tty.warn('No git repository found; ' + 'using default upstream URL: %s' % origin_url) + return (origin_url.strip(), branch.strip()) def bootstrap(parser, args): - origin_url = get_origin_url() + origin_url, branch = get_origin_info(args.remote) prefix = args.prefix - tty.msg("Fetching spack from origin: %s" % origin_url) + tty.msg("Fetching spack from '%s': %s" % (args.remote, origin_url)) if os.path.isfile(prefix): tty.die("There is already a file at %s" % prefix) @@ -62,7 +84,8 @@ files_in_the_way = os.listdir(prefix) if files_in_the_way: - tty.die("There are already files there! Delete these files before boostrapping spack.", + tty.die("There are already files there! " + "Delete these files before boostrapping spack.", *files_in_the_way) tty.msg("Installing:", @@ -73,8 +96,10 @@ git = which('git', required=True) git('init', '--shared', '-q') git('remote', 'add', 'origin', origin_url) - git('fetch', 'origin', 'master:refs/remotes/origin/master', '-n', '-q') - git('reset', '--hard', 'origin/master', '-q') + git('fetch', 'origin', '%s:refs/remotes/origin/%s' % (branch, branch), + '-n', '-q') + git('reset', '--hard', 'origin/%s' % branch, '-q') + git('checkout', '-B', branch, 'origin/%s' % branch, '-q') tty.msg("Successfully created a new spack in %s" % prefix, "Run %s/bin/spack to use this installation." % prefix)
{"golden_diff": "diff --git a/lib/spack/spack/cmd/bootstrap.py b/lib/spack/spack/cmd/bootstrap.py\n--- a/lib/spack/spack/cmd/bootstrap.py\n+++ b/lib/spack/spack/cmd/bootstrap.py\n@@ -23,7 +23,6 @@\n # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n ##############################################################################\n import os\n-from subprocess import check_call\n \n import llnl.util.tty as tty\n from llnl.util.filesystem import join_path, mkdirp\n@@ -31,26 +30,49 @@\n import spack\n from spack.util.executable import which\n \n+_SPACK_UPSTREAM = 'https://github.com/llnl/spack'\n+\n description = \"Create a new installation of spack in another prefix\"\n \n+\n def setup_parser(subparser):\n- subparser.add_argument('prefix', help=\"names of prefix where we should install spack\")\n+ subparser.add_argument(\n+ '-r', '--remote', action='store', dest='remote',\n+ help=\"name of the remote to bootstrap from\", default='origin')\n+ subparser.add_argument(\n+ 'prefix',\n+ help=\"names of prefix where we should install spack\")\n \n \n-def get_origin_url():\n+def get_origin_info(remote):\n git_dir = join_path(spack.prefix, '.git')\n git = which('git', required=True)\n- origin_url = git(\n- '--git-dir=%s' % git_dir, 'config', '--get', 'remote.origin.url',\n- output=str)\n- return origin_url.strip()\n+ try:\n+ branch = git('symbolic-ref', '--short', 'HEAD', output=str)\n+ except ProcessError:\n+ branch = 'develop'\n+ tty.warn('No branch found; using default branch: %s' % branch)\n+ if remote == 'origin' and \\\n+ branch not in ('master', 'develop'):\n+ branch = 'develop'\n+ tty.warn('Unknown branch found; using default branch: %s' % branch)\n+ try:\n+ origin_url = git(\n+ '--git-dir=%s' % git_dir,\n+ 'config', '--get', 'remote.%s.url' % remote,\n+ output=str)\n+ except ProcessError:\n+ origin_url = _SPACK_UPSTREAM\n+ tty.warn('No git repository found; '\n+ 'using default upstream URL: %s' % origin_url)\n+ return (origin_url.strip(), branch.strip())\n \n \n def bootstrap(parser, args):\n- origin_url = get_origin_url()\n+ origin_url, branch = get_origin_info(args.remote)\n prefix = args.prefix\n \n- tty.msg(\"Fetching spack from origin: %s\" % origin_url)\n+ tty.msg(\"Fetching spack from '%s': %s\" % (args.remote, origin_url))\n \n if os.path.isfile(prefix):\n tty.die(\"There is already a file at %s\" % prefix)\n@@ -62,7 +84,8 @@\n \n files_in_the_way = os.listdir(prefix)\n if files_in_the_way:\n- tty.die(\"There are already files there! Delete these files before boostrapping spack.\",\n+ tty.die(\"There are already files there! \"\n+ \"Delete these files before boostrapping spack.\",\n *files_in_the_way)\n \n tty.msg(\"Installing:\",\n@@ -73,8 +96,10 @@\n git = which('git', required=True)\n git('init', '--shared', '-q')\n git('remote', 'add', 'origin', origin_url)\n- git('fetch', 'origin', 'master:refs/remotes/origin/master', '-n', '-q')\n- git('reset', '--hard', 'origin/master', '-q')\n+ git('fetch', 'origin', '%s:refs/remotes/origin/%s' % (branch, branch),\n+ '-n', '-q')\n+ git('reset', '--hard', 'origin/%s' % branch, '-q')\n+ git('checkout', '-B', branch, 'origin/%s' % branch, '-q')\n \n tty.msg(\"Successfully created a new spack in %s\" % prefix,\n \"Run %s/bin/spack to use this installation.\" % prefix)\n", "issue": "spack can't bootstrap from release tarball\nSpack release tarballs don't include `.git` in the top directory like a clone of the repo would. 
The bootstrap relies on this to bootrstrap a copy from github:\n\n```\n[jawestlu@master4-centos71 spack-0.8.17]$ ./bin/spack bootstrap /tmp/\n==> Error: command '/bin/git --git-dir=/mnt/lustre/jawestlu/rpmbuild/BUILD/spack-0.8.17/.git config --get remote.origin.url' returned error code 1\n[jawestlu@master4-centos71 spack-0.8.17]$ ls -la /mnt/lustre/jawestlu/rpmbuild/BUILD/spack-0.8.17/\ntotal 52\ndrwxr-xr-x 6 jawestlu jawestlu 4096 Jan 13 15:21 .\ndrwxr-xr-x 14 jawestlu jawestlu 4096 Jan 13 15:16 ..\n-rw-r--r-- 1 jawestlu jawestlu 106 Mar 24 2015 .gitignore\n-rw-r--r-- 1 jawestlu jawestlu 20309 Mar 24 2015 LICENSE\n-rw-r--r-- 1 jawestlu jawestlu 2894 Mar 24 2015 README.md\ndrwxr-xr-x 2 jawestlu jawestlu 4096 Mar 24 2015 bin\ndrwxr-xr-x 3 jawestlu jawestlu 4096 Mar 24 2015 lib\ndrwxr-xr-x 3 jawestlu jawestlu 4096 Mar 24 2015 share\ndrwxr-xr-x 3 jawestlu jawestlu 4096 Mar 24 2015 var\n```\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nimport os\nfrom subprocess import check_call\n\nimport llnl.util.tty as tty\nfrom llnl.util.filesystem import join_path, mkdirp\n\nimport spack\nfrom spack.util.executable import which\n\ndescription = \"Create a new installation of spack in another prefix\"\n\ndef setup_parser(subparser):\n subparser.add_argument('prefix', help=\"names of prefix where we should install spack\")\n\n\ndef get_origin_url():\n git_dir = join_path(spack.prefix, '.git')\n git = which('git', required=True)\n origin_url = git(\n '--git-dir=%s' % git_dir, 'config', '--get', 'remote.origin.url',\n output=str)\n return origin_url.strip()\n\n\ndef bootstrap(parser, args):\n origin_url = get_origin_url()\n prefix = args.prefix\n\n tty.msg(\"Fetching spack from origin: %s\" % origin_url)\n\n if os.path.isfile(prefix):\n tty.die(\"There is already a file at %s\" % prefix)\n\n mkdirp(prefix)\n\n if os.path.exists(join_path(prefix, '.git')):\n tty.die(\"There already seems to be a git repository in %s\" % prefix)\n\n files_in_the_way = os.listdir(prefix)\n if files_in_the_way:\n tty.die(\"There are already files there! 
Delete these files before boostrapping spack.\",\n *files_in_the_way)\n\n tty.msg(\"Installing:\",\n \"%s/bin/spack\" % prefix,\n \"%s/lib/spack/...\" % prefix)\n\n os.chdir(prefix)\n git = which('git', required=True)\n git('init', '--shared', '-q')\n git('remote', 'add', 'origin', origin_url)\n git('fetch', 'origin', 'master:refs/remotes/origin/master', '-n', '-q')\n git('reset', '--hard', 'origin/master', '-q')\n\n tty.msg(\"Successfully created a new spack in %s\" % prefix,\n \"Run %s/bin/spack to use this installation.\" % prefix)\n", "path": "lib/spack/spack/cmd/bootstrap.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nimport os\n\nimport llnl.util.tty as tty\nfrom llnl.util.filesystem import join_path, mkdirp\n\nimport spack\nfrom spack.util.executable import which\n\n_SPACK_UPSTREAM = 'https://github.com/llnl/spack'\n\ndescription = \"Create a new installation of spack in another prefix\"\n\n\ndef setup_parser(subparser):\n subparser.add_argument(\n '-r', '--remote', action='store', dest='remote',\n help=\"name of the remote to bootstrap from\", default='origin')\n subparser.add_argument(\n 'prefix',\n help=\"names of prefix where we should install spack\")\n\n\ndef get_origin_info(remote):\n git_dir = join_path(spack.prefix, '.git')\n git = which('git', required=True)\n try:\n branch = git('symbolic-ref', '--short', 'HEAD', output=str)\n except ProcessError:\n branch = 'develop'\n tty.warn('No branch found; using default branch: %s' % branch)\n if remote == 'origin' and \\\n branch not in ('master', 'develop'):\n branch = 'develop'\n tty.warn('Unknown branch found; using default branch: %s' % branch)\n try:\n origin_url = git(\n '--git-dir=%s' % git_dir,\n 'config', '--get', 'remote.%s.url' % remote,\n output=str)\n except ProcessError:\n origin_url = _SPACK_UPSTREAM\n tty.warn('No git repository found; '\n 'using default upstream URL: %s' % origin_url)\n return (origin_url.strip(), branch.strip())\n\n\ndef bootstrap(parser, args):\n origin_url, branch = get_origin_info(args.remote)\n prefix = args.prefix\n\n tty.msg(\"Fetching spack from '%s': %s\" % (args.remote, origin_url))\n\n if os.path.isfile(prefix):\n tty.die(\"There is already a file at %s\" % prefix)\n\n mkdirp(prefix)\n\n if os.path.exists(join_path(prefix, '.git')):\n tty.die(\"There already seems to be a git 
repository in %s\" % prefix)\n\n files_in_the_way = os.listdir(prefix)\n if files_in_the_way:\n tty.die(\"There are already files there! \"\n \"Delete these files before boostrapping spack.\",\n *files_in_the_way)\n\n tty.msg(\"Installing:\",\n \"%s/bin/spack\" % prefix,\n \"%s/lib/spack/...\" % prefix)\n\n os.chdir(prefix)\n git = which('git', required=True)\n git('init', '--shared', '-q')\n git('remote', 'add', 'origin', origin_url)\n git('fetch', 'origin', '%s:refs/remotes/origin/%s' % (branch, branch),\n '-n', '-q')\n git('reset', '--hard', 'origin/%s' % branch, '-q')\n git('checkout', '-B', branch, 'origin/%s' % branch, '-q')\n\n tty.msg(\"Successfully created a new spack in %s\" % prefix,\n \"Run %s/bin/spack to use this installation.\" % prefix)\n", "path": "lib/spack/spack/cmd/bootstrap.py"}]}
1,630
949
gh_patches_debug_22477
rasdani/github-patches
git_diff
spyder-ide__spyder-9672
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Relative paths in workspace.ini to make projects moveable ## Description of your problem **What steps will reproduce the problem?** 1. Add a new project `Projects > New project...` 2. Open some files. 3. Close Spyder. 4. Copy the whole project's directory to another computer to a different path (or have it synced with other means to different directories, e.g. `/home/mm/Software/Project` and `/home/MickeyMouse/Work/Software/Project` for your home and work computers). 5. Open the project on the other computer. **What is the expected output? What do you see instead?** I would like to see the same files opened automatically on both computers when opening the project, but the `.spyproject/workspace.ini` file stores the full paths in `[main]recent_files`. If the paths were stored relative to `.spyproject/..` (the project's base directory), it would be possible to move the project to a different location. **Please provide any additional information below** There seems to be two related bugs: #1133 and #1343. Maybe more? ## Versions and main components * Spyder Version: 3.2.3 * Python Version: 3.5.2 64bits * Qt Version: 5.5.1 * PyQt Version: 5.5.1 * Operating system: Linux Mint 18.2 Cinnamon 64-bit ## Dependencies Please go to the menu entry `Help > Optional Dependencies` (or `Help > Dependencies`), press the button `Copy to clipboard` and paste the contents below: IPython >=4.0 : 6.1.0 (OK) cython >=0.21 : None (NOK) jedi >=0.9.0 : 0.10.2 (OK) nbconvert >=4.0 : 5.3.1 (OK) numpy >=1.7 : 1.12.0 (OK) pandas >=0.13.1 : 0.19.2 (OK) psutil >=0.3 : 5.3.1 (OK) pycodestyle >=2.3: 2.3.1 (OK) pyflakes >=0.6.0 : 1.6.0 (OK) pygments >=2.0 : 2.2.0 (OK) pylint >=0.25 : 1.7.2 (OK) qtconsole >=4.2.0: 4.3.1 (OK) rope >=0.9.4 : 0.10.7 (OK) sphinx >=0.6.6 : 1.6.3 (OK) sympy >=0.7.3 : 1.0 (OK) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `spyder/plugins/projects/api.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # ----------------------------------------------------------------------------- 3 # Copyright © Spyder Project Contributors 4 # 5 # Licensed under the terms of the MIT License 6 # (see spyder/__init__.py for details) 7 # ----------------------------------------------------------------------------- 8 """Project API""" 9 10 import os 11 import os.path as osp 12 from collections import OrderedDict 13 14 from spyder.config.base import _ 15 from spyder.py3compat import to_text_string 16 from spyder.plugins.projects.utils.config import (ProjectConfig, CODESTYLE, 17 CODESTYLE_DEFAULTS, 18 CODESTYLE_VERSION, WORKSPACE, 19 WORKSPACE_DEFAULTS, 20 WORKSPACE_VERSION, 21 ENCODING, ENCODING_DEFAULTS, 22 ENCODING_VERSION, 23 VCS, VCS_DEFAULTS, VCS_VERSION) 24 25 26 class BaseProject(object): 27 """Spyder base project. 28 29 This base class must not be used directly, but inherited from. It does not 30 assume that python is specific to this project. 
31 """ 32 PROJECT_FOLDER = '.spyproject' 33 PROJECT_TYPE_NAME = None 34 IGNORE_FILE = "" 35 CONFIG_SETUP = {WORKSPACE: {'filename': '{0}.ini'.format(WORKSPACE), 36 'defaults': WORKSPACE_DEFAULTS, 37 'version': WORKSPACE_VERSION}, 38 CODESTYLE: {'filename': '{0}.ini'.format(CODESTYLE), 39 'defaults': CODESTYLE_DEFAULTS, 40 'version': CODESTYLE_VERSION}, 41 ENCODING: {'filename': '{0}.ini'.format(ENCODING), 42 'defaults': ENCODING_DEFAULTS, 43 'version': ENCODING_VERSION}, 44 VCS: {'filename': '{0}.ini'.format(VCS), 45 'defaults': VCS_DEFAULTS, 46 'version': VCS_VERSION} 47 } 48 49 def __init__(self, root_path): 50 self.name = None 51 self.root_path = root_path 52 self.open_project_files = [] 53 self.open_non_project_files = [] 54 self.config_files = [] 55 self.CONF = {} 56 57 # Configuration files 58 59 self.related_projects = [] # storing project path, not project objects 60 # self.pythonpath = [] 61 self.opened = True 62 63 self.ioerror_flag = False 64 self.create_project_config_files() 65 66 # --- Helpers 67 # ------------------------------------------------------------------------- 68 def set_recent_files(self, recent_files): 69 """Set a list of files opened by the project.""" 70 for recent_file in recent_files[:]: 71 if not os.path.isfile(recent_file): 72 recent_files.remove(recent_file) 73 try: 74 self.CONF[WORKSPACE].set('main', 'recent_files', 75 list(OrderedDict.fromkeys(recent_files))) 76 except EnvironmentError: 77 pass 78 79 def get_recent_files(self): 80 """Return a list of files opened by the project.""" 81 try: 82 recent_files = self.CONF[WORKSPACE].get('main', 'recent_files', 83 default=[]) 84 except EnvironmentError: 85 return [] 86 87 for recent_file in recent_files[:]: 88 if not os.path.isfile(recent_file): 89 recent_files.remove(recent_file) 90 return list(OrderedDict.fromkeys(recent_files)) 91 92 def create_project_config_files(self): 93 """ """ 94 dic = self.CONFIG_SETUP 95 for key in dic: 96 name = key 97 filename = dic[key]['filename'] 98 defaults = dic[key]['defaults'] 99 version = dic[key]['version'] 100 self.CONF[key] = ProjectConfig(name, self.root_path, filename, 101 defaults=defaults, load=True, 102 version=version) 103 104 def get_conf_files(self): 105 """ """ 106 return self.CONF 107 108 def add_ignore_lines(self, lines): 109 """ """ 110 text = self.IGNORE_FILE 111 for line in lines: 112 text += line 113 self.IGNORE_FILE = text 114 115 def set_root_path(self, root_path): 116 """Set project root path.""" 117 if self.name is None: 118 self.name = osp.basename(root_path) 119 self.root_path = to_text_string(root_path) 120 config_path = self.__get_project_config_path() 121 if osp.exists(config_path): 122 self.load() 123 else: 124 if not osp.isdir(self.root_path): 125 os.mkdir(self.root_path) 126 self.save() 127 128 def rename(self, new_name): 129 """Rename project and rename its root path accordingly.""" 130 old_name = self.name 131 self.name = new_name 132 pypath = self.relative_pythonpath # ?? 133 self.root_path = self.root_path[:-len(old_name)]+new_name 134 self.relative_pythonpath = pypath # ?? 
135 self.save() 136 137 def __get_project_config_folder(self): 138 """Return project configuration folder.""" 139 return osp.join(self.root_path, self.PROJECT_FOLDER) 140 141 def __get_project_config_path(self): 142 """Return project configuration path""" 143 return osp.join(self.root_path, self.CONFIG_NAME) 144 145 def load(self): 146 """Load project data""" 147 # fname = self.__get_project_config_path() 148 # try: 149 # # Old format (Spyder 2.0-2.1 for Python 2) 150 # with open(fname, 'U') as fdesc: 151 # data = pickle.loads(fdesc.read()) 152 # except (pickle.PickleError, TypeError, UnicodeDecodeError, 153 # AttributeError): 154 # try: 155 # # New format (Spyder >=2.2 for Python 2 and Python 3) 156 # with open(fname, 'rb') as fdesc: 157 # data = pickle.loads(fdesc.read()) 158 # except (IOError, OSError, pickle.PickleError): 159 # self.ioerror_flag = True 160 # return 161 # Compatibilty with old project explorer file format: 162 # if 'relative_pythonpath' not in data: 163 # print("Warning: converting old configuration file " 164 # "for project '%s'" % data['name'], file=STDERR) 165 # self.pythonpath = data['pythonpath'] 166 # data['relative_pythonpath'] = self.relative_pythonpath 167 # for attr in self.CONFIG_ATTR: 168 # setattr(self, attr, data[attr]) 169 # self.save() 170 171 def save(self): 172 """Save project data""" 173 # data = {} 174 # for attr in self.PROJECT_ATTR: 175 # data[attr] = getattr(self, attr) 176 # try: 177 # with open(self.__get_project_config_path(), 'wb') as fdesc: 178 # pickle.dump(data, fdesc, 2) 179 # except (IOError, OSError): 180 # self.ioerror_flag = True 181 182 # def delete(self): 183 # """Delete project""" 184 # os.remove(self.__get_project_config_path()) 185 # 186 # # --- Misc. 187 # def get_related_projects(self): 188 # """Return related projects path list""" 189 # return self.related_projects 190 # 191 # def set_related_projects(self, related_projects): 192 # """Set related projects""" 193 # self.related_projects = related_projects 194 # self.save() 195 # 196 # def open(self): 197 # """Open project""" 198 # self.opened = True 199 # self.save() 200 # 201 # def close(self): 202 # """Close project""" 203 # self.opened = False 204 # self.save() 205 # 206 # def is_opened(self): 207 # """Return True if project is opened""" 208 # return self.opened 209 # 210 # def is_file_in_project(self, fname): 211 # """Return True if file *fname* is in one of the project subfolders""" 212 # fixed_root = fixpath(self.root_path) 213 # return fixpath(fname) == fixed_root or\ 214 # fixpath(osp.dirname(fname)).startswith(fixed_root) 215 # 216 # def is_root_path(self, dirname): 217 # """Return True if dirname is project's root path""" 218 # return fixpath(dirname) == fixpath(self.root_path) 219 220 221 class EmptyProject(BaseProject): 222 """Empty Project""" 223 PROJECT_TYPE_NAME = _('Empty project') 224 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/spyder/plugins/projects/api.py b/spyder/plugins/projects/api.py --- a/spyder/plugins/projects/api.py +++ b/spyder/plugins/projects/api.py @@ -70,6 +70,8 @@ for recent_file in recent_files[:]: if not os.path.isfile(recent_file): recent_files.remove(recent_file) + recent_files = [os.path.relpath(recent_file, self.root_path) + for recent_file in recent_files] try: self.CONF[WORKSPACE].set('main', 'recent_files', list(OrderedDict.fromkeys(recent_files))) @@ -84,6 +86,9 @@ except EnvironmentError: return [] + recent_files = [recent_file if os.path.isabs(recent_file) + else os.path.join(self.root_path, recent_file) + for recent_file in recent_files] for recent_file in recent_files[:]: if not os.path.isfile(recent_file): recent_files.remove(recent_file)
{"golden_diff": "diff --git a/spyder/plugins/projects/api.py b/spyder/plugins/projects/api.py\n--- a/spyder/plugins/projects/api.py\n+++ b/spyder/plugins/projects/api.py\n@@ -70,6 +70,8 @@\n for recent_file in recent_files[:]:\r\n if not os.path.isfile(recent_file):\r\n recent_files.remove(recent_file)\r\n+ recent_files = [os.path.relpath(recent_file, self.root_path)\r\n+ for recent_file in recent_files]\r\n try:\r\n self.CONF[WORKSPACE].set('main', 'recent_files',\r\n list(OrderedDict.fromkeys(recent_files)))\r\n@@ -84,6 +86,9 @@\n except EnvironmentError:\r\n return []\r\n \r\n+ recent_files = [recent_file if os.path.isabs(recent_file)\r\n+ else os.path.join(self.root_path, recent_file)\r\n+ for recent_file in recent_files]\r\n for recent_file in recent_files[:]:\r\n if not os.path.isfile(recent_file):\r\n recent_files.remove(recent_file)\n", "issue": "Relative paths in workspace.ini to make projects moveable\n## Description of your problem\r\n\r\n**What steps will reproduce the problem?**\r\n\r\n1. Add a new project `Projects > New project...`\r\n2. Open some files.\r\n3. Close Spyder.\r\n4. Copy the whole project's directory to another computer to a different path (or have it synced with other means to different directories, e.g. `/home/mm/Software/Project` and `/home/MickeyMouse/Work/Software/Project` for your home and work computers).\r\n5. Open the project on the other computer.\r\n\r\n**What is the expected output? What do you see instead?**\r\n\r\nI would like to see the same files opened automatically on both computers when opening the project, but the `.spyproject/workspace.ini` file stores the full paths in `[main]recent_files`. If the paths were stored relative to `.spyproject/..` (the project's base directory), it would be possible to move the project to a different location.\r\n\r\n**Please provide any additional information below**\r\n\r\nThere seems to be two related bugs: #1133 and #1343. 
Maybe more?\r\n\r\n## Versions and main components\r\n\r\n* Spyder Version: 3.2.3\r\n* Python Version: 3.5.2 64bits\r\n* Qt Version: 5.5.1\r\n* PyQt Version: 5.5.1\r\n* Operating system: Linux Mint 18.2 Cinnamon 64-bit\r\n\r\n\r\n## Dependencies\r\n\r\nPlease go to the menu entry `Help > Optional Dependencies` (or\r\n`Help > Dependencies`), press the button `Copy to clipboard`\r\nand paste the contents below:\r\n\r\nIPython >=4.0 : 6.1.0 (OK)\r\ncython >=0.21 : None (NOK)\r\njedi >=0.9.0 : 0.10.2 (OK)\r\nnbconvert >=4.0 : 5.3.1 (OK)\r\nnumpy >=1.7 : 1.12.0 (OK)\r\npandas >=0.13.1 : 0.19.2 (OK)\r\npsutil >=0.3 : 5.3.1 (OK)\r\npycodestyle >=2.3: 2.3.1 (OK)\r\npyflakes >=0.6.0 : 1.6.0 (OK)\r\npygments >=2.0 : 2.2.0 (OK)\r\npylint >=0.25 : 1.7.2 (OK)\r\nqtconsole >=4.2.0: 4.3.1 (OK)\r\nrope >=0.9.4 : 0.10.7 (OK)\r\nsphinx >=0.6.6 : 1.6.3 (OK)\r\nsympy >=0.7.3 : 1.0 (OK)\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\r\n# -----------------------------------------------------------------------------\r\n# Copyright \u00a9 Spyder Project Contributors\r\n#\r\n# Licensed under the terms of the MIT License\r\n# (see spyder/__init__.py for details)\r\n# -----------------------------------------------------------------------------\r\n\"\"\"Project API\"\"\"\r\n\r\nimport os\r\nimport os.path as osp\r\nfrom collections import OrderedDict\r\n\r\nfrom spyder.config.base import _\r\nfrom spyder.py3compat import to_text_string\r\nfrom spyder.plugins.projects.utils.config import (ProjectConfig, CODESTYLE,\r\n CODESTYLE_DEFAULTS,\r\n CODESTYLE_VERSION, WORKSPACE,\r\n WORKSPACE_DEFAULTS,\r\n WORKSPACE_VERSION,\r\n ENCODING, ENCODING_DEFAULTS,\r\n ENCODING_VERSION,\r\n VCS, VCS_DEFAULTS, VCS_VERSION)\r\n\r\n\r\nclass BaseProject(object):\r\n \"\"\"Spyder base project.\r\n\r\n This base class must not be used directly, but inherited from. 
It does not\r\n assume that python is specific to this project.\r\n \"\"\"\r\n PROJECT_FOLDER = '.spyproject'\r\n PROJECT_TYPE_NAME = None\r\n IGNORE_FILE = \"\"\r\n CONFIG_SETUP = {WORKSPACE: {'filename': '{0}.ini'.format(WORKSPACE),\r\n 'defaults': WORKSPACE_DEFAULTS,\r\n 'version': WORKSPACE_VERSION},\r\n CODESTYLE: {'filename': '{0}.ini'.format(CODESTYLE),\r\n 'defaults': CODESTYLE_DEFAULTS,\r\n 'version': CODESTYLE_VERSION},\r\n ENCODING: {'filename': '{0}.ini'.format(ENCODING),\r\n 'defaults': ENCODING_DEFAULTS,\r\n 'version': ENCODING_VERSION},\r\n VCS: {'filename': '{0}.ini'.format(VCS),\r\n 'defaults': VCS_DEFAULTS,\r\n 'version': VCS_VERSION}\r\n }\r\n\r\n def __init__(self, root_path):\r\n self.name = None\r\n self.root_path = root_path\r\n self.open_project_files = []\r\n self.open_non_project_files = []\r\n self.config_files = []\r\n self.CONF = {}\r\n\r\n # Configuration files\r\n\r\n self.related_projects = [] # storing project path, not project objects\r\n# self.pythonpath = []\r\n self.opened = True\r\n\r\n self.ioerror_flag = False\r\n self.create_project_config_files()\r\n\r\n # --- Helpers\r\n # -------------------------------------------------------------------------\r\n def set_recent_files(self, recent_files):\r\n \"\"\"Set a list of files opened by the project.\"\"\"\r\n for recent_file in recent_files[:]:\r\n if not os.path.isfile(recent_file):\r\n recent_files.remove(recent_file)\r\n try:\r\n self.CONF[WORKSPACE].set('main', 'recent_files',\r\n list(OrderedDict.fromkeys(recent_files)))\r\n except EnvironmentError:\r\n pass\r\n\r\n def get_recent_files(self):\r\n \"\"\"Return a list of files opened by the project.\"\"\"\r\n try:\r\n recent_files = self.CONF[WORKSPACE].get('main', 'recent_files',\r\n default=[])\r\n except EnvironmentError:\r\n return []\r\n\r\n for recent_file in recent_files[:]:\r\n if not os.path.isfile(recent_file):\r\n recent_files.remove(recent_file)\r\n return list(OrderedDict.fromkeys(recent_files))\r\n\r\n def create_project_config_files(self):\r\n \"\"\" \"\"\"\r\n dic = self.CONFIG_SETUP\r\n for key in dic:\r\n name = key\r\n filename = dic[key]['filename']\r\n defaults = dic[key]['defaults']\r\n version = dic[key]['version']\r\n self.CONF[key] = ProjectConfig(name, self.root_path, filename,\r\n defaults=defaults, load=True,\r\n version=version)\r\n\r\n def get_conf_files(self):\r\n \"\"\" \"\"\"\r\n return self.CONF\r\n\r\n def add_ignore_lines(self, lines):\r\n \"\"\" \"\"\"\r\n text = self.IGNORE_FILE\r\n for line in lines:\r\n text += line\r\n self.IGNORE_FILE = text\r\n\r\n def set_root_path(self, root_path):\r\n \"\"\"Set project root path.\"\"\"\r\n if self.name is None:\r\n self.name = osp.basename(root_path)\r\n self.root_path = to_text_string(root_path)\r\n config_path = self.__get_project_config_path()\r\n if osp.exists(config_path):\r\n self.load()\r\n else:\r\n if not osp.isdir(self.root_path):\r\n os.mkdir(self.root_path)\r\n self.save()\r\n\r\n def rename(self, new_name):\r\n \"\"\"Rename project and rename its root path accordingly.\"\"\"\r\n old_name = self.name\r\n self.name = new_name\r\n pypath = self.relative_pythonpath # ??\r\n self.root_path = self.root_path[:-len(old_name)]+new_name\r\n self.relative_pythonpath = pypath # ??\r\n self.save()\r\n\r\n def __get_project_config_folder(self):\r\n \"\"\"Return project configuration folder.\"\"\"\r\n return osp.join(self.root_path, self.PROJECT_FOLDER)\r\n\r\n def __get_project_config_path(self):\r\n \"\"\"Return project configuration path\"\"\"\r\n return 
osp.join(self.root_path, self.CONFIG_NAME)\r\n\r\n def load(self):\r\n \"\"\"Load project data\"\"\"\r\n# fname = self.__get_project_config_path()\r\n# try:\r\n# # Old format (Spyder 2.0-2.1 for Python 2)\r\n# with open(fname, 'U') as fdesc:\r\n# data = pickle.loads(fdesc.read())\r\n# except (pickle.PickleError, TypeError, UnicodeDecodeError,\r\n# AttributeError):\r\n# try:\r\n# # New format (Spyder >=2.2 for Python 2 and Python 3)\r\n# with open(fname, 'rb') as fdesc:\r\n# data = pickle.loads(fdesc.read())\r\n# except (IOError, OSError, pickle.PickleError):\r\n# self.ioerror_flag = True\r\n# return\r\n # Compatibilty with old project explorer file format:\r\n# if 'relative_pythonpath' not in data:\r\n# print(\"Warning: converting old configuration file \"\r\n# \"for project '%s'\" % data['name'], file=STDERR)\r\n# self.pythonpath = data['pythonpath']\r\n# data['relative_pythonpath'] = self.relative_pythonpath\r\n# for attr in self.CONFIG_ATTR:\r\n# setattr(self, attr, data[attr])\r\n# self.save()\r\n\r\n def save(self):\r\n \"\"\"Save project data\"\"\"\r\n# data = {}\r\n# for attr in self.PROJECT_ATTR:\r\n# data[attr] = getattr(self, attr)\r\n# try:\r\n# with open(self.__get_project_config_path(), 'wb') as fdesc:\r\n# pickle.dump(data, fdesc, 2)\r\n# except (IOError, OSError):\r\n# self.ioerror_flag = True\r\n\r\n# def delete(self):\r\n# \"\"\"Delete project\"\"\"\r\n# os.remove(self.__get_project_config_path())\r\n#\r\n# # --- Misc.\r\n# def get_related_projects(self):\r\n# \"\"\"Return related projects path list\"\"\"\r\n# return self.related_projects\r\n#\r\n# def set_related_projects(self, related_projects):\r\n# \"\"\"Set related projects\"\"\"\r\n# self.related_projects = related_projects\r\n# self.save()\r\n#\r\n# def open(self):\r\n# \"\"\"Open project\"\"\"\r\n# self.opened = True\r\n# self.save()\r\n#\r\n# def close(self):\r\n# \"\"\"Close project\"\"\"\r\n# self.opened = False\r\n# self.save()\r\n#\r\n# def is_opened(self):\r\n# \"\"\"Return True if project is opened\"\"\"\r\n# return self.opened\r\n#\r\n# def is_file_in_project(self, fname):\r\n# \"\"\"Return True if file *fname* is in one of the project subfolders\"\"\"\r\n# fixed_root = fixpath(self.root_path)\r\n# return fixpath(fname) == fixed_root or\\\r\n# fixpath(osp.dirname(fname)).startswith(fixed_root)\r\n#\r\n# def is_root_path(self, dirname):\r\n# \"\"\"Return True if dirname is project's root path\"\"\"\r\n# return fixpath(dirname) == fixpath(self.root_path)\r\n\r\n\r\nclass EmptyProject(BaseProject):\r\n \"\"\"Empty Project\"\"\"\r\n PROJECT_TYPE_NAME = _('Empty project')\r\n", "path": "spyder/plugins/projects/api.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\r\n# -----------------------------------------------------------------------------\r\n# Copyright \u00a9 Spyder Project Contributors\r\n#\r\n# Licensed under the terms of the MIT License\r\n# (see spyder/__init__.py for details)\r\n# -----------------------------------------------------------------------------\r\n\"\"\"Project API\"\"\"\r\n\r\nimport os\r\nimport os.path as osp\r\nfrom collections import OrderedDict\r\n\r\nfrom spyder.config.base import _\r\nfrom spyder.py3compat import to_text_string\r\nfrom spyder.plugins.projects.utils.config import (ProjectConfig, CODESTYLE,\r\n CODESTYLE_DEFAULTS,\r\n CODESTYLE_VERSION, WORKSPACE,\r\n WORKSPACE_DEFAULTS,\r\n WORKSPACE_VERSION,\r\n ENCODING, ENCODING_DEFAULTS,\r\n ENCODING_VERSION,\r\n VCS, VCS_DEFAULTS, VCS_VERSION)\r\n\r\n\r\nclass BaseProject(object):\r\n \"\"\"Spyder base 
project.\r\n\r\n This base class must not be used directly, but inherited from. It does not\r\n assume that python is specific to this project.\r\n \"\"\"\r\n PROJECT_FOLDER = '.spyproject'\r\n PROJECT_TYPE_NAME = None\r\n IGNORE_FILE = \"\"\r\n CONFIG_SETUP = {WORKSPACE: {'filename': '{0}.ini'.format(WORKSPACE),\r\n 'defaults': WORKSPACE_DEFAULTS,\r\n 'version': WORKSPACE_VERSION},\r\n CODESTYLE: {'filename': '{0}.ini'.format(CODESTYLE),\r\n 'defaults': CODESTYLE_DEFAULTS,\r\n 'version': CODESTYLE_VERSION},\r\n ENCODING: {'filename': '{0}.ini'.format(ENCODING),\r\n 'defaults': ENCODING_DEFAULTS,\r\n 'version': ENCODING_VERSION},\r\n VCS: {'filename': '{0}.ini'.format(VCS),\r\n 'defaults': VCS_DEFAULTS,\r\n 'version': VCS_VERSION}\r\n }\r\n\r\n def __init__(self, root_path):\r\n self.name = None\r\n self.root_path = root_path\r\n self.open_project_files = []\r\n self.open_non_project_files = []\r\n self.config_files = []\r\n self.CONF = {}\r\n\r\n # Configuration files\r\n\r\n self.related_projects = [] # storing project path, not project objects\r\n# self.pythonpath = []\r\n self.opened = True\r\n\r\n self.ioerror_flag = False\r\n self.create_project_config_files()\r\n\r\n # --- Helpers\r\n # -------------------------------------------------------------------------\r\n def set_recent_files(self, recent_files):\r\n \"\"\"Set a list of files opened by the project.\"\"\"\r\n for recent_file in recent_files[:]:\r\n if not os.path.isfile(recent_file):\r\n recent_files.remove(recent_file)\r\n recent_files = [os.path.relpath(recent_file, self.root_path)\r\n for recent_file in recent_files]\r\n try:\r\n self.CONF[WORKSPACE].set('main', 'recent_files',\r\n list(OrderedDict.fromkeys(recent_files)))\r\n except EnvironmentError:\r\n pass\r\n\r\n def get_recent_files(self):\r\n \"\"\"Return a list of files opened by the project.\"\"\"\r\n try:\r\n recent_files = self.CONF[WORKSPACE].get('main', 'recent_files',\r\n default=[])\r\n except EnvironmentError:\r\n return []\r\n\r\n recent_files = [recent_file if os.path.isabs(recent_file)\r\n else os.path.join(self.root_path, recent_file)\r\n for recent_file in recent_files]\r\n for recent_file in recent_files[:]:\r\n if not os.path.isfile(recent_file):\r\n recent_files.remove(recent_file)\r\n return list(OrderedDict.fromkeys(recent_files))\r\n\r\n def create_project_config_files(self):\r\n \"\"\" \"\"\"\r\n dic = self.CONFIG_SETUP\r\n for key in dic:\r\n name = key\r\n filename = dic[key]['filename']\r\n defaults = dic[key]['defaults']\r\n version = dic[key]['version']\r\n self.CONF[key] = ProjectConfig(name, self.root_path, filename,\r\n defaults=defaults, load=True,\r\n version=version)\r\n\r\n def get_conf_files(self):\r\n \"\"\" \"\"\"\r\n return self.CONF\r\n\r\n def add_ignore_lines(self, lines):\r\n \"\"\" \"\"\"\r\n text = self.IGNORE_FILE\r\n for line in lines:\r\n text += line\r\n self.IGNORE_FILE = text\r\n\r\n def set_root_path(self, root_path):\r\n \"\"\"Set project root path.\"\"\"\r\n if self.name is None:\r\n self.name = osp.basename(root_path)\r\n self.root_path = to_text_string(root_path)\r\n config_path = self.__get_project_config_path()\r\n if osp.exists(config_path):\r\n self.load()\r\n else:\r\n if not osp.isdir(self.root_path):\r\n os.mkdir(self.root_path)\r\n self.save()\r\n\r\n def rename(self, new_name):\r\n \"\"\"Rename project and rename its root path accordingly.\"\"\"\r\n old_name = self.name\r\n self.name = new_name\r\n pypath = self.relative_pythonpath # ??\r\n self.root_path = self.root_path[:-len(old_name)]+new_name\r\n 
self.relative_pythonpath = pypath # ??\r\n self.save()\r\n\r\n def __get_project_config_folder(self):\r\n \"\"\"Return project configuration folder.\"\"\"\r\n return osp.join(self.root_path, self.PROJECT_FOLDER)\r\n\r\n def __get_project_config_path(self):\r\n \"\"\"Return project configuration path\"\"\"\r\n return osp.join(self.root_path, self.CONFIG_NAME)\r\n\r\n def load(self):\r\n \"\"\"Load project data\"\"\"\r\n# fname = self.__get_project_config_path()\r\n# try:\r\n# # Old format (Spyder 2.0-2.1 for Python 2)\r\n# with open(fname, 'U') as fdesc:\r\n# data = pickle.loads(fdesc.read())\r\n# except (pickle.PickleError, TypeError, UnicodeDecodeError,\r\n# AttributeError):\r\n# try:\r\n# # New format (Spyder >=2.2 for Python 2 and Python 3)\r\n# with open(fname, 'rb') as fdesc:\r\n# data = pickle.loads(fdesc.read())\r\n# except (IOError, OSError, pickle.PickleError):\r\n# self.ioerror_flag = True\r\n# return\r\n # Compatibilty with old project explorer file format:\r\n# if 'relative_pythonpath' not in data:\r\n# print(\"Warning: converting old configuration file \"\r\n# \"for project '%s'\" % data['name'], file=STDERR)\r\n# self.pythonpath = data['pythonpath']\r\n# data['relative_pythonpath'] = self.relative_pythonpath\r\n# for attr in self.CONFIG_ATTR:\r\n# setattr(self, attr, data[attr])\r\n# self.save()\r\n\r\n def save(self):\r\n \"\"\"Save project data\"\"\"\r\n# data = {}\r\n# for attr in self.PROJECT_ATTR:\r\n# data[attr] = getattr(self, attr)\r\n# try:\r\n# with open(self.__get_project_config_path(), 'wb') as fdesc:\r\n# pickle.dump(data, fdesc, 2)\r\n# except (IOError, OSError):\r\n# self.ioerror_flag = True\r\n\r\n# def delete(self):\r\n# \"\"\"Delete project\"\"\"\r\n# os.remove(self.__get_project_config_path())\r\n#\r\n# # --- Misc.\r\n# def get_related_projects(self):\r\n# \"\"\"Return related projects path list\"\"\"\r\n# return self.related_projects\r\n#\r\n# def set_related_projects(self, related_projects):\r\n# \"\"\"Set related projects\"\"\"\r\n# self.related_projects = related_projects\r\n# self.save()\r\n#\r\n# def open(self):\r\n# \"\"\"Open project\"\"\"\r\n# self.opened = True\r\n# self.save()\r\n#\r\n# def close(self):\r\n# \"\"\"Close project\"\"\"\r\n# self.opened = False\r\n# self.save()\r\n#\r\n# def is_opened(self):\r\n# \"\"\"Return True if project is opened\"\"\"\r\n# return self.opened\r\n#\r\n# def is_file_in_project(self, fname):\r\n# \"\"\"Return True if file *fname* is in one of the project subfolders\"\"\"\r\n# fixed_root = fixpath(self.root_path)\r\n# return fixpath(fname) == fixed_root or\\\r\n# fixpath(osp.dirname(fname)).startswith(fixed_root)\r\n#\r\n# def is_root_path(self, dirname):\r\n# \"\"\"Return True if dirname is project's root path\"\"\"\r\n# return fixpath(dirname) == fixpath(self.root_path)\r\n\r\n\r\nclass EmptyProject(BaseProject):\r\n \"\"\"Empty Project\"\"\"\r\n PROJECT_TYPE_NAME = _('Empty project')\r\n", "path": "spyder/plugins/projects/api.py"}]}
num_tokens: 3,147
num_tokens_diff: 223
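The spyder patch in the row above boils down to a path round-trip: `set_recent_files()` writes entries relative to the project root into `workspace.ini`, and `get_recent_files()` joins them back onto whatever root the project currently lives under. A minimal standalone sketch of that round-trip, using the two directories mentioned in the issue report and plain functions instead of Spyder's `BaseProject`:

```python
import os


def to_relative(recent_files, root_path):
    # What the patched set_recent_files() stores in workspace.ini.
    # (Spyder also drops files that no longer exist on disk; that check is
    # omitted here so the example runs without touching the filesystem.)
    return [os.path.relpath(path, root_path) for path in recent_files]


def to_absolute(recent_files, root_path):
    # What the patched get_recent_files() reconstructs on load. Entries that
    # are already absolute are kept, so older workspace.ini files still work.
    return [path if os.path.isabs(path) else os.path.join(root_path, path)
            for path in recent_files]


home_root = "/home/mm/Software/Project"                # path from the issue
work_root = "/home/MickeyMouse/Work/Software/Project"  # path from the issue

stored = to_relative([home_root + "/src/main.py"], home_root)
print(stored)                          # ['src/main.py'] -- project is movable
print(to_absolute(stored, work_root))  # absolute path under the new root
```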
problem_id: gh_patches_debug_55949
source: rasdani/github-patches
task_type: git_diff
in_source_id: saleor__saleor-4062
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The voucher form doesn't show selected products ### Steps to reproduce the problem 1. Open Voucher form. 2. Set Discount type to specific product. 3. Choose the products you want. 4. Create the voucher. 5. Open the created voucher from the list of vouchers. What happens is the products field goes blank. ### What I expected to happen The chosen products must show up when viewing the created voucher. **System information** Operating system: Ubuntu 16.04 Browser: Chrome. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/dashboard/discount/forms.py` Content: ``` 1 from django import forms 2 from django.conf import settings 3 from django.urls import reverse_lazy 4 from django.utils.translation import pgettext_lazy 5 from django_countries import countries 6 from django_prices.forms import MoneyField 7 from mptt.forms import TreeNodeMultipleChoiceField 8 9 from ...core.utils.taxes import ZERO_MONEY 10 from ...discount import DiscountValueType 11 from ...discount.models import Sale, Voucher 12 from ...discount.utils import generate_voucher_code 13 from ...product.models import Category, Product 14 from ..forms import AjaxSelect2MultipleChoiceField 15 16 MinAmountSpent = MoneyField( 17 min_value=ZERO_MONEY, required=False, 18 currency=settings.DEFAULT_CURRENCY, 19 label=pgettext_lazy( 20 'Lowest value for order to be able to use the voucher', 21 'Apply only if the purchase value is greater than or equal to')) 22 23 24 class SaleForm(forms.ModelForm): 25 products = AjaxSelect2MultipleChoiceField( 26 queryset=Product.objects.all(), 27 fetch_data_url=reverse_lazy('dashboard:ajax-products'), 28 required=False, 29 label=pgettext_lazy('Discounted products', 'Discounted products')) 30 31 class Meta: 32 model = Sale 33 exclude = [] 34 labels = { 35 'name': pgettext_lazy( 36 'Sale name', 37 'Name'), 38 'type': pgettext_lazy( 39 'Discount type', 40 'Fixed or percentage'), 41 'start_date': pgettext_lazy( 42 'Sale date restrictions', 43 'Start date'), 44 'end_date': pgettext_lazy( 45 'Sale date restrictions', 46 'End date'), 47 'value': pgettext_lazy( 48 'Percentage or fixed amount value', 49 'Value'), 50 'categories': pgettext_lazy( 51 'Discounted categories', 52 'Discounted categories'), 53 'collections': pgettext_lazy( 54 'Discounted collections', 55 'Discounted collections')} 56 57 def __init__(self, *args, **kwargs): 58 super().__init__(*args, **kwargs) 59 if self.instance.pk: 60 self.fields['products'].set_initial(self.instance.products.all()) 61 62 def clean(self): 63 cleaned_data = super().clean() 64 discount_type = cleaned_data['type'] 65 value = cleaned_data['value'] 66 if discount_type == DiscountValueType.PERCENTAGE and value > 100: 67 self.add_error('value', pgettext_lazy( 68 'Sale (discount) error', 69 'Sale cannot exceed 100%')) 70 products = cleaned_data.get('products') 71 categories = cleaned_data.get('categories') 72 collections = cleaned_data.get('collections') 73 if not any([products, categories, collections]): 74 raise forms.ValidationError(pgettext_lazy( 75 'Sale (discount) error', 76 'A single sale must point to at least one product, collection' 77 'and/or category.')) 78 return cleaned_data 79 80 81 class VoucherForm(forms.ModelForm): 82 83 class Meta: 84 model = Voucher 85 exclude = [ 86 'min_amount_spent', 'countries', 'products', 'collections', 87 'categories', 'used'] 88 
labels = { 89 'type': pgettext_lazy( 90 'Discount type', 91 'Discount type'), 92 'name': pgettext_lazy( 93 'Item name', 94 'Name'), 95 'code': pgettext_lazy( 96 'Coupon code', 97 'Code'), 98 'usage_limit': pgettext_lazy( 99 'Usage limit', 100 'Usage limit'), 101 'start_date': pgettext_lazy( 102 'Voucher date restrictions', 103 'Start date'), 104 'end_date': pgettext_lazy( 105 'Voucher date restrictions', 106 'End date'), 107 'discount_value_type': pgettext_lazy( 108 'Discount type of the voucher', 109 'Discount type'), 110 'discount_value': pgettext_lazy( 111 'Discount value of the voucher', 112 'Discount value')} 113 114 def __init__(self, *args, **kwargs): 115 initial = kwargs.get('initial', {}) 116 instance = kwargs.get('instance') 117 if instance and instance.id is None and not initial.get('code'): 118 initial['code'] = generate_voucher_code() 119 kwargs['initial'] = initial 120 super().__init__(*args, **kwargs) 121 122 123 class ShippingVoucherForm(forms.ModelForm): 124 min_amount_spent = MinAmountSpent 125 countries = forms.MultipleChoiceField( 126 choices=countries, 127 required=False, 128 label=pgettext_lazy( 129 'Text above the dropdown of countries', 130 'Limit countries that voucher should apply to')) 131 132 class Meta: 133 model = Voucher 134 fields = ['countries', 'min_amount_spent'] 135 136 137 class ValueVoucherForm(forms.ModelForm): 138 min_amount_spent = MinAmountSpent 139 140 class Meta: 141 model = Voucher 142 fields = ['min_amount_spent'] 143 144 def save(self, commit=True): 145 self.instance.category = None 146 self.instance.countries = [] 147 self.instance.product = None 148 return super().save(commit) 149 150 151 class CommonVoucherForm(forms.ModelForm): 152 use_required_attribute = False 153 min_amount_spent = MinAmountSpent 154 apply_once_per_order = forms.BooleanField( 155 required=False, 156 label=pgettext_lazy( 157 'Field label, apply discount value only once per order', 158 'Only apply once per order'), 159 help_text=pgettext_lazy( 160 'Help text of checkbox for applying discount only once per order', 161 'If unchecked, discount value will be taken ' 162 'off each suitable item in an order.')) 163 164 165 class ProductVoucherForm(CommonVoucherForm): 166 products = AjaxSelect2MultipleChoiceField( 167 queryset=Product.objects.all(), 168 fetch_data_url=reverse_lazy('dashboard:ajax-products'), 169 required=True, 170 label=pgettext_lazy('Product', 'Products')) 171 172 class Meta: 173 model = Voucher 174 fields = ['products', 'apply_once_per_order'] 175 176 177 class CollectionVoucherForm(CommonVoucherForm): 178 179 class Meta: 180 model = Voucher 181 fields = ['collections', 'apply_once_per_order'] 182 labels = { 183 'collections': pgettext_lazy( 184 'Collections', 'Collections')} 185 186 def __init__(self, *args, **kwargs): 187 super().__init__(*args, **kwargs) 188 self.fields['collections'].required = True 189 190 191 class CategoryVoucherForm(CommonVoucherForm): 192 categories = TreeNodeMultipleChoiceField( 193 queryset=Category.objects.all(), 194 required=True, 195 label=pgettext_lazy('Categories', 'Categories')) 196 197 class Meta: 198 model = Voucher 199 fields = ['categories', 'apply_once_per_order'] 200 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/dashboard/discount/forms.py b/saleor/dashboard/discount/forms.py --- a/saleor/dashboard/discount/forms.py +++ b/saleor/dashboard/discount/forms.py @@ -173,6 +173,11 @@ model = Voucher fields = ['products', 'apply_once_per_order'] + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.instance.pk: + self.fields['products'].set_initial(self.instance.products.all()) + class CollectionVoucherForm(CommonVoucherForm):
{"golden_diff": "diff --git a/saleor/dashboard/discount/forms.py b/saleor/dashboard/discount/forms.py\n--- a/saleor/dashboard/discount/forms.py\n+++ b/saleor/dashboard/discount/forms.py\n@@ -173,6 +173,11 @@\n model = Voucher\n fields = ['products', 'apply_once_per_order']\n \n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ if self.instance.pk:\n+ self.fields['products'].set_initial(self.instance.products.all())\n+\n \n class CollectionVoucherForm(CommonVoucherForm):\n", "issue": "The voucher form doesn't show selected products\n### Steps to reproduce the problem\r\n1. Open Voucher form.\r\n2. Set Discount type to specific product.\r\n3. Choose the products you want.\r\n4. Create the voucher.\r\n5. Open the created voucher from the list of vouchers.\r\n\r\nWhat happens is the products field goes blank.\r\n\r\n### What I expected to happen\r\nThe chosen products must show up when viewing the created voucher.\r\n\r\n**System information**\r\nOperating system: Ubuntu 16.04\r\nBrowser: Chrome.\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.urls import reverse_lazy\nfrom django.utils.translation import pgettext_lazy\nfrom django_countries import countries\nfrom django_prices.forms import MoneyField\nfrom mptt.forms import TreeNodeMultipleChoiceField\n\nfrom ...core.utils.taxes import ZERO_MONEY\nfrom ...discount import DiscountValueType\nfrom ...discount.models import Sale, Voucher\nfrom ...discount.utils import generate_voucher_code\nfrom ...product.models import Category, Product\nfrom ..forms import AjaxSelect2MultipleChoiceField\n\nMinAmountSpent = MoneyField(\n min_value=ZERO_MONEY, required=False,\n currency=settings.DEFAULT_CURRENCY,\n label=pgettext_lazy(\n 'Lowest value for order to be able to use the voucher',\n 'Apply only if the purchase value is greater than or equal to'))\n\n\nclass SaleForm(forms.ModelForm):\n products = AjaxSelect2MultipleChoiceField(\n queryset=Product.objects.all(),\n fetch_data_url=reverse_lazy('dashboard:ajax-products'),\n required=False,\n label=pgettext_lazy('Discounted products', 'Discounted products'))\n\n class Meta:\n model = Sale\n exclude = []\n labels = {\n 'name': pgettext_lazy(\n 'Sale name',\n 'Name'),\n 'type': pgettext_lazy(\n 'Discount type',\n 'Fixed or percentage'),\n 'start_date': pgettext_lazy(\n 'Sale date restrictions',\n 'Start date'),\n 'end_date': pgettext_lazy(\n 'Sale date restrictions',\n 'End date'),\n 'value': pgettext_lazy(\n 'Percentage or fixed amount value',\n 'Value'),\n 'categories': pgettext_lazy(\n 'Discounted categories',\n 'Discounted categories'),\n 'collections': pgettext_lazy(\n 'Discounted collections',\n 'Discounted collections')}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.instance.pk:\n self.fields['products'].set_initial(self.instance.products.all())\n\n def clean(self):\n cleaned_data = super().clean()\n discount_type = cleaned_data['type']\n value = cleaned_data['value']\n if discount_type == DiscountValueType.PERCENTAGE and value > 100:\n self.add_error('value', pgettext_lazy(\n 'Sale (discount) error',\n 'Sale cannot exceed 100%'))\n products = cleaned_data.get('products')\n categories = cleaned_data.get('categories')\n collections = cleaned_data.get('collections')\n if not any([products, categories, collections]):\n raise forms.ValidationError(pgettext_lazy(\n 'Sale (discount) error',\n 'A single sale must point to at least one product, collection'\n 'and/or category.'))\n 
return cleaned_data\n\n\nclass VoucherForm(forms.ModelForm):\n\n class Meta:\n model = Voucher\n exclude = [\n 'min_amount_spent', 'countries', 'products', 'collections',\n 'categories', 'used']\n labels = {\n 'type': pgettext_lazy(\n 'Discount type',\n 'Discount type'),\n 'name': pgettext_lazy(\n 'Item name',\n 'Name'),\n 'code': pgettext_lazy(\n 'Coupon code',\n 'Code'),\n 'usage_limit': pgettext_lazy(\n 'Usage limit',\n 'Usage limit'),\n 'start_date': pgettext_lazy(\n 'Voucher date restrictions',\n 'Start date'),\n 'end_date': pgettext_lazy(\n 'Voucher date restrictions',\n 'End date'),\n 'discount_value_type': pgettext_lazy(\n 'Discount type of the voucher',\n 'Discount type'),\n 'discount_value': pgettext_lazy(\n 'Discount value of the voucher',\n 'Discount value')}\n\n def __init__(self, *args, **kwargs):\n initial = kwargs.get('initial', {})\n instance = kwargs.get('instance')\n if instance and instance.id is None and not initial.get('code'):\n initial['code'] = generate_voucher_code()\n kwargs['initial'] = initial\n super().__init__(*args, **kwargs)\n\n\nclass ShippingVoucherForm(forms.ModelForm):\n min_amount_spent = MinAmountSpent\n countries = forms.MultipleChoiceField(\n choices=countries,\n required=False,\n label=pgettext_lazy(\n 'Text above the dropdown of countries',\n 'Limit countries that voucher should apply to'))\n\n class Meta:\n model = Voucher\n fields = ['countries', 'min_amount_spent']\n\n\nclass ValueVoucherForm(forms.ModelForm):\n min_amount_spent = MinAmountSpent\n\n class Meta:\n model = Voucher\n fields = ['min_amount_spent']\n\n def save(self, commit=True):\n self.instance.category = None\n self.instance.countries = []\n self.instance.product = None\n return super().save(commit)\n\n\nclass CommonVoucherForm(forms.ModelForm):\n use_required_attribute = False\n min_amount_spent = MinAmountSpent\n apply_once_per_order = forms.BooleanField(\n required=False,\n label=pgettext_lazy(\n 'Field label, apply discount value only once per order',\n 'Only apply once per order'),\n help_text=pgettext_lazy(\n 'Help text of checkbox for applying discount only once per order',\n 'If unchecked, discount value will be taken '\n 'off each suitable item in an order.'))\n\n\nclass ProductVoucherForm(CommonVoucherForm):\n products = AjaxSelect2MultipleChoiceField(\n queryset=Product.objects.all(),\n fetch_data_url=reverse_lazy('dashboard:ajax-products'),\n required=True,\n label=pgettext_lazy('Product', 'Products'))\n\n class Meta:\n model = Voucher\n fields = ['products', 'apply_once_per_order']\n\n\nclass CollectionVoucherForm(CommonVoucherForm):\n\n class Meta:\n model = Voucher\n fields = ['collections', 'apply_once_per_order']\n labels = {\n 'collections': pgettext_lazy(\n 'Collections', 'Collections')}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['collections'].required = True\n\n\nclass CategoryVoucherForm(CommonVoucherForm):\n categories = TreeNodeMultipleChoiceField(\n queryset=Category.objects.all(),\n required=True,\n label=pgettext_lazy('Categories', 'Categories'))\n\n class Meta:\n model = Voucher\n fields = ['categories', 'apply_once_per_order']\n", "path": "saleor/dashboard/discount/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.urls import reverse_lazy\nfrom django.utils.translation import pgettext_lazy\nfrom django_countries import countries\nfrom django_prices.forms import MoneyField\nfrom mptt.forms import TreeNodeMultipleChoiceField\n\nfrom 
...core.utils.taxes import ZERO_MONEY\nfrom ...discount import DiscountValueType\nfrom ...discount.models import Sale, Voucher\nfrom ...discount.utils import generate_voucher_code\nfrom ...product.models import Category, Product\nfrom ..forms import AjaxSelect2MultipleChoiceField\n\nMinAmountSpent = MoneyField(\n min_value=ZERO_MONEY, required=False,\n currency=settings.DEFAULT_CURRENCY,\n label=pgettext_lazy(\n 'Lowest value for order to be able to use the voucher',\n 'Apply only if the purchase value is greater than or equal to'))\n\n\nclass SaleForm(forms.ModelForm):\n products = AjaxSelect2MultipleChoiceField(\n queryset=Product.objects.all(),\n fetch_data_url=reverse_lazy('dashboard:ajax-products'),\n required=False,\n label=pgettext_lazy('Discounted products', 'Discounted products'))\n\n class Meta:\n model = Sale\n exclude = []\n labels = {\n 'name': pgettext_lazy(\n 'Sale name',\n 'Name'),\n 'type': pgettext_lazy(\n 'Discount type',\n 'Fixed or percentage'),\n 'start_date': pgettext_lazy(\n 'Sale date restrictions',\n 'Start date'),\n 'end_date': pgettext_lazy(\n 'Sale date restrictions',\n 'End date'),\n 'value': pgettext_lazy(\n 'Percentage or fixed amount value',\n 'Value'),\n 'categories': pgettext_lazy(\n 'Discounted categories',\n 'Discounted categories'),\n 'collections': pgettext_lazy(\n 'Discounted collections',\n 'Discounted collections')}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.instance.pk:\n self.fields['products'].set_initial(self.instance.products.all())\n\n def clean(self):\n cleaned_data = super().clean()\n discount_type = cleaned_data['type']\n value = cleaned_data['value']\n if discount_type == DiscountValueType.PERCENTAGE and value > 100:\n self.add_error('value', pgettext_lazy(\n 'Sale (discount) error',\n 'Sale cannot exceed 100%'))\n products = cleaned_data.get('products')\n categories = cleaned_data.get('categories')\n collections = cleaned_data.get('collections')\n if not any([products, categories, collections]):\n raise forms.ValidationError(pgettext_lazy(\n 'Sale (discount) error',\n 'A single sale must point to at least one product, collection'\n 'and/or category.'))\n return cleaned_data\n\n\nclass VoucherForm(forms.ModelForm):\n\n class Meta:\n model = Voucher\n exclude = [\n 'min_amount_spent', 'countries', 'products', 'collections',\n 'categories', 'used']\n labels = {\n 'type': pgettext_lazy(\n 'Discount type',\n 'Discount type'),\n 'name': pgettext_lazy(\n 'Item name',\n 'Name'),\n 'code': pgettext_lazy(\n 'Coupon code',\n 'Code'),\n 'usage_limit': pgettext_lazy(\n 'Usage limit',\n 'Usage limit'),\n 'start_date': pgettext_lazy(\n 'Voucher date restrictions',\n 'Start date'),\n 'end_date': pgettext_lazy(\n 'Voucher date restrictions',\n 'End date'),\n 'discount_value_type': pgettext_lazy(\n 'Discount type of the voucher',\n 'Discount type'),\n 'discount_value': pgettext_lazy(\n 'Discount value of the voucher',\n 'Discount value')}\n\n def __init__(self, *args, **kwargs):\n initial = kwargs.get('initial', {})\n instance = kwargs.get('instance')\n if instance and instance.id is None and not initial.get('code'):\n initial['code'] = generate_voucher_code()\n kwargs['initial'] = initial\n super().__init__(*args, **kwargs)\n\n\nclass ShippingVoucherForm(forms.ModelForm):\n min_amount_spent = MinAmountSpent\n countries = forms.MultipleChoiceField(\n choices=countries,\n required=False,\n label=pgettext_lazy(\n 'Text above the dropdown of countries',\n 'Limit countries that voucher should apply to'))\n\n class 
Meta:\n model = Voucher\n fields = ['countries', 'min_amount_spent']\n\n\nclass ValueVoucherForm(forms.ModelForm):\n min_amount_spent = MinAmountSpent\n\n class Meta:\n model = Voucher\n fields = ['min_amount_spent']\n\n def save(self, commit=True):\n self.instance.category = None\n self.instance.countries = []\n self.instance.product = None\n return super().save(commit)\n\n\nclass CommonVoucherForm(forms.ModelForm):\n use_required_attribute = False\n min_amount_spent = MinAmountSpent\n apply_once_per_order = forms.BooleanField(\n required=False,\n label=pgettext_lazy(\n 'Field label, apply discount value only once per order',\n 'Only apply once per order'),\n help_text=pgettext_lazy(\n 'Help text of checkbox for applying discount only once per order',\n 'If unchecked, discount value will be taken '\n 'off each suitable item in an order.'))\n\n\nclass ProductVoucherForm(CommonVoucherForm):\n products = AjaxSelect2MultipleChoiceField(\n queryset=Product.objects.all(),\n fetch_data_url=reverse_lazy('dashboard:ajax-products'),\n required=True,\n label=pgettext_lazy('Product', 'Products'))\n\n class Meta:\n model = Voucher\n fields = ['products', 'apply_once_per_order']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.instance.pk:\n self.fields['products'].set_initial(self.instance.products.all())\n\n\nclass CollectionVoucherForm(CommonVoucherForm):\n\n class Meta:\n model = Voucher\n fields = ['collections', 'apply_once_per_order']\n labels = {\n 'collections': pgettext_lazy(\n 'Collections', 'Collections')}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['collections'].required = True\n\n\nclass CategoryVoucherForm(CommonVoucherForm):\n categories = TreeNodeMultipleChoiceField(\n queryset=Category.objects.all(),\n required=True,\n label=pgettext_lazy('Categories', 'Categories'))\n\n class Meta:\n model = Voucher\n fields = ['categories', 'apply_once_per_order']\n", "path": "saleor/dashboard/discount/forms.py"}]}
num_tokens: 2,244
num_tokens_diff: 136
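The saleor fix in the row above adds to `ProductVoucherForm` the same `set_initial()` wiring that `SaleForm` already had: the ajax field renders only what `set_initial()` handed it, not whatever relation is saved on the instance, so the edit view showed an empty picker. The toy model below reproduces that mechanism without Django; the class and method names mirror the real ones, but the bodies are stand-ins invented for this example.

```python
class AjaxSelect2MultipleChoiceField:
    # Stand-in: the dashboard widget shows only what set_initial() gave it,
    # not whatever happens to be stored on the model instance.
    def __init__(self):
        self.initial = []

    def set_initial(self, values):
        self.initial = list(values)


class BuggyProductVoucherForm:
    def __init__(self, instance):
        self.instance = instance
        self.fields = {"products": AjaxSelect2MultipleChoiceField()}
        # No set_initial() call -> the products picker starts empty on edit.


class FixedProductVoucherForm(BuggyProductVoucherForm):
    def __init__(self, instance):
        super().__init__(instance)
        if self.instance.get("pk"):  # editing an existing voucher
            self.fields["products"].set_initial(self.instance["products"])


voucher = {"pk": 1, "products": ["Mug", "T-shirt"]}  # hypothetical saved voucher
print(BuggyProductVoucherForm(voucher).fields["products"].initial)  # []
print(FixedProductVoucherForm(voucher).fields["products"].initial)  # ['Mug', 'T-shirt']
```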
problem_id: gh_patches_debug_13683
source: rasdani/github-patches
task_type: git_diff
in_source_id: DataDog__dd-trace-py-2753
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- UnicodeDecodeError for invalid headers in ASGI middleware When the ASGI middleware receives headers with invalid encoding, for example as the result of somebody fuzzing an endpoint as is the case here, it introduces a Unicode decode error into the HTTP pipeline. It shouldn't be doing this. ### Which version of dd-trace-py are you using? 0.46.0 ### Which version of the libraries are you using? What I believe are the potentially relevant libraries are listed below. ``` datadog==0.40.1 ... ddtrace==0.46.0 ... fastapi==0.63.0 ... httpcore==0.12.3 httptools==0.1.1 ... starlette==0.13.6 ... uvicorn==0.13.4 uvloop==0.15.2 ``` ### How can we reproduce your problem? ```python from ddtrace.contrib.asgi.middleware import _extract_headers def test_ddtrace_invalid_header_name_encoding(): assert _extract_headers({"headers": [((bytes.fromhex("c0")), "test")]}) == {} def test_ddtrace_invalid_header_value_encoding(): assert _extract_headers({"headers": [("test", (bytes.fromhex("c0")))]}) == {} ``` ### What is the result that you get? The middleware blows up with a `UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc0 in position 0: invalid start byte` exception. ### What is the result that you expected? This is trace middleware so it should not introduce exceptions into request processing. In the case of a header it can't decode then I'd expect it to either: 1. Omit the invalid header from the result and continue attempting to decode the remainder, or 2. Substitute a sentinel value for the unprocessable header to indicate an error I've suggested (1) in the repro above, as that seems like it'd be the least likely to cause any problems. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ddtrace/contrib/asgi/middleware.py` Content: ``` 1 import sys 2 3 import ddtrace 4 from ddtrace import config 5 from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY 6 from ddtrace.ext import SpanTypes 7 from ddtrace.ext import http 8 9 from .. 
import trace_utils 10 from ...internal.compat import reraise 11 from ...internal.logger import get_logger 12 from .utils import guarantee_single_callable 13 14 15 log = get_logger(__name__) 16 17 config._add( 18 "asgi", 19 dict(service_name=config._get_service(default="asgi"), request_span_name="asgi.request", distributed_tracing=True), 20 ) 21 22 ASGI_VERSION = "asgi.version" 23 ASGI_SPEC_VERSION = "asgi.spec_version" 24 25 26 def bytes_to_str(str_or_bytes): 27 return str_or_bytes.decode() if isinstance(str_or_bytes, bytes) else str_or_bytes 28 29 30 def _extract_versions_from_scope(scope, integration_config): 31 tags = {} 32 33 http_version = scope.get("http_version") 34 if http_version: 35 tags[http.VERSION] = http_version 36 37 scope_asgi = scope.get("asgi") 38 39 if scope_asgi and "version" in scope_asgi: 40 tags[ASGI_VERSION] = scope_asgi["version"] 41 42 if scope_asgi and "spec_version" in scope_asgi: 43 tags[ASGI_SPEC_VERSION] = scope_asgi["spec_version"] 44 45 return tags 46 47 48 def _extract_headers(scope): 49 headers = scope.get("headers") 50 if headers: 51 # headers: (Iterable[[byte string, byte string]]) 52 return dict((bytes_to_str(k), bytes_to_str(v)) for (k, v) in headers) 53 return {} 54 55 56 def _default_handle_exception_span(exc, span): 57 """Default handler for exception for span""" 58 span.set_tag(http.STATUS_CODE, 500) 59 60 61 class TraceMiddleware: 62 """ 63 ASGI application middleware that traces the requests. 64 65 Args: 66 app: The ASGI application. 67 tracer: Custom tracer. Defaults to the global tracer. 68 """ 69 70 def __init__( 71 self, 72 app, 73 tracer=None, 74 integration_config=config.asgi, 75 handle_exception_span=_default_handle_exception_span, 76 span_modifier=None, 77 ): 78 self.app = guarantee_single_callable(app) 79 self.tracer = tracer or ddtrace.tracer 80 self.integration_config = integration_config 81 self.handle_exception_span = handle_exception_span 82 self.span_modifier = span_modifier 83 84 async def __call__(self, scope, receive, send): 85 if scope["type"] != "http": 86 return await self.app(scope, receive, send) 87 88 headers = _extract_headers(scope) 89 90 trace_utils.activate_distributed_headers( 91 self.tracer, int_config=self.integration_config, request_headers=headers 92 ) 93 94 resource = "{} {}".format(scope["method"], scope["path"]) 95 96 span = self.tracer.trace( 97 name=self.integration_config.get("request_span_name", "asgi.request"), 98 service=trace_utils.int_service(None, self.integration_config), 99 resource=resource, 100 span_type=SpanTypes.WEB, 101 ) 102 103 if self.span_modifier: 104 self.span_modifier(span, scope) 105 106 sample_rate = self.integration_config.get_analytics_sample_rate(use_global_config=True) 107 if sample_rate is not None: 108 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate) 109 110 method = scope.get("method") 111 server = scope.get("server") 112 if server and len(server) == 2: 113 port = server[1] 114 server_host = server[0] + (":" + str(port) if port is not None and port != 80 else "") 115 full_path = scope.get("root_path", "") + scope.get("path", "") 116 url = scope.get("scheme", "http") + "://" + server_host + full_path 117 else: 118 url = None 119 120 if self.integration_config.trace_query_string: 121 query_string = scope.get("query_string") 122 if len(query_string) > 0: 123 query_string = bytes_to_str(query_string) 124 else: 125 query_string = None 126 127 trace_utils.set_http_meta( 128 span, self.integration_config, method=method, url=url, query=query_string, request_headers=headers 129 ) 130 
131 tags = _extract_versions_from_scope(scope, self.integration_config) 132 span.set_tags(tags) 133 134 async def wrapped_send(message): 135 if span and message.get("type") == "http.response.start" and "status" in message: 136 status_code = message["status"] 137 else: 138 status_code = None 139 140 if "headers" in message: 141 response_headers = message["headers"] 142 else: 143 response_headers = None 144 145 trace_utils.set_http_meta( 146 span, self.integration_config, status_code=status_code, response_headers=response_headers 147 ) 148 149 return await send(message) 150 151 try: 152 return await self.app(scope, receive, wrapped_send) 153 except Exception as exc: 154 (exc_type, exc_val, exc_tb) = sys.exc_info() 155 span.set_exc_info(exc_type, exc_val, exc_tb) 156 self.handle_exception_span(exc, span) 157 reraise(exc_type, exc_val, exc_tb) 158 finally: 159 span.finish() 160 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ddtrace/contrib/asgi/middleware.py b/ddtrace/contrib/asgi/middleware.py --- a/ddtrace/contrib/asgi/middleware.py +++ b/ddtrace/contrib/asgi/middleware.py @@ -85,11 +85,15 @@ if scope["type"] != "http": return await self.app(scope, receive, send) - headers = _extract_headers(scope) - - trace_utils.activate_distributed_headers( - self.tracer, int_config=self.integration_config, request_headers=headers - ) + try: + headers = _extract_headers(scope) + except Exception: + log.warning("failed to decode headers for distributed tracing", exc_info=True) + headers = {} + else: + trace_utils.activate_distributed_headers( + self.tracer, int_config=self.integration_config, request_headers=headers + ) resource = "{} {}".format(scope["method"], scope["path"])
{"golden_diff": "diff --git a/ddtrace/contrib/asgi/middleware.py b/ddtrace/contrib/asgi/middleware.py\n--- a/ddtrace/contrib/asgi/middleware.py\n+++ b/ddtrace/contrib/asgi/middleware.py\n@@ -85,11 +85,15 @@\n if scope[\"type\"] != \"http\":\n return await self.app(scope, receive, send)\n \n- headers = _extract_headers(scope)\n-\n- trace_utils.activate_distributed_headers(\n- self.tracer, int_config=self.integration_config, request_headers=headers\n- )\n+ try:\n+ headers = _extract_headers(scope)\n+ except Exception:\n+ log.warning(\"failed to decode headers for distributed tracing\", exc_info=True)\n+ headers = {}\n+ else:\n+ trace_utils.activate_distributed_headers(\n+ self.tracer, int_config=self.integration_config, request_headers=headers\n+ )\n \n resource = \"{} {}\".format(scope[\"method\"], scope[\"path\"])\n", "issue": "UnicodeDecodeError for invalid headers in ASGI middleware\nWhen the ASGI middleware receives headers with invalid encoding, for example as the result of somebody fuzzing an endpoint as is the case here, it introduces a Unicode decode error into the HTTP pipeline. It shouldn't be doing this.\r\n\r\n### Which version of dd-trace-py are you using?\r\n\r\n0.46.0\r\n\r\n### Which version of the libraries are you using?\r\n\r\nWhat I believe are the potentially relevant libraries are listed below.\r\n\r\n```\r\ndatadog==0.40.1\r\n...\r\nddtrace==0.46.0\r\n...\r\nfastapi==0.63.0\r\n...\r\nhttpcore==0.12.3\r\nhttptools==0.1.1\r\n...\r\nstarlette==0.13.6\r\n...\r\nuvicorn==0.13.4\r\nuvloop==0.15.2\r\n```\r\n\r\n### How can we reproduce your problem?\r\n\r\n```python\r\nfrom ddtrace.contrib.asgi.middleware import _extract_headers\r\n\r\n\r\ndef test_ddtrace_invalid_header_name_encoding():\r\n assert _extract_headers({\"headers\": [((bytes.fromhex(\"c0\")), \"test\")]}) == {}\r\n\r\n\r\ndef test_ddtrace_invalid_header_value_encoding():\r\n assert _extract_headers({\"headers\": [(\"test\", (bytes.fromhex(\"c0\")))]}) == {}\r\n```\r\n\r\n### What is the result that you get?\r\n\r\nThe middleware blows up with a `UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc0 in position 0: invalid start byte` exception.\r\n\r\n### What is the result that you expected?\r\n\r\nThis is trace middleware so it should not introduce exceptions into request processing. In the case of a header it can't decode then I'd expect it to either:\r\n\r\n1. Omit the invalid header from the result and continue attempting to decode the remainder, or\r\n2. Substitute a sentinel value for the unprocessable header to indicate an error\r\n\r\nI've suggested (1) in the repro above, as that seems like it'd be the least likely to cause any problems.\n", "before_files": [{"content": "import sys\n\nimport ddtrace\nfrom ddtrace import config\nfrom ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ddtrace.ext import SpanTypes\nfrom ddtrace.ext import http\n\nfrom .. 
import trace_utils\nfrom ...internal.compat import reraise\nfrom ...internal.logger import get_logger\nfrom .utils import guarantee_single_callable\n\n\nlog = get_logger(__name__)\n\nconfig._add(\n \"asgi\",\n dict(service_name=config._get_service(default=\"asgi\"), request_span_name=\"asgi.request\", distributed_tracing=True),\n)\n\nASGI_VERSION = \"asgi.version\"\nASGI_SPEC_VERSION = \"asgi.spec_version\"\n\n\ndef bytes_to_str(str_or_bytes):\n return str_or_bytes.decode() if isinstance(str_or_bytes, bytes) else str_or_bytes\n\n\ndef _extract_versions_from_scope(scope, integration_config):\n tags = {}\n\n http_version = scope.get(\"http_version\")\n if http_version:\n tags[http.VERSION] = http_version\n\n scope_asgi = scope.get(\"asgi\")\n\n if scope_asgi and \"version\" in scope_asgi:\n tags[ASGI_VERSION] = scope_asgi[\"version\"]\n\n if scope_asgi and \"spec_version\" in scope_asgi:\n tags[ASGI_SPEC_VERSION] = scope_asgi[\"spec_version\"]\n\n return tags\n\n\ndef _extract_headers(scope):\n headers = scope.get(\"headers\")\n if headers:\n # headers: (Iterable[[byte string, byte string]])\n return dict((bytes_to_str(k), bytes_to_str(v)) for (k, v) in headers)\n return {}\n\n\ndef _default_handle_exception_span(exc, span):\n \"\"\"Default handler for exception for span\"\"\"\n span.set_tag(http.STATUS_CODE, 500)\n\n\nclass TraceMiddleware:\n \"\"\"\n ASGI application middleware that traces the requests.\n\n Args:\n app: The ASGI application.\n tracer: Custom tracer. Defaults to the global tracer.\n \"\"\"\n\n def __init__(\n self,\n app,\n tracer=None,\n integration_config=config.asgi,\n handle_exception_span=_default_handle_exception_span,\n span_modifier=None,\n ):\n self.app = guarantee_single_callable(app)\n self.tracer = tracer or ddtrace.tracer\n self.integration_config = integration_config\n self.handle_exception_span = handle_exception_span\n self.span_modifier = span_modifier\n\n async def __call__(self, scope, receive, send):\n if scope[\"type\"] != \"http\":\n return await self.app(scope, receive, send)\n\n headers = _extract_headers(scope)\n\n trace_utils.activate_distributed_headers(\n self.tracer, int_config=self.integration_config, request_headers=headers\n )\n\n resource = \"{} {}\".format(scope[\"method\"], scope[\"path\"])\n\n span = self.tracer.trace(\n name=self.integration_config.get(\"request_span_name\", \"asgi.request\"),\n service=trace_utils.int_service(None, self.integration_config),\n resource=resource,\n span_type=SpanTypes.WEB,\n )\n\n if self.span_modifier:\n self.span_modifier(span, scope)\n\n sample_rate = self.integration_config.get_analytics_sample_rate(use_global_config=True)\n if sample_rate is not None:\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)\n\n method = scope.get(\"method\")\n server = scope.get(\"server\")\n if server and len(server) == 2:\n port = server[1]\n server_host = server[0] + (\":\" + str(port) if port is not None and port != 80 else \"\")\n full_path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n url = scope.get(\"scheme\", \"http\") + \"://\" + server_host + full_path\n else:\n url = None\n\n if self.integration_config.trace_query_string:\n query_string = scope.get(\"query_string\")\n if len(query_string) > 0:\n query_string = bytes_to_str(query_string)\n else:\n query_string = None\n\n trace_utils.set_http_meta(\n span, self.integration_config, method=method, url=url, query=query_string, request_headers=headers\n )\n\n tags = _extract_versions_from_scope(scope, self.integration_config)\n 
span.set_tags(tags)\n\n async def wrapped_send(message):\n if span and message.get(\"type\") == \"http.response.start\" and \"status\" in message:\n status_code = message[\"status\"]\n else:\n status_code = None\n\n if \"headers\" in message:\n response_headers = message[\"headers\"]\n else:\n response_headers = None\n\n trace_utils.set_http_meta(\n span, self.integration_config, status_code=status_code, response_headers=response_headers\n )\n\n return await send(message)\n\n try:\n return await self.app(scope, receive, wrapped_send)\n except Exception as exc:\n (exc_type, exc_val, exc_tb) = sys.exc_info()\n span.set_exc_info(exc_type, exc_val, exc_tb)\n self.handle_exception_span(exc, span)\n reraise(exc_type, exc_val, exc_tb)\n finally:\n span.finish()\n", "path": "ddtrace/contrib/asgi/middleware.py"}], "after_files": [{"content": "import sys\n\nimport ddtrace\nfrom ddtrace import config\nfrom ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ddtrace.ext import SpanTypes\nfrom ddtrace.ext import http\n\nfrom .. import trace_utils\nfrom ...internal.compat import reraise\nfrom ...internal.logger import get_logger\nfrom .utils import guarantee_single_callable\n\n\nlog = get_logger(__name__)\n\nconfig._add(\n \"asgi\",\n dict(service_name=config._get_service(default=\"asgi\"), request_span_name=\"asgi.request\", distributed_tracing=True),\n)\n\nASGI_VERSION = \"asgi.version\"\nASGI_SPEC_VERSION = \"asgi.spec_version\"\n\n\ndef bytes_to_str(str_or_bytes):\n return str_or_bytes.decode() if isinstance(str_or_bytes, bytes) else str_or_bytes\n\n\ndef _extract_versions_from_scope(scope, integration_config):\n tags = {}\n\n http_version = scope.get(\"http_version\")\n if http_version:\n tags[http.VERSION] = http_version\n\n scope_asgi = scope.get(\"asgi\")\n\n if scope_asgi and \"version\" in scope_asgi:\n tags[ASGI_VERSION] = scope_asgi[\"version\"]\n\n if scope_asgi and \"spec_version\" in scope_asgi:\n tags[ASGI_SPEC_VERSION] = scope_asgi[\"spec_version\"]\n\n return tags\n\n\ndef _extract_headers(scope):\n headers = scope.get(\"headers\")\n if headers:\n # headers: (Iterable[[byte string, byte string]])\n return dict((bytes_to_str(k), bytes_to_str(v)) for (k, v) in headers)\n return {}\n\n\ndef _default_handle_exception_span(exc, span):\n \"\"\"Default handler for exception for span\"\"\"\n span.set_tag(http.STATUS_CODE, 500)\n\n\nclass TraceMiddleware:\n \"\"\"\n ASGI application middleware that traces the requests.\n\n Args:\n app: The ASGI application.\n tracer: Custom tracer. 
Defaults to the global tracer.\n \"\"\"\n\n def __init__(\n self,\n app,\n tracer=None,\n integration_config=config.asgi,\n handle_exception_span=_default_handle_exception_span,\n span_modifier=None,\n ):\n self.app = guarantee_single_callable(app)\n self.tracer = tracer or ddtrace.tracer\n self.integration_config = integration_config\n self.handle_exception_span = handle_exception_span\n self.span_modifier = span_modifier\n\n async def __call__(self, scope, receive, send):\n if scope[\"type\"] != \"http\":\n return await self.app(scope, receive, send)\n\n try:\n headers = _extract_headers(scope)\n except Exception:\n log.warning(\"failed to decode headers for distributed tracing\", exc_info=True)\n headers = {}\n else:\n trace_utils.activate_distributed_headers(\n self.tracer, int_config=self.integration_config, request_headers=headers\n )\n\n resource = \"{} {}\".format(scope[\"method\"], scope[\"path\"])\n\n span = self.tracer.trace(\n name=self.integration_config.get(\"request_span_name\", \"asgi.request\"),\n service=trace_utils.int_service(None, self.integration_config),\n resource=resource,\n span_type=SpanTypes.WEB,\n )\n\n if self.span_modifier:\n self.span_modifier(span, scope)\n\n sample_rate = self.integration_config.get_analytics_sample_rate(use_global_config=True)\n if sample_rate is not None:\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)\n\n method = scope.get(\"method\")\n server = scope.get(\"server\")\n if server and len(server) == 2:\n port = server[1]\n server_host = server[0] + (\":\" + str(port) if port is not None and port != 80 else \"\")\n full_path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n url = scope.get(\"scheme\", \"http\") + \"://\" + server_host + full_path\n else:\n url = None\n\n if self.integration_config.trace_query_string:\n query_string = scope.get(\"query_string\")\n if len(query_string) > 0:\n query_string = bytes_to_str(query_string)\n else:\n query_string = None\n\n trace_utils.set_http_meta(\n span, self.integration_config, method=method, url=url, query=query_string, request_headers=headers\n )\n\n tags = _extract_versions_from_scope(scope, self.integration_config)\n span.set_tags(tags)\n\n async def wrapped_send(message):\n if span and message.get(\"type\") == \"http.response.start\" and \"status\" in message:\n status_code = message[\"status\"]\n else:\n status_code = None\n\n if \"headers\" in message:\n response_headers = message[\"headers\"]\n else:\n response_headers = None\n\n trace_utils.set_http_meta(\n span, self.integration_config, status_code=status_code, response_headers=response_headers\n )\n\n return await send(message)\n\n try:\n return await self.app(scope, receive, wrapped_send)\n except Exception as exc:\n (exc_type, exc_val, exc_tb) = sys.exc_info()\n span.set_exc_info(exc_type, exc_val, exc_tb)\n self.handle_exception_span(exc, span)\n reraise(exc_type, exc_val, exc_tb)\n finally:\n span.finish()\n", "path": "ddtrace/contrib/asgi/middleware.py"}]}
num_tokens: 2,179
num_tokens_diff: 209
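The dd-trace-py patch in the row above is a containment fix: header decoding stays as it was, but any failure is logged and swallowed so tracing can never turn a fuzzed header into a 500. A runnable sketch of the failure mode and the guard, with a hand-built ASGI `scope` standing in for what a real server would pass:

```python
import logging

log = logging.getLogger("asgi.sketch")


def bytes_to_str(value):
    return value.decode() if isinstance(value, bytes) else value


def extract_headers(scope):
    # Same shape as the middleware helper: raises UnicodeDecodeError for
    # non-UTF-8 header bytes such as b"\xc0".
    return {bytes_to_str(k): bytes_to_str(v) for k, v in scope.get("headers", [])}


def extract_headers_guarded(scope):
    # Mirrors the patched middleware: fall back to an empty mapping (and skip
    # distributed-header activation) rather than break request processing.
    try:
        return extract_headers(scope)
    except Exception:
        log.warning("failed to decode headers for distributed tracing", exc_info=True)
        return {}


fuzzed_scope = {"headers": [(bytes.fromhex("c0"), b"test")]}
print(extract_headers_guarded(fuzzed_scope))  # {} instead of an exception
```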
problem_id: gh_patches_debug_15365
source: rasdani/github-patches
task_type: git_diff
in_source_id: uclapi__uclapi-1028
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Medium Articles Bug Bug in getting medium articles on the homepage --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `backend/uclapi/dashboard/app_helpers.py` Content: ``` 1 from binascii import hexlify 2 from random import SystemRandom 3 4 from common.helpers import generate_api_token 5 from uclapi.settings import ( 6 MEDIUM_ARTICLE_QUANTITY, 7 REDIS_UCLAPI_HOST 8 ) 9 10 import os 11 import redis 12 import textwrap 13 import validators 14 15 16 def get_articles(): 17 r = redis.Redis(host=REDIS_UCLAPI_HOST) 18 pipe = r.pipeline() 19 articles = [] 20 for i in range(0, MEDIUM_ARTICLE_QUANTITY): 21 articles.append({}) 22 redis_key_url = "Blog:item:{}:url".format(i) 23 redis_key_title = "Blog:item:{}:title".format(i) 24 pipe.get(redis_key_url) 25 pipe.get(redis_key_title) 26 redis_response = pipe.execute() 27 for i in range(0, MEDIUM_ARTICLE_QUANTITY): 28 articles[i]['url'] = redis_response[i*2].decode("utf-8") 29 articles[i]['title'] = redis_response[i*2+1].decode("utf-8") 30 return articles 31 32 33 def generate_temp_api_token(): 34 return generate_api_token("temp") 35 36 37 def get_temp_token(): 38 r = redis.Redis(host=REDIS_UCLAPI_HOST) 39 40 token = generate_temp_api_token() 41 # We initialise a new temporary token and set it to 1 42 # as it is generated at its first usage. 43 r.set(token, 1, 600) 44 return token 45 46 47 def generate_app_id(): 48 key = hexlify(os.urandom(5)).decode() 49 final = "A" + key 50 51 return final 52 53 54 def generate_app_client_id(): 55 sr = SystemRandom() 56 57 client_id = '{}.{}'.format( 58 ''.join(str(sr.randint(0, 9)) for _ in range(16)), 59 ''.join(str(sr.randint(0, 9)) for _ in range(16)) 60 ) 61 62 return client_id 63 64 65 def generate_app_client_secret(): 66 client_secret = hexlify(os.urandom(32)).decode() 67 return client_secret 68 69 70 def is_url_safe(url): 71 if not url.startswith("https://"): 72 return False 73 74 if not validators.url(url, public=True): 75 return False 76 77 whitelist_urls = os.environ["WHITELISTED_CALLBACK_URLS"].split(';') 78 if url in whitelist_urls: 79 return True 80 81 forbidden_urls = os.environ["FORBIDDEN_CALLBACK_URLS"].split(';') 82 for furl in forbidden_urls: 83 if furl in url: 84 return False 85 86 return True 87 88 89 def generate_secret(): 90 key = hexlify(os.urandom(30)).decode() 91 dashed = '-'.join(textwrap.wrap(key, 15)) 92 93 return dashed 94 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/backend/uclapi/dashboard/app_helpers.py b/backend/uclapi/dashboard/app_helpers.py --- a/backend/uclapi/dashboard/app_helpers.py +++ b/backend/uclapi/dashboard/app_helpers.py @@ -4,9 +4,10 @@ from common.helpers import generate_api_token from uclapi.settings import ( MEDIUM_ARTICLE_QUANTITY, - REDIS_UCLAPI_HOST + REDIS_UCLAPI_HOST, + DEBUG ) - +from django.core.management import call_command import os import redis import textwrap @@ -15,6 +16,11 @@ def get_articles(): r = redis.Redis(host=REDIS_UCLAPI_HOST) + if not r.exists("Blog:item:1:url"): + if DEBUG: + call_command('update_medium') + else: + return [] pipe = r.pipeline() articles = [] for i in range(0, MEDIUM_ARTICLE_QUANTITY):
{"golden_diff": "diff --git a/backend/uclapi/dashboard/app_helpers.py b/backend/uclapi/dashboard/app_helpers.py\n--- a/backend/uclapi/dashboard/app_helpers.py\n+++ b/backend/uclapi/dashboard/app_helpers.py\n@@ -4,9 +4,10 @@\n from common.helpers import generate_api_token\n from uclapi.settings import (\n MEDIUM_ARTICLE_QUANTITY,\n- REDIS_UCLAPI_HOST\n+ REDIS_UCLAPI_HOST,\n+ DEBUG\n )\n-\n+from django.core.management import call_command\n import os\n import redis\n import textwrap\n@@ -15,6 +16,11 @@\n \n def get_articles():\n r = redis.Redis(host=REDIS_UCLAPI_HOST)\n+ if not r.exists(\"Blog:item:1:url\"):\n+ if DEBUG:\n+ call_command('update_medium')\n+ else:\n+ return []\n pipe = r.pipeline()\n articles = []\n for i in range(0, MEDIUM_ARTICLE_QUANTITY):\n", "issue": "Medium Articles Bug\nBug in getting medium articles on the homepage\n", "before_files": [{"content": "from binascii import hexlify\nfrom random import SystemRandom\n\nfrom common.helpers import generate_api_token\nfrom uclapi.settings import (\n MEDIUM_ARTICLE_QUANTITY,\n REDIS_UCLAPI_HOST\n)\n\nimport os\nimport redis\nimport textwrap\nimport validators\n\n\ndef get_articles():\n r = redis.Redis(host=REDIS_UCLAPI_HOST)\n pipe = r.pipeline()\n articles = []\n for i in range(0, MEDIUM_ARTICLE_QUANTITY):\n articles.append({})\n redis_key_url = \"Blog:item:{}:url\".format(i)\n redis_key_title = \"Blog:item:{}:title\".format(i)\n pipe.get(redis_key_url)\n pipe.get(redis_key_title)\n redis_response = pipe.execute()\n for i in range(0, MEDIUM_ARTICLE_QUANTITY):\n articles[i]['url'] = redis_response[i*2].decode(\"utf-8\")\n articles[i]['title'] = redis_response[i*2+1].decode(\"utf-8\")\n return articles\n\n\ndef generate_temp_api_token():\n return generate_api_token(\"temp\")\n\n\ndef get_temp_token():\n r = redis.Redis(host=REDIS_UCLAPI_HOST)\n\n token = generate_temp_api_token()\n # We initialise a new temporary token and set it to 1\n # as it is generated at its first usage.\n r.set(token, 1, 600)\n return token\n\n\ndef generate_app_id():\n key = hexlify(os.urandom(5)).decode()\n final = \"A\" + key\n\n return final\n\n\ndef generate_app_client_id():\n sr = SystemRandom()\n\n client_id = '{}.{}'.format(\n ''.join(str(sr.randint(0, 9)) for _ in range(16)),\n ''.join(str(sr.randint(0, 9)) for _ in range(16))\n )\n\n return client_id\n\n\ndef generate_app_client_secret():\n client_secret = hexlify(os.urandom(32)).decode()\n return client_secret\n\n\ndef is_url_safe(url):\n if not url.startswith(\"https://\"):\n return False\n\n if not validators.url(url, public=True):\n return False\n\n whitelist_urls = os.environ[\"WHITELISTED_CALLBACK_URLS\"].split(';')\n if url in whitelist_urls:\n return True\n\n forbidden_urls = os.environ[\"FORBIDDEN_CALLBACK_URLS\"].split(';')\n for furl in forbidden_urls:\n if furl in url:\n return False\n\n return True\n\n\ndef generate_secret():\n key = hexlify(os.urandom(30)).decode()\n dashed = '-'.join(textwrap.wrap(key, 15))\n\n return dashed\n", "path": "backend/uclapi/dashboard/app_helpers.py"}], "after_files": [{"content": "from binascii import hexlify\nfrom random import SystemRandom\n\nfrom common.helpers import generate_api_token\nfrom uclapi.settings import (\n MEDIUM_ARTICLE_QUANTITY,\n REDIS_UCLAPI_HOST,\n DEBUG\n)\nfrom django.core.management import call_command\nimport os\nimport redis\nimport textwrap\nimport validators\n\n\ndef get_articles():\n r = redis.Redis(host=REDIS_UCLAPI_HOST)\n if not r.exists(\"Blog:item:1:url\"):\n if DEBUG:\n call_command('update_medium')\n else:\n return []\n 
pipe = r.pipeline()\n articles = []\n for i in range(0, MEDIUM_ARTICLE_QUANTITY):\n articles.append({})\n redis_key_url = \"Blog:item:{}:url\".format(i)\n redis_key_title = \"Blog:item:{}:title\".format(i)\n pipe.get(redis_key_url)\n pipe.get(redis_key_title)\n redis_response = pipe.execute()\n for i in range(0, MEDIUM_ARTICLE_QUANTITY):\n articles[i]['url'] = redis_response[i*2].decode(\"utf-8\")\n articles[i]['title'] = redis_response[i*2+1].decode(\"utf-8\")\n return articles\n\n\ndef generate_temp_api_token():\n return generate_api_token(\"temp\")\n\n\ndef get_temp_token():\n r = redis.Redis(host=REDIS_UCLAPI_HOST)\n\n token = generate_temp_api_token()\n # We initialise a new temporary token and set it to 1\n # as it is generated at its first usage.\n r.set(token, 1, 600)\n return token\n\n\ndef generate_app_id():\n key = hexlify(os.urandom(5)).decode()\n final = \"A\" + key\n\n return final\n\n\ndef generate_app_client_id():\n sr = SystemRandom()\n\n client_id = '{}.{}'.format(\n ''.join(str(sr.randint(0, 9)) for _ in range(16)),\n ''.join(str(sr.randint(0, 9)) for _ in range(16))\n )\n\n return client_id\n\n\ndef generate_app_client_secret():\n client_secret = hexlify(os.urandom(32)).decode()\n return client_secret\n\n\ndef is_url_safe(url):\n if not url.startswith(\"https://\"):\n return False\n\n if not validators.url(url, public=True):\n return False\n\n whitelist_urls = os.environ[\"WHITELISTED_CALLBACK_URLS\"].split(';')\n if url in whitelist_urls:\n return True\n\n forbidden_urls = os.environ[\"FORBIDDEN_CALLBACK_URLS\"].split(';')\n for furl in forbidden_urls:\n if furl in url:\n return False\n\n return True\n\n\ndef generate_secret():\n key = hexlify(os.urandom(30)).decode()\n dashed = '-'.join(textwrap.wrap(key, 15))\n\n return dashed\n", "path": "backend/uclapi/dashboard/app_helpers.py"}]}
1,043
211
gh_patches_debug_23493
rasdani/github-patches
git_diff
ansible__ansible-modules-extras-2776
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- vmware_dvs_portgroup destroy fails <!--- Verify first that your issue/request is not already reported in GitHub --> ##### ISSUE TYPE <!--- Pick one below and delete the rest: --> - Bug Report ##### COMPONENT NAME <!--- Name of the plugin/module/task --> cloud/vmware/vmware_dvs_portgroup.py ##### ANSIBLE VERSION <!--- Paste verbatim output from “ansible --version” between quotes below --> ``` ansible 2.1.1.0 (detached HEAD 780c363482) last updated 2016/08/19 17:46:34 (GMT -400) lib/ansible/modules/core: (detached HEAD 91a839f1e3) last updated 2016/08/19 17:46:11 (GMT -400) lib/ansible/modules/extras: (detached HEAD 1aeb9f8a8c) last updated 2016/08/19 17:46:13 (GMT -400) config file = configured module search path = Default w/o overrides ``` ##### CONFIGURATION <!--- Mention any settings you have changed/added/removed in ansible.cfg (or using the ANSIBLE_* environment variables). --> ##### OS / ENVIRONMENT <!--- Mention the OS you are running Ansible from, and the OS you are managing, or say “N/A” for anything that is not platform-specific. --> N/A ##### SUMMARY <!--- Explain the problem briefly --> The function definition for state_destroy_dvspg(self) contains the line task = dvs_portgroup.Destroy_Task() This should be task = self.dvs_portgroup.Destroy_Task() ##### STEPS TO REPRODUCE <!--- For bugs, show exactly how to reproduce the problem. For new features, show how the feature would be used. --> Issue a playbook task to remove a vSphere portgroup by setting the state property to absent. <!--- Paste example playbooks or commands between quotes below --> ``` - name: Remove vSphere Portgroups vmware_dvs_portgroup: hostname: "{{ VSPHERE_HOST }}" username: "{{ VSPHERE_LOGIN }}" password: "{{ VSPHERE_PASSWORD }}" portgroup_name: "{{ TESTBED_NAME }}_{{ item }}" switch_name: "{{ VDS }}" vlan_id: "{{ item }}" num_ports: 10 portgroup_type: earlyBinding state: absent validate_certs: False with_items: "{{ test_vlans }}" ``` <!--- You can also paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- What did you expect to happen when running the steps above? --> Expected portgroup entry to be removed from vSphere networking. ##### ACTUAL RESULTS <!--- What actually happened? If possible run with high verbosity (-vvvv) --> Task fails with an error that 'dvs_portgroup' is not defined. 
<!--- Paste verbatim command output between quotes below --> ``` TASK [teardown : Remove vSphere Portgroup] ************************************* task path: /var/jenkins/workspace/Build_CVP_Testbed/lab-automation/roles/teardown/tasks/main.yml:46 <10.81.110.96> ESTABLISH SSH CONNECTION FOR USER: root <10.81.110.96> SSH: EXEC sshpass -d12 ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o User=root -o ConnectTimeout=10 -o ControlPath=/users/builder/.ansible/cp/ansible-ssh-%h-%p-%r 10.81.110.96 '/bin/sh -c '"'"'( umask 77 && mkdir -p "` echo $HOME/.ansible/tmp/ansible-tmp-1471642781.59-166008742553082 `" && echo ansible-tmp-1471642781.59-166008742553082="` echo $HOME/.ansible/tmp/ansible-tmp-1471642781.59-166008742553082 `" ) && sleep 0'"'"'' <10.81.110.96> PUT /tmp/tmpzAGbLy TO /root/.ansible/tmp/ansible-tmp-1471642781.59-166008742553082/vmware_dvs_portgroup <10.81.110.96> SSH: EXEC sshpass -d12 sftp -o BatchMode=no -b - -C -o ControlMaster=auto -o ControlPersist=60s -o User=root -o ConnectTimeout=10 -o ControlPath=/users/builder/.ansible/cp/ansible-ssh-%h-%p-%r '[10.81.110.96]' <10.81.110.96> ESTABLISH SSH CONNECTION FOR USER: root <10.81.110.96> SSH: EXEC sshpass -d12 ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o User=root -o ConnectTimeout=10 -o ControlPath=/users/builder/.ansible/cp/ansible-ssh-%h-%p-%r -tt 10.81.110.96 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 /usr/bin/python /root/.ansible/tmp/ansible-tmp-1471642781.59-166008742553082/vmware_dvs_portgroup; rm -rf "/root/.ansible/tmp/ansible-tmp-1471642781.59-166008742553082/" > /dev/null 2>&1 && sleep 0'"'"'' failed: [10.81.110.96] (item=502) => {"failed": true, "invocation": {"module_args": {"hostname": "10.81.110.87", "num_ports": 10, "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "portgroup_name": "garaaa_502", "portgroup_type": "earlyBinding", "state": "absent", "switch_name": "EOS+vDS", "username": "[email protected]", "validate_certs": false, "vlan_id": 502}, "module_name": "vmware_dvs_portgroup"}, "item": 502, "msg": "global name 'dvs_portgroup' is not defined"} ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cloud/vmware/vmware_dvs_portgroup.py` Content: ``` 1 #!/usr/bin/python 2 # -*- coding: utf-8 -*- 3 4 # (c) 2015, Joseph Callen <jcallen () csc.com> 5 # 6 # This file is part of Ansible 7 # 8 # Ansible is free software: you can redistribute it and/or modify 9 # it under the terms of the GNU General Public License as published by 10 # the Free Software Foundation, either version 3 of the License, or 11 # (at your option) any later version. 12 # 13 # Ansible is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU General Public License for more details. 17 # 18 # You should have received a copy of the GNU General Public License 19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
20 21 DOCUMENTATION = ''' 22 --- 23 module: vmware_dvs_portgroup 24 short_description: Create or remove a Distributed vSwitch portgroup 25 description: 26 - Create or remove a Distributed vSwitch portgroup 27 version_added: 2.0 28 author: "Joseph Callen (@jcpowermac)" 29 notes: 30 - Tested on vSphere 5.5 31 requirements: 32 - "python >= 2.6" 33 - PyVmomi 34 options: 35 portgroup_name: 36 description: 37 - The name of the portgroup that is to be created or deleted 38 required: True 39 switch_name: 40 description: 41 - The name of the distributed vSwitch the port group should be created on. 42 required: True 43 vlan_id: 44 description: 45 - The VLAN ID that should be configured with the portgroup 46 required: True 47 num_ports: 48 description: 49 - The number of ports the portgroup should contain 50 required: True 51 portgroup_type: 52 description: 53 - See VMware KB 1022312 regarding portgroup types 54 required: True 55 choices: 56 - 'earlyBinding' 57 - 'lateBinding' 58 - 'ephemeral' 59 extends_documentation_fragment: vmware.documentation 60 ''' 61 62 EXAMPLES = ''' 63 - name: Create Management portgroup 64 local_action: 65 module: vmware_dvs_portgroup 66 hostname: vcenter_ip_or_hostname 67 username: vcenter_username 68 password: vcenter_password 69 portgroup_name: Management 70 switch_name: dvSwitch 71 vlan_id: 123 72 num_ports: 120 73 portgroup_type: earlyBinding 74 state: present 75 ''' 76 77 try: 78 from pyVmomi import vim, vmodl 79 HAS_PYVMOMI = True 80 except ImportError: 81 HAS_PYVMOMI = False 82 83 84 class VMwareDvsPortgroup(object): 85 def __init__(self, module): 86 self.module = module 87 self.dvs_portgroup = None 88 self.switch_name = self.module.params['switch_name'] 89 self.portgroup_name = self.module.params['portgroup_name'] 90 self.vlan_id = self.module.params['vlan_id'] 91 self.num_ports = self.module.params['num_ports'] 92 self.portgroup_type = self.module.params['portgroup_type'] 93 self.dv_switch = None 94 self.state = self.module.params['state'] 95 self.content = connect_to_api(module) 96 97 def process_state(self): 98 try: 99 dvspg_states = { 100 'absent': { 101 'present': self.state_destroy_dvspg, 102 'absent': self.state_exit_unchanged, 103 }, 104 'present': { 105 'update': self.state_update_dvspg, 106 'present': self.state_exit_unchanged, 107 'absent': self.state_create_dvspg, 108 } 109 } 110 dvspg_states[self.state][self.check_dvspg_state()]() 111 except vmodl.RuntimeFault as runtime_fault: 112 self.module.fail_json(msg=runtime_fault.msg) 113 except vmodl.MethodFault as method_fault: 114 self.module.fail_json(msg=method_fault.msg) 115 except Exception as e: 116 self.module.fail_json(msg=str(e)) 117 118 def create_port_group(self): 119 config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() 120 121 config.name = self.portgroup_name 122 config.numPorts = self.num_ports 123 124 # vim.VMwareDVSPortSetting() does not exist in the pyvmomi documentation 125 # but this is the correct managed object type. 
126 127 config.defaultPortConfig = vim.VMwareDVSPortSetting() 128 129 # vim.VmwareDistributedVirtualSwitchVlanIdSpec() does not exist in the 130 # pyvmomi documentation but this is the correct managed object type 131 config.defaultPortConfig.vlan = vim.VmwareDistributedVirtualSwitchVlanIdSpec() 132 config.defaultPortConfig.vlan.inherited = False 133 config.defaultPortConfig.vlan.vlanId = self.vlan_id 134 config.type = self.portgroup_type 135 136 spec = [config] 137 task = self.dv_switch.AddDVPortgroup_Task(spec) 138 changed, result = wait_for_task(task) 139 return changed, result 140 141 def state_destroy_dvspg(self): 142 changed = True 143 result = None 144 145 if not self.module.check_mode: 146 task = dvs_portgroup.Destroy_Task() 147 changed, result = wait_for_task(task) 148 self.module.exit_json(changed=changed, result=str(result)) 149 150 def state_exit_unchanged(self): 151 self.module.exit_json(changed=False) 152 153 def state_update_dvspg(self): 154 self.module.exit_json(changed=False, msg="Currently not implemented.") 155 156 def state_create_dvspg(self): 157 changed = True 158 result = None 159 160 if not self.module.check_mode: 161 changed, result = self.create_port_group() 162 self.module.exit_json(changed=changed, result=str(result)) 163 164 def check_dvspg_state(self): 165 self.dv_switch = find_dvs_by_name(self.content, self.switch_name) 166 167 if self.dv_switch is None: 168 raise Exception("A distributed virtual switch with name %s does not exist" % self.switch_name) 169 self.dvs_portgroup = find_dvspg_by_name(self.dv_switch, self.portgroup_name) 170 171 if self.dvs_portgroup is None: 172 return 'absent' 173 else: 174 return 'present' 175 176 177 def main(): 178 argument_spec = vmware_argument_spec() 179 argument_spec.update(dict(portgroup_name=dict(required=True, type='str'), 180 switch_name=dict(required=True, type='str'), 181 vlan_id=dict(required=True, type='int'), 182 num_ports=dict(required=True, type='int'), 183 portgroup_type=dict(required=True, choices=['earlyBinding', 'lateBinding', 'ephemeral'], type='str'), 184 state=dict(default='present', choices=['present', 'absent'], type='str'))) 185 186 module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) 187 188 if not HAS_PYVMOMI: 189 module.fail_json(msg='pyvmomi is required for this module') 190 191 vmware_dvs_portgroup = VMwareDvsPortgroup(module) 192 vmware_dvs_portgroup.process_state() 193 194 from ansible.module_utils.vmware import * 195 from ansible.module_utils.basic import * 196 197 if __name__ == '__main__': 198 main() 199 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cloud/vmware/vmware_dvs_portgroup.py b/cloud/vmware/vmware_dvs_portgroup.py --- a/cloud/vmware/vmware_dvs_portgroup.py +++ b/cloud/vmware/vmware_dvs_portgroup.py @@ -68,7 +68,7 @@ password: vcenter_password portgroup_name: Management switch_name: dvSwitch - vlan_id: 123 + vlan_id: 123 num_ports: 120 portgroup_type: earlyBinding state: present @@ -93,7 +93,7 @@ self.dv_switch = None self.state = self.module.params['state'] self.content = connect_to_api(module) - + def process_state(self): try: dvspg_states = { @@ -143,7 +143,7 @@ result = None if not self.module.check_mode: - task = dvs_portgroup.Destroy_Task() + task = self.dvs_portgroup.Destroy_Task() changed, result = wait_for_task(task) self.module.exit_json(changed=changed, result=str(result))
{"golden_diff": "diff --git a/cloud/vmware/vmware_dvs_portgroup.py b/cloud/vmware/vmware_dvs_portgroup.py\n--- a/cloud/vmware/vmware_dvs_portgroup.py\n+++ b/cloud/vmware/vmware_dvs_portgroup.py\n@@ -68,7 +68,7 @@\n password: vcenter_password\n portgroup_name: Management\n switch_name: dvSwitch\n- vlan_id: 123 \n+ vlan_id: 123\n num_ports: 120\n portgroup_type: earlyBinding\n state: present\n@@ -93,7 +93,7 @@\n self.dv_switch = None\n self.state = self.module.params['state']\n self.content = connect_to_api(module)\n- \n+\n def process_state(self):\n try:\n dvspg_states = {\n@@ -143,7 +143,7 @@\n result = None\n \n if not self.module.check_mode:\n- task = dvs_portgroup.Destroy_Task()\n+ task = self.dvs_portgroup.Destroy_Task()\n changed, result = wait_for_task(task)\n self.module.exit_json(changed=changed, result=str(result))\n", "issue": "vmware_dvs_portgroup destroy fails\n<!--- Verify first that your issue/request is not already reported in GitHub -->\n##### ISSUE TYPE\n\n<!--- Pick one below and delete the rest: -->\n- Bug Report\n##### COMPONENT NAME\n\n<!--- Name of the plugin/module/task -->\n\ncloud/vmware/vmware_dvs_portgroup.py\n##### ANSIBLE VERSION\n\n<!--- Paste verbatim output from \u201cansible --version\u201d between quotes below -->\n\n```\nansible 2.1.1.0 (detached HEAD 780c363482) last updated 2016/08/19 17:46:34 (GMT -400)\n lib/ansible/modules/core: (detached HEAD 91a839f1e3) last updated 2016/08/19 17:46:11 (GMT -400)\n lib/ansible/modules/extras: (detached HEAD 1aeb9f8a8c) last updated 2016/08/19 17:46:13 (GMT -400)\n config file =\n configured module search path = Default w/o overrides\n\n```\n##### CONFIGURATION\n\n<!---\nMention any settings you have changed/added/removed in ansible.cfg\n(or using the ANSIBLE_* environment variables).\n-->\n##### OS / ENVIRONMENT\n\n<!---\nMention the OS you are running Ansible from, and the OS you are\nmanaging, or say \u201cN/A\u201d for anything that is not platform-specific.\n-->\n\nN/A\n##### SUMMARY\n\n<!--- Explain the problem briefly -->\n\nThe function definition for state_destroy_dvspg(self) contains the line\n task = dvs_portgroup.Destroy_Task()\n\nThis should be \n task = self.dvs_portgroup.Destroy_Task()\n##### STEPS TO REPRODUCE\n\n<!---\nFor bugs, show exactly how to reproduce the problem.\nFor new features, show how the feature would be used.\n-->\n\nIssue a playbook task to remove a vSphere portgroup by setting the state property to absent.\n\n<!--- Paste example playbooks or commands between quotes below -->\n\n```\n - name: Remove vSphere Portgroups\n vmware_dvs_portgroup:\n hostname: \"{{ VSPHERE_HOST }}\"\n username: \"{{ VSPHERE_LOGIN }}\"\n password: \"{{ VSPHERE_PASSWORD }}\"\n portgroup_name: \"{{ TESTBED_NAME }}_{{ item }}\"\n switch_name: \"{{ VDS }}\"\n vlan_id: \"{{ item }}\"\n num_ports: 10\n portgroup_type: earlyBinding\n state: absent\n validate_certs: False\n with_items: \"{{ test_vlans }}\"\n\n```\n\n<!--- You can also paste gist.github.com links for larger files -->\n##### EXPECTED RESULTS\n\n<!--- What did you expect to happen when running the steps above? -->\n\nExpected portgroup entry to be removed from vSphere networking.\n##### ACTUAL RESULTS\n\n<!--- What actually happened? 
If possible run with high verbosity (-vvvv) -->\n\nTask fails with an error that 'dvs_portgroup' is not defined.\n\n<!--- Paste verbatim command output between quotes below -->\n\n```\nTASK [teardown : Remove vSphere Portgroup] *************************************\ntask path: /var/jenkins/workspace/Build_CVP_Testbed/lab-automation/roles/teardown/tasks/main.yml:46\n<10.81.110.96> ESTABLISH SSH CONNECTION FOR USER: root\n<10.81.110.96> SSH: EXEC sshpass -d12 ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o User=root -o ConnectTimeout=10 -o ControlPath=/users/builder/.ansible/cp/ansible-ssh-%h-%p-%r 10.81.110.96 '/bin/sh -c '\"'\"'( umask 77 && mkdir -p \"` echo $HOME/.ansible/tmp/ansible-tmp-1471642781.59-166008742553082 `\" && echo ansible-tmp-1471642781.59-166008742553082=\"` echo $HOME/.ansible/tmp/ansible-tmp-1471642781.59-166008742553082 `\" ) && sleep 0'\"'\"''\n<10.81.110.96> PUT /tmp/tmpzAGbLy TO /root/.ansible/tmp/ansible-tmp-1471642781.59-166008742553082/vmware_dvs_portgroup\n<10.81.110.96> SSH: EXEC sshpass -d12 sftp -o BatchMode=no -b - -C -o ControlMaster=auto -o ControlPersist=60s -o User=root -o ConnectTimeout=10 -o ControlPath=/users/builder/.ansible/cp/ansible-ssh-%h-%p-%r '[10.81.110.96]'\n<10.81.110.96> ESTABLISH SSH CONNECTION FOR USER: root\n<10.81.110.96> SSH: EXEC sshpass -d12 ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o User=root -o ConnectTimeout=10 -o ControlPath=/users/builder/.ansible/cp/ansible-ssh-%h-%p-%r -tt 10.81.110.96 '/bin/sh -c '\"'\"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 /usr/bin/python /root/.ansible/tmp/ansible-tmp-1471642781.59-166008742553082/vmware_dvs_portgroup; rm -rf \"/root/.ansible/tmp/ansible-tmp-1471642781.59-166008742553082/\" > /dev/null 2>&1 && sleep 0'\"'\"''\nfailed: [10.81.110.96] (item=502) => {\"failed\": true, \"invocation\": {\"module_args\": {\"hostname\": \"10.81.110.87\", \"num_ports\": 10, \"password\": \"VALUE_SPECIFIED_IN_NO_LOG_PARAMETER\", \"portgroup_name\": \"garaaa_502\", \"portgroup_type\": \"earlyBinding\", \"state\": \"absent\", \"switch_name\": \"EOS+vDS\", \"username\": \"[email protected]\", \"validate_certs\": false, \"vlan_id\": 502}, \"module_name\": \"vmware_dvs_portgroup\"}, \"item\": 502, \"msg\": \"global name 'dvs_portgroup' is not defined\"}\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2015, Joseph Callen <jcallen () csc.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: vmware_dvs_portgroup\nshort_description: Create or remove a Distributed vSwitch portgroup\ndescription:\n - Create or remove a Distributed vSwitch portgroup\nversion_added: 2.0\nauthor: \"Joseph Callen (@jcpowermac)\"\nnotes:\n - Tested on vSphere 5.5\nrequirements:\n - \"python >= 2.6\"\n - PyVmomi\noptions:\n portgroup_name:\n description:\n - The name of the portgroup that is to be created or deleted\n required: True\n switch_name:\n description:\n - The name of the distributed vSwitch the port group should be created on.\n required: True\n vlan_id:\n description:\n - The VLAN ID that should be configured with the portgroup\n required: True\n num_ports:\n description:\n - The number of ports the portgroup should contain\n required: True\n portgroup_type:\n description:\n - See VMware KB 1022312 regarding portgroup types\n required: True\n choices:\n - 'earlyBinding'\n - 'lateBinding'\n - 'ephemeral'\nextends_documentation_fragment: vmware.documentation\n'''\n\nEXAMPLES = '''\n - name: Create Management portgroup\n local_action:\n module: vmware_dvs_portgroup\n hostname: vcenter_ip_or_hostname\n username: vcenter_username\n password: vcenter_password\n portgroup_name: Management\n switch_name: dvSwitch\n vlan_id: 123 \n num_ports: 120\n portgroup_type: earlyBinding\n state: present\n'''\n\ntry:\n from pyVmomi import vim, vmodl\n HAS_PYVMOMI = True\nexcept ImportError:\n HAS_PYVMOMI = False\n\n\nclass VMwareDvsPortgroup(object):\n def __init__(self, module):\n self.module = module\n self.dvs_portgroup = None\n self.switch_name = self.module.params['switch_name']\n self.portgroup_name = self.module.params['portgroup_name']\n self.vlan_id = self.module.params['vlan_id']\n self.num_ports = self.module.params['num_ports']\n self.portgroup_type = self.module.params['portgroup_type']\n self.dv_switch = None\n self.state = self.module.params['state']\n self.content = connect_to_api(module)\n \n def process_state(self):\n try:\n dvspg_states = {\n 'absent': {\n 'present': self.state_destroy_dvspg,\n 'absent': self.state_exit_unchanged,\n },\n 'present': {\n 'update': self.state_update_dvspg,\n 'present': self.state_exit_unchanged,\n 'absent': self.state_create_dvspg,\n }\n }\n dvspg_states[self.state][self.check_dvspg_state()]()\n except vmodl.RuntimeFault as runtime_fault:\n self.module.fail_json(msg=runtime_fault.msg)\n except vmodl.MethodFault as method_fault:\n self.module.fail_json(msg=method_fault.msg)\n except Exception as e:\n self.module.fail_json(msg=str(e))\n\n def create_port_group(self):\n config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()\n\n config.name = self.portgroup_name\n config.numPorts = self.num_ports\n\n # vim.VMwareDVSPortSetting() does not exist in the pyvmomi documentation\n # but this is the correct managed object type.\n\n config.defaultPortConfig = vim.VMwareDVSPortSetting()\n\n # vim.VmwareDistributedVirtualSwitchVlanIdSpec() does not exist in the\n # pyvmomi documentation but this is the correct managed object type\n config.defaultPortConfig.vlan = vim.VmwareDistributedVirtualSwitchVlanIdSpec()\n config.defaultPortConfig.vlan.inherited = False\n config.defaultPortConfig.vlan.vlanId = self.vlan_id\n config.type = self.portgroup_type\n\n spec = [config]\n task = self.dv_switch.AddDVPortgroup_Task(spec)\n changed, result = wait_for_task(task)\n return changed, result\n\n def state_destroy_dvspg(self):\n changed = True\n result = None\n\n if not self.module.check_mode:\n task = 
dvs_portgroup.Destroy_Task()\n changed, result = wait_for_task(task)\n self.module.exit_json(changed=changed, result=str(result))\n\n def state_exit_unchanged(self):\n self.module.exit_json(changed=False)\n\n def state_update_dvspg(self):\n self.module.exit_json(changed=False, msg=\"Currently not implemented.\")\n\n def state_create_dvspg(self):\n changed = True\n result = None\n\n if not self.module.check_mode:\n changed, result = self.create_port_group()\n self.module.exit_json(changed=changed, result=str(result))\n\n def check_dvspg_state(self):\n self.dv_switch = find_dvs_by_name(self.content, self.switch_name)\n\n if self.dv_switch is None:\n raise Exception(\"A distributed virtual switch with name %s does not exist\" % self.switch_name)\n self.dvs_portgroup = find_dvspg_by_name(self.dv_switch, self.portgroup_name)\n\n if self.dvs_portgroup is None:\n return 'absent'\n else:\n return 'present'\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),\n switch_name=dict(required=True, type='str'),\n vlan_id=dict(required=True, type='int'),\n num_ports=dict(required=True, type='int'),\n portgroup_type=dict(required=True, choices=['earlyBinding', 'lateBinding', 'ephemeral'], type='str'),\n state=dict(default='present', choices=['present', 'absent'], type='str')))\n\n module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)\n\n if not HAS_PYVMOMI:\n module.fail_json(msg='pyvmomi is required for this module')\n\n vmware_dvs_portgroup = VMwareDvsPortgroup(module)\n vmware_dvs_portgroup.process_state()\n\nfrom ansible.module_utils.vmware import *\nfrom ansible.module_utils.basic import *\n\nif __name__ == '__main__':\n main()\n", "path": "cloud/vmware/vmware_dvs_portgroup.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2015, Joseph Callen <jcallen () csc.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: vmware_dvs_portgroup\nshort_description: Create or remove a Distributed vSwitch portgroup\ndescription:\n - Create or remove a Distributed vSwitch portgroup\nversion_added: 2.0\nauthor: \"Joseph Callen (@jcpowermac)\"\nnotes:\n - Tested on vSphere 5.5\nrequirements:\n - \"python >= 2.6\"\n - PyVmomi\noptions:\n portgroup_name:\n description:\n - The name of the portgroup that is to be created or deleted\n required: True\n switch_name:\n description:\n - The name of the distributed vSwitch the port group should be created on.\n required: True\n vlan_id:\n description:\n - The VLAN ID that should be configured with the portgroup\n required: True\n num_ports:\n description:\n - The number of ports the portgroup should contain\n required: True\n portgroup_type:\n description:\n - See VMware KB 1022312 regarding portgroup types\n required: True\n choices:\n - 'earlyBinding'\n - 'lateBinding'\n - 'ephemeral'\nextends_documentation_fragment: vmware.documentation\n'''\n\nEXAMPLES = '''\n - name: Create Management portgroup\n local_action:\n module: vmware_dvs_portgroup\n hostname: vcenter_ip_or_hostname\n username: vcenter_username\n password: vcenter_password\n portgroup_name: Management\n switch_name: dvSwitch\n vlan_id: 123\n num_ports: 120\n portgroup_type: earlyBinding\n state: present\n'''\n\ntry:\n from pyVmomi import vim, vmodl\n HAS_PYVMOMI = True\nexcept ImportError:\n HAS_PYVMOMI = False\n\n\nclass VMwareDvsPortgroup(object):\n def __init__(self, module):\n self.module = module\n self.dvs_portgroup = None\n self.switch_name = self.module.params['switch_name']\n self.portgroup_name = self.module.params['portgroup_name']\n self.vlan_id = self.module.params['vlan_id']\n self.num_ports = self.module.params['num_ports']\n self.portgroup_type = self.module.params['portgroup_type']\n self.dv_switch = None\n self.state = self.module.params['state']\n self.content = connect_to_api(module)\n\n def process_state(self):\n try:\n dvspg_states = {\n 'absent': {\n 'present': self.state_destroy_dvspg,\n 'absent': self.state_exit_unchanged,\n },\n 'present': {\n 'update': self.state_update_dvspg,\n 'present': self.state_exit_unchanged,\n 'absent': self.state_create_dvspg,\n }\n }\n dvspg_states[self.state][self.check_dvspg_state()]()\n except vmodl.RuntimeFault as runtime_fault:\n self.module.fail_json(msg=runtime_fault.msg)\n except vmodl.MethodFault as method_fault:\n self.module.fail_json(msg=method_fault.msg)\n except Exception as e:\n self.module.fail_json(msg=str(e))\n\n def create_port_group(self):\n config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()\n\n config.name = self.portgroup_name\n config.numPorts = self.num_ports\n\n # vim.VMwareDVSPortSetting() does not exist in the pyvmomi documentation\n # but this is the correct managed object type.\n\n config.defaultPortConfig = vim.VMwareDVSPortSetting()\n\n # vim.VmwareDistributedVirtualSwitchVlanIdSpec() does not exist in the\n # pyvmomi documentation but this is the correct managed object type\n config.defaultPortConfig.vlan = vim.VmwareDistributedVirtualSwitchVlanIdSpec()\n config.defaultPortConfig.vlan.inherited = False\n config.defaultPortConfig.vlan.vlanId = self.vlan_id\n config.type = self.portgroup_type\n\n spec = [config]\n task = self.dv_switch.AddDVPortgroup_Task(spec)\n changed, result = wait_for_task(task)\n return changed, result\n\n def state_destroy_dvspg(self):\n changed = True\n result = None\n\n if not self.module.check_mode:\n task = 
self.dvs_portgroup.Destroy_Task()\n changed, result = wait_for_task(task)\n self.module.exit_json(changed=changed, result=str(result))\n\n def state_exit_unchanged(self):\n self.module.exit_json(changed=False)\n\n def state_update_dvspg(self):\n self.module.exit_json(changed=False, msg=\"Currently not implemented.\")\n\n def state_create_dvspg(self):\n changed = True\n result = None\n\n if not self.module.check_mode:\n changed, result = self.create_port_group()\n self.module.exit_json(changed=changed, result=str(result))\n\n def check_dvspg_state(self):\n self.dv_switch = find_dvs_by_name(self.content, self.switch_name)\n\n if self.dv_switch is None:\n raise Exception(\"A distributed virtual switch with name %s does not exist\" % self.switch_name)\n self.dvs_portgroup = find_dvspg_by_name(self.dv_switch, self.portgroup_name)\n\n if self.dvs_portgroup is None:\n return 'absent'\n else:\n return 'present'\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),\n switch_name=dict(required=True, type='str'),\n vlan_id=dict(required=True, type='int'),\n num_ports=dict(required=True, type='int'),\n portgroup_type=dict(required=True, choices=['earlyBinding', 'lateBinding', 'ephemeral'], type='str'),\n state=dict(default='present', choices=['present', 'absent'], type='str')))\n\n module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)\n\n if not HAS_PYVMOMI:\n module.fail_json(msg='pyvmomi is required for this module')\n\n vmware_dvs_portgroup = VMwareDvsPortgroup(module)\n vmware_dvs_portgroup.process_state()\n\nfrom ansible.module_utils.vmware import *\nfrom ansible.module_utils.basic import *\n\nif __name__ == '__main__':\n main()\n", "path": "cloud/vmware/vmware_dvs_portgroup.py"}]}
3,898
267
gh_patches_debug_1224
rasdani/github-patches
git_diff
projectmesa__mesa-826
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Push new Mesa release Wee are overdue for an official release. Before I push one, does anyone have anything that really want to try to get in or should I just tag and release? Discuss. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mesa/__init__.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """ 3 Mesa Agent-Based Modeling Framework 4 5 Core Objects: Model, and Agent. 6 7 """ 8 import datetime 9 10 from .model import Model 11 from .agent import Agent 12 13 14 __all__ = ["Model", "Agent"] 15 16 __title__ = "mesa" 17 __version__ = "0.8.6" 18 __license__ = "Apache 2.0" 19 __copyright__ = "Copyright %s Project Mesa Team" % datetime.date.today().year 20 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mesa/__init__.py b/mesa/__init__.py --- a/mesa/__init__.py +++ b/mesa/__init__.py @@ -14,6 +14,6 @@ __all__ = ["Model", "Agent"] __title__ = "mesa" -__version__ = "0.8.6" +__version__ = "0.8.7" __license__ = "Apache 2.0" __copyright__ = "Copyright %s Project Mesa Team" % datetime.date.today().year
{"golden_diff": "diff --git a/mesa/__init__.py b/mesa/__init__.py\n--- a/mesa/__init__.py\n+++ b/mesa/__init__.py\n@@ -14,6 +14,6 @@\n __all__ = [\"Model\", \"Agent\"]\n \n __title__ = \"mesa\"\n-__version__ = \"0.8.6\"\n+__version__ = \"0.8.7\"\n __license__ = \"Apache 2.0\"\n __copyright__ = \"Copyright %s Project Mesa Team\" % datetime.date.today().year\n", "issue": "Push new Mesa release\nWee are overdue for an official release. Before I push one, does anyone have anything that really want to try to get in or should I just tag and release? \r\n\r\nDiscuss. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nMesa Agent-Based Modeling Framework\n\nCore Objects: Model, and Agent.\n\n\"\"\"\nimport datetime\n\nfrom .model import Model\nfrom .agent import Agent\n\n\n__all__ = [\"Model\", \"Agent\"]\n\n__title__ = \"mesa\"\n__version__ = \"0.8.6\"\n__license__ = \"Apache 2.0\"\n__copyright__ = \"Copyright %s Project Mesa Team\" % datetime.date.today().year\n", "path": "mesa/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nMesa Agent-Based Modeling Framework\n\nCore Objects: Model, and Agent.\n\n\"\"\"\nimport datetime\n\nfrom .model import Model\nfrom .agent import Agent\n\n\n__all__ = [\"Model\", \"Agent\"]\n\n__title__ = \"mesa\"\n__version__ = \"0.8.7\"\n__license__ = \"Apache 2.0\"\n__copyright__ = \"Copyright %s Project Mesa Team\" % datetime.date.today().year\n", "path": "mesa/__init__.py"}]}
438
121
gh_patches_debug_27345
rasdani/github-patches
git_diff
internetarchive__openlibrary-5001
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Is there a way to limit the page-size of search API requests? ### Question Is there a way to limit the page-size of search API requests? The default Search-API page-size is 100 items: http://openlibrary.org/search.json?q=the+lord+of+the+rings I would like to reduce the page-size (limit) for Search API calls, since the user can just 'page' through the results if he/she wants. Fetching more results also requires more processing on the client-side. Side notes: - The number is 20 for the search-inside API: http://openlibrary.org/search/inside.json?q=thanks%20for%20all%20the%20fish - I think both default page-sizes should probably be the same (20 seems like a reasonable number to me). - The Archive.org API has the "limit" parameter to do this. Thanks! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `openlibrary/plugins/inside/code.py` Content: ``` 1 from time import time 2 3 import json 4 import web 5 6 from infogami.utils import delegate 7 from infogami.utils.view import render_template 8 9 from openlibrary.core.fulltext import fulltext_search 10 11 RESULTS_PER_PAGE = 20 12 13 14 class search_inside(delegate.page): 15 16 path = '/search/inside' 17 18 def GET(self): 19 search_start = time() # should probably use a @timeit decorator 20 i = web.input(q='', page=1) 21 query = i.q 22 page = int(i.page) 23 results = fulltext_search(query, page=page, limit=RESULTS_PER_PAGE) 24 search_time = time() - search_start 25 26 return render_template('search/inside.tmpl', query, results, search_time, 27 page=page, results_per_page=RESULTS_PER_PAGE) 28 page.v2 = True # page is mobile-first 29 return page 30 31 32 class search_inside_json(delegate.page): 33 path = "/search/inside" 34 encoding = "json" 35 36 def GET(self): 37 i = web.input(q='', page=1, limit=RESULTS_PER_PAGE) 38 limit = min(i.limit, RESULTS_PER_PAGE) if i.limit else RESULTS_PER_PAGE 39 query = i.q 40 page = int(i.page) 41 results = fulltext_search(query, page=page, limit=limit, js=True) 42 web.header('Content-Type', 'application/json') 43 return delegate.RawText(json.dumps(results, indent=4)) 44 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openlibrary/plugins/inside/code.py b/openlibrary/plugins/inside/code.py --- a/openlibrary/plugins/inside/code.py +++ b/openlibrary/plugins/inside/code.py @@ -4,7 +4,7 @@ import web from infogami.utils import delegate -from infogami.utils.view import render_template +from infogami.utils.view import render_template, safeint from openlibrary.core.fulltext import fulltext_search @@ -12,7 +12,6 @@ class search_inside(delegate.page): - path = '/search/inside' def GET(self): @@ -25,8 +24,6 @@ return render_template('search/inside.tmpl', query, results, search_time, page=page, results_per_page=RESULTS_PER_PAGE) - page.v2 = True # page is mobile-first - return page class search_inside_json(delegate.page): @@ -35,7 +32,7 @@ def GET(self): i = web.input(q='', page=1, limit=RESULTS_PER_PAGE) - limit = min(i.limit, RESULTS_PER_PAGE) if i.limit else RESULTS_PER_PAGE + limit = min(safeint(i.limit, RESULTS_PER_PAGE), RESULTS_PER_PAGE) query = i.q page = int(i.page) results = fulltext_search(query, page=page, limit=limit, js=True)
{"golden_diff": "diff --git a/openlibrary/plugins/inside/code.py b/openlibrary/plugins/inside/code.py\n--- a/openlibrary/plugins/inside/code.py\n+++ b/openlibrary/plugins/inside/code.py\n@@ -4,7 +4,7 @@\n import web\n \n from infogami.utils import delegate\n-from infogami.utils.view import render_template\n+from infogami.utils.view import render_template, safeint\n \n from openlibrary.core.fulltext import fulltext_search\n \n@@ -12,7 +12,6 @@\n \n \n class search_inside(delegate.page):\n-\n path = '/search/inside'\n \n def GET(self):\n@@ -25,8 +24,6 @@\n \n return render_template('search/inside.tmpl', query, results, search_time,\n page=page, results_per_page=RESULTS_PER_PAGE)\n- page.v2 = True # page is mobile-first\n- return page\n \n \n class search_inside_json(delegate.page):\n@@ -35,7 +32,7 @@\n \n def GET(self):\n i = web.input(q='', page=1, limit=RESULTS_PER_PAGE)\n- limit = min(i.limit, RESULTS_PER_PAGE) if i.limit else RESULTS_PER_PAGE\n+ limit = min(safeint(i.limit, RESULTS_PER_PAGE), RESULTS_PER_PAGE)\n query = i.q\n page = int(i.page)\n results = fulltext_search(query, page=page, limit=limit, js=True)\n", "issue": "Is there a way to limit the page-size of search API requests?\n### Question\r\nIs there a way to limit the page-size of search API requests?\r\n\r\nThe default Search-API page-size is 100 items: http://openlibrary.org/search.json?q=the+lord+of+the+rings\r\n\r\nI would like to reduce the page-size (limit) for Search API calls, since the user can just 'page' through the results if he/she wants. Fetching more results also requires more processing on the client-side.\r\n\r\nSide notes:\r\n- The number is 20 for the search-inside API: http://openlibrary.org/search/inside.json?q=thanks%20for%20all%20the%20fish\r\n- I think both default page-sizes should probably be the same (20 seems like a reasonable number to me).\r\n- The Archive.org API has the \"limit\" parameter to do this.\r\n\r\nThanks!\r\n\r\n\n", "before_files": [{"content": "from time import time\n\nimport json\nimport web\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import render_template\n\nfrom openlibrary.core.fulltext import fulltext_search\n\nRESULTS_PER_PAGE = 20\n\n\nclass search_inside(delegate.page):\n\n path = '/search/inside'\n\n def GET(self):\n search_start = time() # should probably use a @timeit decorator\n i = web.input(q='', page=1)\n query = i.q\n page = int(i.page)\n results = fulltext_search(query, page=page, limit=RESULTS_PER_PAGE)\n search_time = time() - search_start\n\n return render_template('search/inside.tmpl', query, results, search_time,\n page=page, results_per_page=RESULTS_PER_PAGE)\n page.v2 = True # page is mobile-first\n return page\n\n\nclass search_inside_json(delegate.page):\n path = \"/search/inside\"\n encoding = \"json\"\n\n def GET(self):\n i = web.input(q='', page=1, limit=RESULTS_PER_PAGE)\n limit = min(i.limit, RESULTS_PER_PAGE) if i.limit else RESULTS_PER_PAGE\n query = i.q\n page = int(i.page)\n results = fulltext_search(query, page=page, limit=limit, js=True)\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(results, indent=4))\n", "path": "openlibrary/plugins/inside/code.py"}], "after_files": [{"content": "from time import time\n\nimport json\nimport web\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import render_template, safeint\n\nfrom openlibrary.core.fulltext import fulltext_search\n\nRESULTS_PER_PAGE = 20\n\n\nclass search_inside(delegate.page):\n path = '/search/inside'\n\n def 
GET(self):\n search_start = time() # should probably use a @timeit decorator\n i = web.input(q='', page=1)\n query = i.q\n page = int(i.page)\n results = fulltext_search(query, page=page, limit=RESULTS_PER_PAGE)\n search_time = time() - search_start\n\n return render_template('search/inside.tmpl', query, results, search_time,\n page=page, results_per_page=RESULTS_PER_PAGE)\n\n\nclass search_inside_json(delegate.page):\n path = \"/search/inside\"\n encoding = \"json\"\n\n def GET(self):\n i = web.input(q='', page=1, limit=RESULTS_PER_PAGE)\n limit = min(safeint(i.limit, RESULTS_PER_PAGE), RESULTS_PER_PAGE)\n query = i.q\n page = int(i.page)\n results = fulltext_search(query, page=page, limit=limit, js=True)\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(results, indent=4))\n", "path": "openlibrary/plugins/inside/code.py"}]}
852
307
gh_patches_debug_20619
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-1348
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `merge_type` `types` type hint https://github.com/strawberry-graphql/strawberry/blob/main/strawberry/tools/merge_types.py#L9 The current `Tuple[Type]` produces: ``` *.py:15:5: error: Argument 2 to "merge_types" has incompatible type "Tuple[Type[QueryA], Type[QueryB], Type[QueryC]]"; expected "Tuple[Type[Any]]" [arg-type] ``` According to [mypy](https://mypy.readthedocs.io/en/stable/kinds_of_types.html#tuple-types), we should either change it to `Tuple[Type, ...]` or follow mypy's suggestion and go with a generic `Sequence`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `strawberry/tools/merge_types.py` Content: ``` 1 import warnings 2 from collections import Counter 3 from itertools import chain 4 from typing import Tuple, Type 5 6 import strawberry 7 8 9 def merge_types(name: str, types: Tuple[Type]) -> Type: 10 """Merge multiple Strawberry types into one 11 12 For example, given two queries `A` and `B`, one can merge them into a 13 super type as follows: 14 15 merge_types("SuperQuery", (B, A)) 16 17 This is essentially the same as: 18 19 class SuperQuery(B, A): 20 ... 21 """ 22 23 if not types: 24 raise ValueError("Can't merge types if none are supplied") 25 26 fields = chain(*(t._type_definition.fields for t in types)) 27 counter = Counter(f.name for f in fields) 28 dupes = [f for f, c in counter.most_common() if c > 1] 29 if dupes: 30 warnings.warn("{} has overridden fields: {}".format(name, ", ".join(dupes))) 31 32 return strawberry.type(type(name, types, {})) 33 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/strawberry/tools/merge_types.py b/strawberry/tools/merge_types.py --- a/strawberry/tools/merge_types.py +++ b/strawberry/tools/merge_types.py @@ -1,12 +1,12 @@ import warnings from collections import Counter from itertools import chain -from typing import Tuple, Type +from typing import Tuple import strawberry -def merge_types(name: str, types: Tuple[Type]) -> Type: +def merge_types(name: str, types: Tuple[type, ...]) -> type: """Merge multiple Strawberry types into one For example, given two queries `A` and `B`, one can merge them into a @@ -23,7 +23,9 @@ if not types: raise ValueError("Can't merge types if none are supplied") - fields = chain(*(t._type_definition.fields for t in types)) + fields = chain( + *(t._type_definition.fields for t in types) # type: ignore[attr-defined] + ) counter = Counter(f.name for f in fields) dupes = [f for f, c in counter.most_common() if c > 1] if dupes:
{"golden_diff": "diff --git a/strawberry/tools/merge_types.py b/strawberry/tools/merge_types.py\n--- a/strawberry/tools/merge_types.py\n+++ b/strawberry/tools/merge_types.py\n@@ -1,12 +1,12 @@\n import warnings\n from collections import Counter\n from itertools import chain\n-from typing import Tuple, Type\n+from typing import Tuple\n \n import strawberry\n \n \n-def merge_types(name: str, types: Tuple[Type]) -> Type:\n+def merge_types(name: str, types: Tuple[type, ...]) -> type:\n \"\"\"Merge multiple Strawberry types into one\n \n For example, given two queries `A` and `B`, one can merge them into a\n@@ -23,7 +23,9 @@\n if not types:\n raise ValueError(\"Can't merge types if none are supplied\")\n \n- fields = chain(*(t._type_definition.fields for t in types))\n+ fields = chain(\n+ *(t._type_definition.fields for t in types) # type: ignore[attr-defined]\n+ )\n counter = Counter(f.name for f in fields)\n dupes = [f for f, c in counter.most_common() if c > 1]\n if dupes:\n", "issue": "`merge_type` `types` type hint\nhttps://github.com/strawberry-graphql/strawberry/blob/main/strawberry/tools/merge_types.py#L9\r\n\r\nThe current `Tuple[Type]` produces:\r\n```\r\n*.py:15:5: error: Argument 2 to \"merge_types\" has incompatible type \"Tuple[Type[QueryA], Type[QueryB], Type[QueryC]]\"; expected \"Tuple[Type[Any]]\" [arg-type]\r\n```\r\n\r\nAccording to [mypy](https://mypy.readthedocs.io/en/stable/kinds_of_types.html#tuple-types), we should either change it to `Tuple[Type, ...]` or follow mypy's suggestion and go with a generic `Sequence`.\r\n\r\n\n", "before_files": [{"content": "import warnings\nfrom collections import Counter\nfrom itertools import chain\nfrom typing import Tuple, Type\n\nimport strawberry\n\n\ndef merge_types(name: str, types: Tuple[Type]) -> Type:\n \"\"\"Merge multiple Strawberry types into one\n\n For example, given two queries `A` and `B`, one can merge them into a\n super type as follows:\n\n merge_types(\"SuperQuery\", (B, A))\n\n This is essentially the same as:\n\n class SuperQuery(B, A):\n ...\n \"\"\"\n\n if not types:\n raise ValueError(\"Can't merge types if none are supplied\")\n\n fields = chain(*(t._type_definition.fields for t in types))\n counter = Counter(f.name for f in fields)\n dupes = [f for f, c in counter.most_common() if c > 1]\n if dupes:\n warnings.warn(\"{} has overridden fields: {}\".format(name, \", \".join(dupes)))\n\n return strawberry.type(type(name, types, {}))\n", "path": "strawberry/tools/merge_types.py"}], "after_files": [{"content": "import warnings\nfrom collections import Counter\nfrom itertools import chain\nfrom typing import Tuple\n\nimport strawberry\n\n\ndef merge_types(name: str, types: Tuple[type, ...]) -> type:\n \"\"\"Merge multiple Strawberry types into one\n\n For example, given two queries `A` and `B`, one can merge them into a\n super type as follows:\n\n merge_types(\"SuperQuery\", (B, A))\n\n This is essentially the same as:\n\n class SuperQuery(B, A):\n ...\n \"\"\"\n\n if not types:\n raise ValueError(\"Can't merge types if none are supplied\")\n\n fields = chain(\n *(t._type_definition.fields for t in types) # type: ignore[attr-defined]\n )\n counter = Counter(f.name for f in fields)\n dupes = [f for f, c in counter.most_common() if c > 1]\n if dupes:\n warnings.warn(\"{} has overridden fields: {}\".format(name, \", \".join(dupes)))\n\n return strawberry.type(type(name, types, {}))\n", "path": "strawberry/tools/merge_types.py"}]}
700
269
gh_patches_debug_29158
rasdani/github-patches
git_diff
bookwyrm-social__bookwyrm-1885
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Email to admins when a user is reported has the wrong link --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bookwyrm/models/report.py` Content: ``` 1 """ flagged for moderation """ 2 from django.db import models 3 from .base_model import BookWyrmModel 4 5 6 class Report(BookWyrmModel): 7 """reported status or user""" 8 9 reporter = models.ForeignKey( 10 "User", related_name="reporter", on_delete=models.PROTECT 11 ) 12 note = models.TextField(null=True, blank=True) 13 user = models.ForeignKey("User", on_delete=models.PROTECT) 14 statuses = models.ManyToManyField("Status", blank=True) 15 links = models.ManyToManyField("Link", blank=True) 16 resolved = models.BooleanField(default=False) 17 18 class Meta: 19 """set order by default""" 20 21 ordering = ("-created_date",) 22 23 24 class ReportComment(BookWyrmModel): 25 """updates on a report""" 26 27 user = models.ForeignKey("User", on_delete=models.PROTECT) 28 note = models.TextField() 29 report = models.ForeignKey(Report, on_delete=models.PROTECT) 30 31 class Meta: 32 """sort comments""" 33 34 ordering = ("-created_date",) 35 ``` Path: `bookwyrm/views/admin/reports.py` Content: ``` 1 """ moderation via flagged posts and users """ 2 from django.contrib.auth.decorators import login_required, permission_required 3 from django.core.exceptions import PermissionDenied 4 from django.shortcuts import get_object_or_404, redirect 5 from django.template.response import TemplateResponse 6 from django.utils.decorators import method_decorator 7 from django.views import View 8 9 from bookwyrm import forms, models 10 11 12 # pylint: disable=no-self-use 13 @method_decorator(login_required, name="dispatch") 14 @method_decorator( 15 permission_required("bookwyrm.moderate_user", raise_exception=True), 16 name="dispatch", 17 ) 18 @method_decorator( 19 permission_required("bookwyrm.moderate_post", raise_exception=True), 20 name="dispatch", 21 ) 22 class ReportsAdmin(View): 23 """list of reports""" 24 25 def get(self, request): 26 """view current reports""" 27 filters = {} 28 29 resolved = request.GET.get("resolved") == "true" 30 server = request.GET.get("server") 31 if server: 32 filters["user__federated_server__server_name"] = server 33 username = request.GET.get("username") 34 if username: 35 filters["user__username__icontains"] = username 36 filters["resolved"] = resolved 37 data = { 38 "resolved": resolved, 39 "server": server, 40 "reports": models.Report.objects.filter(**filters), 41 } 42 return TemplateResponse(request, "settings/reports/reports.html", data) 43 44 45 @method_decorator(login_required, name="dispatch") 46 @method_decorator( 47 permission_required("bookwyrm.moderate_user", raise_exception=True), 48 name="dispatch", 49 ) 50 @method_decorator( 51 permission_required("bookwyrm.moderate_post", raise_exception=True), 52 name="dispatch", 53 ) 54 class ReportAdmin(View): 55 """view a specific report""" 56 57 def get(self, request, report_id): 58 """load a report""" 59 data = { 60 "report": get_object_or_404(models.Report, id=report_id), 61 } 62 return TemplateResponse(request, "settings/reports/report.html", data) 63 64 def post(self, request, report_id): 65 """comment on a report""" 66 report = get_object_or_404(models.Report, id=report_id) 67 models.ReportComment.objects.create( 68 user=request.user, 69 report=report, 70 
note=request.POST.get("note"), 71 ) 72 return redirect("settings-report", report.id) 73 74 75 @login_required 76 @permission_required("bookwyrm_moderate_user") 77 def suspend_user(_, user_id): 78 """mark an account as inactive""" 79 user = get_object_or_404(models.User, id=user_id) 80 user.is_active = False 81 user.deactivation_reason = "moderator_suspension" 82 # this isn't a full deletion, so we don't want to tell the world 83 user.save(broadcast=False) 84 return redirect("settings-user", user.id) 85 86 87 @login_required 88 @permission_required("bookwyrm_moderate_user") 89 def unsuspend_user(_, user_id): 90 """mark an account as inactive""" 91 user = get_object_or_404(models.User, id=user_id) 92 user.is_active = True 93 user.deactivation_reason = None 94 # this isn't a full deletion, so we don't want to tell the world 95 user.save(broadcast=False) 96 return redirect("settings-user", user.id) 97 98 99 @login_required 100 @permission_required("bookwyrm_moderate_user") 101 def moderator_delete_user(request, user_id): 102 """permanently delete a user""" 103 user = get_object_or_404(models.User, id=user_id) 104 105 # we can't delete users on other instances 106 if not user.local: 107 raise PermissionDenied() 108 109 form = forms.DeleteUserForm(request.POST, instance=user) 110 111 moderator = models.User.objects.get(id=request.user.id) 112 # check the moderator's password 113 if form.is_valid() and moderator.check_password(form.cleaned_data["password"]): 114 user.deactivation_reason = "moderator_deletion" 115 user.delete() 116 return redirect("settings-user", user.id) 117 118 form.errors["password"] = ["Invalid password"] 119 120 data = {"user": user, "group_form": forms.UserGroupForm(), "form": form} 121 return TemplateResponse(request, "user_admin/user.html", data) 122 123 124 @login_required 125 @permission_required("bookwyrm_moderate_post") 126 def resolve_report(_, report_id): 127 """mark a report as (un)resolved""" 128 report = get_object_or_404(models.Report, id=report_id) 129 report.resolved = not report.resolved 130 report.save() 131 if not report.resolved: 132 return redirect("settings-report", report.id) 133 return redirect("settings-reports") 134 ``` Path: `bookwyrm/emailing.py` Content: ``` 1 """ send emails """ 2 from django.core.mail import EmailMultiAlternatives 3 from django.template.loader import get_template 4 5 from bookwyrm import models, settings 6 from bookwyrm.tasks import app 7 from bookwyrm.settings import DOMAIN 8 9 10 def email_data(): 11 """fields every email needs""" 12 site = models.SiteSettings.objects.get() 13 return { 14 "site_name": site.name, 15 "logo": site.logo_small_url, 16 "domain": DOMAIN, 17 "user": None, 18 } 19 20 21 def email_confirmation_email(user): 22 """newly registered users confirm email address""" 23 data = email_data() 24 data["confirmation_code"] = user.confirmation_code 25 data["confirmation_link"] = user.confirmation_link 26 send_email.delay(user.email, *format_email("confirm", data)) 27 28 29 def invite_email(invite_request): 30 """send out an invite code""" 31 data = email_data() 32 data["invite_link"] = invite_request.invite.link 33 send_email.delay(invite_request.email, *format_email("invite", data)) 34 35 36 def password_reset_email(reset_code): 37 """generate a password reset email""" 38 data = email_data() 39 data["reset_link"] = reset_code.link 40 data["user"] = reset_code.user.display_name 41 send_email.delay(reset_code.user.email, *format_email("password_reset", data)) 42 43 44 def moderation_report_email(report): 45 """a 
report was created""" 46 data = email_data() 47 data["reporter"] = report.reporter.localname or report.reporter.username 48 data["reportee"] = report.user.localname or report.user.username 49 data["report_link"] = report.remote_id 50 51 for admin in models.User.objects.filter(groups__name__in=["admin", "moderator"]): 52 data["user"] = admin.display_name 53 send_email.delay(admin.email, *format_email("moderation_report", data)) 54 55 56 def format_email(email_name, data): 57 """render the email templates""" 58 subject = get_template(f"email/{email_name}/subject.html").render(data).strip() 59 html_content = ( 60 get_template(f"email/{email_name}/html_content.html").render(data).strip() 61 ) 62 text_content = ( 63 get_template(f"email/{email_name}/text_content.html").render(data).strip() 64 ) 65 return (subject, html_content, text_content) 66 67 68 @app.task(queue="high_priority") 69 def send_email(recipient, subject, html_content, text_content): 70 """use a task to send the email""" 71 email = EmailMultiAlternatives( 72 subject, text_content, settings.EMAIL_SENDER, [recipient] 73 ) 74 email.attach_alternative(html_content, "text/html") 75 email.send() 76 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bookwyrm/emailing.py b/bookwyrm/emailing.py --- a/bookwyrm/emailing.py +++ b/bookwyrm/emailing.py @@ -48,7 +48,9 @@ data["reportee"] = report.user.localname or report.user.username data["report_link"] = report.remote_id - for admin in models.User.objects.filter(groups__name__in=["admin", "moderator"]): + for admin in models.User.objects.filter( + groups__name__in=["admin", "moderator"] + ).distinct(): data["user"] = admin.display_name send_email.delay(admin.email, *format_email("moderation_report", data)) diff --git a/bookwyrm/models/report.py b/bookwyrm/models/report.py --- a/bookwyrm/models/report.py +++ b/bookwyrm/models/report.py @@ -1,5 +1,6 @@ """ flagged for moderation """ from django.db import models +from bookwyrm.settings import DOMAIN from .base_model import BookWyrmModel @@ -15,6 +16,9 @@ links = models.ManyToManyField("Link", blank=True) resolved = models.BooleanField(default=False) + def get_remote_id(self): + return f"https://{DOMAIN}/settings/reports/{self.id}" + class Meta: """set order by default""" diff --git a/bookwyrm/views/admin/reports.py b/bookwyrm/views/admin/reports.py --- a/bookwyrm/views/admin/reports.py +++ b/bookwyrm/views/admin/reports.py @@ -58,6 +58,7 @@ """load a report""" data = { "report": get_object_or_404(models.Report, id=report_id), + "group_form": forms.UserGroupForm(), } return TemplateResponse(request, "settings/reports/report.html", data)
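The patch above works on two fronts: the moderation email fills data["report_link"] from report.remote_id, and overriding get_remote_id() points that id at the report's moderation page under /settings/reports/<id>, while the added .distinct() keeps a user who belongs to both the admin and moderator groups from being emailed twice. A minimal sketch of those two behaviours follows, assuming a configured Django environment with the bookwyrm app installed and assuming, as the patch implies, that the base model derives remote_id from get_remote_id(); the helper names moderation_report_link and moderation_report_recipients are made up for illustration and are not part of the bookwyrm codebase.
```python
# Minimal sketch, assuming a configured Django/bookwyrm environment; the helper
# names below are illustrative only and do not exist in bookwyrm itself.
from bookwyrm import models
from bookwyrm.settings import DOMAIN


def moderation_report_link(report):
    # Mirrors the patched Report.get_remote_id(): the moderation email's
    # data["report_link"] now resolves to the admin-facing report page.
    return f"https://{DOMAIN}/settings/reports/{report.id}"


def moderation_report_recipients():
    # Mirrors the patched queryset in emailing.py: .distinct() prevents a user
    # who is in both groups from receiving the same report email twice.
    return models.User.objects.filter(
        groups__name__in=["admin", "moderator"]
    ).distinct()
```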
{"golden_diff": "diff --git a/bookwyrm/emailing.py b/bookwyrm/emailing.py\n--- a/bookwyrm/emailing.py\n+++ b/bookwyrm/emailing.py\n@@ -48,7 +48,9 @@\n data[\"reportee\"] = report.user.localname or report.user.username\n data[\"report_link\"] = report.remote_id\n \n- for admin in models.User.objects.filter(groups__name__in=[\"admin\", \"moderator\"]):\n+ for admin in models.User.objects.filter(\n+ groups__name__in=[\"admin\", \"moderator\"]\n+ ).distinct():\n data[\"user\"] = admin.display_name\n send_email.delay(admin.email, *format_email(\"moderation_report\", data))\n \ndiff --git a/bookwyrm/models/report.py b/bookwyrm/models/report.py\n--- a/bookwyrm/models/report.py\n+++ b/bookwyrm/models/report.py\n@@ -1,5 +1,6 @@\n \"\"\" flagged for moderation \"\"\"\n from django.db import models\n+from bookwyrm.settings import DOMAIN\n from .base_model import BookWyrmModel\n \n \n@@ -15,6 +16,9 @@\n links = models.ManyToManyField(\"Link\", blank=True)\n resolved = models.BooleanField(default=False)\n \n+ def get_remote_id(self):\n+ return f\"https://{DOMAIN}/settings/reports/{self.id}\"\n+\n class Meta:\n \"\"\"set order by default\"\"\"\n \ndiff --git a/bookwyrm/views/admin/reports.py b/bookwyrm/views/admin/reports.py\n--- a/bookwyrm/views/admin/reports.py\n+++ b/bookwyrm/views/admin/reports.py\n@@ -58,6 +58,7 @@\n \"\"\"load a report\"\"\"\n data = {\n \"report\": get_object_or_404(models.Report, id=report_id),\n+ \"group_form\": forms.UserGroupForm(),\n }\n return TemplateResponse(request, \"settings/reports/report.html\", data)\n", "issue": "Email to admins when a user is reported has the wrong link\n\n", "before_files": [{"content": "\"\"\" flagged for moderation \"\"\"\nfrom django.db import models\nfrom .base_model import BookWyrmModel\n\n\nclass Report(BookWyrmModel):\n \"\"\"reported status or user\"\"\"\n\n reporter = models.ForeignKey(\n \"User\", related_name=\"reporter\", on_delete=models.PROTECT\n )\n note = models.TextField(null=True, blank=True)\n user = models.ForeignKey(\"User\", on_delete=models.PROTECT)\n statuses = models.ManyToManyField(\"Status\", blank=True)\n links = models.ManyToManyField(\"Link\", blank=True)\n resolved = models.BooleanField(default=False)\n\n class Meta:\n \"\"\"set order by default\"\"\"\n\n ordering = (\"-created_date\",)\n\n\nclass ReportComment(BookWyrmModel):\n \"\"\"updates on a report\"\"\"\n\n user = models.ForeignKey(\"User\", on_delete=models.PROTECT)\n note = models.TextField()\n report = models.ForeignKey(Report, on_delete=models.PROTECT)\n\n class Meta:\n \"\"\"sort comments\"\"\"\n\n ordering = (\"-created_date\",)\n", "path": "bookwyrm/models/report.py"}, {"content": "\"\"\" moderation via flagged posts and users \"\"\"\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\n\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_user\", raise_exception=True),\n name=\"dispatch\",\n)\n@method_decorator(\n permission_required(\"bookwyrm.moderate_post\", raise_exception=True),\n name=\"dispatch\",\n)\nclass ReportsAdmin(View):\n \"\"\"list of reports\"\"\"\n\n def get(self, request):\n \"\"\"view current reports\"\"\"\n filters = {}\n\n 
resolved = request.GET.get(\"resolved\") == \"true\"\n server = request.GET.get(\"server\")\n if server:\n filters[\"user__federated_server__server_name\"] = server\n username = request.GET.get(\"username\")\n if username:\n filters[\"user__username__icontains\"] = username\n filters[\"resolved\"] = resolved\n data = {\n \"resolved\": resolved,\n \"server\": server,\n \"reports\": models.Report.objects.filter(**filters),\n }\n return TemplateResponse(request, \"settings/reports/reports.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_user\", raise_exception=True),\n name=\"dispatch\",\n)\n@method_decorator(\n permission_required(\"bookwyrm.moderate_post\", raise_exception=True),\n name=\"dispatch\",\n)\nclass ReportAdmin(View):\n \"\"\"view a specific report\"\"\"\n\n def get(self, request, report_id):\n \"\"\"load a report\"\"\"\n data = {\n \"report\": get_object_or_404(models.Report, id=report_id),\n }\n return TemplateResponse(request, \"settings/reports/report.html\", data)\n\n def post(self, request, report_id):\n \"\"\"comment on a report\"\"\"\n report = get_object_or_404(models.Report, id=report_id)\n models.ReportComment.objects.create(\n user=request.user,\n report=report,\n note=request.POST.get(\"note\"),\n )\n return redirect(\"settings-report\", report.id)\n\n\n@login_required\n@permission_required(\"bookwyrm_moderate_user\")\ndef suspend_user(_, user_id):\n \"\"\"mark an account as inactive\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n user.is_active = False\n user.deactivation_reason = \"moderator_suspension\"\n # this isn't a full deletion, so we don't want to tell the world\n user.save(broadcast=False)\n return redirect(\"settings-user\", user.id)\n\n\n@login_required\n@permission_required(\"bookwyrm_moderate_user\")\ndef unsuspend_user(_, user_id):\n \"\"\"mark an account as inactive\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n user.is_active = True\n user.deactivation_reason = None\n # this isn't a full deletion, so we don't want to tell the world\n user.save(broadcast=False)\n return redirect(\"settings-user\", user.id)\n\n\n@login_required\n@permission_required(\"bookwyrm_moderate_user\")\ndef moderator_delete_user(request, user_id):\n \"\"\"permanently delete a user\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n\n # we can't delete users on other instances\n if not user.local:\n raise PermissionDenied()\n\n form = forms.DeleteUserForm(request.POST, instance=user)\n\n moderator = models.User.objects.get(id=request.user.id)\n # check the moderator's password\n if form.is_valid() and moderator.check_password(form.cleaned_data[\"password\"]):\n user.deactivation_reason = \"moderator_deletion\"\n user.delete()\n return redirect(\"settings-user\", user.id)\n\n form.errors[\"password\"] = [\"Invalid password\"]\n\n data = {\"user\": user, \"group_form\": forms.UserGroupForm(), \"form\": form}\n return TemplateResponse(request, \"user_admin/user.html\", data)\n\n\n@login_required\n@permission_required(\"bookwyrm_moderate_post\")\ndef resolve_report(_, report_id):\n \"\"\"mark a report as (un)resolved\"\"\"\n report = get_object_or_404(models.Report, id=report_id)\n report.resolved = not report.resolved\n report.save()\n if not report.resolved:\n return redirect(\"settings-report\", report.id)\n return redirect(\"settings-reports\")\n", "path": "bookwyrm/views/admin/reports.py"}, {"content": "\"\"\" send emails \"\"\"\nfrom django.core.mail import 
EmailMultiAlternatives\nfrom django.template.loader import get_template\n\nfrom bookwyrm import models, settings\nfrom bookwyrm.tasks import app\nfrom bookwyrm.settings import DOMAIN\n\n\ndef email_data():\n \"\"\"fields every email needs\"\"\"\n site = models.SiteSettings.objects.get()\n return {\n \"site_name\": site.name,\n \"logo\": site.logo_small_url,\n \"domain\": DOMAIN,\n \"user\": None,\n }\n\n\ndef email_confirmation_email(user):\n \"\"\"newly registered users confirm email address\"\"\"\n data = email_data()\n data[\"confirmation_code\"] = user.confirmation_code\n data[\"confirmation_link\"] = user.confirmation_link\n send_email.delay(user.email, *format_email(\"confirm\", data))\n\n\ndef invite_email(invite_request):\n \"\"\"send out an invite code\"\"\"\n data = email_data()\n data[\"invite_link\"] = invite_request.invite.link\n send_email.delay(invite_request.email, *format_email(\"invite\", data))\n\n\ndef password_reset_email(reset_code):\n \"\"\"generate a password reset email\"\"\"\n data = email_data()\n data[\"reset_link\"] = reset_code.link\n data[\"user\"] = reset_code.user.display_name\n send_email.delay(reset_code.user.email, *format_email(\"password_reset\", data))\n\n\ndef moderation_report_email(report):\n \"\"\"a report was created\"\"\"\n data = email_data()\n data[\"reporter\"] = report.reporter.localname or report.reporter.username\n data[\"reportee\"] = report.user.localname or report.user.username\n data[\"report_link\"] = report.remote_id\n\n for admin in models.User.objects.filter(groups__name__in=[\"admin\", \"moderator\"]):\n data[\"user\"] = admin.display_name\n send_email.delay(admin.email, *format_email(\"moderation_report\", data))\n\n\ndef format_email(email_name, data):\n \"\"\"render the email templates\"\"\"\n subject = get_template(f\"email/{email_name}/subject.html\").render(data).strip()\n html_content = (\n get_template(f\"email/{email_name}/html_content.html\").render(data).strip()\n )\n text_content = (\n get_template(f\"email/{email_name}/text_content.html\").render(data).strip()\n )\n return (subject, html_content, text_content)\n\n\[email protected](queue=\"high_priority\")\ndef send_email(recipient, subject, html_content, text_content):\n \"\"\"use a task to send the email\"\"\"\n email = EmailMultiAlternatives(\n subject, text_content, settings.EMAIL_SENDER, [recipient]\n )\n email.attach_alternative(html_content, \"text/html\")\n email.send()\n", "path": "bookwyrm/emailing.py"}], "after_files": [{"content": "\"\"\" flagged for moderation \"\"\"\nfrom django.db import models\nfrom bookwyrm.settings import DOMAIN\nfrom .base_model import BookWyrmModel\n\n\nclass Report(BookWyrmModel):\n \"\"\"reported status or user\"\"\"\n\n reporter = models.ForeignKey(\n \"User\", related_name=\"reporter\", on_delete=models.PROTECT\n )\n note = models.TextField(null=True, blank=True)\n user = models.ForeignKey(\"User\", on_delete=models.PROTECT)\n statuses = models.ManyToManyField(\"Status\", blank=True)\n links = models.ManyToManyField(\"Link\", blank=True)\n resolved = models.BooleanField(default=False)\n\n def get_remote_id(self):\n return f\"https://{DOMAIN}/settings/reports/{self.id}\"\n\n class Meta:\n \"\"\"set order by default\"\"\"\n\n ordering = (\"-created_date\",)\n\n\nclass ReportComment(BookWyrmModel):\n \"\"\"updates on a report\"\"\"\n\n user = models.ForeignKey(\"User\", on_delete=models.PROTECT)\n note = models.TextField()\n report = models.ForeignKey(Report, on_delete=models.PROTECT)\n\n class Meta:\n \"\"\"sort 
comments\"\"\"\n\n ordering = (\"-created_date\",)\n", "path": "bookwyrm/models/report.py"}, {"content": "\"\"\" moderation via flagged posts and users \"\"\"\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\n\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_user\", raise_exception=True),\n name=\"dispatch\",\n)\n@method_decorator(\n permission_required(\"bookwyrm.moderate_post\", raise_exception=True),\n name=\"dispatch\",\n)\nclass ReportsAdmin(View):\n \"\"\"list of reports\"\"\"\n\n def get(self, request):\n \"\"\"view current reports\"\"\"\n filters = {}\n\n resolved = request.GET.get(\"resolved\") == \"true\"\n server = request.GET.get(\"server\")\n if server:\n filters[\"user__federated_server__server_name\"] = server\n username = request.GET.get(\"username\")\n if username:\n filters[\"user__username__icontains\"] = username\n filters[\"resolved\"] = resolved\n data = {\n \"resolved\": resolved,\n \"server\": server,\n \"reports\": models.Report.objects.filter(**filters),\n }\n return TemplateResponse(request, \"settings/reports/reports.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_user\", raise_exception=True),\n name=\"dispatch\",\n)\n@method_decorator(\n permission_required(\"bookwyrm.moderate_post\", raise_exception=True),\n name=\"dispatch\",\n)\nclass ReportAdmin(View):\n \"\"\"view a specific report\"\"\"\n\n def get(self, request, report_id):\n \"\"\"load a report\"\"\"\n data = {\n \"report\": get_object_or_404(models.Report, id=report_id),\n \"group_form\": forms.UserGroupForm(),\n }\n return TemplateResponse(request, \"settings/reports/report.html\", data)\n\n def post(self, request, report_id):\n \"\"\"comment on a report\"\"\"\n report = get_object_or_404(models.Report, id=report_id)\n models.ReportComment.objects.create(\n user=request.user,\n report=report,\n note=request.POST.get(\"note\"),\n )\n return redirect(\"settings-report\", report.id)\n\n\n@login_required\n@permission_required(\"bookwyrm_moderate_user\")\ndef suspend_user(_, user_id):\n \"\"\"mark an account as inactive\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n user.is_active = False\n user.deactivation_reason = \"moderator_suspension\"\n # this isn't a full deletion, so we don't want to tell the world\n user.save(broadcast=False)\n return redirect(\"settings-user\", user.id)\n\n\n@login_required\n@permission_required(\"bookwyrm_moderate_user\")\ndef unsuspend_user(_, user_id):\n \"\"\"mark an account as inactive\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n user.is_active = True\n user.deactivation_reason = None\n # this isn't a full deletion, so we don't want to tell the world\n user.save(broadcast=False)\n return redirect(\"settings-user\", user.id)\n\n\n@login_required\n@permission_required(\"bookwyrm_moderate_user\")\ndef moderator_delete_user(request, user_id):\n \"\"\"permanently delete a user\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n\n # we can't delete users on other instances\n if not user.local:\n raise PermissionDenied()\n\n form = 
forms.DeleteUserForm(request.POST, instance=user)\n\n moderator = models.User.objects.get(id=request.user.id)\n # check the moderator's password\n if form.is_valid() and moderator.check_password(form.cleaned_data[\"password\"]):\n user.deactivation_reason = \"moderator_deletion\"\n user.delete()\n return redirect(\"settings-user\", user.id)\n\n form.errors[\"password\"] = [\"Invalid password\"]\n\n data = {\"user\": user, \"group_form\": forms.UserGroupForm(), \"form\": form}\n return TemplateResponse(request, \"user_admin/user.html\", data)\n\n\n@login_required\n@permission_required(\"bookwyrm_moderate_post\")\ndef resolve_report(_, report_id):\n \"\"\"mark a report as (un)resolved\"\"\"\n report = get_object_or_404(models.Report, id=report_id)\n report.resolved = not report.resolved\n report.save()\n if not report.resolved:\n return redirect(\"settings-report\", report.id)\n return redirect(\"settings-reports\")\n", "path": "bookwyrm/views/admin/reports.py"}, {"content": "\"\"\" send emails \"\"\"\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import get_template\n\nfrom bookwyrm import models, settings\nfrom bookwyrm.tasks import app\nfrom bookwyrm.settings import DOMAIN\n\n\ndef email_data():\n \"\"\"fields every email needs\"\"\"\n site = models.SiteSettings.objects.get()\n return {\n \"site_name\": site.name,\n \"logo\": site.logo_small_url,\n \"domain\": DOMAIN,\n \"user\": None,\n }\n\n\ndef email_confirmation_email(user):\n \"\"\"newly registered users confirm email address\"\"\"\n data = email_data()\n data[\"confirmation_code\"] = user.confirmation_code\n data[\"confirmation_link\"] = user.confirmation_link\n send_email.delay(user.email, *format_email(\"confirm\", data))\n\n\ndef invite_email(invite_request):\n \"\"\"send out an invite code\"\"\"\n data = email_data()\n data[\"invite_link\"] = invite_request.invite.link\n send_email.delay(invite_request.email, *format_email(\"invite\", data))\n\n\ndef password_reset_email(reset_code):\n \"\"\"generate a password reset email\"\"\"\n data = email_data()\n data[\"reset_link\"] = reset_code.link\n data[\"user\"] = reset_code.user.display_name\n send_email.delay(reset_code.user.email, *format_email(\"password_reset\", data))\n\n\ndef moderation_report_email(report):\n \"\"\"a report was created\"\"\"\n data = email_data()\n data[\"reporter\"] = report.reporter.localname or report.reporter.username\n data[\"reportee\"] = report.user.localname or report.user.username\n data[\"report_link\"] = report.remote_id\n\n for admin in models.User.objects.filter(\n groups__name__in=[\"admin\", \"moderator\"]\n ).distinct():\n data[\"user\"] = admin.display_name\n send_email.delay(admin.email, *format_email(\"moderation_report\", data))\n\n\ndef format_email(email_name, data):\n \"\"\"render the email templates\"\"\"\n subject = get_template(f\"email/{email_name}/subject.html\").render(data).strip()\n html_content = (\n get_template(f\"email/{email_name}/html_content.html\").render(data).strip()\n )\n text_content = (\n get_template(f\"email/{email_name}/text_content.html\").render(data).strip()\n )\n return (subject, html_content, text_content)\n\n\[email protected](queue=\"high_priority\")\ndef send_email(recipient, subject, html_content, text_content):\n \"\"\"use a task to send the email\"\"\"\n email = EmailMultiAlternatives(\n subject, text_content, settings.EMAIL_SENDER, [recipient]\n )\n email.attach_alternative(html_content, \"text/html\")\n email.send()\n", "path": "bookwyrm/emailing.py"}]}
2,597
412
gh_patches_debug_32039
rasdani/github-patches
git_diff
pyro-ppl__pyro-1506
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- HMC gives non deterministic results with parallel chaining Even though the `ParallelSampler` yields the same set of traces (each worker gives deterministic results), the order in which the traces are consumed might be different, which gives different results when we use `TracePredictive`. One way to get deterministic results from HMC/NUTS with `num_chains > 1` would be to store intermediate results, and yield results from each of the workers in a round-robin fashion. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyro/infer/mcmc/mcmc.py` Content: ``` 1 from __future__ import absolute_import, division, print_function 2 3 import errno 4 import json 5 import logging 6 import signal 7 import socket 8 import sys 9 import threading 10 import warnings 11 from collections import OrderedDict 12 13 import six 14 from six.moves import queue 15 import torch 16 import torch.multiprocessing as mp 17 18 import pyro 19 from pyro.infer import TracePosterior 20 from pyro.infer.mcmc.logger import initialize_logger, initialize_progbar, DIAGNOSTIC_MSG, TqdmHandler 21 from pyro.util import optional 22 23 24 def logger_thread(log_queue, warmup_steps, num_samples, num_chains): 25 """ 26 Logging thread that asynchronously consumes logging events from `log_queue`, 27 and handles them appropriately. 28 """ 29 progress_bars = [initialize_progbar(warmup_steps, s, pos=i) 30 for i, s in enumerate(num_samples)] 31 logger = logging.getLogger(__name__) 32 logger.propagate = False 33 logger.addHandler(TqdmHandler()) 34 num_samples = [0] * len(num_samples) 35 try: 36 while True: 37 try: 38 record = log_queue.get_nowait() 39 except queue.Empty: 40 continue 41 if record is None: 42 break 43 metadata, msg = record.getMessage().split("]", 1) 44 _, msg_type, logger_id = metadata[1:].split() 45 if msg_type == DIAGNOSTIC_MSG: 46 pbar_pos = int(logger_id.split(":")[-1]) - 1 47 num_samples[pbar_pos] += 1 48 if num_samples[pbar_pos] == warmup_steps: 49 progress_bars[pbar_pos].set_description("Sample [{}]".format(pbar_pos + 1)) 50 diagnostics = json.loads(msg, object_pairs_hook=OrderedDict) 51 progress_bars[pbar_pos].set_postfix(diagnostics) 52 progress_bars[pbar_pos].update() 53 else: 54 logger.handle(record) 55 finally: 56 for pbar in progress_bars: 57 pbar.close() 58 # Required to not overwrite multiple progress bars on exit. 
59 sys.stderr.write("\n" * num_chains) 60 61 62 class _Worker(object): 63 def __init__(self, chain_id, result_queue, log_queue, 64 kernel, num_samples, warmup_steps=0, 65 args=None, kwargs=None): 66 self.chain_id = chain_id 67 self.trace_gen = _SingleSampler(kernel, num_samples=num_samples, warmup_steps=warmup_steps) 68 self.args = args if args is not None else [] 69 self.kwargs = kwargs if kwargs is not None else {} 70 self.rng_seed = torch.initial_seed() 71 self.log_queue = log_queue 72 self.result_queue = result_queue 73 self.default_tensor_type = torch.Tensor().type() 74 75 def run(self, *args, **kwargs): 76 pyro.set_rng_seed(self.chain_id + self.rng_seed) 77 torch.set_default_tensor_type(self.default_tensor_type) 78 kwargs["logger_id"] = "CHAIN:{}".format(self.chain_id) 79 kwargs["log_queue"] = self.log_queue 80 try: 81 for sample in self.trace_gen._traces(*args, **kwargs): 82 self.result_queue.put_nowait((self.chain_id, sample)) 83 self.result_queue.put_nowait((self.chain_id, None)) 84 except Exception as e: 85 self.trace_gen.logger.exception(e) 86 self.result_queue.put_nowait((self.chain_id, e)) 87 88 89 class _ParallelSampler(TracePosterior): 90 """ 91 Parallel runner class for running MCMC chains in parallel. This uses the 92 `torch.multiprocessing` module (itself a light wrapper over the python 93 `multiprocessing` module) to spin up parallel workers. 94 """ 95 def __init__(self, kernel, num_samples, warmup_steps, num_chains, mp_context): 96 super(_ParallelSampler, self).__init__() 97 self.kernel = kernel 98 self.warmup_steps = warmup_steps 99 self.num_chains = num_chains 100 self.workers = [] 101 self.ctx = mp 102 if mp_context: 103 if six.PY2: 104 raise ValueError("multiprocessing.get_context() is " 105 "not supported in Python 2.") 106 self.ctx = mp.get_context(mp_context) 107 self.result_queue = self.ctx.Manager().Queue() 108 self.log_queue = self.ctx.Manager().Queue() 109 self.logger = initialize_logger(logging.getLogger("pyro.infer.mcmc"), 110 "MAIN", log_queue=self.log_queue) 111 # initialize number of samples per chain 112 samples_per_chain = num_samples // num_chains 113 self.num_samples = [samples_per_chain] * num_chains 114 for i in range(num_samples % num_chains): 115 self.num_samples[i] += 1 116 self.log_thread = threading.Thread(target=logger_thread, 117 args=(self.log_queue, self.warmup_steps, 118 self.num_samples, self.num_chains)) 119 self.log_thread.daemon = True 120 self.log_thread.start() 121 122 def init_workers(self, *args, **kwargs): 123 self.workers = [] 124 for i in range(self.num_chains): 125 worker = _Worker(i + 1, self.result_queue, self.log_queue, self.kernel, 126 self.num_samples[i], self.warmup_steps) 127 worker.daemon = True 128 self.workers.append(self.ctx.Process(name=str(i), target=worker.run, 129 args=args, kwargs=kwargs)) 130 131 def terminate(self): 132 if self.log_thread.is_alive(): 133 self.log_queue.put_nowait(None) 134 self.log_thread.join(timeout=1) 135 for w in self.workers: 136 if w.is_alive(): 137 w.terminate() 138 139 def _traces(self, *args, **kwargs): 140 # Ignore sigint in worker processes; they will be shut down 141 # when the main process terminates. 
142 sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN) 143 self.init_workers(*args, **kwargs) 144 # restore original handler 145 signal.signal(signal.SIGINT, sigint_handler) 146 active_workers = self.num_chains 147 try: 148 for w in self.workers: 149 w.start() 150 while active_workers: 151 try: 152 chain_id, val = self.result_queue.get_nowait() 153 # This can happen when the worker process has terminated. 154 # See https://github.com/pytorch/pytorch/pull/5380 for motivation. 155 except socket.error as e: 156 if getattr(e, "errno", None) == errno.ENOENT: 157 pass 158 else: 159 raise e 160 except queue.Empty: 161 continue 162 if isinstance(val, Exception): 163 # Exception trace is already logged by worker. 164 raise val 165 elif val is not None: 166 yield val 167 else: 168 active_workers -= 1 169 finally: 170 self.terminate() 171 172 173 class _SingleSampler(TracePosterior): 174 """ 175 Single process runner class optimized for the case `num_chains=1`. 176 """ 177 def __init__(self, kernel, num_samples, warmup_steps): 178 self.kernel = kernel 179 self.warmup_steps = warmup_steps 180 self.num_samples = num_samples 181 self.logger = None 182 super(_SingleSampler, self).__init__() 183 184 def _gen_samples(self, num_samples, init_trace): 185 trace = init_trace 186 for _ in range(num_samples): 187 trace = self.kernel.sample(trace) 188 diagnostics = json.dumps(self.kernel.diagnostics()) 189 self.logger.info(diagnostics, extra={"msg_type": DIAGNOSTIC_MSG}) 190 yield trace 191 192 def _traces(self, *args, **kwargs): 193 logger_id = kwargs.pop("logger_id", "") 194 log_queue = kwargs.pop("log_queue", None) 195 self.logger = logging.getLogger("pyro.infer.mcmc") 196 is_multiprocessing = log_queue is not None 197 progress_bar = initialize_progbar(self.warmup_steps, self.num_samples) \ 198 if not is_multiprocessing else None 199 self.logger = initialize_logger(self.logger, logger_id, progress_bar, log_queue) 200 self.kernel.setup(self.warmup_steps, *args, **kwargs) 201 trace = self.kernel.initial_trace() 202 with optional(progress_bar, not is_multiprocessing): 203 for trace in self._gen_samples(self.warmup_steps, trace): 204 continue 205 if progress_bar: 206 progress_bar.set_description("Sample") 207 for trace in self._gen_samples(self.num_samples, trace): 208 yield (trace, 1.0) 209 self.kernel.cleanup() 210 211 212 class MCMC(TracePosterior): 213 """ 214 Wrapper class for Markov Chain Monte Carlo algorithms. Specific MCMC algorithms 215 are TraceKernel instances and need to be supplied as a ``kernel`` argument 216 to the constructor. 217 218 .. note:: The case of `num_chains > 1` uses python multiprocessing to 219 run parallel chains in multiple processes. This goes with the usual 220 caveats around multiprocessing in python, e.g. the model used to 221 initialize the ``kernel`` must be serializable via `pickle`, and the 222 performance / constraints will be platform dependent (e.g. only 223 the "spawn" context is available in Windows). This has also not 224 been extensively tested on the Windows platform. 225 226 :param kernel: An instance of the ``TraceKernel`` class, which when 227 given an execution trace returns another sample trace from the target 228 (posterior) distribution. 229 :param int num_samples: The number of samples that need to be generated, 230 excluding the samples discarded during the warmup phase. 231 :param int warmup_steps: Number of warmup iterations. The samples generated 232 during the warmup phase are discarded. If not provided, default is 233 half of `num_samples`. 
234 :param int num_chains: Number of MCMC chains to run in parallel. Depending on 235 whether `num_chains` is 1 or more than 1, this class internally dispatches 236 to either `_SingleSampler` or `_ParallelSampler`. 237 :param str mp_context: Multiprocessing context to use when `num_chains > 1`. 238 Only applicable for Python 3.5 and above. Use `mp_context="spawn"` for 239 CUDA. 240 """ 241 def __init__(self, kernel, num_samples, warmup_steps=0, 242 num_chains=1, mp_context=None): 243 self.warmup_steps = warmup_steps if warmup_steps is not None else num_samples // 2 # Stan 244 self.num_samples = num_samples 245 if num_chains > 1: 246 cpu_count = mp.cpu_count() 247 if num_chains > cpu_count: 248 warnings.warn("`num_chains` is more than CPU count - {}. " 249 "Resetting num_chains to CPU count.").format(cpu_count) 250 self.sampler = _ParallelSampler(kernel, num_samples, warmup_steps, 251 num_chains, mp_context) 252 else: 253 self.sampler = _SingleSampler(kernel, num_samples, warmup_steps) 254 super(MCMC, self).__init__() 255 256 def _traces(self, *args, **kwargs): 257 for sample in self.sampler._traces(*args, **kwargs): 258 yield sample 259 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pyro/infer/mcmc/mcmc.py b/pyro/infer/mcmc/mcmc.py --- a/pyro/infer/mcmc/mcmc.py +++ b/pyro/infer/mcmc/mcmc.py @@ -8,7 +8,7 @@ import sys import threading import warnings -from collections import OrderedDict +from collections import OrderedDict, deque import six from six.moves import queue @@ -144,6 +144,11 @@ # restore original handler signal.signal(signal.SIGINT, sigint_handler) active_workers = self.num_chains + # To yield a deterministic ordering, we hold intermediate traces + # from each of the workers in its own queue in `results_buffer` + # and yield these in a round robin fashion. + buffer_idx = 0 + results_buffer = [deque() for _ in range(self.num_chains)] try: for w in self.workers: w.start() @@ -162,10 +167,22 @@ if isinstance(val, Exception): # Exception trace is already logged by worker. raise val - elif val is not None: - yield val - else: + if val is None: active_workers -= 1 + else: + results_buffer[chain_id - 1].append(val) + while results_buffer[buffer_idx]: + yield results_buffer[buffer_idx].popleft() + buffer_idx = (buffer_idx + 1) % self.num_chains + # empty out the results buffer + non_empty_buffers = set(range(self.num_chains)) + while non_empty_buffers: + if results_buffer[buffer_idx]: + yield results_buffer[buffer_idx].popleft() + else: + if buffer_idx in non_empty_buffers: + non_empty_buffers.remove(buffer_idx) + buffer_idx = (buffer_idx + 1) % self.num_chains finally: self.terminate()
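The essence of this fix is the per-chain buffering: samples still arrive on the result queue in whatever order the worker processes happen to produce them, but they are parked in one deque per chain and emitted in a fixed chain-1, chain-2, ..., chain-N rotation, so the ordering consumed downstream (e.g. by TracePredictive) no longer depends on process scheduling. A standalone toy version of that round-robin logic is sketched below; the arrivals list and its values are invented for the example and are not Pyro data.
```python
# Toy sketch of the round-robin buffering used in the patch; `arrivals` is
# invented example data, not anything produced by Pyro.
from collections import deque

num_chains = 3
buffers = [deque() for _ in range(num_chains)]

# (chain_id, sample) pairs in a nondeterministic arrival order.
arrivals = [(1, "a1"), (3, "c1"), (2, "b1"), (1, "a2"), (3, "c2"), (2, "b2")]

idx = 0
ordered = []
for chain_id, sample in arrivals:
    buffers[chain_id - 1].append(sample)
    # Emit from the current chain only once it has something, then advance.
    while buffers[idx]:
        ordered.append(buffers[idx].popleft())
        idx = (idx + 1) % num_chains

# Drain whatever is still buffered once every worker has finished.
while any(buffers):
    if buffers[idx]:
        ordered.append(buffers[idx].popleft())
    idx = (idx + 1) % num_chains

# Each chain's own order is preserved by the queue, so the final interleaving
# is always the same: ['a1', 'b1', 'c1', 'a2', 'b2', 'c2'].
print(ordered)
```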
{"golden_diff": "diff --git a/pyro/infer/mcmc/mcmc.py b/pyro/infer/mcmc/mcmc.py\n--- a/pyro/infer/mcmc/mcmc.py\n+++ b/pyro/infer/mcmc/mcmc.py\n@@ -8,7 +8,7 @@\n import sys\n import threading\n import warnings\n-from collections import OrderedDict\n+from collections import OrderedDict, deque\n \n import six\n from six.moves import queue\n@@ -144,6 +144,11 @@\n # restore original handler\n signal.signal(signal.SIGINT, sigint_handler)\n active_workers = self.num_chains\n+ # To yield a deterministic ordering, we hold intermediate traces\n+ # from each of the workers in its own queue in `results_buffer`\n+ # and yield these in a round robin fashion.\n+ buffer_idx = 0\n+ results_buffer = [deque() for _ in range(self.num_chains)]\n try:\n for w in self.workers:\n w.start()\n@@ -162,10 +167,22 @@\n if isinstance(val, Exception):\n # Exception trace is already logged by worker.\n raise val\n- elif val is not None:\n- yield val\n- else:\n+ if val is None:\n active_workers -= 1\n+ else:\n+ results_buffer[chain_id - 1].append(val)\n+ while results_buffer[buffer_idx]:\n+ yield results_buffer[buffer_idx].popleft()\n+ buffer_idx = (buffer_idx + 1) % self.num_chains\n+ # empty out the results buffer\n+ non_empty_buffers = set(range(self.num_chains))\n+ while non_empty_buffers:\n+ if results_buffer[buffer_idx]:\n+ yield results_buffer[buffer_idx].popleft()\n+ else:\n+ if buffer_idx in non_empty_buffers:\n+ non_empty_buffers.remove(buffer_idx)\n+ buffer_idx = (buffer_idx + 1) % self.num_chains\n finally:\n self.terminate()\n", "issue": "HMC gives non deterministic results with parallel chaining\nEven though the `ParallelSampler` yields the same set of traces (each worker gives deterministic results), the order in which the traces are consumed might be different, which gives different results when we use `TracePredictive`. \r\n\r\nOne way to get deterministic results from HMC/NUTS with `num_chains > 1` would be to store intermediate results, and yield results from each of the workers in a round-robin fashion. 
\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport errno\nimport json\nimport logging\nimport signal\nimport socket\nimport sys\nimport threading\nimport warnings\nfrom collections import OrderedDict\n\nimport six\nfrom six.moves import queue\nimport torch\nimport torch.multiprocessing as mp\n\nimport pyro\nfrom pyro.infer import TracePosterior\nfrom pyro.infer.mcmc.logger import initialize_logger, initialize_progbar, DIAGNOSTIC_MSG, TqdmHandler\nfrom pyro.util import optional\n\n\ndef logger_thread(log_queue, warmup_steps, num_samples, num_chains):\n \"\"\"\n Logging thread that asynchronously consumes logging events from `log_queue`,\n and handles them appropriately.\n \"\"\"\n progress_bars = [initialize_progbar(warmup_steps, s, pos=i)\n for i, s in enumerate(num_samples)]\n logger = logging.getLogger(__name__)\n logger.propagate = False\n logger.addHandler(TqdmHandler())\n num_samples = [0] * len(num_samples)\n try:\n while True:\n try:\n record = log_queue.get_nowait()\n except queue.Empty:\n continue\n if record is None:\n break\n metadata, msg = record.getMessage().split(\"]\", 1)\n _, msg_type, logger_id = metadata[1:].split()\n if msg_type == DIAGNOSTIC_MSG:\n pbar_pos = int(logger_id.split(\":\")[-1]) - 1\n num_samples[pbar_pos] += 1\n if num_samples[pbar_pos] == warmup_steps:\n progress_bars[pbar_pos].set_description(\"Sample [{}]\".format(pbar_pos + 1))\n diagnostics = json.loads(msg, object_pairs_hook=OrderedDict)\n progress_bars[pbar_pos].set_postfix(diagnostics)\n progress_bars[pbar_pos].update()\n else:\n logger.handle(record)\n finally:\n for pbar in progress_bars:\n pbar.close()\n # Required to not overwrite multiple progress bars on exit.\n sys.stderr.write(\"\\n\" * num_chains)\n\n\nclass _Worker(object):\n def __init__(self, chain_id, result_queue, log_queue,\n kernel, num_samples, warmup_steps=0,\n args=None, kwargs=None):\n self.chain_id = chain_id\n self.trace_gen = _SingleSampler(kernel, num_samples=num_samples, warmup_steps=warmup_steps)\n self.args = args if args is not None else []\n self.kwargs = kwargs if kwargs is not None else {}\n self.rng_seed = torch.initial_seed()\n self.log_queue = log_queue\n self.result_queue = result_queue\n self.default_tensor_type = torch.Tensor().type()\n\n def run(self, *args, **kwargs):\n pyro.set_rng_seed(self.chain_id + self.rng_seed)\n torch.set_default_tensor_type(self.default_tensor_type)\n kwargs[\"logger_id\"] = \"CHAIN:{}\".format(self.chain_id)\n kwargs[\"log_queue\"] = self.log_queue\n try:\n for sample in self.trace_gen._traces(*args, **kwargs):\n self.result_queue.put_nowait((self.chain_id, sample))\n self.result_queue.put_nowait((self.chain_id, None))\n except Exception as e:\n self.trace_gen.logger.exception(e)\n self.result_queue.put_nowait((self.chain_id, e))\n\n\nclass _ParallelSampler(TracePosterior):\n \"\"\"\n Parallel runner class for running MCMC chains in parallel. 
This uses the\n `torch.multiprocessing` module (itself a light wrapper over the python\n `multiprocessing` module) to spin up parallel workers.\n \"\"\"\n def __init__(self, kernel, num_samples, warmup_steps, num_chains, mp_context):\n super(_ParallelSampler, self).__init__()\n self.kernel = kernel\n self.warmup_steps = warmup_steps\n self.num_chains = num_chains\n self.workers = []\n self.ctx = mp\n if mp_context:\n if six.PY2:\n raise ValueError(\"multiprocessing.get_context() is \"\n \"not supported in Python 2.\")\n self.ctx = mp.get_context(mp_context)\n self.result_queue = self.ctx.Manager().Queue()\n self.log_queue = self.ctx.Manager().Queue()\n self.logger = initialize_logger(logging.getLogger(\"pyro.infer.mcmc\"),\n \"MAIN\", log_queue=self.log_queue)\n # initialize number of samples per chain\n samples_per_chain = num_samples // num_chains\n self.num_samples = [samples_per_chain] * num_chains\n for i in range(num_samples % num_chains):\n self.num_samples[i] += 1\n self.log_thread = threading.Thread(target=logger_thread,\n args=(self.log_queue, self.warmup_steps,\n self.num_samples, self.num_chains))\n self.log_thread.daemon = True\n self.log_thread.start()\n\n def init_workers(self, *args, **kwargs):\n self.workers = []\n for i in range(self.num_chains):\n worker = _Worker(i + 1, self.result_queue, self.log_queue, self.kernel,\n self.num_samples[i], self.warmup_steps)\n worker.daemon = True\n self.workers.append(self.ctx.Process(name=str(i), target=worker.run,\n args=args, kwargs=kwargs))\n\n def terminate(self):\n if self.log_thread.is_alive():\n self.log_queue.put_nowait(None)\n self.log_thread.join(timeout=1)\n for w in self.workers:\n if w.is_alive():\n w.terminate()\n\n def _traces(self, *args, **kwargs):\n # Ignore sigint in worker processes; they will be shut down\n # when the main process terminates.\n sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)\n self.init_workers(*args, **kwargs)\n # restore original handler\n signal.signal(signal.SIGINT, sigint_handler)\n active_workers = self.num_chains\n try:\n for w in self.workers:\n w.start()\n while active_workers:\n try:\n chain_id, val = self.result_queue.get_nowait()\n # This can happen when the worker process has terminated.\n # See https://github.com/pytorch/pytorch/pull/5380 for motivation.\n except socket.error as e:\n if getattr(e, \"errno\", None) == errno.ENOENT:\n pass\n else:\n raise e\n except queue.Empty:\n continue\n if isinstance(val, Exception):\n # Exception trace is already logged by worker.\n raise val\n elif val is not None:\n yield val\n else:\n active_workers -= 1\n finally:\n self.terminate()\n\n\nclass _SingleSampler(TracePosterior):\n \"\"\"\n Single process runner class optimized for the case `num_chains=1`.\n \"\"\"\n def __init__(self, kernel, num_samples, warmup_steps):\n self.kernel = kernel\n self.warmup_steps = warmup_steps\n self.num_samples = num_samples\n self.logger = None\n super(_SingleSampler, self).__init__()\n\n def _gen_samples(self, num_samples, init_trace):\n trace = init_trace\n for _ in range(num_samples):\n trace = self.kernel.sample(trace)\n diagnostics = json.dumps(self.kernel.diagnostics())\n self.logger.info(diagnostics, extra={\"msg_type\": DIAGNOSTIC_MSG})\n yield trace\n\n def _traces(self, *args, **kwargs):\n logger_id = kwargs.pop(\"logger_id\", \"\")\n log_queue = kwargs.pop(\"log_queue\", None)\n self.logger = logging.getLogger(\"pyro.infer.mcmc\")\n is_multiprocessing = log_queue is not None\n progress_bar = initialize_progbar(self.warmup_steps, 
self.num_samples) \\\n if not is_multiprocessing else None\n self.logger = initialize_logger(self.logger, logger_id, progress_bar, log_queue)\n self.kernel.setup(self.warmup_steps, *args, **kwargs)\n trace = self.kernel.initial_trace()\n with optional(progress_bar, not is_multiprocessing):\n for trace in self._gen_samples(self.warmup_steps, trace):\n continue\n if progress_bar:\n progress_bar.set_description(\"Sample\")\n for trace in self._gen_samples(self.num_samples, trace):\n yield (trace, 1.0)\n self.kernel.cleanup()\n\n\nclass MCMC(TracePosterior):\n \"\"\"\n Wrapper class for Markov Chain Monte Carlo algorithms. Specific MCMC algorithms\n are TraceKernel instances and need to be supplied as a ``kernel`` argument\n to the constructor.\n\n .. note:: The case of `num_chains > 1` uses python multiprocessing to\n run parallel chains in multiple processes. This goes with the usual\n caveats around multiprocessing in python, e.g. the model used to\n initialize the ``kernel`` must be serializable via `pickle`, and the\n performance / constraints will be platform dependent (e.g. only\n the \"spawn\" context is available in Windows). This has also not\n been extensively tested on the Windows platform.\n\n :param kernel: An instance of the ``TraceKernel`` class, which when\n given an execution trace returns another sample trace from the target\n (posterior) distribution.\n :param int num_samples: The number of samples that need to be generated,\n excluding the samples discarded during the warmup phase.\n :param int warmup_steps: Number of warmup iterations. The samples generated\n during the warmup phase are discarded. If not provided, default is\n half of `num_samples`.\n :param int num_chains: Number of MCMC chains to run in parallel. Depending on\n whether `num_chains` is 1 or more than 1, this class internally dispatches\n to either `_SingleSampler` or `_ParallelSampler`.\n :param str mp_context: Multiprocessing context to use when `num_chains > 1`.\n Only applicable for Python 3.5 and above. Use `mp_context=\"spawn\"` for\n CUDA.\n \"\"\"\n def __init__(self, kernel, num_samples, warmup_steps=0,\n num_chains=1, mp_context=None):\n self.warmup_steps = warmup_steps if warmup_steps is not None else num_samples // 2 # Stan\n self.num_samples = num_samples\n if num_chains > 1:\n cpu_count = mp.cpu_count()\n if num_chains > cpu_count:\n warnings.warn(\"`num_chains` is more than CPU count - {}. 
\"\n \"Resetting num_chains to CPU count.\").format(cpu_count)\n self.sampler = _ParallelSampler(kernel, num_samples, warmup_steps,\n num_chains, mp_context)\n else:\n self.sampler = _SingleSampler(kernel, num_samples, warmup_steps)\n super(MCMC, self).__init__()\n\n def _traces(self, *args, **kwargs):\n for sample in self.sampler._traces(*args, **kwargs):\n yield sample\n", "path": "pyro/infer/mcmc/mcmc.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport errno\nimport json\nimport logging\nimport signal\nimport socket\nimport sys\nimport threading\nimport warnings\nfrom collections import OrderedDict, deque\n\nimport six\nfrom six.moves import queue\nimport torch\nimport torch.multiprocessing as mp\n\nimport pyro\nfrom pyro.infer import TracePosterior\nfrom pyro.infer.mcmc.logger import initialize_logger, initialize_progbar, DIAGNOSTIC_MSG, TqdmHandler\nfrom pyro.util import optional\n\n\ndef logger_thread(log_queue, warmup_steps, num_samples, num_chains):\n \"\"\"\n Logging thread that asynchronously consumes logging events from `log_queue`,\n and handles them appropriately.\n \"\"\"\n progress_bars = [initialize_progbar(warmup_steps, s, pos=i)\n for i, s in enumerate(num_samples)]\n logger = logging.getLogger(__name__)\n logger.propagate = False\n logger.addHandler(TqdmHandler())\n num_samples = [0] * len(num_samples)\n try:\n while True:\n try:\n record = log_queue.get_nowait()\n except queue.Empty:\n continue\n if record is None:\n break\n metadata, msg = record.getMessage().split(\"]\", 1)\n _, msg_type, logger_id = metadata[1:].split()\n if msg_type == DIAGNOSTIC_MSG:\n pbar_pos = int(logger_id.split(\":\")[-1]) - 1\n num_samples[pbar_pos] += 1\n if num_samples[pbar_pos] == warmup_steps:\n progress_bars[pbar_pos].set_description(\"Sample [{}]\".format(pbar_pos + 1))\n diagnostics = json.loads(msg, object_pairs_hook=OrderedDict)\n progress_bars[pbar_pos].set_postfix(diagnostics)\n progress_bars[pbar_pos].update()\n else:\n logger.handle(record)\n finally:\n for pbar in progress_bars:\n pbar.close()\n # Required to not overwrite multiple progress bars on exit.\n sys.stderr.write(\"\\n\" * num_chains)\n\n\nclass _Worker(object):\n def __init__(self, chain_id, result_queue, log_queue,\n kernel, num_samples, warmup_steps=0,\n args=None, kwargs=None):\n self.chain_id = chain_id\n self.trace_gen = _SingleSampler(kernel, num_samples=num_samples, warmup_steps=warmup_steps)\n self.args = args if args is not None else []\n self.kwargs = kwargs if kwargs is not None else {}\n self.rng_seed = torch.initial_seed()\n self.log_queue = log_queue\n self.result_queue = result_queue\n self.default_tensor_type = torch.Tensor().type()\n\n def run(self, *args, **kwargs):\n pyro.set_rng_seed(self.chain_id + self.rng_seed)\n torch.set_default_tensor_type(self.default_tensor_type)\n kwargs[\"logger_id\"] = \"CHAIN:{}\".format(self.chain_id)\n kwargs[\"log_queue\"] = self.log_queue\n try:\n for sample in self.trace_gen._traces(*args, **kwargs):\n self.result_queue.put_nowait((self.chain_id, sample))\n self.result_queue.put_nowait((self.chain_id, None))\n except Exception as e:\n self.trace_gen.logger.exception(e)\n self.result_queue.put_nowait((self.chain_id, e))\n\n\nclass _ParallelSampler(TracePosterior):\n \"\"\"\n Parallel runner class for running MCMC chains in parallel. 
This uses the\n `torch.multiprocessing` module (itself a light wrapper over the python\n `multiprocessing` module) to spin up parallel workers.\n \"\"\"\n def __init__(self, kernel, num_samples, warmup_steps, num_chains, mp_context):\n super(_ParallelSampler, self).__init__()\n self.kernel = kernel\n self.warmup_steps = warmup_steps\n self.num_chains = num_chains\n self.workers = []\n self.ctx = mp\n if mp_context:\n if six.PY2:\n raise ValueError(\"multiprocessing.get_context() is \"\n \"not supported in Python 2.\")\n self.ctx = mp.get_context(mp_context)\n self.result_queue = self.ctx.Manager().Queue()\n self.log_queue = self.ctx.Manager().Queue()\n self.logger = initialize_logger(logging.getLogger(\"pyro.infer.mcmc\"),\n \"MAIN\", log_queue=self.log_queue)\n # initialize number of samples per chain\n samples_per_chain = num_samples // num_chains\n self.num_samples = [samples_per_chain] * num_chains\n for i in range(num_samples % num_chains):\n self.num_samples[i] += 1\n self.log_thread = threading.Thread(target=logger_thread,\n args=(self.log_queue, self.warmup_steps,\n self.num_samples, self.num_chains))\n self.log_thread.daemon = True\n self.log_thread.start()\n\n def init_workers(self, *args, **kwargs):\n self.workers = []\n for i in range(self.num_chains):\n worker = _Worker(i + 1, self.result_queue, self.log_queue, self.kernel,\n self.num_samples[i], self.warmup_steps)\n worker.daemon = True\n self.workers.append(self.ctx.Process(name=str(i), target=worker.run,\n args=args, kwargs=kwargs))\n\n def terminate(self):\n if self.log_thread.is_alive():\n self.log_queue.put_nowait(None)\n self.log_thread.join(timeout=1)\n for w in self.workers:\n if w.is_alive():\n w.terminate()\n\n def _traces(self, *args, **kwargs):\n # Ignore sigint in worker processes; they will be shut down\n # when the main process terminates.\n sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)\n self.init_workers(*args, **kwargs)\n # restore original handler\n signal.signal(signal.SIGINT, sigint_handler)\n active_workers = self.num_chains\n # To yield a deterministic ordering, we hold intermediate traces\n # from each of the workers in its own queue in `results_buffer`\n # and yield these in a round robin fashion.\n buffer_idx = 0\n results_buffer = [deque() for _ in range(self.num_chains)]\n try:\n for w in self.workers:\n w.start()\n while active_workers:\n try:\n chain_id, val = self.result_queue.get_nowait()\n # This can happen when the worker process has terminated.\n # See https://github.com/pytorch/pytorch/pull/5380 for motivation.\n except socket.error as e:\n if getattr(e, \"errno\", None) == errno.ENOENT:\n pass\n else:\n raise e\n except queue.Empty:\n continue\n if isinstance(val, Exception):\n # Exception trace is already logged by worker.\n raise val\n if val is None:\n active_workers -= 1\n else:\n results_buffer[chain_id - 1].append(val)\n while results_buffer[buffer_idx]:\n yield results_buffer[buffer_idx].popleft()\n buffer_idx = (buffer_idx + 1) % self.num_chains\n # empty out the results buffer\n non_empty_buffers = set(range(self.num_chains))\n while non_empty_buffers:\n if results_buffer[buffer_idx]:\n yield results_buffer[buffer_idx].popleft()\n else:\n if buffer_idx in non_empty_buffers:\n non_empty_buffers.remove(buffer_idx)\n buffer_idx = (buffer_idx + 1) % self.num_chains\n finally:\n self.terminate()\n\n\nclass _SingleSampler(TracePosterior):\n \"\"\"\n Single process runner class optimized for the case `num_chains=1`.\n \"\"\"\n def __init__(self, kernel, num_samples, 
warmup_steps):\n self.kernel = kernel\n self.warmup_steps = warmup_steps\n self.num_samples = num_samples\n self.logger = None\n super(_SingleSampler, self).__init__()\n\n def _gen_samples(self, num_samples, init_trace):\n trace = init_trace\n for _ in range(num_samples):\n trace = self.kernel.sample(trace)\n diagnostics = json.dumps(self.kernel.diagnostics())\n self.logger.info(diagnostics, extra={\"msg_type\": DIAGNOSTIC_MSG})\n yield trace\n\n def _traces(self, *args, **kwargs):\n logger_id = kwargs.pop(\"logger_id\", \"\")\n log_queue = kwargs.pop(\"log_queue\", None)\n self.logger = logging.getLogger(\"pyro.infer.mcmc\")\n is_multiprocessing = log_queue is not None\n progress_bar = initialize_progbar(self.warmup_steps, self.num_samples) \\\n if not is_multiprocessing else None\n self.logger = initialize_logger(self.logger, logger_id, progress_bar, log_queue)\n self.kernel.setup(self.warmup_steps, *args, **kwargs)\n trace = self.kernel.initial_trace()\n with optional(progress_bar, not is_multiprocessing):\n for trace in self._gen_samples(self.warmup_steps, trace):\n continue\n if progress_bar:\n progress_bar.set_description(\"Sample\")\n for trace in self._gen_samples(self.num_samples, trace):\n yield (trace, 1.0)\n self.kernel.cleanup()\n\n\nclass MCMC(TracePosterior):\n \"\"\"\n Wrapper class for Markov Chain Monte Carlo algorithms. Specific MCMC algorithms\n are TraceKernel instances and need to be supplied as a ``kernel`` argument\n to the constructor.\n\n .. note:: The case of `num_chains > 1` uses python multiprocessing to\n run parallel chains in multiple processes. This goes with the usual\n caveats around multiprocessing in python, e.g. the model used to\n initialize the ``kernel`` must be serializable via `pickle`, and the\n performance / constraints will be platform dependent (e.g. only\n the \"spawn\" context is available in Windows). This has also not\n been extensively tested on the Windows platform.\n\n :param kernel: An instance of the ``TraceKernel`` class, which when\n given an execution trace returns another sample trace from the target\n (posterior) distribution.\n :param int num_samples: The number of samples that need to be generated,\n excluding the samples discarded during the warmup phase.\n :param int warmup_steps: Number of warmup iterations. The samples generated\n during the warmup phase are discarded. If not provided, default is\n half of `num_samples`.\n :param int num_chains: Number of MCMC chains to run in parallel. Depending on\n whether `num_chains` is 1 or more than 1, this class internally dispatches\n to either `_SingleSampler` or `_ParallelSampler`.\n :param str mp_context: Multiprocessing context to use when `num_chains > 1`.\n Only applicable for Python 3.5 and above. Use `mp_context=\"spawn\"` for\n CUDA.\n \"\"\"\n def __init__(self, kernel, num_samples, warmup_steps=0,\n num_chains=1, mp_context=None):\n self.warmup_steps = warmup_steps if warmup_steps is not None else num_samples // 2 # Stan\n self.num_samples = num_samples\n if num_chains > 1:\n cpu_count = mp.cpu_count()\n if num_chains > cpu_count:\n warnings.warn(\"`num_chains` is more than CPU count - {}. 
\"\n \"Resetting num_chains to CPU count.\").format(cpu_count)\n self.sampler = _ParallelSampler(kernel, num_samples, warmup_steps,\n num_chains, mp_context)\n else:\n self.sampler = _SingleSampler(kernel, num_samples, warmup_steps)\n super(MCMC, self).__init__()\n\n def _traces(self, *args, **kwargs):\n for sample in self.sampler._traces(*args, **kwargs):\n yield sample\n", "path": "pyro/infer/mcmc/mcmc.py"}]}
3,415
436
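(Standalone note, separate from the dataset rows above and below.) The pyro/MCMC row ends with `_ParallelSampler`, which splits `num_samples` across chains and then drains per-chain deques in round-robin order. The split logic is small enough to check in isolation; the sketch below re-implements just that helper outside pyro, so every name in it is illustrative rather than part of the library.

```python
# Re-implementation of the per-chain split from the record's _ParallelSampler.__init__:
# every chain gets num_samples // num_chains draws, and the first
# (num_samples % num_chains) chains receive one extra draw.
def split_samples(num_samples: int, num_chains: int) -> list:
    per_chain = num_samples // num_chains
    counts = [per_chain] * num_chains
    for i in range(num_samples % num_chains):
        counts[i] += 1
    return counts


if __name__ == "__main__":
    print(split_samples(10, 3))  # [4, 3, 3]
    print(split_samples(9, 3))   # [3, 3, 3]
```

Splitting this way keeps the total at exactly `num_samples` whether or not it divides evenly by the number of chains.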
gh_patches_debug_18567
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-945
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update and unify a number of metrics in `torchmetrics` docs ## 📚 Documentation Before the next feature release, it'd be nice to update the number of implemented metrics and unify this number over all occurrences within the docs/pages. **Additional context:** It looks like we've already had almost 80 metrics, so it'd be pity to underestimate these before another feature release O:] ```bash $ grep -w docs/source/references/functional.rst -e "func" | wc -l 78 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchmetrics/__about__.py` Content: ``` 1 __version__ = "0.8.0dev" 2 __author__ = "PyTorchLightning et al." 3 __author_email__ = "[email protected]" 4 __license__ = "Apache-2.0" 5 __copyright__ = f"Copyright (c) 2020-2022, {__author__}." 6 __homepage__ = "https://github.com/PyTorchLightning/metrics" 7 __docs__ = "PyTorch native Metrics" 8 __docs_url__ = "https://torchmetrics.readthedocs.io/en/stable/" 9 __long_doc__ = """ 10 Torchmetrics is a metrics API created for easy metric development and usage in both PyTorch and 11 [PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). It was originally a part of 12 Pytorch Lightning, but got split off so users could take advantage of the large collection of metrics 13 implemented without having to install Pytorch Lightning (even though we would love for you to try it out). 14 We currently have around 60+ metrics implemented and we continuously are adding more metrics, both within 15 already covered domains (classification, regression ect.) but also new domains (object detection ect.). 16 We make sure that all our metrics are rigorously tested such that you can trust them. 17 """ 18 19 __all__ = [ 20 "__author__", 21 "__author_email__", 22 "__copyright__", 23 "__docs__", 24 "__homepage__", 25 "__license__", 26 "__version__", 27 ] 28 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchmetrics/__about__.py b/torchmetrics/__about__.py --- a/torchmetrics/__about__.py +++ b/torchmetrics/__about__.py @@ -11,7 +11,7 @@ [PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). It was originally a part of Pytorch Lightning, but got split off so users could take advantage of the large collection of metrics implemented without having to install Pytorch Lightning (even though we would love for you to try it out). -We currently have around 60+ metrics implemented and we continuously are adding more metrics, both within +We currently have around 80+ metrics implemented and we continuously are adding more metrics, both within already covered domains (classification, regression ect.) but also new domains (object detection ect.). We make sure that all our metrics are rigorously tested such that you can trust them. """
{"golden_diff": "diff --git a/torchmetrics/__about__.py b/torchmetrics/__about__.py\n--- a/torchmetrics/__about__.py\n+++ b/torchmetrics/__about__.py\n@@ -11,7 +11,7 @@\n [PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). It was originally a part of\n Pytorch Lightning, but got split off so users could take advantage of the large collection of metrics\n implemented without having to install Pytorch Lightning (even though we would love for you to try it out).\n-We currently have around 60+ metrics implemented and we continuously are adding more metrics, both within\n+We currently have around 80+ metrics implemented and we continuously are adding more metrics, both within\n already covered domains (classification, regression ect.) but also new domains (object detection ect.).\n We make sure that all our metrics are rigorously tested such that you can trust them.\n \"\"\"\n", "issue": "Update and unify a number of metrics in `torchmetrics` docs\n## \ud83d\udcda Documentation\r\n\r\nBefore the next feature release, it'd be nice to update the number of implemented metrics and unify this number over all occurrences within the docs/pages.\r\n\r\n**Additional context:** It looks like we've already had almost 80 metrics, so it'd be pity to underestimate these before another feature release O:]\r\n\r\n```bash\r\n$ grep -w docs/source/references/functional.rst -e \"func\" | wc -l\r\n 78\r\n```\r\n\n", "before_files": [{"content": "__version__ = \"0.8.0dev\"\n__author__ = \"PyTorchLightning et al.\"\n__author_email__ = \"[email protected]\"\n__license__ = \"Apache-2.0\"\n__copyright__ = f\"Copyright (c) 2020-2022, {__author__}.\"\n__homepage__ = \"https://github.com/PyTorchLightning/metrics\"\n__docs__ = \"PyTorch native Metrics\"\n__docs_url__ = \"https://torchmetrics.readthedocs.io/en/stable/\"\n__long_doc__ = \"\"\"\nTorchmetrics is a metrics API created for easy metric development and usage in both PyTorch and\n[PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). It was originally a part of\nPytorch Lightning, but got split off so users could take advantage of the large collection of metrics\nimplemented without having to install Pytorch Lightning (even though we would love for you to try it out).\nWe currently have around 60+ metrics implemented and we continuously are adding more metrics, both within\nalready covered domains (classification, regression ect.) but also new domains (object detection ect.).\nWe make sure that all our metrics are rigorously tested such that you can trust them.\n\"\"\"\n\n__all__ = [\n \"__author__\",\n \"__author_email__\",\n \"__copyright__\",\n \"__docs__\",\n \"__homepage__\",\n \"__license__\",\n \"__version__\",\n]\n", "path": "torchmetrics/__about__.py"}], "after_files": [{"content": "__version__ = \"0.8.0dev\"\n__author__ = \"PyTorchLightning et al.\"\n__author_email__ = \"[email protected]\"\n__license__ = \"Apache-2.0\"\n__copyright__ = f\"Copyright (c) 2020-2022, {__author__}.\"\n__homepage__ = \"https://github.com/PyTorchLightning/metrics\"\n__docs__ = \"PyTorch native Metrics\"\n__docs_url__ = \"https://torchmetrics.readthedocs.io/en/stable/\"\n__long_doc__ = \"\"\"\nTorchmetrics is a metrics API created for easy metric development and usage in both PyTorch and\n[PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/stable/). 
It was originally a part of\nPytorch Lightning, but got split off so users could take advantage of the large collection of metrics\nimplemented without having to install Pytorch Lightning (even though we would love for you to try it out).\nWe currently have around 80+ metrics implemented and we continuously are adding more metrics, both within\nalready covered domains (classification, regression ect.) but also new domains (object detection ect.).\nWe make sure that all our metrics are rigorously tested such that you can trust them.\n\"\"\"\n\n__all__ = [\n \"__author__\",\n \"__author_email__\",\n \"__copyright__\",\n \"__docs__\",\n \"__homepage__\",\n \"__license__\",\n \"__version__\",\n]\n", "path": "torchmetrics/__about__.py"}]}
742
203
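(Standalone note.) The torchmetrics row is a pure documentation fix: the `__long_doc__` count of implemented metrics moves from 60+ to 80+, justified in the issue by a `grep -w ... -e "func" | wc -l` over `docs/source/references/functional.rst`. A Python equivalent of that count, assuming a local torchmetrics checkout with the same docs layout, would be:

```python
# Count lines of functional.rst that contain "func" as a whole word,
# mirroring the grep command quoted in the issue (run from the repo root).
import re
from pathlib import Path

rst = Path("docs/source/references/functional.rst")
hits = sum(1 for line in rst.read_text().splitlines() if re.search(r"\bfunc\b", line))
print(hits)  # the issue reports 78 at the time it was filed
```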
gh_patches_debug_454
rasdani/github-patches
git_diff
Textualize__textual-2755
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- A lone `Static` results in a `TooManyMatches` error when using `query_one` I've not dived into this beyond knocking up this example to isolate what I saw (about to head out of the door but wanted to record this as a reminder). With 0.27.0 (perhaps before too, just noting the version here for the record), this code: ```python from textual.app import App, ComposeResult from textual.widgets import Static class OneStatic( App[ None ] ): def compose( self ) -> ComposeResult: yield Static() def on_mount( self ) -> None: self.query_one( Static ).update( "Hello, World!" ) if __name__ == "__main__": OneStatic().run() ``` results in a `TooManyMatches` error being raised from the `query_one`. With very early testing this only seems to be the case with `Static` (at least, I tested with `Label` and `Button` and they're fine). I think most people would rightly find this surprising. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/textual/widgets/_tooltip.py` Content: ``` 1 from __future__ import annotations 2 3 from textual.widgets import Static 4 5 6 class Tooltip(Static): 7 DEFAULT_CSS = """ 8 Tooltip { 9 layer: _tooltips; 10 margin: 1 2; 11 padding: 1 2; 12 background: $panel; 13 width: auto; 14 height: auto; 15 constrain: inflect; 16 max-width: 40; 17 display: none; 18 } 19 """ 20 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/textual/widgets/_tooltip.py b/src/textual/widgets/_tooltip.py --- a/src/textual/widgets/_tooltip.py +++ b/src/textual/widgets/_tooltip.py @@ -3,7 +3,7 @@ from textual.widgets import Static -class Tooltip(Static): +class Tooltip(Static, inherit_css=False): DEFAULT_CSS = """ Tooltip { layer: _tooltips;
{"golden_diff": "diff --git a/src/textual/widgets/_tooltip.py b/src/textual/widgets/_tooltip.py\n--- a/src/textual/widgets/_tooltip.py\n+++ b/src/textual/widgets/_tooltip.py\n@@ -3,7 +3,7 @@\n from textual.widgets import Static\n \n \n-class Tooltip(Static):\n+class Tooltip(Static, inherit_css=False):\n DEFAULT_CSS = \"\"\"\n Tooltip {\n layer: _tooltips;\n", "issue": "A lone `Static` results in a `TooManyMatches` error when using `query_one`\nI've not dived into this beyond knocking up this example to isolate what I saw (about to head out of the door but wanted to record this as a reminder). With 0.27.0 (perhaps before too, just noting the version here for the record), this code:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.widgets import Static\r\n\r\nclass OneStatic( App[ None ] ):\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Static()\r\n\r\n def on_mount( self ) -> None:\r\n self.query_one( Static ).update( \"Hello, World!\" )\r\n\r\nif __name__ == \"__main__\":\r\n OneStatic().run()\r\n```\r\n\r\nresults in a `TooManyMatches` error being raised from the `query_one`. With very early testing this only seems to be the case with `Static` (at least, I tested with `Label` and `Button` and they're fine).\r\n\r\nI think most people would rightly find this surprising.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom textual.widgets import Static\n\n\nclass Tooltip(Static):\n DEFAULT_CSS = \"\"\"\n Tooltip {\n layer: _tooltips;\n margin: 1 2;\n padding: 1 2;\n background: $panel;\n width: auto;\n height: auto;\n constrain: inflect;\n max-width: 40;\n display: none;\n }\n \"\"\"\n", "path": "src/textual/widgets/_tooltip.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom textual.widgets import Static\n\n\nclass Tooltip(Static, inherit_css=False):\n DEFAULT_CSS = \"\"\"\n Tooltip {\n layer: _tooltips;\n margin: 1 2;\n padding: 1 2;\n background: $panel;\n width: auto;\n height: auto;\n constrain: inflect;\n max-width: 40;\n display: none;\n }\n \"\"\"\n", "path": "src/textual/widgets/_tooltip.py"}]}
612
88
gh_patches_debug_3747
rasdani/github-patches
git_diff
qutebrowser__qutebrowser-2518
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Close stdin for processes called from qutebrowser When running this as userscript: ```bash #!/bin/bash buku --debug --add "$QUTE_URL" echo "message-info 'Bookmark added to Buku!'" >> "$QUTE_FIFO" ``` [buku](https://github.com/jarun/Buku) just hangs, without producing any output (not even when adding `&> /tmp/buku.log` to the `buku` call). Running `QUTE_URL=https://www.example.com/ ./buku.sh` works fine though, and every other python application I've seen so far works... @jarun - do you have any idea what could be going wrong here? cc @Chinggis6 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `qutebrowser/misc/guiprocess.py` Content: ``` 1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: 2 3 # Copyright 2015-2016 Florian Bruhin (The Compiler) <[email protected]> 4 # 5 # This file is part of qutebrowser. 6 # 7 # qutebrowser is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU General Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # qutebrowser is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU General Public License for more details. 16 # 17 # You should have received a copy of the GNU General Public License 18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. 19 20 """A QProcess which shows notifications in the GUI.""" 21 22 import shlex 23 24 from PyQt5.QtCore import (pyqtSlot, pyqtSignal, QObject, QProcess, 25 QProcessEnvironment) 26 27 from qutebrowser.utils import message, log 28 29 # A mapping of QProcess::ErrorCode's to human-readable strings. 30 31 ERROR_STRINGS = { 32 QProcess.FailedToStart: "The process failed to start.", 33 QProcess.Crashed: "The process crashed.", 34 QProcess.Timedout: "The last waitFor...() function timed out.", 35 QProcess.WriteError: ("An error occurred when attempting to write to the " 36 "process."), 37 QProcess.ReadError: ("An error occurred when attempting to read from the " 38 "process."), 39 QProcess.UnknownError: "An unknown error occurred.", 40 } 41 42 43 class GUIProcess(QObject): 44 45 """An external process which shows notifications in the GUI. 46 47 Args: 48 cmd: The command which was started. 49 args: A list of arguments which gets passed. 50 verbose: Whether to show more messages. 51 _started: Whether the underlying process is started. 52 _proc: The underlying QProcess. 53 _what: What kind of thing is spawned (process/editor/userscript/...). 54 Used in messages. 55 56 Signals: 57 error/finished/started signals proxied from QProcess. 
58 """ 59 60 error = pyqtSignal(QProcess.ProcessError) 61 finished = pyqtSignal(int, QProcess.ExitStatus) 62 started = pyqtSignal() 63 64 def __init__(self, what, *, verbose=False, additional_env=None, 65 parent=None): 66 super().__init__(parent) 67 self._what = what 68 self.verbose = verbose 69 self._started = False 70 self.cmd = None 71 self.args = None 72 73 self._proc = QProcess(self) 74 self._proc.error.connect(self.on_error) 75 self._proc.error.connect(self.error) 76 self._proc.finished.connect(self.on_finished) 77 self._proc.finished.connect(self.finished) 78 self._proc.started.connect(self.on_started) 79 self._proc.started.connect(self.started) 80 81 if additional_env is not None: 82 procenv = QProcessEnvironment.systemEnvironment() 83 for k, v in additional_env.items(): 84 procenv.insert(k, v) 85 self._proc.setProcessEnvironment(procenv) 86 87 @pyqtSlot(QProcess.ProcessError) 88 def on_error(self, error): 89 """Show a message if there was an error while spawning.""" 90 msg = ERROR_STRINGS[error] 91 message.error("Error while spawning {}: {}".format(self._what, msg)) 92 93 @pyqtSlot(int, QProcess.ExitStatus) 94 def on_finished(self, code, status): 95 """Show a message when the process finished.""" 96 self._started = False 97 log.procs.debug("Process finished with code {}, status {}.".format( 98 code, status)) 99 if status == QProcess.CrashExit: 100 message.error("{} crashed!".format(self._what.capitalize())) 101 elif status == QProcess.NormalExit and code == 0: 102 if self.verbose: 103 message.info("{} exited successfully.".format( 104 self._what.capitalize())) 105 else: 106 assert status == QProcess.NormalExit 107 # We call this 'status' here as it makes more sense to the user - 108 # it's actually 'code'. 109 message.error("{} exited with status {}.".format( 110 self._what.capitalize(), code)) 111 112 stderr = bytes(self._proc.readAllStandardError()).decode('utf-8') 113 stdout = bytes(self._proc.readAllStandardOutput()).decode('utf-8') 114 if stdout: 115 log.procs.error("Process stdout:\n" + stdout.strip()) 116 if stderr: 117 log.procs.error("Process stderr:\n" + stderr.strip()) 118 119 @pyqtSlot() 120 def on_started(self): 121 """Called when the process started successfully.""" 122 log.procs.debug("Process started.") 123 assert not self._started 124 self._started = True 125 126 def _pre_start(self, cmd, args): 127 """Prepare starting of a QProcess.""" 128 if self._started: 129 raise ValueError("Trying to start a running QProcess!") 130 self.cmd = cmd 131 self.args = args 132 fake_cmdline = ' '.join(shlex.quote(e) for e in [cmd] + list(args)) 133 log.procs.debug("Executing: {}".format(fake_cmdline)) 134 if self.verbose: 135 message.info('Executing: ' + fake_cmdline) 136 137 def start(self, cmd, args, mode=None): 138 """Convenience wrapper around QProcess::start.""" 139 log.procs.debug("Starting process.") 140 self._pre_start(cmd, args) 141 if mode is None: 142 self._proc.start(cmd, args) 143 else: 144 self._proc.start(cmd, args, mode) 145 146 def start_detached(self, cmd, args, cwd=None): 147 """Convenience wrapper around QProcess::startDetached.""" 148 log.procs.debug("Starting detached.") 149 self._pre_start(cmd, args) 150 ok, _pid = self._proc.startDetached(cmd, args, cwd) 151 152 if ok: 153 log.procs.debug("Process started.") 154 self._started = True 155 else: 156 message.error("Error while spawning {}: {}.".format( 157 self._what, self._proc.error())) 158 159 def exit_status(self): 160 return self._proc.exitStatus() 161 ``` --- END FILES --- Please first localize the bug 
based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/qutebrowser/misc/guiprocess.py b/qutebrowser/misc/guiprocess.py --- a/qutebrowser/misc/guiprocess.py +++ b/qutebrowser/misc/guiprocess.py @@ -142,6 +142,7 @@ self._proc.start(cmd, args) else: self._proc.start(cmd, args, mode) + self._proc.closeWriteChannel() def start_detached(self, cmd, args, cwd=None): """Convenience wrapper around QProcess::startDetached."""
{"golden_diff": "diff --git a/qutebrowser/misc/guiprocess.py b/qutebrowser/misc/guiprocess.py\n--- a/qutebrowser/misc/guiprocess.py\n+++ b/qutebrowser/misc/guiprocess.py\n@@ -142,6 +142,7 @@\n self._proc.start(cmd, args)\n else:\n self._proc.start(cmd, args, mode)\n+ self._proc.closeWriteChannel()\n \n def start_detached(self, cmd, args, cwd=None):\n \"\"\"Convenience wrapper around QProcess::startDetached.\"\"\"\n", "issue": "Close stdin for processes called from qutebrowser\nWhen running this as userscript:\r\n\r\n```bash\r\n#!/bin/bash\r\nbuku --debug --add \"$QUTE_URL\"\r\necho \"message-info 'Bookmark added to Buku!'\" >> \"$QUTE_FIFO\"\r\n```\r\n\r\n[buku](https://github.com/jarun/Buku) just hangs, without producing any output (not even when adding `&> /tmp/buku.log` to the `buku` call).\r\n\r\nRunning `QUTE_URL=https://www.example.com/ ./buku.sh` works fine though, and every other python application I've seen so far works...\r\n\r\n@jarun - do you have any idea what could be going wrong here?\r\n\r\ncc @Chinggis6\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2015-2016 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"A QProcess which shows notifications in the GUI.\"\"\"\n\nimport shlex\n\nfrom PyQt5.QtCore import (pyqtSlot, pyqtSignal, QObject, QProcess,\n QProcessEnvironment)\n\nfrom qutebrowser.utils import message, log\n\n# A mapping of QProcess::ErrorCode's to human-readable strings.\n\nERROR_STRINGS = {\n QProcess.FailedToStart: \"The process failed to start.\",\n QProcess.Crashed: \"The process crashed.\",\n QProcess.Timedout: \"The last waitFor...() function timed out.\",\n QProcess.WriteError: (\"An error occurred when attempting to write to the \"\n \"process.\"),\n QProcess.ReadError: (\"An error occurred when attempting to read from the \"\n \"process.\"),\n QProcess.UnknownError: \"An unknown error occurred.\",\n}\n\n\nclass GUIProcess(QObject):\n\n \"\"\"An external process which shows notifications in the GUI.\n\n Args:\n cmd: The command which was started.\n args: A list of arguments which gets passed.\n verbose: Whether to show more messages.\n _started: Whether the underlying process is started.\n _proc: The underlying QProcess.\n _what: What kind of thing is spawned (process/editor/userscript/...).\n Used in messages.\n\n Signals:\n error/finished/started signals proxied from QProcess.\n \"\"\"\n\n error = pyqtSignal(QProcess.ProcessError)\n finished = pyqtSignal(int, QProcess.ExitStatus)\n started = pyqtSignal()\n\n def __init__(self, what, *, verbose=False, additional_env=None,\n parent=None):\n super().__init__(parent)\n self._what = what\n self.verbose = verbose\n self._started = False\n self.cmd = None\n self.args = None\n\n self._proc = QProcess(self)\n self._proc.error.connect(self.on_error)\n self._proc.error.connect(self.error)\n self._proc.finished.connect(self.on_finished)\n self._proc.finished.connect(self.finished)\n self._proc.started.connect(self.on_started)\n self._proc.started.connect(self.started)\n\n if additional_env is not None:\n procenv = QProcessEnvironment.systemEnvironment()\n for k, v in additional_env.items():\n procenv.insert(k, v)\n self._proc.setProcessEnvironment(procenv)\n\n @pyqtSlot(QProcess.ProcessError)\n def on_error(self, error):\n \"\"\"Show a message if there was an error while spawning.\"\"\"\n msg = ERROR_STRINGS[error]\n message.error(\"Error while spawning {}: {}\".format(self._what, msg))\n\n @pyqtSlot(int, QProcess.ExitStatus)\n def on_finished(self, code, status):\n \"\"\"Show a message when the process finished.\"\"\"\n self._started = False\n log.procs.debug(\"Process finished with code {}, status {}.\".format(\n code, status))\n if status == QProcess.CrashExit:\n message.error(\"{} crashed!\".format(self._what.capitalize()))\n elif status == QProcess.NormalExit and code == 0:\n if self.verbose:\n message.info(\"{} exited successfully.\".format(\n self._what.capitalize()))\n else:\n assert status == QProcess.NormalExit\n # We call this 'status' here as it makes more sense to the user -\n # it's actually 'code'.\n message.error(\"{} exited with status {}.\".format(\n self._what.capitalize(), code))\n\n stderr = bytes(self._proc.readAllStandardError()).decode('utf-8')\n stdout = bytes(self._proc.readAllStandardOutput()).decode('utf-8')\n if stdout:\n log.procs.error(\"Process stdout:\\n\" + stdout.strip())\n if stderr:\n log.procs.error(\"Process stderr:\\n\" + stderr.strip())\n\n @pyqtSlot()\n def on_started(self):\n \"\"\"Called when the process started successfully.\"\"\"\n log.procs.debug(\"Process started.\")\n assert not self._started\n self._started = True\n\n def 
_pre_start(self, cmd, args):\n \"\"\"Prepare starting of a QProcess.\"\"\"\n if self._started:\n raise ValueError(\"Trying to start a running QProcess!\")\n self.cmd = cmd\n self.args = args\n fake_cmdline = ' '.join(shlex.quote(e) for e in [cmd] + list(args))\n log.procs.debug(\"Executing: {}\".format(fake_cmdline))\n if self.verbose:\n message.info('Executing: ' + fake_cmdline)\n\n def start(self, cmd, args, mode=None):\n \"\"\"Convenience wrapper around QProcess::start.\"\"\"\n log.procs.debug(\"Starting process.\")\n self._pre_start(cmd, args)\n if mode is None:\n self._proc.start(cmd, args)\n else:\n self._proc.start(cmd, args, mode)\n\n def start_detached(self, cmd, args, cwd=None):\n \"\"\"Convenience wrapper around QProcess::startDetached.\"\"\"\n log.procs.debug(\"Starting detached.\")\n self._pre_start(cmd, args)\n ok, _pid = self._proc.startDetached(cmd, args, cwd)\n\n if ok:\n log.procs.debug(\"Process started.\")\n self._started = True\n else:\n message.error(\"Error while spawning {}: {}.\".format(\n self._what, self._proc.error()))\n\n def exit_status(self):\n return self._proc.exitStatus()\n", "path": "qutebrowser/misc/guiprocess.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2015-2016 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"A QProcess which shows notifications in the GUI.\"\"\"\n\nimport shlex\n\nfrom PyQt5.QtCore import (pyqtSlot, pyqtSignal, QObject, QProcess,\n QProcessEnvironment)\n\nfrom qutebrowser.utils import message, log\n\n# A mapping of QProcess::ErrorCode's to human-readable strings.\n\nERROR_STRINGS = {\n QProcess.FailedToStart: \"The process failed to start.\",\n QProcess.Crashed: \"The process crashed.\",\n QProcess.Timedout: \"The last waitFor...() function timed out.\",\n QProcess.WriteError: (\"An error occurred when attempting to write to the \"\n \"process.\"),\n QProcess.ReadError: (\"An error occurred when attempting to read from the \"\n \"process.\"),\n QProcess.UnknownError: \"An unknown error occurred.\",\n}\n\n\nclass GUIProcess(QObject):\n\n \"\"\"An external process which shows notifications in the GUI.\n\n Args:\n cmd: The command which was started.\n args: A list of arguments which gets passed.\n verbose: Whether to show more messages.\n _started: Whether the underlying process is started.\n _proc: The underlying QProcess.\n _what: What kind of thing is spawned (process/editor/userscript/...).\n Used in messages.\n\n Signals:\n error/finished/started signals proxied from QProcess.\n \"\"\"\n\n error = pyqtSignal(QProcess.ProcessError)\n finished = pyqtSignal(int, QProcess.ExitStatus)\n started = pyqtSignal()\n\n def __init__(self, what, *, verbose=False, additional_env=None,\n parent=None):\n super().__init__(parent)\n self._what = what\n self.verbose = verbose\n self._started = False\n self.cmd = None\n self.args = None\n\n self._proc = QProcess(self)\n self._proc.error.connect(self.on_error)\n self._proc.error.connect(self.error)\n self._proc.finished.connect(self.on_finished)\n self._proc.finished.connect(self.finished)\n self._proc.started.connect(self.on_started)\n self._proc.started.connect(self.started)\n\n if additional_env is not None:\n procenv = QProcessEnvironment.systemEnvironment()\n for k, v in additional_env.items():\n procenv.insert(k, v)\n self._proc.setProcessEnvironment(procenv)\n\n @pyqtSlot(QProcess.ProcessError)\n def on_error(self, error):\n \"\"\"Show a message if there was an error while spawning.\"\"\"\n msg = ERROR_STRINGS[error]\n message.error(\"Error while spawning {}: {}\".format(self._what, msg))\n\n @pyqtSlot(int, QProcess.ExitStatus)\n def on_finished(self, code, status):\n \"\"\"Show a message when the process finished.\"\"\"\n self._started = False\n log.procs.debug(\"Process finished with code {}, status {}.\".format(\n code, status))\n if status == QProcess.CrashExit:\n message.error(\"{} crashed!\".format(self._what.capitalize()))\n elif status == QProcess.NormalExit and code == 0:\n if self.verbose:\n message.info(\"{} exited successfully.\".format(\n self._what.capitalize()))\n else:\n assert status == QProcess.NormalExit\n # We call this 'status' here as it makes more sense to the user -\n # it's actually 'code'.\n message.error(\"{} exited with status {}.\".format(\n self._what.capitalize(), code))\n\n stderr = bytes(self._proc.readAllStandardError()).decode('utf-8')\n stdout = bytes(self._proc.readAllStandardOutput()).decode('utf-8')\n if stdout:\n log.procs.error(\"Process stdout:\\n\" + stdout.strip())\n if stderr:\n log.procs.error(\"Process stderr:\\n\" + stderr.strip())\n\n @pyqtSlot()\n def on_started(self):\n \"\"\"Called when the process started successfully.\"\"\"\n log.procs.debug(\"Process started.\")\n assert not self._started\n self._started = True\n\n def 
_pre_start(self, cmd, args):\n \"\"\"Prepare starting of a QProcess.\"\"\"\n if self._started:\n raise ValueError(\"Trying to start a running QProcess!\")\n self.cmd = cmd\n self.args = args\n fake_cmdline = ' '.join(shlex.quote(e) for e in [cmd] + list(args))\n log.procs.debug(\"Executing: {}\".format(fake_cmdline))\n if self.verbose:\n message.info('Executing: ' + fake_cmdline)\n\n def start(self, cmd, args, mode=None):\n \"\"\"Convenience wrapper around QProcess::start.\"\"\"\n log.procs.debug(\"Starting process.\")\n self._pre_start(cmd, args)\n if mode is None:\n self._proc.start(cmd, args)\n else:\n self._proc.start(cmd, args, mode)\n self._proc.closeWriteChannel()\n\n def start_detached(self, cmd, args, cwd=None):\n \"\"\"Convenience wrapper around QProcess::startDetached.\"\"\"\n log.procs.debug(\"Starting detached.\")\n self._pre_start(cmd, args)\n ok, _pid = self._proc.startDetached(cmd, args, cwd)\n\n if ok:\n log.procs.debug(\"Process started.\")\n self._started = True\n else:\n message.error(\"Error while spawning {}: {}.\".format(\n self._what, self._proc.error()))\n\n def exit_status(self):\n return self._proc.exitStatus()\n", "path": "qutebrowser/misc/guiprocess.py"}]}
2,144
122
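(Standalone note.) The qutebrowser fix is a single call to `QProcess.closeWriteChannel()` right after `start()`, so spawned children such as buku see EOF on stdin instead of blocking on a pipe that never closes. The same effect can be shown with the standard library alone; the snippet assumes a Unix-like system where `cat` is available.

```python
# A child that reads stdin blocks until it sees EOF; explicitly closing the
# write end of the pipe is the subprocess counterpart of closeWriteChannel().
import subprocess

p = subprocess.Popen(["cat"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Without the close below, cat would sit in read() forever -- the same hang
# the buku userscript ran into when qutebrowser left stdin open.
p.stdin.close()
print(p.stdout.read())   # b'' -- cat saw EOF, wrote nothing, and exited
p.wait()
```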
gh_patches_debug_22794
rasdani/github-patches
git_diff
ultrabug__py3status-2007
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Clock terminated with Exception With the latest Manjaro Testing Update, I received version 3.32 with Python 3.9.1. All modules still work except the clock module which is terminated. The journal simply says ```Exception in `i3pystatus clock` post_config_hook().``` The config didn't change and works with 3.31: ``` clock { format = "{Local}" format_time = "{icon} %a, %d.%m.%Y %H:%M" } ``` Downgrading to 3.31 works. What else information do you need? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `py3status/storage.py` Content: ``` 1 import os 2 import time 3 4 from pathlib import Path 5 from pickle import dump, load 6 from tempfile import NamedTemporaryFile 7 8 9 class Storage: 10 11 data = {} 12 initialized = False 13 14 def init(self, py3_wrapper): 15 self.py3_wrapper = py3_wrapper 16 self.config = py3_wrapper.config 17 py3_config = self.config.get("py3_config", {}) 18 19 # check for legacy storage cache 20 legacy_storage_path = self.get_legacy_storage_path() 21 22 # cutting edge storage cache 23 storage_config = py3_config.get("py3status", {}).get("storage") 24 if storage_config: 25 storage_file = os.path.expandvars(storage_config.expanduser()) 26 if "/" in storage_file: 27 storage_dir = None 28 else: 29 storage_dir = os.environ.get("XDG_CACHE_HOME") 30 else: 31 storage_dir = os.environ.get("XDG_CACHE_HOME") 32 storage_file = Path("py3status_cache.data") 33 34 if not storage_dir: 35 storage_dir = Path("~/.cache").expanduser() 36 self.storage_path = storage_dir / storage_file 37 38 # move legacy storage cache to new desired / default location 39 if legacy_storage_path: 40 self.py3_wrapper.log( 41 "moving legacy storage_path {} to {}".format( 42 legacy_storage_path, self.storage_path 43 ) 44 ) 45 legacy_storage_path.rename(self.storage_path) 46 47 try: 48 with self.storage_path.open("rb") as f: 49 self.data = load(f, encoding="bytes") 50 except OSError: 51 pass 52 53 self.py3_wrapper.log(f"storage_path: {self.storage_path}") 54 if self.data: 55 self.py3_wrapper.log(f"storage_data: {self.data}") 56 self.initialized = True 57 58 def get_legacy_storage_path(self): 59 """ 60 Detect and return existing legacy storage path. 61 """ 62 config_dir = Path( 63 self.py3_wrapper.config.get("i3status_config_path", "/tmp") 64 ).parent 65 storage_path = config_dir / "py3status.data" 66 if storage_path.exists(): 67 return storage_path 68 else: 69 return None 70 71 def save(self): 72 """ 73 Save our data to disk. We want to always have a valid file. 
74 """ 75 with NamedTemporaryFile(dir=self.storage_path.parent, delete=False) as f: 76 # we use protocol=2 for python 2/3 compatibility 77 dump(self.data, f, protocol=2) 78 f.flush() 79 os.fsync(f.fileno()) 80 tmppath = Path(f.name) 81 tmppath.rename(self.storage_path) 82 83 def storage_set(self, module_name, key, value): 84 if key.startswith("_"): 85 raise ValueError('cannot set keys starting with an underscore "_"') 86 87 if self.data.get(module_name, {}).get(key) == value: 88 return 89 90 if module_name not in self.data: 91 self.data[module_name] = {} 92 self.data[module_name][key] = value 93 ts = time.time() 94 if "_ctime" not in self.data[module_name]: 95 self.data[module_name]["_ctime"] = ts 96 self.data[module_name]["_mtime"] = ts 97 self.save() 98 99 def storage_get(self, module_name, key): 100 return self.data.get(module_name, {}).get(key, None) 101 102 def storage_del(self, module_name, key=None): 103 if module_name in self.data and key in self.data[module_name]: 104 del self.data[module_name][key] 105 self.save() 106 107 def storage_keys(self, module_name): 108 return list(self.data.get(module_name, {})) 109 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/py3status/storage.py b/py3status/storage.py --- a/py3status/storage.py +++ b/py3status/storage.py @@ -22,18 +22,18 @@ # cutting edge storage cache storage_config = py3_config.get("py3status", {}).get("storage") if storage_config: - storage_file = os.path.expandvars(storage_config.expanduser()) + storage_file = os.path.expandvars(os.path.expanduser(storage_config)) if "/" in storage_file: storage_dir = None else: storage_dir = os.environ.get("XDG_CACHE_HOME") else: storage_dir = os.environ.get("XDG_CACHE_HOME") - storage_file = Path("py3status_cache.data") + storage_file = "py3status_cache.data" if not storage_dir: storage_dir = Path("~/.cache").expanduser() - self.storage_path = storage_dir / storage_file + self.storage_path = Path(storage_dir, storage_file) # move legacy storage cache to new desired / default location if legacy_storage_path:
{"golden_diff": "diff --git a/py3status/storage.py b/py3status/storage.py\n--- a/py3status/storage.py\n+++ b/py3status/storage.py\n@@ -22,18 +22,18 @@\n # cutting edge storage cache\n storage_config = py3_config.get(\"py3status\", {}).get(\"storage\")\n if storage_config:\n- storage_file = os.path.expandvars(storage_config.expanduser())\n+ storage_file = os.path.expandvars(os.path.expanduser(storage_config))\n if \"/\" in storage_file:\n storage_dir = None\n else:\n storage_dir = os.environ.get(\"XDG_CACHE_HOME\")\n else:\n storage_dir = os.environ.get(\"XDG_CACHE_HOME\")\n- storage_file = Path(\"py3status_cache.data\")\n+ storage_file = \"py3status_cache.data\"\n \n if not storage_dir:\n storage_dir = Path(\"~/.cache\").expanduser()\n- self.storage_path = storage_dir / storage_file\n+ self.storage_path = Path(storage_dir, storage_file)\n \n # move legacy storage cache to new desired / default location\n if legacy_storage_path:\n", "issue": "Clock terminated with Exception\nWith the latest Manjaro Testing Update, I received version 3.32 with Python 3.9.1.\r\n\r\nAll modules still work except the clock module which is terminated. The journal simply says\r\n```Exception in `i3pystatus clock` post_config_hook().```\r\n\r\nThe config didn't change and works with 3.31:\r\n```\r\nclock {\r\n format = \"{Local}\"\r\n format_time = \"{icon} %a, %d.%m.%Y %H:%M\"\r\n}\r\n```\r\n\r\nDowngrading to 3.31 works. What else information do you need?\n", "before_files": [{"content": "import os\nimport time\n\nfrom pathlib import Path\nfrom pickle import dump, load\nfrom tempfile import NamedTemporaryFile\n\n\nclass Storage:\n\n data = {}\n initialized = False\n\n def init(self, py3_wrapper):\n self.py3_wrapper = py3_wrapper\n self.config = py3_wrapper.config\n py3_config = self.config.get(\"py3_config\", {})\n\n # check for legacy storage cache\n legacy_storage_path = self.get_legacy_storage_path()\n\n # cutting edge storage cache\n storage_config = py3_config.get(\"py3status\", {}).get(\"storage\")\n if storage_config:\n storage_file = os.path.expandvars(storage_config.expanduser())\n if \"/\" in storage_file:\n storage_dir = None\n else:\n storage_dir = os.environ.get(\"XDG_CACHE_HOME\")\n else:\n storage_dir = os.environ.get(\"XDG_CACHE_HOME\")\n storage_file = Path(\"py3status_cache.data\")\n\n if not storage_dir:\n storage_dir = Path(\"~/.cache\").expanduser()\n self.storage_path = storage_dir / storage_file\n\n # move legacy storage cache to new desired / default location\n if legacy_storage_path:\n self.py3_wrapper.log(\n \"moving legacy storage_path {} to {}\".format(\n legacy_storage_path, self.storage_path\n )\n )\n legacy_storage_path.rename(self.storage_path)\n\n try:\n with self.storage_path.open(\"rb\") as f:\n self.data = load(f, encoding=\"bytes\")\n except OSError:\n pass\n\n self.py3_wrapper.log(f\"storage_path: {self.storage_path}\")\n if self.data:\n self.py3_wrapper.log(f\"storage_data: {self.data}\")\n self.initialized = True\n\n def get_legacy_storage_path(self):\n \"\"\"\n Detect and return existing legacy storage path.\n \"\"\"\n config_dir = Path(\n self.py3_wrapper.config.get(\"i3status_config_path\", \"/tmp\")\n ).parent\n storage_path = config_dir / \"py3status.data\"\n if storage_path.exists():\n return storage_path\n else:\n return None\n\n def save(self):\n \"\"\"\n Save our data to disk. 
We want to always have a valid file.\n \"\"\"\n with NamedTemporaryFile(dir=self.storage_path.parent, delete=False) as f:\n # we use protocol=2 for python 2/3 compatibility\n dump(self.data, f, protocol=2)\n f.flush()\n os.fsync(f.fileno())\n tmppath = Path(f.name)\n tmppath.rename(self.storage_path)\n\n def storage_set(self, module_name, key, value):\n if key.startswith(\"_\"):\n raise ValueError('cannot set keys starting with an underscore \"_\"')\n\n if self.data.get(module_name, {}).get(key) == value:\n return\n\n if module_name not in self.data:\n self.data[module_name] = {}\n self.data[module_name][key] = value\n ts = time.time()\n if \"_ctime\" not in self.data[module_name]:\n self.data[module_name][\"_ctime\"] = ts\n self.data[module_name][\"_mtime\"] = ts\n self.save()\n\n def storage_get(self, module_name, key):\n return self.data.get(module_name, {}).get(key, None)\n\n def storage_del(self, module_name, key=None):\n if module_name in self.data and key in self.data[module_name]:\n del self.data[module_name][key]\n self.save()\n\n def storage_keys(self, module_name):\n return list(self.data.get(module_name, {}))\n", "path": "py3status/storage.py"}], "after_files": [{"content": "import os\nimport time\n\nfrom pathlib import Path\nfrom pickle import dump, load\nfrom tempfile import NamedTemporaryFile\n\n\nclass Storage:\n\n data = {}\n initialized = False\n\n def init(self, py3_wrapper):\n self.py3_wrapper = py3_wrapper\n self.config = py3_wrapper.config\n py3_config = self.config.get(\"py3_config\", {})\n\n # check for legacy storage cache\n legacy_storage_path = self.get_legacy_storage_path()\n\n # cutting edge storage cache\n storage_config = py3_config.get(\"py3status\", {}).get(\"storage\")\n if storage_config:\n storage_file = os.path.expandvars(os.path.expanduser(storage_config))\n if \"/\" in storage_file:\n storage_dir = None\n else:\n storage_dir = os.environ.get(\"XDG_CACHE_HOME\")\n else:\n storage_dir = os.environ.get(\"XDG_CACHE_HOME\")\n storage_file = \"py3status_cache.data\"\n\n if not storage_dir:\n storage_dir = Path(\"~/.cache\").expanduser()\n self.storage_path = Path(storage_dir, storage_file)\n\n # move legacy storage cache to new desired / default location\n if legacy_storage_path:\n self.py3_wrapper.log(\n \"moving legacy storage_path {} to {}\".format(\n legacy_storage_path, self.storage_path\n )\n )\n legacy_storage_path.rename(self.storage_path)\n\n try:\n with self.storage_path.open(\"rb\") as f:\n self.data = load(f, encoding=\"bytes\")\n except OSError:\n pass\n\n self.py3_wrapper.log(f\"storage_path: {self.storage_path}\")\n if self.data:\n self.py3_wrapper.log(f\"storage_data: {self.data}\")\n self.initialized = True\n\n def get_legacy_storage_path(self):\n \"\"\"\n Detect and return existing legacy storage path.\n \"\"\"\n config_dir = Path(\n self.py3_wrapper.config.get(\"i3status_config_path\", \"/tmp\")\n ).parent\n storage_path = config_dir / \"py3status.data\"\n if storage_path.exists():\n return storage_path\n else:\n return None\n\n def save(self):\n \"\"\"\n Save our data to disk. 
We want to always have a valid file.\n \"\"\"\n with NamedTemporaryFile(dir=self.storage_path.parent, delete=False) as f:\n # we use protocol=2 for python 2/3 compatibility\n dump(self.data, f, protocol=2)\n f.flush()\n os.fsync(f.fileno())\n tmppath = Path(f.name)\n tmppath.rename(self.storage_path)\n\n def storage_set(self, module_name, key, value):\n if key.startswith(\"_\"):\n raise ValueError('cannot set keys starting with an underscore \"_\"')\n\n if self.data.get(module_name, {}).get(key) == value:\n return\n\n if module_name not in self.data:\n self.data[module_name] = {}\n self.data[module_name][key] = value\n ts = time.time()\n if \"_ctime\" not in self.data[module_name]:\n self.data[module_name][\"_ctime\"] = ts\n self.data[module_name][\"_mtime\"] = ts\n self.save()\n\n def storage_get(self, module_name, key):\n return self.data.get(module_name, {}).get(key, None)\n\n def storage_del(self, module_name, key=None):\n if module_name in self.data and key in self.data[module_name]:\n del self.data[module_name][key]\n self.save()\n\n def storage_keys(self, module_name):\n return list(self.data.get(module_name, {}))\n", "path": "py3status/storage.py"}]}
1,383
240
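(Standalone note.) The py3status failure boils down to calling `.expanduser()` on a plain string read from the config; only `pathlib.Path` has that method, which is why the golden diff switches to `os.path.expanduser(...)` and builds the final path with `Path(storage_dir, storage_file)`. A minimal reproduction of the failing call and the patched form — the config value below is hypothetical:

```python
import os
from pathlib import Path

storage_config = "~/.config/py3status/py3status.data"   # hypothetical config value (a str)

try:
    storage_config.expanduser()                          # what the 3.32 code path did
except AttributeError as exc:
    print(exc)                                           # 'str' object has no attribute 'expanduser'

storage_file = os.path.expandvars(os.path.expanduser(storage_config))  # patched form
print(Path(storage_file))
```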
gh_patches_debug_1106
rasdani/github-patches
git_diff
saulpw__visidata-1310
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [v2.9dev] Disable adding new row in DirSheet **Small description** Unless used, `add-row` should probably be disabled on DirSheet as it creates an error **Expected result** A warning to be shown to the user that a new row/file cannot be created. **Actual result with screenshot** ![image](https://user-images.githubusercontent.com/11225502/155373843-aeef6d8e-cc96-4559-b3e6-dc9690aab25f.png) **Steps to reproduce with sample data and a .vd** Open vd directory with vd (`vd .`) and press `a` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `visidata/shell.py` Content: ``` 1 import os 2 import shutil 3 import stat 4 import subprocess 5 import contextlib 6 try: 7 import pwd 8 import grp 9 except ImportError: 10 pass # pwd,grp modules not available on Windows 11 12 from visidata import Column, Sheet, LazyComputeRow, asynccache, BaseSheet, vd 13 from visidata import Path, ENTER, date, asyncthread, FileExistsError, VisiData 14 from visidata import modtime, filesize, vstat, Progress, TextSheet 15 16 17 vd.option('dir_recurse', False, 'walk source path recursively on DirSheet') 18 vd.option('dir_hidden', False, 'load hidden files on DirSheet') 19 20 21 @VisiData.lazy_property 22 def currentDirSheet(p): 23 'Support opening the current DirSheet from the vdmenu' 24 return DirSheet('.', source=Path('.')) 25 26 @asyncthread 27 def exec_shell(*args): 28 p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) 29 out, err = p.communicate() 30 if err or out: 31 lines = err.decode('utf8').splitlines() + out.decode('utf8').splitlines() 32 vd.push(TextSheet(' '.join(args), source=lines)) 33 34 35 @VisiData.api 36 def open_dir(vd, p): 37 return DirSheet(p.name, source=p) 38 39 @VisiData.api 40 def open_fdir(vd, p): 41 return FileListSheet(p.name, source=p) 42 43 @VisiData.api 44 def addShellColumns(vd, cmd, sheet): 45 shellcol = ColumnShell(cmd, source=sheet, width=0) 46 sheet.addColumnAtCursor( 47 shellcol, 48 Column(cmd+'_stdout', srccol=shellcol, getter=lambda col,row: col.srccol.getValue(row)[0]), 49 Column(cmd+'_stderr', srccol=shellcol, getter=lambda col,row: col.srccol.getValue(row)[1])) 50 51 52 class ColumnShell(Column): 53 def __init__(self, name, cmd=None, **kwargs): 54 super().__init__(name, **kwargs) 55 self.expr = cmd or name 56 57 @asynccache(lambda col,row: (col, col.sheet.rowid(row))) 58 def calcValue(self, row): 59 try: 60 import shlex 61 args = [] 62 context = LazyComputeRow(self.source, row) 63 for arg in shlex.split(self.expr): 64 if arg.startswith('$'): 65 args.append(shlex.quote(str(context[arg[1:]]))) 66 else: 67 args.append(arg) 68 69 p = subprocess.Popen([os.getenv('SHELL', 'bash'), '-c', ' '.join(args)], 70 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) 71 return p.communicate() 72 except Exception as e: 73 vd.exceptionCaught(e) 74 75 76 class DirSheet(Sheet): 77 'Sheet displaying directory, using ENTER to open a particular file. Edited fields are applied to the filesystem.' 78 rowtype = 'files' # rowdef: Path 79 defer = True 80 columns = [ 81 Column('directory', 82 getter=lambda col,row: str(row.parent) if str(row.parent) == '.' 
else str(row.parent) + '/', 83 setter=lambda col,row,val: col.sheet.moveFile(row, val)), 84 Column('filename', 85 getter=lambda col,row: row.name + row.suffix, 86 setter=lambda col,row,val: col.sheet.renameFile(row, val)), 87 Column('abspath', width=0, type=str, 88 getter=lambda col,row: row, 89 setter=lambda col,row,val: os.rename(row, val)), 90 Column('ext', getter=lambda col,row: row.is_dir() and '/' or row.ext), 91 Column('size', type=int, 92 getter=lambda col,row: filesize(row), 93 setter=lambda col,row,val: os.truncate(row, int(val))), 94 Column('modtime', type=date, 95 getter=lambda col,row: modtime(row), 96 setter=lambda col,row,val: os.utime(row, times=((row.stat().st_atime, float(val))))), 97 Column('owner', width=0, 98 getter=lambda col,row: pwd.getpwuid(row.stat().st_uid).pw_name, 99 setter=lambda col,row,val: os.chown(row, pwd.getpwnam(val).pw_uid, -1)), 100 Column('group', width=0, 101 getter=lambda col,row: grp.getgrgid(row.stat().st_gid).gr_name, 102 setter=lambda col,row,val: os.chown(row, -1, grp.getgrnam(val).pw_gid)), 103 Column('mode', width=0, 104 getter=lambda col,row: '{:o}'.format(row.stat().st_mode), 105 setter=lambda col,row,val: os.chmod(row, int(val, 8))), 106 Column('filetype', width=0, cache='async', getter=lambda col,row: subprocess.Popen(['file', '--brief', row], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0].strip()), 107 ] 108 nKeys = 2 109 _ordering = [('modtime', True)] # sort by reverse modtime initially 110 111 @staticmethod 112 def colorOwner(sheet, col, row, val): 113 ret = '' 114 if col.name == 'group': 115 mode = row.stat().st_mode 116 if mode & stat.S_IXGRP: ret = 'bold ' 117 if mode & stat.S_IWGRP: return ret + 'green' 118 if mode & stat.S_IRGRP: return ret + 'yellow' 119 elif col.name == 'owner': 120 mode = row.stat().st_mode 121 if mode & stat.S_IXUSR: ret = 'bold ' 122 if mode & stat.S_IWUSR: return ret + 'green' 123 if mode & stat.S_IRUSR: return ret + 'yellow' 124 125 def moveFile(self, row, newparent): 126 parent = Path(newparent) 127 newpath = Path(parent/(row.name + row.suffix)) 128 if parent.exists(): 129 if not parent.is_dir(): 130 vd.error('destination %s not a directory' % parent) 131 else: 132 with contextlib.suppress(FileExistsError): 133 os.makedirs(parent) 134 135 row.rename(newpath) 136 row.given = newpath # modify visidata.Path 137 self.restat() 138 139 def renameFile(self, row, val): 140 newpath = row.with_name(val) 141 row.rename(newpath) 142 row.given = newpath 143 self.restat() 144 145 def removeFile(self, path): 146 if path.is_dir(): 147 os.rmdir(path) 148 else: 149 path.unlink() 150 151 def deleteSourceRow(self, r): 152 self.removeFile(r) 153 154 def iterload(self): 155 hidden_files = self.options.dir_hidden 156 157 def _walkfiles(p): 158 basepath = str(p) 159 for folder, subdirs, files in os.walk(basepath): 160 subfolder = folder[len(basepath)+1:] 161 if not hidden_files and subfolder.startswith('.'): continue 162 if subfolder in ['.', '..']: continue 163 164 fpath = Path(folder) 165 yield fpath 166 167 for fn in files: 168 yield fpath/fn 169 170 def _listfiles(p): 171 basepath = str(p) 172 for fn in os.listdir(basepath): 173 yield p/fn 174 175 176 basepath = str(self.source) 177 178 folders = set() 179 f = _walkfiles if self.options.dir_recurse else _listfiles 180 181 for p in f(self.source): 182 if not hidden_files and p.name.startswith('.'): 183 continue 184 185 yield p 186 187 def preloadHook(self): 188 super().preloadHook() 189 Path.stat.cache_clear() 190 191 def restat(self): 192 
vstat.cache_clear() 193 194 @asyncthread 195 def putChanges(self): 196 self.commitAdds() 197 self.commitMods() 198 self.commitDeletes() 199 200 self._deferredDels.clear() 201 self.reload() 202 203 204 class FileListSheet(DirSheet): 205 _ordering = [] 206 def iterload(self): 207 for fn in self.source.open_text(): 208 yield Path(fn.rstrip()) 209 210 211 @VisiData.api 212 def inputShell(vd): 213 cmd = vd.input("sh$ ", type="sh") 214 if '$' not in cmd: 215 vd.warning('no $column in command') 216 return cmd 217 218 BaseSheet.addCommand('', 'open-dir-current', 'vd.push(vd.currentDirSheet)', 'open Directory Sheet: browse properties of files in current directory') 219 220 Sheet.addCommand('z;', 'addcol-sh', 'cmd=inputShell(); addShellColumns(cmd, sheet)', 'create new column from bash expression, with $columnNames as variables') 221 222 DirSheet.addCommand(ENTER, 'open-row', 'vd.push(openSource(cursorRow or fail("no row"), filetype="dir" if cursorRow.is_dir() else LazyComputeRow(sheet, cursorRow).ext))', 'open current file as a new sheet') 223 DirSheet.addCommand('g'+ENTER, 'open-rows', 'for r in selectedRows: vd.push(openSource(r))', 'open selected files as new sheets') 224 DirSheet.addCommand('^O', 'sysopen-row', 'launchEditor(cursorRow)', 'open current file in external $EDITOR') 225 DirSheet.addCommand('g^O', 'sysopen-rows', 'launchEditor(*selectedRows)', 'open selected files in external $EDITOR') 226 227 DirSheet.addCommand('y', 'copy-row', 'copy_files([cursorRow], inputPath("copy to dest: "))', 'copy file to given directory') 228 DirSheet.addCommand('gy', 'copy-selected', 'copy_files(selectedRows, inputPath("copy to dest: ", value=cursorRow.given))', 'copy selected files to given directory') 229 230 @DirSheet.api 231 @asyncthread 232 def copy_files(sheet, paths, dest): 233 destdir = Path(dest) 234 destdir.is_dir() or vd.fail('target must be directory') 235 vd.status('copying %s %s to %s' % (len(paths), sheet.rowtype, destdir)) 236 os.makedirs(destdir, exist_ok=True) 237 for srcpath in Progress(paths, gerund='copying'): 238 try: 239 destpath = destdir/str(srcpath._path.name) 240 if srcpath.is_dir(): 241 shutil.copy_tree(srcpath, destpath) 242 else: 243 shutil.copyfile(srcpath, destpath) 244 except Exception as e: 245 vd.exceptionCaught(e) 246 247 248 vd.addGlobals({ 249 'DirSheet': DirSheet 250 }) 251 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/visidata/shell.py b/visidata/shell.py --- a/visidata/shell.py +++ b/visidata/shell.py @@ -151,6 +151,9 @@ def deleteSourceRow(self, r): self.removeFile(r) + def newRow(self): + vd.fail('new file not supported') + def iterload(self): hidden_files = self.options.dir_hidden
{"golden_diff": "diff --git a/visidata/shell.py b/visidata/shell.py\n--- a/visidata/shell.py\n+++ b/visidata/shell.py\n@@ -151,6 +151,9 @@\n def deleteSourceRow(self, r):\n self.removeFile(r)\n \n+ def newRow(self):\n+ vd.fail('new file not supported')\n+\n def iterload(self):\n hidden_files = self.options.dir_hidden\n", "issue": "[v2.9dev] Disable adding new row in DirSheet\n**Small description**\r\nUnless used, `add-row` should probably be disabled on DirSheet as it creates an error\r\n\r\n**Expected result**\r\nA warning to be shown to the user that a new row/file cannot be created.\r\n\r\n**Actual result with screenshot**\r\n![image](https://user-images.githubusercontent.com/11225502/155373843-aeef6d8e-cc96-4559-b3e6-dc9690aab25f.png)\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\nOpen vd directory with vd (`vd .`) and press `a`\n", "before_files": [{"content": "import os\nimport shutil\nimport stat\nimport subprocess\nimport contextlib\ntry:\n import pwd\n import grp\nexcept ImportError:\n pass # pwd,grp modules not available on Windows\n\nfrom visidata import Column, Sheet, LazyComputeRow, asynccache, BaseSheet, vd\nfrom visidata import Path, ENTER, date, asyncthread, FileExistsError, VisiData\nfrom visidata import modtime, filesize, vstat, Progress, TextSheet\n\n\nvd.option('dir_recurse', False, 'walk source path recursively on DirSheet')\nvd.option('dir_hidden', False, 'load hidden files on DirSheet')\n\n\[email protected]_property\ndef currentDirSheet(p):\n 'Support opening the current DirSheet from the vdmenu'\n return DirSheet('.', source=Path('.'))\n\n@asyncthread\ndef exec_shell(*args):\n p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = p.communicate()\n if err or out:\n lines = err.decode('utf8').splitlines() + out.decode('utf8').splitlines()\n vd.push(TextSheet(' '.join(args), source=lines))\n\n\[email protected]\ndef open_dir(vd, p):\n return DirSheet(p.name, source=p)\n\[email protected]\ndef open_fdir(vd, p):\n return FileListSheet(p.name, source=p)\n\[email protected]\ndef addShellColumns(vd, cmd, sheet):\n shellcol = ColumnShell(cmd, source=sheet, width=0)\n sheet.addColumnAtCursor(\n shellcol,\n Column(cmd+'_stdout', srccol=shellcol, getter=lambda col,row: col.srccol.getValue(row)[0]),\n Column(cmd+'_stderr', srccol=shellcol, getter=lambda col,row: col.srccol.getValue(row)[1]))\n\n\nclass ColumnShell(Column):\n def __init__(self, name, cmd=None, **kwargs):\n super().__init__(name, **kwargs)\n self.expr = cmd or name\n\n @asynccache(lambda col,row: (col, col.sheet.rowid(row)))\n def calcValue(self, row):\n try:\n import shlex\n args = []\n context = LazyComputeRow(self.source, row)\n for arg in shlex.split(self.expr):\n if arg.startswith('$'):\n args.append(shlex.quote(str(context[arg[1:]])))\n else:\n args.append(arg)\n\n p = subprocess.Popen([os.getenv('SHELL', 'bash'), '-c', ' '.join(args)],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n return p.communicate()\n except Exception as e:\n vd.exceptionCaught(e)\n\n\nclass DirSheet(Sheet):\n 'Sheet displaying directory, using ENTER to open a particular file. Edited fields are applied to the filesystem.'\n rowtype = 'files' # rowdef: Path\n defer = True\n columns = [\n Column('directory',\n getter=lambda col,row: str(row.parent) if str(row.parent) == '.' 
else str(row.parent) + '/',\n setter=lambda col,row,val: col.sheet.moveFile(row, val)),\n Column('filename',\n getter=lambda col,row: row.name + row.suffix,\n setter=lambda col,row,val: col.sheet.renameFile(row, val)),\n Column('abspath', width=0, type=str,\n getter=lambda col,row: row,\n setter=lambda col,row,val: os.rename(row, val)),\n Column('ext', getter=lambda col,row: row.is_dir() and '/' or row.ext),\n Column('size', type=int,\n getter=lambda col,row: filesize(row),\n setter=lambda col,row,val: os.truncate(row, int(val))),\n Column('modtime', type=date,\n getter=lambda col,row: modtime(row),\n setter=lambda col,row,val: os.utime(row, times=((row.stat().st_atime, float(val))))),\n Column('owner', width=0,\n getter=lambda col,row: pwd.getpwuid(row.stat().st_uid).pw_name,\n setter=lambda col,row,val: os.chown(row, pwd.getpwnam(val).pw_uid, -1)),\n Column('group', width=0,\n getter=lambda col,row: grp.getgrgid(row.stat().st_gid).gr_name,\n setter=lambda col,row,val: os.chown(row, -1, grp.getgrnam(val).pw_gid)),\n Column('mode', width=0,\n getter=lambda col,row: '{:o}'.format(row.stat().st_mode),\n setter=lambda col,row,val: os.chmod(row, int(val, 8))),\n Column('filetype', width=0, cache='async', getter=lambda col,row: subprocess.Popen(['file', '--brief', row], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0].strip()),\n ]\n nKeys = 2\n _ordering = [('modtime', True)] # sort by reverse modtime initially\n\n @staticmethod\n def colorOwner(sheet, col, row, val):\n ret = ''\n if col.name == 'group':\n mode = row.stat().st_mode\n if mode & stat.S_IXGRP: ret = 'bold '\n if mode & stat.S_IWGRP: return ret + 'green'\n if mode & stat.S_IRGRP: return ret + 'yellow'\n elif col.name == 'owner':\n mode = row.stat().st_mode\n if mode & stat.S_IXUSR: ret = 'bold '\n if mode & stat.S_IWUSR: return ret + 'green'\n if mode & stat.S_IRUSR: return ret + 'yellow'\n\n def moveFile(self, row, newparent):\n parent = Path(newparent)\n newpath = Path(parent/(row.name + row.suffix))\n if parent.exists():\n if not parent.is_dir():\n vd.error('destination %s not a directory' % parent)\n else:\n with contextlib.suppress(FileExistsError):\n os.makedirs(parent)\n\n row.rename(newpath)\n row.given = newpath # modify visidata.Path\n self.restat()\n\n def renameFile(self, row, val):\n newpath = row.with_name(val)\n row.rename(newpath)\n row.given = newpath\n self.restat()\n\n def removeFile(self, path):\n if path.is_dir():\n os.rmdir(path)\n else:\n path.unlink()\n\n def deleteSourceRow(self, r):\n self.removeFile(r)\n\n def iterload(self):\n hidden_files = self.options.dir_hidden\n\n def _walkfiles(p):\n basepath = str(p)\n for folder, subdirs, files in os.walk(basepath):\n subfolder = folder[len(basepath)+1:]\n if not hidden_files and subfolder.startswith('.'): continue\n if subfolder in ['.', '..']: continue\n\n fpath = Path(folder)\n yield fpath\n\n for fn in files:\n yield fpath/fn\n\n def _listfiles(p):\n basepath = str(p)\n for fn in os.listdir(basepath):\n yield p/fn\n\n\n basepath = str(self.source)\n\n folders = set()\n f = _walkfiles if self.options.dir_recurse else _listfiles\n\n for p in f(self.source):\n if not hidden_files and p.name.startswith('.'):\n continue\n\n yield p\n\n def preloadHook(self):\n super().preloadHook()\n Path.stat.cache_clear()\n\n def restat(self):\n vstat.cache_clear()\n\n @asyncthread\n def putChanges(self):\n self.commitAdds()\n self.commitMods()\n self.commitDeletes()\n\n self._deferredDels.clear()\n self.reload()\n\n\nclass FileListSheet(DirSheet):\n _ordering = 
[]\n def iterload(self):\n for fn in self.source.open_text():\n yield Path(fn.rstrip())\n\n\[email protected]\ndef inputShell(vd):\n cmd = vd.input(\"sh$ \", type=\"sh\")\n if '$' not in cmd:\n vd.warning('no $column in command')\n return cmd\n\nBaseSheet.addCommand('', 'open-dir-current', 'vd.push(vd.currentDirSheet)', 'open Directory Sheet: browse properties of files in current directory')\n\nSheet.addCommand('z;', 'addcol-sh', 'cmd=inputShell(); addShellColumns(cmd, sheet)', 'create new column from bash expression, with $columnNames as variables')\n\nDirSheet.addCommand(ENTER, 'open-row', 'vd.push(openSource(cursorRow or fail(\"no row\"), filetype=\"dir\" if cursorRow.is_dir() else LazyComputeRow(sheet, cursorRow).ext))', 'open current file as a new sheet')\nDirSheet.addCommand('g'+ENTER, 'open-rows', 'for r in selectedRows: vd.push(openSource(r))', 'open selected files as new sheets')\nDirSheet.addCommand('^O', 'sysopen-row', 'launchEditor(cursorRow)', 'open current file in external $EDITOR')\nDirSheet.addCommand('g^O', 'sysopen-rows', 'launchEditor(*selectedRows)', 'open selected files in external $EDITOR')\n\nDirSheet.addCommand('y', 'copy-row', 'copy_files([cursorRow], inputPath(\"copy to dest: \"))', 'copy file to given directory')\nDirSheet.addCommand('gy', 'copy-selected', 'copy_files(selectedRows, inputPath(\"copy to dest: \", value=cursorRow.given))', 'copy selected files to given directory')\n\[email protected]\n@asyncthread\ndef copy_files(sheet, paths, dest):\n destdir = Path(dest)\n destdir.is_dir() or vd.fail('target must be directory')\n vd.status('copying %s %s to %s' % (len(paths), sheet.rowtype, destdir))\n os.makedirs(destdir, exist_ok=True)\n for srcpath in Progress(paths, gerund='copying'):\n try:\n destpath = destdir/str(srcpath._path.name)\n if srcpath.is_dir():\n shutil.copy_tree(srcpath, destpath)\n else:\n shutil.copyfile(srcpath, destpath)\n except Exception as e:\n vd.exceptionCaught(e)\n\n\nvd.addGlobals({\n 'DirSheet': DirSheet\n})\n", "path": "visidata/shell.py"}], "after_files": [{"content": "import os\nimport shutil\nimport stat\nimport subprocess\nimport contextlib\ntry:\n import pwd\n import grp\nexcept ImportError:\n pass # pwd,grp modules not available on Windows\n\nfrom visidata import Column, Sheet, LazyComputeRow, asynccache, BaseSheet, vd\nfrom visidata import Path, ENTER, date, asyncthread, FileExistsError, VisiData\nfrom visidata import modtime, filesize, vstat, Progress, TextSheet\n\n\nvd.option('dir_recurse', False, 'walk source path recursively on DirSheet')\nvd.option('dir_hidden', False, 'load hidden files on DirSheet')\n\n\[email protected]_property\ndef currentDirSheet(p):\n 'Support opening the current DirSheet from the vdmenu'\n return DirSheet('.', source=Path('.'))\n\n@asyncthread\ndef exec_shell(*args):\n p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = p.communicate()\n if err or out:\n lines = err.decode('utf8').splitlines() + out.decode('utf8').splitlines()\n vd.push(TextSheet(' '.join(args), source=lines))\n\n\[email protected]\ndef open_dir(vd, p):\n return DirSheet(p.name, source=p)\n\[email protected]\ndef open_fdir(vd, p):\n return FileListSheet(p.name, source=p)\n\[email protected]\ndef addShellColumns(vd, cmd, sheet):\n shellcol = ColumnShell(cmd, source=sheet, width=0)\n sheet.addColumnAtCursor(\n shellcol,\n Column(cmd+'_stdout', srccol=shellcol, getter=lambda col,row: col.srccol.getValue(row)[0]),\n Column(cmd+'_stderr', srccol=shellcol, getter=lambda col,row: 
col.srccol.getValue(row)[1]))\n\n\nclass ColumnShell(Column):\n def __init__(self, name, cmd=None, **kwargs):\n super().__init__(name, **kwargs)\n self.expr = cmd or name\n\n @asynccache(lambda col,row: (col, col.sheet.rowid(row)))\n def calcValue(self, row):\n try:\n import shlex\n args = []\n context = LazyComputeRow(self.source, row)\n for arg in shlex.split(self.expr):\n if arg.startswith('$'):\n args.append(shlex.quote(str(context[arg[1:]])))\n else:\n args.append(arg)\n\n p = subprocess.Popen([os.getenv('SHELL', 'bash'), '-c', ' '.join(args)],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n return p.communicate()\n except Exception as e:\n vd.exceptionCaught(e)\n\n\nclass DirSheet(Sheet):\n 'Sheet displaying directory, using ENTER to open a particular file. Edited fields are applied to the filesystem.'\n rowtype = 'files' # rowdef: Path\n defer = True\n columns = [\n Column('directory',\n getter=lambda col,row: str(row.parent) if str(row.parent) == '.' else str(row.parent) + '/',\n setter=lambda col,row,val: col.sheet.moveFile(row, val)),\n Column('filename',\n getter=lambda col,row: row.name + row.suffix,\n setter=lambda col,row,val: col.sheet.renameFile(row, val)),\n Column('abspath', width=0, type=str,\n getter=lambda col,row: row,\n setter=lambda col,row,val: os.rename(row, val)),\n Column('ext', getter=lambda col,row: row.is_dir() and '/' or row.ext),\n Column('size', type=int,\n getter=lambda col,row: filesize(row),\n setter=lambda col,row,val: os.truncate(row, int(val))),\n Column('modtime', type=date,\n getter=lambda col,row: modtime(row),\n setter=lambda col,row,val: os.utime(row, times=((row.stat().st_atime, float(val))))),\n Column('owner', width=0,\n getter=lambda col,row: pwd.getpwuid(row.stat().st_uid).pw_name,\n setter=lambda col,row,val: os.chown(row, pwd.getpwnam(val).pw_uid, -1)),\n Column('group', width=0,\n getter=lambda col,row: grp.getgrgid(row.stat().st_gid).gr_name,\n setter=lambda col,row,val: os.chown(row, -1, grp.getgrnam(val).pw_gid)),\n Column('mode', width=0,\n getter=lambda col,row: '{:o}'.format(row.stat().st_mode),\n setter=lambda col,row,val: os.chmod(row, int(val, 8))),\n Column('filetype', width=0, cache='async', getter=lambda col,row: subprocess.Popen(['file', '--brief', row], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0].strip()),\n ]\n nKeys = 2\n _ordering = [('modtime', True)] # sort by reverse modtime initially\n\n @staticmethod\n def colorOwner(sheet, col, row, val):\n ret = ''\n if col.name == 'group':\n mode = row.stat().st_mode\n if mode & stat.S_IXGRP: ret = 'bold '\n if mode & stat.S_IWGRP: return ret + 'green'\n if mode & stat.S_IRGRP: return ret + 'yellow'\n elif col.name == 'owner':\n mode = row.stat().st_mode\n if mode & stat.S_IXUSR: ret = 'bold '\n if mode & stat.S_IWUSR: return ret + 'green'\n if mode & stat.S_IRUSR: return ret + 'yellow'\n\n def moveFile(self, row, newparent):\n parent = Path(newparent)\n newpath = Path(parent/(row.name + row.suffix))\n if parent.exists():\n if not parent.is_dir():\n vd.error('destination %s not a directory' % parent)\n else:\n with contextlib.suppress(FileExistsError):\n os.makedirs(parent)\n\n row.rename(newpath)\n row.given = newpath # modify visidata.Path\n self.restat()\n\n def renameFile(self, row, val):\n newpath = row.with_name(val)\n row.rename(newpath)\n row.given = newpath\n self.restat()\n\n def removeFile(self, path):\n if path.is_dir():\n os.rmdir(path)\n else:\n path.unlink()\n\n def deleteSourceRow(self, r):\n self.removeFile(r)\n\n 
def newRow(self):\n vd.fail('new file not supported')\n\n def iterload(self):\n hidden_files = self.options.dir_hidden\n\n def _walkfiles(p):\n basepath = str(p)\n for folder, subdirs, files in os.walk(basepath):\n subfolder = folder[len(basepath)+1:]\n if not hidden_files and subfolder.startswith('.'): continue\n if subfolder in ['.', '..']: continue\n\n fpath = Path(folder)\n yield fpath\n\n for fn in files:\n yield fpath/fn\n\n def _listfiles(p):\n basepath = str(p)\n for fn in os.listdir(basepath):\n yield p/fn\n\n\n basepath = str(self.source)\n\n folders = set()\n f = _walkfiles if self.options.dir_recurse else _listfiles\n\n for p in f(self.source):\n if not hidden_files and p.name.startswith('.'):\n continue\n\n yield p\n\n def preloadHook(self):\n super().preloadHook()\n Path.stat.cache_clear()\n\n def restat(self):\n vstat.cache_clear()\n\n @asyncthread\n def putChanges(self):\n self.commitAdds()\n self.commitMods()\n self.commitDeletes()\n\n self._deferredDels.clear()\n self.reload()\n\n\nclass FileListSheet(DirSheet):\n _ordering = []\n def iterload(self):\n for fn in self.source.open_text():\n yield Path(fn.rstrip())\n\n\[email protected]\ndef inputShell(vd):\n cmd = vd.input(\"sh$ \", type=\"sh\")\n if '$' not in cmd:\n vd.warning('no $column in command')\n return cmd\n\nBaseSheet.addCommand('', 'open-dir-current', 'vd.push(vd.currentDirSheet)', 'open Directory Sheet: browse properties of files in current directory')\n\nSheet.addCommand('z;', 'addcol-sh', 'cmd=inputShell(); addShellColumns(cmd, sheet)', 'create new column from bash expression, with $columnNames as variables')\n\nDirSheet.addCommand(ENTER, 'open-row', 'vd.push(openSource(cursorRow or fail(\"no row\"), filetype=\"dir\" if cursorRow.is_dir() else LazyComputeRow(sheet, cursorRow).ext))', 'open current file as a new sheet')\nDirSheet.addCommand('g'+ENTER, 'open-rows', 'for r in selectedRows: vd.push(openSource(r))', 'open selected files as new sheets')\nDirSheet.addCommand('^O', 'sysopen-row', 'launchEditor(cursorRow)', 'open current file in external $EDITOR')\nDirSheet.addCommand('g^O', 'sysopen-rows', 'launchEditor(*selectedRows)', 'open selected files in external $EDITOR')\n\nDirSheet.addCommand('y', 'copy-row', 'copy_files([cursorRow], inputPath(\"copy to dest: \"))', 'copy file to given directory')\nDirSheet.addCommand('gy', 'copy-selected', 'copy_files(selectedRows, inputPath(\"copy to dest: \", value=cursorRow.given))', 'copy selected files to given directory')\n\[email protected]\n@asyncthread\ndef copy_files(sheet, paths, dest):\n destdir = Path(dest)\n destdir.is_dir() or vd.fail('target must be directory')\n vd.status('copying %s %s to %s' % (len(paths), sheet.rowtype, destdir))\n os.makedirs(destdir, exist_ok=True)\n for srcpath in Progress(paths, gerund='copying'):\n try:\n destpath = destdir/str(srcpath._path.name)\n if srcpath.is_dir():\n shutil.copy_tree(srcpath, destpath)\n else:\n shutil.copyfile(srcpath, destpath)\n except Exception as e:\n vd.exceptionCaught(e)\n\n\nvd.addGlobals({\n 'DirSheet': DirSheet\n})\n", "path": "visidata/shell.py"}]}
3,290
98
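Editorial note on the record above: its golden diff is a two-line override — `DirSheet` gains a `newRow()` that calls `vd.fail('new file not supported')`, so `add-row` aborts with a status message instead of the traceback shown in the issue screenshot. A minimal self-contained sketch of that pattern (the `Sheet` base class and `fail()` below are stand-ins for illustration, not VisiData's real API):

```python
class Sheet:
    """Stand-in base class: its default newRow() happily appends an empty row."""
    def newRow(self):
        return {}


def fail(msg):
    """Stand-in for vd.fail(): abort the current command with a user-facing message."""
    raise RuntimeError(msg)


class DirSheet(Sheet):
    """Rows are files on disk, so adding a row has no sensible meaning here."""
    def newRow(self):
        fail('new file not supported')


try:
    DirSheet().newRow()
except RuntimeError as exc:
    print(exc)  # -> new file not supported
```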
gh_patches_debug_5699
rasdani/github-patches
git_diff
kornia__kornia-698
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Enabled RandomResizedCrop batch mode when same_on_batch=False ### Description #680 The previous implementation would crop the same size then resize. This PR enabled different cropped sizes across the batch. <img width="1028" alt="Screenshot 2020-09-15 at 11 18 54 PM" src="https://user-images.githubusercontent.com/15955486/93230317-1f4f1980-f7aa-11ea-9f07-bb85c2bab50f.png"> ### Status **Ready/Work in progress/Hold** ### Types of changes <!--- Put an `x` in all the boxes that apply, and remove the not applicable items --> - [x] Bug fix (non-breaking change which fixes an issue) - [ ] Breaking change (fix or new feature that would cause existing functionality to change) - [ ] New tests added to cover the changes - [ ] Docstrings/Documentation updated ## PR Checklist ### PR Implementer This is a small checklist for the implementation details of this PR. If there are any questions regarding code style or other conventions check out our [summary](https://github.com/kornia/kornia/blob/master/CONTRIBUTING.rst). - [ ] Did you discuss the functionality or any breaking changes before ? - [ ] **Pass all tests**: did you test in local ? `make test` - [ ] Unittests: did you add tests for your new functionality ? - [ ] Documentations: did you build documentation ? `make build-docs` - [ ] Implementation: is your code well commented and follow conventions ? `make lint` - [ ] Docstrings & Typing: has your code documentation and typing ? `make mypy` - [ ] Update notebooks & documentation if necessary ### KorniaTeam <details> <summary>KorniaTeam workflow</summary> - [ ] Assign correct label - [ ] Assign PR to a reviewer - [ ] Does this PR close an Issue? (add `closes #IssueNumber` at the bottom if not already in description) </details> ### Reviewer <details> <summary>Reviewer workflow</summary> - [ ] Do all tests pass? (Unittests, Typing, Linting, Documentation, Environment) - [ ] Does the implementation follow `kornia` design conventions? - [ ] Is the documentation complete enough ? - [ ] Are the tests covering simple and corner cases ? </details> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kornia/geometry/homography.py` Content: ``` 1 import warnings 2 3 import torch 4 from typing import Tuple, Optional 5 6 import kornia 7 from kornia.geometry.epipolar import normalize_points 8 9 TupleTensor = Tuple[torch.Tensor, torch.Tensor] 10 11 12 def find_homography_dlt( 13 points1: torch.Tensor, points2: torch.Tensor, weights: Optional[torch.Tensor] = None) -> torch.Tensor: 14 r"""Computes the homography matrix using the DLT formulation. 15 16 The linear system is solved by using the Weighted Least Squares Solution for the 4 Points algorithm. 17 18 Args: 19 points1 (torch.Tensor): A set of points in the first image with a tensor shape :math:`(B, N, 2)`. 20 points2 (torch.Tensor): A set of points in the second image with a tensor shape :math:`(B, N, 2)`. 21 weights (torch.Tensor, optional): Tensor containing the weights per point correspondence with a shape of 22 :math:`(B, N)`. Defaults to all ones. 23 24 Returns: 25 torch.Tensor: the computed homography matrix with shape :math:`(B, 3, 3)`. 
26 """ 27 assert points1.shape == points2.shape, points1.shape 28 assert len(points1.shape) >= 1 and points1.shape[-1] == 2, points1.shape 29 assert points1.shape[1] >= 4, points1.shape 30 31 eps: float = 1e-8 32 points1_norm, transform1 = normalize_points(points1) 33 points2_norm, transform2 = normalize_points(points2) 34 35 x1, y1 = torch.chunk(points1_norm, dim=-1, chunks=2) # BxNx1 36 x2, y2 = torch.chunk(points2_norm, dim=-1, chunks=2) # BxNx1 37 ones, zeros = torch.ones_like(x1), torch.zeros_like(x1) 38 39 # DIAPO 11: https://www.uio.no/studier/emner/matnat/its/nedlagte-emner/UNIK4690/v16/forelesninger/lecture_4_3-estimating-homographies-from-feature-correspondences.pdf # noqa: E501 40 ax = torch.cat([zeros, zeros, zeros, -x1, -y1, -ones, y2 * x1, y2 * y1, y2], dim=-1) 41 ay = torch.cat([x1, y1, ones, zeros, zeros, zeros, -x2 * x1, -x2 * y1, -x2], dim=-1) 42 A = torch.cat((ax, ay), dim=-1).reshape(ax.shape[0], -1, ax.shape[-1]) 43 44 if weights is None: 45 # All points are equally important 46 A = A.transpose(-2, -1) @ A 47 else: 48 # We should use provided weights 49 assert len(weights.shape) == 2 and weights.shape == points1.shape[:2], weights.shape 50 w_diag = torch.diag_embed(weights.repeat(1, 2)) 51 A = A.transpose(-2, -1) @ w_diag @ A 52 53 try: 54 U, S, V = torch.svd(A) 55 except: 56 warnings.warn('SVD did not converge', RuntimeWarning) 57 return torch.empty((points1_norm.size(0), 3, 3), device=points1.device) 58 59 H = V[..., -1].view(-1, 3, 3) 60 H = transform2.inverse() @ (H @ transform1) 61 H_norm = H / (H[..., -1:, -1:] + eps) 62 return H_norm 63 64 65 def find_homography_dlt_iterated(points1: torch.Tensor, 66 points2: torch.Tensor, 67 weights: torch.Tensor, 68 soft_inl_th: float = 3.0, 69 n_iter: int = 5) -> torch.Tensor: 70 r"""Computes the homography matrix using the iteratively-reweighted least squares (IRWLS). 71 72 The linear system is solved by using the Reweighted Least Squares Solution for the 4 Points algorithm. 73 74 Args: 75 points1 (torch.Tensor): A set of points in the first image with a tensor shape :math:`(B, N, 2)`. 76 points2 (torch.Tensor): A set of points in the second image with a tensor shape :math:`(B, N, 2)`. 77 weights (torch.Tensor): Tensor containing the weights per point correspondence with a shape of :math:`(B, N)`. 78 Used for the first iteration of the IRWLS. 79 soft_inl_th (float): Soft inlier threshold used for weight calculation. 80 n_iter (int): number of iterations. Default is 5. 81 82 Returns: 83 torch.Tensor: the computed homography matrix with shape :math:`(B, 3, 3)`. 84 """ 85 86 '''Function, which finds homography via iteratively-reweighted 87 least squares ToDo: add citation''' 88 H: torch.Tensor = find_homography_dlt(points1, points2, weights) 89 for i in range(n_iter - 1): 90 pts1_in_2: torch.Tensor = kornia.transform_points(H, points1) 91 error_squared: torch.Tensor = (pts1_in_2 - points2).pow(2).sum(dim=-1) 92 weights_new: torch.Tensor = torch.exp(-error_squared / (2.0 * (soft_inl_th ** 2))) 93 H = find_homography_dlt(points1, points2, weights_new) 94 return H 95 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kornia/geometry/homography.py b/kornia/geometry/homography.py --- a/kornia/geometry/homography.py +++ b/kornia/geometry/homography.py @@ -47,7 +47,7 @@ else: # We should use provided weights assert len(weights.shape) == 2 and weights.shape == points1.shape[:2], weights.shape - w_diag = torch.diag_embed(weights.repeat(1, 2)) + w_diag = torch.diag_embed(weights.unsqueeze(dim=-1).repeat(1, 1, 2).reshape(weights.shape[0], -1)) A = A.transpose(-2, -1) @ w_diag @ A try:
{"golden_diff": "diff --git a/kornia/geometry/homography.py b/kornia/geometry/homography.py\n--- a/kornia/geometry/homography.py\n+++ b/kornia/geometry/homography.py\n@@ -47,7 +47,7 @@\n else:\n # We should use provided weights\n assert len(weights.shape) == 2 and weights.shape == points1.shape[:2], weights.shape\n- w_diag = torch.diag_embed(weights.repeat(1, 2))\n+ w_diag = torch.diag_embed(weights.unsqueeze(dim=-1).repeat(1, 1, 2).reshape(weights.shape[0], -1))\n A = A.transpose(-2, -1) @ w_diag @ A\n \n try:\n", "issue": "Enabled RandomResizedCrop batch mode when same_on_batch=False\n### Description\r\n\r\n#680\r\n\r\nThe previous implementation would crop the same size then resize. This PR enabled different cropped sizes across the batch.\r\n\r\n<img width=\"1028\" alt=\"Screenshot 2020-09-15 at 11 18 54 PM\" src=\"https://user-images.githubusercontent.com/15955486/93230317-1f4f1980-f7aa-11ea-9f07-bb85c2bab50f.png\">\r\n\r\n\r\n\r\n### Status\r\n**Ready/Work in progress/Hold**\r\n\r\n### Types of changes\r\n<!--- Put an `x` in all the boxes that apply, and remove the not applicable items -->\r\n- [x] Bug fix (non-breaking change which fixes an issue)\r\n- [ ] Breaking change (fix or new feature that would cause existing functionality to change)\r\n- [ ] New tests added to cover the changes\r\n- [ ] Docstrings/Documentation updated\r\n\r\n\r\n## PR Checklist\r\n### PR Implementer\r\nThis is a small checklist for the implementation details of this PR.\r\n\r\nIf there are any questions regarding code style or other conventions check out our \r\n[summary](https://github.com/kornia/kornia/blob/master/CONTRIBUTING.rst).\r\n\r\n- [ ] Did you discuss the functionality or any breaking changes before ?\r\n- [ ] **Pass all tests**: did you test in local ? `make test`\r\n- [ ] Unittests: did you add tests for your new functionality ?\r\n- [ ] Documentations: did you build documentation ? `make build-docs`\r\n- [ ] Implementation: is your code well commented and follow conventions ? `make lint`\r\n- [ ] Docstrings & Typing: has your code documentation and typing ? `make mypy`\r\n- [ ] Update notebooks & documentation if necessary\r\n\r\n### KorniaTeam\r\n<details>\r\n <summary>KorniaTeam workflow</summary>\r\n \r\n - [ ] Assign correct label\r\n - [ ] Assign PR to a reviewer\r\n - [ ] Does this PR close an Issue? (add `closes #IssueNumber` at the bottom if \r\n not already in description)\r\n\r\n</details>\r\n\r\n### Reviewer\r\n<details>\r\n <summary>Reviewer workflow</summary>\r\n\r\n - [ ] Do all tests pass? 
(Unittests, Typing, Linting, Documentation, Environment)\r\n - [ ] Does the implementation follow `kornia` design conventions?\r\n - [ ] Is the documentation complete enough ?\r\n - [ ] Are the tests covering simple and corner cases ?\r\n \r\n</details>\r\n\n", "before_files": [{"content": "import warnings\n\nimport torch\nfrom typing import Tuple, Optional\n\nimport kornia\nfrom kornia.geometry.epipolar import normalize_points\n\nTupleTensor = Tuple[torch.Tensor, torch.Tensor]\n\n\ndef find_homography_dlt(\n points1: torch.Tensor, points2: torch.Tensor, weights: Optional[torch.Tensor] = None) -> torch.Tensor:\n r\"\"\"Computes the homography matrix using the DLT formulation.\n\n The linear system is solved by using the Weighted Least Squares Solution for the 4 Points algorithm.\n\n Args:\n points1 (torch.Tensor): A set of points in the first image with a tensor shape :math:`(B, N, 2)`.\n points2 (torch.Tensor): A set of points in the second image with a tensor shape :math:`(B, N, 2)`.\n weights (torch.Tensor, optional): Tensor containing the weights per point correspondence with a shape of\n :math:`(B, N)`. Defaults to all ones.\n\n Returns:\n torch.Tensor: the computed homography matrix with shape :math:`(B, 3, 3)`.\n \"\"\"\n assert points1.shape == points2.shape, points1.shape\n assert len(points1.shape) >= 1 and points1.shape[-1] == 2, points1.shape\n assert points1.shape[1] >= 4, points1.shape\n\n eps: float = 1e-8\n points1_norm, transform1 = normalize_points(points1)\n points2_norm, transform2 = normalize_points(points2)\n\n x1, y1 = torch.chunk(points1_norm, dim=-1, chunks=2) # BxNx1\n x2, y2 = torch.chunk(points2_norm, dim=-1, chunks=2) # BxNx1\n ones, zeros = torch.ones_like(x1), torch.zeros_like(x1)\n\n # DIAPO 11: https://www.uio.no/studier/emner/matnat/its/nedlagte-emner/UNIK4690/v16/forelesninger/lecture_4_3-estimating-homographies-from-feature-correspondences.pdf # noqa: E501\n ax = torch.cat([zeros, zeros, zeros, -x1, -y1, -ones, y2 * x1, y2 * y1, y2], dim=-1)\n ay = torch.cat([x1, y1, ones, zeros, zeros, zeros, -x2 * x1, -x2 * y1, -x2], dim=-1)\n A = torch.cat((ax, ay), dim=-1).reshape(ax.shape[0], -1, ax.shape[-1])\n\n if weights is None:\n # All points are equally important\n A = A.transpose(-2, -1) @ A\n else:\n # We should use provided weights\n assert len(weights.shape) == 2 and weights.shape == points1.shape[:2], weights.shape\n w_diag = torch.diag_embed(weights.repeat(1, 2))\n A = A.transpose(-2, -1) @ w_diag @ A\n\n try:\n U, S, V = torch.svd(A)\n except:\n warnings.warn('SVD did not converge', RuntimeWarning)\n return torch.empty((points1_norm.size(0), 3, 3), device=points1.device)\n\n H = V[..., -1].view(-1, 3, 3)\n H = transform2.inverse() @ (H @ transform1)\n H_norm = H / (H[..., -1:, -1:] + eps)\n return H_norm\n\n\ndef find_homography_dlt_iterated(points1: torch.Tensor,\n points2: torch.Tensor,\n weights: torch.Tensor,\n soft_inl_th: float = 3.0,\n n_iter: int = 5) -> torch.Tensor:\n r\"\"\"Computes the homography matrix using the iteratively-reweighted least squares (IRWLS).\n\n The linear system is solved by using the Reweighted Least Squares Solution for the 4 Points algorithm.\n\n Args:\n points1 (torch.Tensor): A set of points in the first image with a tensor shape :math:`(B, N, 2)`.\n points2 (torch.Tensor): A set of points in the second image with a tensor shape :math:`(B, N, 2)`.\n weights (torch.Tensor): Tensor containing the weights per point correspondence with a shape of :math:`(B, N)`.\n Used for the first iteration of the IRWLS.\n soft_inl_th 
(float): Soft inlier threshold used for weight calculation.\n n_iter (int): number of iterations. Default is 5.\n\n Returns:\n torch.Tensor: the computed homography matrix with shape :math:`(B, 3, 3)`.\n \"\"\"\n\n '''Function, which finds homography via iteratively-reweighted\n least squares ToDo: add citation'''\n H: torch.Tensor = find_homography_dlt(points1, points2, weights)\n for i in range(n_iter - 1):\n pts1_in_2: torch.Tensor = kornia.transform_points(H, points1)\n error_squared: torch.Tensor = (pts1_in_2 - points2).pow(2).sum(dim=-1)\n weights_new: torch.Tensor = torch.exp(-error_squared / (2.0 * (soft_inl_th ** 2)))\n H = find_homography_dlt(points1, points2, weights_new)\n return H\n", "path": "kornia/geometry/homography.py"}], "after_files": [{"content": "import warnings\n\nimport torch\nfrom typing import Tuple, Optional\n\nimport kornia\nfrom kornia.geometry.epipolar import normalize_points\n\nTupleTensor = Tuple[torch.Tensor, torch.Tensor]\n\n\ndef find_homography_dlt(\n points1: torch.Tensor, points2: torch.Tensor, weights: Optional[torch.Tensor] = None) -> torch.Tensor:\n r\"\"\"Computes the homography matrix using the DLT formulation.\n\n The linear system is solved by using the Weighted Least Squares Solution for the 4 Points algorithm.\n\n Args:\n points1 (torch.Tensor): A set of points in the first image with a tensor shape :math:`(B, N, 2)`.\n points2 (torch.Tensor): A set of points in the second image with a tensor shape :math:`(B, N, 2)`.\n weights (torch.Tensor, optional): Tensor containing the weights per point correspondence with a shape of\n :math:`(B, N)`. Defaults to all ones.\n\n Returns:\n torch.Tensor: the computed homography matrix with shape :math:`(B, 3, 3)`.\n \"\"\"\n assert points1.shape == points2.shape, points1.shape\n assert len(points1.shape) >= 1 and points1.shape[-1] == 2, points1.shape\n assert points1.shape[1] >= 4, points1.shape\n\n eps: float = 1e-8\n points1_norm, transform1 = normalize_points(points1)\n points2_norm, transform2 = normalize_points(points2)\n\n x1, y1 = torch.chunk(points1_norm, dim=-1, chunks=2) # BxNx1\n x2, y2 = torch.chunk(points2_norm, dim=-1, chunks=2) # BxNx1\n ones, zeros = torch.ones_like(x1), torch.zeros_like(x1)\n\n # DIAPO 11: https://www.uio.no/studier/emner/matnat/its/nedlagte-emner/UNIK4690/v16/forelesninger/lecture_4_3-estimating-homographies-from-feature-correspondences.pdf # noqa: E501\n ax = torch.cat([zeros, zeros, zeros, -x1, -y1, -ones, y2 * x1, y2 * y1, y2], dim=-1)\n ay = torch.cat([x1, y1, ones, zeros, zeros, zeros, -x2 * x1, -x2 * y1, -x2], dim=-1)\n A = torch.cat((ax, ay), dim=-1).reshape(ax.shape[0], -1, ax.shape[-1])\n\n if weights is None:\n # All points are equally important\n A = A.transpose(-2, -1) @ A\n else:\n # We should use provided weights\n assert len(weights.shape) == 2 and weights.shape == points1.shape[:2], weights.shape\n w_diag = torch.diag_embed(weights.unsqueeze(dim=-1).repeat(1, 1, 2).reshape(weights.shape[0], -1))\n A = A.transpose(-2, -1) @ w_diag @ A\n\n try:\n U, S, V = torch.svd(A)\n except:\n warnings.warn('SVD did not converge', RuntimeWarning)\n return torch.empty((points1_norm.size(0), 3, 3), device=points1.device)\n\n H = V[..., -1].view(-1, 3, 3)\n H = transform2.inverse() @ (H @ transform1)\n H_norm = H / (H[..., -1:, -1:] + eps)\n return H_norm\n\n\ndef find_homography_dlt_iterated(points1: torch.Tensor,\n points2: torch.Tensor,\n weights: torch.Tensor,\n soft_inl_th: float = 3.0,\n n_iter: int = 5) -> torch.Tensor:\n r\"\"\"Computes the homography matrix using the 
iteratively-reweighted least squares (IRWLS).\n\n The linear system is solved by using the Reweighted Least Squares Solution for the 4 Points algorithm.\n\n Args:\n points1 (torch.Tensor): A set of points in the first image with a tensor shape :math:`(B, N, 2)`.\n points2 (torch.Tensor): A set of points in the second image with a tensor shape :math:`(B, N, 2)`.\n weights (torch.Tensor): Tensor containing the weights per point correspondence with a shape of :math:`(B, N)`.\n Used for the first iteration of the IRWLS.\n soft_inl_th (float): Soft inlier threshold used for weight calculation.\n n_iter (int): number of iterations. Default is 5.\n\n Returns:\n torch.Tensor: the computed homography matrix with shape :math:`(B, 3, 3)`.\n \"\"\"\n\n '''Function, which finds homography via iteratively-reweighted\n least squares ToDo: add citation'''\n H: torch.Tensor = find_homography_dlt(points1, points2, weights)\n for i in range(n_iter - 1):\n pts1_in_2: torch.Tensor = kornia.transform_points(H, points1)\n error_squared: torch.Tensor = (pts1_in_2 - points2).pow(2).sum(dim=-1)\n weights_new: torch.Tensor = torch.exp(-error_squared / (2.0 * (soft_inl_th ** 2)))\n H = find_homography_dlt(points1, points2, weights_new)\n return H\n", "path": "kornia/geometry/homography.py"}]}
2,230
162
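Editorial note on the record above: the only functional change is how the per-correspondence weights are expanded into the 2N×2N diagonal matrix. Because `A` interleaves each point's two equations (`[ax_1, ay_1, ax_2, ay_2, …]`), the diagonal must hold `[w_1, w_1, w_2, w_2, …]`; the old `weights.repeat(1, 2)` produced the block order `[w_1…w_N, w_1…w_N]` instead. A quick PyTorch check of the two orderings (shapes picked arbitrarily for illustration):

```python
import torch

B, N = 1, 3
weights = torch.tensor([[0.1, 0.5, 0.9]])  # (B, N): one weight per correspondence

# Old construction: tiles the whole vector -> [w1, w2, w3, w1, w2, w3]
old = weights.repeat(1, 2)

# Fixed construction: repeats each weight in place -> [w1, w1, w2, w2, w3, w3],
# matching A's interleaved row order [ax_1, ay_1, ax_2, ay_2, ...]
new = weights.unsqueeze(dim=-1).repeat(1, 1, 2).reshape(weights.shape[0], -1)

print(old)   # tensor([[0.1000, 0.5000, 0.9000, 0.1000, 0.5000, 0.9000]])
print(new)   # tensor([[0.1000, 0.1000, 0.5000, 0.5000, 0.9000, 0.9000]])
print(torch.diag_embed(new).shape)  # torch.Size([1, 6, 6]) -- the (B, 2N, 2N) weight matrix
```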
gh_patches_debug_1684
rasdani/github-patches
git_diff
geopandas__geopandas-2398
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Drop Python 3.7 We should consider dropping support for Python 3.7. We are roughly following numpy model (#1457) and numpy itself is 3.8+ now. Same applies to pyproj, which requires 3.8 (and causes some macOS CI failures because of some conda issues). I forgot about Python versions when doing #2358 and bumped only packages. @jorisvandenbossche if you're fine with that, I'll update CI matrix and related things. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env/python 2 """Installation script 3 4 """ 5 6 import os 7 8 try: 9 from setuptools import setup 10 except ImportError: 11 from distutils.core import setup 12 13 import versioneer 14 15 LONG_DESCRIPTION = """GeoPandas is a project to add support for geographic data to 16 `pandas`_ objects. 17 18 The goal of GeoPandas is to make working with geospatial data in 19 python easier. It combines the capabilities of `pandas`_ and `shapely`_, 20 providing geospatial operations in pandas and a high-level interface 21 to multiple geometries to shapely. GeoPandas enables you to easily do 22 operations in python that would otherwise require a spatial database 23 such as PostGIS. 24 25 .. _pandas: http://pandas.pydata.org 26 .. _shapely: http://shapely.readthedocs.io/en/latest/ 27 """ 28 29 if os.environ.get("READTHEDOCS", False) == "True": 30 INSTALL_REQUIRES = [] 31 else: 32 INSTALL_REQUIRES = [ 33 "pandas >= 1.0.0", 34 "shapely >= 1.7", 35 "fiona >= 1.8", 36 "pyproj >= 2.6.1.post1", 37 "packaging", 38 ] 39 40 # get all data dirs in the datasets module 41 data_files = [] 42 43 for item in os.listdir("geopandas/datasets"): 44 if not item.startswith("__"): 45 if os.path.isdir(os.path.join("geopandas/datasets/", item)): 46 data_files.append(os.path.join("datasets", item, "*")) 47 elif item.endswith(".zip"): 48 data_files.append(os.path.join("datasets", item)) 49 50 data_files.append("tests/data/*") 51 52 53 setup( 54 name="geopandas", 55 version=versioneer.get_version(), 56 description="Geographic pandas extensions", 57 license="BSD", 58 author="GeoPandas contributors", 59 author_email="[email protected]", 60 url="http://geopandas.org", 61 project_urls={ 62 "Source": "https://github.com/geopandas/geopandas", 63 }, 64 long_description=LONG_DESCRIPTION, 65 packages=[ 66 "geopandas", 67 "geopandas.io", 68 "geopandas.tools", 69 "geopandas.datasets", 70 "geopandas.tests", 71 "geopandas.tools.tests", 72 ], 73 package_data={"geopandas": data_files}, 74 python_requires=">=3.7", 75 install_requires=INSTALL_REQUIRES, 76 cmdclass=versioneer.get_cmdclass(), 77 ) 78 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -71,7 +71,7 @@ "geopandas.tools.tests", ], package_data={"geopandas": data_files}, - python_requires=">=3.7", + python_requires=">=3.8", install_requires=INSTALL_REQUIRES, cmdclass=versioneer.get_cmdclass(), )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -71,7 +71,7 @@\n \"geopandas.tools.tests\",\n ],\n package_data={\"geopandas\": data_files},\n- python_requires=\">=3.7\",\n+ python_requires=\">=3.8\",\n install_requires=INSTALL_REQUIRES,\n cmdclass=versioneer.get_cmdclass(),\n )\n", "issue": "Drop Python 3.7\nWe should consider dropping support for Python 3.7. We are roughly following numpy model (#1457) and numpy itself is 3.8+ now. Same applies to pyproj, which requires 3.8 (and causes some macOS CI failures because of some conda issues). \r\n\r\nI forgot about Python versions when doing #2358 and bumped only packages.\r\n\r\n@jorisvandenbossche if you're fine with that, I'll update CI matrix and related things.\n", "before_files": [{"content": "#!/usr/bin/env/python\n\"\"\"Installation script\n\n\"\"\"\n\nimport os\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"GeoPandas is a project to add support for geographic data to\n`pandas`_ objects.\n\nThe goal of GeoPandas is to make working with geospatial data in\npython easier. It combines the capabilities of `pandas`_ and `shapely`_,\nproviding geospatial operations in pandas and a high-level interface\nto multiple geometries to shapely. GeoPandas enables you to easily do\noperations in python that would otherwise require a spatial database\nsuch as PostGIS.\n\n.. _pandas: http://pandas.pydata.org\n.. _shapely: http://shapely.readthedocs.io/en/latest/\n\"\"\"\n\nif os.environ.get(\"READTHEDOCS\", False) == \"True\":\n INSTALL_REQUIRES = []\nelse:\n INSTALL_REQUIRES = [\n \"pandas >= 1.0.0\",\n \"shapely >= 1.7\",\n \"fiona >= 1.8\",\n \"pyproj >= 2.6.1.post1\",\n \"packaging\",\n ]\n\n# get all data dirs in the datasets module\ndata_files = []\n\nfor item in os.listdir(\"geopandas/datasets\"):\n if not item.startswith(\"__\"):\n if os.path.isdir(os.path.join(\"geopandas/datasets/\", item)):\n data_files.append(os.path.join(\"datasets\", item, \"*\"))\n elif item.endswith(\".zip\"):\n data_files.append(os.path.join(\"datasets\", item))\n\ndata_files.append(\"tests/data/*\")\n\n\nsetup(\n name=\"geopandas\",\n version=versioneer.get_version(),\n description=\"Geographic pandas extensions\",\n license=\"BSD\",\n author=\"GeoPandas contributors\",\n author_email=\"[email protected]\",\n url=\"http://geopandas.org\",\n project_urls={\n \"Source\": \"https://github.com/geopandas/geopandas\",\n },\n long_description=LONG_DESCRIPTION,\n packages=[\n \"geopandas\",\n \"geopandas.io\",\n \"geopandas.tools\",\n \"geopandas.datasets\",\n \"geopandas.tests\",\n \"geopandas.tools.tests\",\n ],\n package_data={\"geopandas\": data_files},\n python_requires=\">=3.7\",\n install_requires=INSTALL_REQUIRES,\n cmdclass=versioneer.get_cmdclass(),\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env/python\n\"\"\"Installation script\n\n\"\"\"\n\nimport os\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"GeoPandas is a project to add support for geographic data to\n`pandas`_ objects.\n\nThe goal of GeoPandas is to make working with geospatial data in\npython easier. It combines the capabilities of `pandas`_ and `shapely`_,\nproviding geospatial operations in pandas and a high-level interface\nto multiple geometries to shapely. 
GeoPandas enables you to easily do\noperations in python that would otherwise require a spatial database\nsuch as PostGIS.\n\n.. _pandas: http://pandas.pydata.org\n.. _shapely: http://shapely.readthedocs.io/en/latest/\n\"\"\"\n\nif os.environ.get(\"READTHEDOCS\", False) == \"True\":\n INSTALL_REQUIRES = []\nelse:\n INSTALL_REQUIRES = [\n \"pandas >= 1.0.0\",\n \"shapely >= 1.7\",\n \"fiona >= 1.8\",\n \"pyproj >= 2.6.1.post1\",\n \"packaging\",\n ]\n\n# get all data dirs in the datasets module\ndata_files = []\n\nfor item in os.listdir(\"geopandas/datasets\"):\n if not item.startswith(\"__\"):\n if os.path.isdir(os.path.join(\"geopandas/datasets/\", item)):\n data_files.append(os.path.join(\"datasets\", item, \"*\"))\n elif item.endswith(\".zip\"):\n data_files.append(os.path.join(\"datasets\", item))\n\ndata_files.append(\"tests/data/*\")\n\n\nsetup(\n name=\"geopandas\",\n version=versioneer.get_version(),\n description=\"Geographic pandas extensions\",\n license=\"BSD\",\n author=\"GeoPandas contributors\",\n author_email=\"[email protected]\",\n url=\"http://geopandas.org\",\n project_urls={\n \"Source\": \"https://github.com/geopandas/geopandas\",\n },\n long_description=LONG_DESCRIPTION,\n packages=[\n \"geopandas\",\n \"geopandas.io\",\n \"geopandas.tools\",\n \"geopandas.datasets\",\n \"geopandas.tests\",\n \"geopandas.tools.tests\",\n ],\n package_data={\"geopandas\": data_files},\n python_requires=\">=3.8\",\n install_requires=INSTALL_REQUIRES,\n cmdclass=versioneer.get_cmdclass(),\n)\n", "path": "setup.py"}]}
1,056
92
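Editorial note on the record above: the fix is a one-line metadata bump. For context, `python_requires` is the field pip consults before installing a release, so raising it is how a project formally drops an interpreter version. A stripped-down sketch of that field (package name and version are placeholders, not geopandas' actual setup.py):

```python
from setuptools import setup

setup(
    name="example-package",   # placeholder, not geopandas
    version="0.1.0",
    python_requires=">=3.8",  # pip running under Python 3.7 will refuse this release
)
```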
gh_patches_debug_5914
rasdani/github-patches
git_diff
DataDog__dd-trace-py-1585
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- profiling/line2def does not handle empty filenames ### Which version of dd-trace-py are you using? We're not running dd-trace - we're running the profiler by importing `ddtrace.profiling.auto`. ### Which version of the libraries are you using? ddtrace: 0.40.0 datadog: 0.38.0 You can copy/paste the output of `pip freeze` here. ### How can we reproduce your problem? I'm unsure - this appears to happen sporadically. ### What is the result that you get? First, ddtrace runs into a KeyError in `_to_Location`, line 90: ``` def _to_Location(self, filename, lineno, funcname=None): try: return self._locations[(filename, lineno, funcname)] ``` `filename` is '', `lineno` is 1, `funcname` is None. Next, in `filename_and_lineno_to_def`, line 63, we get an IndexError: ``` def filename_and_lineno_to_def(filename, lineno): if filename[0] == "<" and filename[-1] == ">": return default_def(filename, lineno) ``` Since the filename is an empty string, this complains. ### What is the result that you expected? Not an error. If you need more information, please let me know! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ddtrace/profiling/_line2def.py` Content: ``` 1 # -*- encoding: utf-8 -*- 2 import ast 3 4 import intervaltree 5 6 7 try: 8 from functools import lru_cache 9 except ImportError: 10 # This is for Python 2 but Python 2 does not use this module. 11 # It's just useful for unit tests. 12 def lru_cache(maxsize): 13 def w(f): 14 return f 15 16 return w 17 18 19 try: 20 # Python 2 does not have this. 21 from tokenize import open as source_open 22 except ImportError: 23 source_open = open 24 25 from ddtrace.vendor import six 26 27 28 def _compute_interval(node): 29 min_lineno = node.lineno 30 max_lineno = node.lineno 31 for node in ast.walk(node): 32 if hasattr(node, "lineno"): 33 min_lineno = min(min_lineno, node.lineno) 34 max_lineno = max(max_lineno, node.lineno) 35 return (min_lineno, max_lineno + 1) 36 37 38 if six.PY3: 39 _DEFS = (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef) 40 else: 41 _DEFS = (ast.FunctionDef, ast.ClassDef) 42 43 44 @lru_cache(maxsize=256) 45 def file_to_tree(filename): 46 # Use tokenize.open to detect encoding 47 with source_open(filename) as f: 48 parsed = ast.parse(f.read(), filename=filename) 49 tree = intervaltree.IntervalTree() 50 for node in ast.walk(parsed): 51 if isinstance(node, _DEFS): 52 start, end = _compute_interval(node) 53 tree[start:end] = node 54 return tree 55 56 57 def default_def(filename, lineno): 58 return filename + ":" + str(lineno) 59 60 61 @lru_cache(maxsize=8192) 62 def filename_and_lineno_to_def(filename, lineno): 63 if filename[0] == "<" and filename[-1] == ">": 64 return default_def(filename, lineno) 65 66 try: 67 matches = file_to_tree(filename)[lineno] 68 except (IOError, OSError, SyntaxError): 69 return default_def(filename, lineno) 70 if matches: 71 return min(matches, key=lambda i: i.length()).data.name 72 73 return default_def(filename, lineno) 74 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ddtrace/profiling/_line2def.py b/ddtrace/profiling/_line2def.py --- a/ddtrace/profiling/_line2def.py +++ b/ddtrace/profiling/_line2def.py @@ -55,12 +55,12 @@ def default_def(filename, lineno): - return filename + ":" + str(lineno) + return str(filename) + ":" + str(lineno) @lru_cache(maxsize=8192) def filename_and_lineno_to_def(filename, lineno): - if filename[0] == "<" and filename[-1] == ">": + if not filename or (filename[0] == "<" and filename[-1] == ">"): return default_def(filename, lineno) try:
{"golden_diff": "diff --git a/ddtrace/profiling/_line2def.py b/ddtrace/profiling/_line2def.py\n--- a/ddtrace/profiling/_line2def.py\n+++ b/ddtrace/profiling/_line2def.py\n@@ -55,12 +55,12 @@\n \n \n def default_def(filename, lineno):\n- return filename + \":\" + str(lineno)\n+ return str(filename) + \":\" + str(lineno)\n \n \n @lru_cache(maxsize=8192)\n def filename_and_lineno_to_def(filename, lineno):\n- if filename[0] == \"<\" and filename[-1] == \">\":\n+ if not filename or (filename[0] == \"<\" and filename[-1] == \">\"):\n return default_def(filename, lineno)\n \n try:\n", "issue": "profiling/line2def does not handle empty filenames\n### Which version of dd-trace-py are you using?\r\nWe're not running dd-trace - we're running the profiler by importing `ddtrace.profiling.auto`.\r\n\r\n### Which version of the libraries are you using?\r\nddtrace: 0.40.0\r\ndatadog: 0.38.0\r\n\r\nYou can copy/paste the output of `pip freeze` here.\r\n\r\n### How can we reproduce your problem?\r\nI'm unsure - this appears to happen sporadically.\r\n\r\n### What is the result that you get?\r\nFirst, ddtrace runs into a KeyError in `_to_Location`, line 90:\r\n```\r\ndef _to_Location(self, filename, lineno, funcname=None):\r\n try:\r\n return self._locations[(filename, lineno, funcname)]\r\n```\r\n`filename` is '', `lineno` is 1, `funcname` is None.\r\n\r\nNext, in `filename_and_lineno_to_def`, line 63, we get an IndexError:\r\n```\r\ndef filename_and_lineno_to_def(filename, lineno):\r\n if filename[0] == \"<\" and filename[-1] == \">\":\r\n return default_def(filename, lineno)\r\n```\r\nSince the filename is an empty string, this complains.\r\n\r\n\r\n\r\n### What is the result that you expected?\r\nNot an error.\r\n\r\nIf you need more information, please let me know!\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\nimport ast\n\nimport intervaltree\n\n\ntry:\n from functools import lru_cache\nexcept ImportError:\n # This is for Python\u00a02 but Python\u00a02 does not use this module.\n # It's just useful for unit tests.\n def lru_cache(maxsize):\n def w(f):\n return f\n\n return w\n\n\ntry:\n # Python\u00a02 does not have this.\n from tokenize import open as source_open\nexcept ImportError:\n source_open = open\n\nfrom ddtrace.vendor import six\n\n\ndef _compute_interval(node):\n min_lineno = node.lineno\n max_lineno = node.lineno\n for node in ast.walk(node):\n if hasattr(node, \"lineno\"):\n min_lineno = min(min_lineno, node.lineno)\n max_lineno = max(max_lineno, node.lineno)\n return (min_lineno, max_lineno + 1)\n\n\nif six.PY3:\n _DEFS = (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)\nelse:\n _DEFS = (ast.FunctionDef, ast.ClassDef)\n\n\n@lru_cache(maxsize=256)\ndef file_to_tree(filename):\n # Use tokenize.open to detect encoding\n with source_open(filename) as f:\n parsed = ast.parse(f.read(), filename=filename)\n tree = intervaltree.IntervalTree()\n for node in ast.walk(parsed):\n if isinstance(node, _DEFS):\n start, end = _compute_interval(node)\n tree[start:end] = node\n return tree\n\n\ndef default_def(filename, lineno):\n return filename + \":\" + str(lineno)\n\n\n@lru_cache(maxsize=8192)\ndef filename_and_lineno_to_def(filename, lineno):\n if filename[0] == \"<\" and filename[-1] == \">\":\n return default_def(filename, lineno)\n\n try:\n matches = file_to_tree(filename)[lineno]\n except (IOError, OSError, SyntaxError):\n return default_def(filename, lineno)\n if matches:\n return min(matches, key=lambda i: i.length()).data.name\n\n return default_def(filename, lineno)\n", 
"path": "ddtrace/profiling/_line2def.py"}], "after_files": [{"content": "# -*- encoding: utf-8 -*-\nimport ast\n\nimport intervaltree\n\n\ntry:\n from functools import lru_cache\nexcept ImportError:\n # This is for Python\u00a02 but Python\u00a02 does not use this module.\n # It's just useful for unit tests.\n def lru_cache(maxsize):\n def w(f):\n return f\n\n return w\n\n\ntry:\n # Python\u00a02 does not have this.\n from tokenize import open as source_open\nexcept ImportError:\n source_open = open\n\nfrom ddtrace.vendor import six\n\n\ndef _compute_interval(node):\n min_lineno = node.lineno\n max_lineno = node.lineno\n for node in ast.walk(node):\n if hasattr(node, \"lineno\"):\n min_lineno = min(min_lineno, node.lineno)\n max_lineno = max(max_lineno, node.lineno)\n return (min_lineno, max_lineno + 1)\n\n\nif six.PY3:\n _DEFS = (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)\nelse:\n _DEFS = (ast.FunctionDef, ast.ClassDef)\n\n\n@lru_cache(maxsize=256)\ndef file_to_tree(filename):\n # Use tokenize.open to detect encoding\n with source_open(filename) as f:\n parsed = ast.parse(f.read(), filename=filename)\n tree = intervaltree.IntervalTree()\n for node in ast.walk(parsed):\n if isinstance(node, _DEFS):\n start, end = _compute_interval(node)\n tree[start:end] = node\n return tree\n\n\ndef default_def(filename, lineno):\n return str(filename) + \":\" + str(lineno)\n\n\n@lru_cache(maxsize=8192)\ndef filename_and_lineno_to_def(filename, lineno):\n if not filename or (filename[0] == \"<\" and filename[-1] == \">\"):\n return default_def(filename, lineno)\n\n try:\n matches = file_to_tree(filename)[lineno]\n except (IOError, OSError, SyntaxError):\n return default_def(filename, lineno)\n if matches:\n return min(matches, key=lambda i: i.length()).data.name\n\n return default_def(filename, lineno)\n", "path": "ddtrace/profiling/_line2def.py"}]}
1,159
175
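Editorial note on the record above: both hunks guard against the empty filename reported in the issue — `default_def()` now coerces with `str()`, and the pseudo-filename test checks truthiness before indexing, because `""[0]` raises IndexError. A self-contained illustration (the helper name `is_pseudo_file` is ours for clarity, not part of ddtrace):

```python
def default_def(filename, lineno):
    # str() also tolerates None or other non-string objects sneaking in
    return str(filename) + ":" + str(lineno)


def is_pseudo_file(filename):
    # `not filename` short-circuits first, so filename[0] is never evaluated for ""
    return not filename or (filename[0] == "<" and filename[-1] == ">")


for fn in ("", "<string>", "module.py"):
    if is_pseudo_file(fn):
        print(default_def(fn, 1))   # "" -> ":1", "<string>" -> "<string>:1"
    else:
        print("would parse", fn)    # only real paths would reach the AST lookup
```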
gh_patches_debug_6153
rasdani/github-patches
git_diff
bramstroker__homeassistant-powercalc-2027
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Upgrade to Python 3.12 After HA 2024.2.0 is release we could upgrade alle python dependencies to Python 3.12 Also all github action can run on python 3.12 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `custom_components/powercalc/sensors/utility_meter.py` Content: ``` 1 from __future__ import annotations 2 3 import inspect 4 import logging 5 from decimal import Decimal 6 from typing import cast 7 8 import homeassistant.helpers.entity_registry as er 9 from homeassistant.components.select import DOMAIN as SELECT_DOMAIN 10 from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN 11 from homeassistant.components.utility_meter.const import ( 12 DATA_TARIFF_SENSORS, 13 DATA_UTILITY, 14 ) 15 from homeassistant.components.utility_meter.select import TariffSelect 16 from homeassistant.components.utility_meter.sensor import UtilityMeterSensor 17 from homeassistant.core import HomeAssistant 18 from homeassistant.helpers.entity_component import EntityComponent 19 from homeassistant.helpers.typing import StateType 20 21 from custom_components.powercalc.const import ( 22 CONF_CREATE_UTILITY_METERS, 23 CONF_ENERGY_SENSOR_PRECISION, 24 CONF_IGNORE_UNAVAILABLE_STATE, CONF_UTILITY_METER_OFFSET, 25 CONF_UTILITY_METER_TARIFFS, 26 CONF_UTILITY_METER_TYPES, 27 DEFAULT_ENERGY_SENSOR_PRECISION, 28 DOMAIN, 29 ) 30 31 from .abstract import BaseEntity 32 from .energy import EnergySensor, RealEnergySensor 33 34 _LOGGER = logging.getLogger(__name__) 35 36 GENERAL_TARIFF = "general" 37 38 39 async def create_utility_meters( 40 hass: HomeAssistant, 41 energy_sensor: EnergySensor, 42 sensor_config: dict, 43 net_consumption: bool = False, 44 ) -> list[VirtualUtilityMeter]: 45 """Create the utility meters.""" 46 if not sensor_config.get(CONF_CREATE_UTILITY_METERS): 47 return [] 48 49 utility_meters = [] 50 51 if DATA_UTILITY not in hass.data: # pragma: no cover 52 hass.data[DATA_UTILITY] = {} 53 54 tariffs = sensor_config.get(CONF_UTILITY_METER_TARIFFS) 55 meter_types = sensor_config.get(CONF_UTILITY_METER_TYPES) 56 for meter_type in meter_types: # type: ignore 57 tariff_sensors = [] 58 59 name = f"{energy_sensor.name} {meter_type}" 60 entity_id = f"{energy_sensor.entity_id}_{meter_type}" 61 unique_id = None 62 if energy_sensor.unique_id: 63 unique_id = f"{energy_sensor.unique_id}_{meter_type}" 64 65 # Prevent duplicate creation of utility meter. See #1322 66 if isinstance(energy_sensor, RealEnergySensor) and unique_id: 67 entity_registry = er.async_get(hass) 68 existing_entity_id = entity_registry.async_get_entity_id( 69 domain=SENSOR_DOMAIN, 70 platform=DOMAIN, 71 unique_id=unique_id, 72 ) 73 if existing_entity_id and hass.states.get(existing_entity_id): 74 continue # pragma: no cover 75 76 # Create generic utility meter (no specific tariffs) 77 if not tariffs or GENERAL_TARIFF in tariffs: 78 utility_meter = await create_utility_meter( 79 energy_sensor.entity_id, 80 entity_id, 81 name, 82 sensor_config, 83 meter_type, 84 unique_id, 85 net_consumption=net_consumption, 86 ) 87 tariff_sensors.append(utility_meter) 88 utility_meters.append(utility_meter) 89 90 # Create utility meter for each tariff, and the tariff select entity which allows you to select a tariff. 
91 if tariffs: 92 filtered_tariffs = tariffs = [ 93 t for t in list(tariffs) if t != GENERAL_TARIFF 94 ] 95 tariff_select = await create_tariff_select( 96 filtered_tariffs, 97 hass, 98 name, 99 unique_id, 100 ) 101 102 for tariff in filtered_tariffs: 103 utility_meter = await create_utility_meter( 104 energy_sensor.entity_id, 105 entity_id, 106 name, 107 sensor_config, 108 meter_type, 109 unique_id, 110 tariff, 111 tariff_select.entity_id, 112 ) 113 tariff_sensors.append(utility_meter) 114 utility_meters.append(utility_meter) 115 116 hass.data[DATA_UTILITY][entity_id] = {DATA_TARIFF_SENSORS: tariff_sensors} 117 118 return utility_meters 119 120 121 async def create_tariff_select( 122 tariffs: list, 123 hass: HomeAssistant, 124 name: str, 125 unique_id: str | None, 126 ) -> TariffSelect: 127 """Create tariff selection entity.""" 128 _LOGGER.debug("Creating utility_meter tariff select: %s", name) 129 130 select_component = cast(EntityComponent, hass.data[SELECT_DOMAIN]) 131 select_unique_id = None 132 if unique_id: 133 select_unique_id = f"{unique_id}_select" 134 135 tariff_select = TariffSelect( 136 name, 137 tariffs, 138 select_unique_id, 139 ) 140 141 await select_component.async_add_entities([tariff_select]) 142 143 return tariff_select 144 145 146 async def create_utility_meter( 147 source_entity: str, 148 entity_id: str, 149 name: str, 150 sensor_config: dict, 151 meter_type: str, 152 unique_id: str | None = None, 153 tariff: str | None = None, 154 tariff_entity: str | None = None, 155 net_consumption: bool = False, 156 ) -> VirtualUtilityMeter: 157 """Create a utility meter entity, one per tariff.""" 158 parent_meter = entity_id 159 if tariff: 160 name = f"{name} {tariff}" 161 entity_id = f"{entity_id}_{tariff}" 162 if unique_id: 163 unique_id = f"{unique_id}_{tariff}" 164 165 _LOGGER.debug("Creating utility_meter sensor: %s (entity_id=%s)", name, entity_id) 166 167 params = { 168 "source_entity": source_entity, 169 "name": name, 170 "meter_type": meter_type, 171 "meter_offset": sensor_config.get(CONF_UTILITY_METER_OFFSET), 172 "net_consumption": net_consumption, 173 "tariff": tariff, 174 "tariff_entity": tariff_entity, 175 } 176 177 signature = inspect.signature(UtilityMeterSensor.__init__) 178 if "parent_meter" in signature.parameters: 179 params["parent_meter"] = parent_meter 180 if "delta_values" in signature.parameters: 181 params["delta_values"] = False 182 if "unique_id" in signature.parameters: 183 params["unique_id"] = unique_id 184 if "cron_pattern" in signature.parameters: 185 params["cron_pattern"] = None 186 if "periodically_resetting" in signature.parameters: 187 params["periodically_resetting"] = False 188 if "sensor_always_available" in signature.parameters: 189 params["sensor_always_available"] = sensor_config.get(CONF_IGNORE_UNAVAILABLE_STATE) or False 190 191 utility_meter = VirtualUtilityMeter(**params) 192 utility_meter.rounding_digits = sensor_config.get(CONF_ENERGY_SENSOR_PRECISION) # type: ignore 193 utility_meter.entity_id = entity_id 194 195 return utility_meter 196 197 198 class VirtualUtilityMeter(UtilityMeterSensor, BaseEntity): # type: ignore 199 rounding_digits: int = DEFAULT_ENERGY_SENSOR_PRECISION 200 201 @property 202 def unique_id(self) -> str | None: 203 """Return the unique id.""" 204 return self._attr_unique_id 205 206 @property 207 def native_value(self) -> Decimal | StateType: 208 """Return the state of the sensor.""" 209 if self.rounding_digits and self._state is not None: 210 return round(self._state, self.rounding_digits) 211 212 return 
self._state 213 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/custom_components/powercalc/sensors/utility_meter.py b/custom_components/powercalc/sensors/utility_meter.py --- a/custom_components/powercalc/sensors/utility_meter.py +++ b/custom_components/powercalc/sensors/utility_meter.py @@ -21,7 +21,8 @@ from custom_components.powercalc.const import ( CONF_CREATE_UTILITY_METERS, CONF_ENERGY_SENSOR_PRECISION, - CONF_IGNORE_UNAVAILABLE_STATE, CONF_UTILITY_METER_OFFSET, + CONF_IGNORE_UNAVAILABLE_STATE, + CONF_UTILITY_METER_OFFSET, CONF_UTILITY_METER_TARIFFS, CONF_UTILITY_METER_TYPES, DEFAULT_ENERGY_SENSOR_PRECISION,
{"golden_diff": "diff --git a/custom_components/powercalc/sensors/utility_meter.py b/custom_components/powercalc/sensors/utility_meter.py\n--- a/custom_components/powercalc/sensors/utility_meter.py\n+++ b/custom_components/powercalc/sensors/utility_meter.py\n@@ -21,7 +21,8 @@\n from custom_components.powercalc.const import (\n CONF_CREATE_UTILITY_METERS,\n CONF_ENERGY_SENSOR_PRECISION,\n- CONF_IGNORE_UNAVAILABLE_STATE, CONF_UTILITY_METER_OFFSET,\n+ CONF_IGNORE_UNAVAILABLE_STATE,\n+ CONF_UTILITY_METER_OFFSET,\n CONF_UTILITY_METER_TARIFFS,\n CONF_UTILITY_METER_TYPES,\n DEFAULT_ENERGY_SENSOR_PRECISION,\n", "issue": "Upgrade to Python 3.12\nAfter HA 2024.2.0 is release we could upgrade alle python dependencies to Python 3.12\r\nAlso all github action can run on python 3.12\n", "before_files": [{"content": "from __future__ import annotations\n\nimport inspect\nimport logging\nfrom decimal import Decimal\nfrom typing import cast\n\nimport homeassistant.helpers.entity_registry as er\nfrom homeassistant.components.select import DOMAIN as SELECT_DOMAIN\nfrom homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN\nfrom homeassistant.components.utility_meter.const import (\n DATA_TARIFF_SENSORS,\n DATA_UTILITY,\n)\nfrom homeassistant.components.utility_meter.select import TariffSelect\nfrom homeassistant.components.utility_meter.sensor import UtilityMeterSensor\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity_component import EntityComponent\nfrom homeassistant.helpers.typing import StateType\n\nfrom custom_components.powercalc.const import (\n CONF_CREATE_UTILITY_METERS,\n CONF_ENERGY_SENSOR_PRECISION,\n CONF_IGNORE_UNAVAILABLE_STATE, CONF_UTILITY_METER_OFFSET,\n CONF_UTILITY_METER_TARIFFS,\n CONF_UTILITY_METER_TYPES,\n DEFAULT_ENERGY_SENSOR_PRECISION,\n DOMAIN,\n)\n\nfrom .abstract import BaseEntity\nfrom .energy import EnergySensor, RealEnergySensor\n\n_LOGGER = logging.getLogger(__name__)\n\nGENERAL_TARIFF = \"general\"\n\n\nasync def create_utility_meters(\n hass: HomeAssistant,\n energy_sensor: EnergySensor,\n sensor_config: dict,\n net_consumption: bool = False,\n) -> list[VirtualUtilityMeter]:\n \"\"\"Create the utility meters.\"\"\"\n if not sensor_config.get(CONF_CREATE_UTILITY_METERS):\n return []\n\n utility_meters = []\n\n if DATA_UTILITY not in hass.data: # pragma: no cover\n hass.data[DATA_UTILITY] = {}\n\n tariffs = sensor_config.get(CONF_UTILITY_METER_TARIFFS)\n meter_types = sensor_config.get(CONF_UTILITY_METER_TYPES)\n for meter_type in meter_types: # type: ignore\n tariff_sensors = []\n\n name = f\"{energy_sensor.name} {meter_type}\"\n entity_id = f\"{energy_sensor.entity_id}_{meter_type}\"\n unique_id = None\n if energy_sensor.unique_id:\n unique_id = f\"{energy_sensor.unique_id}_{meter_type}\"\n\n # Prevent duplicate creation of utility meter. 
See #1322\n if isinstance(energy_sensor, RealEnergySensor) and unique_id:\n entity_registry = er.async_get(hass)\n existing_entity_id = entity_registry.async_get_entity_id(\n domain=SENSOR_DOMAIN,\n platform=DOMAIN,\n unique_id=unique_id,\n )\n if existing_entity_id and hass.states.get(existing_entity_id):\n continue # pragma: no cover\n\n # Create generic utility meter (no specific tariffs)\n if not tariffs or GENERAL_TARIFF in tariffs:\n utility_meter = await create_utility_meter(\n energy_sensor.entity_id,\n entity_id,\n name,\n sensor_config,\n meter_type,\n unique_id,\n net_consumption=net_consumption,\n )\n tariff_sensors.append(utility_meter)\n utility_meters.append(utility_meter)\n\n # Create utility meter for each tariff, and the tariff select entity which allows you to select a tariff.\n if tariffs:\n filtered_tariffs = tariffs = [\n t for t in list(tariffs) if t != GENERAL_TARIFF\n ]\n tariff_select = await create_tariff_select(\n filtered_tariffs,\n hass,\n name,\n unique_id,\n )\n\n for tariff in filtered_tariffs:\n utility_meter = await create_utility_meter(\n energy_sensor.entity_id,\n entity_id,\n name,\n sensor_config,\n meter_type,\n unique_id,\n tariff,\n tariff_select.entity_id,\n )\n tariff_sensors.append(utility_meter)\n utility_meters.append(utility_meter)\n\n hass.data[DATA_UTILITY][entity_id] = {DATA_TARIFF_SENSORS: tariff_sensors}\n\n return utility_meters\n\n\nasync def create_tariff_select(\n tariffs: list,\n hass: HomeAssistant,\n name: str,\n unique_id: str | None,\n) -> TariffSelect:\n \"\"\"Create tariff selection entity.\"\"\"\n _LOGGER.debug(\"Creating utility_meter tariff select: %s\", name)\n\n select_component = cast(EntityComponent, hass.data[SELECT_DOMAIN])\n select_unique_id = None\n if unique_id:\n select_unique_id = f\"{unique_id}_select\"\n\n tariff_select = TariffSelect(\n name,\n tariffs,\n select_unique_id,\n )\n\n await select_component.async_add_entities([tariff_select])\n\n return tariff_select\n\n\nasync def create_utility_meter(\n source_entity: str,\n entity_id: str,\n name: str,\n sensor_config: dict,\n meter_type: str,\n unique_id: str | None = None,\n tariff: str | None = None,\n tariff_entity: str | None = None,\n net_consumption: bool = False,\n) -> VirtualUtilityMeter:\n \"\"\"Create a utility meter entity, one per tariff.\"\"\"\n parent_meter = entity_id\n if tariff:\n name = f\"{name} {tariff}\"\n entity_id = f\"{entity_id}_{tariff}\"\n if unique_id:\n unique_id = f\"{unique_id}_{tariff}\"\n\n _LOGGER.debug(\"Creating utility_meter sensor: %s (entity_id=%s)\", name, entity_id)\n\n params = {\n \"source_entity\": source_entity,\n \"name\": name,\n \"meter_type\": meter_type,\n \"meter_offset\": sensor_config.get(CONF_UTILITY_METER_OFFSET),\n \"net_consumption\": net_consumption,\n \"tariff\": tariff,\n \"tariff_entity\": tariff_entity,\n }\n\n signature = inspect.signature(UtilityMeterSensor.__init__)\n if \"parent_meter\" in signature.parameters:\n params[\"parent_meter\"] = parent_meter\n if \"delta_values\" in signature.parameters:\n params[\"delta_values\"] = False\n if \"unique_id\" in signature.parameters:\n params[\"unique_id\"] = unique_id\n if \"cron_pattern\" in signature.parameters:\n params[\"cron_pattern\"] = None\n if \"periodically_resetting\" in signature.parameters:\n params[\"periodically_resetting\"] = False\n if \"sensor_always_available\" in signature.parameters:\n params[\"sensor_always_available\"] = sensor_config.get(CONF_IGNORE_UNAVAILABLE_STATE) or False\n\n utility_meter = VirtualUtilityMeter(**params)\n 
utility_meter.rounding_digits = sensor_config.get(CONF_ENERGY_SENSOR_PRECISION) # type: ignore\n utility_meter.entity_id = entity_id\n\n return utility_meter\n\n\nclass VirtualUtilityMeter(UtilityMeterSensor, BaseEntity): # type: ignore\n rounding_digits: int = DEFAULT_ENERGY_SENSOR_PRECISION\n\n @property\n def unique_id(self) -> str | None:\n \"\"\"Return the unique id.\"\"\"\n return self._attr_unique_id\n\n @property\n def native_value(self) -> Decimal | StateType:\n \"\"\"Return the state of the sensor.\"\"\"\n if self.rounding_digits and self._state is not None:\n return round(self._state, self.rounding_digits)\n\n return self._state\n", "path": "custom_components/powercalc/sensors/utility_meter.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport inspect\nimport logging\nfrom decimal import Decimal\nfrom typing import cast\n\nimport homeassistant.helpers.entity_registry as er\nfrom homeassistant.components.select import DOMAIN as SELECT_DOMAIN\nfrom homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN\nfrom homeassistant.components.utility_meter.const import (\n DATA_TARIFF_SENSORS,\n DATA_UTILITY,\n)\nfrom homeassistant.components.utility_meter.select import TariffSelect\nfrom homeassistant.components.utility_meter.sensor import UtilityMeterSensor\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity_component import EntityComponent\nfrom homeassistant.helpers.typing import StateType\n\nfrom custom_components.powercalc.const import (\n CONF_CREATE_UTILITY_METERS,\n CONF_ENERGY_SENSOR_PRECISION,\n CONF_IGNORE_UNAVAILABLE_STATE,\n CONF_UTILITY_METER_OFFSET,\n CONF_UTILITY_METER_TARIFFS,\n CONF_UTILITY_METER_TYPES,\n DEFAULT_ENERGY_SENSOR_PRECISION,\n DOMAIN,\n)\n\nfrom .abstract import BaseEntity\nfrom .energy import EnergySensor, RealEnergySensor\n\n_LOGGER = logging.getLogger(__name__)\n\nGENERAL_TARIFF = \"general\"\n\n\nasync def create_utility_meters(\n hass: HomeAssistant,\n energy_sensor: EnergySensor,\n sensor_config: dict,\n net_consumption: bool = False,\n) -> list[VirtualUtilityMeter]:\n \"\"\"Create the utility meters.\"\"\"\n if not sensor_config.get(CONF_CREATE_UTILITY_METERS):\n return []\n\n utility_meters = []\n\n if DATA_UTILITY not in hass.data: # pragma: no cover\n hass.data[DATA_UTILITY] = {}\n\n tariffs = sensor_config.get(CONF_UTILITY_METER_TARIFFS)\n meter_types = sensor_config.get(CONF_UTILITY_METER_TYPES)\n for meter_type in meter_types: # type: ignore\n tariff_sensors = []\n\n name = f\"{energy_sensor.name} {meter_type}\"\n entity_id = f\"{energy_sensor.entity_id}_{meter_type}\"\n unique_id = None\n if energy_sensor.unique_id:\n unique_id = f\"{energy_sensor.unique_id}_{meter_type}\"\n\n # Prevent duplicate creation of utility meter. 
See #1322\n if isinstance(energy_sensor, RealEnergySensor) and unique_id:\n entity_registry = er.async_get(hass)\n existing_entity_id = entity_registry.async_get_entity_id(\n domain=SENSOR_DOMAIN,\n platform=DOMAIN,\n unique_id=unique_id,\n )\n if existing_entity_id and hass.states.get(existing_entity_id):\n continue # pragma: no cover\n\n # Create generic utility meter (no specific tariffs)\n if not tariffs or GENERAL_TARIFF in tariffs:\n utility_meter = await create_utility_meter(\n energy_sensor.entity_id,\n entity_id,\n name,\n sensor_config,\n meter_type,\n unique_id,\n net_consumption=net_consumption,\n )\n tariff_sensors.append(utility_meter)\n utility_meters.append(utility_meter)\n\n # Create utility meter for each tariff, and the tariff select entity which allows you to select a tariff.\n if tariffs:\n filtered_tariffs = tariffs = [\n t for t in list(tariffs) if t != GENERAL_TARIFF\n ]\n tariff_select = await create_tariff_select(\n filtered_tariffs,\n hass,\n name,\n unique_id,\n )\n\n for tariff in filtered_tariffs:\n utility_meter = await create_utility_meter(\n energy_sensor.entity_id,\n entity_id,\n name,\n sensor_config,\n meter_type,\n unique_id,\n tariff,\n tariff_select.entity_id,\n )\n tariff_sensors.append(utility_meter)\n utility_meters.append(utility_meter)\n\n hass.data[DATA_UTILITY][entity_id] = {DATA_TARIFF_SENSORS: tariff_sensors}\n\n return utility_meters\n\n\nasync def create_tariff_select(\n tariffs: list,\n hass: HomeAssistant,\n name: str,\n unique_id: str | None,\n) -> TariffSelect:\n \"\"\"Create tariff selection entity.\"\"\"\n _LOGGER.debug(\"Creating utility_meter tariff select: %s\", name)\n\n select_component = cast(EntityComponent, hass.data[SELECT_DOMAIN])\n select_unique_id = None\n if unique_id:\n select_unique_id = f\"{unique_id}_select\"\n\n tariff_select = TariffSelect(\n name,\n tariffs,\n select_unique_id,\n )\n\n await select_component.async_add_entities([tariff_select])\n\n return tariff_select\n\n\nasync def create_utility_meter(\n source_entity: str,\n entity_id: str,\n name: str,\n sensor_config: dict,\n meter_type: str,\n unique_id: str | None = None,\n tariff: str | None = None,\n tariff_entity: str | None = None,\n net_consumption: bool = False,\n) -> VirtualUtilityMeter:\n \"\"\"Create a utility meter entity, one per tariff.\"\"\"\n parent_meter = entity_id\n if tariff:\n name = f\"{name} {tariff}\"\n entity_id = f\"{entity_id}_{tariff}\"\n if unique_id:\n unique_id = f\"{unique_id}_{tariff}\"\n\n _LOGGER.debug(\"Creating utility_meter sensor: %s (entity_id=%s)\", name, entity_id)\n\n params = {\n \"source_entity\": source_entity,\n \"name\": name,\n \"meter_type\": meter_type,\n \"meter_offset\": sensor_config.get(CONF_UTILITY_METER_OFFSET),\n \"net_consumption\": net_consumption,\n \"tariff\": tariff,\n \"tariff_entity\": tariff_entity,\n }\n\n signature = inspect.signature(UtilityMeterSensor.__init__)\n if \"parent_meter\" in signature.parameters:\n params[\"parent_meter\"] = parent_meter\n if \"delta_values\" in signature.parameters:\n params[\"delta_values\"] = False\n if \"unique_id\" in signature.parameters:\n params[\"unique_id\"] = unique_id\n if \"cron_pattern\" in signature.parameters:\n params[\"cron_pattern\"] = None\n if \"periodically_resetting\" in signature.parameters:\n params[\"periodically_resetting\"] = False\n if \"sensor_always_available\" in signature.parameters:\n params[\"sensor_always_available\"] = sensor_config.get(CONF_IGNORE_UNAVAILABLE_STATE) or False\n\n utility_meter = VirtualUtilityMeter(**params)\n 
utility_meter.rounding_digits = sensor_config.get(CONF_ENERGY_SENSOR_PRECISION) # type: ignore\n utility_meter.entity_id = entity_id\n\n return utility_meter\n\n\nclass VirtualUtilityMeter(UtilityMeterSensor, BaseEntity): # type: ignore\n rounding_digits: int = DEFAULT_ENERGY_SENSOR_PRECISION\n\n @property\n def unique_id(self) -> str | None:\n \"\"\"Return the unique id.\"\"\"\n return self._attr_unique_id\n\n @property\n def native_value(self) -> Decimal | StateType:\n \"\"\"Return the state of the sensor.\"\"\"\n if self.rounding_digits and self._state is not None:\n return round(self._state, self.rounding_digits)\n\n return self._state\n", "path": "custom_components/powercalc/sensors/utility_meter.py"}]}
2,367
143
gh_patches_debug_17993
rasdani/github-patches
git_diff
modin-project__modin-1532
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update Ray to 0.8.5 Ray 0.8.5 was released, we should test and update. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup, find_packages 2 import versioneer 3 import os 4 from setuptools.dist import Distribution 5 6 try: 7 from wheel.bdist_wheel import bdist_wheel 8 9 HAS_WHEEL = True 10 except ImportError: 11 HAS_WHEEL = False 12 13 with open("README.md", "r") as fh: 14 long_description = fh.read() 15 16 if HAS_WHEEL: 17 18 class ModinWheel(bdist_wheel): 19 def finalize_options(self): 20 bdist_wheel.finalize_options(self) 21 self.root_is_pure = False 22 23 def get_tag(self): 24 _, _, plat = bdist_wheel.get_tag(self) 25 py = "py3" 26 abi = "none" 27 return py, abi, plat 28 29 30 class ModinDistribution(Distribution): 31 def __init__(self, *attrs): 32 Distribution.__init__(self, *attrs) 33 if HAS_WHEEL: 34 self.cmdclass["bdist_wheel"] = ModinWheel 35 36 def is_pure(self): 37 return False 38 39 40 dask_deps = ["dask>=2.1.0", "distributed>=2.3.2"] 41 ray_deps = ["ray==0.8.4", "pyarrow<0.17"] 42 if "SETUP_PLAT_NAME" in os.environ: 43 if "win" in os.environ["SETUP_PLAT_NAME"]: 44 all_deps = dask_deps 45 else: 46 all_deps = dask_deps + ray_deps 47 else: 48 all_deps = dask_deps if os.name == "nt" else dask_deps + ray_deps 49 50 setup( 51 name="modin", 52 version=versioneer.get_version(), 53 cmdclass=versioneer.get_cmdclass(), 54 distclass=ModinDistribution, 55 description="Modin: Make your pandas code run faster by changing one line of code.", 56 packages=find_packages(), 57 license="Apache 2", 58 url="https://github.com/modin-project/modin", 59 long_description=long_description, 60 long_description_content_type="text/markdown", 61 install_requires=["pandas==1.0.3", "packaging"], 62 extras_require={ 63 # can be installed by pip install modin[dask] 64 "dask": dask_deps, 65 "ray": ray_deps, 66 "all": all_deps, 67 }, 68 python_requires=">=3.5", 69 ) 70 ``` Path: `modin/__init__.py` Content: ``` 1 # Licensed to Modin Development Team under one or more contributor license agreements. 2 # See the NOTICE file distributed with this work for additional information regarding 3 # copyright ownership. The Modin Development Team licenses this file to you under the 4 # Apache License, Version 2.0 (the "License"); you may not use this file except in 5 # compliance with the License. You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software distributed under 10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific language 12 # governing permissions and limitations under the License. 
13 14 import os 15 import sys 16 import warnings 17 from packaging import version 18 19 from ._version import get_versions 20 21 22 def custom_formatwarning(msg, category, *args, **kwargs): 23 # ignore everything except the message 24 return "{}: {}\n".format(category.__name__, msg) 25 26 27 warnings.formatwarning = custom_formatwarning 28 # Filter numpy version warnings because they are not relevant 29 warnings.filterwarnings("ignore", message="numpy.dtype size changed") 30 warnings.filterwarnings("ignore", message="Large object of size") 31 warnings.filterwarnings( 32 "ignore", 33 message="The pandas.datetime class is deprecated and will be removed from pandas in a future version. " 34 "Import from datetime module instead.", 35 ) 36 37 38 def get_execution_engine(): 39 # In the future, when there are multiple engines and different ways of 40 # backing the DataFrame, there will have to be some changed logic here to 41 # decide these things. In the meantime, we will use the currently supported 42 # execution engine + backing (Pandas + Ray). 43 if "MODIN_ENGINE" in os.environ: 44 # .title allows variants like ray, RAY, Ray 45 return os.environ["MODIN_ENGINE"].title() 46 else: 47 if "MODIN_DEBUG" in os.environ: 48 return "Python" 49 else: 50 if sys.platform != "win32": 51 try: 52 import ray 53 54 except ImportError: 55 pass 56 else: 57 if version.parse(ray.__version__) != version.parse("0.8.4"): 58 raise ImportError( 59 "Please `pip install modin[ray]` to install compatible Ray version." 60 ) 61 return "Ray" 62 try: 63 import dask 64 import distributed 65 66 except ImportError: 67 raise ImportError( 68 "Please `pip install {}modin[dask]` to install an engine".format( 69 "modin[ray]` or `" if sys.platform != "win32" else "" 70 ) 71 ) 72 else: 73 if version.parse(dask.__version__) < version.parse( 74 "2.1.0" 75 ) or version.parse(distributed.__version__) < version.parse("2.3.2"): 76 raise ImportError( 77 "Please `pip install modin[dask]` to install compatible Dask version." 78 ) 79 return "Dask" 80 81 82 def get_partition_format(): 83 # See note above about engine + backing. 84 return os.environ.get("MODIN_BACKEND", "Pandas").title() 85 86 87 __version__ = "0.6.3" 88 __execution_engine__ = get_execution_engine() 89 __partition_format__ = get_partition_format() 90 91 # We don't want these used outside of this file. 92 del get_execution_engine 93 del get_partition_format 94 95 __version__ = get_versions()["version"] 96 del get_versions 97 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/modin/__init__.py b/modin/__init__.py --- a/modin/__init__.py +++ b/modin/__init__.py @@ -54,7 +54,7 @@ except ImportError: pass else: - if version.parse(ray.__version__) != version.parse("0.8.4"): + if version.parse(ray.__version__) != version.parse("0.8.5"): raise ImportError( "Please `pip install modin[ray]` to install compatible Ray version." ) diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -38,7 +38,7 @@ dask_deps = ["dask>=2.1.0", "distributed>=2.3.2"] -ray_deps = ["ray==0.8.4", "pyarrow<0.17"] +ray_deps = ["ray==0.8.5", "pyarrow<0.17"] if "SETUP_PLAT_NAME" in os.environ: if "win" in os.environ["SETUP_PLAT_NAME"]: all_deps = dask_deps
{"golden_diff": "diff --git a/modin/__init__.py b/modin/__init__.py\n--- a/modin/__init__.py\n+++ b/modin/__init__.py\n@@ -54,7 +54,7 @@\n except ImportError:\n pass\n else:\n- if version.parse(ray.__version__) != version.parse(\"0.8.4\"):\n+ if version.parse(ray.__version__) != version.parse(\"0.8.5\"):\n raise ImportError(\n \"Please `pip install modin[ray]` to install compatible Ray version.\"\n )\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -38,7 +38,7 @@\n \n \n dask_deps = [\"dask>=2.1.0\", \"distributed>=2.3.2\"]\n-ray_deps = [\"ray==0.8.4\", \"pyarrow<0.17\"]\n+ray_deps = [\"ray==0.8.5\", \"pyarrow<0.17\"]\n if \"SETUP_PLAT_NAME\" in os.environ:\n if \"win\" in os.environ[\"SETUP_PLAT_NAME\"]:\n all_deps = dask_deps\n", "issue": "Update Ray to 0.8.5\nRay 0.8.5 was released, we should test and update.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\nimport os\nfrom setuptools.dist import Distribution\n\ntry:\n from wheel.bdist_wheel import bdist_wheel\n\n HAS_WHEEL = True\nexcept ImportError:\n HAS_WHEEL = False\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nif HAS_WHEEL:\n\n class ModinWheel(bdist_wheel):\n def finalize_options(self):\n bdist_wheel.finalize_options(self)\n self.root_is_pure = False\n\n def get_tag(self):\n _, _, plat = bdist_wheel.get_tag(self)\n py = \"py3\"\n abi = \"none\"\n return py, abi, plat\n\n\nclass ModinDistribution(Distribution):\n def __init__(self, *attrs):\n Distribution.__init__(self, *attrs)\n if HAS_WHEEL:\n self.cmdclass[\"bdist_wheel\"] = ModinWheel\n\n def is_pure(self):\n return False\n\n\ndask_deps = [\"dask>=2.1.0\", \"distributed>=2.3.2\"]\nray_deps = [\"ray==0.8.4\", \"pyarrow<0.17\"]\nif \"SETUP_PLAT_NAME\" in os.environ:\n if \"win\" in os.environ[\"SETUP_PLAT_NAME\"]:\n all_deps = dask_deps\n else:\n all_deps = dask_deps + ray_deps\nelse:\n all_deps = dask_deps if os.name == \"nt\" else dask_deps + ray_deps\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n distclass=ModinDistribution,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(),\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\"pandas==1.0.3\", \"packaging\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.5\",\n)\n", "path": "setup.py"}, {"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport os\nimport sys\nimport warnings\nfrom packaging import version\n\nfrom ._version import get_versions\n\n\ndef custom_formatwarning(msg, category, *args, **kwargs):\n # ignore everything except the message\n return \"{}: {}\\n\".format(category.__name__, msg)\n\n\nwarnings.formatwarning = custom_formatwarning\n# Filter numpy version warnings because they are not relevant\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"Large object of size\")\nwarnings.filterwarnings(\n \"ignore\",\n message=\"The pandas.datetime class is deprecated and will be removed from pandas in a future version. \"\n \"Import from datetime module instead.\",\n)\n\n\ndef get_execution_engine():\n # In the future, when there are multiple engines and different ways of\n # backing the DataFrame, there will have to be some changed logic here to\n # decide these things. In the meantime, we will use the currently supported\n # execution engine + backing (Pandas + Ray).\n if \"MODIN_ENGINE\" in os.environ:\n # .title allows variants like ray, RAY, Ray\n return os.environ[\"MODIN_ENGINE\"].title()\n else:\n if \"MODIN_DEBUG\" in os.environ:\n return \"Python\"\n else:\n if sys.platform != \"win32\":\n try:\n import ray\n\n except ImportError:\n pass\n else:\n if version.parse(ray.__version__) != version.parse(\"0.8.4\"):\n raise ImportError(\n \"Please `pip install modin[ray]` to install compatible Ray version.\"\n )\n return \"Ray\"\n try:\n import dask\n import distributed\n\n except ImportError:\n raise ImportError(\n \"Please `pip install {}modin[dask]` to install an engine\".format(\n \"modin[ray]` or `\" if sys.platform != \"win32\" else \"\"\n )\n )\n else:\n if version.parse(dask.__version__) < version.parse(\n \"2.1.0\"\n ) or version.parse(distributed.__version__) < version.parse(\"2.3.2\"):\n raise ImportError(\n \"Please `pip install modin[dask]` to install compatible Dask version.\"\n )\n return \"Dask\"\n\n\ndef get_partition_format():\n # See note above about engine + backing.\n return os.environ.get(\"MODIN_BACKEND\", \"Pandas\").title()\n\n\n__version__ = \"0.6.3\"\n__execution_engine__ = get_execution_engine()\n__partition_format__ = get_partition_format()\n\n# We don't want these used outside of this file.\ndel get_execution_engine\ndel get_partition_format\n\n__version__ = get_versions()[\"version\"]\ndel get_versions\n", "path": "modin/__init__.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\nimport os\nfrom setuptools.dist import Distribution\n\ntry:\n from wheel.bdist_wheel import bdist_wheel\n\n HAS_WHEEL = True\nexcept ImportError:\n HAS_WHEEL = False\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nif HAS_WHEEL:\n\n class ModinWheel(bdist_wheel):\n def finalize_options(self):\n bdist_wheel.finalize_options(self)\n self.root_is_pure = False\n\n def get_tag(self):\n _, _, plat = bdist_wheel.get_tag(self)\n py = \"py3\"\n abi = \"none\"\n return py, abi, plat\n\n\nclass ModinDistribution(Distribution):\n def __init__(self, *attrs):\n Distribution.__init__(self, *attrs)\n if HAS_WHEEL:\n self.cmdclass[\"bdist_wheel\"] = ModinWheel\n\n def is_pure(self):\n return False\n\n\ndask_deps = [\"dask>=2.1.0\", \"distributed>=2.3.2\"]\nray_deps = [\"ray==0.8.5\", \"pyarrow<0.17\"]\nif \"SETUP_PLAT_NAME\" in os.environ:\n if \"win\" in 
os.environ[\"SETUP_PLAT_NAME\"]:\n all_deps = dask_deps\n else:\n all_deps = dask_deps + ray_deps\nelse:\n all_deps = dask_deps if os.name == \"nt\" else dask_deps + ray_deps\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n distclass=ModinDistribution,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(),\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\"pandas==1.0.3\", \"packaging\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.5\",\n)\n", "path": "setup.py"}, {"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport os\nimport sys\nimport warnings\nfrom packaging import version\n\nfrom ._version import get_versions\n\n\ndef custom_formatwarning(msg, category, *args, **kwargs):\n # ignore everything except the message\n return \"{}: {}\\n\".format(category.__name__, msg)\n\n\nwarnings.formatwarning = custom_formatwarning\n# Filter numpy version warnings because they are not relevant\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"Large object of size\")\nwarnings.filterwarnings(\n \"ignore\",\n message=\"The pandas.datetime class is deprecated and will be removed from pandas in a future version. \"\n \"Import from datetime module instead.\",\n)\n\n\ndef get_execution_engine():\n # In the future, when there are multiple engines and different ways of\n # backing the DataFrame, there will have to be some changed logic here to\n # decide these things. 
In the meantime, we will use the currently supported\n # execution engine + backing (Pandas + Ray).\n if \"MODIN_ENGINE\" in os.environ:\n # .title allows variants like ray, RAY, Ray\n return os.environ[\"MODIN_ENGINE\"].title()\n else:\n if \"MODIN_DEBUG\" in os.environ:\n return \"Python\"\n else:\n if sys.platform != \"win32\":\n try:\n import ray\n\n except ImportError:\n pass\n else:\n if version.parse(ray.__version__) != version.parse(\"0.8.5\"):\n raise ImportError(\n \"Please `pip install modin[ray]` to install compatible Ray version.\"\n )\n return \"Ray\"\n try:\n import dask\n import distributed\n\n except ImportError:\n raise ImportError(\n \"Please `pip install {}modin[dask]` to install an engine\".format(\n \"modin[ray]` or `\" if sys.platform != \"win32\" else \"\"\n )\n )\n else:\n if version.parse(dask.__version__) < version.parse(\n \"2.1.0\"\n ) or version.parse(distributed.__version__) < version.parse(\"2.3.2\"):\n raise ImportError(\n \"Please `pip install modin[dask]` to install compatible Dask version.\"\n )\n return \"Dask\"\n\n\ndef get_partition_format():\n # See note above about engine + backing.\n return os.environ.get(\"MODIN_BACKEND\", \"Pandas\").title()\n\n\n__version__ = \"0.6.3\"\n__execution_engine__ = get_execution_engine()\n__partition_format__ = get_partition_format()\n\n# We don't want these used outside of this file.\ndel get_execution_engine\ndel get_partition_format\n\n__version__ = get_versions()[\"version\"]\ndel get_versions\n", "path": "modin/__init__.py"}]}
1,867
254
gh_patches_debug_19392
rasdani/github-patches
git_diff
CTFd__CTFd-889
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Imports may need to go through the application factory process again If the CTF has plugins that create tables, those tables may not be re-created when importing/resetting. This might be leading to issues where tables don't exist in the database but are referred to by plugins. Imports may need to go through the application factory process again If the CTF has plugins that create tables, those tables may not be re-created when importing/resetting. This might be leading to issues where tables don't exist in the database but are referred to by plugins. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `CTFd/utils/exports/__init__.py` Content: ``` 1 from CTFd.utils import get_app_config 2 from CTFd.utils.migrations import get_current_revision, create_database, drop_database, upgrade, stamp 3 from CTFd.utils.uploads import get_uploader 4 from CTFd.models import db 5 from CTFd.cache import cache 6 from datafreeze.format import SERIALIZERS 7 from flask import current_app as app 8 from datafreeze.format.fjson import JSONSerializer, JSONEncoder 9 import dataset 10 import datafreeze 11 import datetime 12 import json 13 import os 14 import re 15 import six 16 import zipfile 17 import tempfile 18 19 20 class CTFdSerializer(JSONSerializer): 21 """ 22 Slightly modified datafreeze serializer so that we can properly 23 export the CTFd database into a zip file. 24 """ 25 26 def close(self): 27 for path, result in self.buckets.items(): 28 result = self.wrap(result) 29 30 if self.fileobj is None: 31 fh = open(path, 'wb') 32 else: 33 fh = self.fileobj 34 35 data = json.dumps(result, 36 cls=JSONEncoder, 37 indent=self.export.get_int('indent')) 38 39 callback = self.export.get('callback') 40 if callback: 41 data = "%s && %s(%s);" % (callback, callback, data) 42 43 if six.PY3: 44 fh.write(bytes(data, encoding='utf-8')) 45 else: 46 fh.write(data) 47 if self.fileobj is None: 48 fh.close() 49 50 51 SERIALIZERS['ctfd'] = CTFdSerializer # Load the custom serializer 52 53 54 def export_ctf(): 55 # TODO: For some unknown reason dataset is only able to see alembic_version during tests. 56 # Even using a real sqlite database. This makes this test impossible to pass in sqlite. 
57 db = dataset.connect(get_app_config('SQLALCHEMY_DATABASE_URI')) 58 59 # Backup database 60 backup = tempfile.NamedTemporaryFile() 61 62 backup_zip = zipfile.ZipFile(backup, 'w') 63 64 tables = db.tables 65 for table in tables: 66 result = db[table].all() 67 result_file = six.BytesIO() 68 datafreeze.freeze(result, format='ctfd', fileobj=result_file) 69 result_file.seek(0) 70 backup_zip.writestr('db/{}.json'.format(table), result_file.read()) 71 72 # # Guarantee that alembic_version is saved into the export 73 if 'alembic_version' not in tables: 74 result = { 75 "count": 1, 76 "results": [ 77 { 78 "version_num": get_current_revision() 79 } 80 ], 81 "meta": {} 82 } 83 result_file = six.BytesIO() 84 json.dump(result, result_file) 85 result_file.seek(0) 86 backup_zip.writestr('db/alembic_version.json', result_file.read()) 87 88 # Backup uploads 89 uploader = get_uploader() 90 uploader.sync() 91 92 upload_folder = os.path.join(os.path.normpath(app.root_path), app.config.get('UPLOAD_FOLDER')) 93 for root, dirs, files in os.walk(upload_folder): 94 for file in files: 95 parent_dir = os.path.basename(root) 96 backup_zip.write(os.path.join(root, file), arcname=os.path.join('uploads', parent_dir, file)) 97 98 backup_zip.close() 99 backup.seek(0) 100 return backup 101 102 103 def import_ctf(backup, erase=True): 104 if not zipfile.is_zipfile(backup): 105 raise zipfile.BadZipfile 106 107 if erase: 108 drop_database() 109 create_database() 110 # We explicitly do not want to upgrade or stamp here. 111 # The import will have this information. 112 113 side_db = dataset.connect(get_app_config('SQLALCHEMY_DATABASE_URI')) 114 sqlite = get_app_config('SQLALCHEMY_DATABASE_URI').startswith('sqlite') 115 postgres = get_app_config('SQLALCHEMY_DATABASE_URI').startswith('postgres') 116 117 backup = zipfile.ZipFile(backup) 118 119 members = backup.namelist() 120 max_content_length = get_app_config('MAX_CONTENT_LENGTH') 121 for f in members: 122 if f.startswith('/') or '..' in f: 123 # Abort on malicious zip files 124 raise zipfile.BadZipfile 125 info = backup.getinfo(f) 126 if max_content_length: 127 if info.file_size > max_content_length: 128 raise zipfile.LargeZipFile 129 130 first = [ 131 'db/teams.json', 132 'db/users.json', 133 'db/challenges.json', 134 'db/dynamic_challenge.json', 135 136 'db/flags.json', 137 'db/hints.json', 138 'db/unlocks.json', 139 'db/awards.json', 140 'db/tags.json', 141 142 'db/submissions.json', 143 'db/solves.json', 144 145 'db/files.json', 146 147 'db/notifications.json', 148 'db/pages.json', 149 150 'db/tracking.json', 151 'db/config.json', 152 ] 153 154 for item in first: 155 if item in members: 156 members.remove(item) 157 158 members = first + members 159 160 alembic_version = json.loads(backup.open('db/alembic_version.json').read())["results"][0]["version_num"] 161 upgrade(revision=alembic_version) 162 members.remove('db/alembic_version.json') 163 164 for member in members: 165 if member.startswith('db/'): 166 table_name = member[3:-5] 167 168 try: 169 # Try to open a file but skip if it doesn't exist. 
170 data = backup.open(member).read() 171 except KeyError: 172 continue 173 174 if data: 175 table = side_db[table_name] 176 177 saved = json.loads(data) 178 for entry in saved['results']: 179 # This is a hack to get SQLite to properly accept datetime values from dataset 180 # See Issue #246 181 if sqlite: 182 for k, v in entry.items(): 183 if isinstance(v, six.string_types): 184 match = re.match(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d", v) 185 if match: 186 entry[k] = datetime.datetime.strptime(v, '%Y-%m-%dT%H:%M:%S.%f') 187 continue 188 match = re.match(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}", v) 189 if match: 190 entry[k] = datetime.datetime.strptime(v, '%Y-%m-%dT%H:%M:%S') 191 continue 192 table.insert(entry) 193 db.session.commit() 194 if postgres: 195 # TODO: This should be sanitized even though exports are basically SQL dumps 196 # Databases are so hard 197 # https://stackoverflow.com/a/37972960 198 side_db.engine.execute( 199 "SELECT setval(pg_get_serial_sequence('{table_name}', 'id'), coalesce(max(id)+1,1), false) FROM {table_name}".format( 200 table_name=table_name 201 ) 202 ) 203 204 # Extracting files 205 files = [f for f in backup.namelist() if f.startswith('uploads/')] 206 uploader = get_uploader() 207 for f in files: 208 filename = f.split(os.sep, 1) 209 210 if len(filename) < 2: # just an empty uploads directory (e.g. uploads/) 211 continue 212 213 filename = filename[1] # Get the second entry in the list (the actual filename) 214 source = backup.open(f) 215 uploader.store(fileobj=source, filename=filename) 216 217 cache.clear() 218 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/CTFd/utils/exports/__init__.py b/CTFd/utils/exports/__init__.py --- a/CTFd/utils/exports/__init__.py +++ b/CTFd/utils/exports/__init__.py @@ -6,6 +6,7 @@ from datafreeze.format import SERIALIZERS from flask import current_app as app from datafreeze.format.fjson import JSONSerializer, JSONEncoder +from sqlalchemy.exc import OperationalError import dataset import datafreeze import datetime @@ -159,6 +160,16 @@ alembic_version = json.loads(backup.open('db/alembic_version.json').read())["results"][0]["version_num"] upgrade(revision=alembic_version) + + # Create tables created by plugins + try: + app.db.create_all() + except OperationalError as e: + if not postgres: + raise e + else: + print("Allowing error during app.db.create_all() due to Postgres") + members.remove('db/alembic_version.json') for member in members:
{"golden_diff": "diff --git a/CTFd/utils/exports/__init__.py b/CTFd/utils/exports/__init__.py\n--- a/CTFd/utils/exports/__init__.py\n+++ b/CTFd/utils/exports/__init__.py\n@@ -6,6 +6,7 @@\n from datafreeze.format import SERIALIZERS\n from flask import current_app as app\n from datafreeze.format.fjson import JSONSerializer, JSONEncoder\n+from sqlalchemy.exc import OperationalError\n import dataset\n import datafreeze\n import datetime\n@@ -159,6 +160,16 @@\n \n alembic_version = json.loads(backup.open('db/alembic_version.json').read())[\"results\"][0][\"version_num\"]\n upgrade(revision=alembic_version)\n+\n+ # Create tables created by plugins\n+ try:\n+ app.db.create_all()\n+ except OperationalError as e:\n+ if not postgres:\n+ raise e\n+ else:\n+ print(\"Allowing error during app.db.create_all() due to Postgres\")\n+\n members.remove('db/alembic_version.json')\n \n for member in members:\n", "issue": "Imports may need to go through the application factory process again\nIf the CTF has plugins that create tables, those tables may not be re-created when importing/resetting. This might be leading to issues where tables don't exist in the database but are referred to by plugins. \nImports may need to go through the application factory process again\nIf the CTF has plugins that create tables, those tables may not be re-created when importing/resetting. This might be leading to issues where tables don't exist in the database but are referred to by plugins. \n", "before_files": [{"content": "from CTFd.utils import get_app_config\nfrom CTFd.utils.migrations import get_current_revision, create_database, drop_database, upgrade, stamp\nfrom CTFd.utils.uploads import get_uploader\nfrom CTFd.models import db\nfrom CTFd.cache import cache\nfrom datafreeze.format import SERIALIZERS\nfrom flask import current_app as app\nfrom datafreeze.format.fjson import JSONSerializer, JSONEncoder\nimport dataset\nimport datafreeze\nimport datetime\nimport json\nimport os\nimport re\nimport six\nimport zipfile\nimport tempfile\n\n\nclass CTFdSerializer(JSONSerializer):\n \"\"\"\n Slightly modified datafreeze serializer so that we can properly\n export the CTFd database into a zip file.\n \"\"\"\n\n def close(self):\n for path, result in self.buckets.items():\n result = self.wrap(result)\n\n if self.fileobj is None:\n fh = open(path, 'wb')\n else:\n fh = self.fileobj\n\n data = json.dumps(result,\n cls=JSONEncoder,\n indent=self.export.get_int('indent'))\n\n callback = self.export.get('callback')\n if callback:\n data = \"%s && %s(%s);\" % (callback, callback, data)\n\n if six.PY3:\n fh.write(bytes(data, encoding='utf-8'))\n else:\n fh.write(data)\n if self.fileobj is None:\n fh.close()\n\n\nSERIALIZERS['ctfd'] = CTFdSerializer # Load the custom serializer\n\n\ndef export_ctf():\n # TODO: For some unknown reason dataset is only able to see alembic_version during tests.\n # Even using a real sqlite database. 
This makes this test impossible to pass in sqlite.\n db = dataset.connect(get_app_config('SQLALCHEMY_DATABASE_URI'))\n\n # Backup database\n backup = tempfile.NamedTemporaryFile()\n\n backup_zip = zipfile.ZipFile(backup, 'w')\n\n tables = db.tables\n for table in tables:\n result = db[table].all()\n result_file = six.BytesIO()\n datafreeze.freeze(result, format='ctfd', fileobj=result_file)\n result_file.seek(0)\n backup_zip.writestr('db/{}.json'.format(table), result_file.read())\n\n # # Guarantee that alembic_version is saved into the export\n if 'alembic_version' not in tables:\n result = {\n \"count\": 1,\n \"results\": [\n {\n \"version_num\": get_current_revision()\n }\n ],\n \"meta\": {}\n }\n result_file = six.BytesIO()\n json.dump(result, result_file)\n result_file.seek(0)\n backup_zip.writestr('db/alembic_version.json', result_file.read())\n\n # Backup uploads\n uploader = get_uploader()\n uploader.sync()\n\n upload_folder = os.path.join(os.path.normpath(app.root_path), app.config.get('UPLOAD_FOLDER'))\n for root, dirs, files in os.walk(upload_folder):\n for file in files:\n parent_dir = os.path.basename(root)\n backup_zip.write(os.path.join(root, file), arcname=os.path.join('uploads', parent_dir, file))\n\n backup_zip.close()\n backup.seek(0)\n return backup\n\n\ndef import_ctf(backup, erase=True):\n if not zipfile.is_zipfile(backup):\n raise zipfile.BadZipfile\n\n if erase:\n drop_database()\n create_database()\n # We explicitly do not want to upgrade or stamp here.\n # The import will have this information.\n\n side_db = dataset.connect(get_app_config('SQLALCHEMY_DATABASE_URI'))\n sqlite = get_app_config('SQLALCHEMY_DATABASE_URI').startswith('sqlite')\n postgres = get_app_config('SQLALCHEMY_DATABASE_URI').startswith('postgres')\n\n backup = zipfile.ZipFile(backup)\n\n members = backup.namelist()\n max_content_length = get_app_config('MAX_CONTENT_LENGTH')\n for f in members:\n if f.startswith('/') or '..' 
in f:\n # Abort on malicious zip files\n raise zipfile.BadZipfile\n info = backup.getinfo(f)\n if max_content_length:\n if info.file_size > max_content_length:\n raise zipfile.LargeZipFile\n\n first = [\n 'db/teams.json',\n 'db/users.json',\n 'db/challenges.json',\n 'db/dynamic_challenge.json',\n\n 'db/flags.json',\n 'db/hints.json',\n 'db/unlocks.json',\n 'db/awards.json',\n 'db/tags.json',\n\n 'db/submissions.json',\n 'db/solves.json',\n\n 'db/files.json',\n\n 'db/notifications.json',\n 'db/pages.json',\n\n 'db/tracking.json',\n 'db/config.json',\n ]\n\n for item in first:\n if item in members:\n members.remove(item)\n\n members = first + members\n\n alembic_version = json.loads(backup.open('db/alembic_version.json').read())[\"results\"][0][\"version_num\"]\n upgrade(revision=alembic_version)\n members.remove('db/alembic_version.json')\n\n for member in members:\n if member.startswith('db/'):\n table_name = member[3:-5]\n\n try:\n # Try to open a file but skip if it doesn't exist.\n data = backup.open(member).read()\n except KeyError:\n continue\n\n if data:\n table = side_db[table_name]\n\n saved = json.loads(data)\n for entry in saved['results']:\n # This is a hack to get SQLite to properly accept datetime values from dataset\n # See Issue #246\n if sqlite:\n for k, v in entry.items():\n if isinstance(v, six.string_types):\n match = re.match(r\"\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d\", v)\n if match:\n entry[k] = datetime.datetime.strptime(v, '%Y-%m-%dT%H:%M:%S.%f')\n continue\n match = re.match(r\"\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\", v)\n if match:\n entry[k] = datetime.datetime.strptime(v, '%Y-%m-%dT%H:%M:%S')\n continue\n table.insert(entry)\n db.session.commit()\n if postgres:\n # TODO: This should be sanitized even though exports are basically SQL dumps\n # Databases are so hard\n # https://stackoverflow.com/a/37972960\n side_db.engine.execute(\n \"SELECT setval(pg_get_serial_sequence('{table_name}', 'id'), coalesce(max(id)+1,1), false) FROM {table_name}\".format(\n table_name=table_name\n )\n )\n\n # Extracting files\n files = [f for f in backup.namelist() if f.startswith('uploads/')]\n uploader = get_uploader()\n for f in files:\n filename = f.split(os.sep, 1)\n\n if len(filename) < 2: # just an empty uploads directory (e.g. 
uploads/)\n continue\n\n filename = filename[1] # Get the second entry in the list (the actual filename)\n source = backup.open(f)\n uploader.store(fileobj=source, filename=filename)\n\n cache.clear()\n", "path": "CTFd/utils/exports/__init__.py"}], "after_files": [{"content": "from CTFd.utils import get_app_config\nfrom CTFd.utils.migrations import get_current_revision, create_database, drop_database, upgrade, stamp\nfrom CTFd.utils.uploads import get_uploader\nfrom CTFd.models import db\nfrom CTFd.cache import cache\nfrom datafreeze.format import SERIALIZERS\nfrom flask import current_app as app\nfrom datafreeze.format.fjson import JSONSerializer, JSONEncoder\nfrom sqlalchemy.exc import OperationalError\nimport dataset\nimport datafreeze\nimport datetime\nimport json\nimport os\nimport re\nimport six\nimport zipfile\nimport tempfile\n\n\nclass CTFdSerializer(JSONSerializer):\n \"\"\"\n Slightly modified datafreeze serializer so that we can properly\n export the CTFd database into a zip file.\n \"\"\"\n\n def close(self):\n for path, result in self.buckets.items():\n result = self.wrap(result)\n\n if self.fileobj is None:\n fh = open(path, 'wb')\n else:\n fh = self.fileobj\n\n data = json.dumps(result,\n cls=JSONEncoder,\n indent=self.export.get_int('indent'))\n\n callback = self.export.get('callback')\n if callback:\n data = \"%s && %s(%s);\" % (callback, callback, data)\n\n if six.PY3:\n fh.write(bytes(data, encoding='utf-8'))\n else:\n fh.write(data)\n if self.fileobj is None:\n fh.close()\n\n\nSERIALIZERS['ctfd'] = CTFdSerializer # Load the custom serializer\n\n\ndef export_ctf():\n # TODO: For some unknown reason dataset is only able to see alembic_version during tests.\n # Even using a real sqlite database. This makes this test impossible to pass in sqlite.\n db = dataset.connect(get_app_config('SQLALCHEMY_DATABASE_URI'))\n\n # Backup database\n backup = tempfile.NamedTemporaryFile()\n\n backup_zip = zipfile.ZipFile(backup, 'w')\n\n tables = db.tables\n for table in tables:\n result = db[table].all()\n result_file = six.BytesIO()\n datafreeze.freeze(result, format='ctfd', fileobj=result_file)\n result_file.seek(0)\n backup_zip.writestr('db/{}.json'.format(table), result_file.read())\n\n # # Guarantee that alembic_version is saved into the export\n if 'alembic_version' not in tables:\n result = {\n \"count\": 1,\n \"results\": [\n {\n \"version_num\": get_current_revision()\n }\n ],\n \"meta\": {}\n }\n result_file = six.BytesIO()\n json.dump(result, result_file)\n result_file.seek(0)\n backup_zip.writestr('db/alembic_version.json', result_file.read())\n\n # Backup uploads\n uploader = get_uploader()\n uploader.sync()\n\n upload_folder = os.path.join(os.path.normpath(app.root_path), app.config.get('UPLOAD_FOLDER'))\n for root, dirs, files in os.walk(upload_folder):\n for file in files:\n parent_dir = os.path.basename(root)\n backup_zip.write(os.path.join(root, file), arcname=os.path.join('uploads', parent_dir, file))\n\n backup_zip.close()\n backup.seek(0)\n return backup\n\n\ndef import_ctf(backup, erase=True):\n if not zipfile.is_zipfile(backup):\n raise zipfile.BadZipfile\n\n if erase:\n drop_database()\n create_database()\n # We explicitly do not want to upgrade or stamp here.\n # The import will have this information.\n\n side_db = dataset.connect(get_app_config('SQLALCHEMY_DATABASE_URI'))\n sqlite = get_app_config('SQLALCHEMY_DATABASE_URI').startswith('sqlite')\n postgres = get_app_config('SQLALCHEMY_DATABASE_URI').startswith('postgres')\n\n backup = zipfile.ZipFile(backup)\n\n 
members = backup.namelist()\n max_content_length = get_app_config('MAX_CONTENT_LENGTH')\n for f in members:\n if f.startswith('/') or '..' in f:\n # Abort on malicious zip files\n raise zipfile.BadZipfile\n info = backup.getinfo(f)\n if max_content_length:\n if info.file_size > max_content_length:\n raise zipfile.LargeZipFile\n\n first = [\n 'db/teams.json',\n 'db/users.json',\n 'db/challenges.json',\n 'db/dynamic_challenge.json',\n\n 'db/flags.json',\n 'db/hints.json',\n 'db/unlocks.json',\n 'db/awards.json',\n 'db/tags.json',\n\n 'db/submissions.json',\n 'db/solves.json',\n\n 'db/files.json',\n\n 'db/notifications.json',\n 'db/pages.json',\n\n 'db/tracking.json',\n 'db/config.json',\n ]\n\n for item in first:\n if item in members:\n members.remove(item)\n\n members = first + members\n\n alembic_version = json.loads(backup.open('db/alembic_version.json').read())[\"results\"][0][\"version_num\"]\n upgrade(revision=alembic_version)\n\n # Create tables created by plugins\n try:\n app.db.create_all()\n except OperationalError as e:\n if not postgres:\n raise e\n else:\n print(\"Allowing error during app.db.create_all() due to Postgres\")\n\n members.remove('db/alembic_version.json')\n\n for member in members:\n if member.startswith('db/'):\n table_name = member[3:-5]\n\n try:\n # Try to open a file but skip if it doesn't exist.\n data = backup.open(member).read()\n except KeyError:\n continue\n\n if data:\n table = side_db[table_name]\n\n saved = json.loads(data)\n for entry in saved['results']:\n # This is a hack to get SQLite to properly accept datetime values from dataset\n # See Issue #246\n if sqlite:\n for k, v in entry.items():\n if isinstance(v, six.string_types):\n match = re.match(r\"\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d\", v)\n if match:\n entry[k] = datetime.datetime.strptime(v, '%Y-%m-%dT%H:%M:%S.%f')\n continue\n match = re.match(r\"\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\", v)\n if match:\n entry[k] = datetime.datetime.strptime(v, '%Y-%m-%dT%H:%M:%S')\n continue\n table.insert(entry)\n db.session.commit()\n if postgres:\n # TODO: This should be sanitized even though exports are basically SQL dumps\n # Databases are so hard\n # https://stackoverflow.com/a/37972960\n side_db.engine.execute(\n \"SELECT setval(pg_get_serial_sequence('{table_name}', 'id'), coalesce(max(id)+1,1), false) FROM {table_name}\".format(\n table_name=table_name\n )\n )\n\n # Extracting files\n files = [f for f in backup.namelist() if f.startswith('uploads/')]\n uploader = get_uploader()\n for f in files:\n filename = f.split(os.sep, 1)\n\n if len(filename) < 2: # just an empty uploads directory (e.g. uploads/)\n continue\n\n filename = filename[1] # Get the second entry in the list (the actual filename)\n source = backup.open(f)\n uploader.store(fileobj=source, filename=filename)\n\n cache.clear()\n", "path": "CTFd/utils/exports/__init__.py"}]}
2,533
248
gh_patches_debug_9011
rasdani/github-patches
git_diff
freedomofpress__securedrop-5827
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 1.8.0 translation feedback: Focal upgrade warning ## Description AO [suggested](https://weblate.securedrop.org/translate/securedrop/securedrop/en/?checksum=c775f74150ac3872) changing `A manual update is urgently required to remain safe.` to `A manual upgrade is urgently required to remain safe.` in: ``` <strong>Critical Security:</strong>&nbsp;&nbsp;The operating system used by your SecureDrop servers will reach its end-of-life on April 30, 2021. A manual update is urgently required to remain safe. Please contact your adminstrator. <a href="//securedrop.org/xenial-eol" rel="noreferrer">Learn More</a> ``` as well as [here](https://weblate.securedrop.org/translate/securedrop/securedrop/en/?checksum=60e0284e103e2c51): ``` <strong>Critical Security:</strong>&nbsp;&nbsp;The operating system used by your SecureDrop servers has reached its end-of-life. A manual update is required to re-enable the Source Interface and remain safe. Please contact your administrator. <a href="//securedrop.org/xenial-eol" rel="noreferrer">Learn More</a> ``` There's also a typo in the first string: "adminstrator" needs another "i". --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `securedrop/journalist_app/col.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 from pathlib import Path 4 5 from flask import ( 6 Blueprint, 7 abort, 8 current_app, 9 flash, 10 g, 11 redirect, 12 render_template, 13 request, 14 send_file, 15 url_for, 16 Markup, 17 escape, 18 ) 19 import werkzeug 20 from flask_babel import gettext 21 from sqlalchemy.orm.exc import NoResultFound 22 23 from db import db 24 from models import Reply, Submission 25 from journalist_app.forms import ReplyForm 26 from journalist_app.utils import (make_star_true, make_star_false, get_source, 27 delete_collection, col_download_unread, 28 col_download_all, col_star, col_un_star, 29 col_delete, col_delete_data, mark_seen) 30 from sdconfig import SDConfig 31 32 33 def make_blueprint(config: SDConfig) -> Blueprint: 34 view = Blueprint('col', __name__) 35 36 @view.route('/add_star/<filesystem_id>', methods=('POST',)) 37 def add_star(filesystem_id: str) -> werkzeug.Response: 38 make_star_true(filesystem_id) 39 db.session.commit() 40 return redirect(url_for('main.index')) 41 42 @view.route("/remove_star/<filesystem_id>", methods=('POST',)) 43 def remove_star(filesystem_id: str) -> werkzeug.Response: 44 make_star_false(filesystem_id) 45 db.session.commit() 46 return redirect(url_for('main.index')) 47 48 @view.route('/<filesystem_id>') 49 def col(filesystem_id: str) -> str: 50 form = ReplyForm() 51 source = get_source(filesystem_id) 52 source.has_key = current_app.crypto_util.get_fingerprint(filesystem_id) 53 return render_template("col.html", filesystem_id=filesystem_id, 54 source=source, form=form) 55 56 @view.route('/delete/<filesystem_id>', methods=('POST',)) 57 def delete_single(filesystem_id: str) -> werkzeug.Response: 58 """deleting a single collection from its /col page""" 59 source = get_source(filesystem_id) 60 try: 61 delete_collection(filesystem_id) 62 except ValueError as e: 63 current_app.logger.error("error deleting collection: %s", e) 64 abort(500) 65 66 flash( 67 Markup( 68 "<b>{}</b> {}".format( 69 # Translators: Here, "Success!" appears before a message 70 # confirming the success of an operation. 
71 escape(gettext("Success!")), 72 escape(gettext( 73 "The account and data for the source {} has been deleted.").format( 74 source.journalist_designation)) 75 ) 76 ), 'success') 77 78 return redirect(url_for('main.index')) 79 80 @view.route('/process', methods=('POST',)) 81 def process() -> werkzeug.Response: 82 actions = {'download-unread': col_download_unread, 83 'download-all': col_download_all, 'star': col_star, 84 'un-star': col_un_star, 'delete': col_delete, 85 'delete-data': col_delete_data} 86 if 'cols_selected' not in request.form: 87 flash( 88 Markup("<b>{}</b> {}".format( 89 # Translators: Here, "Nothing Selected" appears before a message 90 # asking the user to select one or more items. 91 escape(gettext('Nothing Selected')), 92 escape(gettext('You must select one or more items.')) 93 ) 94 ), 'error') 95 return redirect(url_for('main.index')) 96 97 # getlist is cgi.FieldStorage.getlist 98 cols_selected = request.form.getlist('cols_selected') 99 action = request.form['action'] 100 101 if action not in actions: 102 return abort(500) 103 104 method = actions[action] 105 return method(cols_selected) 106 107 @view.route('/<filesystem_id>/<fn>') 108 def download_single_file(filesystem_id: str, fn: str) -> werkzeug.Response: 109 """ 110 Marks the file being download (the file being downloaded is either a submission message, 111 submission file attachement, or journalist reply) as seen by the current logged-in user and 112 send the file to a client to be saved or opened. 113 """ 114 if '..' in fn or fn.startswith('/'): 115 abort(404) 116 117 file = current_app.storage.path(filesystem_id, fn) 118 if not Path(file).is_file(): 119 flash( 120 gettext( 121 "Your download failed because a file could not be found. An admin can find " 122 + "more information in the system and monitoring logs." 123 ), 124 "error" 125 ) 126 current_app.logger.error("File {} not found".format(file)) 127 return redirect(url_for("col.col", filesystem_id=filesystem_id)) 128 129 # mark as seen by the current user 130 try: 131 journalist = g.get("user") 132 if fn.endswith("reply.gpg"): 133 reply = Reply.query.filter(Reply.filename == fn).one() 134 mark_seen([reply], journalist) 135 elif fn.endswith("-doc.gz.gpg") or fn.endswith("doc.zip.gpg"): 136 file = Submission.query.filter(Submission.filename == fn).one() 137 mark_seen([file], journalist) 138 else: 139 message = Submission.query.filter(Submission.filename == fn).one() 140 mark_seen([message], journalist) 141 except NoResultFound as e: 142 current_app.logger.error("Could not mark {} as seen: {}".format(fn, e)) 143 144 return send_file(current_app.storage.path(filesystem_id, fn), 145 mimetype="application/pgp-encrypted") 146 147 return view 148 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/securedrop/journalist_app/col.py b/securedrop/journalist_app/col.py --- a/securedrop/journalist_app/col.py +++ b/securedrop/journalist_app/col.py @@ -70,7 +70,7 @@ # confirming the success of an operation. escape(gettext("Success!")), escape(gettext( - "The account and data for the source {} has been deleted.").format( + "The account and data for the source {} have been deleted.").format( source.journalist_designation)) ) ), 'success')
{"golden_diff": "diff --git a/securedrop/journalist_app/col.py b/securedrop/journalist_app/col.py\n--- a/securedrop/journalist_app/col.py\n+++ b/securedrop/journalist_app/col.py\n@@ -70,7 +70,7 @@\n # confirming the success of an operation.\n escape(gettext(\"Success!\")),\n escape(gettext(\n- \"The account and data for the source {} has been deleted.\").format(\n+ \"The account and data for the source {} have been deleted.\").format(\n source.journalist_designation))\n )\n ), 'success')\n", "issue": "1.8.0 translation feedback: Focal upgrade warning\n## Description\r\n\r\nAO [suggested](https://weblate.securedrop.org/translate/securedrop/securedrop/en/?checksum=c775f74150ac3872) changing `A manual update is urgently required to remain safe.` to `A manual upgrade is urgently required to remain safe.` in:\r\n\r\n```\r\n<strong>Critical Security:</strong>&nbsp;&nbsp;The operating system used by your SecureDrop servers \r\nwill reach its end-of-life on April 30, 2021. A manual update is urgently required to remain safe. \r\nPlease contact your adminstrator. <a href=\"//securedrop.org/xenial-eol\" rel=\"noreferrer\">Learn More</a>\r\n```\r\n\r\nas well as [here](https://weblate.securedrop.org/translate/securedrop/securedrop/en/?checksum=60e0284e103e2c51):\r\n```\r\n<strong>Critical Security:</strong>&nbsp;&nbsp;The operating system used by your SecureDrop servers \r\nhas reached its end-of-life. A manual update is required to re-enable the Source Interface and remain safe. \r\nPlease contact your administrator. <a href=\"//securedrop.org/xenial-eol\" rel=\"noreferrer\">Learn More</a>\r\n```\r\n\r\nThere's also a typo in the first string: \"adminstrator\" needs another \"i\".\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom pathlib import Path\n\nfrom flask import (\n Blueprint,\n abort,\n current_app,\n flash,\n g,\n redirect,\n render_template,\n request,\n send_file,\n url_for,\n Markup,\n escape,\n)\nimport werkzeug\nfrom flask_babel import gettext\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom db import db\nfrom models import Reply, Submission\nfrom journalist_app.forms import ReplyForm\nfrom journalist_app.utils import (make_star_true, make_star_false, get_source,\n delete_collection, col_download_unread,\n col_download_all, col_star, col_un_star,\n col_delete, col_delete_data, mark_seen)\nfrom sdconfig import SDConfig\n\n\ndef make_blueprint(config: SDConfig) -> Blueprint:\n view = Blueprint('col', __name__)\n\n @view.route('/add_star/<filesystem_id>', methods=('POST',))\n def add_star(filesystem_id: str) -> werkzeug.Response:\n make_star_true(filesystem_id)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n @view.route(\"/remove_star/<filesystem_id>\", methods=('POST',))\n def remove_star(filesystem_id: str) -> werkzeug.Response:\n make_star_false(filesystem_id)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n @view.route('/<filesystem_id>')\n def col(filesystem_id: str) -> str:\n form = ReplyForm()\n source = get_source(filesystem_id)\n source.has_key = current_app.crypto_util.get_fingerprint(filesystem_id)\n return render_template(\"col.html\", filesystem_id=filesystem_id,\n source=source, form=form)\n\n @view.route('/delete/<filesystem_id>', methods=('POST',))\n def delete_single(filesystem_id: str) -> werkzeug.Response:\n \"\"\"deleting a single collection from its /col page\"\"\"\n source = get_source(filesystem_id)\n try:\n delete_collection(filesystem_id)\n except ValueError as e:\n current_app.logger.error(\"error 
deleting collection: %s\", e)\n abort(500)\n\n flash(\n Markup(\n \"<b>{}</b> {}\".format(\n # Translators: Here, \"Success!\" appears before a message\n # confirming the success of an operation.\n escape(gettext(\"Success!\")),\n escape(gettext(\n \"The account and data for the source {} has been deleted.\").format(\n source.journalist_designation))\n )\n ), 'success')\n\n return redirect(url_for('main.index'))\n\n @view.route('/process', methods=('POST',))\n def process() -> werkzeug.Response:\n actions = {'download-unread': col_download_unread,\n 'download-all': col_download_all, 'star': col_star,\n 'un-star': col_un_star, 'delete': col_delete,\n 'delete-data': col_delete_data}\n if 'cols_selected' not in request.form:\n flash(\n Markup(\"<b>{}</b> {}\".format(\n # Translators: Here, \"Nothing Selected\" appears before a message\n # asking the user to select one or more items.\n escape(gettext('Nothing Selected')),\n escape(gettext('You must select one or more items.'))\n )\n ), 'error')\n return redirect(url_for('main.index'))\n\n # getlist is cgi.FieldStorage.getlist\n cols_selected = request.form.getlist('cols_selected')\n action = request.form['action']\n\n if action not in actions:\n return abort(500)\n\n method = actions[action]\n return method(cols_selected)\n\n @view.route('/<filesystem_id>/<fn>')\n def download_single_file(filesystem_id: str, fn: str) -> werkzeug.Response:\n \"\"\"\n Marks the file being download (the file being downloaded is either a submission message,\n submission file attachement, or journalist reply) as seen by the current logged-in user and\n send the file to a client to be saved or opened.\n \"\"\"\n if '..' in fn or fn.startswith('/'):\n abort(404)\n\n file = current_app.storage.path(filesystem_id, fn)\n if not Path(file).is_file():\n flash(\n gettext(\n \"Your download failed because a file could not be found. 
An admin can find \"\n + \"more information in the system and monitoring logs.\"\n ),\n \"error\"\n )\n current_app.logger.error(\"File {} not found\".format(file))\n return redirect(url_for(\"col.col\", filesystem_id=filesystem_id))\n\n # mark as seen by the current user\n try:\n journalist = g.get(\"user\")\n if fn.endswith(\"reply.gpg\"):\n reply = Reply.query.filter(Reply.filename == fn).one()\n mark_seen([reply], journalist)\n elif fn.endswith(\"-doc.gz.gpg\") or fn.endswith(\"doc.zip.gpg\"):\n file = Submission.query.filter(Submission.filename == fn).one()\n mark_seen([file], journalist)\n else:\n message = Submission.query.filter(Submission.filename == fn).one()\n mark_seen([message], journalist)\n except NoResultFound as e:\n current_app.logger.error(\"Could not mark {} as seen: {}\".format(fn, e))\n\n return send_file(current_app.storage.path(filesystem_id, fn),\n mimetype=\"application/pgp-encrypted\")\n\n return view\n", "path": "securedrop/journalist_app/col.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom pathlib import Path\n\nfrom flask import (\n Blueprint,\n abort,\n current_app,\n flash,\n g,\n redirect,\n render_template,\n request,\n send_file,\n url_for,\n Markup,\n escape,\n)\nimport werkzeug\nfrom flask_babel import gettext\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom db import db\nfrom models import Reply, Submission\nfrom journalist_app.forms import ReplyForm\nfrom journalist_app.utils import (make_star_true, make_star_false, get_source,\n delete_collection, col_download_unread,\n col_download_all, col_star, col_un_star,\n col_delete, col_delete_data, mark_seen)\nfrom sdconfig import SDConfig\n\n\ndef make_blueprint(config: SDConfig) -> Blueprint:\n view = Blueprint('col', __name__)\n\n @view.route('/add_star/<filesystem_id>', methods=('POST',))\n def add_star(filesystem_id: str) -> werkzeug.Response:\n make_star_true(filesystem_id)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n @view.route(\"/remove_star/<filesystem_id>\", methods=('POST',))\n def remove_star(filesystem_id: str) -> werkzeug.Response:\n make_star_false(filesystem_id)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n @view.route('/<filesystem_id>')\n def col(filesystem_id: str) -> str:\n form = ReplyForm()\n source = get_source(filesystem_id)\n source.has_key = current_app.crypto_util.get_fingerprint(filesystem_id)\n return render_template(\"col.html\", filesystem_id=filesystem_id,\n source=source, form=form)\n\n @view.route('/delete/<filesystem_id>', methods=('POST',))\n def delete_single(filesystem_id: str) -> werkzeug.Response:\n \"\"\"deleting a single collection from its /col page\"\"\"\n source = get_source(filesystem_id)\n try:\n delete_collection(filesystem_id)\n except ValueError as e:\n current_app.logger.error(\"error deleting collection: %s\", e)\n abort(500)\n\n flash(\n Markup(\n \"<b>{}</b> {}\".format(\n # Translators: Here, \"Success!\" appears before a message\n # confirming the success of an operation.\n escape(gettext(\"Success!\")),\n escape(gettext(\n \"The account and data for the source {} have been deleted.\").format(\n source.journalist_designation))\n )\n ), 'success')\n\n return redirect(url_for('main.index'))\n\n @view.route('/process', methods=('POST',))\n def process() -> werkzeug.Response:\n actions = {'download-unread': col_download_unread,\n 'download-all': col_download_all, 'star': col_star,\n 'un-star': col_un_star, 'delete': col_delete,\n 'delete-data': col_delete_data}\n if 'cols_selected' not in 
request.form:\n flash(\n Markup(\"<b>{}</b> {}\".format(\n # Translators: Here, \"Nothing Selected\" appears before a message\n # asking the user to select one or more items.\n escape(gettext('Nothing Selected')),\n escape(gettext('You must select one or more items.'))\n )\n ), 'error')\n return redirect(url_for('main.index'))\n\n # getlist is cgi.FieldStorage.getlist\n cols_selected = request.form.getlist('cols_selected')\n action = request.form['action']\n\n if action not in actions:\n return abort(500)\n\n method = actions[action]\n return method(cols_selected)\n\n @view.route('/<filesystem_id>/<fn>')\n def download_single_file(filesystem_id: str, fn: str) -> werkzeug.Response:\n \"\"\"\n Marks the file being download (the file being downloaded is either a submission message,\n submission file attachement, or journalist reply) as seen by the current logged-in user and\n send the file to a client to be saved or opened.\n \"\"\"\n if '..' in fn or fn.startswith('/'):\n abort(404)\n\n file = current_app.storage.path(filesystem_id, fn)\n if not Path(file).is_file():\n flash(\n gettext(\n \"Your download failed because a file could not be found. An admin can find \"\n + \"more information in the system and monitoring logs.\"\n ),\n \"error\"\n )\n current_app.logger.error(\"File {} not found\".format(file))\n return redirect(url_for(\"col.col\", filesystem_id=filesystem_id))\n\n # mark as seen by the current user\n try:\n journalist = g.get(\"user\")\n if fn.endswith(\"reply.gpg\"):\n reply = Reply.query.filter(Reply.filename == fn).one()\n mark_seen([reply], journalist)\n elif fn.endswith(\"-doc.gz.gpg\") or fn.endswith(\"doc.zip.gpg\"):\n file = Submission.query.filter(Submission.filename == fn).one()\n mark_seen([file], journalist)\n else:\n message = Submission.query.filter(Submission.filename == fn).one()\n mark_seen([message], journalist)\n except NoResultFound as e:\n current_app.logger.error(\"Could not mark {} as seen: {}\".format(fn, e))\n\n return send_file(current_app.storage.path(filesystem_id, fn),\n mimetype=\"application/pgp-encrypted\")\n\n return view\n", "path": "securedrop/journalist_app/col.py"}]}
2,061
135
gh_patches_debug_40179
rasdani/github-patches
git_diff
Project-MONAI__MONAI-3464
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `reduction` for `ContrastiveLoss` **Describe the bug** the error messages and docstring should be consistent https://github.com/Project-MONAI/MONAI/blob/a7bc4a3cbaeaa3c505a25ca2ddf6922bda8ea7dc/monai/losses/contrastive.py#L89-L91 https://github.com/Project-MONAI/MONAI/blob/a7bc4a3cbaeaa3c505a25ca2ddf6922bda8ea7dc/monai/losses/contrastive.py#L58 **Expected behavior** implementing the option `reduction="none"`? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `monai/losses/contrastive.py` Content: ``` 1 # Copyright 2020 - 2021 MONAI Consortium 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # Unless required by applicable law or agreed to in writing, software 7 # distributed under the License is distributed on an "AS IS" BASIS, 8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 # See the License for the specific language governing permissions and 10 # limitations under the License. 11 12 from typing import Union 13 14 import torch 15 from torch.nn import functional as F 16 from torch.nn.modules.loss import _Loss 17 18 from monai.utils import LossReduction 19 20 21 class ContrastiveLoss(_Loss): 22 23 """ 24 Compute the Contrastive loss defined in: 25 26 Chen, Ting, et al. "A simple framework for contrastive learning of visual representations." International 27 conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html) 28 29 Adapted from: 30 https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5 31 32 """ 33 34 def __init__( 35 self, temperature: float = 0.5, batch_size: int = 1, reduction: Union[LossReduction, str] = LossReduction.SUM 36 ) -> None: 37 """ 38 Args: 39 temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5. 40 41 Raises: 42 AssertionError: When an input of dimension length > 2 is passed 43 AssertionError: When input and target are of different shapes 44 45 """ 46 super().__init__(reduction=LossReduction(reduction).value) 47 48 self.batch_size = batch_size 49 self.temperature = temperature 50 51 def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: 52 """ 53 Args: 54 input: the shape should be B[F]. 55 target: the shape should be B[F]. 56 57 Raises: 58 ValueError: When ``self.reduction`` is not one of ["sum", "none"]. 
59 """ 60 if len(target.shape) > 2 or len(input.shape) > 2: 61 raise AssertionError( 62 f"Either target or input has dimensions greater than 2 where target " 63 f"shape is ({target.shape}) and input shape is ({input.shape})" 64 ) 65 66 if target.shape != input.shape: 67 raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})") 68 69 temperature_tensor = torch.tensor(self.temperature).to(input.device) 70 71 norm_i = F.normalize(input, dim=1) 72 norm_j = F.normalize(target, dim=1) 73 74 negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool) 75 negatives_mask = torch.tensor(negatives_mask, dtype=torch.float) 76 negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device) 77 78 repr = torch.cat([norm_i, norm_j], dim=0) 79 sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2) 80 sim_ij = torch.diag(sim_matrix, self.batch_size) 81 sim_ji = torch.diag(sim_matrix, -self.batch_size) 82 83 positives = torch.cat([sim_ij, sim_ji], dim=0) 84 nominator = torch.exp(positives / temperature_tensor) 85 denominator = negatives_mask * torch.exp(sim_matrix / temperature_tensor) 86 87 loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1)) 88 89 if self.reduction == LossReduction.SUM.value: 90 return torch.sum(loss_partial) / (2 * self.batch_size) 91 raise ValueError(f"Unsupported reduction: {self.reduction}, " f'available options are ["mean", "sum", "none"].') 92 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/monai/losses/contrastive.py b/monai/losses/contrastive.py --- a/monai/losses/contrastive.py +++ b/monai/losses/contrastive.py @@ -9,13 +9,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Union - import torch from torch.nn import functional as F from torch.nn.modules.loss import _Loss -from monai.utils import LossReduction +from monai.utils import deprecated_arg class ContrastiveLoss(_Loss): @@ -31,19 +29,23 @@ """ - def __init__( - self, temperature: float = 0.5, batch_size: int = 1, reduction: Union[LossReduction, str] = LossReduction.SUM - ) -> None: + @deprecated_arg(name="reduction", since="0.8", msg_suffix="`reduction` is no longer supported.") + def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction="sum") -> None: """ Args: temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5. + batch_size: The number of samples. Raises: - AssertionError: When an input of dimension length > 2 is passed - AssertionError: When input and target are of different shapes + ValueError: When an input of dimension length > 2 is passed + ValueError: When input and target are of different shapes + + .. deprecated:: 0.8.0 + + `reduction` is no longer supported. """ - super().__init__(reduction=LossReduction(reduction).value) + super().__init__() self.batch_size = batch_size self.temperature = temperature @@ -53,18 +55,15 @@ Args: input: the shape should be B[F]. target: the shape should be B[F]. - - Raises: - ValueError: When ``self.reduction`` is not one of ["sum", "none"]. """ if len(target.shape) > 2 or len(input.shape) > 2: - raise AssertionError( + raise ValueError( f"Either target or input has dimensions greater than 2 where target " f"shape is ({target.shape}) and input shape is ({input.shape})" ) if target.shape != input.shape: - raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})") + raise ValueError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})") temperature_tensor = torch.tensor(self.temperature).to(input.device) @@ -86,6 +85,4 @@ loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1)) - if self.reduction == LossReduction.SUM.value: - return torch.sum(loss_partial) / (2 * self.batch_size) - raise ValueError(f"Unsupported reduction: {self.reduction}, " f'available options are ["mean", "sum", "none"].') + return torch.sum(loss_partial) / (2 * self.batch_size)
{"golden_diff": "diff --git a/monai/losses/contrastive.py b/monai/losses/contrastive.py\n--- a/monai/losses/contrastive.py\n+++ b/monai/losses/contrastive.py\n@@ -9,13 +9,11 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from typing import Union\n-\n import torch\n from torch.nn import functional as F\n from torch.nn.modules.loss import _Loss\n \n-from monai.utils import LossReduction\n+from monai.utils import deprecated_arg\n \n \n class ContrastiveLoss(_Loss):\n@@ -31,19 +29,23 @@\n \n \"\"\"\n \n- def __init__(\n- self, temperature: float = 0.5, batch_size: int = 1, reduction: Union[LossReduction, str] = LossReduction.SUM\n- ) -> None:\n+ @deprecated_arg(name=\"reduction\", since=\"0.8\", msg_suffix=\"`reduction` is no longer supported.\")\n+ def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction=\"sum\") -> None:\n \"\"\"\n Args:\n temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.\n+ batch_size: The number of samples.\n \n Raises:\n- AssertionError: When an input of dimension length > 2 is passed\n- AssertionError: When input and target are of different shapes\n+ ValueError: When an input of dimension length > 2 is passed\n+ ValueError: When input and target are of different shapes\n+\n+ .. deprecated:: 0.8.0\n+\n+ `reduction` is no longer supported.\n \n \"\"\"\n- super().__init__(reduction=LossReduction(reduction).value)\n+ super().__init__()\n \n self.batch_size = batch_size\n self.temperature = temperature\n@@ -53,18 +55,15 @@\n Args:\n input: the shape should be B[F].\n target: the shape should be B[F].\n-\n- Raises:\n- ValueError: When ``self.reduction`` is not one of [\"sum\", \"none\"].\n \"\"\"\n if len(target.shape) > 2 or len(input.shape) > 2:\n- raise AssertionError(\n+ raise ValueError(\n f\"Either target or input has dimensions greater than 2 where target \"\n f\"shape is ({target.shape}) and input shape is ({input.shape})\"\n )\n \n if target.shape != input.shape:\n- raise AssertionError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n+ raise ValueError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n \n temperature_tensor = torch.tensor(self.temperature).to(input.device)\n \n@@ -86,6 +85,4 @@\n \n loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))\n \n- if self.reduction == LossReduction.SUM.value:\n- return torch.sum(loss_partial) / (2 * self.batch_size)\n- raise ValueError(f\"Unsupported reduction: {self.reduction}, \" f'available options are [\"mean\", \"sum\", \"none\"].')\n+ return torch.sum(loss_partial) / (2 * self.batch_size)\n", "issue": "`reduction` for `ContrastiveLoss`\n**Describe the bug**\r\nthe error messages and docstring should be consistent\r\nhttps://github.com/Project-MONAI/MONAI/blob/a7bc4a3cbaeaa3c505a25ca2ddf6922bda8ea7dc/monai/losses/contrastive.py#L89-L91\r\n\r\nhttps://github.com/Project-MONAI/MONAI/blob/a7bc4a3cbaeaa3c505a25ca2ddf6922bda8ea7dc/monai/losses/contrastive.py#L58\r\n\r\n**Expected behavior**\r\nimplementing the option `reduction=\"none\"`?\r\n\n", "before_files": [{"content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, 
software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Union\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch.nn.modules.loss import _Loss\n\nfrom monai.utils import LossReduction\n\n\nclass ContrastiveLoss(_Loss):\n\n \"\"\"\n Compute the Contrastive loss defined in:\n\n Chen, Ting, et al. \"A simple framework for contrastive learning of visual representations.\" International\n conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html)\n\n Adapted from:\n https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5\n\n \"\"\"\n\n def __init__(\n self, temperature: float = 0.5, batch_size: int = 1, reduction: Union[LossReduction, str] = LossReduction.SUM\n ) -> None:\n \"\"\"\n Args:\n temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.\n\n Raises:\n AssertionError: When an input of dimension length > 2 is passed\n AssertionError: When input and target are of different shapes\n\n \"\"\"\n super().__init__(reduction=LossReduction(reduction).value)\n\n self.batch_size = batch_size\n self.temperature = temperature\n\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n input: the shape should be B[F].\n target: the shape should be B[F].\n\n Raises:\n ValueError: When ``self.reduction`` is not one of [\"sum\", \"none\"].\n \"\"\"\n if len(target.shape) > 2 or len(input.shape) > 2:\n raise AssertionError(\n f\"Either target or input has dimensions greater than 2 where target \"\n f\"shape is ({target.shape}) and input shape is ({input.shape})\"\n )\n\n if target.shape != input.shape:\n raise AssertionError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n\n temperature_tensor = torch.tensor(self.temperature).to(input.device)\n\n norm_i = F.normalize(input, dim=1)\n norm_j = F.normalize(target, dim=1)\n\n negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)\n negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)\n negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)\n\n repr = torch.cat([norm_i, norm_j], dim=0)\n sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)\n sim_ij = torch.diag(sim_matrix, self.batch_size)\n sim_ji = torch.diag(sim_matrix, -self.batch_size)\n\n positives = torch.cat([sim_ij, sim_ji], dim=0)\n nominator = torch.exp(positives / temperature_tensor)\n denominator = negatives_mask * torch.exp(sim_matrix / temperature_tensor)\n\n loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))\n\n if self.reduction == LossReduction.SUM.value:\n return torch.sum(loss_partial) / (2 * self.batch_size)\n raise ValueError(f\"Unsupported reduction: {self.reduction}, \" f'available options are [\"mean\", \"sum\", \"none\"].')\n", "path": "monai/losses/contrastive.py"}], "after_files": [{"content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under 
the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch.nn.modules.loss import _Loss\n\nfrom monai.utils import deprecated_arg\n\n\nclass ContrastiveLoss(_Loss):\n\n \"\"\"\n Compute the Contrastive loss defined in:\n\n Chen, Ting, et al. \"A simple framework for contrastive learning of visual representations.\" International\n conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html)\n\n Adapted from:\n https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5\n\n \"\"\"\n\n @deprecated_arg(name=\"reduction\", since=\"0.8\", msg_suffix=\"`reduction` is no longer supported.\")\n def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction=\"sum\") -> None:\n \"\"\"\n Args:\n temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.\n batch_size: The number of samples.\n\n Raises:\n ValueError: When an input of dimension length > 2 is passed\n ValueError: When input and target are of different shapes\n\n .. deprecated:: 0.8.0\n\n `reduction` is no longer supported.\n\n \"\"\"\n super().__init__()\n\n self.batch_size = batch_size\n self.temperature = temperature\n\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n input: the shape should be B[F].\n target: the shape should be B[F].\n \"\"\"\n if len(target.shape) > 2 or len(input.shape) > 2:\n raise ValueError(\n f\"Either target or input has dimensions greater than 2 where target \"\n f\"shape is ({target.shape}) and input shape is ({input.shape})\"\n )\n\n if target.shape != input.shape:\n raise ValueError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n\n temperature_tensor = torch.tensor(self.temperature).to(input.device)\n\n norm_i = F.normalize(input, dim=1)\n norm_j = F.normalize(target, dim=1)\n\n negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)\n negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)\n negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)\n\n repr = torch.cat([norm_i, norm_j], dim=0)\n sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)\n sim_ij = torch.diag(sim_matrix, self.batch_size)\n sim_ji = torch.diag(sim_matrix, -self.batch_size)\n\n positives = torch.cat([sim_ij, sim_ji], dim=0)\n nominator = torch.exp(positives / temperature_tensor)\n denominator = negatives_mask * torch.exp(sim_matrix / temperature_tensor)\n\n loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))\n\n return torch.sum(loss_partial) / (2 * self.batch_size)\n", "path": "monai/losses/contrastive.py"}]}
1,507
738
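For orientation on the `Project-MONAI__MONAI-3464` record above: the accepted patch deprecates the `reduction` argument of `ContrastiveLoss` (it no longer affects the result) and always returns the summed loss divided by `2 * batch_size`. The sketch below is a hedged illustration of how the patched class would be called, not part of the record itself; the `monai.losses` import path is assumed from the file location `monai/losses/contrastive.py`, and the batch size of 4 and feature size of 128 are arbitrary illustrative values.

```python
import torch
from monai.losses import ContrastiveLoss  # assumed re-export of monai/losses/contrastive.py

batch_size = 4
loss_fn = ContrastiveLoss(temperature=0.5, batch_size=batch_size)  # `reduction` is deprecated and ignored

view_a = torch.randn(batch_size, 128)  # B x F embeddings of one augmented view
view_b = torch.randn(batch_size, 128)  # B x F embeddings of the paired view
loss = loss_fn(view_a, view_b)         # scalar: sum of per-pair terms / (2 * batch_size)
```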
gh_patches_debug_23457
rasdani/github-patches
git_diff
bokeh__bokeh-5817
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- deprecate bokeh.models.layouts HBox and VBox --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bokeh/models/layouts.py` Content: ``` 1 ''' Various kinds of layout components. 2 3 ''' 4 from __future__ import absolute_import 5 6 import logging 7 logger = logging.getLogger(__name__) 8 9 from ..core.enums import SizingMode 10 from ..core.has_props import abstract 11 from ..core.properties import Bool, Enum, Int, Instance, List, Seq, String 12 from ..core.validation import warning 13 from ..core.validation.warnings import BOTH_CHILD_AND_ROOT, EMPTY_LAYOUT 14 from ..embed import notebook_div 15 from ..model import Model 16 17 @abstract 18 class LayoutDOM(Model): 19 ''' An abstract base class for layout components. 20 21 ''' 22 23 width = Int(help=""" 24 An optional width for the component (in pixels). 25 """) 26 27 height = Int(help=""" 28 An optional height for the component (in pixels). 29 """) 30 31 disabled = Bool(False, help=""" 32 Whether the widget will be disabled when rendered. If ``True``, 33 the widget will be greyed-out, and not respond to UI events. 34 """) 35 36 sizing_mode = Enum(SizingMode, default="fixed", help=""" 37 How the item being displayed should size itself. Possible values are 38 ``"fixed"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"``, and 39 ``"stretch_both"``. 40 41 ``"stretch_both"`` elements are completely responsive (independently in width and height) and 42 will resize to occupy all available space, even if this changes the aspect ratio of the element. 43 This is sometimes called outside-in, and is a typical behavior for desktop applications. 44 45 ``"fixed"`` elements are not responsive. They will retain their original width and height 46 regardless of any subsequent browser window resize events. 47 48 ``"scale_width"`` elements will responsively resize to fit to the width available, *while 49 maintaining the original aspect ratio*. This is a typical behavior for modern websites. For a 50 ``Plot``, the aspect ratio ``plot_width/plot_height`` is maintained. 51 52 ``"scale_height"`` elements will responsively resize to fit to the height available, *while 53 maintaining the original aspect ratio*. For a ``Plot``, the aspect ratio 54 ``plot_width/plot_height`` is maintained. A plot with ``"scale_height"`` mode needs 55 to be wrapped in a ``Row`` or ``Column`` to be responsive. 56 57 ``"scale_both"`` elements will responsively resize to for both the width and height available, 58 *while maintaining the original aspect ratio*. 59 60 """) 61 62 css_classes = Seq(String, help=""" 63 A list of css class names to add to this DOM element. Note: the class names are 64 simply added as-is, no other guarantees are provided. 65 """) 66 67 @property 68 def html(self): 69 from IPython.core.display import HTML 70 return HTML(notebook_div(self)) 71 72 73 class Spacer(LayoutDOM): 74 ''' A container for space used to fill an empty spot in a row or column. 75 76 ''' 77 78 79 class WidgetBox(LayoutDOM): 80 ''' A container for widgets that are part of a layout. 
81 82 ''' 83 def __init__(self, *args, **kwargs): 84 if len(args) > 0 and "children" in kwargs: 85 raise ValueError("'children' keyword cannot be used with positional arguments") 86 elif len(args) > 0: 87 kwargs["children"] = list(args) 88 super(WidgetBox, self).__init__(**kwargs) 89 90 @warning(EMPTY_LAYOUT) 91 def _check_empty_layout(self): 92 from itertools import chain 93 if not list(chain(self.children)): 94 return str(self) 95 96 @warning(BOTH_CHILD_AND_ROOT) 97 def _check_child_is_also_root(self): 98 problems = [] 99 for c in self.children: 100 if c.document is not None and c in c.document.roots: 101 problems.append(str(c)) 102 if problems: 103 return ", ".join(problems) 104 else: 105 return None 106 107 children = List(Instance('bokeh.models.widgets.Widget'), help=""" 108 The list of widgets to put in the layout box. 109 """) 110 111 112 @abstract 113 class Box(LayoutDOM): 114 ''' Abstract base class for Row and Column. Do not use directly. 115 116 ''' 117 118 def __init__(self, *args, **kwargs): 119 120 if len(args) > 0 and "children" in kwargs: 121 raise ValueError("'children' keyword cannot be used with positional arguments") 122 elif len(args) > 0: 123 kwargs["children"] = list(args) 124 125 unwrapped_children = kwargs.get("children", []) 126 kwargs["children"] = self._wrap_children(unwrapped_children) 127 super(Box, self).__init__(**kwargs) 128 129 def _wrap_children(self, children): 130 ''' Wrap any Widgets of a list of child layouts in a WidgetBox. 131 This allows for the convenience of just spelling Row(button1, button2). 132 ''' 133 from .widgets.widget import Widget 134 wrapped_children = [] 135 for child in children: 136 if isinstance(child, Widget): 137 child = WidgetBox( 138 children=[child], 139 sizing_mode=child.sizing_mode, 140 width=child.width, 141 height=child.height, 142 disabled=child.disabled 143 ) 144 wrapped_children.append(child) 145 return wrapped_children 146 147 @warning(EMPTY_LAYOUT) 148 def _check_empty_layout(self): 149 from itertools import chain 150 if not list(chain(self.children)): 151 return str(self) 152 153 @warning(BOTH_CHILD_AND_ROOT) 154 def _check_child_is_also_root(self): 155 problems = [] 156 for c in self.children: 157 if c.document is not None and c in c.document.roots: 158 problems.append(str(c)) 159 if problems: 160 return ", ".join(problems) 161 else: 162 return None 163 164 #TODO Debating the following instead to prevent people adding just a plain 165 # widget into a box, which sometimes works and sometimes looks disastrous 166 #children = List( 167 # Either( 168 # Instance('bokeh.models.layouts.Row'), 169 # Instance('bokeh.models.layouts.Column'), 170 # Instance('bokeh.models.plots.Plot'), 171 # Instance('bokeh.models.layouts.WidgetBox') 172 # ), help=""" 173 # The list of children, which can be other components including plots, rows, columns, and widgets. 174 #""") 175 children = List(Instance(LayoutDOM), help=""" 176 The list of children, which can be other components including plots, rows, columns, and widgets. 177 """) 178 179 180 class Row(Box): 181 ''' Lay out child components in a single horizontal row. 182 183 Children can be specified as positional arguments, as a single argument 184 that is a sequence, or using the ``children`` keyword argument. 185 ''' 186 187 188 class Column(Box): 189 ''' Lay out child components in a single vertical row. 190 191 Children can be specified as positional arguments, as a single argument 192 that is a sequence, or using the ``children`` keyword argument. 
193 ''' 194 195 196 def HBox(*args, **kwargs): 197 ''' Lay out child components in a single horizontal row. 198 199 Children can be specified as positional arguments, as a single argument 200 that is a sequence, or using the ``children`` keyword argument. 201 202 Returns a Row instance. 203 ''' 204 return Row(*args, **kwargs) 205 206 207 def VBox(*args, **kwargs): 208 ''' Lay out child components in a single vertical row. 209 210 Children can be specified as positional arguments, as a single argument 211 that is a sequence, or using the ``children`` keyword argument. 212 213 Returns a Column instance. 214 ''' 215 return Column(*args, **kwargs) 216 ``` Path: `bokeh/models/widgets/panels.py` Content: ``` 1 ''' Various kinds of panel widgets. 2 3 ''' 4 from __future__ import absolute_import 5 6 from ...core.properties import Bool, Instance, Int, List, String 7 8 from ..callbacks import Callback 9 from ..layouts import LayoutDOM 10 11 from .widget import Widget 12 13 class Panel(Widget): 14 ''' A single-widget container with title bar and controls. 15 16 ''' 17 18 title = String(default="", help=""" 19 An optional text title of the panel. 20 """) 21 22 child = Instance(LayoutDOM, help=""" 23 The child widget. If you need more children, use a layout widget, 24 e.g. ``HBox`` or ``VBox``. 25 """) 26 27 closable = Bool(False, help=""" 28 Whether this panel is closeable or not. If True, an "x" button will 29 appear. 30 """) 31 32 class Tabs(Widget): 33 ''' A panel widget with navigation tabs. 34 35 ''' 36 37 __example__ = "sphinx/source/docs/user_guide/examples/interaction_tab_panes.py" 38 39 tabs = List(Instance(Panel), help=""" 40 The list of child panel widgets. 41 """) 42 43 active = Int(0, help=""" 44 The index of the active tab. 45 """) 46 47 callback = Instance(Callback, help=""" 48 A callback to run in the browser whenever the button is activated. 49 """) 50 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bokeh/models/layouts.py b/bokeh/models/layouts.py --- a/bokeh/models/layouts.py +++ b/bokeh/models/layouts.py @@ -13,6 +13,7 @@ from ..core.validation.warnings import BOTH_CHILD_AND_ROOT, EMPTY_LAYOUT from ..embed import notebook_div from ..model import Model +from ..util.deprecation import deprecated @abstract class LayoutDOM(Model): @@ -201,6 +202,7 @@ Returns a Row instance. ''' + deprecated((0, 12, 5), 'bokeh.models.HBox', 'bokeh.models.Row') return Row(*args, **kwargs) @@ -212,4 +214,5 @@ Returns a Column instance. ''' + deprecated((0, 12, 5), 'bokeh.models.VBox', 'bokeh.models.Column') return Column(*args, **kwargs) diff --git a/bokeh/models/widgets/panels.py b/bokeh/models/widgets/panels.py --- a/bokeh/models/widgets/panels.py +++ b/bokeh/models/widgets/panels.py @@ -21,7 +21,7 @@ child = Instance(LayoutDOM, help=""" The child widget. If you need more children, use a layout widget, - e.g. ``HBox`` or ``VBox``. + e.g. ``Row`` or ``Column``. """) closable = Bool(False, help="""
{"golden_diff": "diff --git a/bokeh/models/layouts.py b/bokeh/models/layouts.py\n--- a/bokeh/models/layouts.py\n+++ b/bokeh/models/layouts.py\n@@ -13,6 +13,7 @@\n from ..core.validation.warnings import BOTH_CHILD_AND_ROOT, EMPTY_LAYOUT\n from ..embed import notebook_div\n from ..model import Model\n+from ..util.deprecation import deprecated\n \n @abstract\n class LayoutDOM(Model):\n@@ -201,6 +202,7 @@\n \n Returns a Row instance.\n '''\n+ deprecated((0, 12, 5), 'bokeh.models.HBox', 'bokeh.models.Row')\n return Row(*args, **kwargs)\n \n \n@@ -212,4 +214,5 @@\n \n Returns a Column instance.\n '''\n+ deprecated((0, 12, 5), 'bokeh.models.VBox', 'bokeh.models.Column')\n return Column(*args, **kwargs)\ndiff --git a/bokeh/models/widgets/panels.py b/bokeh/models/widgets/panels.py\n--- a/bokeh/models/widgets/panels.py\n+++ b/bokeh/models/widgets/panels.py\n@@ -21,7 +21,7 @@\n \n child = Instance(LayoutDOM, help=\"\"\"\n The child widget. If you need more children, use a layout widget,\n- e.g. ``HBox`` or ``VBox``.\n+ e.g. ``Row`` or ``Column``.\n \"\"\")\n \n closable = Bool(False, help=\"\"\"\n", "issue": "deprecate bokeh.models.layouts HBox and VBox\n\n", "before_files": [{"content": "''' Various kinds of layout components.\n\n'''\nfrom __future__ import absolute_import\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom ..core.enums import SizingMode\nfrom ..core.has_props import abstract\nfrom ..core.properties import Bool, Enum, Int, Instance, List, Seq, String\nfrom ..core.validation import warning\nfrom ..core.validation.warnings import BOTH_CHILD_AND_ROOT, EMPTY_LAYOUT\nfrom ..embed import notebook_div\nfrom ..model import Model\n\n@abstract\nclass LayoutDOM(Model):\n ''' An abstract base class for layout components.\n\n '''\n\n width = Int(help=\"\"\"\n An optional width for the component (in pixels).\n \"\"\")\n\n height = Int(help=\"\"\"\n An optional height for the component (in pixels).\n \"\"\")\n\n disabled = Bool(False, help=\"\"\"\n Whether the widget will be disabled when rendered. If ``True``,\n the widget will be greyed-out, and not respond to UI events.\n \"\"\")\n\n sizing_mode = Enum(SizingMode, default=\"fixed\", help=\"\"\"\n How the item being displayed should size itself. Possible values are\n ``\"fixed\"``, ``\"scale_width\"``, ``\"scale_height\"``, ``\"scale_both\"``, and\n ``\"stretch_both\"``.\n\n ``\"stretch_both\"`` elements are completely responsive (independently in width and height) and\n will resize to occupy all available space, even if this changes the aspect ratio of the element.\n This is sometimes called outside-in, and is a typical behavior for desktop applications.\n\n ``\"fixed\"`` elements are not responsive. They will retain their original width and height\n regardless of any subsequent browser window resize events.\n\n ``\"scale_width\"`` elements will responsively resize to fit to the width available, *while\n maintaining the original aspect ratio*. This is a typical behavior for modern websites. For a\n ``Plot``, the aspect ratio ``plot_width/plot_height`` is maintained.\n\n ``\"scale_height\"`` elements will responsively resize to fit to the height available, *while\n maintaining the original aspect ratio*. For a ``Plot``, the aspect ratio\n ``plot_width/plot_height`` is maintained. 
A plot with ``\"scale_height\"`` mode needs\n to be wrapped in a ``Row`` or ``Column`` to be responsive.\n\n ``\"scale_both\"`` elements will responsively resize to for both the width and height available,\n *while maintaining the original aspect ratio*.\n\n \"\"\")\n\n css_classes = Seq(String, help=\"\"\"\n A list of css class names to add to this DOM element. Note: the class names are\n simply added as-is, no other guarantees are provided.\n \"\"\")\n\n @property\n def html(self):\n from IPython.core.display import HTML\n return HTML(notebook_div(self))\n\n\nclass Spacer(LayoutDOM):\n ''' A container for space used to fill an empty spot in a row or column.\n\n '''\n\n\nclass WidgetBox(LayoutDOM):\n ''' A container for widgets that are part of a layout.\n\n '''\n def __init__(self, *args, **kwargs):\n if len(args) > 0 and \"children\" in kwargs:\n raise ValueError(\"'children' keyword cannot be used with positional arguments\")\n elif len(args) > 0:\n kwargs[\"children\"] = list(args)\n super(WidgetBox, self).__init__(**kwargs)\n\n @warning(EMPTY_LAYOUT)\n def _check_empty_layout(self):\n from itertools import chain\n if not list(chain(self.children)):\n return str(self)\n\n @warning(BOTH_CHILD_AND_ROOT)\n def _check_child_is_also_root(self):\n problems = []\n for c in self.children:\n if c.document is not None and c in c.document.roots:\n problems.append(str(c))\n if problems:\n return \", \".join(problems)\n else:\n return None\n\n children = List(Instance('bokeh.models.widgets.Widget'), help=\"\"\"\n The list of widgets to put in the layout box.\n \"\"\")\n\n\n@abstract\nclass Box(LayoutDOM):\n ''' Abstract base class for Row and Column. Do not use directly.\n\n '''\n\n def __init__(self, *args, **kwargs):\n\n if len(args) > 0 and \"children\" in kwargs:\n raise ValueError(\"'children' keyword cannot be used with positional arguments\")\n elif len(args) > 0:\n kwargs[\"children\"] = list(args)\n\n unwrapped_children = kwargs.get(\"children\", [])\n kwargs[\"children\"] = self._wrap_children(unwrapped_children)\n super(Box, self).__init__(**kwargs)\n\n def _wrap_children(self, children):\n ''' Wrap any Widgets of a list of child layouts in a WidgetBox.\n This allows for the convenience of just spelling Row(button1, button2).\n '''\n from .widgets.widget import Widget\n wrapped_children = []\n for child in children:\n if isinstance(child, Widget):\n child = WidgetBox(\n children=[child],\n sizing_mode=child.sizing_mode,\n width=child.width,\n height=child.height,\n disabled=child.disabled\n )\n wrapped_children.append(child)\n return wrapped_children\n\n @warning(EMPTY_LAYOUT)\n def _check_empty_layout(self):\n from itertools import chain\n if not list(chain(self.children)):\n return str(self)\n\n @warning(BOTH_CHILD_AND_ROOT)\n def _check_child_is_also_root(self):\n problems = []\n for c in self.children:\n if c.document is not None and c in c.document.roots:\n problems.append(str(c))\n if problems:\n return \", \".join(problems)\n else:\n return None\n\n #TODO Debating the following instead to prevent people adding just a plain\n # widget into a box, which sometimes works and sometimes looks disastrous\n #children = List(\n # Either(\n # Instance('bokeh.models.layouts.Row'),\n # Instance('bokeh.models.layouts.Column'),\n # Instance('bokeh.models.plots.Plot'),\n # Instance('bokeh.models.layouts.WidgetBox')\n # ), help=\"\"\"\n # The list of children, which can be other components including plots, rows, columns, and widgets.\n #\"\"\")\n children = List(Instance(LayoutDOM), 
help=\"\"\"\n The list of children, which can be other components including plots, rows, columns, and widgets.\n \"\"\")\n\n\nclass Row(Box):\n ''' Lay out child components in a single horizontal row.\n\n Children can be specified as positional arguments, as a single argument\n that is a sequence, or using the ``children`` keyword argument.\n '''\n\n\nclass Column(Box):\n ''' Lay out child components in a single vertical row.\n\n Children can be specified as positional arguments, as a single argument\n that is a sequence, or using the ``children`` keyword argument.\n '''\n\n\ndef HBox(*args, **kwargs):\n ''' Lay out child components in a single horizontal row.\n\n Children can be specified as positional arguments, as a single argument\n that is a sequence, or using the ``children`` keyword argument.\n\n Returns a Row instance.\n '''\n return Row(*args, **kwargs)\n\n\ndef VBox(*args, **kwargs):\n ''' Lay out child components in a single vertical row.\n\n Children can be specified as positional arguments, as a single argument\n that is a sequence, or using the ``children`` keyword argument.\n\n Returns a Column instance.\n '''\n return Column(*args, **kwargs)\n", "path": "bokeh/models/layouts.py"}, {"content": "''' Various kinds of panel widgets.\n\n'''\nfrom __future__ import absolute_import\n\nfrom ...core.properties import Bool, Instance, Int, List, String\n\nfrom ..callbacks import Callback\nfrom ..layouts import LayoutDOM\n\nfrom .widget import Widget\n\nclass Panel(Widget):\n ''' A single-widget container with title bar and controls.\n\n '''\n\n title = String(default=\"\", help=\"\"\"\n An optional text title of the panel.\n \"\"\")\n\n child = Instance(LayoutDOM, help=\"\"\"\n The child widget. If you need more children, use a layout widget,\n e.g. ``HBox`` or ``VBox``.\n \"\"\")\n\n closable = Bool(False, help=\"\"\"\n Whether this panel is closeable or not. If True, an \"x\" button will\n appear.\n \"\"\")\n\nclass Tabs(Widget):\n ''' A panel widget with navigation tabs.\n\n '''\n\n __example__ = \"sphinx/source/docs/user_guide/examples/interaction_tab_panes.py\"\n\n tabs = List(Instance(Panel), help=\"\"\"\n The list of child panel widgets.\n \"\"\")\n\n active = Int(0, help=\"\"\"\n The index of the active tab.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the button is activated.\n \"\"\")\n", "path": "bokeh/models/widgets/panels.py"}], "after_files": [{"content": "''' Various kinds of layout components.\n\n'''\nfrom __future__ import absolute_import\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom ..core.enums import SizingMode\nfrom ..core.has_props import abstract\nfrom ..core.properties import Bool, Enum, Int, Instance, List, Seq, String\nfrom ..core.validation import warning\nfrom ..core.validation.warnings import BOTH_CHILD_AND_ROOT, EMPTY_LAYOUT\nfrom ..embed import notebook_div\nfrom ..model import Model\nfrom ..util.deprecation import deprecated\n\n@abstract\nclass LayoutDOM(Model):\n ''' An abstract base class for layout components.\n\n '''\n\n width = Int(help=\"\"\"\n An optional width for the component (in pixels).\n \"\"\")\n\n height = Int(help=\"\"\"\n An optional height for the component (in pixels).\n \"\"\")\n\n disabled = Bool(False, help=\"\"\"\n Whether the widget will be disabled when rendered. 
If ``True``,\n the widget will be greyed-out, and not respond to UI events.\n \"\"\")\n\n sizing_mode = Enum(SizingMode, default=\"fixed\", help=\"\"\"\n How the item being displayed should size itself. Possible values are\n ``\"fixed\"``, ``\"scale_width\"``, ``\"scale_height\"``, ``\"scale_both\"``, and\n ``\"stretch_both\"``.\n\n ``\"stretch_both\"`` elements are completely responsive (independently in width and height) and\n will resize to occupy all available space, even if this changes the aspect ratio of the element.\n This is sometimes called outside-in, and is a typical behavior for desktop applications.\n\n ``\"fixed\"`` elements are not responsive. They will retain their original width and height\n regardless of any subsequent browser window resize events.\n\n ``\"scale_width\"`` elements will responsively resize to fit to the width available, *while\n maintaining the original aspect ratio*. This is a typical behavior for modern websites. For a\n ``Plot``, the aspect ratio ``plot_width/plot_height`` is maintained.\n\n ``\"scale_height\"`` elements will responsively resize to fit to the height available, *while\n maintaining the original aspect ratio*. For a ``Plot``, the aspect ratio\n ``plot_width/plot_height`` is maintained. A plot with ``\"scale_height\"`` mode needs\n to be wrapped in a ``Row`` or ``Column`` to be responsive.\n\n ``\"scale_both\"`` elements will responsively resize to for both the width and height available,\n *while maintaining the original aspect ratio*.\n\n \"\"\")\n\n css_classes = Seq(String, help=\"\"\"\n A list of css class names to add to this DOM element. Note: the class names are\n simply added as-is, no other guarantees are provided.\n \"\"\")\n\n @property\n def html(self):\n from IPython.core.display import HTML\n return HTML(notebook_div(self))\n\n\nclass Spacer(LayoutDOM):\n ''' A container for space used to fill an empty spot in a row or column.\n\n '''\n\n\nclass WidgetBox(LayoutDOM):\n ''' A container for widgets that are part of a layout.\n\n '''\n def __init__(self, *args, **kwargs):\n if len(args) > 0 and \"children\" in kwargs:\n raise ValueError(\"'children' keyword cannot be used with positional arguments\")\n elif len(args) > 0:\n kwargs[\"children\"] = list(args)\n super(WidgetBox, self).__init__(**kwargs)\n\n @warning(EMPTY_LAYOUT)\n def _check_empty_layout(self):\n from itertools import chain\n if not list(chain(self.children)):\n return str(self)\n\n @warning(BOTH_CHILD_AND_ROOT)\n def _check_child_is_also_root(self):\n problems = []\n for c in self.children:\n if c.document is not None and c in c.document.roots:\n problems.append(str(c))\n if problems:\n return \", \".join(problems)\n else:\n return None\n\n children = List(Instance('bokeh.models.widgets.Widget'), help=\"\"\"\n The list of widgets to put in the layout box.\n \"\"\")\n\n\n@abstract\nclass Box(LayoutDOM):\n ''' Abstract base class for Row and Column. 
Do not use directly.\n\n '''\n\n def __init__(self, *args, **kwargs):\n\n if len(args) > 0 and \"children\" in kwargs:\n raise ValueError(\"'children' keyword cannot be used with positional arguments\")\n elif len(args) > 0:\n kwargs[\"children\"] = list(args)\n\n unwrapped_children = kwargs.get(\"children\", [])\n kwargs[\"children\"] = self._wrap_children(unwrapped_children)\n super(Box, self).__init__(**kwargs)\n\n def _wrap_children(self, children):\n ''' Wrap any Widgets of a list of child layouts in a WidgetBox.\n This allows for the convenience of just spelling Row(button1, button2).\n '''\n from .widgets.widget import Widget\n wrapped_children = []\n for child in children:\n if isinstance(child, Widget):\n child = WidgetBox(\n children=[child],\n sizing_mode=child.sizing_mode,\n width=child.width,\n height=child.height,\n disabled=child.disabled\n )\n wrapped_children.append(child)\n return wrapped_children\n\n @warning(EMPTY_LAYOUT)\n def _check_empty_layout(self):\n from itertools import chain\n if not list(chain(self.children)):\n return str(self)\n\n @warning(BOTH_CHILD_AND_ROOT)\n def _check_child_is_also_root(self):\n problems = []\n for c in self.children:\n if c.document is not None and c in c.document.roots:\n problems.append(str(c))\n if problems:\n return \", \".join(problems)\n else:\n return None\n\n #TODO Debating the following instead to prevent people adding just a plain\n # widget into a box, which sometimes works and sometimes looks disastrous\n #children = List(\n # Either(\n # Instance('bokeh.models.layouts.Row'),\n # Instance('bokeh.models.layouts.Column'),\n # Instance('bokeh.models.plots.Plot'),\n # Instance('bokeh.models.layouts.WidgetBox')\n # ), help=\"\"\"\n # The list of children, which can be other components including plots, rows, columns, and widgets.\n #\"\"\")\n children = List(Instance(LayoutDOM), help=\"\"\"\n The list of children, which can be other components including plots, rows, columns, and widgets.\n \"\"\")\n\n\nclass Row(Box):\n ''' Lay out child components in a single horizontal row.\n\n Children can be specified as positional arguments, as a single argument\n that is a sequence, or using the ``children`` keyword argument.\n '''\n\n\nclass Column(Box):\n ''' Lay out child components in a single vertical row.\n\n Children can be specified as positional arguments, as a single argument\n that is a sequence, or using the ``children`` keyword argument.\n '''\n\n\ndef HBox(*args, **kwargs):\n ''' Lay out child components in a single horizontal row.\n\n Children can be specified as positional arguments, as a single argument\n that is a sequence, or using the ``children`` keyword argument.\n\n Returns a Row instance.\n '''\n deprecated((0, 12, 5), 'bokeh.models.HBox', 'bokeh.models.Row')\n return Row(*args, **kwargs)\n\n\ndef VBox(*args, **kwargs):\n ''' Lay out child components in a single vertical row.\n\n Children can be specified as positional arguments, as a single argument\n that is a sequence, or using the ``children`` keyword argument.\n\n Returns a Column instance.\n '''\n deprecated((0, 12, 5), 'bokeh.models.VBox', 'bokeh.models.Column')\n return Column(*args, **kwargs)\n", "path": "bokeh/models/layouts.py"}, {"content": "''' Various kinds of panel widgets.\n\n'''\nfrom __future__ import absolute_import\n\nfrom ...core.properties import Bool, Instance, Int, List, String\n\nfrom ..callbacks import Callback\nfrom ..layouts import LayoutDOM\n\nfrom .widget import Widget\n\nclass Panel(Widget):\n ''' A single-widget container with title 
bar and controls.\n\n '''\n\n title = String(default=\"\", help=\"\"\"\n An optional text title of the panel.\n \"\"\")\n\n child = Instance(LayoutDOM, help=\"\"\"\n The child widget. If you need more children, use a layout widget,\n e.g. ``Row`` or ``Column``.\n \"\"\")\n\n closable = Bool(False, help=\"\"\"\n Whether this panel is closeable or not. If True, an \"x\" button will\n appear.\n \"\"\")\n\nclass Tabs(Widget):\n ''' A panel widget with navigation tabs.\n\n '''\n\n __example__ = \"sphinx/source/docs/user_guide/examples/interaction_tab_panes.py\"\n\n tabs = List(Instance(Panel), help=\"\"\"\n The list of child panel widgets.\n \"\"\")\n\n active = Int(0, help=\"\"\"\n The index of the active tab.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the button is activated.\n \"\"\")\n", "path": "bokeh/models/widgets/panels.py"}]}
2,845
335
gh_patches_debug_16082
rasdani/github-patches
git_diff
mlflow__mlflow-6687
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [FR] Add MlflowException on the top level ### Willingness to contribute No. I cannot contribute this feature at this time. ### Proposal Summary Make `mlflow.exceptions.MlflowException` importable as `mlflow.MlflowException`. ### Motivation > #### What is the use case for this feature? Make it easier for users to catch MLflow errors. > #### Why is this use case valuable to support for MLflow users in general? ^ > #### Why is this use case valuable to support for your project(s) or organization? ^ > #### Why is it currently difficult to achieve this use case? The `mlflow.exceptions` module is not documented, so it's difficult for users to find out how to import and catch `MlflowException`. ### Details _No response_ ### What component(s) does this bug affect? - [ ] `area/artifacts`: Artifact stores and artifact logging - [ ] `area/build`: Build and test infrastructure for MLflow - [ ] `area/docs`: MLflow documentation pages - [ ] `area/examples`: Example code - [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry - [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors - [ ] `area/pipelines`: Pipelines, Pipeline APIs, Pipeline configs, Pipeline Templates - [ ] `area/projects`: MLproject format, project running backends - [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs - [ ] `area/server-infra`: MLflow Tracking server backend - [X] `area/tracking`: Tracking Service, tracking client APIs, autologging ### What interface(s) does this bug affect? - [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server - [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models - [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry - [ ] `area/windows`: Windows support ### What language(s) does this bug affect? - [ ] `language/r`: R APIs and clients - [ ] `language/java`: Java APIs and clients - [ ] `language/new`: Proposals for new client languages ### What integration(s) does this bug affect? - [ ] `integrations/azure`: Azure and Azure ML integrations - [ ] `integrations/sagemaker`: SageMaker integrations - [ ] `integrations/databricks`: Databricks integrations --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mlflow/__init__.py` Content: ``` 1 # pylint: disable=wrong-import-position 2 """ 3 The ``mlflow`` module provides a high-level "fluent" API for starting and managing MLflow runs. 4 For example: 5 6 .. code:: python 7 8 import mlflow 9 10 mlflow.start_run() 11 mlflow.log_param("my", "param") 12 mlflow.log_metric("score", 100) 13 mlflow.end_run() 14 15 You can also use the context manager syntax like this: 16 17 .. code:: python 18 19 with mlflow.start_run() as run: 20 mlflow.log_param("my", "param") 21 mlflow.log_metric("score", 100) 22 23 which automatically terminates the run at the end of the ``with`` block. 24 25 The fluent tracking API is not currently threadsafe. Any concurrent callers to the tracking API must 26 implement mutual exclusion manually. 27 28 For a lower level API, see the :py:mod:`mlflow.client` module. 
29 """ 30 from mlflow.version import VERSION as __version__ # pylint: disable=unused-import 31 from mlflow.utils.logging_utils import _configure_mlflow_loggers 32 import mlflow.tracking._model_registry.fluent 33 import mlflow.tracking.fluent 34 35 # Filter annoying Cython warnings that serve no good purpose, and so before 36 # importing other modules. 37 # See: https://github.com/numpy/numpy/pull/432/commits/170ed4e33d6196d7 38 import warnings 39 40 warnings.filterwarnings("ignore", message="numpy.dtype size changed") 41 warnings.filterwarnings("ignore", message="numpy.ufunc size changed") 42 43 from mlflow import projects 44 from mlflow import tracking 45 import mlflow.models 46 import mlflow.artifacts 47 import mlflow.pipelines 48 import mlflow.client 49 50 # model flavors 51 _model_flavors_supported = [] 52 try: 53 # pylint: disable=unused-import 54 from mlflow import catboost 55 from mlflow import fastai 56 from mlflow import gluon 57 from mlflow import h2o 58 from mlflow import keras 59 from mlflow import lightgbm 60 from mlflow import mleap 61 from mlflow import onnx 62 from mlflow import pyfunc 63 from mlflow import pytorch 64 from mlflow import sklearn 65 from mlflow import spacy 66 from mlflow import spark 67 from mlflow import statsmodels 68 from mlflow import tensorflow 69 from mlflow import xgboost 70 from mlflow import shap 71 from mlflow import pyspark 72 from mlflow import paddle 73 from mlflow import prophet 74 from mlflow import pmdarima 75 from mlflow import diviner 76 77 _model_flavors_supported = [ 78 "catboost", 79 "fastai", 80 "gluon", 81 "h2o", 82 "keras", 83 "lightgbm", 84 "mleap", 85 "onnx", 86 "pyfunc", 87 "pytorch", 88 "sklearn", 89 "spacy", 90 "spark", 91 "statsmodels", 92 "tensorflow", 93 "xgboost", 94 "shap", 95 "paddle", 96 "prophet", 97 "pmdarima", 98 "diviner", 99 ] 100 except ImportError as e: 101 # We are conditional loading these commands since the skinny client does 102 # not support them due to the pandas and numpy dependencies of MLflow Models 103 pass 104 105 106 _configure_mlflow_loggers(root_module_name=__name__) 107 108 # TODO: Comment out this block when we deprecate support for python 3.7. 109 # _major = 3 110 # _minor = 7 111 # _deprecated_version = (_major, _minor) 112 # _min_supported_version = (_major, _minor + 1) 113 114 # if sys.version_info[:2] == _deprecated_version: 115 # warnings.warn( 116 # "MLflow support for Python {dep_ver} is deprecated and will be dropped in " 117 # "an upcoming release. At that point, existing Python {dep_ver} workflows " 118 # "that use MLflow will continue to work without modification, but Python {dep_ver} " 119 # "users will no longer get access to the latest MLflow features and bugfixes. 
" 120 # "We recommend that you upgrade to Python {min_ver} or newer.".format( 121 # dep_ver=".".join(map(str, _deprecated_version)), 122 # min_ver=".".join(map(str, _min_supported_version)), 123 # ), 124 # FutureWarning, 125 # stacklevel=2, 126 # ) 127 128 ActiveRun = mlflow.tracking.fluent.ActiveRun 129 log_param = mlflow.tracking.fluent.log_param 130 log_metric = mlflow.tracking.fluent.log_metric 131 set_tag = mlflow.tracking.fluent.set_tag 132 delete_tag = mlflow.tracking.fluent.delete_tag 133 log_artifacts = mlflow.tracking.fluent.log_artifacts 134 log_artifact = mlflow.tracking.fluent.log_artifact 135 log_text = mlflow.tracking.fluent.log_text 136 log_dict = mlflow.tracking.fluent.log_dict 137 log_image = mlflow.tracking.fluent.log_image 138 log_figure = mlflow.tracking.fluent.log_figure 139 active_run = mlflow.tracking.fluent.active_run 140 get_run = mlflow.tracking.fluent.get_run 141 start_run = mlflow.tracking.fluent.start_run 142 end_run = mlflow.tracking.fluent.end_run 143 search_runs = mlflow.tracking.fluent.search_runs 144 list_run_infos = mlflow.tracking.fluent.list_run_infos 145 get_artifact_uri = mlflow.tracking.fluent.get_artifact_uri 146 set_tracking_uri = tracking.set_tracking_uri 147 set_registry_uri = tracking.set_registry_uri 148 get_experiment = mlflow.tracking.fluent.get_experiment 149 get_experiment_by_name = mlflow.tracking.fluent.get_experiment_by_name 150 list_experiments = mlflow.tracking.fluent.list_experiments 151 search_experiments = mlflow.tracking.fluent.search_experiments 152 get_tracking_uri = tracking.get_tracking_uri 153 get_registry_uri = tracking.get_registry_uri 154 is_tracking_uri_set = tracking.is_tracking_uri_set 155 create_experiment = mlflow.tracking.fluent.create_experiment 156 set_experiment = mlflow.tracking.fluent.set_experiment 157 log_params = mlflow.tracking.fluent.log_params 158 log_metrics = mlflow.tracking.fluent.log_metrics 159 set_experiment_tags = mlflow.tracking.fluent.set_experiment_tags 160 set_experiment_tag = mlflow.tracking.fluent.set_experiment_tag 161 set_tags = mlflow.tracking.fluent.set_tags 162 delete_experiment = mlflow.tracking.fluent.delete_experiment 163 delete_run = mlflow.tracking.fluent.delete_run 164 register_model = mlflow.tracking._model_registry.fluent.register_model 165 autolog = mlflow.tracking.fluent.autolog 166 evaluate = mlflow.models.evaluate 167 last_active_run = mlflow.tracking.fluent.last_active_run 168 MlflowClient = mlflow.client.MlflowClient 169 170 run = projects.run 171 172 __all__ = [ 173 "ActiveRun", 174 "log_param", 175 "log_params", 176 "log_metric", 177 "log_metrics", 178 "set_experiment_tags", 179 "set_experiment_tag", 180 "set_tag", 181 "set_tags", 182 "delete_tag", 183 "log_artifacts", 184 "log_artifact", 185 "log_text", 186 "log_dict", 187 "log_figure", 188 "log_image", 189 "active_run", 190 "start_run", 191 "end_run", 192 "search_runs", 193 "get_artifact_uri", 194 "get_tracking_uri", 195 "set_tracking_uri", 196 "is_tracking_uri_set", 197 "get_experiment", 198 "get_experiment_by_name", 199 "list_experiments", 200 "search_experiments", 201 "create_experiment", 202 "set_experiment", 203 "delete_experiment", 204 "get_run", 205 "delete_run", 206 "run", 207 "register_model", 208 "get_registry_uri", 209 "set_registry_uri", 210 "list_run_infos", 211 "autolog", 212 "evaluate", 213 "last_active_run", 214 "MlflowClient", 215 ] + _model_flavors_supported 216 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format 
fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mlflow/__init__.py b/mlflow/__init__.py
--- a/mlflow/__init__.py
+++ b/mlflow/__init__.py
@@ -46,6 +46,8 @@
 import mlflow.artifacts
 import mlflow.pipelines
 import mlflow.client
+import mlflow.exceptions
+

 # model flavors
 _model_flavors_supported = []
@@ -166,6 +168,7 @@
 evaluate = mlflow.models.evaluate
 last_active_run = mlflow.tracking.fluent.last_active_run
 MlflowClient = mlflow.client.MlflowClient
+MlflowException = mlflow.exceptions.MlflowException

 run = projects.run

@@ -212,4 +215,5 @@
     "evaluate",
     "last_active_run",
     "MlflowClient",
+    "MlflowException",
 ] + _model_flavors_supported
{"golden_diff": "diff --git a/mlflow/__init__.py b/mlflow/__init__.py\n--- a/mlflow/__init__.py\n+++ b/mlflow/__init__.py\n@@ -46,6 +46,8 @@\n import mlflow.artifacts\n import mlflow.pipelines\n import mlflow.client\n+import mlflow.exceptions\n+\n \n # model flavors\n _model_flavors_supported = []\n@@ -166,6 +168,7 @@\n evaluate = mlflow.models.evaluate\n last_active_run = mlflow.tracking.fluent.last_active_run\n MlflowClient = mlflow.client.MlflowClient\n+MlflowException = mlflow.exceptions.MlflowException\n \n run = projects.run\n \n@@ -212,4 +215,5 @@\n \"evaluate\",\n \"last_active_run\",\n \"MlflowClient\",\n+ \"MlflowException\",\n ] + _model_flavors_supported\n", "issue": "[FR] Add MlflowException on the top level\n### Willingness to contribute\n\nNo. I cannot contribute this feature at this time.\n\n### Proposal Summary\n\nMake `mlflow.exceptions.MlflowException` importable as `mlflow.MlflowException`.\n\n### Motivation\n\n> #### What is the use case for this feature?\r\n\r\nMake it easier for users to catch MLflow errors.\r\n\r\n> #### Why is this use case valuable to support for MLflow users in general?\r\n\r\n^\r\n\r\n> #### Why is this use case valuable to support for your project(s) or organization?\r\n\r\n^\r\n\r\n> #### Why is it currently difficult to achieve this use case?\r\n\r\nThe `mlflow.exceptions` module is not documented, so it's difficult for users to find out how to import and catch `MlflowException`.\n\n### Details\n\n_No response_\n\n### What component(s) does this bug affect?\n\n- [ ] `area/artifacts`: Artifact stores and artifact logging\n- [ ] `area/build`: Build and test infrastructure for MLflow\n- [ ] `area/docs`: MLflow documentation pages\n- [ ] `area/examples`: Example code\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\n- [ ] `area/pipelines`: Pipelines, Pipeline APIs, Pipeline configs, Pipeline Templates\n- [ ] `area/projects`: MLproject format, project running backends\n- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs\n- [ ] `area/server-infra`: MLflow Tracking server backend\n- [X] `area/tracking`: Tracking Service, tracking client APIs, autologging\n\n### What interface(s) does this bug affect?\n\n- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\n- [ ] `area/windows`: Windows support\n\n### What language(s) does this bug affect?\n\n- [ ] `language/r`: R APIs and clients\n- [ ] `language/java`: Java APIs and clients\n- [ ] `language/new`: Proposals for new client languages\n\n### What integration(s) does this bug affect?\n\n- [ ] `integrations/azure`: Azure and Azure ML integrations\n- [ ] `integrations/sagemaker`: SageMaker integrations\n- [ ] `integrations/databricks`: Databricks integrations\n", "before_files": [{"content": "# pylint: disable=wrong-import-position\n\"\"\"\nThe ``mlflow`` module provides a high-level \"fluent\" API for starting and managing MLflow runs.\nFor example:\n\n.. code:: python\n\n import mlflow\n\n mlflow.start_run()\n mlflow.log_param(\"my\", \"param\")\n mlflow.log_metric(\"score\", 100)\n mlflow.end_run()\n\nYou can also use the context manager syntax like this:\n\n.. 
code:: python\n\n with mlflow.start_run() as run:\n mlflow.log_param(\"my\", \"param\")\n mlflow.log_metric(\"score\", 100)\n\nwhich automatically terminates the run at the end of the ``with`` block.\n\nThe fluent tracking API is not currently threadsafe. Any concurrent callers to the tracking API must\nimplement mutual exclusion manually.\n\nFor a lower level API, see the :py:mod:`mlflow.client` module.\n\"\"\"\nfrom mlflow.version import VERSION as __version__ # pylint: disable=unused-import\nfrom mlflow.utils.logging_utils import _configure_mlflow_loggers\nimport mlflow.tracking._model_registry.fluent\nimport mlflow.tracking.fluent\n\n# Filter annoying Cython warnings that serve no good purpose, and so before\n# importing other modules.\n# See: https://github.com/numpy/numpy/pull/432/commits/170ed4e33d6196d7\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\n\nfrom mlflow import projects\nfrom mlflow import tracking\nimport mlflow.models\nimport mlflow.artifacts\nimport mlflow.pipelines\nimport mlflow.client\n\n# model flavors\n_model_flavors_supported = []\ntry:\n # pylint: disable=unused-import\n from mlflow import catboost\n from mlflow import fastai\n from mlflow import gluon\n from mlflow import h2o\n from mlflow import keras\n from mlflow import lightgbm\n from mlflow import mleap\n from mlflow import onnx\n from mlflow import pyfunc\n from mlflow import pytorch\n from mlflow import sklearn\n from mlflow import spacy\n from mlflow import spark\n from mlflow import statsmodels\n from mlflow import tensorflow\n from mlflow import xgboost\n from mlflow import shap\n from mlflow import pyspark\n from mlflow import paddle\n from mlflow import prophet\n from mlflow import pmdarima\n from mlflow import diviner\n\n _model_flavors_supported = [\n \"catboost\",\n \"fastai\",\n \"gluon\",\n \"h2o\",\n \"keras\",\n \"lightgbm\",\n \"mleap\",\n \"onnx\",\n \"pyfunc\",\n \"pytorch\",\n \"sklearn\",\n \"spacy\",\n \"spark\",\n \"statsmodels\",\n \"tensorflow\",\n \"xgboost\",\n \"shap\",\n \"paddle\",\n \"prophet\",\n \"pmdarima\",\n \"diviner\",\n ]\nexcept ImportError as e:\n # We are conditional loading these commands since the skinny client does\n # not support them due to the pandas and numpy dependencies of MLflow Models\n pass\n\n\n_configure_mlflow_loggers(root_module_name=__name__)\n\n# TODO: Comment out this block when we deprecate support for python 3.7.\n# _major = 3\n# _minor = 7\n# _deprecated_version = (_major, _minor)\n# _min_supported_version = (_major, _minor + 1)\n\n# if sys.version_info[:2] == _deprecated_version:\n# warnings.warn(\n# \"MLflow support for Python {dep_ver} is deprecated and will be dropped in \"\n# \"an upcoming release. At that point, existing Python {dep_ver} workflows \"\n# \"that use MLflow will continue to work without modification, but Python {dep_ver} \"\n# \"users will no longer get access to the latest MLflow features and bugfixes. 
\"\n# \"We recommend that you upgrade to Python {min_ver} or newer.\".format(\n# dep_ver=\".\".join(map(str, _deprecated_version)),\n# min_ver=\".\".join(map(str, _min_supported_version)),\n# ),\n# FutureWarning,\n# stacklevel=2,\n# )\n\nActiveRun = mlflow.tracking.fluent.ActiveRun\nlog_param = mlflow.tracking.fluent.log_param\nlog_metric = mlflow.tracking.fluent.log_metric\nset_tag = mlflow.tracking.fluent.set_tag\ndelete_tag = mlflow.tracking.fluent.delete_tag\nlog_artifacts = mlflow.tracking.fluent.log_artifacts\nlog_artifact = mlflow.tracking.fluent.log_artifact\nlog_text = mlflow.tracking.fluent.log_text\nlog_dict = mlflow.tracking.fluent.log_dict\nlog_image = mlflow.tracking.fluent.log_image\nlog_figure = mlflow.tracking.fluent.log_figure\nactive_run = mlflow.tracking.fluent.active_run\nget_run = mlflow.tracking.fluent.get_run\nstart_run = mlflow.tracking.fluent.start_run\nend_run = mlflow.tracking.fluent.end_run\nsearch_runs = mlflow.tracking.fluent.search_runs\nlist_run_infos = mlflow.tracking.fluent.list_run_infos\nget_artifact_uri = mlflow.tracking.fluent.get_artifact_uri\nset_tracking_uri = tracking.set_tracking_uri\nset_registry_uri = tracking.set_registry_uri\nget_experiment = mlflow.tracking.fluent.get_experiment\nget_experiment_by_name = mlflow.tracking.fluent.get_experiment_by_name\nlist_experiments = mlflow.tracking.fluent.list_experiments\nsearch_experiments = mlflow.tracking.fluent.search_experiments\nget_tracking_uri = tracking.get_tracking_uri\nget_registry_uri = tracking.get_registry_uri\nis_tracking_uri_set = tracking.is_tracking_uri_set\ncreate_experiment = mlflow.tracking.fluent.create_experiment\nset_experiment = mlflow.tracking.fluent.set_experiment\nlog_params = mlflow.tracking.fluent.log_params\nlog_metrics = mlflow.tracking.fluent.log_metrics\nset_experiment_tags = mlflow.tracking.fluent.set_experiment_tags\nset_experiment_tag = mlflow.tracking.fluent.set_experiment_tag\nset_tags = mlflow.tracking.fluent.set_tags\ndelete_experiment = mlflow.tracking.fluent.delete_experiment\ndelete_run = mlflow.tracking.fluent.delete_run\nregister_model = mlflow.tracking._model_registry.fluent.register_model\nautolog = mlflow.tracking.fluent.autolog\nevaluate = mlflow.models.evaluate\nlast_active_run = mlflow.tracking.fluent.last_active_run\nMlflowClient = mlflow.client.MlflowClient\n\nrun = projects.run\n\n__all__ = [\n \"ActiveRun\",\n \"log_param\",\n \"log_params\",\n \"log_metric\",\n \"log_metrics\",\n \"set_experiment_tags\",\n \"set_experiment_tag\",\n \"set_tag\",\n \"set_tags\",\n \"delete_tag\",\n \"log_artifacts\",\n \"log_artifact\",\n \"log_text\",\n \"log_dict\",\n \"log_figure\",\n \"log_image\",\n \"active_run\",\n \"start_run\",\n \"end_run\",\n \"search_runs\",\n \"get_artifact_uri\",\n \"get_tracking_uri\",\n \"set_tracking_uri\",\n \"is_tracking_uri_set\",\n \"get_experiment\",\n \"get_experiment_by_name\",\n \"list_experiments\",\n \"search_experiments\",\n \"create_experiment\",\n \"set_experiment\",\n \"delete_experiment\",\n \"get_run\",\n \"delete_run\",\n \"run\",\n \"register_model\",\n \"get_registry_uri\",\n \"set_registry_uri\",\n \"list_run_infos\",\n \"autolog\",\n \"evaluate\",\n \"last_active_run\",\n \"MlflowClient\",\n] + _model_flavors_supported\n", "path": "mlflow/__init__.py"}], "after_files": [{"content": "# pylint: disable=wrong-import-position\n\"\"\"\nThe ``mlflow`` module provides a high-level \"fluent\" API for starting and managing MLflow runs.\nFor example:\n\n.. 
code:: python\n\n import mlflow\n\n mlflow.start_run()\n mlflow.log_param(\"my\", \"param\")\n mlflow.log_metric(\"score\", 100)\n mlflow.end_run()\n\nYou can also use the context manager syntax like this:\n\n.. code:: python\n\n with mlflow.start_run() as run:\n mlflow.log_param(\"my\", \"param\")\n mlflow.log_metric(\"score\", 100)\n\nwhich automatically terminates the run at the end of the ``with`` block.\n\nThe fluent tracking API is not currently threadsafe. Any concurrent callers to the tracking API must\nimplement mutual exclusion manually.\n\nFor a lower level API, see the :py:mod:`mlflow.client` module.\n\"\"\"\nfrom mlflow.version import VERSION as __version__ # pylint: disable=unused-import\nfrom mlflow.utils.logging_utils import _configure_mlflow_loggers\nimport mlflow.tracking._model_registry.fluent\nimport mlflow.tracking.fluent\n\n# Filter annoying Cython warnings that serve no good purpose, and so before\n# importing other modules.\n# See: https://github.com/numpy/numpy/pull/432/commits/170ed4e33d6196d7\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\n\nfrom mlflow import projects\nfrom mlflow import tracking\nimport mlflow.models\nimport mlflow.artifacts\nimport mlflow.pipelines\nimport mlflow.client\nimport mlflow.exceptions\n\n\n# model flavors\n_model_flavors_supported = []\ntry:\n # pylint: disable=unused-import\n from mlflow import catboost\n from mlflow import fastai\n from mlflow import gluon\n from mlflow import h2o\n from mlflow import keras\n from mlflow import lightgbm\n from mlflow import mleap\n from mlflow import onnx\n from mlflow import pyfunc\n from mlflow import pytorch\n from mlflow import sklearn\n from mlflow import spacy\n from mlflow import spark\n from mlflow import statsmodels\n from mlflow import tensorflow\n from mlflow import xgboost\n from mlflow import shap\n from mlflow import pyspark\n from mlflow import paddle\n from mlflow import prophet\n from mlflow import pmdarima\n from mlflow import diviner\n\n _model_flavors_supported = [\n \"catboost\",\n \"fastai\",\n \"gluon\",\n \"h2o\",\n \"keras\",\n \"lightgbm\",\n \"mleap\",\n \"onnx\",\n \"pyfunc\",\n \"pytorch\",\n \"sklearn\",\n \"spacy\",\n \"spark\",\n \"statsmodels\",\n \"tensorflow\",\n \"xgboost\",\n \"shap\",\n \"paddle\",\n \"prophet\",\n \"pmdarima\",\n \"diviner\",\n ]\nexcept ImportError as e:\n # We are conditional loading these commands since the skinny client does\n # not support them due to the pandas and numpy dependencies of MLflow Models\n pass\n\n\n_configure_mlflow_loggers(root_module_name=__name__)\n\n# TODO: Comment out this block when we deprecate support for python 3.7.\n# _major = 3\n# _minor = 7\n# _deprecated_version = (_major, _minor)\n# _min_supported_version = (_major, _minor + 1)\n\n# if sys.version_info[:2] == _deprecated_version:\n# warnings.warn(\n# \"MLflow support for Python {dep_ver} is deprecated and will be dropped in \"\n# \"an upcoming release. At that point, existing Python {dep_ver} workflows \"\n# \"that use MLflow will continue to work without modification, but Python {dep_ver} \"\n# \"users will no longer get access to the latest MLflow features and bugfixes. 
\"\n# \"We recommend that you upgrade to Python {min_ver} or newer.\".format(\n# dep_ver=\".\".join(map(str, _deprecated_version)),\n# min_ver=\".\".join(map(str, _min_supported_version)),\n# ),\n# FutureWarning,\n# stacklevel=2,\n# )\n\nActiveRun = mlflow.tracking.fluent.ActiveRun\nlog_param = mlflow.tracking.fluent.log_param\nlog_metric = mlflow.tracking.fluent.log_metric\nset_tag = mlflow.tracking.fluent.set_tag\ndelete_tag = mlflow.tracking.fluent.delete_tag\nlog_artifacts = mlflow.tracking.fluent.log_artifacts\nlog_artifact = mlflow.tracking.fluent.log_artifact\nlog_text = mlflow.tracking.fluent.log_text\nlog_dict = mlflow.tracking.fluent.log_dict\nlog_image = mlflow.tracking.fluent.log_image\nlog_figure = mlflow.tracking.fluent.log_figure\nactive_run = mlflow.tracking.fluent.active_run\nget_run = mlflow.tracking.fluent.get_run\nstart_run = mlflow.tracking.fluent.start_run\nend_run = mlflow.tracking.fluent.end_run\nsearch_runs = mlflow.tracking.fluent.search_runs\nlist_run_infos = mlflow.tracking.fluent.list_run_infos\nget_artifact_uri = mlflow.tracking.fluent.get_artifact_uri\nset_tracking_uri = tracking.set_tracking_uri\nset_registry_uri = tracking.set_registry_uri\nget_experiment = mlflow.tracking.fluent.get_experiment\nget_experiment_by_name = mlflow.tracking.fluent.get_experiment_by_name\nlist_experiments = mlflow.tracking.fluent.list_experiments\nsearch_experiments = mlflow.tracking.fluent.search_experiments\nget_tracking_uri = tracking.get_tracking_uri\nget_registry_uri = tracking.get_registry_uri\nis_tracking_uri_set = tracking.is_tracking_uri_set\ncreate_experiment = mlflow.tracking.fluent.create_experiment\nset_experiment = mlflow.tracking.fluent.set_experiment\nlog_params = mlflow.tracking.fluent.log_params\nlog_metrics = mlflow.tracking.fluent.log_metrics\nset_experiment_tags = mlflow.tracking.fluent.set_experiment_tags\nset_experiment_tag = mlflow.tracking.fluent.set_experiment_tag\nset_tags = mlflow.tracking.fluent.set_tags\ndelete_experiment = mlflow.tracking.fluent.delete_experiment\ndelete_run = mlflow.tracking.fluent.delete_run\nregister_model = mlflow.tracking._model_registry.fluent.register_model\nautolog = mlflow.tracking.fluent.autolog\nevaluate = mlflow.models.evaluate\nlast_active_run = mlflow.tracking.fluent.last_active_run\nMlflowClient = mlflow.client.MlflowClient\nMlflowException = mlflow.exceptions.MlflowException\n\nrun = projects.run\n\n__all__ = [\n \"ActiveRun\",\n \"log_param\",\n \"log_params\",\n \"log_metric\",\n \"log_metrics\",\n \"set_experiment_tags\",\n \"set_experiment_tag\",\n \"set_tag\",\n \"set_tags\",\n \"delete_tag\",\n \"log_artifacts\",\n \"log_artifact\",\n \"log_text\",\n \"log_dict\",\n \"log_figure\",\n \"log_image\",\n \"active_run\",\n \"start_run\",\n \"end_run\",\n \"search_runs\",\n \"get_artifact_uri\",\n \"get_tracking_uri\",\n \"set_tracking_uri\",\n \"is_tracking_uri_set\",\n \"get_experiment\",\n \"get_experiment_by_name\",\n \"list_experiments\",\n \"search_experiments\",\n \"create_experiment\",\n \"set_experiment\",\n \"delete_experiment\",\n \"get_run\",\n \"delete_run\",\n \"run\",\n \"register_model\",\n \"get_registry_uri\",\n \"set_registry_uri\",\n \"list_run_infos\",\n \"autolog\",\n \"evaluate\",\n \"last_active_run\",\n \"MlflowClient\",\n \"MlflowException\",\n] + _model_flavors_supported\n", "path": "mlflow/__init__.py"}]}
3,091
198
gh_patches_debug_18257
rasdani/github-patches
git_diff
Kinto__kinto-1232
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cache delete method should return deleted value or None Ref https://github.com/Kinto/kinto/pull/1218#discussion_r116852457 Cache delete method should return deleted value or None Ref https://github.com/Kinto/kinto/pull/1218#discussion_r116852457 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kinto/core/cache/memory.py` Content: ``` 1 import logging 2 3 from kinto.core.cache import CacheBase 4 from kinto.core.utils import msec_time 5 from kinto.core.decorators import synchronized 6 7 8 logger = logging.getLogger(__name__) 9 10 11 class Cache(CacheBase): 12 """Cache backend implementation in local process memory. 13 14 Enable in configuration:: 15 16 kinto.cache_backend = kinto.core.cache.memory 17 18 :noindex: 19 """ 20 21 def __init__(self, *args, **kwargs): 22 super().__init__(*args, **kwargs) 23 self.flush() 24 25 def initialize_schema(self, dry_run=False): 26 # Nothing to do. 27 pass 28 29 def flush(self): 30 self._created_at = {} 31 self._ttl = {} 32 self._store = {} 33 self._quota = 0 34 35 def _clean_expired(self): 36 current = msec_time() 37 expired = [k for k, v in self._ttl.items() if current >= v] 38 for expired_item_key in expired: 39 self.delete(expired_item_key[len(self.prefix):]) 40 41 def _clean_oversized(self): 42 if self._quota < self.max_size_bytes: 43 return 44 45 for key, value in sorted(self._created_at.items(), key=lambda k: k[1]): 46 if self._quota < (self.max_size_bytes * 0.8): 47 break 48 self.delete(key[len(self.prefix):]) 49 50 @synchronized 51 def ttl(self, key): 52 ttl = self._ttl.get(self.prefix + key) 53 if ttl is not None: 54 return (ttl - msec_time()) / 1000.0 55 return -1 56 57 @synchronized 58 def expire(self, key, ttl): 59 self._ttl[self.prefix + key] = msec_time() + int(ttl * 1000.0) 60 61 @synchronized 62 def set(self, key, value, ttl): 63 if isinstance(value, bytes): 64 raise TypeError("a string-like object is required, not 'bytes'") 65 self._clean_expired() 66 self._clean_oversized() 67 self.expire(key, ttl) 68 item_key = self.prefix + key 69 self._store[item_key] = value 70 self._created_at[item_key] = msec_time() 71 self._quota += size_of(item_key, value) 72 73 @synchronized 74 def get(self, key): 75 self._clean_expired() 76 return self._store.get(self.prefix + key) 77 78 @synchronized 79 def delete(self, key): 80 key = self.prefix + key 81 self._ttl.pop(key, None) 82 self._created_at.pop(key, None) 83 value = self._store.pop(key, None) 84 self._quota -= size_of(key, value) 85 86 87 def load_from_config(config): 88 settings = config.get_settings() 89 return Cache(cache_prefix=settings['cache_prefix'], 90 cache_max_size_bytes=settings['cache_max_size_bytes']) 91 92 93 def size_of(key, value): 94 # Key used for ttl, created_at and store. 95 # Int size is 24 bytes one for ttl and one for created_at values 96 return len(key) * 3 + len(str(value)) + 24 * 2 97 ``` Path: `kinto/core/cache/postgresql/__init__.py` Content: ``` 1 import logging 2 import os 3 4 from kinto.core.cache import CacheBase 5 from kinto.core.storage.postgresql.client import create_from_config 6 from kinto.core.utils import json 7 8 9 logger = logging.getLogger(__name__) 10 11 12 class Cache(CacheBase): 13 """Cache backend using PostgreSQL. 
14 15 Enable in configuration:: 16 17 kinto.cache_backend = kinto.core.cache.postgresql 18 19 Database location URI can be customized:: 20 21 kinto.cache_url = postgres://user:[email protected]:5432/dbname 22 23 Alternatively, username and password could also rely on system user ident 24 or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*). 25 26 .. note:: 27 28 Some tables and indices are created when ``kinto migrate`` is run. 29 This requires some privileges on the database, or some error will 30 be raised. 31 32 **Alternatively**, the schema can be initialized outside the 33 python application, using the SQL file located in 34 :file:`kinto/core/cache/postgresql/schema.sql`. This allows to 35 distinguish schema manipulation privileges from schema usage. 36 37 38 A connection pool is enabled by default:: 39 40 kinto.cache_pool_size = 10 41 kinto.cache_maxoverflow = 10 42 kinto.cache_max_backlog = -1 43 kinto.cache_pool_recycle = -1 44 kinto.cache_pool_timeout = 30 45 kinto.cache_poolclass = 46 kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog 47 48 The ``max_backlog`` limits the number of threads that can be in the queue 49 waiting for a connection. Once this limit has been reached, any further 50 attempts to acquire a connection will be rejected immediately, instead of 51 locking up all threads by keeping them waiting in the queue. 52 53 See `dedicated section in SQLAlchemy documentation 54 <http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_ 55 for default values and behaviour. 56 57 .. note:: 58 59 Using a `dedicated connection pool <http://pgpool.net>`_ is still 60 recommended to allow load balancing, replication or limit the number 61 of connections used in a multi-process deployment. 62 63 :noindex: 64 """ # NOQA 65 def __init__(self, client, *args, **kwargs): 66 super().__init__(*args, **kwargs) 67 self.client = client 68 69 def initialize_schema(self, dry_run=False): 70 # Check if cache table exists. 71 query = """ 72 SELECT 1 73 FROM information_schema.tables 74 WHERE table_name = 'cache'; 75 """ 76 with self.client.connect(readonly=True) as conn: 77 result = conn.execute(query) 78 if result.rowcount > 0: 79 logger.info("PostgreSQL cache schema is up-to-date.") 80 return 81 82 # Create schema 83 here = os.path.abspath(os.path.dirname(__file__)) 84 sql_file = os.path.join(here, 'schema.sql') 85 86 if dry_run: 87 logger.info("Create cache schema from '{}'".format(sql_file)) 88 return 89 90 # Since called outside request, force commit. 91 with open(sql_file) as f: 92 schema = f.read() 93 with self.client.connect(force_commit=True) as conn: 94 conn.execute(schema) 95 logger.info('Created PostgreSQL cache tables') 96 97 def flush(self): 98 query = """ 99 DELETE FROM cache; 100 """ 101 # Since called outside request (e.g. tests), force commit. 
102 with self.client.connect(force_commit=True) as conn: 103 conn.execute(query) 104 logger.debug('Flushed PostgreSQL cache tables') 105 106 def ttl(self, key): 107 query = """ 108 SELECT EXTRACT(SECOND FROM (ttl - now())) AS ttl 109 FROM cache 110 WHERE key = :key 111 AND ttl IS NOT NULL; 112 """ 113 with self.client.connect(readonly=True) as conn: 114 result = conn.execute(query, dict(key=self.prefix + key)) 115 if result.rowcount > 0: 116 return result.fetchone()['ttl'] 117 return -1 118 119 def expire(self, key, ttl): 120 query = """ 121 UPDATE cache SET ttl = sec2ttl(:ttl) WHERE key = :key; 122 """ 123 with self.client.connect() as conn: 124 conn.execute(query, dict(ttl=ttl, key=self.prefix + key)) 125 126 def set(self, key, value, ttl): 127 if isinstance(value, bytes): 128 raise TypeError("a string-like object is required, not 'bytes'") 129 130 query = """ 131 INSERT INTO cache (key, value, ttl) 132 VALUES (:key, :value, sec2ttl(:ttl)) 133 ON CONFLICT (key) DO UPDATE 134 SET value = :value, 135 ttl = sec2ttl(:ttl); 136 """ 137 value = json.dumps(value) 138 with self.client.connect() as conn: 139 conn.execute(query, dict(key=self.prefix + key, 140 value=value, ttl=ttl)) 141 142 def get(self, key): 143 purge = "DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;" 144 query = "SELECT value FROM cache WHERE key = :key;" 145 with self.client.connect() as conn: 146 conn.execute(purge) 147 result = conn.execute(query, dict(key=self.prefix + key)) 148 if result.rowcount > 0: 149 value = result.fetchone()['value'] 150 return json.loads(value) 151 152 def delete(self, key): 153 query = "DELETE FROM cache WHERE key = :key" 154 with self.client.connect() as conn: 155 conn.execute(query, dict(key=self.prefix + key)) 156 157 158 def load_from_config(config): 159 settings = config.get_settings() 160 client = create_from_config(config, prefix='cache_', with_transaction=False) 161 return Cache(client=client, cache_prefix=settings['cache_prefix']) 162 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/core/cache/memory.py b/kinto/core/cache/memory.py
--- a/kinto/core/cache/memory.py
+++ b/kinto/core/cache/memory.py
@@ -82,6 +82,7 @@
         self._created_at.pop(key, None)
         value = self._store.pop(key, None)
         self._quota -= size_of(key, value)
+        return value


 def load_from_config(config):
diff --git a/kinto/core/cache/postgresql/__init__.py b/kinto/core/cache/postgresql/__init__.py
--- a/kinto/core/cache/postgresql/__init__.py
+++ b/kinto/core/cache/postgresql/__init__.py
@@ -150,9 +150,13 @@
                 return json.loads(value)

     def delete(self, key):
-        query = "DELETE FROM cache WHERE key = :key"
+        query = "DELETE FROM cache WHERE key = :key RETURNING value;"
         with self.client.connect() as conn:
-            conn.execute(query, dict(key=self.prefix + key))
+            result = conn.execute(query, dict(key=self.prefix + key))
+            if result.rowcount > 0:
+                value = result.fetchone()['value']
+                return json.loads(value)
+            return None


 def load_from_config(config):
{"golden_diff": "diff --git a/kinto/core/cache/memory.py b/kinto/core/cache/memory.py\n--- a/kinto/core/cache/memory.py\n+++ b/kinto/core/cache/memory.py\n@@ -82,6 +82,7 @@\n self._created_at.pop(key, None)\n value = self._store.pop(key, None)\n self._quota -= size_of(key, value)\n+ return value\n \n \n def load_from_config(config):\ndiff --git a/kinto/core/cache/postgresql/__init__.py b/kinto/core/cache/postgresql/__init__.py\n--- a/kinto/core/cache/postgresql/__init__.py\n+++ b/kinto/core/cache/postgresql/__init__.py\n@@ -150,9 +150,13 @@\n return json.loads(value)\n \n def delete(self, key):\n- query = \"DELETE FROM cache WHERE key = :key\"\n+ query = \"DELETE FROM cache WHERE key = :key RETURNING value;\"\n with self.client.connect() as conn:\n- conn.execute(query, dict(key=self.prefix + key))\n+ result = conn.execute(query, dict(key=self.prefix + key))\n+ if result.rowcount > 0:\n+ value = result.fetchone()['value']\n+ return json.loads(value)\n+ return None\n \n \n def load_from_config(config):\n", "issue": "Cache delete method should return deleted value or None\nRef https://github.com/Kinto/kinto/pull/1218#discussion_r116852457\nCache delete method should return deleted value or None\nRef https://github.com/Kinto/kinto/pull/1218#discussion_r116852457\n", "before_files": [{"content": "import logging\n\nfrom kinto.core.cache import CacheBase\nfrom kinto.core.utils import msec_time\nfrom kinto.core.decorators import synchronized\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Cache(CacheBase):\n \"\"\"Cache backend implementation in local process memory.\n\n Enable in configuration::\n\n kinto.cache_backend = kinto.core.cache.memory\n\n :noindex:\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.flush()\n\n def initialize_schema(self, dry_run=False):\n # Nothing to do.\n pass\n\n def flush(self):\n self._created_at = {}\n self._ttl = {}\n self._store = {}\n self._quota = 0\n\n def _clean_expired(self):\n current = msec_time()\n expired = [k for k, v in self._ttl.items() if current >= v]\n for expired_item_key in expired:\n self.delete(expired_item_key[len(self.prefix):])\n\n def _clean_oversized(self):\n if self._quota < self.max_size_bytes:\n return\n\n for key, value in sorted(self._created_at.items(), key=lambda k: k[1]):\n if self._quota < (self.max_size_bytes * 0.8):\n break\n self.delete(key[len(self.prefix):])\n\n @synchronized\n def ttl(self, key):\n ttl = self._ttl.get(self.prefix + key)\n if ttl is not None:\n return (ttl - msec_time()) / 1000.0\n return -1\n\n @synchronized\n def expire(self, key, ttl):\n self._ttl[self.prefix + key] = msec_time() + int(ttl * 1000.0)\n\n @synchronized\n def set(self, key, value, ttl):\n if isinstance(value, bytes):\n raise TypeError(\"a string-like object is required, not 'bytes'\")\n self._clean_expired()\n self._clean_oversized()\n self.expire(key, ttl)\n item_key = self.prefix + key\n self._store[item_key] = value\n self._created_at[item_key] = msec_time()\n self._quota += size_of(item_key, value)\n\n @synchronized\n def get(self, key):\n self._clean_expired()\n return self._store.get(self.prefix + key)\n\n @synchronized\n def delete(self, key):\n key = self.prefix + key\n self._ttl.pop(key, None)\n self._created_at.pop(key, None)\n value = self._store.pop(key, None)\n self._quota -= size_of(key, value)\n\n\ndef load_from_config(config):\n settings = config.get_settings()\n return Cache(cache_prefix=settings['cache_prefix'],\n cache_max_size_bytes=settings['cache_max_size_bytes'])\n\n\ndef 
size_of(key, value):\n # Key used for ttl, created_at and store.\n # Int size is 24 bytes one for ttl and one for created_at values\n return len(key) * 3 + len(str(value)) + 24 * 2\n", "path": "kinto/core/cache/memory.py"}, {"content": "import logging\nimport os\n\nfrom kinto.core.cache import CacheBase\nfrom kinto.core.storage.postgresql.client import create_from_config\nfrom kinto.core.utils import json\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Cache(CacheBase):\n \"\"\"Cache backend using PostgreSQL.\n\n Enable in configuration::\n\n kinto.cache_backend = kinto.core.cache.postgresql\n\n Database location URI can be customized::\n\n kinto.cache_url = postgres://user:[email protected]:5432/dbname\n\n Alternatively, username and password could also rely on system user ident\n or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*).\n\n .. note::\n\n Some tables and indices are created when ``kinto migrate`` is run.\n This requires some privileges on the database, or some error will\n be raised.\n\n **Alternatively**, the schema can be initialized outside the\n python application, using the SQL file located in\n :file:`kinto/core/cache/postgresql/schema.sql`. This allows to\n distinguish schema manipulation privileges from schema usage.\n\n\n A connection pool is enabled by default::\n\n kinto.cache_pool_size = 10\n kinto.cache_maxoverflow = 10\n kinto.cache_max_backlog = -1\n kinto.cache_pool_recycle = -1\n kinto.cache_pool_timeout = 30\n kinto.cache_poolclass =\n kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog\n\n The ``max_backlog`` limits the number of threads that can be in the queue\n waiting for a connection. Once this limit has been reached, any further\n attempts to acquire a connection will be rejected immediately, instead of\n locking up all threads by keeping them waiting in the queue.\n\n See `dedicated section in SQLAlchemy documentation\n <http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_\n for default values and behaviour.\n\n .. note::\n\n Using a `dedicated connection pool <http://pgpool.net>`_ is still\n recommended to allow load balancing, replication or limit the number\n of connections used in a multi-process deployment.\n\n :noindex:\n \"\"\" # NOQA\n def __init__(self, client, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.client = client\n\n def initialize_schema(self, dry_run=False):\n # Check if cache table exists.\n query = \"\"\"\n SELECT 1\n FROM information_schema.tables\n WHERE table_name = 'cache';\n \"\"\"\n with self.client.connect(readonly=True) as conn:\n result = conn.execute(query)\n if result.rowcount > 0:\n logger.info(\"PostgreSQL cache schema is up-to-date.\")\n return\n\n # Create schema\n here = os.path.abspath(os.path.dirname(__file__))\n sql_file = os.path.join(here, 'schema.sql')\n\n if dry_run:\n logger.info(\"Create cache schema from '{}'\".format(sql_file))\n return\n\n # Since called outside request, force commit.\n with open(sql_file) as f:\n schema = f.read()\n with self.client.connect(force_commit=True) as conn:\n conn.execute(schema)\n logger.info('Created PostgreSQL cache tables')\n\n def flush(self):\n query = \"\"\"\n DELETE FROM cache;\n \"\"\"\n # Since called outside request (e.g. 
tests), force commit.\n with self.client.connect(force_commit=True) as conn:\n conn.execute(query)\n logger.debug('Flushed PostgreSQL cache tables')\n\n def ttl(self, key):\n query = \"\"\"\n SELECT EXTRACT(SECOND FROM (ttl - now())) AS ttl\n FROM cache\n WHERE key = :key\n AND ttl IS NOT NULL;\n \"\"\"\n with self.client.connect(readonly=True) as conn:\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n return result.fetchone()['ttl']\n return -1\n\n def expire(self, key, ttl):\n query = \"\"\"\n UPDATE cache SET ttl = sec2ttl(:ttl) WHERE key = :key;\n \"\"\"\n with self.client.connect() as conn:\n conn.execute(query, dict(ttl=ttl, key=self.prefix + key))\n\n def set(self, key, value, ttl):\n if isinstance(value, bytes):\n raise TypeError(\"a string-like object is required, not 'bytes'\")\n\n query = \"\"\"\n INSERT INTO cache (key, value, ttl)\n VALUES (:key, :value, sec2ttl(:ttl))\n ON CONFLICT (key) DO UPDATE\n SET value = :value,\n ttl = sec2ttl(:ttl);\n \"\"\"\n value = json.dumps(value)\n with self.client.connect() as conn:\n conn.execute(query, dict(key=self.prefix + key,\n value=value, ttl=ttl))\n\n def get(self, key):\n purge = \"DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;\"\n query = \"SELECT value FROM cache WHERE key = :key;\"\n with self.client.connect() as conn:\n conn.execute(purge)\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n value = result.fetchone()['value']\n return json.loads(value)\n\n def delete(self, key):\n query = \"DELETE FROM cache WHERE key = :key\"\n with self.client.connect() as conn:\n conn.execute(query, dict(key=self.prefix + key))\n\n\ndef load_from_config(config):\n settings = config.get_settings()\n client = create_from_config(config, prefix='cache_', with_transaction=False)\n return Cache(client=client, cache_prefix=settings['cache_prefix'])\n", "path": "kinto/core/cache/postgresql/__init__.py"}], "after_files": [{"content": "import logging\n\nfrom kinto.core.cache import CacheBase\nfrom kinto.core.utils import msec_time\nfrom kinto.core.decorators import synchronized\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Cache(CacheBase):\n \"\"\"Cache backend implementation in local process memory.\n\n Enable in configuration::\n\n kinto.cache_backend = kinto.core.cache.memory\n\n :noindex:\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.flush()\n\n def initialize_schema(self, dry_run=False):\n # Nothing to do.\n pass\n\n def flush(self):\n self._created_at = {}\n self._ttl = {}\n self._store = {}\n self._quota = 0\n\n def _clean_expired(self):\n current = msec_time()\n expired = [k for k, v in self._ttl.items() if current >= v]\n for expired_item_key in expired:\n self.delete(expired_item_key[len(self.prefix):])\n\n def _clean_oversized(self):\n if self._quota < self.max_size_bytes:\n return\n\n for key, value in sorted(self._created_at.items(), key=lambda k: k[1]):\n if self._quota < (self.max_size_bytes * 0.8):\n break\n self.delete(key[len(self.prefix):])\n\n @synchronized\n def ttl(self, key):\n ttl = self._ttl.get(self.prefix + key)\n if ttl is not None:\n return (ttl - msec_time()) / 1000.0\n return -1\n\n @synchronized\n def expire(self, key, ttl):\n self._ttl[self.prefix + key] = msec_time() + int(ttl * 1000.0)\n\n @synchronized\n def set(self, key, value, ttl):\n if isinstance(value, bytes):\n raise TypeError(\"a string-like object is required, not 'bytes'\")\n self._clean_expired()\n 
self._clean_oversized()\n self.expire(key, ttl)\n item_key = self.prefix + key\n self._store[item_key] = value\n self._created_at[item_key] = msec_time()\n self._quota += size_of(item_key, value)\n\n @synchronized\n def get(self, key):\n self._clean_expired()\n return self._store.get(self.prefix + key)\n\n @synchronized\n def delete(self, key):\n key = self.prefix + key\n self._ttl.pop(key, None)\n self._created_at.pop(key, None)\n value = self._store.pop(key, None)\n self._quota -= size_of(key, value)\n return value\n\n\ndef load_from_config(config):\n settings = config.get_settings()\n return Cache(cache_prefix=settings['cache_prefix'],\n cache_max_size_bytes=settings['cache_max_size_bytes'])\n\n\ndef size_of(key, value):\n # Key used for ttl, created_at and store.\n # Int size is 24 bytes one for ttl and one for created_at values\n return len(key) * 3 + len(str(value)) + 24 * 2\n", "path": "kinto/core/cache/memory.py"}, {"content": "import logging\nimport os\n\nfrom kinto.core.cache import CacheBase\nfrom kinto.core.storage.postgresql.client import create_from_config\nfrom kinto.core.utils import json\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Cache(CacheBase):\n \"\"\"Cache backend using PostgreSQL.\n\n Enable in configuration::\n\n kinto.cache_backend = kinto.core.cache.postgresql\n\n Database location URI can be customized::\n\n kinto.cache_url = postgres://user:[email protected]:5432/dbname\n\n Alternatively, username and password could also rely on system user ident\n or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*).\n\n .. note::\n\n Some tables and indices are created when ``kinto migrate`` is run.\n This requires some privileges on the database, or some error will\n be raised.\n\n **Alternatively**, the schema can be initialized outside the\n python application, using the SQL file located in\n :file:`kinto/core/cache/postgresql/schema.sql`. This allows to\n distinguish schema manipulation privileges from schema usage.\n\n\n A connection pool is enabled by default::\n\n kinto.cache_pool_size = 10\n kinto.cache_maxoverflow = 10\n kinto.cache_max_backlog = -1\n kinto.cache_pool_recycle = -1\n kinto.cache_pool_timeout = 30\n kinto.cache_poolclass =\n kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog\n\n The ``max_backlog`` limits the number of threads that can be in the queue\n waiting for a connection. Once this limit has been reached, any further\n attempts to acquire a connection will be rejected immediately, instead of\n locking up all threads by keeping them waiting in the queue.\n\n See `dedicated section in SQLAlchemy documentation\n <http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_\n for default values and behaviour.\n\n .. 
note::\n\n Using a `dedicated connection pool <http://pgpool.net>`_ is still\n recommended to allow load balancing, replication or limit the number\n of connections used in a multi-process deployment.\n\n :noindex:\n \"\"\" # NOQA\n def __init__(self, client, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.client = client\n\n def initialize_schema(self, dry_run=False):\n # Check if cache table exists.\n query = \"\"\"\n SELECT 1\n FROM information_schema.tables\n WHERE table_name = 'cache';\n \"\"\"\n with self.client.connect(readonly=True) as conn:\n result = conn.execute(query)\n if result.rowcount > 0:\n logger.info(\"PostgreSQL cache schema is up-to-date.\")\n return\n\n # Create schema\n here = os.path.abspath(os.path.dirname(__file__))\n sql_file = os.path.join(here, 'schema.sql')\n\n if dry_run:\n logger.info(\"Create cache schema from '{}'\".format(sql_file))\n return\n\n # Since called outside request, force commit.\n with open(sql_file) as f:\n schema = f.read()\n with self.client.connect(force_commit=True) as conn:\n conn.execute(schema)\n logger.info('Created PostgreSQL cache tables')\n\n def flush(self):\n query = \"\"\"\n DELETE FROM cache;\n \"\"\"\n # Since called outside request (e.g. tests), force commit.\n with self.client.connect(force_commit=True) as conn:\n conn.execute(query)\n logger.debug('Flushed PostgreSQL cache tables')\n\n def ttl(self, key):\n query = \"\"\"\n SELECT EXTRACT(SECOND FROM (ttl - now())) AS ttl\n FROM cache\n WHERE key = :key\n AND ttl IS NOT NULL;\n \"\"\"\n with self.client.connect(readonly=True) as conn:\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n return result.fetchone()['ttl']\n return -1\n\n def expire(self, key, ttl):\n query = \"\"\"\n UPDATE cache SET ttl = sec2ttl(:ttl) WHERE key = :key;\n \"\"\"\n with self.client.connect() as conn:\n conn.execute(query, dict(ttl=ttl, key=self.prefix + key))\n\n def set(self, key, value, ttl):\n if isinstance(value, bytes):\n raise TypeError(\"a string-like object is required, not 'bytes'\")\n\n query = \"\"\"\n INSERT INTO cache (key, value, ttl)\n VALUES (:key, :value, sec2ttl(:ttl))\n ON CONFLICT (key) DO UPDATE\n SET value = :value,\n ttl = sec2ttl(:ttl);\n \"\"\"\n value = json.dumps(value)\n with self.client.connect() as conn:\n conn.execute(query, dict(key=self.prefix + key,\n value=value, ttl=ttl))\n\n def get(self, key):\n purge = \"DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;\"\n query = \"SELECT value FROM cache WHERE key = :key;\"\n with self.client.connect() as conn:\n conn.execute(purge)\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n value = result.fetchone()['value']\n return json.loads(value)\n\n def delete(self, key):\n query = \"DELETE FROM cache WHERE key = :key RETURNING value;\"\n with self.client.connect() as conn:\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n value = result.fetchone()['value']\n return json.loads(value)\n return None\n\n\ndef load_from_config(config):\n settings = config.get_settings()\n client = create_from_config(config, prefix='cache_', with_transaction=False)\n return Cache(client=client, cache_prefix=settings['cache_prefix'])\n", "path": "kinto/core/cache/postgresql/__init__.py"}]}
2,844
279
gh_patches_debug_7874
rasdani/github-patches
git_diff
google__openhtf-239
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error Attaching Wav Files using test.AttachFromFile(path_to_wav) results in UnicodeDecodeError: 'utf8' codec can't decode byte 0xa4 in position 5: invalid start byte --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `openhtf/exe/phase_data.py` Content: ``` 1 # Copyright 2014 Google Inc. All Rights Reserved. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 16 """Module encapsulating test phase data. 17 18 HTF tests are comprised of a series of test phases. These test phases are 19 wrapped in openhtf.PhaseInfo objects to keep track of some necessary 20 state. This wrapping happens by decorating a method with any of various 21 supported decorators. 22 """ 23 24 import contextlib2 25 import copy 26 import logging 27 import mimetypes 28 29 import mutablerecords 30 31 from openhtf import util 32 from openhtf.io import test_record 33 from openhtf.util import measurements 34 35 _LOG = logging.getLogger(__name__) 36 37 38 class DuplicateAttachmentError(Exception): 39 """Raised when two attachments are attached with the same name.""" 40 41 42 class OutcomeWrapper(mutablerecords.Record( 43 'OutcomeWrapper', [], {'outcome': None})): 44 """Wrapper so yielded object can receive an outcome.""" 45 def SetOutcome(self, outcome): 46 self.outcome = outcome 47 48 49 class PhaseData(object): # pylint: disable=too-many-instance-attributes 50 """The phase data object passed to test phases as the first argument. 51 52 Fields: 53 logger: A python logger that goes to the testrun proto, with functions like 54 debug, info, warn, error, and exception. 55 state: A dictionary for passing state data along to future phases. 56 plug_map: Dict mapping plug types to instances to use in phases. 57 measurements: A measurements.Collection for setting measurement values. 58 context: A contextlib.ExitStack, which simplifies context managers in a 59 phase. This stack is pop'd after each phase. 60 test_record: The test_record.TestRecord for the currently running test. 61 """ 62 def __init__(self, logger, plug_map, record): 63 self.logger = logger 64 self.plug_map = plug_map 65 self.test_record = record 66 self.state = {} 67 self.measurements = None # Will be populated per-phase. 68 self.attachments = {} 69 self.context = contextlib2.ExitStack() 70 71 def _asdict(self): 72 """Return a dict of this PhaseData's public data.""" 73 return {'measurements': self.measurements, 74 'attachments': self.attachments.keys(), 75 'plugs': { 76 k.__module__ + '.' + k.__name__: str(v) 77 for k, v in self.plug_map.iteritems()}} 78 79 def Attach(self, name, data, mimetype=None): 80 """Store the given data as an attachment with the given name. 81 82 Args: 83 name: Attachment name under which to store this data. 84 data: Data to attach. 85 mimetype: If provided, will be saved in the attachment. 
86 87 Raises: 88 DuplicateAttachmentError: Raised if there is already an attachment with 89 the given name. 90 """ 91 if name in self.attachments: 92 raise DuplicateAttachmentError('Duplicate attachment for %s' % name) 93 if mimetype and not mimetypes.guess_extension(mimetype): 94 _LOG.warning('Unrecognized MIME type: "%s" for attachment "%s"', 95 mimetype, name) 96 self.attachments[name] = test_record.Attachment(data, mimetype) 97 98 def AttachFromFile(self, filename, name=None, mimetype=None): 99 """Store the contents of the given filename as an attachment. 100 101 Args: 102 filename: The file to read data from to attach. 103 name: If provided, override the attachment name, otherwise it will 104 default to the filename. 105 mimetype: If provided, override the attachment mime type, otherwise the 106 mime type will be guessed based on the file extension. 107 108 Raises: 109 DuplicateAttachmentError: Raised if there is already an attachment with 110 the given name. 111 IOError: Raised if the given filename couldn't be opened. 112 """ 113 with open(filename, 'r') as f: # pylint: disable=invalid-name 114 self.Attach( 115 name if name is not None else filename, f.read(), 116 mimetype=mimetype if mimetype is not None else mimetypes.guess_type( 117 filename)[0]) 118 119 @contextlib2.contextmanager 120 def RecordPhaseTiming(self, phase, test_state): 121 """Context manager for the execution of a single phase.""" 122 123 # Check for measurement descriptors and track them in the PhaseRecord. 124 measurement_map = { 125 measurement.name: copy.deepcopy(measurement) 126 for measurement in phase.measurements 127 } 128 129 # Populate dummy declaration list for frontend API. 130 test_state.running_phase_record.measurements = { 131 measurement.name: measurement._asdict() 132 for measurement in measurement_map.itervalues() 133 } 134 test_state.phase_data.measurements = ( 135 measurements.Collection(measurement_map)) 136 test_state.phase_data.attachments = {} 137 test_state.running_phase_record.start_time_millis = util.TimeMillis() 138 139 outcome_wrapper = OutcomeWrapper() 140 141 try: 142 yield outcome_wrapper 143 finally: 144 # Serialize measurements and measured values, validate as we go. 145 values = dict(test_state.phase_data.measurements) 146 147 # Initialize with already-validated and UNSET measurements. 148 validated_measurements = { 149 name: measurement for name, measurement in measurement_map.iteritems() 150 if measurement.outcome is not measurements.Outcome.PARTIALLY_SET 151 } 152 153 # Validate multi-dimensional measurements now that we have all values. 154 validated_measurements.update({ 155 name: measurement.Validate(values[name]) 156 for name, measurement in measurement_map.iteritems() 157 if measurement.outcome is measurements.Outcome.PARTIALLY_SET 158 }) 159 160 # Fill out and append the PhaseRecord to our test_record. 161 test_state.running_phase_record.measured_values = values 162 test_state.running_phase_record.measurements = validated_measurements 163 test_state.running_phase_record.end_time_millis = util.TimeMillis() 164 test_state.running_phase_record.result = outcome_wrapper.outcome 165 test_state.running_phase_record.attachments.update(self.attachments) 166 self.test_record.phases.append(test_state.running_phase_record) 167 168 # Clear these between uses for the frontend API. 
169 test_state.phase_data.measurements = None 170 test_state.phase_data.attachments = {} 171 test_state.running_phase_record = None 172 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openhtf/exe/phase_data.py b/openhtf/exe/phase_data.py --- a/openhtf/exe/phase_data.py +++ b/openhtf/exe/phase_data.py @@ -110,7 +110,7 @@ the given name. IOError: Raised if the given filename couldn't be opened. """ - with open(filename, 'r') as f: # pylint: disable=invalid-name + with open(filename, 'rb') as f: # pylint: disable=invalid-name self.Attach( name if name is not None else filename, f.read(), mimetype=mimetype if mimetype is not None else mimetypes.guess_type(
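A quick aside on the one-character fix above (`'r'` → `'rb'`): the sketch below is standalone Python, not openhtf code, with a made-up header byte chosen to land a non-UTF-8 value at position 5 like the error quoted in the issue. It shows why a text-mode read trips over WAV bytes while a binary read does not (on Python 3 the text handle decodes the bytes, typically as UTF-8).

```python
# Standalone sketch: why open(..., 'r') chokes on WAV bytes while 'rb' does not.
# The fake header puts a non-UTF-8 byte (0xa4) at position 5, mirroring the
# UnicodeDecodeError quoted in the issue.
import os
import tempfile

with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
    tmp.write(b"RIFF\x00\xa4\x00\x00WAVEfmt ")
    path = tmp.name

try:
    with open(path, "r") as f:       # pre-patch behaviour: text mode decodes
        f.read()
except UnicodeDecodeError as err:
    print("text mode:", err)

with open(path, "rb") as f:          # patched behaviour: raw bytes, no decoding
    print("binary mode:", len(f.read()), "bytes read")

os.remove(path)
```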
{"golden_diff": "diff --git a/openhtf/exe/phase_data.py b/openhtf/exe/phase_data.py\n--- a/openhtf/exe/phase_data.py\n+++ b/openhtf/exe/phase_data.py\n@@ -110,7 +110,7 @@\n the given name.\n IOError: Raised if the given filename couldn't be opened.\n \"\"\"\n- with open(filename, 'r') as f: # pylint: disable=invalid-name\n+ with open(filename, 'rb') as f: # pylint: disable=invalid-name\n self.Attach(\n name if name is not None else filename, f.read(),\n mimetype=mimetype if mimetype is not None else mimetypes.guess_type(\n", "issue": "Error Attaching Wav Files\nusing test.AttachFromFile(path_to_wav) results in UnicodeDecodeError: 'utf8' codec can't decode byte 0xa4 in position 5: invalid start byte\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Module encapsulating test phase data.\n\nHTF tests are comprised of a series of test phases. These test phases are\nwrapped in openhtf.PhaseInfo objects to keep track of some necessary\nstate. This wrapping happens by decorating a method with any of various\nsupported decorators.\n\"\"\"\n\nimport contextlib2\nimport copy\nimport logging\nimport mimetypes\n\nimport mutablerecords\n\nfrom openhtf import util\nfrom openhtf.io import test_record\nfrom openhtf.util import measurements\n\n_LOG = logging.getLogger(__name__)\n\n\nclass DuplicateAttachmentError(Exception):\n \"\"\"Raised when two attachments are attached with the same name.\"\"\"\n\n\nclass OutcomeWrapper(mutablerecords.Record(\n 'OutcomeWrapper', [], {'outcome': None})):\n \"\"\"Wrapper so yielded object can receive an outcome.\"\"\"\n def SetOutcome(self, outcome):\n self.outcome = outcome\n\n\nclass PhaseData(object): # pylint: disable=too-many-instance-attributes\n \"\"\"The phase data object passed to test phases as the first argument.\n\n Fields:\n logger: A python logger that goes to the testrun proto, with functions like\n debug, info, warn, error, and exception.\n state: A dictionary for passing state data along to future phases.\n plug_map: Dict mapping plug types to instances to use in phases.\n measurements: A measurements.Collection for setting measurement values.\n context: A contextlib.ExitStack, which simplifies context managers in a\n phase. This stack is pop'd after each phase.\n test_record: The test_record.TestRecord for the currently running test.\n \"\"\"\n def __init__(self, logger, plug_map, record):\n self.logger = logger\n self.plug_map = plug_map\n self.test_record = record\n self.state = {}\n self.measurements = None # Will be populated per-phase.\n self.attachments = {}\n self.context = contextlib2.ExitStack()\n\n def _asdict(self):\n \"\"\"Return a dict of this PhaseData's public data.\"\"\"\n return {'measurements': self.measurements,\n 'attachments': self.attachments.keys(),\n 'plugs': {\n k.__module__ + '.' 
+ k.__name__: str(v)\n for k, v in self.plug_map.iteritems()}}\n\n def Attach(self, name, data, mimetype=None):\n \"\"\"Store the given data as an attachment with the given name.\n\n Args:\n name: Attachment name under which to store this data.\n data: Data to attach.\n mimetype: If provided, will be saved in the attachment.\n\n Raises:\n DuplicateAttachmentError: Raised if there is already an attachment with\n the given name.\n \"\"\"\n if name in self.attachments:\n raise DuplicateAttachmentError('Duplicate attachment for %s' % name)\n if mimetype and not mimetypes.guess_extension(mimetype):\n _LOG.warning('Unrecognized MIME type: \"%s\" for attachment \"%s\"',\n mimetype, name)\n self.attachments[name] = test_record.Attachment(data, mimetype)\n\n def AttachFromFile(self, filename, name=None, mimetype=None):\n \"\"\"Store the contents of the given filename as an attachment.\n\n Args:\n filename: The file to read data from to attach.\n name: If provided, override the attachment name, otherwise it will\n default to the filename.\n mimetype: If provided, override the attachment mime type, otherwise the\n mime type will be guessed based on the file extension.\n\n Raises:\n DuplicateAttachmentError: Raised if there is already an attachment with\n the given name.\n IOError: Raised if the given filename couldn't be opened.\n \"\"\"\n with open(filename, 'r') as f: # pylint: disable=invalid-name\n self.Attach(\n name if name is not None else filename, f.read(),\n mimetype=mimetype if mimetype is not None else mimetypes.guess_type(\n filename)[0])\n\n @contextlib2.contextmanager\n def RecordPhaseTiming(self, phase, test_state):\n \"\"\"Context manager for the execution of a single phase.\"\"\"\n\n # Check for measurement descriptors and track them in the PhaseRecord.\n measurement_map = {\n measurement.name: copy.deepcopy(measurement)\n for measurement in phase.measurements\n }\n\n # Populate dummy declaration list for frontend API.\n test_state.running_phase_record.measurements = {\n measurement.name: measurement._asdict()\n for measurement in measurement_map.itervalues()\n }\n test_state.phase_data.measurements = (\n measurements.Collection(measurement_map))\n test_state.phase_data.attachments = {}\n test_state.running_phase_record.start_time_millis = util.TimeMillis()\n\n outcome_wrapper = OutcomeWrapper()\n\n try:\n yield outcome_wrapper\n finally:\n # Serialize measurements and measured values, validate as we go.\n values = dict(test_state.phase_data.measurements)\n\n # Initialize with already-validated and UNSET measurements.\n validated_measurements = {\n name: measurement for name, measurement in measurement_map.iteritems()\n if measurement.outcome is not measurements.Outcome.PARTIALLY_SET\n }\n\n # Validate multi-dimensional measurements now that we have all values.\n validated_measurements.update({\n name: measurement.Validate(values[name])\n for name, measurement in measurement_map.iteritems()\n if measurement.outcome is measurements.Outcome.PARTIALLY_SET\n })\n\n # Fill out and append the PhaseRecord to our test_record.\n test_state.running_phase_record.measured_values = values\n test_state.running_phase_record.measurements = validated_measurements\n test_state.running_phase_record.end_time_millis = util.TimeMillis()\n test_state.running_phase_record.result = outcome_wrapper.outcome\n test_state.running_phase_record.attachments.update(self.attachments)\n self.test_record.phases.append(test_state.running_phase_record)\n\n # Clear these between uses for the frontend API.\n 
test_state.phase_data.measurements = None\n test_state.phase_data.attachments = {}\n test_state.running_phase_record = None\n", "path": "openhtf/exe/phase_data.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Module encapsulating test phase data.\n\nHTF tests are comprised of a series of test phases. These test phases are\nwrapped in openhtf.PhaseInfo objects to keep track of some necessary\nstate. This wrapping happens by decorating a method with any of various\nsupported decorators.\n\"\"\"\n\nimport contextlib2\nimport copy\nimport logging\nimport mimetypes\n\nimport mutablerecords\n\nfrom openhtf import util\nfrom openhtf.io import test_record\nfrom openhtf.util import measurements\n\n_LOG = logging.getLogger(__name__)\n\n\nclass DuplicateAttachmentError(Exception):\n \"\"\"Raised when two attachments are attached with the same name.\"\"\"\n\n\nclass OutcomeWrapper(mutablerecords.Record(\n 'OutcomeWrapper', [], {'outcome': None})):\n \"\"\"Wrapper so yielded object can receive an outcome.\"\"\"\n def SetOutcome(self, outcome):\n self.outcome = outcome\n\n\nclass PhaseData(object): # pylint: disable=too-many-instance-attributes\n \"\"\"The phase data object passed to test phases as the first argument.\n\n Fields:\n logger: A python logger that goes to the testrun proto, with functions like\n debug, info, warn, error, and exception.\n state: A dictionary for passing state data along to future phases.\n plug_map: Dict mapping plug types to instances to use in phases.\n measurements: A measurements.Collection for setting measurement values.\n context: A contextlib.ExitStack, which simplifies context managers in a\n phase. This stack is pop'd after each phase.\n test_record: The test_record.TestRecord for the currently running test.\n \"\"\"\n def __init__(self, logger, plug_map, record):\n self.logger = logger\n self.plug_map = plug_map\n self.test_record = record\n self.state = {}\n self.measurements = None # Will be populated per-phase.\n self.attachments = {}\n self.context = contextlib2.ExitStack()\n\n def _asdict(self):\n \"\"\"Return a dict of this PhaseData's public data.\"\"\"\n return {'measurements': self.measurements,\n 'attachments': self.attachments.keys(),\n 'plugs': {\n k.__module__ + '.' 
+ k.__name__: str(v)\n for k, v in self.plug_map.iteritems()}}\n\n def Attach(self, name, data, mimetype=None):\n \"\"\"Store the given data as an attachment with the given name.\n\n Args:\n name: Attachment name under which to store this data.\n data: Data to attach.\n mimetype: If provided, will be saved in the attachment.\n\n Raises:\n DuplicateAttachmentError: Raised if there is already an attachment with\n the given name.\n \"\"\"\n if name in self.attachments:\n raise DuplicateAttachmentError('Duplicate attachment for %s' % name)\n if mimetype and not mimetypes.guess_extension(mimetype):\n _LOG.warning('Unrecognized MIME type: \"%s\" for attachment \"%s\"',\n mimetype, name)\n self.attachments[name] = test_record.Attachment(data, mimetype)\n\n def AttachFromFile(self, filename, name=None, mimetype=None):\n \"\"\"Store the contents of the given filename as an attachment.\n\n Args:\n filename: The file to read data from to attach.\n name: If provided, override the attachment name, otherwise it will\n default to the filename.\n mimetype: If provided, override the attachment mime type, otherwise the\n mime type will be guessed based on the file extension.\n\n Raises:\n DuplicateAttachmentError: Raised if there is already an attachment with\n the given name.\n IOError: Raised if the given filename couldn't be opened.\n \"\"\"\n with open(filename, 'rb') as f: # pylint: disable=invalid-name\n self.Attach(\n name if name is not None else filename, f.read(),\n mimetype=mimetype if mimetype is not None else mimetypes.guess_type(\n filename)[0])\n\n @contextlib2.contextmanager\n def RecordPhaseTiming(self, phase, test_state):\n \"\"\"Context manager for the execution of a single phase.\"\"\"\n\n # Check for measurement descriptors and track them in the PhaseRecord.\n measurement_map = {\n measurement.name: copy.deepcopy(measurement)\n for measurement in phase.measurements\n }\n\n # Populate dummy declaration list for frontend API.\n test_state.running_phase_record.measurements = {\n measurement.name: measurement._asdict()\n for measurement in measurement_map.itervalues()\n }\n test_state.phase_data.measurements = (\n measurements.Collection(measurement_map))\n test_state.phase_data.attachments = {}\n test_state.running_phase_record.start_time_millis = util.TimeMillis()\n\n outcome_wrapper = OutcomeWrapper()\n\n try:\n yield outcome_wrapper\n finally:\n # Serialize measurements and measured values, validate as we go.\n values = dict(test_state.phase_data.measurements)\n\n # Initialize with already-validated and UNSET measurements.\n validated_measurements = {\n name: measurement for name, measurement in measurement_map.iteritems()\n if measurement.outcome is not measurements.Outcome.PARTIALLY_SET\n }\n\n # Validate multi-dimensional measurements now that we have all values.\n validated_measurements.update({\n name: measurement.Validate(values[name])\n for name, measurement in measurement_map.iteritems()\n if measurement.outcome is measurements.Outcome.PARTIALLY_SET\n })\n\n # Fill out and append the PhaseRecord to our test_record.\n test_state.running_phase_record.measured_values = values\n test_state.running_phase_record.measurements = validated_measurements\n test_state.running_phase_record.end_time_millis = util.TimeMillis()\n test_state.running_phase_record.result = outcome_wrapper.outcome\n test_state.running_phase_record.attachments.update(self.attachments)\n self.test_record.phases.append(test_state.running_phase_record)\n\n # Clear these between uses for the frontend API.\n 
test_state.phase_data.measurements = None\n test_state.phase_data.attachments = {}\n test_state.running_phase_record = None\n", "path": "openhtf/exe/phase_data.py"}]}
2,127
157
gh_patches_debug_20192
rasdani/github-patches
git_diff
certbot__certbot-7163
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update SSL session cache size to match Mozilla recommendations This is a followup from the research issue at #6903. Ideally, https://github.com/mozilla/server-side-tls/issues/198 is resolved and Mozilla updates their recommendations. If not, I think we should update our value in https://github.com/certbot/certbot/blob/master/certbot-nginx/certbot_nginx/options-ssl-nginx.conf. Exactly what these values should be is up for discussion, however, nginx's default timeout of 5 minutes seems like a reasonable place to start to me. I don't know of the top of my head how I think the cache should be configured. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `certbot-nginx/certbot_nginx/constants.py` Content: ``` 1 """nginx plugin constants.""" 2 import platform 3 4 FREEBSD_DARWIN_SERVER_ROOT = "/usr/local/etc/nginx" 5 LINUX_SERVER_ROOT = "/etc/nginx" 6 7 if platform.system() in ('FreeBSD', 'Darwin'): 8 server_root_tmp = FREEBSD_DARWIN_SERVER_ROOT 9 else: 10 server_root_tmp = LINUX_SERVER_ROOT 11 12 CLI_DEFAULTS = dict( 13 server_root=server_root_tmp, 14 ctl="nginx", 15 ) 16 """CLI defaults.""" 17 18 19 MOD_SSL_CONF_DEST = "options-ssl-nginx.conf" 20 """Name of the mod_ssl config file as saved in `IConfig.config_dir`.""" 21 22 UPDATED_MOD_SSL_CONF_DIGEST = ".updated-options-ssl-nginx-conf-digest.txt" 23 """Name of the hash of the updated or informed mod_ssl_conf as saved in `IConfig.config_dir`.""" 24 25 SSL_OPTIONS_HASHES_NEW = [ 26 '63e2bddebb174a05c9d8a7cf2adf72f7af04349ba59a1a925fe447f73b2f1abf', 27 ] 28 """SHA256 hashes of the contents of versions of MOD_SSL_CONF_SRC for nginx >= 1.5.9""" 29 30 ALL_SSL_OPTIONS_HASHES = [ 31 '0f81093a1465e3d4eaa8b0c14e77b2a2e93568b0fc1351c2b87893a95f0de87c', 32 '9a7b32c49001fed4cff8ad24353329472a50e86ade1ef9b2b9e43566a619612e', 33 'a6d9f1c7d6b36749b52ba061fff1421f9a0a3d2cfdafbd63c05d06f65b990937', 34 '7f95624dd95cf5afc708b9f967ee83a24b8025dc7c8d9df2b556bbc64256b3ff', 35 '394732f2bbe3e5e637c3fb5c6e980a1f1b90b01e2e8d6b7cff41dde16e2a756d', 36 '4b16fec2bcbcd8a2f3296d886f17f9953ffdcc0af54582452ca1e52f5f776f16', 37 ] + SSL_OPTIONS_HASHES_NEW 38 """SHA256 hashes of the contents of all versions of MOD_SSL_CONF_SRC""" 39 40 def os_constant(key): 41 # XXX TODO: In the future, this could return different constants 42 # based on what OS we are running under. To see an 43 # approach to how to handle different OSes, see the 44 # apache version of this file. Currently, we do not 45 # actually have any OS-specific constants on Nginx. 46 """ 47 Get a constant value for operating system 48 49 :param key: name of cli constant 50 :return: value of constant for active os 51 """ 52 return CLI_DEFAULTS[key] 53 54 HSTS_ARGS = ['\"max-age=31536000\"', ' ', 'always'] 55 56 HEADER_ARGS = {'Strict-Transport-Security': HSTS_ARGS} 57 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/certbot-nginx/certbot_nginx/constants.py b/certbot-nginx/certbot_nginx/constants.py --- a/certbot-nginx/certbot_nginx/constants.py +++ b/certbot-nginx/certbot_nginx/constants.py @@ -24,6 +24,7 @@ SSL_OPTIONS_HASHES_NEW = [ '63e2bddebb174a05c9d8a7cf2adf72f7af04349ba59a1a925fe447f73b2f1abf', + '2901debc7ecbc10917edd9084c05464c9c5930b463677571eaf8c94bffd11ae2', ] """SHA256 hashes of the contents of versions of MOD_SSL_CONF_SRC for nginx >= 1.5.9""" @@ -34,6 +35,7 @@ '7f95624dd95cf5afc708b9f967ee83a24b8025dc7c8d9df2b556bbc64256b3ff', '394732f2bbe3e5e637c3fb5c6e980a1f1b90b01e2e8d6b7cff41dde16e2a756d', '4b16fec2bcbcd8a2f3296d886f17f9953ffdcc0af54582452ca1e52f5f776f16', + 'c052ffff0ad683f43bffe105f7c606b339536163490930e2632a335c8d191cc4', ] + SSL_OPTIONS_HASHES_NEW """SHA256 hashes of the contents of all versions of MOD_SSL_CONF_SRC"""
{"golden_diff": "diff --git a/certbot-nginx/certbot_nginx/constants.py b/certbot-nginx/certbot_nginx/constants.py\n--- a/certbot-nginx/certbot_nginx/constants.py\n+++ b/certbot-nginx/certbot_nginx/constants.py\n@@ -24,6 +24,7 @@\n \n SSL_OPTIONS_HASHES_NEW = [\n '63e2bddebb174a05c9d8a7cf2adf72f7af04349ba59a1a925fe447f73b2f1abf',\n+ '2901debc7ecbc10917edd9084c05464c9c5930b463677571eaf8c94bffd11ae2',\n ]\n \"\"\"SHA256 hashes of the contents of versions of MOD_SSL_CONF_SRC for nginx >= 1.5.9\"\"\"\n \n@@ -34,6 +35,7 @@\n '7f95624dd95cf5afc708b9f967ee83a24b8025dc7c8d9df2b556bbc64256b3ff',\n '394732f2bbe3e5e637c3fb5c6e980a1f1b90b01e2e8d6b7cff41dde16e2a756d',\n '4b16fec2bcbcd8a2f3296d886f17f9953ffdcc0af54582452ca1e52f5f776f16',\n+ 'c052ffff0ad683f43bffe105f7c606b339536163490930e2632a335c8d191cc4',\n ] + SSL_OPTIONS_HASHES_NEW\n \"\"\"SHA256 hashes of the contents of all versions of MOD_SSL_CONF_SRC\"\"\"\n", "issue": "Update SSL session cache size to match Mozilla recommendations\nThis is a followup from the research issue at #6903.\r\n\r\nIdeally, https://github.com/mozilla/server-side-tls/issues/198 is resolved and Mozilla updates their recommendations. If not, I think we should update our value in https://github.com/certbot/certbot/blob/master/certbot-nginx/certbot_nginx/options-ssl-nginx.conf.\r\n\r\nExactly what these values should be is up for discussion, however, nginx's default timeout of 5 minutes seems like a reasonable place to start to me. I don't know of the top of my head how I think the cache should be configured.\n", "before_files": [{"content": "\"\"\"nginx plugin constants.\"\"\"\nimport platform\n\nFREEBSD_DARWIN_SERVER_ROOT = \"/usr/local/etc/nginx\"\nLINUX_SERVER_ROOT = \"/etc/nginx\"\n\nif platform.system() in ('FreeBSD', 'Darwin'):\n server_root_tmp = FREEBSD_DARWIN_SERVER_ROOT\nelse:\n server_root_tmp = LINUX_SERVER_ROOT\n\nCLI_DEFAULTS = dict(\n server_root=server_root_tmp,\n ctl=\"nginx\",\n)\n\"\"\"CLI defaults.\"\"\"\n\n\nMOD_SSL_CONF_DEST = \"options-ssl-nginx.conf\"\n\"\"\"Name of the mod_ssl config file as saved in `IConfig.config_dir`.\"\"\"\n\nUPDATED_MOD_SSL_CONF_DIGEST = \".updated-options-ssl-nginx-conf-digest.txt\"\n\"\"\"Name of the hash of the updated or informed mod_ssl_conf as saved in `IConfig.config_dir`.\"\"\"\n\nSSL_OPTIONS_HASHES_NEW = [\n '63e2bddebb174a05c9d8a7cf2adf72f7af04349ba59a1a925fe447f73b2f1abf',\n]\n\"\"\"SHA256 hashes of the contents of versions of MOD_SSL_CONF_SRC for nginx >= 1.5.9\"\"\"\n\nALL_SSL_OPTIONS_HASHES = [\n '0f81093a1465e3d4eaa8b0c14e77b2a2e93568b0fc1351c2b87893a95f0de87c',\n '9a7b32c49001fed4cff8ad24353329472a50e86ade1ef9b2b9e43566a619612e',\n 'a6d9f1c7d6b36749b52ba061fff1421f9a0a3d2cfdafbd63c05d06f65b990937',\n '7f95624dd95cf5afc708b9f967ee83a24b8025dc7c8d9df2b556bbc64256b3ff',\n '394732f2bbe3e5e637c3fb5c6e980a1f1b90b01e2e8d6b7cff41dde16e2a756d',\n '4b16fec2bcbcd8a2f3296d886f17f9953ffdcc0af54582452ca1e52f5f776f16',\n] + SSL_OPTIONS_HASHES_NEW\n\"\"\"SHA256 hashes of the contents of all versions of MOD_SSL_CONF_SRC\"\"\"\n\ndef os_constant(key):\n # XXX TODO: In the future, this could return different constants\n # based on what OS we are running under. To see an\n # approach to how to handle different OSes, see the\n # apache version of this file. 
Currently, we do not\n # actually have any OS-specific constants on Nginx.\n \"\"\"\n Get a constant value for operating system\n\n :param key: name of cli constant\n :return: value of constant for active os\n \"\"\"\n return CLI_DEFAULTS[key]\n\nHSTS_ARGS = ['\\\"max-age=31536000\\\"', ' ', 'always']\n\nHEADER_ARGS = {'Strict-Transport-Security': HSTS_ARGS}\n", "path": "certbot-nginx/certbot_nginx/constants.py"}], "after_files": [{"content": "\"\"\"nginx plugin constants.\"\"\"\nimport platform\n\nFREEBSD_DARWIN_SERVER_ROOT = \"/usr/local/etc/nginx\"\nLINUX_SERVER_ROOT = \"/etc/nginx\"\n\nif platform.system() in ('FreeBSD', 'Darwin'):\n server_root_tmp = FREEBSD_DARWIN_SERVER_ROOT\nelse:\n server_root_tmp = LINUX_SERVER_ROOT\n\nCLI_DEFAULTS = dict(\n server_root=server_root_tmp,\n ctl=\"nginx\",\n)\n\"\"\"CLI defaults.\"\"\"\n\n\nMOD_SSL_CONF_DEST = \"options-ssl-nginx.conf\"\n\"\"\"Name of the mod_ssl config file as saved in `IConfig.config_dir`.\"\"\"\n\nUPDATED_MOD_SSL_CONF_DIGEST = \".updated-options-ssl-nginx-conf-digest.txt\"\n\"\"\"Name of the hash of the updated or informed mod_ssl_conf as saved in `IConfig.config_dir`.\"\"\"\n\nSSL_OPTIONS_HASHES_NEW = [\n '63e2bddebb174a05c9d8a7cf2adf72f7af04349ba59a1a925fe447f73b2f1abf',\n '2901debc7ecbc10917edd9084c05464c9c5930b463677571eaf8c94bffd11ae2',\n]\n\"\"\"SHA256 hashes of the contents of versions of MOD_SSL_CONF_SRC for nginx >= 1.5.9\"\"\"\n\nALL_SSL_OPTIONS_HASHES = [\n '0f81093a1465e3d4eaa8b0c14e77b2a2e93568b0fc1351c2b87893a95f0de87c',\n '9a7b32c49001fed4cff8ad24353329472a50e86ade1ef9b2b9e43566a619612e',\n 'a6d9f1c7d6b36749b52ba061fff1421f9a0a3d2cfdafbd63c05d06f65b990937',\n '7f95624dd95cf5afc708b9f967ee83a24b8025dc7c8d9df2b556bbc64256b3ff',\n '394732f2bbe3e5e637c3fb5c6e980a1f1b90b01e2e8d6b7cff41dde16e2a756d',\n '4b16fec2bcbcd8a2f3296d886f17f9953ffdcc0af54582452ca1e52f5f776f16',\n 'c052ffff0ad683f43bffe105f7c606b339536163490930e2632a335c8d191cc4',\n] + SSL_OPTIONS_HASHES_NEW\n\"\"\"SHA256 hashes of the contents of all versions of MOD_SSL_CONF_SRC\"\"\"\n\ndef os_constant(key):\n # XXX TODO: In the future, this could return different constants\n # based on what OS we are running under. To see an\n # approach to how to handle different OSes, see the\n # apache version of this file. Currently, we do not\n # actually have any OS-specific constants on Nginx.\n \"\"\"\n Get a constant value for operating system\n\n :param key: name of cli constant\n :return: value of constant for active os\n \"\"\"\n return CLI_DEFAULTS[key]\n\nHSTS_ARGS = ['\\\"max-age=31536000\\\"', ' ', 'always']\n\nHEADER_ARGS = {'Strict-Transport-Security': HSTS_ARGS}\n", "path": "certbot-nginx/certbot_nginx/constants.py"}]}
1,331
497
gh_patches_debug_10834
rasdani/github-patches
git_diff
getredash__redash-6561
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The 'Create your first Dashboard' newbie link will not dispear even I create dashboards ### Issue Summary The 'Create your first Dashboard' newbie link will not dispear even I create dashboards. Other newbie link works fine. I tried a completely new Redash instance, this issue still exists. I remember there is a commit related to the newbie link recently, but I cannot find which. This issue does not exists in the previous Docker preview image, so I assume that it should be related to recent commits. ### Steps to Reproduce 1. Create new dashboards. 2. The link still there. <img width="280" alt="image" src="https://github.com/getredash/redash/assets/8188177/19555165-b2df-4b07-89cf-7443858ca704"> ### Technical details: * Redash Version: 23.10.0-dev (dev) * Browser/OS: Chrome 118 * How did you install Redash: Docker The 'Create your first Dashboard' newbie link will not dispear even I create dashboards ### Issue Summary The 'Create your first Dashboard' newbie link will not dispear even I create dashboards. Other newbie link works fine. I tried a completely new Redash instance, this issue still exists. I remember there is a commit related to the newbie link recently, but I cannot find which. This issue does not exists in the previous Docker preview image, so I assume that it should be related to recent commits. ### Steps to Reproduce 1. Create new dashboards. 2. The link still there. <img width="280" alt="image" src="https://github.com/getredash/redash/assets/8188177/19555165-b2df-4b07-89cf-7443858ca704"> ### Technical details: * Redash Version: 23.10.0-dev (dev) * Browser/OS: Chrome 118 * How did you install Redash: Docker --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `redash/handlers/organization.py` Content: ``` 1 from flask_login import current_user, login_required 2 3 from redash import models 4 from redash.authentication import current_org 5 from redash.handlers import routes 6 from redash.handlers.base import json_response, org_scoped_rule 7 8 9 @routes.route(org_scoped_rule("/api/organization/status"), methods=["GET"]) 10 @login_required 11 def organization_status(org_slug=None): 12 counters = { 13 "users": models.User.all(current_org).count(), 14 "alerts": models.Alert.all(group_ids=current_user.group_ids).count(), 15 "data_sources": models.DataSource.all(current_org, group_ids=current_user.group_ids).count(), 16 "queries": models.Query.all_queries(current_user.group_ids, current_user.id, include_drafts=True).count(), 17 "dashboards": models.Dashboard.query.filter( 18 models.Dashboard.org == current_org, models.Dashboard.is_archived is False 19 ).count(), 20 } 21 22 return json_response(dict(object_counters=counters)) 23 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/redash/handlers/organization.py b/redash/handlers/organization.py --- a/redash/handlers/organization.py +++ b/redash/handlers/organization.py @@ -15,7 +15,7 @@ "data_sources": models.DataSource.all(current_org, group_ids=current_user.group_ids).count(), "queries": models.Query.all_queries(current_user.group_ids, current_user.id, include_drafts=True).count(), "dashboards": models.Dashboard.query.filter( - models.Dashboard.org == current_org, models.Dashboard.is_archived is False + models.Dashboard.org == current_org, models.Dashboard.is_archived.is_(False) ).count(), }
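The diff above fixes a classic SQLAlchemy pitfall: `models.Dashboard.is_archived is False` is a plain Python identity test against the column object, so no SQL comparison on `is_archived` is ever built and the dashboard counter misbehaves — which is why the newbie link never disappears. A minimal standalone sketch of the difference (SQLAlchemy 1.4+, hypothetical model, not redash's own):

```python
# Standalone sketch of the `is False` vs `.is_(False)` pitfall on SQLAlchemy columns.
from sqlalchemy import Boolean, Column, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Dashboard(Base):
    __tablename__ = "dashboards"
    id = Column(Integer, primary_key=True)
    is_archived = Column(Boolean, default=False)

# Identity test against the column object: just a Python bool, never a SQL clause.
print(Dashboard.is_archived is False)

# The patched form: a SQLAlchemy expression that compiles to an IS comparison.
print(Dashboard.is_archived.is_(False))
```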
{"golden_diff": "diff --git a/redash/handlers/organization.py b/redash/handlers/organization.py\n--- a/redash/handlers/organization.py\n+++ b/redash/handlers/organization.py\n@@ -15,7 +15,7 @@\n \"data_sources\": models.DataSource.all(current_org, group_ids=current_user.group_ids).count(),\n \"queries\": models.Query.all_queries(current_user.group_ids, current_user.id, include_drafts=True).count(),\n \"dashboards\": models.Dashboard.query.filter(\n- models.Dashboard.org == current_org, models.Dashboard.is_archived is False\n+ models.Dashboard.org == current_org, models.Dashboard.is_archived.is_(False)\n ).count(),\n }\n", "issue": "The 'Create your first Dashboard' newbie link will not dispear even I create dashboards\n### Issue Summary\r\n\r\nThe 'Create your first Dashboard' newbie link will not dispear even I create dashboards. Other newbie link works fine. I tried a completely new Redash instance, this issue still exists. I remember there is a commit related to the newbie link recently, but I cannot find which. This issue does not exists in the previous Docker preview image, so I assume that it should be related to recent commits.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create new dashboards.\r\n2. The link still there.\r\n\r\n<img width=\"280\" alt=\"image\" src=\"https://github.com/getredash/redash/assets/8188177/19555165-b2df-4b07-89cf-7443858ca704\">\r\n\r\n### Technical details:\r\n\r\n* Redash Version: 23.10.0-dev (dev)\r\n* Browser/OS: Chrome 118\r\n* How did you install Redash: Docker\r\n\nThe 'Create your first Dashboard' newbie link will not dispear even I create dashboards\n### Issue Summary\r\n\r\nThe 'Create your first Dashboard' newbie link will not dispear even I create dashboards. Other newbie link works fine. I tried a completely new Redash instance, this issue still exists. I remember there is a commit related to the newbie link recently, but I cannot find which. This issue does not exists in the previous Docker preview image, so I assume that it should be related to recent commits.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create new dashboards.\r\n2. 
The link still there.\r\n\r\n<img width=\"280\" alt=\"image\" src=\"https://github.com/getredash/redash/assets/8188177/19555165-b2df-4b07-89cf-7443858ca704\">\r\n\r\n### Technical details:\r\n\r\n* Redash Version: 23.10.0-dev (dev)\r\n* Browser/OS: Chrome 118\r\n* How did you install Redash: Docker\r\n\n", "before_files": [{"content": "from flask_login import current_user, login_required\n\nfrom redash import models\nfrom redash.authentication import current_org\nfrom redash.handlers import routes\nfrom redash.handlers.base import json_response, org_scoped_rule\n\n\[email protected](org_scoped_rule(\"/api/organization/status\"), methods=[\"GET\"])\n@login_required\ndef organization_status(org_slug=None):\n counters = {\n \"users\": models.User.all(current_org).count(),\n \"alerts\": models.Alert.all(group_ids=current_user.group_ids).count(),\n \"data_sources\": models.DataSource.all(current_org, group_ids=current_user.group_ids).count(),\n \"queries\": models.Query.all_queries(current_user.group_ids, current_user.id, include_drafts=True).count(),\n \"dashboards\": models.Dashboard.query.filter(\n models.Dashboard.org == current_org, models.Dashboard.is_archived is False\n ).count(),\n }\n\n return json_response(dict(object_counters=counters))\n", "path": "redash/handlers/organization.py"}], "after_files": [{"content": "from flask_login import current_user, login_required\n\nfrom redash import models\nfrom redash.authentication import current_org\nfrom redash.handlers import routes\nfrom redash.handlers.base import json_response, org_scoped_rule\n\n\[email protected](org_scoped_rule(\"/api/organization/status\"), methods=[\"GET\"])\n@login_required\ndef organization_status(org_slug=None):\n counters = {\n \"users\": models.User.all(current_org).count(),\n \"alerts\": models.Alert.all(group_ids=current_user.group_ids).count(),\n \"data_sources\": models.DataSource.all(current_org, group_ids=current_user.group_ids).count(),\n \"queries\": models.Query.all_queries(current_user.group_ids, current_user.id, include_drafts=True).count(),\n \"dashboards\": models.Dashboard.query.filter(\n models.Dashboard.org == current_org, models.Dashboard.is_archived.is_(False)\n ).count(),\n }\n\n return json_response(dict(object_counters=counters))\n", "path": "redash/handlers/organization.py"}]}
962
156
gh_patches_debug_8009
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-2364
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Custom callable is ignored in retrieval metric aggregation ## 🐛 Bug When passing a custom aggregation to any `RetrievalMetric`, the callable is ignored. There is a bug in the retrieval aggregation function that defaults to using the maximum of the values by default https://github.com/Lightning-AI/torchmetrics/blob/1a2f406da17a01c13ce1add83835e097460cbce3/src/torchmetrics/retrieval/base.py#L38 ### To Reproduce The following custom aggregation should output the tensor of values without aggregation but returns 1, the max of the values. ``` >>> from torchmetrics.retrieval import RetrievalNormalizedDCG >>> indexes = tensor([0, 0, 0, 1, 1, 1, 1]) >>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2]) >>> target = tensor([False, False, True, False, True, False, True]) >>> ndcg = RetrievalNormalizedDCG(aggregation=lambda x, dim: x) >>> print(ndcg(preds, target, indexes=indexes)) tensor(1.) >>> ndcg = RetrievalNormalizedDCG(aggregation="max") >>> print(ndcg(preds, target, indexes=indexes)) tensor(1.) >>> ndcg = RetrievalNormalizedDCG() >>> print(ndcg(preds, target, indexes=indexes)) tensor(0.8467) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/torchmetrics/retrieval/base.py` Content: ``` 1 # Copyright The Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from abc import ABC, abstractmethod 15 from typing import Any, Callable, List, Optional, Union 16 17 import torch 18 from torch import Tensor, tensor 19 from typing_extensions import Literal 20 21 from torchmetrics import Metric 22 from torchmetrics.utilities.checks import _check_retrieval_inputs 23 from torchmetrics.utilities.data import _flexible_bincount, dim_zero_cat 24 25 26 def _retrieval_aggregate( 27 values: Tensor, 28 aggregation: Union[Literal["mean", "median", "min", "max"], Callable] = "mean", 29 dim: Optional[int] = None, 30 ) -> Tensor: 31 """Aggregate the final retrieval values into a single value.""" 32 if aggregation == "mean": 33 return values.mean() if dim is None else values.mean(dim=dim) 34 if aggregation == "median": 35 return values.median() if dim is None else values.median(dim=dim).values 36 if aggregation == "min": 37 return values.min() if dim is None else values.min(dim=dim).values 38 if aggregation: 39 return values.max() if dim is None else values.max(dim=dim).values 40 return aggregation(values, dim=dim) 41 42 43 class RetrievalMetric(Metric, ABC): 44 """Works with binary target data. Accepts float predictions from a model output. 
45 46 As input to ``forward`` and ``update`` the metric accepts the following input: 47 48 - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)`` 49 - ``target`` (:class:`~torch.Tensor`): A long or bool tensor of shape ``(N, ...)`` 50 - ``indexes`` (:class:`~torch.Tensor`): A long tensor of shape ``(N, ...)`` which indicate to which query a 51 prediction belongs 52 53 .. note:: ``indexes``, ``preds`` and ``target`` must have the same dimension and will be flatten 54 to single dimension once provided. 55 56 .. note:: 57 Predictions will be first grouped by ``indexes`` and then the real metric, defined by overriding 58 the `_metric` method, will be computed as the mean of the scores over each query. 59 60 As output to ``forward`` and ``compute`` the metric returns the following output: 61 62 - ``metric`` (:class:`~torch.Tensor`): A tensor as computed by ``_metric`` if the number of positive targets is 63 at least 1, otherwise behave as specified by ``self.empty_target_action``. 64 65 Args: 66 empty_target_action: 67 Specify what to do with queries that do not have at least a positive 68 or negative (depend on metric) target. Choose from: 69 70 - ``'neg'``: those queries count as ``0.0`` (default) 71 - ``'pos'``: those queries count as ``1.0`` 72 - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned 73 - ``'error'``: raise a ``ValueError`` 74 75 ignore_index: 76 Ignore predictions where the target is equal to this number. 77 aggregation: 78 Specify how to aggregate over indexes. Can either a custom callable function that takes in a single tensor 79 and returns a scalar value or one of the following strings: 80 81 - ``'mean'``: average value is returned 82 - ``'median'``: median value is returned 83 - ``'max'``: max value is returned 84 - ``'min'``: min value is returned 85 86 kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 87 88 Raises: 89 ValueError: 90 If ``empty_target_action`` is not one of ``error``, ``skip``, ``neg`` or ``pos``. 91 ValueError: 92 If ``ignore_index`` is not `None` or an integer. 93 94 """ 95 96 is_differentiable: bool = False 97 higher_is_better: bool = True 98 full_state_update: bool = False 99 100 indexes: List[Tensor] 101 preds: List[Tensor] 102 target: List[Tensor] 103 104 def __init__( 105 self, 106 empty_target_action: str = "neg", 107 ignore_index: Optional[int] = None, 108 aggregation: Union[Literal["mean", "median", "min", "max"], Callable] = "mean", 109 **kwargs: Any, 110 ) -> None: 111 super().__init__(**kwargs) 112 self.allow_non_binary_target = False 113 114 empty_target_action_options = ("error", "skip", "neg", "pos") 115 if empty_target_action not in empty_target_action_options: 116 raise ValueError(f"Argument `empty_target_action` received a wrong value `{empty_target_action}`.") 117 self.empty_target_action = empty_target_action 118 119 if ignore_index is not None and not isinstance(ignore_index, int): 120 raise ValueError("Argument `ignore_index` must be an integer or None.") 121 self.ignore_index = ignore_index 122 123 if not (aggregation in ("mean", "median", "min", "max") or callable(aggregation)): 124 raise ValueError( 125 "Argument `aggregation` must be one of `mean`, `median`, `min`, `max` or a custom callable function" 126 f"which takes tensor of values, but got {aggregation}." 
127 ) 128 self.aggregation = aggregation 129 130 self.add_state("indexes", default=[], dist_reduce_fx=None) 131 self.add_state("preds", default=[], dist_reduce_fx=None) 132 self.add_state("target", default=[], dist_reduce_fx=None) 133 134 def update(self, preds: Tensor, target: Tensor, indexes: Tensor) -> None: 135 """Check shape, check and convert dtypes, flatten and add to accumulators.""" 136 if indexes is None: 137 raise ValueError("Argument `indexes` cannot be None") 138 139 indexes, preds, target = _check_retrieval_inputs( 140 indexes, preds, target, allow_non_binary_target=self.allow_non_binary_target, ignore_index=self.ignore_index 141 ) 142 143 self.indexes.append(indexes) 144 self.preds.append(preds) 145 self.target.append(target) 146 147 def compute(self) -> Tensor: 148 """First concat state ``indexes``, ``preds`` and ``target`` since they were stored as lists. 149 150 After that, compute list of groups that will help in keeping together predictions about the same query. Finally, 151 for each group compute the ``_metric`` if the number of positive targets is at least 1, otherwise behave as 152 specified by ``self.empty_target_action``. 153 154 """ 155 indexes = dim_zero_cat(self.indexes) 156 preds = dim_zero_cat(self.preds) 157 target = dim_zero_cat(self.target) 158 159 indexes, indices = torch.sort(indexes) 160 preds = preds[indices] 161 target = target[indices] 162 163 split_sizes = _flexible_bincount(indexes).detach().cpu().tolist() 164 165 res = [] 166 for mini_preds, mini_target in zip( 167 torch.split(preds, split_sizes, dim=0), torch.split(target, split_sizes, dim=0) 168 ): 169 if not mini_target.sum(): 170 if self.empty_target_action == "error": 171 raise ValueError("`compute` method was provided with a query with no positive target.") 172 if self.empty_target_action == "pos": 173 res.append(tensor(1.0)) 174 elif self.empty_target_action == "neg": 175 res.append(tensor(0.0)) 176 else: 177 # ensure list contains only float tensors 178 res.append(self._metric(mini_preds, mini_target)) 179 180 if res: 181 return _retrieval_aggregate(torch.stack([x.to(preds) for x in res]), self.aggregation) 182 return tensor(0.0).to(preds) 183 184 @abstractmethod 185 def _metric(self, preds: Tensor, target: Tensor) -> Tensor: 186 """Compute a metric over a predictions and target of a single group. 187 188 This method should be overridden by subclasses. 189 190 """ 191 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/torchmetrics/retrieval/base.py b/src/torchmetrics/retrieval/base.py --- a/src/torchmetrics/retrieval/base.py +++ b/src/torchmetrics/retrieval/base.py @@ -35,7 +35,7 @@ return values.median() if dim is None else values.median(dim=dim).values if aggregation == "min": return values.min() if dim is None else values.min(dim=dim).values - if aggregation: + if aggregation == "max": return values.max() if dim is None else values.max(dim=dim).values return aggregation(values, dim=dim)
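The root cause fixed above is a truthiness slip rather than anything retrieval-specific: by the time execution reaches `if aggregation:`, the value is either the string `"max"` or a user-supplied callable, and both are truthy, so the callable branch below it is unreachable. A stripped-down standalone sketch (not the torchmetrics implementation):

```python
# Standalone sketch of the dispatch bug fixed above: `if aggregation:` is true
# for the string "max" *and* for any callable, so the custom-callable branch
# below it can never run until the check becomes `aggregation == "max"`.
def aggregate(values, aggregation="mean"):
    if aggregation == "mean":
        return sum(values) / len(values)
    if aggregation:                     # buggy check (always true here)
        return max(values)
    return aggregation(values)          # unreachable dead code

print(aggregate([0.2, 0.8], aggregation=list))   # prints 0.8, not [0.2, 0.8]
```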
{"golden_diff": "diff --git a/src/torchmetrics/retrieval/base.py b/src/torchmetrics/retrieval/base.py\n--- a/src/torchmetrics/retrieval/base.py\n+++ b/src/torchmetrics/retrieval/base.py\n@@ -35,7 +35,7 @@\n return values.median() if dim is None else values.median(dim=dim).values\n if aggregation == \"min\":\n return values.min() if dim is None else values.min(dim=dim).values\n- if aggregation:\n+ if aggregation == \"max\":\n return values.max() if dim is None else values.max(dim=dim).values\n return aggregation(values, dim=dim)\n", "issue": "Custom callable is ignored in retrieval metric aggregation\n## \ud83d\udc1b Bug\r\n\r\nWhen passing a custom aggregation to any `RetrievalMetric`, the callable is ignored. There is a bug in the retrieval aggregation function that defaults to using the maximum of the values by default https://github.com/Lightning-AI/torchmetrics/blob/1a2f406da17a01c13ce1add83835e097460cbce3/src/torchmetrics/retrieval/base.py#L38\r\n\r\n### To Reproduce\r\n\r\nThe following custom aggregation should output the tensor of values without aggregation but returns 1, the max of the values.\r\n\r\n```\r\n>>> from torchmetrics.retrieval import RetrievalNormalizedDCG\r\n>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])\r\n>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])\r\n>>> target = tensor([False, False, True, False, True, False, True])\r\n>>> ndcg = RetrievalNormalizedDCG(aggregation=lambda x, dim: x)\r\n>>> print(ndcg(preds, target, indexes=indexes))\r\ntensor(1.)\r\n>>> ndcg = RetrievalNormalizedDCG(aggregation=\"max\")\r\n>>> print(ndcg(preds, target, indexes=indexes))\r\ntensor(1.)\r\n>>> ndcg = RetrievalNormalizedDCG()\r\n>>> print(ndcg(preds, target, indexes=indexes))\r\ntensor(0.8467)\r\n```\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Callable, List, Optional, Union\n\nimport torch\nfrom torch import Tensor, tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics import Metric\nfrom torchmetrics.utilities.checks import _check_retrieval_inputs\nfrom torchmetrics.utilities.data import _flexible_bincount, dim_zero_cat\n\n\ndef _retrieval_aggregate(\n values: Tensor,\n aggregation: Union[Literal[\"mean\", \"median\", \"min\", \"max\"], Callable] = \"mean\",\n dim: Optional[int] = None,\n) -> Tensor:\n \"\"\"Aggregate the final retrieval values into a single value.\"\"\"\n if aggregation == \"mean\":\n return values.mean() if dim is None else values.mean(dim=dim)\n if aggregation == \"median\":\n return values.median() if dim is None else values.median(dim=dim).values\n if aggregation == \"min\":\n return values.min() if dim is None else values.min(dim=dim).values\n if aggregation:\n return values.max() if dim is None else values.max(dim=dim).values\n return aggregation(values, dim=dim)\n\n\nclass RetrievalMetric(Metric, ABC):\n \"\"\"Works with binary target data. 
Accepts float predictions from a model output.\n\n As input to ``forward`` and ``update`` the metric accepts the following input:\n\n - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``\n - ``target`` (:class:`~torch.Tensor`): A long or bool tensor of shape ``(N, ...)``\n - ``indexes`` (:class:`~torch.Tensor`): A long tensor of shape ``(N, ...)`` which indicate to which query a\n prediction belongs\n\n .. note:: ``indexes``, ``preds`` and ``target`` must have the same dimension and will be flatten\n to single dimension once provided.\n\n .. note::\n Predictions will be first grouped by ``indexes`` and then the real metric, defined by overriding\n the `_metric` method, will be computed as the mean of the scores over each query.\n\n As output to ``forward`` and ``compute`` the metric returns the following output:\n\n - ``metric`` (:class:`~torch.Tensor`): A tensor as computed by ``_metric`` if the number of positive targets is\n at least 1, otherwise behave as specified by ``self.empty_target_action``.\n\n Args:\n empty_target_action:\n Specify what to do with queries that do not have at least a positive\n or negative (depend on metric) target. Choose from:\n\n - ``'neg'``: those queries count as ``0.0`` (default)\n - ``'pos'``: those queries count as ``1.0``\n - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned\n - ``'error'``: raise a ``ValueError``\n\n ignore_index:\n Ignore predictions where the target is equal to this number.\n aggregation:\n Specify how to aggregate over indexes. Can either a custom callable function that takes in a single tensor\n and returns a scalar value or one of the following strings:\n\n - ``'mean'``: average value is returned\n - ``'median'``: median value is returned\n - ``'max'``: max value is returned\n - ``'min'``: min value is returned\n\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Raises:\n ValueError:\n If ``empty_target_action`` is not one of ``error``, ``skip``, ``neg`` or ``pos``.\n ValueError:\n If ``ignore_index`` is not `None` or an integer.\n\n \"\"\"\n\n is_differentiable: bool = False\n higher_is_better: bool = True\n full_state_update: bool = False\n\n indexes: List[Tensor]\n preds: List[Tensor]\n target: List[Tensor]\n\n def __init__(\n self,\n empty_target_action: str = \"neg\",\n ignore_index: Optional[int] = None,\n aggregation: Union[Literal[\"mean\", \"median\", \"min\", \"max\"], Callable] = \"mean\",\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n self.allow_non_binary_target = False\n\n empty_target_action_options = (\"error\", \"skip\", \"neg\", \"pos\")\n if empty_target_action not in empty_target_action_options:\n raise ValueError(f\"Argument `empty_target_action` received a wrong value `{empty_target_action}`.\")\n self.empty_target_action = empty_target_action\n\n if ignore_index is not None and not isinstance(ignore_index, int):\n raise ValueError(\"Argument `ignore_index` must be an integer or None.\")\n self.ignore_index = ignore_index\n\n if not (aggregation in (\"mean\", \"median\", \"min\", \"max\") or callable(aggregation)):\n raise ValueError(\n \"Argument `aggregation` must be one of `mean`, `median`, `min`, `max` or a custom callable function\"\n f\"which takes tensor of values, but got {aggregation}.\"\n )\n self.aggregation = aggregation\n\n self.add_state(\"indexes\", default=[], dist_reduce_fx=None)\n self.add_state(\"preds\", default=[], dist_reduce_fx=None)\n self.add_state(\"target\", default=[], 
dist_reduce_fx=None)\n\n def update(self, preds: Tensor, target: Tensor, indexes: Tensor) -> None:\n \"\"\"Check shape, check and convert dtypes, flatten and add to accumulators.\"\"\"\n if indexes is None:\n raise ValueError(\"Argument `indexes` cannot be None\")\n\n indexes, preds, target = _check_retrieval_inputs(\n indexes, preds, target, allow_non_binary_target=self.allow_non_binary_target, ignore_index=self.ignore_index\n )\n\n self.indexes.append(indexes)\n self.preds.append(preds)\n self.target.append(target)\n\n def compute(self) -> Tensor:\n \"\"\"First concat state ``indexes``, ``preds`` and ``target`` since they were stored as lists.\n\n After that, compute list of groups that will help in keeping together predictions about the same query. Finally,\n for each group compute the ``_metric`` if the number of positive targets is at least 1, otherwise behave as\n specified by ``self.empty_target_action``.\n\n \"\"\"\n indexes = dim_zero_cat(self.indexes)\n preds = dim_zero_cat(self.preds)\n target = dim_zero_cat(self.target)\n\n indexes, indices = torch.sort(indexes)\n preds = preds[indices]\n target = target[indices]\n\n split_sizes = _flexible_bincount(indexes).detach().cpu().tolist()\n\n res = []\n for mini_preds, mini_target in zip(\n torch.split(preds, split_sizes, dim=0), torch.split(target, split_sizes, dim=0)\n ):\n if not mini_target.sum():\n if self.empty_target_action == \"error\":\n raise ValueError(\"`compute` method was provided with a query with no positive target.\")\n if self.empty_target_action == \"pos\":\n res.append(tensor(1.0))\n elif self.empty_target_action == \"neg\":\n res.append(tensor(0.0))\n else:\n # ensure list contains only float tensors\n res.append(self._metric(mini_preds, mini_target))\n\n if res:\n return _retrieval_aggregate(torch.stack([x.to(preds) for x in res]), self.aggregation)\n return tensor(0.0).to(preds)\n\n @abstractmethod\n def _metric(self, preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"Compute a metric over a predictions and target of a single group.\n\n This method should be overridden by subclasses.\n\n \"\"\"\n", "path": "src/torchmetrics/retrieval/base.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Callable, List, Optional, Union\n\nimport torch\nfrom torch import Tensor, tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics import Metric\nfrom torchmetrics.utilities.checks import _check_retrieval_inputs\nfrom torchmetrics.utilities.data import _flexible_bincount, dim_zero_cat\n\n\ndef _retrieval_aggregate(\n values: Tensor,\n aggregation: Union[Literal[\"mean\", \"median\", \"min\", \"max\"], Callable] = \"mean\",\n dim: Optional[int] = None,\n) -> Tensor:\n \"\"\"Aggregate the final retrieval values into a single value.\"\"\"\n if aggregation == \"mean\":\n return values.mean() if dim is None else values.mean(dim=dim)\n if aggregation == \"median\":\n return values.median() if dim is 
None else values.median(dim=dim).values\n if aggregation == \"min\":\n return values.min() if dim is None else values.min(dim=dim).values\n if aggregation == \"max\":\n return values.max() if dim is None else values.max(dim=dim).values\n return aggregation(values, dim=dim)\n\n\nclass RetrievalMetric(Metric, ABC):\n \"\"\"Works with binary target data. Accepts float predictions from a model output.\n\n As input to ``forward`` and ``update`` the metric accepts the following input:\n\n - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``\n - ``target`` (:class:`~torch.Tensor`): A long or bool tensor of shape ``(N, ...)``\n - ``indexes`` (:class:`~torch.Tensor`): A long tensor of shape ``(N, ...)`` which indicate to which query a\n prediction belongs\n\n .. note:: ``indexes``, ``preds`` and ``target`` must have the same dimension and will be flatten\n to single dimension once provided.\n\n .. note::\n Predictions will be first grouped by ``indexes`` and then the real metric, defined by overriding\n the `_metric` method, will be computed as the mean of the scores over each query.\n\n As output to ``forward`` and ``compute`` the metric returns the following output:\n\n - ``metric`` (:class:`~torch.Tensor`): A tensor as computed by ``_metric`` if the number of positive targets is\n at least 1, otherwise behave as specified by ``self.empty_target_action``.\n\n Args:\n empty_target_action:\n Specify what to do with queries that do not have at least a positive\n or negative (depend on metric) target. Choose from:\n\n - ``'neg'``: those queries count as ``0.0`` (default)\n - ``'pos'``: those queries count as ``1.0``\n - ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned\n - ``'error'``: raise a ``ValueError``\n\n ignore_index:\n Ignore predictions where the target is equal to this number.\n aggregation:\n Specify how to aggregate over indexes. 
Can either a custom callable function that takes in a single tensor\n and returns a scalar value or one of the following strings:\n\n - ``'mean'``: average value is returned\n - ``'median'``: median value is returned\n - ``'max'``: max value is returned\n - ``'min'``: min value is returned\n\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Raises:\n ValueError:\n If ``empty_target_action`` is not one of ``error``, ``skip``, ``neg`` or ``pos``.\n ValueError:\n If ``ignore_index`` is not `None` or an integer.\n\n \"\"\"\n\n is_differentiable: bool = False\n higher_is_better: bool = True\n full_state_update: bool = False\n\n indexes: List[Tensor]\n preds: List[Tensor]\n target: List[Tensor]\n\n def __init__(\n self,\n empty_target_action: str = \"neg\",\n ignore_index: Optional[int] = None,\n aggregation: Union[Literal[\"mean\", \"median\", \"min\", \"max\"], Callable] = \"mean\",\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n self.allow_non_binary_target = False\n\n empty_target_action_options = (\"error\", \"skip\", \"neg\", \"pos\")\n if empty_target_action not in empty_target_action_options:\n raise ValueError(f\"Argument `empty_target_action` received a wrong value `{empty_target_action}`.\")\n self.empty_target_action = empty_target_action\n\n if ignore_index is not None and not isinstance(ignore_index, int):\n raise ValueError(\"Argument `ignore_index` must be an integer or None.\")\n self.ignore_index = ignore_index\n\n if not (aggregation in (\"mean\", \"median\", \"min\", \"max\") or callable(aggregation)):\n raise ValueError(\n \"Argument `aggregation` must be one of `mean`, `median`, `min`, `max` or a custom callable function\"\n f\"which takes tensor of values, but got {aggregation}.\"\n )\n self.aggregation = aggregation\n\n self.add_state(\"indexes\", default=[], dist_reduce_fx=None)\n self.add_state(\"preds\", default=[], dist_reduce_fx=None)\n self.add_state(\"target\", default=[], dist_reduce_fx=None)\n\n def update(self, preds: Tensor, target: Tensor, indexes: Tensor) -> None:\n \"\"\"Check shape, check and convert dtypes, flatten and add to accumulators.\"\"\"\n if indexes is None:\n raise ValueError(\"Argument `indexes` cannot be None\")\n\n indexes, preds, target = _check_retrieval_inputs(\n indexes, preds, target, allow_non_binary_target=self.allow_non_binary_target, ignore_index=self.ignore_index\n )\n\n self.indexes.append(indexes)\n self.preds.append(preds)\n self.target.append(target)\n\n def compute(self) -> Tensor:\n \"\"\"First concat state ``indexes``, ``preds`` and ``target`` since they were stored as lists.\n\n After that, compute list of groups that will help in keeping together predictions about the same query. 
Finally,\n for each group compute the ``_metric`` if the number of positive targets is at least 1, otherwise behave as\n specified by ``self.empty_target_action``.\n\n \"\"\"\n indexes = dim_zero_cat(self.indexes)\n preds = dim_zero_cat(self.preds)\n target = dim_zero_cat(self.target)\n\n indexes, indices = torch.sort(indexes)\n preds = preds[indices]\n target = target[indices]\n\n split_sizes = _flexible_bincount(indexes).detach().cpu().tolist()\n\n res = []\n for mini_preds, mini_target in zip(\n torch.split(preds, split_sizes, dim=0), torch.split(target, split_sizes, dim=0)\n ):\n if not mini_target.sum():\n if self.empty_target_action == \"error\":\n raise ValueError(\"`compute` method was provided with a query with no positive target.\")\n if self.empty_target_action == \"pos\":\n res.append(tensor(1.0))\n elif self.empty_target_action == \"neg\":\n res.append(tensor(0.0))\n else:\n # ensure list contains only float tensors\n res.append(self._metric(mini_preds, mini_target))\n\n if res:\n return _retrieval_aggregate(torch.stack([x.to(preds) for x in res]), self.aggregation)\n return tensor(0.0).to(preds)\n\n @abstractmethod\n def _metric(self, preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"Compute a metric over a predictions and target of a single group.\n\n This method should be overridden by subclasses.\n\n \"\"\"\n", "path": "src/torchmetrics/retrieval/base.py"}]}
2,890
143
gh_patches_debug_13775
rasdani/github-patches
git_diff
docker__docker-py-2726
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Windows: remote docker context via SSH fails on Windows This bug-causing code was added in PR #2691 https://github.com/aiordache/docker-py/blob/f5531a94e1096f4c8456264f6511dfe89e1c825e/docker/transport/sshconn.py#L50 causes docker-compose fail on all commands when using a remote context using SSH on windows. SDK Version: 4.4.0 OS version: Windows 10 2004 Build 19041.630 Docker-compose version: 1.28.0-rc1 <details> <summary>docker version</summary> ``` Client: Version: 19.03.12 API version: 1.40 Go version: go1.13.12 Git commit: 0ed913b8- Built: 07/28/2020 16:36:03 OS/Arch: windows/amd64 Experimental: false Server: Docker Engine - Community Engine: Version: 19.03.14 API version: 1.40 (minimum version 1.12) Go version: go1.13.15 Git commit: 5eb3275d40 Built: Tue Dec 1 19:18:53 2020 OS/Arch: linux/amd64 Experimental: false containerd: Version: 1.3.9 GitCommit: ea765aba0d05254012b0b9e595e995c09186427f runc: Version: 1.0.0-rc10 GitCommit: dc9208a3303feef5b3839f4323d9beb36df0a9dd docker-init: Version: 0.18.0 GitCommit: fec3683 ``` </details> <details> <summary>docker-compose version</summary> ``` docker-compose version docker-compose version 1.28.0-rc1, build 1f469474 docker-py version: 4.4.0 CPython version: 3.9.0 OpenSSL version: OpenSSL 1.1.1g 21 Apr 2020 ``` </details> <details> <summary>Error thrown</summary> ``` PS C:\Users\....\Documents\Projects\....> $env:COMPOSE_PARAMIKO_SSH="0" && docker-compose ps Traceback (most recent call last): File "docker\api\client.py", line 214, in _retrieve_server_version File "docker\api\daemon.py", line 181, in version File "docker\utils\decorators.py", line 46, in inner File "docker\api\client.py", line 237, in _get File "requests\sessions.py", line 543, in get File "requests\sessions.py", line 530, in request File "requests\sessions.py", line 643, in send File "requests\adapters.py", line 439, in send File "urllib3\connectionpool.py", line 670, in urlopen File "urllib3\connectionpool.py", line 392, in _make_request File "http\client.py", line 1255, in request File "http\client.py", line 1301, in _send_request File "http\client.py", line 1250, in endheaders File "http\client.py", line 1010, in _send_output File "http\client.py", line 950, in send File "docker\transport\sshconn.py", line 105, in connect File "docker\transport\sshconn.py", line 45, in connect File "subprocess.py", line 778, in __init__ ValueError: preexec_fn is not supported on Windows platforms During handling of the above exception, another exception occurred: Traceback (most recent call last): File "docker-compose", line 3, in <module> File "compose\cli\main.py", line 68, in main File "compose\cli\main.py", line 124, in perform_command File "compose\cli\command.py", line 60, in project_from_options File "compose\cli\command.py", line 152, in get_project File "compose\cli\docker_client.py", line 41, in get_client File "compose\cli\docker_client.py", line 170, in docker_client File "docker\api\client.py", line 197, in __init__ File "docker\api\client.py", line 221, in _retrieve_server_version docker.errors.DockerException: Error while fetching server API version: preexec_fn is not supported on Windows platforms [580] Failed to execute script docker-compose ``` </details> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `docker/transport/sshconn.py` Content: ``` 1 import paramiko 2 import requests.adapters 3 import six 4 import logging 5 import os 6 import signal 7 import socket 8 import subprocess 9 10 from docker.transport.basehttpadapter import BaseHTTPAdapter 11 from .. import constants 12 13 if six.PY3: 14 import http.client as httplib 15 else: 16 import httplib 17 18 try: 19 import requests.packages.urllib3 as urllib3 20 except ImportError: 21 import urllib3 22 23 RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer 24 25 26 class SSHSocket(socket.socket): 27 def __init__(self, host): 28 super(SSHSocket, self).__init__( 29 socket.AF_INET, socket.SOCK_STREAM) 30 self.host = host 31 self.port = None 32 if ':' in host: 33 self.host, self.port = host.split(':') 34 self.proc = None 35 36 def connect(self, **kwargs): 37 port = '' if not self.port else '-p {}'.format(self.port) 38 args = [ 39 'ssh', 40 '-q', 41 self.host, 42 port, 43 'docker system dial-stdio' 44 ] 45 self.proc = subprocess.Popen( 46 ' '.join(args), 47 shell=True, 48 stdout=subprocess.PIPE, 49 stdin=subprocess.PIPE, 50 preexec_fn=lambda: signal.signal(signal.SIGINT, signal.SIG_IGN)) 51 52 def _write(self, data): 53 if not self.proc or self.proc.stdin.closed: 54 raise Exception('SSH subprocess not initiated.' 55 'connect() must be called first.') 56 written = self.proc.stdin.write(data) 57 self.proc.stdin.flush() 58 return written 59 60 def sendall(self, data): 61 self._write(data) 62 63 def send(self, data): 64 return self._write(data) 65 66 def recv(self, n): 67 if not self.proc: 68 raise Exception('SSH subprocess not initiated.' 69 'connect() must be called first.') 70 return self.proc.stdout.read(n) 71 72 def makefile(self, mode): 73 if not self.proc: 74 self.connect() 75 if six.PY3: 76 self.proc.stdout.channel = self 77 78 return self.proc.stdout 79 80 def close(self): 81 if not self.proc or self.proc.stdin.closed: 82 return 83 self.proc.stdin.write(b'\n\n') 84 self.proc.stdin.flush() 85 self.proc.terminate() 86 87 88 class SSHConnection(httplib.HTTPConnection, object): 89 def __init__(self, ssh_transport=None, timeout=60, host=None): 90 super(SSHConnection, self).__init__( 91 'localhost', timeout=timeout 92 ) 93 self.ssh_transport = ssh_transport 94 self.timeout = timeout 95 self.ssh_host = host 96 97 def connect(self): 98 if self.ssh_transport: 99 sock = self.ssh_transport.open_session() 100 sock.settimeout(self.timeout) 101 sock.exec_command('docker system dial-stdio') 102 else: 103 sock = SSHSocket(self.ssh_host) 104 sock.settimeout(self.timeout) 105 sock.connect() 106 107 self.sock = sock 108 109 110 class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool): 111 scheme = 'ssh' 112 113 def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None): 114 super(SSHConnectionPool, self).__init__( 115 'localhost', timeout=timeout, maxsize=maxsize 116 ) 117 self.ssh_transport = None 118 self.timeout = timeout 119 if ssh_client: 120 self.ssh_transport = ssh_client.get_transport() 121 self.ssh_host = host 122 self.ssh_port = None 123 if ':' in host: 124 self.ssh_host, self.ssh_port = host.split(':') 125 126 def _new_conn(self): 127 return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host) 128 129 # When re-using connections, urllib3 calls fileno() on our 130 # SSH channel instance, quickly overloading our fd limit. 
To avoid this, 131 # we override _get_conn 132 def _get_conn(self, timeout): 133 conn = None 134 try: 135 conn = self.pool.get(block=self.block, timeout=timeout) 136 137 except AttributeError: # self.pool is None 138 raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") 139 140 except six.moves.queue.Empty: 141 if self.block: 142 raise urllib3.exceptions.EmptyPoolError( 143 self, 144 "Pool reached maximum size and no more " 145 "connections are allowed." 146 ) 147 pass # Oh well, we'll create a new connection then 148 149 return conn or self._new_conn() 150 151 152 class SSHHTTPAdapter(BaseHTTPAdapter): 153 154 __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [ 155 'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size' 156 ] 157 158 def __init__(self, base_url, timeout=60, 159 pool_connections=constants.DEFAULT_NUM_POOLS, 160 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE, 161 shell_out=True): 162 self.ssh_client = None 163 if not shell_out: 164 self._create_paramiko_client(base_url) 165 self._connect() 166 167 self.ssh_host = base_url.lstrip('ssh://') 168 self.timeout = timeout 169 self.max_pool_size = max_pool_size 170 self.pools = RecentlyUsedContainer( 171 pool_connections, dispose_func=lambda p: p.close() 172 ) 173 super(SSHHTTPAdapter, self).__init__() 174 175 def _create_paramiko_client(self, base_url): 176 logging.getLogger("paramiko").setLevel(logging.WARNING) 177 self.ssh_client = paramiko.SSHClient() 178 base_url = six.moves.urllib_parse.urlparse(base_url) 179 self.ssh_params = { 180 "hostname": base_url.hostname, 181 "port": base_url.port, 182 "username": base_url.username 183 } 184 ssh_config_file = os.path.expanduser("~/.ssh/config") 185 if os.path.exists(ssh_config_file): 186 conf = paramiko.SSHConfig() 187 with open(ssh_config_file) as f: 188 conf.parse(f) 189 host_config = conf.lookup(base_url.hostname) 190 self.ssh_conf = host_config 191 if 'proxycommand' in host_config: 192 self.ssh_params["sock"] = paramiko.ProxyCommand( 193 self.ssh_conf['proxycommand'] 194 ) 195 if 'hostname' in host_config: 196 self.ssh_params['hostname'] = host_config['hostname'] 197 if base_url.port is None and 'port' in host_config: 198 self.ssh_params['port'] = self.ssh_conf['port'] 199 if base_url.username is None and 'user' in host_config: 200 self.ssh_params['username'] = self.ssh_conf['user'] 201 202 self.ssh_client.load_system_host_keys() 203 self.ssh_client.set_missing_host_key_policy(paramiko.WarningPolicy()) 204 205 def _connect(self): 206 if self.ssh_client: 207 self.ssh_client.connect(**self.ssh_params) 208 209 def get_connection(self, url, proxies=None): 210 if not self.ssh_client: 211 return SSHConnectionPool( 212 ssh_client=self.ssh_client, 213 timeout=self.timeout, 214 maxsize=self.max_pool_size, 215 host=self.ssh_host 216 ) 217 with self.pools.lock: 218 pool = self.pools.get(url) 219 if pool: 220 return pool 221 222 # Connection is closed try a reconnect 223 if self.ssh_client and not self.ssh_client.get_transport(): 224 self._connect() 225 226 pool = SSHConnectionPool( 227 ssh_client=self.ssh_client, 228 timeout=self.timeout, 229 maxsize=self.max_pool_size, 230 host=self.ssh_host 231 ) 232 self.pools[url] = pool 233 234 return pool 235 236 def close(self): 237 super(SSHHTTPAdapter, self).close() 238 if self.ssh_client: 239 self.ssh_client.close() 240 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docker/transport/sshconn.py b/docker/transport/sshconn.py --- a/docker/transport/sshconn.py +++ b/docker/transport/sshconn.py @@ -42,12 +42,17 @@ port, 'docker system dial-stdio' ] + + preexec_func = None + if not constants.IS_WINDOWS_PLATFORM: + preexec_func = lambda: signal.signal(signal.SIGINT, signal.SIG_IGN) + self.proc = subprocess.Popen( ' '.join(args), shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, - preexec_fn=lambda: signal.signal(signal.SIGINT, signal.SIG_IGN)) + preexec_fn=preexec_func) def _write(self, data): if not self.proc or self.proc.stdin.closed:
{"golden_diff": "diff --git a/docker/transport/sshconn.py b/docker/transport/sshconn.py\n--- a/docker/transport/sshconn.py\n+++ b/docker/transport/sshconn.py\n@@ -42,12 +42,17 @@\n port,\n 'docker system dial-stdio'\n ]\n+\n+ preexec_func = None\n+ if not constants.IS_WINDOWS_PLATFORM:\n+ preexec_func = lambda: signal.signal(signal.SIGINT, signal.SIG_IGN)\n+\n self.proc = subprocess.Popen(\n ' '.join(args),\n shell=True,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n- preexec_fn=lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))\n+ preexec_fn=preexec_func)\n \n def _write(self, data):\n if not self.proc or self.proc.stdin.closed:\n", "issue": "Windows: remote docker context via SSH fails on Windows\nThis bug-causing code was added in PR #2691 \r\n\r\nhttps://github.com/aiordache/docker-py/blob/f5531a94e1096f4c8456264f6511dfe89e1c825e/docker/transport/sshconn.py#L50\r\n\r\ncauses docker-compose fail on all commands when using a remote context using SSH on windows.\r\n\r\nSDK Version: 4.4.0\r\nOS version: Windows 10 2004 Build 19041.630\r\nDocker-compose version: 1.28.0-rc1\r\n\r\n<details>\r\n <summary>docker version</summary>\r\n\r\n```\r\nClient:\r\n Version: 19.03.12\r\n API version: 1.40\r\n Go version: go1.13.12\r\n Git commit: 0ed913b8-\r\n Built: 07/28/2020 16:36:03\r\n OS/Arch: windows/amd64\r\n Experimental: false\r\n\r\nServer: Docker Engine - Community\r\n Engine:\r\n Version: 19.03.14\r\n API version: 1.40 (minimum version 1.12)\r\n Go version: go1.13.15\r\n Git commit: 5eb3275d40\r\n Built: Tue Dec 1 19:18:53 2020\r\n OS/Arch: linux/amd64\r\n Experimental: false\r\n containerd:\r\n Version: 1.3.9\r\n GitCommit: ea765aba0d05254012b0b9e595e995c09186427f\r\n runc:\r\n Version: 1.0.0-rc10\r\n GitCommit: dc9208a3303feef5b3839f4323d9beb36df0a9dd\r\n docker-init:\r\n Version: 0.18.0\r\n GitCommit: fec3683\r\n```\r\n\r\n</details>\r\n\r\n\r\n<details>\r\n <summary>docker-compose version</summary>\r\n\r\n```\r\ndocker-compose version\r\ndocker-compose version 1.28.0-rc1, build 1f469474\r\ndocker-py version: 4.4.0\r\nCPython version: 3.9.0\r\nOpenSSL version: OpenSSL 1.1.1g 21 Apr 2020\r\n```\r\n\r\n</details>\r\n\r\n\r\n<details>\r\n <summary>Error thrown</summary>\r\n\r\n```\r\nPS C:\\Users\\....\\Documents\\Projects\\....> $env:COMPOSE_PARAMIKO_SSH=\"0\" && docker-compose ps\r\nTraceback (most recent call last):\r\n File \"docker\\api\\client.py\", line 214, in _retrieve_server_version\r\n File \"docker\\api\\daemon.py\", line 181, in version\r\n File \"docker\\utils\\decorators.py\", line 46, in inner\r\n File \"docker\\api\\client.py\", line 237, in _get\r\n File \"requests\\sessions.py\", line 543, in get\r\n File \"requests\\sessions.py\", line 530, in request\r\n File \"requests\\sessions.py\", line 643, in send\r\n File \"requests\\adapters.py\", line 439, in send\r\n File \"urllib3\\connectionpool.py\", line 670, in urlopen\r\n File \"urllib3\\connectionpool.py\", line 392, in _make_request\r\n File \"http\\client.py\", line 1255, in request\r\n File \"http\\client.py\", line 1301, in _send_request\r\n File \"http\\client.py\", line 1250, in endheaders\r\n File \"http\\client.py\", line 1010, in _send_output\r\n File \"http\\client.py\", line 950, in send\r\n File \"docker\\transport\\sshconn.py\", line 105, in connect\r\n File \"docker\\transport\\sshconn.py\", line 45, in connect\r\n File \"subprocess.py\", line 778, in __init__\r\nValueError: preexec_fn is not supported on Windows platforms\r\n\r\nDuring handling of the above exception, another exception 
occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"docker-compose\", line 3, in <module>\r\n File \"compose\\cli\\main.py\", line 68, in main\r\n File \"compose\\cli\\main.py\", line 124, in perform_command\r\n File \"compose\\cli\\command.py\", line 60, in project_from_options\r\n File \"compose\\cli\\command.py\", line 152, in get_project\r\n File \"compose\\cli\\docker_client.py\", line 41, in get_client\r\n File \"compose\\cli\\docker_client.py\", line 170, in docker_client\r\n File \"docker\\api\\client.py\", line 197, in __init__\r\n File \"docker\\api\\client.py\", line 221, in _retrieve_server_version\r\ndocker.errors.DockerException: Error while fetching server API version: preexec_fn is not supported on Windows platforms\r\n[580] Failed to execute script docker-compose\r\n```\r\n\r\n</details>\r\n\r\n\r\n\n", "before_files": [{"content": "import paramiko\nimport requests.adapters\nimport six\nimport logging\nimport os\nimport signal\nimport socket\nimport subprocess\n\nfrom docker.transport.basehttpadapter import BaseHTTPAdapter\nfrom .. import constants\n\nif six.PY3:\n import http.client as httplib\nelse:\n import httplib\n\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\nRecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer\n\n\nclass SSHSocket(socket.socket):\n def __init__(self, host):\n super(SSHSocket, self).__init__(\n socket.AF_INET, socket.SOCK_STREAM)\n self.host = host\n self.port = None\n if ':' in host:\n self.host, self.port = host.split(':')\n self.proc = None\n\n def connect(self, **kwargs):\n port = '' if not self.port else '-p {}'.format(self.port)\n args = [\n 'ssh',\n '-q',\n self.host,\n port,\n 'docker system dial-stdio'\n ]\n self.proc = subprocess.Popen(\n ' '.join(args),\n shell=True,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n preexec_fn=lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))\n\n def _write(self, data):\n if not self.proc or self.proc.stdin.closed:\n raise Exception('SSH subprocess not initiated.'\n 'connect() must be called first.')\n written = self.proc.stdin.write(data)\n self.proc.stdin.flush()\n return written\n\n def sendall(self, data):\n self._write(data)\n\n def send(self, data):\n return self._write(data)\n\n def recv(self, n):\n if not self.proc:\n raise Exception('SSH subprocess not initiated.'\n 'connect() must be called first.')\n return self.proc.stdout.read(n)\n\n def makefile(self, mode):\n if not self.proc:\n self.connect()\n if six.PY3:\n self.proc.stdout.channel = self\n\n return self.proc.stdout\n\n def close(self):\n if not self.proc or self.proc.stdin.closed:\n return\n self.proc.stdin.write(b'\\n\\n')\n self.proc.stdin.flush()\n self.proc.terminate()\n\n\nclass SSHConnection(httplib.HTTPConnection, object):\n def __init__(self, ssh_transport=None, timeout=60, host=None):\n super(SSHConnection, self).__init__(\n 'localhost', timeout=timeout\n )\n self.ssh_transport = ssh_transport\n self.timeout = timeout\n self.ssh_host = host\n\n def connect(self):\n if self.ssh_transport:\n sock = self.ssh_transport.open_session()\n sock.settimeout(self.timeout)\n sock.exec_command('docker system dial-stdio')\n else:\n sock = SSHSocket(self.ssh_host)\n sock.settimeout(self.timeout)\n sock.connect()\n\n self.sock = sock\n\n\nclass SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):\n scheme = 'ssh'\n\n def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):\n super(SSHConnectionPool, self).__init__(\n 'localhost', 
timeout=timeout, maxsize=maxsize\n )\n self.ssh_transport = None\n self.timeout = timeout\n if ssh_client:\n self.ssh_transport = ssh_client.get_transport()\n self.ssh_host = host\n self.ssh_port = None\n if ':' in host:\n self.ssh_host, self.ssh_port = host.split(':')\n\n def _new_conn(self):\n return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)\n\n # When re-using connections, urllib3 calls fileno() on our\n # SSH channel instance, quickly overloading our fd limit. To avoid this,\n # we override _get_conn\n def _get_conn(self, timeout):\n conn = None\n try:\n conn = self.pool.get(block=self.block, timeout=timeout)\n\n except AttributeError: # self.pool is None\n raise urllib3.exceptions.ClosedPoolError(self, \"Pool is closed.\")\n\n except six.moves.queue.Empty:\n if self.block:\n raise urllib3.exceptions.EmptyPoolError(\n self,\n \"Pool reached maximum size and no more \"\n \"connections are allowed.\"\n )\n pass # Oh well, we'll create a new connection then\n\n return conn or self._new_conn()\n\n\nclass SSHHTTPAdapter(BaseHTTPAdapter):\n\n __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [\n 'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'\n ]\n\n def __init__(self, base_url, timeout=60,\n pool_connections=constants.DEFAULT_NUM_POOLS,\n max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,\n shell_out=True):\n self.ssh_client = None\n if not shell_out:\n self._create_paramiko_client(base_url)\n self._connect()\n\n self.ssh_host = base_url.lstrip('ssh://')\n self.timeout = timeout\n self.max_pool_size = max_pool_size\n self.pools = RecentlyUsedContainer(\n pool_connections, dispose_func=lambda p: p.close()\n )\n super(SSHHTTPAdapter, self).__init__()\n\n def _create_paramiko_client(self, base_url):\n logging.getLogger(\"paramiko\").setLevel(logging.WARNING)\n self.ssh_client = paramiko.SSHClient()\n base_url = six.moves.urllib_parse.urlparse(base_url)\n self.ssh_params = {\n \"hostname\": base_url.hostname,\n \"port\": base_url.port,\n \"username\": base_url.username\n }\n ssh_config_file = os.path.expanduser(\"~/.ssh/config\")\n if os.path.exists(ssh_config_file):\n conf = paramiko.SSHConfig()\n with open(ssh_config_file) as f:\n conf.parse(f)\n host_config = conf.lookup(base_url.hostname)\n self.ssh_conf = host_config\n if 'proxycommand' in host_config:\n self.ssh_params[\"sock\"] = paramiko.ProxyCommand(\n self.ssh_conf['proxycommand']\n )\n if 'hostname' in host_config:\n self.ssh_params['hostname'] = host_config['hostname']\n if base_url.port is None and 'port' in host_config:\n self.ssh_params['port'] = self.ssh_conf['port']\n if base_url.username is None and 'user' in host_config:\n self.ssh_params['username'] = self.ssh_conf['user']\n\n self.ssh_client.load_system_host_keys()\n self.ssh_client.set_missing_host_key_policy(paramiko.WarningPolicy())\n\n def _connect(self):\n if self.ssh_client:\n self.ssh_client.connect(**self.ssh_params)\n\n def get_connection(self, url, proxies=None):\n if not self.ssh_client:\n return SSHConnectionPool(\n ssh_client=self.ssh_client,\n timeout=self.timeout,\n maxsize=self.max_pool_size,\n host=self.ssh_host\n )\n with self.pools.lock:\n pool = self.pools.get(url)\n if pool:\n return pool\n\n # Connection is closed try a reconnect\n if self.ssh_client and not self.ssh_client.get_transport():\n self._connect()\n\n pool = SSHConnectionPool(\n ssh_client=self.ssh_client,\n timeout=self.timeout,\n maxsize=self.max_pool_size,\n host=self.ssh_host\n )\n self.pools[url] = pool\n\n return pool\n\n def close(self):\n 
super(SSHHTTPAdapter, self).close()\n if self.ssh_client:\n self.ssh_client.close()\n", "path": "docker/transport/sshconn.py"}], "after_files": [{"content": "import paramiko\nimport requests.adapters\nimport six\nimport logging\nimport os\nimport signal\nimport socket\nimport subprocess\n\nfrom docker.transport.basehttpadapter import BaseHTTPAdapter\nfrom .. import constants\n\nif six.PY3:\n import http.client as httplib\nelse:\n import httplib\n\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\nRecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer\n\n\nclass SSHSocket(socket.socket):\n def __init__(self, host):\n super(SSHSocket, self).__init__(\n socket.AF_INET, socket.SOCK_STREAM)\n self.host = host\n self.port = None\n if ':' in host:\n self.host, self.port = host.split(':')\n self.proc = None\n\n def connect(self, **kwargs):\n port = '' if not self.port else '-p {}'.format(self.port)\n args = [\n 'ssh',\n '-q',\n self.host,\n port,\n 'docker system dial-stdio'\n ]\n\n preexec_func = None\n if not constants.IS_WINDOWS_PLATFORM:\n preexec_func = lambda: signal.signal(signal.SIGINT, signal.SIG_IGN)\n\n self.proc = subprocess.Popen(\n ' '.join(args),\n shell=True,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n preexec_fn=preexec_func)\n\n def _write(self, data):\n if not self.proc or self.proc.stdin.closed:\n raise Exception('SSH subprocess not initiated.'\n 'connect() must be called first.')\n written = self.proc.stdin.write(data)\n self.proc.stdin.flush()\n return written\n\n def sendall(self, data):\n self._write(data)\n\n def send(self, data):\n return self._write(data)\n\n def recv(self, n):\n if not self.proc:\n raise Exception('SSH subprocess not initiated.'\n 'connect() must be called first.')\n return self.proc.stdout.read(n)\n\n def makefile(self, mode):\n if not self.proc:\n self.connect()\n if six.PY3:\n self.proc.stdout.channel = self\n\n return self.proc.stdout\n\n def close(self):\n if not self.proc or self.proc.stdin.closed:\n return\n self.proc.stdin.write(b'\\n\\n')\n self.proc.stdin.flush()\n self.proc.terminate()\n\n\nclass SSHConnection(httplib.HTTPConnection, object):\n def __init__(self, ssh_transport=None, timeout=60, host=None):\n super(SSHConnection, self).__init__(\n 'localhost', timeout=timeout\n )\n self.ssh_transport = ssh_transport\n self.timeout = timeout\n self.ssh_host = host\n\n def connect(self):\n if self.ssh_transport:\n sock = self.ssh_transport.open_session()\n sock.settimeout(self.timeout)\n sock.exec_command('docker system dial-stdio')\n else:\n sock = SSHSocket(self.ssh_host)\n sock.settimeout(self.timeout)\n sock.connect()\n\n self.sock = sock\n\n\nclass SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):\n scheme = 'ssh'\n\n def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):\n super(SSHConnectionPool, self).__init__(\n 'localhost', timeout=timeout, maxsize=maxsize\n )\n self.ssh_transport = None\n self.timeout = timeout\n if ssh_client:\n self.ssh_transport = ssh_client.get_transport()\n self.ssh_host = host\n self.ssh_port = None\n if ':' in host:\n self.ssh_host, self.ssh_port = host.split(':')\n\n def _new_conn(self):\n return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)\n\n # When re-using connections, urllib3 calls fileno() on our\n # SSH channel instance, quickly overloading our fd limit. 
To avoid this,\n # we override _get_conn\n def _get_conn(self, timeout):\n conn = None\n try:\n conn = self.pool.get(block=self.block, timeout=timeout)\n\n except AttributeError: # self.pool is None\n raise urllib3.exceptions.ClosedPoolError(self, \"Pool is closed.\")\n\n except six.moves.queue.Empty:\n if self.block:\n raise urllib3.exceptions.EmptyPoolError(\n self,\n \"Pool reached maximum size and no more \"\n \"connections are allowed.\"\n )\n pass # Oh well, we'll create a new connection then\n\n return conn or self._new_conn()\n\n\nclass SSHHTTPAdapter(BaseHTTPAdapter):\n\n __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [\n 'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'\n ]\n\n def __init__(self, base_url, timeout=60,\n pool_connections=constants.DEFAULT_NUM_POOLS,\n max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,\n shell_out=True):\n self.ssh_client = None\n if not shell_out:\n self._create_paramiko_client(base_url)\n self._connect()\n\n self.ssh_host = base_url.lstrip('ssh://')\n self.timeout = timeout\n self.max_pool_size = max_pool_size\n self.pools = RecentlyUsedContainer(\n pool_connections, dispose_func=lambda p: p.close()\n )\n super(SSHHTTPAdapter, self).__init__()\n\n def _create_paramiko_client(self, base_url):\n logging.getLogger(\"paramiko\").setLevel(logging.WARNING)\n self.ssh_client = paramiko.SSHClient()\n base_url = six.moves.urllib_parse.urlparse(base_url)\n self.ssh_params = {\n \"hostname\": base_url.hostname,\n \"port\": base_url.port,\n \"username\": base_url.username\n }\n ssh_config_file = os.path.expanduser(\"~/.ssh/config\")\n if os.path.exists(ssh_config_file):\n conf = paramiko.SSHConfig()\n with open(ssh_config_file) as f:\n conf.parse(f)\n host_config = conf.lookup(base_url.hostname)\n self.ssh_conf = host_config\n if 'proxycommand' in host_config:\n self.ssh_params[\"sock\"] = paramiko.ProxyCommand(\n self.ssh_conf['proxycommand']\n )\n if 'hostname' in host_config:\n self.ssh_params['hostname'] = host_config['hostname']\n if base_url.port is None and 'port' in host_config:\n self.ssh_params['port'] = self.ssh_conf['port']\n if base_url.username is None and 'user' in host_config:\n self.ssh_params['username'] = self.ssh_conf['user']\n\n self.ssh_client.load_system_host_keys()\n self.ssh_client.set_missing_host_key_policy(paramiko.WarningPolicy())\n\n def _connect(self):\n if self.ssh_client:\n self.ssh_client.connect(**self.ssh_params)\n\n def get_connection(self, url, proxies=None):\n if not self.ssh_client:\n return SSHConnectionPool(\n ssh_client=self.ssh_client,\n timeout=self.timeout,\n maxsize=self.max_pool_size,\n host=self.ssh_host\n )\n with self.pools.lock:\n pool = self.pools.get(url)\n if pool:\n return pool\n\n # Connection is closed try a reconnect\n if self.ssh_client and not self.ssh_client.get_transport():\n self._connect()\n\n pool = SSHConnectionPool(\n ssh_client=self.ssh_client,\n timeout=self.timeout,\n maxsize=self.max_pool_size,\n host=self.ssh_host\n )\n self.pools[url] = pool\n\n return pool\n\n def close(self):\n super(SSHHTTPAdapter, self).close()\n if self.ssh_client:\n self.ssh_client.close()\n", "path": "docker/transport/sshconn.py"}]}
3,792
180
gh_patches_debug_10357
rasdani/github-patches
git_diff
Parsl__parsl-2450
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- walltime app function parameter modifies task_record func_name **Describe the bug** When the walltime special parameter is passed to a Parsl app the `task_record['func_name']` parameter is set to "wrapper" instead of to the function's name. **To Reproduce** Launch the code below using parsl version 1.2.0: ``` import parsl print(parsl.__version__, flush = True) from parsl.app.app import python_app, bash_app from parsl.config import Config from parsl.executors import HighThroughputExecutor @python_app def test(stdout='std.out', stderr = 'std.err', walltime = 5): from time import sleep sleep(1) Fail = 1/0 return 'Hello' def retry_handler(exception, task_record): print(task_record['func_name'], flush = True) return 1 if __name__ == '__main__': config = Config( executors = [HighThroughputExecutor()], retries = 2, retry_handler = retry_handler ) print('Loading Parsl Config', flush = True) parsl.load(config) fut = test() print(fut.result()) ``` It will print "wrapper" if the walltime parameter is present and test otherwise. **Expected behavior** The code should print the function's name (test). **Environment** - OS: Linux - Python version: 3.10.4 - Parsl version: 1.2.0 **Distributed Environment** - Where are you running the Parsl script from ? Local - Where do you need the workers to run ? Local --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `parsl/app/python.py` Content: ``` 1 import logging 2 3 import tblib.pickling_support 4 tblib.pickling_support.install() 5 6 from parsl.app.app import AppBase 7 from parsl.app.errors import wrap_error 8 from parsl.dataflow.dflow import DataFlowKernelLoader 9 10 11 logger = logging.getLogger(__name__) 12 13 14 def timeout(f, seconds): 15 def wrapper(*args, **kwargs): 16 import threading 17 import ctypes 18 import parsl.app.errors 19 20 def inject_exception(thread): 21 ctypes.pythonapi.PyThreadState_SetAsyncExc( 22 ctypes.c_long(thread), 23 ctypes.py_object(parsl.app.errors.AppTimeout) 24 ) 25 26 thread = threading.current_thread().ident 27 timer = threading.Timer(seconds, inject_exception, args=[thread]) 28 timer.start() 29 result = f(*args, **kwargs) 30 timer.cancel() 31 return result 32 return wrapper 33 34 35 class PythonApp(AppBase): 36 """Extends AppBase to cover the Python App.""" 37 38 def __init__(self, func, data_flow_kernel=None, cache=False, executors='all', ignore_for_cache=[], join=False): 39 super().__init__( 40 wrap_error(func), 41 data_flow_kernel=data_flow_kernel, 42 executors=executors, 43 cache=cache, 44 ignore_for_cache=ignore_for_cache 45 ) 46 self.join = join 47 48 def __call__(self, *args, **kwargs): 49 """This is where the call to a python app is handled. 
50 51 Args: 52 - Arbitrary 53 Kwargs: 54 - Arbitrary 55 56 Returns: 57 App_fut 58 59 """ 60 invocation_kwargs = {} 61 invocation_kwargs.update(self.kwargs) 62 invocation_kwargs.update(kwargs) 63 64 if self.data_flow_kernel is None: 65 dfk = DataFlowKernelLoader.dfk() 66 else: 67 dfk = self.data_flow_kernel 68 69 walltime = invocation_kwargs.get('walltime') 70 if walltime is not None: 71 func = timeout(self.func, walltime) 72 else: 73 func = self.func 74 75 app_fut = dfk.submit(func, app_args=args, 76 executors=self.executors, 77 cache=self.cache, 78 ignore_for_cache=self.ignore_for_cache, 79 app_kwargs=invocation_kwargs, 80 join=self.join) 81 82 return app_fut 83 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/parsl/app/python.py b/parsl/app/python.py --- a/parsl/app/python.py +++ b/parsl/app/python.py @@ -3,6 +3,8 @@ import tblib.pickling_support tblib.pickling_support.install() +from functools import wraps + from parsl.app.app import AppBase from parsl.app.errors import wrap_error from parsl.dataflow.dflow import DataFlowKernelLoader @@ -12,6 +14,7 @@ def timeout(f, seconds): + @wraps(f) def wrapper(*args, **kwargs): import threading import ctypes
{"golden_diff": "diff --git a/parsl/app/python.py b/parsl/app/python.py\n--- a/parsl/app/python.py\n+++ b/parsl/app/python.py\n@@ -3,6 +3,8 @@\n import tblib.pickling_support\n tblib.pickling_support.install()\n \n+from functools import wraps\n+\n from parsl.app.app import AppBase\n from parsl.app.errors import wrap_error\n from parsl.dataflow.dflow import DataFlowKernelLoader\n@@ -12,6 +14,7 @@\n \n \n def timeout(f, seconds):\n+ @wraps(f)\n def wrapper(*args, **kwargs):\n import threading\n import ctypes\n", "issue": "walltime app function parameter modifies task_record func_name\n**Describe the bug**\r\nWhen the walltime special parameter is passed to a Parsl app the `task_record['func_name']` parameter is set to \"wrapper\" instead of to the function's name. \r\n\r\n**To Reproduce**\r\nLaunch the code below using parsl version 1.2.0:\r\n```\r\nimport parsl\r\nprint(parsl.__version__, flush = True)\r\nfrom parsl.app.app import python_app, bash_app\r\nfrom parsl.config import Config\r\nfrom parsl.executors import HighThroughputExecutor\r\n\r\n\r\n@python_app\r\ndef test(stdout='std.out', stderr = 'std.err', walltime = 5):\r\n from time import sleep\r\n sleep(1)\r\n Fail = 1/0\r\n return 'Hello'\r\n\r\ndef retry_handler(exception, task_record):\r\n print(task_record['func_name'], flush = True)\r\n return 1\r\n\r\nif __name__ == '__main__':\r\n\r\n config = Config(\r\n executors = [HighThroughputExecutor()],\r\n retries = 2,\r\n retry_handler = retry_handler\r\n )\r\n print('Loading Parsl Config', flush = True)\r\n parsl.load(config)\r\n\r\n fut = test()\r\n\r\n print(fut.result())\r\n\r\n```\r\n\r\nIt will print \"wrapper\" if the walltime parameter is present and test otherwise. \r\n\r\n**Expected behavior**\r\nThe code should print the function's name (test).\r\n\r\n**Environment**\r\n- OS: Linux\r\n- Python version: 3.10.4\r\n- Parsl version: 1.2.0\r\n\r\n\r\n**Distributed Environment**\r\n- Where are you running the Parsl script from ? Local\r\n- Where do you need the workers to run ? 
Local\r\n\n", "before_files": [{"content": "import logging\n\nimport tblib.pickling_support\ntblib.pickling_support.install()\n\nfrom parsl.app.app import AppBase\nfrom parsl.app.errors import wrap_error\nfrom parsl.dataflow.dflow import DataFlowKernelLoader\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef timeout(f, seconds):\n def wrapper(*args, **kwargs):\n import threading\n import ctypes\n import parsl.app.errors\n\n def inject_exception(thread):\n ctypes.pythonapi.PyThreadState_SetAsyncExc(\n ctypes.c_long(thread),\n ctypes.py_object(parsl.app.errors.AppTimeout)\n )\n\n thread = threading.current_thread().ident\n timer = threading.Timer(seconds, inject_exception, args=[thread])\n timer.start()\n result = f(*args, **kwargs)\n timer.cancel()\n return result\n return wrapper\n\n\nclass PythonApp(AppBase):\n \"\"\"Extends AppBase to cover the Python App.\"\"\"\n\n def __init__(self, func, data_flow_kernel=None, cache=False, executors='all', ignore_for_cache=[], join=False):\n super().__init__(\n wrap_error(func),\n data_flow_kernel=data_flow_kernel,\n executors=executors,\n cache=cache,\n ignore_for_cache=ignore_for_cache\n )\n self.join = join\n\n def __call__(self, *args, **kwargs):\n \"\"\"This is where the call to a python app is handled.\n\n Args:\n - Arbitrary\n Kwargs:\n - Arbitrary\n\n Returns:\n App_fut\n\n \"\"\"\n invocation_kwargs = {}\n invocation_kwargs.update(self.kwargs)\n invocation_kwargs.update(kwargs)\n\n if self.data_flow_kernel is None:\n dfk = DataFlowKernelLoader.dfk()\n else:\n dfk = self.data_flow_kernel\n\n walltime = invocation_kwargs.get('walltime')\n if walltime is not None:\n func = timeout(self.func, walltime)\n else:\n func = self.func\n\n app_fut = dfk.submit(func, app_args=args,\n executors=self.executors,\n cache=self.cache,\n ignore_for_cache=self.ignore_for_cache,\n app_kwargs=invocation_kwargs,\n join=self.join)\n\n return app_fut\n", "path": "parsl/app/python.py"}], "after_files": [{"content": "import logging\n\nimport tblib.pickling_support\ntblib.pickling_support.install()\n\nfrom functools import wraps\n\nfrom parsl.app.app import AppBase\nfrom parsl.app.errors import wrap_error\nfrom parsl.dataflow.dflow import DataFlowKernelLoader\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef timeout(f, seconds):\n @wraps(f)\n def wrapper(*args, **kwargs):\n import threading\n import ctypes\n import parsl.app.errors\n\n def inject_exception(thread):\n ctypes.pythonapi.PyThreadState_SetAsyncExc(\n ctypes.c_long(thread),\n ctypes.py_object(parsl.app.errors.AppTimeout)\n )\n\n thread = threading.current_thread().ident\n timer = threading.Timer(seconds, inject_exception, args=[thread])\n timer.start()\n result = f(*args, **kwargs)\n timer.cancel()\n return result\n return wrapper\n\n\nclass PythonApp(AppBase):\n \"\"\"Extends AppBase to cover the Python App.\"\"\"\n\n def __init__(self, func, data_flow_kernel=None, cache=False, executors='all', ignore_for_cache=[], join=False):\n super().__init__(\n wrap_error(func),\n data_flow_kernel=data_flow_kernel,\n executors=executors,\n cache=cache,\n ignore_for_cache=ignore_for_cache\n )\n self.join = join\n\n def __call__(self, *args, **kwargs):\n \"\"\"This is where the call to a python app is handled.\n\n Args:\n - Arbitrary\n Kwargs:\n - Arbitrary\n\n Returns:\n App_fut\n\n \"\"\"\n invocation_kwargs = {}\n invocation_kwargs.update(self.kwargs)\n invocation_kwargs.update(kwargs)\n\n if self.data_flow_kernel is None:\n dfk = DataFlowKernelLoader.dfk()\n else:\n dfk = self.data_flow_kernel\n\n walltime = 
invocation_kwargs.get('walltime')\n if walltime is not None:\n func = timeout(self.func, walltime)\n else:\n func = self.func\n\n app_fut = dfk.submit(func, app_args=args,\n executors=self.executors,\n cache=self.cache,\n ignore_for_cache=self.ignore_for_cache,\n app_kwargs=invocation_kwargs,\n join=self.join)\n\n return app_fut\n", "path": "parsl/app/python.py"}]}
1,257
142
gh_patches_debug_24727
rasdani/github-patches
git_diff
sublimelsp__LSP-1852
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unable to save modified file when dragged to a new window Description Editing typescript project, dragged a tab out to a separate window then modified some lines and tried to save. Sublime won't save unless I drag the tab back into the open project. (see sublime issue - https://github.com/sublimehq/sublime_text/issues/4623) Steps to reproduce Start Sublime Text, open a directory containing typescript files. (make sure to have the LSP plugin installed) open multiple files in tabs drag one of the tabs out to a separate window modify the tab that's been dragged out, then try to save Expected behavior Expect the separate window/modified file to save. Actual behavior ![125587999-89de6ff6-454e-4746-ba01-375f066a4c3b](https://user-images.githubusercontent.com/34407152/125639978-7c250834-f248-4ea8-8eda-3122dd66f7d8.png) The separate window doesn't save with cmd+s (can still see the modified indication on top (circle icon/symbol) Environment Sublime Build: 4112 Operating system and version: macOS 11.4, --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugin/save_command.py` Content: ``` 1 from .core.registry import LspTextCommand 2 from .core.settings import userprefs 3 from .core.typing import Callable, List, Type 4 from abc import ABCMeta, abstractmethod 5 import sublime 6 import sublime_plugin 7 8 9 class SaveTask(metaclass=ABCMeta): 10 """ 11 Base class for tasks that run on save. 12 13 Note: The whole task runs on the async thread. 14 """ 15 16 @classmethod 17 @abstractmethod 18 def is_applicable(cls, view: sublime.View) -> bool: 19 pass 20 21 def __init__(self, task_runner: LspTextCommand, on_done: Callable[[], None]): 22 self._task_runner = task_runner 23 self._on_done = on_done 24 self._completed = False 25 self._cancelled = False 26 self._status_key = 'lsp_save_task_timeout' 27 28 def run_async(self) -> None: 29 self._erase_view_status() 30 sublime.set_timeout_async(self._on_timeout, userprefs().on_save_task_timeout_ms) 31 32 def _on_timeout(self) -> None: 33 if not self._completed and not self._cancelled: 34 self._set_view_status('LSP: Timeout processing {}'.format(self.__class__.__name__)) 35 self._cancelled = True 36 self._on_done() 37 38 def cancel(self) -> None: 39 self._cancelled = True 40 41 def _set_view_status(self, text: str) -> None: 42 self._task_runner.view.set_status(self._status_key, text) 43 sublime.set_timeout_async(self._erase_view_status, 5000) 44 45 def _erase_view_status(self) -> None: 46 self._task_runner.view.erase_status(self._status_key) 47 48 def _on_complete(self) -> None: 49 assert not self._completed 50 self._completed = True 51 if not self._cancelled: 52 self._on_done() 53 54 def _purge_changes_async(self) -> None: 55 # Supermassive hack that will go away later. 56 listeners = sublime_plugin.view_event_listeners.get(self._task_runner.view.id(), []) 57 for listener in listeners: 58 if listener.__class__.__name__ == 'DocumentSyncListener': 59 listener.purge_changes_async() # type: ignore 60 break 61 62 63 class LspSaveCommand(LspTextCommand): 64 """ 65 A command used as a substitute for native save command. Runs code actions and document 66 formatting before triggering the native save command. 
67 """ 68 _tasks = [] # type: List[Type[SaveTask]] 69 70 @classmethod 71 def register_task(cls, task: Type[SaveTask]) -> None: 72 assert task not in cls._tasks 73 cls._tasks.append(task) 74 75 def __init__(self, view: sublime.View) -> None: 76 super().__init__(view) 77 self._pending_tasks = [] # type: List[SaveTask] 78 79 def run(self, edit: sublime.Edit) -> None: 80 if self._pending_tasks: 81 for task in self._pending_tasks: 82 task.cancel() 83 self._pending_tasks = [] 84 sublime.set_timeout_async(self._trigger_on_pre_save_async) 85 for Task in self._tasks: 86 if Task.is_applicable(self.view): 87 self._pending_tasks.append(Task(self, self._on_task_completed_async)) 88 if self._pending_tasks: 89 sublime.set_timeout_async(self._run_next_task_async) 90 else: 91 self._trigger_native_save() 92 93 def _trigger_on_pre_save_async(self) -> None: 94 # Supermassive hack that will go away later. 95 listeners = sublime_plugin.view_event_listeners.get(self.view.id(), []) 96 for listener in listeners: 97 if listener.__class__.__name__ == 'DocumentSyncListener': 98 listener.trigger_on_pre_save_async() # type: ignore 99 break 100 101 def _run_next_task_async(self) -> None: 102 current_task = self._pending_tasks[0] 103 current_task.run_async() 104 105 def _on_task_completed_async(self) -> None: 106 self._pending_tasks.pop(0) 107 if self._pending_tasks: 108 self._run_next_task_async() 109 else: 110 self._trigger_native_save() 111 112 def _trigger_native_save(self) -> None: 113 # Triggered from set_timeout to preserve original semantics of on_pre_save handling 114 sublime.set_timeout(lambda: self.view.run_command('save', {"async": True})) 115 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugin/save_command.py b/plugin/save_command.py --- a/plugin/save_command.py +++ b/plugin/save_command.py @@ -1,6 +1,6 @@ from .core.registry import LspTextCommand from .core.settings import userprefs -from .core.typing import Callable, List, Type +from .core.typing import Callable, List, Optional, Type from abc import ABCMeta, abstractmethod import sublime import sublime_plugin @@ -90,6 +90,15 @@ else: self._trigger_native_save() + def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool: + # Workaround to ensure that the command will run, even if a view was dragged out to a new window, + # see https://github.com/sublimelsp/LSP/issues/1791. + # The check to determine whether the keybinding for lsp_save is applicable already happens in + # DocumentSyncListener.on_query_context and should not be required here, if lsp_save is only used for the + # keybinding. A proper fix should ensure that LspTextCommand.is_enabled returns the correct value even for + # dragged out views and that LSP keeps working as expected. + return True + def _trigger_on_pre_save_async(self) -> None: # Supermassive hack that will go away later. listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])
{"golden_diff": "diff --git a/plugin/save_command.py b/plugin/save_command.py\n--- a/plugin/save_command.py\n+++ b/plugin/save_command.py\n@@ -1,6 +1,6 @@\n from .core.registry import LspTextCommand\n from .core.settings import userprefs\n-from .core.typing import Callable, List, Type\n+from .core.typing import Callable, List, Optional, Type\n from abc import ABCMeta, abstractmethod\n import sublime\n import sublime_plugin\n@@ -90,6 +90,15 @@\n else:\n self._trigger_native_save()\n \n+ def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:\n+ # Workaround to ensure that the command will run, even if a view was dragged out to a new window,\n+ # see https://github.com/sublimelsp/LSP/issues/1791.\n+ # The check to determine whether the keybinding for lsp_save is applicable already happens in\n+ # DocumentSyncListener.on_query_context and should not be required here, if lsp_save is only used for the\n+ # keybinding. A proper fix should ensure that LspTextCommand.is_enabled returns the correct value even for\n+ # dragged out views and that LSP keeps working as expected.\n+ return True\n+\n def _trigger_on_pre_save_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])\n", "issue": "Unable to save modified file when dragged to a new window\nDescription\r\nEditing typescript project, dragged a tab out to a separate window then modified some lines and tried to save. Sublime won't save unless I drag the tab back into the open project. (see sublime issue - https://github.com/sublimehq/sublime_text/issues/4623)\r\n\r\nSteps to reproduce\r\nStart Sublime Text, open a directory containing typescript files. (make sure to have the LSP plugin installed)\r\nopen multiple files in tabs\r\ndrag one of the tabs out to a separate window\r\nmodify the tab that's been dragged out, then try to save\r\nExpected behavior\r\nExpect the separate window/modified file to save.\r\n\r\nActual behavior\r\n![125587999-89de6ff6-454e-4746-ba01-375f066a4c3b](https://user-images.githubusercontent.com/34407152/125639978-7c250834-f248-4ea8-8eda-3122dd66f7d8.png)\r\n\r\n\r\nThe separate window doesn't save with cmd+s (can still see the modified indication on top (circle icon/symbol)\r\n\r\nEnvironment\r\nSublime Build: 4112\r\nOperating system and version: macOS 11.4,\n", "before_files": [{"content": "from .core.registry import LspTextCommand\nfrom .core.settings import userprefs\nfrom .core.typing import Callable, List, Type\nfrom abc import ABCMeta, abstractmethod\nimport sublime\nimport sublime_plugin\n\n\nclass SaveTask(metaclass=ABCMeta):\n \"\"\"\n Base class for tasks that run on save.\n\n Note: The whole task runs on the async thread.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def is_applicable(cls, view: sublime.View) -> bool:\n pass\n\n def __init__(self, task_runner: LspTextCommand, on_done: Callable[[], None]):\n self._task_runner = task_runner\n self._on_done = on_done\n self._completed = False\n self._cancelled = False\n self._status_key = 'lsp_save_task_timeout'\n\n def run_async(self) -> None:\n self._erase_view_status()\n sublime.set_timeout_async(self._on_timeout, userprefs().on_save_task_timeout_ms)\n\n def _on_timeout(self) -> None:\n if not self._completed and not self._cancelled:\n self._set_view_status('LSP: Timeout processing {}'.format(self.__class__.__name__))\n self._cancelled = True\n self._on_done()\n\n def cancel(self) -> None:\n self._cancelled = True\n\n def 
_set_view_status(self, text: str) -> None:\n self._task_runner.view.set_status(self._status_key, text)\n sublime.set_timeout_async(self._erase_view_status, 5000)\n\n def _erase_view_status(self) -> None:\n self._task_runner.view.erase_status(self._status_key)\n\n def _on_complete(self) -> None:\n assert not self._completed\n self._completed = True\n if not self._cancelled:\n self._on_done()\n\n def _purge_changes_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self._task_runner.view.id(), [])\n for listener in listeners:\n if listener.__class__.__name__ == 'DocumentSyncListener':\n listener.purge_changes_async() # type: ignore\n break\n\n\nclass LspSaveCommand(LspTextCommand):\n \"\"\"\n A command used as a substitute for native save command. Runs code actions and document\n formatting before triggering the native save command.\n \"\"\"\n _tasks = [] # type: List[Type[SaveTask]]\n\n @classmethod\n def register_task(cls, task: Type[SaveTask]) -> None:\n assert task not in cls._tasks\n cls._tasks.append(task)\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self._pending_tasks = [] # type: List[SaveTask]\n\n def run(self, edit: sublime.Edit) -> None:\n if self._pending_tasks:\n for task in self._pending_tasks:\n task.cancel()\n self._pending_tasks = []\n sublime.set_timeout_async(self._trigger_on_pre_save_async)\n for Task in self._tasks:\n if Task.is_applicable(self.view):\n self._pending_tasks.append(Task(self, self._on_task_completed_async))\n if self._pending_tasks:\n sublime.set_timeout_async(self._run_next_task_async)\n else:\n self._trigger_native_save()\n\n def _trigger_on_pre_save_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])\n for listener in listeners:\n if listener.__class__.__name__ == 'DocumentSyncListener':\n listener.trigger_on_pre_save_async() # type: ignore\n break\n\n def _run_next_task_async(self) -> None:\n current_task = self._pending_tasks[0]\n current_task.run_async()\n\n def _on_task_completed_async(self) -> None:\n self._pending_tasks.pop(0)\n if self._pending_tasks:\n self._run_next_task_async()\n else:\n self._trigger_native_save()\n\n def _trigger_native_save(self) -> None:\n # Triggered from set_timeout to preserve original semantics of on_pre_save handling\n sublime.set_timeout(lambda: self.view.run_command('save', {\"async\": True}))\n", "path": "plugin/save_command.py"}], "after_files": [{"content": "from .core.registry import LspTextCommand\nfrom .core.settings import userprefs\nfrom .core.typing import Callable, List, Optional, Type\nfrom abc import ABCMeta, abstractmethod\nimport sublime\nimport sublime_plugin\n\n\nclass SaveTask(metaclass=ABCMeta):\n \"\"\"\n Base class for tasks that run on save.\n\n Note: The whole task runs on the async thread.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def is_applicable(cls, view: sublime.View) -> bool:\n pass\n\n def __init__(self, task_runner: LspTextCommand, on_done: Callable[[], None]):\n self._task_runner = task_runner\n self._on_done = on_done\n self._completed = False\n self._cancelled = False\n self._status_key = 'lsp_save_task_timeout'\n\n def run_async(self) -> None:\n self._erase_view_status()\n sublime.set_timeout_async(self._on_timeout, userprefs().on_save_task_timeout_ms)\n\n def _on_timeout(self) -> None:\n if not self._completed and not self._cancelled:\n self._set_view_status('LSP: Timeout processing 
{}'.format(self.__class__.__name__))\n self._cancelled = True\n self._on_done()\n\n def cancel(self) -> None:\n self._cancelled = True\n\n def _set_view_status(self, text: str) -> None:\n self._task_runner.view.set_status(self._status_key, text)\n sublime.set_timeout_async(self._erase_view_status, 5000)\n\n def _erase_view_status(self) -> None:\n self._task_runner.view.erase_status(self._status_key)\n\n def _on_complete(self) -> None:\n assert not self._completed\n self._completed = True\n if not self._cancelled:\n self._on_done()\n\n def _purge_changes_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self._task_runner.view.id(), [])\n for listener in listeners:\n if listener.__class__.__name__ == 'DocumentSyncListener':\n listener.purge_changes_async() # type: ignore\n break\n\n\nclass LspSaveCommand(LspTextCommand):\n \"\"\"\n A command used as a substitute for native save command. Runs code actions and document\n formatting before triggering the native save command.\n \"\"\"\n _tasks = [] # type: List[Type[SaveTask]]\n\n @classmethod\n def register_task(cls, task: Type[SaveTask]) -> None:\n assert task not in cls._tasks\n cls._tasks.append(task)\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self._pending_tasks = [] # type: List[SaveTask]\n\n def run(self, edit: sublime.Edit) -> None:\n if self._pending_tasks:\n for task in self._pending_tasks:\n task.cancel()\n self._pending_tasks = []\n sublime.set_timeout_async(self._trigger_on_pre_save_async)\n for Task in self._tasks:\n if Task.is_applicable(self.view):\n self._pending_tasks.append(Task(self, self._on_task_completed_async))\n if self._pending_tasks:\n sublime.set_timeout_async(self._run_next_task_async)\n else:\n self._trigger_native_save()\n\n def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:\n # Workaround to ensure that the command will run, even if a view was dragged out to a new window,\n # see https://github.com/sublimelsp/LSP/issues/1791.\n # The check to determine whether the keybinding for lsp_save is applicable already happens in\n # DocumentSyncListener.on_query_context and should not be required here, if lsp_save is only used for the\n # keybinding. A proper fix should ensure that LspTextCommand.is_enabled returns the correct value even for\n # dragged out views and that LSP keeps working as expected.\n return True\n\n def _trigger_on_pre_save_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])\n for listener in listeners:\n if listener.__class__.__name__ == 'DocumentSyncListener':\n listener.trigger_on_pre_save_async() # type: ignore\n break\n\n def _run_next_task_async(self) -> None:\n current_task = self._pending_tasks[0]\n current_task.run_async()\n\n def _on_task_completed_async(self) -> None:\n self._pending_tasks.pop(0)\n if self._pending_tasks:\n self._run_next_task_async()\n else:\n self._trigger_native_save()\n\n def _trigger_native_save(self) -> None:\n # Triggered from set_timeout to preserve original semantics of on_pre_save handling\n sublime.set_timeout(lambda: self.view.run_command('save', {\"async\": True}))\n", "path": "plugin/save_command.py"}]}
num_tokens: 1,729
num_tokens_diff: 327
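The patch recorded above works by forcing `is_enabled()` to return `True` on the save command so that Sublime still dispatches it for views dragged out to a new window. The snippet below is a plain-Python sketch of that dispatch contract, not the real Sublime API: the class names and the `dispatch()` driver are invented stand-ins used only to show why an always-true availability check unblocks the command.

```python
# Plain-Python stand-in for sublime_plugin.TextCommand dispatch (hypothetical names).
# A command only runs when its availability check passes, so a check that fails for
# a view detached into a new window silently swallows Ctrl/Cmd+S.

class FakeTextCommand:
    def is_enabled(self):
        # Simulates the broken case: the view lost its window after being dragged out.
        return False

    def run(self):
        raise NotImplementedError

    def dispatch(self):
        # Mirrors how the editor decides whether to execute the command at all.
        if not self.is_enabled():
            return "save skipped: is_enabled() returned False"
        return self.run()


class BrokenSaveCommand(FakeTextCommand):
    def run(self):
        return "native save triggered"


class FakeLspSaveCommand(FakeTextCommand):
    def is_enabled(self):
        # The workaround from the diff: always report the command as enabled and let
        # an earlier context check decide whether the keybinding applies.
        return True

    def run(self):
        return "native save triggered"


if __name__ == "__main__":
    print(BrokenSaveCommand().dispatch())    # save skipped: is_enabled() returned False
    print(FakeLspSaveCommand().dispatch())   # native save triggered
```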
problem_id: gh_patches_debug_10398
source: rasdani/github-patches
task_type: git_diff
in_source_id: hpcaitech__ColossalAI-4526
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `colossalai/pipeline/schedule/_utils.py` Content: ``` 1 from typing import Any, List, Optional 2 3 import torch 4 import torch.cuda 5 from torch.nn import Module 6 from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten 7 8 9 def to_device(x: Any, device: Optional[torch.device] = None) -> Any: 10 """Move object to device if it is a tensor. 11 12 Args: 13 x (Any): Object to be moved. 14 device (Optional[torch.device], optional): Target device. Defaults to None. 15 16 Returns: 17 Any: Moved object. 18 """ 19 if isinstance(x, torch.Tensor): 20 return x.to(device) 21 return x 22 23 24 def get_batch_size(batch: Any) -> int: 25 """Get the batch size (size of dimension-0) of the first tensor in the batch. 26 27 Args: 28 batch (Any): Batch to be inspected. 29 30 Raises: 31 RuntimeError: If no tensor is found in the batch. 32 33 Returns: 34 int: Batch size. 35 """ 36 data_list, _ = tree_flatten(batch) 37 for data in data_list: 38 if isinstance(data, torch.Tensor): 39 return data.size(0) 40 raise RuntimeError('No tensor found in the batch') 41 42 43 def get_micro_batch(batch: Any, start: int, micro_batch_size: int) -> Any: 44 """Get a micro batch of the original batch. 45 46 Args: 47 batch (Any): Batch to be sliced. 48 start (int): Start index of the micro batch. 49 micro_batch_size (int): Size of the micro batch. 50 51 Returns: 52 Any: Target micro batch. 53 """ 54 55 def _get_tensor_slice(x: Any): 56 if isinstance(x, torch.Tensor): 57 return x[start:start + micro_batch_size] 58 return x 59 60 return tree_map(_get_tensor_slice, batch) 61 62 63 def model_forward(model: Module, data: Any, internal_inputs: Optional[dict]) -> Any: 64 """Call model forward function with data and internal inputs. 65 66 Args: 67 model (Module): Model to be called. 68 data (Any): Data loaded from data iterator. 69 internal_inputs (Optional[dict]): Data from previous stage. It must be a dict or None if it's the first stage. 70 71 Returns: 72 Any: Outputs of the model. 73 """ 74 if internal_inputs is None: 75 internal_inputs = {} 76 if isinstance(data, (list, tuple)): 77 return model(*data, **internal_inputs) 78 elif isinstance(data, dict): 79 return model(**data, **internal_inputs) 80 return model(data, **internal_inputs) 81 82 83 def retain_grad(x: Any) -> None: 84 """Call retain_grad() on a tensor. 85 86 Args: 87 x (Any): Object to be called. 88 """ 89 if isinstance(x, torch.Tensor) and x.requires_grad: 90 x.retain_grad() 91 92 93 def detach(x: Any) -> Any: 94 """Call detach() on a tensor. 95 96 Args: 97 x (Any): Object to be called. 98 99 Returns: 100 Any: The detached object. 101 """ 102 if isinstance(x, torch.Tensor): 103 return x.detach() 104 return x 105 106 107 def merge_batch(data: List[Any]) -> Any: 108 """Merge micro batches into a batch. 109 110 Args: 111 data (List[Any]): A list of micro batches. 112 113 Returns: 114 Any: Merge batch. 
115 """ 116 if len(data) == 0: 117 return 118 flattened_data = [] 119 tree_spec = None 120 for d in data: 121 elems, tree_spec = tree_flatten(d) 122 flattened_data.append(elems) 123 merged_data = [] 124 for elem_batch in zip(*flattened_data): 125 if isinstance(elem_batch[0], torch.Tensor): 126 merged_data.append(torch.cat(elem_batch, dim=0)) 127 else: 128 merged_data.append(list(elem_batch)) 129 return tree_unflatten(merged_data, tree_spec) 130 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/colossalai/pipeline/schedule/_utils.py b/colossalai/pipeline/schedule/_utils.py --- a/colossalai/pipeline/schedule/_utils.py +++ b/colossalai/pipeline/schedule/_utils.py @@ -123,7 +123,10 @@ merged_data = [] for elem_batch in zip(*flattened_data): if isinstance(elem_batch[0], torch.Tensor): - merged_data.append(torch.cat(elem_batch, dim=0)) + if len(elem_batch[0].shape) == 0: # set loss to None in pipeline outputs + merged_data.append(None) + else: + merged_data.append(torch.cat(elem_batch, dim=0)) else: merged_data.append(list(elem_batch)) return tree_unflatten(merged_data, tree_spec)
{"golden_diff": "diff --git a/colossalai/pipeline/schedule/_utils.py b/colossalai/pipeline/schedule/_utils.py\n--- a/colossalai/pipeline/schedule/_utils.py\n+++ b/colossalai/pipeline/schedule/_utils.py\n@@ -123,7 +123,10 @@\n merged_data = []\n for elem_batch in zip(*flattened_data):\n if isinstance(elem_batch[0], torch.Tensor):\n- merged_data.append(torch.cat(elem_batch, dim=0))\n+ if len(elem_batch[0].shape) == 0: # set loss to None in pipeline outputs\n+ merged_data.append(None)\n+ else:\n+ merged_data.append(torch.cat(elem_batch, dim=0))\n else:\n merged_data.append(list(elem_batch))\n return tree_unflatten(merged_data, tree_spec)\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from typing import Any, List, Optional\n\nimport torch\nimport torch.cuda\nfrom torch.nn import Module\nfrom torch.utils._pytree import tree_flatten, tree_map, tree_unflatten\n\n\ndef to_device(x: Any, device: Optional[torch.device] = None) -> Any:\n \"\"\"Move object to device if it is a tensor.\n\n Args:\n x (Any): Object to be moved.\n device (Optional[torch.device], optional): Target device. Defaults to None.\n\n Returns:\n Any: Moved object.\n \"\"\"\n if isinstance(x, torch.Tensor):\n return x.to(device)\n return x\n\n\ndef get_batch_size(batch: Any) -> int:\n \"\"\"Get the batch size (size of dimension-0) of the first tensor in the batch.\n\n Args:\n batch (Any): Batch to be inspected.\n\n Raises:\n RuntimeError: If no tensor is found in the batch.\n\n Returns:\n int: Batch size.\n \"\"\"\n data_list, _ = tree_flatten(batch)\n for data in data_list:\n if isinstance(data, torch.Tensor):\n return data.size(0)\n raise RuntimeError('No tensor found in the batch')\n\n\ndef get_micro_batch(batch: Any, start: int, micro_batch_size: int) -> Any:\n \"\"\"Get a micro batch of the original batch.\n\n Args:\n batch (Any): Batch to be sliced.\n start (int): Start index of the micro batch.\n micro_batch_size (int): Size of the micro batch.\n\n Returns:\n Any: Target micro batch.\n \"\"\"\n\n def _get_tensor_slice(x: Any):\n if isinstance(x, torch.Tensor):\n return x[start:start + micro_batch_size]\n return x\n\n return tree_map(_get_tensor_slice, batch)\n\n\ndef model_forward(model: Module, data: Any, internal_inputs: Optional[dict]) -> Any:\n \"\"\"Call model forward function with data and internal inputs.\n\n Args:\n model (Module): Model to be called.\n data (Any): Data loaded from data iterator.\n internal_inputs (Optional[dict]): Data from previous stage. 
It must be a dict or None if it's the first stage.\n\n Returns:\n Any: Outputs of the model.\n \"\"\"\n if internal_inputs is None:\n internal_inputs = {}\n if isinstance(data, (list, tuple)):\n return model(*data, **internal_inputs)\n elif isinstance(data, dict):\n return model(**data, **internal_inputs)\n return model(data, **internal_inputs)\n\n\ndef retain_grad(x: Any) -> None:\n \"\"\"Call retain_grad() on a tensor.\n\n Args:\n x (Any): Object to be called.\n \"\"\"\n if isinstance(x, torch.Tensor) and x.requires_grad:\n x.retain_grad()\n\n\ndef detach(x: Any) -> Any:\n \"\"\"Call detach() on a tensor.\n\n Args:\n x (Any): Object to be called.\n\n Returns:\n Any: The detached object.\n \"\"\"\n if isinstance(x, torch.Tensor):\n return x.detach()\n return x\n\n\ndef merge_batch(data: List[Any]) -> Any:\n \"\"\"Merge micro batches into a batch.\n\n Args:\n data (List[Any]): A list of micro batches.\n\n Returns:\n Any: Merge batch.\n \"\"\"\n if len(data) == 0:\n return\n flattened_data = []\n tree_spec = None\n for d in data:\n elems, tree_spec = tree_flatten(d)\n flattened_data.append(elems)\n merged_data = []\n for elem_batch in zip(*flattened_data):\n if isinstance(elem_batch[0], torch.Tensor):\n merged_data.append(torch.cat(elem_batch, dim=0))\n else:\n merged_data.append(list(elem_batch))\n return tree_unflatten(merged_data, tree_spec)\n", "path": "colossalai/pipeline/schedule/_utils.py"}], "after_files": [{"content": "from typing import Any, List, Optional\n\nimport torch\nimport torch.cuda\nfrom torch.nn import Module\nfrom torch.utils._pytree import tree_flatten, tree_map, tree_unflatten\n\n\ndef to_device(x: Any, device: Optional[torch.device] = None) -> Any:\n \"\"\"Move object to device if it is a tensor.\n\n Args:\n x (Any): Object to be moved.\n device (Optional[torch.device], optional): Target device. Defaults to None.\n\n Returns:\n Any: Moved object.\n \"\"\"\n if isinstance(x, torch.Tensor):\n return x.to(device)\n return x\n\n\ndef get_batch_size(batch: Any) -> int:\n \"\"\"Get the batch size (size of dimension-0) of the first tensor in the batch.\n\n Args:\n batch (Any): Batch to be inspected.\n\n Raises:\n RuntimeError: If no tensor is found in the batch.\n\n Returns:\n int: Batch size.\n \"\"\"\n data_list, _ = tree_flatten(batch)\n for data in data_list:\n if isinstance(data, torch.Tensor):\n return data.size(0)\n raise RuntimeError('No tensor found in the batch')\n\n\ndef get_micro_batch(batch: Any, start: int, micro_batch_size: int) -> Any:\n \"\"\"Get a micro batch of the original batch.\n\n Args:\n batch (Any): Batch to be sliced.\n start (int): Start index of the micro batch.\n micro_batch_size (int): Size of the micro batch.\n\n Returns:\n Any: Target micro batch.\n \"\"\"\n\n def _get_tensor_slice(x: Any):\n if isinstance(x, torch.Tensor):\n return x[start:start + micro_batch_size]\n return x\n\n return tree_map(_get_tensor_slice, batch)\n\n\ndef model_forward(model: Module, data: Any, internal_inputs: Optional[dict]) -> Any:\n \"\"\"Call model forward function with data and internal inputs.\n\n Args:\n model (Module): Model to be called.\n data (Any): Data loaded from data iterator.\n internal_inputs (Optional[dict]): Data from previous stage. 
It must be a dict or None if it's the first stage.\n\n Returns:\n Any: Outputs of the model.\n \"\"\"\n if internal_inputs is None:\n internal_inputs = {}\n if isinstance(data, (list, tuple)):\n return model(*data, **internal_inputs)\n elif isinstance(data, dict):\n return model(**data, **internal_inputs)\n return model(data, **internal_inputs)\n\n\ndef retain_grad(x: Any) -> None:\n \"\"\"Call retain_grad() on a tensor.\n\n Args:\n x (Any): Object to be called.\n \"\"\"\n if isinstance(x, torch.Tensor) and x.requires_grad:\n x.retain_grad()\n\n\ndef detach(x: Any) -> Any:\n \"\"\"Call detach() on a tensor.\n\n Args:\n x (Any): Object to be called.\n\n Returns:\n Any: The detached object.\n \"\"\"\n if isinstance(x, torch.Tensor):\n return x.detach()\n return x\n\n\ndef merge_batch(data: List[Any]) -> Any:\n \"\"\"Merge micro batches into a batch.\n\n Args:\n data (List[Any]): A list of micro batches.\n\n Returns:\n Any: Merge batch.\n \"\"\"\n if len(data) == 0:\n return\n flattened_data = []\n tree_spec = None\n for d in data:\n elems, tree_spec = tree_flatten(d)\n flattened_data.append(elems)\n merged_data = []\n for elem_batch in zip(*flattened_data):\n if isinstance(elem_batch[0], torch.Tensor):\n if len(elem_batch[0].shape) == 0: # set loss to None in pipeline outputs\n merged_data.append(None)\n else:\n merged_data.append(torch.cat(elem_batch, dim=0))\n else:\n merged_data.append(list(elem_batch))\n return tree_unflatten(merged_data, tree_spec)\n", "path": "colossalai/pipeline/schedule/_utils.py"}]}
num_tokens: 1,398
num_tokens_diff: 186
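The diff above changes `merge_batch` so that zero-dimensional tensors (the scalar loss each micro batch returns) are mapped to `None` instead of being concatenated. The following standalone snippet, which assumes PyTorch is installed and uses made-up tensors, reproduces the failure the special case avoids and the merged result produced instead.

```python
import torch

# Two micro batches: each contributes a scalar loss (0-d tensor) and a regular output.
micro_losses = [torch.tensor(0.25), torch.tensor(0.31)]
micro_logits = [torch.randn(4, 10), torch.randn(4, 10)]

# Without the special case, merging the losses fails outright:
try:
    torch.cat(micro_losses, dim=0)
except RuntimeError as err:
    print("cannot concatenate 0-d losses:", err)

# With the patched logic, scalars collapse to None and everything else is
# concatenated along the batch dimension, mirroring the merge_batch change.
merged = []
for elems in (micro_losses, micro_logits):
    if len(elems[0].shape) == 0:
        merged.append(None)          # loss is set to None in the pipeline outputs
    else:
        merged.append(torch.cat(elems, dim=0))

print(merged[0])         # None
print(merged[1].shape)   # torch.Size([8, 10])
```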
problem_id: gh_patches_debug_27404
source: rasdani/github-patches
task_type: git_diff
in_source_id: onnx__onnx-tensorflow-762
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Upsample TypeError: unsupported operand type(s) for *: 'NoneType' and 'float' Hi, I have a problem with the upsample op when trying to prepare an onnx model converted from keras. Any idea of the solution ? Thx > > File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\backend.py", line 56, in prepare > return cls.onnx_model_to_tensorflow_rep(model, strict) > File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\backend.py", line 76, in onnx_model_to_tensorflow_rep > return cls._onnx_graph_to_tensorflow_rep(model.graph, opset_import, strict) > File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\backend.py", line 133, in _onnx_graph_to_tensorflow_rep > onnx_node, tensor_dict, handlers, opset=opset, strict=strict) > File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\backend.py", line 228, in _onnx_node_to_tensorflow_op > return handler.handle(node, tensor_dict=tensor_dict, strict=strict) > File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\handlers\handler.py", line 59, in handle > return ver_handle(node, **kwargs) > File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\handlers\backend\upsample.py", line 33, in version_7 > new_height = np.floor(x_shape[2] * scales[2]) > TypeError: unsupported operand type(s) for *: 'NoneType' and 'float' --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `onnx_tf/handlers/backend/upsample.py` Content: ``` 1 import copy 2 3 import numpy as np 4 import tensorflow as tf 5 6 from onnx_tf.common import exception 7 from onnx_tf.handlers.backend_handler import BackendHandler 8 from onnx_tf.handlers.handler import onnx_op 9 from onnx_tf.handlers.handler import partial_support 10 from onnx_tf.handlers.handler import ps_description 11 from onnx_tf.handlers.handler import tf_func 12 from onnx_tf.common.tf_helper import tf_shape 13 14 15 @onnx_op("Upsample") 16 @tf_func(tf.image.resize) 17 @partial_support(True) 18 @ps_description("Upsample required 4D input in Tensorflow.") 19 class Upsample(BackendHandler): 20 21 @classmethod 22 def args_check(cls, node, **kwargs): 23 x = kwargs["tensor_dict"][node.inputs[0]] 24 x_shape = x.get_shape().as_list() 25 if len(x_shape) != 4: 26 exception.OP_UNSUPPORTED_EXCEPT("Upsample without 4D input", "Tensorflow") 27 28 if node.attrs.get( 29 "mode", "nearest").lower() not in ["nearest", "bilinear", "linear"]: 30 exception.OP_UNSUPPORTED_EXCEPT("Upsample without nearest or bilinear", 31 "Tensorflow") 32 33 @classmethod 34 def version_7(cls, node, **kwargs): 35 x = kwargs["tensor_dict"][node.inputs[0]] 36 x_shape = x.get_shape().as_list() 37 attrs = copy.deepcopy(node.attrs) 38 scales = attrs["scales"] 39 new_height = np.floor(x_shape[2] * scales[2]) 40 new_weight = np.floor(x_shape[3] * scales[3]) 41 42 mode = attrs.get("mode", "nearest") 43 if mode.lower() == "bilinear" or mode.lower() == "linear": 44 mode = tf.image.ResizeMethod.BILINEAR 45 else: 46 mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR 47 48 attrs["size"] = np.array((new_height, new_weight), dtype=np.int32) 49 attrs["method"] = mode 50 51 return [ 52 cls.make_tensor_from_onnx_node( 53 node, attrs=attrs, c_last_only=True, **kwargs) 54 ] 55 56 @classmethod 57 def version_9(cls, node, **kwargs): 58 x = 
kwargs["tensor_dict"][node.inputs[0]] 59 x_shape = tf_shape(x) 60 attrs = copy.deepcopy(node.attrs) 61 scales = kwargs["tensor_dict"][node.inputs[1]] 62 63 assert_n_c_scale_is_one = tf.Assert( 64 tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)), 65 [scales]) 66 67 with tf.control_dependencies([assert_n_c_scale_is_one]): 68 h_w_scale = scales[2:] 69 h_w_shape = x_shape[2:] 70 new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, scales.dtype), 71 tf.int32) 72 73 mode = attrs.get("mode", "nearest") 74 if mode.lower() == "bilinear" or mode.lower() == "linear": 75 mode = tf.image.ResizeMethod.BILINEAR 76 else: 77 mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR 78 79 attrs["size"] = new_h_w_shape 80 attrs["method"] = mode 81 82 # Remove scale. 83 upsample_node = copy.deepcopy(node) 84 del upsample_node.inputs[1] 85 return [ 86 cls.make_tensor_from_onnx_node( 87 upsample_node, attrs=attrs, c_last_only=True, **kwargs) 88 ] 89 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/onnx_tf/handlers/backend/upsample.py b/onnx_tf/handlers/backend/upsample.py --- a/onnx_tf/handlers/backend/upsample.py +++ b/onnx_tf/handlers/backend/upsample.py @@ -1,6 +1,5 @@ import copy -import numpy as np import tensorflow as tf from onnx_tf.common import exception @@ -33,20 +32,28 @@ @classmethod def version_7(cls, node, **kwargs): x = kwargs["tensor_dict"][node.inputs[0]] - x_shape = x.get_shape().as_list() + x_shape = tf_shape(x) attrs = copy.deepcopy(node.attrs) scales = attrs["scales"] - new_height = np.floor(x_shape[2] * scales[2]) - new_weight = np.floor(x_shape[3] * scales[3]) - mode = attrs.get("mode", "nearest") - if mode.lower() == "bilinear" or mode.lower() == "linear": - mode = tf.image.ResizeMethod.BILINEAR - else: - mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR + assert_n_c_scale_is_one = tf.Assert( + tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)), + [scales]) + + with tf.control_dependencies([assert_n_c_scale_is_one]): + h_w_scale = scales[2:] + h_w_shape = x_shape[2:] + new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, type(h_w_scale[0])), + tf.int32) + + mode = attrs.get("mode", "nearest") + if mode.lower() == "bilinear" or mode.lower() == "linear": + mode = tf.image.ResizeMethod.BILINEAR + else: + mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR - attrs["size"] = np.array((new_height, new_weight), dtype=np.int32) - attrs["method"] = mode + attrs["size"] = new_h_w_shape + attrs["method"] = mode return [ cls.make_tensor_from_onnx_node(
{"golden_diff": "diff --git a/onnx_tf/handlers/backend/upsample.py b/onnx_tf/handlers/backend/upsample.py\n--- a/onnx_tf/handlers/backend/upsample.py\n+++ b/onnx_tf/handlers/backend/upsample.py\n@@ -1,6 +1,5 @@\n import copy\n \n-import numpy as np\n import tensorflow as tf\n \n from onnx_tf.common import exception\n@@ -33,20 +32,28 @@\n @classmethod\n def version_7(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n- x_shape = x.get_shape().as_list()\n+ x_shape = tf_shape(x)\n attrs = copy.deepcopy(node.attrs)\n scales = attrs[\"scales\"]\n- new_height = np.floor(x_shape[2] * scales[2])\n- new_weight = np.floor(x_shape[3] * scales[3])\n \n- mode = attrs.get(\"mode\", \"nearest\")\n- if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n- mode = tf.image.ResizeMethod.BILINEAR\n- else:\n- mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n+ assert_n_c_scale_is_one = tf.Assert(\n+ tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)),\n+ [scales])\n+\n+ with tf.control_dependencies([assert_n_c_scale_is_one]):\n+ h_w_scale = scales[2:]\n+ h_w_shape = x_shape[2:]\n+ new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, type(h_w_scale[0])),\n+ tf.int32)\n+\n+ mode = attrs.get(\"mode\", \"nearest\")\n+ if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n+ mode = tf.image.ResizeMethod.BILINEAR\n+ else:\n+ mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n \n- attrs[\"size\"] = np.array((new_height, new_weight), dtype=np.int32)\n- attrs[\"method\"] = mode\n+ attrs[\"size\"] = new_h_w_shape\n+ attrs[\"method\"] = mode\n \n return [\n cls.make_tensor_from_onnx_node(\n", "issue": "Upsample TypeError: unsupported operand type(s) for *: 'NoneType' and 'float'\nHi, \r\nI have a problem with the upsample op when trying to prepare an onnx model converted from keras. Any idea of the solution ? 
Thx\r\n\r\n> \r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\backend.py\", line 56, in prepare\r\n> return cls.onnx_model_to_tensorflow_rep(model, strict)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\backend.py\", line 76, in onnx_model_to_tensorflow_rep\r\n> return cls._onnx_graph_to_tensorflow_rep(model.graph, opset_import, strict)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\backend.py\", line 133, in _onnx_graph_to_tensorflow_rep\r\n> onnx_node, tensor_dict, handlers, opset=opset, strict=strict)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\backend.py\", line 228, in _onnx_node_to_tensorflow_op\r\n> return handler.handle(node, tensor_dict=tensor_dict, strict=strict)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\handlers\\handler.py\", line 59, in handle\r\n> return ver_handle(node, **kwargs)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\handlers\\backend\\upsample.py\", line 33, in version_7\r\n> new_height = np.floor(x_shape[2] * scales[2])\r\n> TypeError: unsupported operand type(s) for *: 'NoneType' and 'float'\n", "before_files": [{"content": "import copy\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom onnx_tf.common import exception\nfrom onnx_tf.handlers.backend_handler import BackendHandler\nfrom onnx_tf.handlers.handler import onnx_op\nfrom onnx_tf.handlers.handler import partial_support\nfrom onnx_tf.handlers.handler import ps_description\nfrom onnx_tf.handlers.handler import tf_func\nfrom onnx_tf.common.tf_helper import tf_shape\n\n\n@onnx_op(\"Upsample\")\n@tf_func(tf.image.resize)\n@partial_support(True)\n@ps_description(\"Upsample required 4D input in Tensorflow.\")\nclass Upsample(BackendHandler):\n\n @classmethod\n def args_check(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = x.get_shape().as_list()\n if len(x_shape) != 4:\n exception.OP_UNSUPPORTED_EXCEPT(\"Upsample without 4D input\", \"Tensorflow\")\n\n if node.attrs.get(\n \"mode\", \"nearest\").lower() not in [\"nearest\", \"bilinear\", \"linear\"]:\n exception.OP_UNSUPPORTED_EXCEPT(\"Upsample without nearest or bilinear\",\n \"Tensorflow\")\n\n @classmethod\n def version_7(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = x.get_shape().as_list()\n attrs = copy.deepcopy(node.attrs)\n scales = attrs[\"scales\"]\n new_height = np.floor(x_shape[2] * scales[2])\n new_weight = np.floor(x_shape[3] * scales[3])\n\n mode = attrs.get(\"mode\", \"nearest\")\n if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n mode = tf.image.ResizeMethod.BILINEAR\n else:\n mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n\n attrs[\"size\"] = np.array((new_height, new_weight), dtype=np.int32)\n attrs[\"method\"] = mode\n\n return [\n cls.make_tensor_from_onnx_node(\n node, attrs=attrs, c_last_only=True, **kwargs)\n ]\n\n @classmethod\n def version_9(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = tf_shape(x)\n attrs = copy.deepcopy(node.attrs)\n scales = kwargs[\"tensor_dict\"][node.inputs[1]]\n\n assert_n_c_scale_is_one = tf.Assert(\n tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)),\n [scales])\n\n with tf.control_dependencies([assert_n_c_scale_is_one]):\n h_w_scale = scales[2:]\n h_w_shape = x_shape[2:]\n new_h_w_shape = tf.cast(h_w_scale * 
tf.cast(h_w_shape, scales.dtype),\n tf.int32)\n\n mode = attrs.get(\"mode\", \"nearest\")\n if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n mode = tf.image.ResizeMethod.BILINEAR\n else:\n mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n\n attrs[\"size\"] = new_h_w_shape\n attrs[\"method\"] = mode\n\n # Remove scale.\n upsample_node = copy.deepcopy(node)\n del upsample_node.inputs[1]\n return [\n cls.make_tensor_from_onnx_node(\n upsample_node, attrs=attrs, c_last_only=True, **kwargs)\n ]\n", "path": "onnx_tf/handlers/backend/upsample.py"}], "after_files": [{"content": "import copy\n\nimport tensorflow as tf\n\nfrom onnx_tf.common import exception\nfrom onnx_tf.handlers.backend_handler import BackendHandler\nfrom onnx_tf.handlers.handler import onnx_op\nfrom onnx_tf.handlers.handler import partial_support\nfrom onnx_tf.handlers.handler import ps_description\nfrom onnx_tf.handlers.handler import tf_func\nfrom onnx_tf.common.tf_helper import tf_shape\n\n\n@onnx_op(\"Upsample\")\n@tf_func(tf.image.resize)\n@partial_support(True)\n@ps_description(\"Upsample required 4D input in Tensorflow.\")\nclass Upsample(BackendHandler):\n\n @classmethod\n def args_check(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = x.get_shape().as_list()\n if len(x_shape) != 4:\n exception.OP_UNSUPPORTED_EXCEPT(\"Upsample without 4D input\", \"Tensorflow\")\n\n if node.attrs.get(\n \"mode\", \"nearest\").lower() not in [\"nearest\", \"bilinear\", \"linear\"]:\n exception.OP_UNSUPPORTED_EXCEPT(\"Upsample without nearest or bilinear\",\n \"Tensorflow\")\n\n @classmethod\n def version_7(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = tf_shape(x)\n attrs = copy.deepcopy(node.attrs)\n scales = attrs[\"scales\"]\n\n assert_n_c_scale_is_one = tf.Assert(\n tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)),\n [scales])\n\n with tf.control_dependencies([assert_n_c_scale_is_one]):\n h_w_scale = scales[2:]\n h_w_shape = x_shape[2:]\n new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, type(h_w_scale[0])),\n tf.int32)\n\n mode = attrs.get(\"mode\", \"nearest\")\n if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n mode = tf.image.ResizeMethod.BILINEAR\n else:\n mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n\n attrs[\"size\"] = new_h_w_shape\n attrs[\"method\"] = mode\n\n return [\n cls.make_tensor_from_onnx_node(\n node, attrs=attrs, c_last_only=True, **kwargs)\n ]\n\n @classmethod\n def version_9(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = tf_shape(x)\n attrs = copy.deepcopy(node.attrs)\n scales = kwargs[\"tensor_dict\"][node.inputs[1]]\n\n assert_n_c_scale_is_one = tf.Assert(\n tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)),\n [scales])\n\n with tf.control_dependencies([assert_n_c_scale_is_one]):\n h_w_scale = scales[2:]\n h_w_shape = x_shape[2:]\n new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, scales.dtype),\n tf.int32)\n\n mode = attrs.get(\"mode\", \"nearest\")\n if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n mode = tf.image.ResizeMethod.BILINEAR\n else:\n mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n\n attrs[\"size\"] = new_h_w_shape\n attrs[\"method\"] = mode\n\n # Remove scale.\n upsample_node = copy.deepcopy(node)\n del upsample_node.inputs[1]\n return [\n cls.make_tensor_from_onnx_node(\n upsample_node, attrs=attrs, c_last_only=True, **kwargs)\n ]\n", "path": "onnx_tf/handlers/backend/upsample.py"}]}
num_tokens: 1,639
num_tokens_diff: 503
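The root cause in the record above is that `x.get_shape().as_list()` returns `None` for dynamic dimensions, so multiplying such an entry by a float scale raises the reported `TypeError`; the fix switches to run-time shapes. The short sketch below, which assumes TensorFlow 2.x and uses an invented `upsample_size` helper, shows the dynamic-shape arithmetic the patched handler relies on.

```python
import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec([None, 3, None, None], tf.float32)])
def upsample_size(x):
    # Inside tf.function the static shape of x is [None, 3, None, None], so the
    # old `x.get_shape().as_list()[2] * scale` arithmetic would hit None * float.
    scales = tf.constant([1.0, 1.0, 2.0, 2.0])
    dyn_shape = tf.shape(x)                                   # always concrete at run time
    new_h_w = tf.cast(dyn_shape[2:], scales.dtype) * scales[2:]
    return tf.cast(new_h_w, tf.int32)

x = tf.random.normal([1, 3, 5, 7])
print(upsample_size(x).numpy())   # [10 14]
```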
problem_id: gh_patches_debug_37967
source: rasdani/github-patches
task_type: git_diff
in_source_id: holoviz__panel-1354
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improve Title functionality of panel.serve to be specific for each app served THE BELOW IS COPIED FROM https://discourse.holoviz.org/t/how-do-i-set-the-app-title-with-panel-serve/550 IN THAT POST I HAVE TRIED DIFFERENT APPROACHES WITHOUT LUCK. #### SOLUTION Provide functionality to support use case ----------- I'm using `panel.serve` to serve a list of Panel applications. I would like the *index* page to show a custom title in the browser tab like `Corp Analytics` I would like each app to show a custom title in the browser tab like `App Name`. **How do I achieve that?** What I can do and would like is shown below #### Index Page ![image](https://user-images.githubusercontent.com/42288570/79939878-80bfc600-8460-11ea-9ef6-0bafdf907667.png) #### App 1 Page ![image](https://user-images.githubusercontent.com/42288570/79939884-85847a00-8460-11ea-8a0f-bcc5f639a792.png) #### App 2 Page ![image](https://user-images.githubusercontent.com/42288570/79939898-8cab8800-8460-11ea-98c6-3f04e77a1a09.png) #### Code ```python import panel as pn def create_app(): return pn.Spacer(background="green", sizing_mode="stretch_both") APP_ROUTES = { "app1": create_app, "app2": create_app } pn.serve(APP_ROUTES, port=14033, title="My App") ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `panel/io/server.py` Content: ``` 1 """ 2 Utilities for creating bokeh Server instances. 3 """ 4 from __future__ import absolute_import, division, unicode_literals 5 6 import os 7 import signal 8 import sys 9 import threading 10 import uuid 11 12 from contextlib import contextmanager 13 from functools import partial 14 from types import FunctionType 15 16 from bokeh.document.events import ModelChangedEvent 17 from bokeh.server.server import Server 18 from tornado.websocket import WebSocketHandler 19 from tornado.web import RequestHandler 20 from tornado.wsgi import WSGIContainer 21 22 from .state import state 23 24 25 #--------------------------------------------------------------------- 26 # Private API 27 #--------------------------------------------------------------------- 28 29 INDEX_HTML = os.path.join(os.path.dirname(__file__), '..', '_templates', "index.html") 30 31 def _origin_url(url): 32 if url.startswith("http"): 33 url = url.split("//")[1] 34 return url 35 36 37 def _server_url(url, port): 38 if url.startswith("http"): 39 return '%s:%d%s' % (url.rsplit(':', 1)[0], port, "/") 40 else: 41 return 'http://%s:%d%s' % (url.split(':')[0], port, "/") 42 43 def _eval_panel(panel, server_id, title, location, doc): 44 from ..template import BaseTemplate 45 from ..pane import panel as as_panel 46 47 if isinstance(panel, FunctionType): 48 panel = panel() 49 if isinstance(panel, BaseTemplate): 50 return panel._modify_doc(server_id, title, doc, location) 51 return as_panel(panel)._modify_doc(server_id, title, doc, location) 52 53 #--------------------------------------------------------------------- 54 # Public API 55 #--------------------------------------------------------------------- 56 57 58 @contextmanager 59 def unlocked(): 60 """ 61 Context manager which unlocks a Document and dispatches 62 ModelChangedEvents triggered in the context body to all sockets 63 on current sessions. 
64 """ 65 curdoc = state.curdoc 66 if curdoc is None or curdoc.session_context is None: 67 yield 68 return 69 connections = curdoc.session_context.session._subscribed_connections 70 71 hold = curdoc._hold 72 if hold: 73 old_events = list(curdoc._held_events) 74 else: 75 old_events = [] 76 curdoc.hold() 77 try: 78 yield 79 events = [] 80 for conn in connections: 81 socket = conn._socket 82 if hasattr(socket, 'write_lock') and socket.write_lock._block._value == 0: 83 state._locks.add(socket) 84 locked = socket in state._locks 85 for event in curdoc._held_events: 86 if (isinstance(event, ModelChangedEvent) and event not in old_events 87 and hasattr(socket, 'write_message') and not locked): 88 msg = conn.protocol.create('PATCH-DOC', [event]) 89 WebSocketHandler.write_message(socket, msg.header_json) 90 WebSocketHandler.write_message(socket, msg.metadata_json) 91 WebSocketHandler.write_message(socket, msg.content_json) 92 for header, payload in msg._buffers: 93 WebSocketHandler.write_message(socket, header) 94 WebSocketHandler.write_message(socket, payload, binary=True) 95 elif event not in events: 96 events.append(event) 97 curdoc._held_events = events 98 finally: 99 if not hold: 100 curdoc.unhold() 101 102 103 def serve(panels, port=0, websocket_origin=None, loop=None, show=True, 104 start=True, title=None, verbose=True, location=True, **kwargs): 105 """ 106 Allows serving one or more panel objects on a single server. 107 The panels argument should be either a Panel object or a function 108 returning a Panel object or a dictionary of these two. If a 109 dictionary is supplied the keys represent the slugs at which 110 each app is served, e.g. `serve({'app': panel1, 'app2': panel2})` 111 will serve apps at /app and /app2 on the server. 112 113 Arguments 114 --------- 115 panel: Viewable, function or {str: Viewable} 116 A Panel object, a function returning a Panel object or a 117 dictionary mapping from the URL slug to either. 118 port: int (optional, default=0) 119 Allows specifying a specific port 120 websocket_origin: str or list(str) (optional) 121 A list of hosts that can connect to the websocket. 122 123 This is typically required when embedding a server app in 124 an external web site. 125 126 If None, "localhost" is used. 127 loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current()) 128 The tornado IOLoop to run the Server on 129 show : boolean (optional, default=False) 130 Whether to open the server in a new browser tab on start 131 start : boolean(optional, default=False) 132 Whether to start the Server 133 title: str (optional, default=None) 134 An HTML title for the application 135 verbose: boolean (optional, default=True) 136 Whether to print the address and port 137 location : boolean or panel.io.location.Location 138 Whether to create a Location component to observe and 139 set the URL location. 140 kwargs: dict 141 Additional keyword arguments to pass to Server instance 142 """ 143 return get_server(panels, port, websocket_origin, loop, show, start, 144 title, verbose, location, **kwargs) 145 146 147 class ProxyFallbackHandler(RequestHandler): 148 """A `RequestHandler` that wraps another HTTP server callback and 149 proxies the subpath. 
150 """ 151 152 def initialize(self, fallback, proxy=None): 153 self.fallback = fallback 154 self.proxy = proxy 155 156 def prepare(self): 157 if self.proxy: 158 self.request.path = self.request.path.replace(self.proxy, '') 159 self.fallback(self.request) 160 self._finished = True 161 self.on_finish() 162 163 164 def get_server(panel, port=0, websocket_origin=None, loop=None, 165 show=False, start=False, title=None, verbose=False, 166 location=True, **kwargs): 167 """ 168 Returns a Server instance with this panel attached as the root 169 app. 170 171 Arguments 172 --------- 173 panel: Viewable, function or {str: Viewable} 174 A Panel object, a function returning a Panel object or a 175 dictionary mapping from the URL slug to either. 176 port: int (optional, default=0) 177 Allows specifying a specific port 178 websocket_origin: str or list(str) (optional) 179 A list of hosts that can connect to the websocket. 180 181 This is typically required when embedding a server app in 182 an external web site. 183 184 If None, "localhost" is used. 185 loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current()) 186 The tornado IOLoop to run the Server on 187 show : boolean (optional, default=False) 188 Whether to open the server in a new browser tab on start 189 start : boolean(optional, default=False) 190 Whether to start the Server 191 title: str (optional, default=None) 192 An HTML title for the application 193 verbose: boolean (optional, default=False) 194 Whether to report the address and port 195 location : boolean or panel.io.location.Location 196 Whether to create a Location component to observe and 197 set the URL location. 198 kwargs: dict 199 Additional keyword arguments to pass to Server instance 200 201 Returns 202 ------- 203 server : bokeh.server.server.Server 204 Bokeh Server instance running this panel 205 """ 206 from tornado.ioloop import IOLoop 207 208 server_id = kwargs.pop('server_id', uuid.uuid4().hex) 209 kwargs['extra_patterns'] = extra_patterns = kwargs.get('extra_patterns', []) 210 if isinstance(panel, dict): 211 apps = {} 212 for slug, app in panel.items(): 213 slug = slug if slug.startswith('/') else '/'+slug 214 if 'flask' in sys.modules: 215 from flask import Flask 216 if isinstance(app, Flask): 217 wsgi = WSGIContainer(app) 218 if slug == '/': 219 raise ValueError('Flask apps must be served on a subpath.') 220 if not slug.endswith('/'): 221 slug += '/' 222 extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler, 223 dict(fallback=wsgi, proxy=slug))) 224 continue 225 apps[slug] = partial(_eval_panel, app, server_id, title, location) 226 else: 227 apps = {'/': partial(_eval_panel, panel, server_id, title, location)} 228 229 opts = dict(kwargs) 230 if loop: 231 loop.make_current() 232 opts['io_loop'] = loop 233 else: 234 opts['io_loop'] = IOLoop.current() 235 236 if 'index' not in opts: 237 opts['index'] = INDEX_HTML 238 239 if websocket_origin: 240 if not isinstance(websocket_origin, list): 241 websocket_origin = [websocket_origin] 242 opts['allow_websocket_origin'] = websocket_origin 243 244 server = Server(apps, port=port, **opts) 245 if verbose: 246 address = server.address or 'localhost' 247 print("Launching server at http://%s:%s" % (address, server.port)) 248 249 state._servers[server_id] = (server, panel, []) 250 251 if show: 252 def show_callback(): 253 server.show('/') 254 server.io_loop.add_callback(show_callback) 255 256 def sig_exit(*args, **kwargs): 257 server.io_loop.add_callback_from_signal(do_stop) 258 259 def do_stop(*args, **kwargs): 260 
server.io_loop.stop() 261 262 try: 263 signal.signal(signal.SIGINT, sig_exit) 264 except ValueError: 265 pass # Can't use signal on a thread 266 267 if start: 268 server.start() 269 try: 270 server.io_loop.start() 271 except RuntimeError: 272 pass 273 return server 274 275 276 class StoppableThread(threading.Thread): 277 """Thread class with a stop() method.""" 278 279 def __init__(self, io_loop=None, timeout=1000, **kwargs): 280 from tornado import ioloop 281 super(StoppableThread, self).__init__(**kwargs) 282 self._stop_event = threading.Event() 283 self.io_loop = io_loop 284 self._cb = ioloop.PeriodicCallback(self._check_stopped, timeout) 285 self._cb.start() 286 287 def _check_stopped(self): 288 if self.stopped: 289 self._cb.stop() 290 self.io_loop.stop() 291 292 def run(self): 293 if hasattr(self, '_target'): 294 target, args, kwargs = self._target, self._args, self._kwargs 295 else: 296 target, args, kwargs = self._Thread__target, self._Thread__args, self._Thread__kwargs 297 if not target: 298 return 299 bokeh_server = None 300 try: 301 bokeh_server = target(*args, **kwargs) 302 finally: 303 if isinstance(bokeh_server, Server): 304 bokeh_server.stop() 305 if hasattr(self, '_target'): 306 del self._target, self._args, self._kwargs 307 else: 308 del self._Thread__target, self._Thread__args, self._Thread__kwargs 309 310 def stop(self): 311 self._stop_event.set() 312 313 @property 314 def stopped(self): 315 return self._stop_event.is_set() 316 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/panel/io/server.py b/panel/io/server.py --- a/panel/io/server.py +++ b/panel/io/server.py @@ -130,8 +130,9 @@ Whether to open the server in a new browser tab on start start : boolean(optional, default=False) Whether to start the Server - title: str (optional, default=None) - An HTML title for the application + title: str or {str: str} (optional, default=None) + An HTML title for the application or a dictionary mapping + from the URL slug to a customized title verbose: boolean (optional, default=True) Whether to print the address and port location : boolean or panel.io.location.Location @@ -188,8 +189,9 @@ Whether to open the server in a new browser tab on start start : boolean(optional, default=False) Whether to start the Server - title: str (optional, default=None) - An HTML title for the application + title: str or {str: str} (optional, default=None) + An HTML title for the application or a dictionary mapping + from the URL slug to a customized title verbose: boolean (optional, default=False) Whether to report the address and port location : boolean or panel.io.location.Location @@ -210,6 +212,16 @@ if isinstance(panel, dict): apps = {} for slug, app in panel.items(): + if isinstance(title, dict): + try: + title_ = title[slug] + except KeyError: + raise KeyError( + "Keys of the title dictionnary and of the apps " + f"dictionary must match. No {slug} key found in the " + "title dictionnary.") + else: + title_ = title slug = slug if slug.startswith('/') else '/'+slug if 'flask' in sys.modules: from flask import Flask @@ -222,7 +234,7 @@ extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler, dict(fallback=wsgi, proxy=slug))) continue - apps[slug] = partial(_eval_panel, app, server_id, title, location) + apps[slug] = partial(_eval_panel, app, server_id, title_, location) else: apps = {'/': partial(_eval_panel, panel, server_id, title, location)}
{"golden_diff": "diff --git a/panel/io/server.py b/panel/io/server.py\n--- a/panel/io/server.py\n+++ b/panel/io/server.py\n@@ -130,8 +130,9 @@\n Whether to open the server in a new browser tab on start\n start : boolean(optional, default=False)\n Whether to start the Server\n- title: str (optional, default=None)\n- An HTML title for the application\n+ title: str or {str: str} (optional, default=None)\n+ An HTML title for the application or a dictionary mapping\n+ from the URL slug to a customized title\n verbose: boolean (optional, default=True)\n Whether to print the address and port\n location : boolean or panel.io.location.Location\n@@ -188,8 +189,9 @@\n Whether to open the server in a new browser tab on start\n start : boolean(optional, default=False)\n Whether to start the Server\n- title: str (optional, default=None)\n- An HTML title for the application\n+ title: str or {str: str} (optional, default=None)\n+ An HTML title for the application or a dictionary mapping\n+ from the URL slug to a customized title\n verbose: boolean (optional, default=False)\n Whether to report the address and port\n location : boolean or panel.io.location.Location\n@@ -210,6 +212,16 @@\n if isinstance(panel, dict):\n apps = {}\n for slug, app in panel.items():\n+ if isinstance(title, dict):\n+ try:\n+ title_ = title[slug]\n+ except KeyError:\n+ raise KeyError(\n+ \"Keys of the title dictionnary and of the apps \"\n+ f\"dictionary must match. No {slug} key found in the \"\n+ \"title dictionnary.\") \n+ else:\n+ title_ = title\n slug = slug if slug.startswith('/') else '/'+slug\n if 'flask' in sys.modules:\n from flask import Flask\n@@ -222,7 +234,7 @@\n extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler,\n dict(fallback=wsgi, proxy=slug)))\n continue\n- apps[slug] = partial(_eval_panel, app, server_id, title, location)\n+ apps[slug] = partial(_eval_panel, app, server_id, title_, location)\n else:\n apps = {'/': partial(_eval_panel, panel, server_id, title, location)}\n", "issue": "Improve Title functionality of panel.serve to be specific for each app served\nTHE BELOW IS COPIED FROM https://discourse.holoviz.org/t/how-do-i-set-the-app-title-with-panel-serve/550\r\n\r\nIN THAT POST I HAVE TRIED DIFFERENT APPROACHES WITHOUT LUCK.\r\n\r\n#### SOLUTION\r\n\r\nProvide functionality to support use case\r\n\r\n-----------\r\n\r\nI'm using `panel.serve` to serve a list of Panel applications.\r\n\r\nI would like the *index* page to show a custom title in the browser tab like `Corp Analytics`\r\n\r\nI would like each app to show a custom title in the browser tab like `App Name`.\r\n\r\n**How do I achieve that?**\r\n\r\nWhat I can do and would like is shown below\r\n\r\n#### Index Page\r\n\r\n![image](https://user-images.githubusercontent.com/42288570/79939878-80bfc600-8460-11ea-9ef6-0bafdf907667.png)\r\n\r\n#### App 1 Page\r\n\r\n![image](https://user-images.githubusercontent.com/42288570/79939884-85847a00-8460-11ea-8a0f-bcc5f639a792.png)\r\n\r\n#### App 2 Page\r\n\r\n![image](https://user-images.githubusercontent.com/42288570/79939898-8cab8800-8460-11ea-98c6-3f04e77a1a09.png)\r\n\r\n#### Code\r\n\r\n```python\r\nimport panel as pn\r\n\r\ndef create_app():\r\n\r\n return pn.Spacer(background=\"green\", sizing_mode=\"stretch_both\")\r\n\r\nAPP_ROUTES = {\r\n\r\n \"app1\": create_app,\r\n\r\n \"app2\": create_app\r\n\r\n}\r\n\r\npn.serve(APP_ROUTES, port=14033, title=\"My App\")\r\n```\n", "before_files": [{"content": "\"\"\"\nUtilities for creating bokeh Server instances.\n\"\"\"\nfrom __future__ import 
absolute_import, division, unicode_literals\n\nimport os\nimport signal\nimport sys\nimport threading\nimport uuid\n\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom types import FunctionType\n\nfrom bokeh.document.events import ModelChangedEvent\nfrom bokeh.server.server import Server\nfrom tornado.websocket import WebSocketHandler\nfrom tornado.web import RequestHandler\nfrom tornado.wsgi import WSGIContainer\n\nfrom .state import state\n\n\n#---------------------------------------------------------------------\n# Private API\n#---------------------------------------------------------------------\n\nINDEX_HTML = os.path.join(os.path.dirname(__file__), '..', '_templates', \"index.html\")\n\ndef _origin_url(url):\n if url.startswith(\"http\"):\n url = url.split(\"//\")[1]\n return url\n\n\ndef _server_url(url, port):\n if url.startswith(\"http\"):\n return '%s:%d%s' % (url.rsplit(':', 1)[0], port, \"/\")\n else:\n return 'http://%s:%d%s' % (url.split(':')[0], port, \"/\")\n\ndef _eval_panel(panel, server_id, title, location, doc):\n from ..template import BaseTemplate\n from ..pane import panel as as_panel\n\n if isinstance(panel, FunctionType):\n panel = panel()\n if isinstance(panel, BaseTemplate):\n return panel._modify_doc(server_id, title, doc, location)\n return as_panel(panel)._modify_doc(server_id, title, doc, location)\n\n#---------------------------------------------------------------------\n# Public API\n#---------------------------------------------------------------------\n\n\n@contextmanager\ndef unlocked():\n \"\"\"\n Context manager which unlocks a Document and dispatches\n ModelChangedEvents triggered in the context body to all sockets\n on current sessions.\n \"\"\"\n curdoc = state.curdoc\n if curdoc is None or curdoc.session_context is None:\n yield\n return\n connections = curdoc.session_context.session._subscribed_connections\n\n hold = curdoc._hold\n if hold:\n old_events = list(curdoc._held_events)\n else:\n old_events = []\n curdoc.hold()\n try:\n yield\n events = []\n for conn in connections:\n socket = conn._socket\n if hasattr(socket, 'write_lock') and socket.write_lock._block._value == 0:\n state._locks.add(socket)\n locked = socket in state._locks\n for event in curdoc._held_events:\n if (isinstance(event, ModelChangedEvent) and event not in old_events\n and hasattr(socket, 'write_message') and not locked):\n msg = conn.protocol.create('PATCH-DOC', [event])\n WebSocketHandler.write_message(socket, msg.header_json)\n WebSocketHandler.write_message(socket, msg.metadata_json)\n WebSocketHandler.write_message(socket, msg.content_json)\n for header, payload in msg._buffers:\n WebSocketHandler.write_message(socket, header)\n WebSocketHandler.write_message(socket, payload, binary=True)\n elif event not in events:\n events.append(event)\n curdoc._held_events = events\n finally:\n if not hold:\n curdoc.unhold()\n\n\ndef serve(panels, port=0, websocket_origin=None, loop=None, show=True,\n start=True, title=None, verbose=True, location=True, **kwargs):\n \"\"\"\n Allows serving one or more panel objects on a single server.\n The panels argument should be either a Panel object or a function\n returning a Panel object or a dictionary of these two. If a \n dictionary is supplied the keys represent the slugs at which\n each app is served, e.g. 
`serve({'app': panel1, 'app2': panel2})`\n will serve apps at /app and /app2 on the server.\n\n Arguments\n ---------\n panel: Viewable, function or {str: Viewable}\n A Panel object, a function returning a Panel object or a\n dictionary mapping from the URL slug to either.\n port: int (optional, default=0)\n Allows specifying a specific port\n websocket_origin: str or list(str) (optional)\n A list of hosts that can connect to the websocket.\n\n This is typically required when embedding a server app in\n an external web site.\n\n If None, \"localhost\" is used.\n loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())\n The tornado IOLoop to run the Server on\n show : boolean (optional, default=False)\n Whether to open the server in a new browser tab on start\n start : boolean(optional, default=False)\n Whether to start the Server\n title: str (optional, default=None)\n An HTML title for the application\n verbose: boolean (optional, default=True)\n Whether to print the address and port\n location : boolean or panel.io.location.Location\n Whether to create a Location component to observe and\n set the URL location.\n kwargs: dict\n Additional keyword arguments to pass to Server instance\n \"\"\"\n return get_server(panels, port, websocket_origin, loop, show, start,\n title, verbose, location, **kwargs)\n\n\nclass ProxyFallbackHandler(RequestHandler):\n \"\"\"A `RequestHandler` that wraps another HTTP server callback and\n proxies the subpath.\n \"\"\"\n\n def initialize(self, fallback, proxy=None):\n self.fallback = fallback\n self.proxy = proxy\n\n def prepare(self):\n if self.proxy:\n self.request.path = self.request.path.replace(self.proxy, '')\n self.fallback(self.request)\n self._finished = True\n self.on_finish()\n\n\ndef get_server(panel, port=0, websocket_origin=None, loop=None,\n show=False, start=False, title=None, verbose=False,\n location=True, **kwargs):\n \"\"\"\n Returns a Server instance with this panel attached as the root\n app.\n\n Arguments\n ---------\n panel: Viewable, function or {str: Viewable}\n A Panel object, a function returning a Panel object or a\n dictionary mapping from the URL slug to either.\n port: int (optional, default=0)\n Allows specifying a specific port\n websocket_origin: str or list(str) (optional)\n A list of hosts that can connect to the websocket.\n\n This is typically required when embedding a server app in\n an external web site.\n\n If None, \"localhost\" is used.\n loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())\n The tornado IOLoop to run the Server on\n show : boolean (optional, default=False)\n Whether to open the server in a new browser tab on start\n start : boolean(optional, default=False)\n Whether to start the Server\n title: str (optional, default=None)\n An HTML title for the application\n verbose: boolean (optional, default=False)\n Whether to report the address and port\n location : boolean or panel.io.location.Location\n Whether to create a Location component to observe and\n set the URL location.\n kwargs: dict\n Additional keyword arguments to pass to Server instance\n\n Returns\n -------\n server : bokeh.server.server.Server\n Bokeh Server instance running this panel\n \"\"\"\n from tornado.ioloop import IOLoop\n\n server_id = kwargs.pop('server_id', uuid.uuid4().hex)\n kwargs['extra_patterns'] = extra_patterns = kwargs.get('extra_patterns', [])\n if isinstance(panel, dict):\n apps = {}\n for slug, app in panel.items():\n slug = slug if slug.startswith('/') else '/'+slug\n if 'flask' in 
sys.modules:\n from flask import Flask\n if isinstance(app, Flask):\n wsgi = WSGIContainer(app)\n if slug == '/':\n raise ValueError('Flask apps must be served on a subpath.')\n if not slug.endswith('/'):\n slug += '/'\n extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler,\n dict(fallback=wsgi, proxy=slug)))\n continue\n apps[slug] = partial(_eval_panel, app, server_id, title, location)\n else:\n apps = {'/': partial(_eval_panel, panel, server_id, title, location)}\n\n opts = dict(kwargs)\n if loop:\n loop.make_current()\n opts['io_loop'] = loop\n else:\n opts['io_loop'] = IOLoop.current()\n\n if 'index' not in opts:\n opts['index'] = INDEX_HTML\n\n if websocket_origin:\n if not isinstance(websocket_origin, list):\n websocket_origin = [websocket_origin]\n opts['allow_websocket_origin'] = websocket_origin\n\n server = Server(apps, port=port, **opts)\n if verbose:\n address = server.address or 'localhost'\n print(\"Launching server at http://%s:%s\" % (address, server.port))\n\n state._servers[server_id] = (server, panel, [])\n\n if show:\n def show_callback():\n server.show('/')\n server.io_loop.add_callback(show_callback)\n\n def sig_exit(*args, **kwargs):\n server.io_loop.add_callback_from_signal(do_stop)\n\n def do_stop(*args, **kwargs):\n server.io_loop.stop()\n\n try:\n signal.signal(signal.SIGINT, sig_exit)\n except ValueError:\n pass # Can't use signal on a thread\n\n if start:\n server.start()\n try:\n server.io_loop.start()\n except RuntimeError:\n pass\n return server\n\n\nclass StoppableThread(threading.Thread):\n \"\"\"Thread class with a stop() method.\"\"\"\n\n def __init__(self, io_loop=None, timeout=1000, **kwargs):\n from tornado import ioloop\n super(StoppableThread, self).__init__(**kwargs)\n self._stop_event = threading.Event()\n self.io_loop = io_loop\n self._cb = ioloop.PeriodicCallback(self._check_stopped, timeout)\n self._cb.start()\n\n def _check_stopped(self):\n if self.stopped:\n self._cb.stop()\n self.io_loop.stop()\n\n def run(self):\n if hasattr(self, '_target'):\n target, args, kwargs = self._target, self._args, self._kwargs\n else:\n target, args, kwargs = self._Thread__target, self._Thread__args, self._Thread__kwargs\n if not target:\n return\n bokeh_server = None\n try:\n bokeh_server = target(*args, **kwargs)\n finally:\n if isinstance(bokeh_server, Server):\n bokeh_server.stop()\n if hasattr(self, '_target'):\n del self._target, self._args, self._kwargs\n else:\n del self._Thread__target, self._Thread__args, self._Thread__kwargs\n\n def stop(self):\n self._stop_event.set()\n\n @property\n def stopped(self):\n return self._stop_event.is_set()\n", "path": "panel/io/server.py"}], "after_files": [{"content": "\"\"\"\nUtilities for creating bokeh Server instances.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport os\nimport signal\nimport sys\nimport threading\nimport uuid\n\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom types import FunctionType\n\nfrom bokeh.document.events import ModelChangedEvent\nfrom bokeh.server.server import Server\nfrom tornado.websocket import WebSocketHandler\nfrom tornado.web import RequestHandler\nfrom tornado.wsgi import WSGIContainer\n\nfrom .state import state\n\n\n#---------------------------------------------------------------------\n# Private API\n#---------------------------------------------------------------------\n\nINDEX_HTML = os.path.join(os.path.dirname(__file__), '..', '_templates', \"index.html\")\n\ndef _origin_url(url):\n if 
url.startswith(\"http\"):\n url = url.split(\"//\")[1]\n return url\n\n\ndef _server_url(url, port):\n if url.startswith(\"http\"):\n return '%s:%d%s' % (url.rsplit(':', 1)[0], port, \"/\")\n else:\n return 'http://%s:%d%s' % (url.split(':')[0], port, \"/\")\n\ndef _eval_panel(panel, server_id, title, location, doc):\n from ..template import BaseTemplate\n from ..pane import panel as as_panel\n\n if isinstance(panel, FunctionType):\n panel = panel()\n if isinstance(panel, BaseTemplate):\n return panel._modify_doc(server_id, title, doc, location)\n return as_panel(panel)._modify_doc(server_id, title, doc, location)\n\n#---------------------------------------------------------------------\n# Public API\n#---------------------------------------------------------------------\n\n\n@contextmanager\ndef unlocked():\n \"\"\"\n Context manager which unlocks a Document and dispatches\n ModelChangedEvents triggered in the context body to all sockets\n on current sessions.\n \"\"\"\n curdoc = state.curdoc\n if curdoc is None or curdoc.session_context is None:\n yield\n return\n connections = curdoc.session_context.session._subscribed_connections\n\n hold = curdoc._hold\n if hold:\n old_events = list(curdoc._held_events)\n else:\n old_events = []\n curdoc.hold()\n try:\n yield\n events = []\n for conn in connections:\n socket = conn._socket\n if hasattr(socket, 'write_lock') and socket.write_lock._block._value == 0:\n state._locks.add(socket)\n locked = socket in state._locks\n for event in curdoc._held_events:\n if (isinstance(event, ModelChangedEvent) and event not in old_events\n and hasattr(socket, 'write_message') and not locked):\n msg = conn.protocol.create('PATCH-DOC', [event])\n WebSocketHandler.write_message(socket, msg.header_json)\n WebSocketHandler.write_message(socket, msg.metadata_json)\n WebSocketHandler.write_message(socket, msg.content_json)\n for header, payload in msg._buffers:\n WebSocketHandler.write_message(socket, header)\n WebSocketHandler.write_message(socket, payload, binary=True)\n elif event not in events:\n events.append(event)\n curdoc._held_events = events\n finally:\n if not hold:\n curdoc.unhold()\n\n\ndef serve(panels, port=0, websocket_origin=None, loop=None, show=True,\n start=True, title=None, verbose=True, location=True, **kwargs):\n \"\"\"\n Allows serving one or more panel objects on a single server.\n The panels argument should be either a Panel object or a function\n returning a Panel object or a dictionary of these two. If a \n dictionary is supplied the keys represent the slugs at which\n each app is served, e.g. 
`serve({'app': panel1, 'app2': panel2})`\n will serve apps at /app and /app2 on the server.\n\n Arguments\n ---------\n panel: Viewable, function or {str: Viewable}\n A Panel object, a function returning a Panel object or a\n dictionary mapping from the URL slug to either.\n port: int (optional, default=0)\n Allows specifying a specific port\n websocket_origin: str or list(str) (optional)\n A list of hosts that can connect to the websocket.\n\n This is typically required when embedding a server app in\n an external web site.\n\n If None, \"localhost\" is used.\n loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())\n The tornado IOLoop to run the Server on\n show : boolean (optional, default=False)\n Whether to open the server in a new browser tab on start\n start : boolean(optional, default=False)\n Whether to start the Server\n title: str or {str: str} (optional, default=None)\n An HTML title for the application or a dictionary mapping\n from the URL slug to a customized title\n verbose: boolean (optional, default=True)\n Whether to print the address and port\n location : boolean or panel.io.location.Location\n Whether to create a Location component to observe and\n set the URL location.\n kwargs: dict\n Additional keyword arguments to pass to Server instance\n \"\"\"\n return get_server(panels, port, websocket_origin, loop, show, start,\n title, verbose, location, **kwargs)\n\n\nclass ProxyFallbackHandler(RequestHandler):\n \"\"\"A `RequestHandler` that wraps another HTTP server callback and\n proxies the subpath.\n \"\"\"\n\n def initialize(self, fallback, proxy=None):\n self.fallback = fallback\n self.proxy = proxy\n\n def prepare(self):\n if self.proxy:\n self.request.path = self.request.path.replace(self.proxy, '')\n self.fallback(self.request)\n self._finished = True\n self.on_finish()\n\n\ndef get_server(panel, port=0, websocket_origin=None, loop=None,\n show=False, start=False, title=None, verbose=False,\n location=True, **kwargs):\n \"\"\"\n Returns a Server instance with this panel attached as the root\n app.\n\n Arguments\n ---------\n panel: Viewable, function or {str: Viewable}\n A Panel object, a function returning a Panel object or a\n dictionary mapping from the URL slug to either.\n port: int (optional, default=0)\n Allows specifying a specific port\n websocket_origin: str or list(str) (optional)\n A list of hosts that can connect to the websocket.\n\n This is typically required when embedding a server app in\n an external web site.\n\n If None, \"localhost\" is used.\n loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())\n The tornado IOLoop to run the Server on\n show : boolean (optional, default=False)\n Whether to open the server in a new browser tab on start\n start : boolean(optional, default=False)\n Whether to start the Server\n title: str or {str: str} (optional, default=None)\n An HTML title for the application or a dictionary mapping\n from the URL slug to a customized title\n verbose: boolean (optional, default=False)\n Whether to report the address and port\n location : boolean or panel.io.location.Location\n Whether to create a Location component to observe and\n set the URL location.\n kwargs: dict\n Additional keyword arguments to pass to Server instance\n\n Returns\n -------\n server : bokeh.server.server.Server\n Bokeh Server instance running this panel\n \"\"\"\n from tornado.ioloop import IOLoop\n\n server_id = kwargs.pop('server_id', uuid.uuid4().hex)\n kwargs['extra_patterns'] = extra_patterns = kwargs.get('extra_patterns', 
[])\n if isinstance(panel, dict):\n apps = {}\n for slug, app in panel.items():\n if isinstance(title, dict):\n try:\n title_ = title[slug]\n except KeyError:\n raise KeyError(\n \"Keys of the title dictionnary and of the apps \"\n f\"dictionary must match. No {slug} key found in the \"\n \"title dictionnary.\") \n else:\n title_ = title\n slug = slug if slug.startswith('/') else '/'+slug\n if 'flask' in sys.modules:\n from flask import Flask\n if isinstance(app, Flask):\n wsgi = WSGIContainer(app)\n if slug == '/':\n raise ValueError('Flask apps must be served on a subpath.')\n if not slug.endswith('/'):\n slug += '/'\n extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler,\n dict(fallback=wsgi, proxy=slug)))\n continue\n apps[slug] = partial(_eval_panel, app, server_id, title_, location)\n else:\n apps = {'/': partial(_eval_panel, panel, server_id, title, location)}\n\n opts = dict(kwargs)\n if loop:\n loop.make_current()\n opts['io_loop'] = loop\n else:\n opts['io_loop'] = IOLoop.current()\n\n if 'index' not in opts:\n opts['index'] = INDEX_HTML\n\n if websocket_origin:\n if not isinstance(websocket_origin, list):\n websocket_origin = [websocket_origin]\n opts['allow_websocket_origin'] = websocket_origin\n\n server = Server(apps, port=port, **opts)\n if verbose:\n address = server.address or 'localhost'\n print(\"Launching server at http://%s:%s\" % (address, server.port))\n\n state._servers[server_id] = (server, panel, [])\n\n if show:\n def show_callback():\n server.show('/')\n server.io_loop.add_callback(show_callback)\n\n def sig_exit(*args, **kwargs):\n server.io_loop.add_callback_from_signal(do_stop)\n\n def do_stop(*args, **kwargs):\n server.io_loop.stop()\n\n try:\n signal.signal(signal.SIGINT, sig_exit)\n except ValueError:\n pass # Can't use signal on a thread\n\n if start:\n server.start()\n try:\n server.io_loop.start()\n except RuntimeError:\n pass\n return server\n\n\nclass StoppableThread(threading.Thread):\n \"\"\"Thread class with a stop() method.\"\"\"\n\n def __init__(self, io_loop=None, timeout=1000, **kwargs):\n from tornado import ioloop\n super(StoppableThread, self).__init__(**kwargs)\n self._stop_event = threading.Event()\n self.io_loop = io_loop\n self._cb = ioloop.PeriodicCallback(self._check_stopped, timeout)\n self._cb.start()\n\n def _check_stopped(self):\n if self.stopped:\n self._cb.stop()\n self.io_loop.stop()\n\n def run(self):\n if hasattr(self, '_target'):\n target, args, kwargs = self._target, self._args, self._kwargs\n else:\n target, args, kwargs = self._Thread__target, self._Thread__args, self._Thread__kwargs\n if not target:\n return\n bokeh_server = None\n try:\n bokeh_server = target(*args, **kwargs)\n finally:\n if isinstance(bokeh_server, Server):\n bokeh_server.stop()\n if hasattr(self, '_target'):\n del self._target, self._args, self._kwargs\n else:\n del self._Thread__target, self._Thread__args, self._Thread__kwargs\n\n def stop(self):\n self._stop_event.set()\n\n @property\n def stopped(self):\n return self._stop_event.is_set()\n", "path": "panel/io/server.py"}]}
3,893
556
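The Panel patch recorded above extends `serve()`/`get_server()` so that `title` may be a dict keyed by the same slugs as the apps dict (raising a `KeyError` when the keys do not match). A minimal usage sketch of that patched behaviour; the two Markdown panes and the port are placeholder assumptions, not part of the record:

```python
# Sketch only: exercises the title-as-dict behaviour added by the patch above.
import panel as pn

app1 = pn.pane.Markdown("# First app")
app2 = pn.pane.Markdown("# Second app")

# Keys of `title` must match the slugs used in the apps dict,
# otherwise the patched get_server() raises a KeyError.
pn.serve(
    {"app": app1, "app2": app2},
    title={"app": "First App", "app2": "Second App"},
    port=5006,
    show=False,
)
```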
gh_patches_debug_26273
rasdani/github-patches
git_diff
getredash__redash-3442
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PostgreSQL queries returning a tsrange fail ### Issue Summary Running a query which includes a tsrange column fail with a JSON error like this: ``` DateTimeRange(datetime.datetime(2016, 8, 25, 15, 21, 45, 760521), datetime.datetime(2016, 10, 14, 5, 56, 52, 452297), '[)') is not JSON serializable ``` ### Steps to Reproduce 1. Create a table with a tsrange column 2. Insert some data into it 3. Create a `SELECT * FROM table` query 4. Run the query This is turning out to be confusing for our users - I'm getting regular complaints about queries failing with this error because some of our tables have range columns. ### Technical details: * Redash Version: 1.0.1+b2845 * Browser/OS: Linux * How did you install Redash: Docker image --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `redash/query_runner/pg.py` Content: ``` 1 import os 2 import logging 3 import select 4 5 import psycopg2 6 7 from redash.query_runner import * 8 from redash.utils import json_dumps, json_loads 9 10 logger = logging.getLogger(__name__) 11 12 types_map = { 13 20: TYPE_INTEGER, 14 21: TYPE_INTEGER, 15 23: TYPE_INTEGER, 16 700: TYPE_FLOAT, 17 1700: TYPE_FLOAT, 18 701: TYPE_FLOAT, 19 16: TYPE_BOOLEAN, 20 1082: TYPE_DATE, 21 1114: TYPE_DATETIME, 22 1184: TYPE_DATETIME, 23 1014: TYPE_STRING, 24 1015: TYPE_STRING, 25 1008: TYPE_STRING, 26 1009: TYPE_STRING, 27 2951: TYPE_STRING 28 } 29 30 31 def _wait(conn, timeout=None): 32 while 1: 33 try: 34 state = conn.poll() 35 if state == psycopg2.extensions.POLL_OK: 36 break 37 elif state == psycopg2.extensions.POLL_WRITE: 38 select.select([], [conn.fileno()], [], timeout) 39 elif state == psycopg2.extensions.POLL_READ: 40 select.select([conn.fileno()], [], [], timeout) 41 else: 42 raise psycopg2.OperationalError("poll() returned %s" % state) 43 except select.error: 44 raise psycopg2.OperationalError("select.error received") 45 46 47 class PostgreSQL(BaseSQLQueryRunner): 48 noop_query = "SELECT 1" 49 50 @classmethod 51 def configuration_schema(cls): 52 return { 53 "type": "object", 54 "properties": { 55 "user": { 56 "type": "string" 57 }, 58 "password": { 59 "type": "string" 60 }, 61 "host": { 62 "type": "string", 63 "default": "127.0.0.1" 64 }, 65 "port": { 66 "type": "number", 67 "default": 5432 68 }, 69 "dbname": { 70 "type": "string", 71 "title": "Database Name" 72 }, 73 "sslmode": { 74 "type": "string", 75 "title": "SSL Mode", 76 "default": "prefer" 77 } 78 }, 79 "order": ['host', 'port', 'user', 'password'], 80 "required": ["dbname"], 81 "secret": ["password"] 82 } 83 84 @classmethod 85 def type(cls): 86 return "pg" 87 88 def _get_definitions(self, schema, query): 89 results, error = self.run_query(query, None) 90 91 if error is not None: 92 raise Exception("Failed getting schema.") 93 94 results = json_loads(results) 95 96 for row in results['rows']: 97 if row['table_schema'] != 'public': 98 table_name = u'{}.{}'.format(row['table_schema'], row['table_name']) 99 else: 100 table_name = row['table_name'] 101 102 if table_name not in schema: 103 schema[table_name] = {'name': table_name, 'columns': []} 104 105 schema[table_name]['columns'].append(row['column_name']) 106 107 def _get_tables(self, schema): 108 ''' 109 relkind constants per https://www.postgresql.org/docs/10/static/catalog-pg-class.html 110 r = regular table 111 v = view 112 m = materialized view 113 f = foreign 
table 114 p = partitioned table (new in 10) 115 --- 116 i = index 117 S = sequence 118 t = TOAST table 119 c = composite type 120 ''' 121 122 query = """ 123 SELECT s.nspname as table_schema, 124 c.relname as table_name, 125 a.attname as column_name 126 FROM pg_class c 127 JOIN pg_namespace s 128 ON c.relnamespace = s.oid 129 AND s.nspname NOT IN ('pg_catalog', 'information_schema') 130 JOIN pg_attribute a 131 ON a.attrelid = c.oid 132 AND a.attnum > 0 133 AND NOT a.attisdropped 134 WHERE c.relkind IN ('r', 'v', 'm', 'f', 'p') 135 """ 136 137 self._get_definitions(schema, query) 138 139 return schema.values() 140 141 def _get_connection(self): 142 connection = psycopg2.connect(user=self.configuration.get('user'), 143 password=self.configuration.get('password'), 144 host=self.configuration.get('host'), 145 port=self.configuration.get('port'), 146 dbname=self.configuration.get('dbname'), 147 sslmode=self.configuration.get('sslmode'), 148 async_=True) 149 150 return connection 151 152 def run_query(self, query, user): 153 connection = self._get_connection() 154 _wait(connection, timeout=10) 155 156 cursor = connection.cursor() 157 158 try: 159 cursor.execute(query) 160 _wait(connection) 161 162 if cursor.description is not None: 163 columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description]) 164 rows = [dict(zip((c['name'] for c in columns), row)) for row in cursor] 165 166 data = {'columns': columns, 'rows': rows} 167 error = None 168 json_data = json_dumps(data, ignore_nan=True) 169 else: 170 error = 'Query completed but it returned no data.' 171 json_data = None 172 except (select.error, OSError) as e: 173 error = "Query interrupted. Please retry." 174 json_data = None 175 except psycopg2.DatabaseError as e: 176 error = e.message 177 json_data = None 178 except (KeyboardInterrupt, InterruptException): 179 connection.cancel() 180 error = "Query cancelled by user." 
181 json_data = None 182 finally: 183 connection.close() 184 185 return json_data, error 186 187 188 class Redshift(PostgreSQL): 189 @classmethod 190 def type(cls): 191 return "redshift" 192 193 def _get_connection(self): 194 sslrootcert_path = os.path.join(os.path.dirname(__file__), './files/redshift-ca-bundle.crt') 195 196 connection = psycopg2.connect(user=self.configuration.get('user'), 197 password=self.configuration.get('password'), 198 host=self.configuration.get('host'), 199 port=self.configuration.get('port'), 200 dbname=self.configuration.get('dbname'), 201 sslmode=self.configuration.get('sslmode', 'prefer'), 202 sslrootcert=sslrootcert_path, 203 async_=True) 204 205 return connection 206 207 @classmethod 208 def configuration_schema(cls): 209 210 return { 211 "type": "object", 212 "properties": { 213 "user": { 214 "type": "string" 215 }, 216 "password": { 217 "type": "string" 218 }, 219 "host": { 220 "type": "string" 221 }, 222 "port": { 223 "type": "number" 224 }, 225 "dbname": { 226 "type": "string", 227 "title": "Database Name" 228 }, 229 "sslmode": { 230 "type": "string", 231 "title": "SSL Mode", 232 "default": "prefer" 233 } 234 }, 235 "order": ['host', 'port', 'user', 'password'], 236 "required": ["dbname", "user", "password", "host", "port"], 237 "secret": ["password"] 238 } 239 240 def _get_tables(self, schema): 241 # Use svv_columns to include internal & external (Spectrum) tables and views data for Redshift 242 # https://docs.aws.amazon.com/redshift/latest/dg/r_SVV_COLUMNS.html 243 # Use PG_GET_LATE_BINDING_VIEW_COLS to include schema for late binding views data for Redshift 244 # https://docs.aws.amazon.com/redshift/latest/dg/PG_GET_LATE_BINDING_VIEW_COLS.html 245 # Use HAS_SCHEMA_PRIVILEGE(), SVV_EXTERNAL_SCHEMAS and HAS_TABLE_PRIVILEGE() to filter 246 # out tables the current user cannot access. 247 # https://docs.aws.amazon.com/redshift/latest/dg/r_HAS_SCHEMA_PRIVILEGE.html 248 # https://docs.aws.amazon.com/redshift/latest/dg/r_SVV_EXTERNAL_SCHEMAS.html 249 # https://docs.aws.amazon.com/redshift/latest/dg/r_HAS_TABLE_PRIVILEGE.html 250 query = """ 251 WITH tables AS ( 252 SELECT DISTINCT table_name, 253 table_schema, 254 column_name, 255 ordinal_position AS pos 256 FROM svv_columns 257 WHERE table_schema NOT IN ('pg_internal','pg_catalog','information_schema') 258 UNION ALL 259 SELECT DISTINCT view_name::varchar AS table_name, 260 view_schema::varchar AS table_schema, 261 col_name::varchar AS column_name, 262 col_num AS pos 263 FROM pg_get_late_binding_view_cols() 264 cols(view_schema name, view_name name, col_name name, col_type varchar, col_num int) 265 ) 266 SELECT table_name, table_schema, column_name 267 FROM tables 268 WHERE 269 HAS_SCHEMA_PRIVILEGE(table_schema, 'USAGE') AND 270 ( 271 table_schema IN (SELECT schemaname FROM SVV_EXTERNAL_SCHEMAS) OR 272 HAS_TABLE_PRIVILEGE('"' || table_schema || '"."' || table_name || '"', 'SELECT') 273 ) 274 ORDER BY table_name, pos 275 """ 276 277 self._get_definitions(schema, query) 278 279 return schema.values() 280 281 282 class CockroachDB(PostgreSQL): 283 284 @classmethod 285 def type(cls): 286 return "cockroach" 287 288 register(PostgreSQL) 289 register(Redshift) 290 register(CockroachDB) 291 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/redash/query_runner/pg.py b/redash/query_runner/pg.py --- a/redash/query_runner/pg.py +++ b/redash/query_runner/pg.py @@ -3,9 +3,10 @@ import select import psycopg2 +from psycopg2.extras import Range from redash.query_runner import * -from redash.utils import json_dumps, json_loads +from redash.utils import JSONEncoder, json_dumps, json_loads logger = logging.getLogger(__name__) @@ -28,6 +29,26 @@ } +class PostgreSQLJSONEncoder(JSONEncoder): + def default(self, o): + if isinstance(o, Range): + # From: https://github.com/psycopg/psycopg2/pull/779 + if o._bounds is None: + return '' + + items = [ + o._bounds[0], + str(o._lower), + ', ', + str(o._upper), + o._bounds[1] + ] + + return ''.join(items) + + return super(PostgreSQLJSONEncoder, self).default(o) + + def _wait(conn, timeout=None): while 1: try: @@ -165,7 +186,7 @@ data = {'columns': columns, 'rows': rows} error = None - json_data = json_dumps(data, ignore_nan=True) + json_data = json_dumps(data, ignore_nan=True, cls=PostgreSQLJSONEncoder) else: error = 'Query completed but it returned no data.' json_data = None
{"golden_diff": "diff --git a/redash/query_runner/pg.py b/redash/query_runner/pg.py\n--- a/redash/query_runner/pg.py\n+++ b/redash/query_runner/pg.py\n@@ -3,9 +3,10 @@\n import select\n \n import psycopg2\n+from psycopg2.extras import Range\n \n from redash.query_runner import *\n-from redash.utils import json_dumps, json_loads\n+from redash.utils import JSONEncoder, json_dumps, json_loads\n \n logger = logging.getLogger(__name__)\n \n@@ -28,6 +29,26 @@\n }\n \n \n+class PostgreSQLJSONEncoder(JSONEncoder):\n+ def default(self, o):\n+ if isinstance(o, Range):\n+ # From: https://github.com/psycopg/psycopg2/pull/779\n+ if o._bounds is None:\n+ return ''\n+\n+ items = [\n+ o._bounds[0],\n+ str(o._lower),\n+ ', ',\n+ str(o._upper),\n+ o._bounds[1]\n+ ]\n+\n+ return ''.join(items)\n+\n+ return super(PostgreSQLJSONEncoder, self).default(o)\n+\n+\n def _wait(conn, timeout=None):\n while 1:\n try:\n@@ -165,7 +186,7 @@\n \n data = {'columns': columns, 'rows': rows}\n error = None\n- json_data = json_dumps(data, ignore_nan=True)\n+ json_data = json_dumps(data, ignore_nan=True, cls=PostgreSQLJSONEncoder)\n else:\n error = 'Query completed but it returned no data.'\n json_data = None\n", "issue": "PostgreSQL queries returning a tsrange fail\n### Issue Summary\r\n\r\nRunning a query which includes a tsrange column fail with a JSON error like this:\r\n\r\n```\r\nDateTimeRange(datetime.datetime(2016, 8, 25, 15, 21, 45, 760521), datetime.datetime(2016, 10, 14, 5, 56, 52, 452297), '[)') is not JSON serializable\r\n```\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create a table with a tsrange column\r\n2. Insert some data into it\r\n3. Create a `SELECT * FROM table` query\r\n4. Run the query\r\n\r\nThis is turning out to be confusing for our users - I'm getting regular complaints about queries failing with this error because some of our tables have range columns.\r\n\r\n### Technical details:\r\n\r\n* Redash Version: 1.0.1+b2845\r\n* Browser/OS: Linux\r\n* How did you install Redash: Docker image\r\n\n", "before_files": [{"content": "import os\nimport logging\nimport select\n\nimport psycopg2\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\ntypes_map = {\n 20: TYPE_INTEGER,\n 21: TYPE_INTEGER,\n 23: TYPE_INTEGER,\n 700: TYPE_FLOAT,\n 1700: TYPE_FLOAT,\n 701: TYPE_FLOAT,\n 16: TYPE_BOOLEAN,\n 1082: TYPE_DATE,\n 1114: TYPE_DATETIME,\n 1184: TYPE_DATETIME,\n 1014: TYPE_STRING,\n 1015: TYPE_STRING,\n 1008: TYPE_STRING,\n 1009: TYPE_STRING,\n 2951: TYPE_STRING\n}\n\n\ndef _wait(conn, timeout=None):\n while 1:\n try:\n state = conn.poll()\n if state == psycopg2.extensions.POLL_OK:\n break\n elif state == psycopg2.extensions.POLL_WRITE:\n select.select([], [conn.fileno()], [], timeout)\n elif state == psycopg2.extensions.POLL_READ:\n select.select([conn.fileno()], [], [], timeout)\n else:\n raise psycopg2.OperationalError(\"poll() returned %s\" % state)\n except select.error:\n raise psycopg2.OperationalError(\"select.error received\")\n\n\nclass PostgreSQL(BaseSQLQueryRunner):\n noop_query = \"SELECT 1\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"user\": {\n \"type\": \"string\"\n },\n \"password\": {\n \"type\": \"string\"\n },\n \"host\": {\n \"type\": \"string\",\n \"default\": \"127.0.0.1\"\n },\n \"port\": {\n \"type\": \"number\",\n \"default\": 5432\n },\n \"dbname\": {\n \"type\": \"string\",\n \"title\": \"Database Name\"\n },\n \"sslmode\": {\n \"type\": 
\"string\",\n \"title\": \"SSL Mode\",\n \"default\": \"prefer\"\n }\n },\n \"order\": ['host', 'port', 'user', 'password'],\n \"required\": [\"dbname\"],\n \"secret\": [\"password\"]\n }\n\n @classmethod\n def type(cls):\n return \"pg\"\n\n def _get_definitions(self, schema, query):\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results['rows']:\n if row['table_schema'] != 'public':\n table_name = u'{}.{}'.format(row['table_schema'], row['table_name'])\n else:\n table_name = row['table_name']\n\n if table_name not in schema:\n schema[table_name] = {'name': table_name, 'columns': []}\n\n schema[table_name]['columns'].append(row['column_name'])\n\n def _get_tables(self, schema):\n '''\n relkind constants per https://www.postgresql.org/docs/10/static/catalog-pg-class.html\n r = regular table\n v = view\n m = materialized view\n f = foreign table\n p = partitioned table (new in 10)\n ---\n i = index\n S = sequence\n t = TOAST table\n c = composite type\n '''\n\n query = \"\"\"\n SELECT s.nspname as table_schema,\n c.relname as table_name,\n a.attname as column_name\n FROM pg_class c\n JOIN pg_namespace s\n ON c.relnamespace = s.oid\n AND s.nspname NOT IN ('pg_catalog', 'information_schema')\n JOIN pg_attribute a\n ON a.attrelid = c.oid\n AND a.attnum > 0\n AND NOT a.attisdropped\n WHERE c.relkind IN ('r', 'v', 'm', 'f', 'p')\n \"\"\"\n\n self._get_definitions(schema, query)\n\n return schema.values()\n\n def _get_connection(self):\n connection = psycopg2.connect(user=self.configuration.get('user'),\n password=self.configuration.get('password'),\n host=self.configuration.get('host'),\n port=self.configuration.get('port'),\n dbname=self.configuration.get('dbname'),\n sslmode=self.configuration.get('sslmode'),\n async_=True)\n\n return connection\n\n def run_query(self, query, user):\n connection = self._get_connection()\n _wait(connection, timeout=10)\n\n cursor = connection.cursor()\n\n try:\n cursor.execute(query)\n _wait(connection)\n\n if cursor.description is not None:\n columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description])\n rows = [dict(zip((c['name'] for c in columns), row)) for row in cursor]\n\n data = {'columns': columns, 'rows': rows}\n error = None\n json_data = json_dumps(data, ignore_nan=True)\n else:\n error = 'Query completed but it returned no data.'\n json_data = None\n except (select.error, OSError) as e:\n error = \"Query interrupted. 
Please retry.\"\n json_data = None\n except psycopg2.DatabaseError as e:\n error = e.message\n json_data = None\n except (KeyboardInterrupt, InterruptException):\n connection.cancel()\n error = \"Query cancelled by user.\"\n json_data = None\n finally:\n connection.close()\n\n return json_data, error\n\n\nclass Redshift(PostgreSQL):\n @classmethod\n def type(cls):\n return \"redshift\"\n\n def _get_connection(self):\n sslrootcert_path = os.path.join(os.path.dirname(__file__), './files/redshift-ca-bundle.crt')\n\n connection = psycopg2.connect(user=self.configuration.get('user'),\n password=self.configuration.get('password'),\n host=self.configuration.get('host'),\n port=self.configuration.get('port'),\n dbname=self.configuration.get('dbname'),\n sslmode=self.configuration.get('sslmode', 'prefer'),\n sslrootcert=sslrootcert_path,\n async_=True)\n\n return connection\n\n @classmethod\n def configuration_schema(cls):\n\n return {\n \"type\": \"object\",\n \"properties\": {\n \"user\": {\n \"type\": \"string\"\n },\n \"password\": {\n \"type\": \"string\"\n },\n \"host\": {\n \"type\": \"string\"\n },\n \"port\": {\n \"type\": \"number\"\n },\n \"dbname\": {\n \"type\": \"string\",\n \"title\": \"Database Name\"\n },\n \"sslmode\": {\n \"type\": \"string\",\n \"title\": \"SSL Mode\",\n \"default\": \"prefer\"\n }\n },\n \"order\": ['host', 'port', 'user', 'password'],\n \"required\": [\"dbname\", \"user\", \"password\", \"host\", \"port\"],\n \"secret\": [\"password\"]\n }\n\n def _get_tables(self, schema):\n # Use svv_columns to include internal & external (Spectrum) tables and views data for Redshift\n # https://docs.aws.amazon.com/redshift/latest/dg/r_SVV_COLUMNS.html\n # Use PG_GET_LATE_BINDING_VIEW_COLS to include schema for late binding views data for Redshift\n # https://docs.aws.amazon.com/redshift/latest/dg/PG_GET_LATE_BINDING_VIEW_COLS.html\n # Use HAS_SCHEMA_PRIVILEGE(), SVV_EXTERNAL_SCHEMAS and HAS_TABLE_PRIVILEGE() to filter\n # out tables the current user cannot access.\n # https://docs.aws.amazon.com/redshift/latest/dg/r_HAS_SCHEMA_PRIVILEGE.html\n # https://docs.aws.amazon.com/redshift/latest/dg/r_SVV_EXTERNAL_SCHEMAS.html\n # https://docs.aws.amazon.com/redshift/latest/dg/r_HAS_TABLE_PRIVILEGE.html\n query = \"\"\"\n WITH tables AS (\n SELECT DISTINCT table_name,\n table_schema,\n column_name,\n ordinal_position AS pos\n FROM svv_columns\n WHERE table_schema NOT IN ('pg_internal','pg_catalog','information_schema')\n UNION ALL\n SELECT DISTINCT view_name::varchar AS table_name,\n view_schema::varchar AS table_schema,\n col_name::varchar AS column_name,\n col_num AS pos\n FROM pg_get_late_binding_view_cols()\n cols(view_schema name, view_name name, col_name name, col_type varchar, col_num int)\n )\n SELECT table_name, table_schema, column_name\n FROM tables\n WHERE\n HAS_SCHEMA_PRIVILEGE(table_schema, 'USAGE') AND\n (\n table_schema IN (SELECT schemaname FROM SVV_EXTERNAL_SCHEMAS) OR\n HAS_TABLE_PRIVILEGE('\"' || table_schema || '\".\"' || table_name || '\"', 'SELECT')\n )\n ORDER BY table_name, pos\n \"\"\"\n\n self._get_definitions(schema, query)\n\n return schema.values()\n\n\nclass CockroachDB(PostgreSQL):\n\n @classmethod\n def type(cls):\n return \"cockroach\"\n\nregister(PostgreSQL)\nregister(Redshift)\nregister(CockroachDB)\n", "path": "redash/query_runner/pg.py"}], "after_files": [{"content": "import os\nimport logging\nimport select\n\nimport psycopg2\nfrom psycopg2.extras import Range\n\nfrom redash.query_runner import *\nfrom redash.utils import JSONEncoder, 
json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\ntypes_map = {\n 20: TYPE_INTEGER,\n 21: TYPE_INTEGER,\n 23: TYPE_INTEGER,\n 700: TYPE_FLOAT,\n 1700: TYPE_FLOAT,\n 701: TYPE_FLOAT,\n 16: TYPE_BOOLEAN,\n 1082: TYPE_DATE,\n 1114: TYPE_DATETIME,\n 1184: TYPE_DATETIME,\n 1014: TYPE_STRING,\n 1015: TYPE_STRING,\n 1008: TYPE_STRING,\n 1009: TYPE_STRING,\n 2951: TYPE_STRING\n}\n\n\nclass PostgreSQLJSONEncoder(JSONEncoder):\n def default(self, o):\n if isinstance(o, Range):\n # From: https://github.com/psycopg/psycopg2/pull/779\n if o._bounds is None:\n return ''\n\n items = [\n o._bounds[0],\n str(o._lower),\n ', ',\n str(o._upper),\n o._bounds[1]\n ]\n\n return ''.join(items)\n\n return super(PostgreSQLJSONEncoder, self).default(o)\n\n\ndef _wait(conn, timeout=None):\n while 1:\n try:\n state = conn.poll()\n if state == psycopg2.extensions.POLL_OK:\n break\n elif state == psycopg2.extensions.POLL_WRITE:\n select.select([], [conn.fileno()], [], timeout)\n elif state == psycopg2.extensions.POLL_READ:\n select.select([conn.fileno()], [], [], timeout)\n else:\n raise psycopg2.OperationalError(\"poll() returned %s\" % state)\n except select.error:\n raise psycopg2.OperationalError(\"select.error received\")\n\n\nclass PostgreSQL(BaseSQLQueryRunner):\n noop_query = \"SELECT 1\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"user\": {\n \"type\": \"string\"\n },\n \"password\": {\n \"type\": \"string\"\n },\n \"host\": {\n \"type\": \"string\",\n \"default\": \"127.0.0.1\"\n },\n \"port\": {\n \"type\": \"number\",\n \"default\": 5432\n },\n \"dbname\": {\n \"type\": \"string\",\n \"title\": \"Database Name\"\n },\n \"sslmode\": {\n \"type\": \"string\",\n \"title\": \"SSL Mode\",\n \"default\": \"prefer\"\n }\n },\n \"order\": ['host', 'port', 'user', 'password'],\n \"required\": [\"dbname\"],\n \"secret\": [\"password\"]\n }\n\n @classmethod\n def type(cls):\n return \"pg\"\n\n def _get_definitions(self, schema, query):\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results['rows']:\n if row['table_schema'] != 'public':\n table_name = u'{}.{}'.format(row['table_schema'], row['table_name'])\n else:\n table_name = row['table_name']\n\n if table_name not in schema:\n schema[table_name] = {'name': table_name, 'columns': []}\n\n schema[table_name]['columns'].append(row['column_name'])\n\n def _get_tables(self, schema):\n '''\n relkind constants per https://www.postgresql.org/docs/10/static/catalog-pg-class.html\n r = regular table\n v = view\n m = materialized view\n f = foreign table\n p = partitioned table (new in 10)\n ---\n i = index\n S = sequence\n t = TOAST table\n c = composite type\n '''\n\n query = \"\"\"\n SELECT s.nspname as table_schema,\n c.relname as table_name,\n a.attname as column_name\n FROM pg_class c\n JOIN pg_namespace s\n ON c.relnamespace = s.oid\n AND s.nspname NOT IN ('pg_catalog', 'information_schema')\n JOIN pg_attribute a\n ON a.attrelid = c.oid\n AND a.attnum > 0\n AND NOT a.attisdropped\n WHERE c.relkind IN ('r', 'v', 'm', 'f', 'p')\n \"\"\"\n\n self._get_definitions(schema, query)\n\n return schema.values()\n\n def _get_connection(self):\n connection = psycopg2.connect(user=self.configuration.get('user'),\n password=self.configuration.get('password'),\n host=self.configuration.get('host'),\n port=self.configuration.get('port'),\n dbname=self.configuration.get('dbname'),\n 
sslmode=self.configuration.get('sslmode'),\n async_=True)\n\n return connection\n\n def run_query(self, query, user):\n connection = self._get_connection()\n _wait(connection, timeout=10)\n\n cursor = connection.cursor()\n\n try:\n cursor.execute(query)\n _wait(connection)\n\n if cursor.description is not None:\n columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description])\n rows = [dict(zip((c['name'] for c in columns), row)) for row in cursor]\n\n data = {'columns': columns, 'rows': rows}\n error = None\n json_data = json_dumps(data, ignore_nan=True, cls=PostgreSQLJSONEncoder)\n else:\n error = 'Query completed but it returned no data.'\n json_data = None\n except (select.error, OSError) as e:\n error = \"Query interrupted. Please retry.\"\n json_data = None\n except psycopg2.DatabaseError as e:\n error = e.message\n json_data = None\n except (KeyboardInterrupt, InterruptException):\n connection.cancel()\n error = \"Query cancelled by user.\"\n json_data = None\n finally:\n connection.close()\n\n return json_data, error\n\n\nclass Redshift(PostgreSQL):\n @classmethod\n def type(cls):\n return \"redshift\"\n\n def _get_connection(self):\n sslrootcert_path = os.path.join(os.path.dirname(__file__), './files/redshift-ca-bundle.crt')\n\n connection = psycopg2.connect(user=self.configuration.get('user'),\n password=self.configuration.get('password'),\n host=self.configuration.get('host'),\n port=self.configuration.get('port'),\n dbname=self.configuration.get('dbname'),\n sslmode=self.configuration.get('sslmode', 'prefer'),\n sslrootcert=sslrootcert_path,\n async_=True)\n\n return connection\n\n @classmethod\n def configuration_schema(cls):\n\n return {\n \"type\": \"object\",\n \"properties\": {\n \"user\": {\n \"type\": \"string\"\n },\n \"password\": {\n \"type\": \"string\"\n },\n \"host\": {\n \"type\": \"string\"\n },\n \"port\": {\n \"type\": \"number\"\n },\n \"dbname\": {\n \"type\": \"string\",\n \"title\": \"Database Name\"\n },\n \"sslmode\": {\n \"type\": \"string\",\n \"title\": \"SSL Mode\",\n \"default\": \"prefer\"\n }\n },\n \"order\": ['host', 'port', 'user', 'password'],\n \"required\": [\"dbname\", \"user\", \"password\", \"host\", \"port\"],\n \"secret\": [\"password\"]\n }\n\n def _get_tables(self, schema):\n # Use svv_columns to include internal & external (Spectrum) tables and views data for Redshift\n # https://docs.aws.amazon.com/redshift/latest/dg/r_SVV_COLUMNS.html\n # Use PG_GET_LATE_BINDING_VIEW_COLS to include schema for late binding views data for Redshift\n # https://docs.aws.amazon.com/redshift/latest/dg/PG_GET_LATE_BINDING_VIEW_COLS.html\n # Use HAS_SCHEMA_PRIVILEGE(), SVV_EXTERNAL_SCHEMAS and HAS_TABLE_PRIVILEGE() to filter\n # out tables the current user cannot access.\n # https://docs.aws.amazon.com/redshift/latest/dg/r_HAS_SCHEMA_PRIVILEGE.html\n # https://docs.aws.amazon.com/redshift/latest/dg/r_SVV_EXTERNAL_SCHEMAS.html\n # https://docs.aws.amazon.com/redshift/latest/dg/r_HAS_TABLE_PRIVILEGE.html\n query = \"\"\"\n WITH tables AS (\n SELECT DISTINCT table_name,\n table_schema,\n column_name,\n ordinal_position AS pos\n FROM svv_columns\n WHERE table_schema NOT IN ('pg_internal','pg_catalog','information_schema')\n UNION ALL\n SELECT DISTINCT view_name::varchar AS table_name,\n view_schema::varchar AS table_schema,\n col_name::varchar AS column_name,\n col_num AS pos\n FROM pg_get_late_binding_view_cols()\n cols(view_schema name, view_name name, col_name name, col_type varchar, col_num int)\n )\n SELECT table_name, 
table_schema, column_name\n FROM tables\n WHERE\n HAS_SCHEMA_PRIVILEGE(table_schema, 'USAGE') AND\n (\n table_schema IN (SELECT schemaname FROM SVV_EXTERNAL_SCHEMAS) OR\n HAS_TABLE_PRIVILEGE('\"' || table_schema || '\".\"' || table_name || '\"', 'SELECT')\n )\n ORDER BY table_name, pos\n \"\"\"\n\n self._get_definitions(schema, query)\n\n return schema.values()\n\n\nclass CockroachDB(PostgreSQL):\n\n @classmethod\n def type(cls):\n return \"cockroach\"\n\nregister(PostgreSQL)\nregister(Redshift)\nregister(CockroachDB)\n", "path": "redash/query_runner/pg.py"}]}
3,280
356
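The Redash golden diff above fixes the tsrange failure by flattening psycopg2 `Range` values to strings inside a custom JSON encoder. Below is a standalone sketch of the same range-to-string rule, using the stdlib `json` `default=` hook instead of Redash's `json_dumps(..., cls=...)` machinery; the sample timestamps are made up and psycopg2 is assumed to be installed:

```python
# Minimal reproduction of the range-serialization rule from the patch above.
import json
from psycopg2.extras import DateTimeRange

def encode_range(o):
    # Mirrors PostgreSQLJSONEncoder.default(): '' for an empty range, otherwise
    # "<lower_bound_char><lower>, <upper><upper_bound_char>", e.g. "[a, b)".
    if o._bounds is None:
        return ""
    return "".join([o._bounds[0], str(o._lower), ", ", str(o._upper), o._bounds[1]])

row = {"period": DateTimeRange("2016-08-25 15:21:45", "2016-10-14 05:56:52", "[)")}
print(json.dumps(row, default=encode_range))
# {"period": "[2016-08-25 15:21:45, 2016-10-14 05:56:52)"}
```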
gh_patches_debug_32108
rasdani/github-patches
git_diff
litestar-org__litestar-3285
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug: `default` not generated in OpenAPI schema for body as models ### Description Declaring default fields (various ways) generates OpenAPI spec with missing `default` declarations. Also, when/if fields are marked into `required` varies a bit. Didn't test what happens when requests are sent - are the defaults actually picked up runtime. But at least the OpenAPI schema generation fails. (The docs don't mention _how_ these are supported so a bit difficult to know how it's _intended_.) ### MCVE ```python import dataclasses import json from dataclasses import dataclass from typing import Annotated import msgspec from litestar import post from litestar.app import Litestar from litestar.params import Parameter from msgspec import Struct from pydantic import BaseModel, Field class PydanticBody(BaseModel): field1: Annotated[str, Field(default="dummy")] field2: str = Field(default="dummy") field3: str = "dummy" field4: Annotated[str, Parameter(default="dummy")] @dataclass class DataclassBody: field1: Annotated[str, Parameter(default="dummy")] # default generated, but declared as required field3: str = Parameter(default="dummy") field4: str = "dummy" field5: str = dataclasses.field(default="dummy") class MsgspecBody(Struct): field1: Annotated[str, Parameter(default="dummy")] # default generated, but declared as required field2: Annotated[str, msgspec.field(default="dummy")] # no default, marked as required field3: str = Parameter(default="dummy") field4: str = "dummy" field5: str = msgspec.field(default="dummy") @post("/1") async def pydantic_handler(data: PydanticBody) -> None: ... @post("/2") async def dataclass_handler(data: DataclassBody) -> None: ... @post("/3") async def msgspec_handler(data: MsgspecBody) -> None: ... app = Litestar([pydantic_handler, dataclass_handler, msgspec_handler]) print(json.dumps(app.openapi_schema.to_schema(), indent=4)) ``` ### Steps to reproduce This generates: ```json "components": { "schemas": { "DataclassBody": { "properties": { "field1": { "type": "string", "default": "dummy" }, "field3": { "type": "string" }, "field4": { "type": "string" }, "field5": { "type": "string" } }, "type": "object", "required": [ "field1" ], "title": "DataclassBody" }, "MsgspecBody": { "properties": { "field1": { "type": "string", "default": "dummy" }, "field2": { "type": "string" }, "field3": { "type": "string" }, "field4": { "type": "string" }, "field5": { "type": "string" } }, "type": "object", "required": [ "field1", "field2" ], "title": "MsgspecBody" }, "PydanticBody": { "properties": { "field1": { "type": "string" }, "field2": { "type": "string" }, "field3": { "type": "string" }, "field4": { "type": "string" } }, "type": "object", "required": [ "field4" ], "title": "PydanticBody" } } ``` ### Litestar Version 2.7.0 ### Platform - [X] Linux - [ ] Mac - [ ] Windows - [ ] Other (Please specify in the description above) <!-- POLAR PLEDGE BADGE START --> --- > [!NOTE] > While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and > [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship. > > Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org) > * If you would like to see an issue prioritized, make a pledge towards it! 
> * We receive the pledge once the issue is completed & verified > * This, along with engagement in the community, helps us know which features are a priority to our users. <a href="https://polar.sh/litestar-org/litestar/issues/3201"> <picture> <source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3201/pledge.svg?darkmode=1"> <img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3201/pledge.svg"> </picture> </a> <!-- POLAR PLEDGE BADGE END --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `litestar/_openapi/schema_generation/plugins/struct.py` Content: ``` 1 from __future__ import annotations 2 3 from typing import TYPE_CHECKING 4 5 from msgspec import Struct 6 from msgspec.structs import fields 7 8 from litestar.plugins import OpenAPISchemaPlugin 9 from litestar.types.empty import Empty 10 from litestar.typing import FieldDefinition 11 from litestar.utils.predicates import is_optional_union 12 13 if TYPE_CHECKING: 14 from msgspec.structs import FieldInfo 15 16 from litestar._openapi.schema_generation import SchemaCreator 17 from litestar.openapi.spec import Schema 18 19 20 class StructSchemaPlugin(OpenAPISchemaPlugin): 21 def is_plugin_supported_field(self, field_definition: FieldDefinition) -> bool: 22 return not field_definition.is_union and field_definition.is_subclass_of(Struct) 23 24 def to_openapi_schema(self, field_definition: FieldDefinition, schema_creator: SchemaCreator) -> Schema: 25 def is_field_required(field: FieldInfo) -> bool: 26 return field.required or field.default_factory is Empty 27 28 type_hints = field_definition.get_type_hints(include_extras=True, resolve_generics=True) 29 struct_fields = fields(field_definition.type_) 30 31 return schema_creator.create_component_schema( 32 field_definition, 33 required=sorted( 34 [ 35 field.encode_name 36 for field in struct_fields 37 if is_field_required(field=field) and not is_optional_union(type_hints[field.name]) 38 ] 39 ), 40 property_fields={ 41 field.encode_name: FieldDefinition.from_kwarg(type_hints[field.name], field.encode_name) 42 for field in struct_fields 43 }, 44 ) 45 ``` Path: `litestar/_openapi/schema_generation/plugins/dataclass.py` Content: ``` 1 from __future__ import annotations 2 3 from dataclasses import MISSING, fields 4 from typing import TYPE_CHECKING 5 6 from litestar.plugins import OpenAPISchemaPlugin 7 from litestar.typing import FieldDefinition 8 from litestar.utils.predicates import is_optional_union 9 10 if TYPE_CHECKING: 11 from litestar._openapi.schema_generation import SchemaCreator 12 from litestar.openapi.spec import Schema 13 14 15 class DataclassSchemaPlugin(OpenAPISchemaPlugin): 16 def is_plugin_supported_field(self, field_definition: FieldDefinition) -> bool: 17 return field_definition.is_dataclass_type 18 19 def to_openapi_schema(self, field_definition: FieldDefinition, schema_creator: SchemaCreator) -> Schema: 20 type_hints = field_definition.get_type_hints(include_extras=True, resolve_generics=True) 21 dataclass_fields = fields(field_definition.type_) 22 return schema_creator.create_component_schema( 23 field_definition, 24 required=sorted( 25 field.name 26 for field in dataclass_fields 27 if ( 28 field.default is MISSING 29 and field.default_factory is MISSING 30 and not is_optional_union(type_hints[field.name]) 31 ) 32 ), 33 property_fields={ 34 field.name: 
FieldDefinition.from_kwarg(type_hints[field.name], field.name) for field in dataclass_fields 35 }, 36 ) 37 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/litestar/_openapi/schema_generation/plugins/dataclass.py b/litestar/_openapi/schema_generation/plugins/dataclass.py --- a/litestar/_openapi/schema_generation/plugins/dataclass.py +++ b/litestar/_openapi/schema_generation/plugins/dataclass.py @@ -1,9 +1,11 @@ from __future__ import annotations +import dataclasses from dataclasses import MISSING, fields from typing import TYPE_CHECKING from litestar.plugins import OpenAPISchemaPlugin +from litestar.types import Empty from litestar.typing import FieldDefinition from litestar.utils.predicates import is_optional_union @@ -31,6 +33,11 @@ ) ), property_fields={ - field.name: FieldDefinition.from_kwarg(type_hints[field.name], field.name) for field in dataclass_fields + field.name: FieldDefinition.from_kwarg( + annotation=type_hints[field.name], + name=field.name, + default=field.default if field.default is not dataclasses.MISSING else Empty, + ) + for field in dataclass_fields }, ) diff --git a/litestar/_openapi/schema_generation/plugins/struct.py b/litestar/_openapi/schema_generation/plugins/struct.py --- a/litestar/_openapi/schema_generation/plugins/struct.py +++ b/litestar/_openapi/schema_generation/plugins/struct.py @@ -2,6 +2,7 @@ from typing import TYPE_CHECKING +import msgspec from msgspec import Struct from msgspec.structs import fields @@ -38,7 +39,11 @@ ] ), property_fields={ - field.encode_name: FieldDefinition.from_kwarg(type_hints[field.name], field.encode_name) + field.encode_name: FieldDefinition.from_kwarg( + type_hints[field.name], + field.encode_name, + default=field.default if field.default not in {msgspec.NODEFAULT, msgspec.UNSET} else Empty, + ) for field in struct_fields }, )
{"golden_diff": "diff --git a/litestar/_openapi/schema_generation/plugins/dataclass.py b/litestar/_openapi/schema_generation/plugins/dataclass.py\n--- a/litestar/_openapi/schema_generation/plugins/dataclass.py\n+++ b/litestar/_openapi/schema_generation/plugins/dataclass.py\n@@ -1,9 +1,11 @@\n from __future__ import annotations\n \n+import dataclasses\n from dataclasses import MISSING, fields\n from typing import TYPE_CHECKING\n \n from litestar.plugins import OpenAPISchemaPlugin\n+from litestar.types import Empty\n from litestar.typing import FieldDefinition\n from litestar.utils.predicates import is_optional_union\n \n@@ -31,6 +33,11 @@\n )\n ),\n property_fields={\n- field.name: FieldDefinition.from_kwarg(type_hints[field.name], field.name) for field in dataclass_fields\n+ field.name: FieldDefinition.from_kwarg(\n+ annotation=type_hints[field.name],\n+ name=field.name,\n+ default=field.default if field.default is not dataclasses.MISSING else Empty,\n+ )\n+ for field in dataclass_fields\n },\n )\ndiff --git a/litestar/_openapi/schema_generation/plugins/struct.py b/litestar/_openapi/schema_generation/plugins/struct.py\n--- a/litestar/_openapi/schema_generation/plugins/struct.py\n+++ b/litestar/_openapi/schema_generation/plugins/struct.py\n@@ -2,6 +2,7 @@\n \n from typing import TYPE_CHECKING\n \n+import msgspec\n from msgspec import Struct\n from msgspec.structs import fields\n \n@@ -38,7 +39,11 @@\n ]\n ),\n property_fields={\n- field.encode_name: FieldDefinition.from_kwarg(type_hints[field.name], field.encode_name)\n+ field.encode_name: FieldDefinition.from_kwarg(\n+ type_hints[field.name],\n+ field.encode_name,\n+ default=field.default if field.default not in {msgspec.NODEFAULT, msgspec.UNSET} else Empty,\n+ )\n for field in struct_fields\n },\n )\n", "issue": "Bug: `default` not generated in OpenAPI schema for body as models\n### Description\r\n\r\nDeclaring default fields (various ways) generates OpenAPI spec with missing `default` declarations.\r\n\r\nAlso, when/if fields are marked into `required` varies a bit.\r\n\r\nDidn't test what happens when requests are sent - are the defaults actually picked up runtime. 
But at least the OpenAPI schema generation fails.\r\n\r\n(The docs don't mention _how_ these are supported so a bit difficult to know how it's _intended_.)\r\n\r\n### MCVE\r\n\r\n```python\r\nimport dataclasses\r\nimport json\r\nfrom dataclasses import dataclass\r\nfrom typing import Annotated\r\n\r\nimport msgspec\r\nfrom litestar import post\r\nfrom litestar.app import Litestar\r\nfrom litestar.params import Parameter\r\nfrom msgspec import Struct\r\nfrom pydantic import BaseModel, Field\r\n\r\n\r\nclass PydanticBody(BaseModel):\r\n field1: Annotated[str, Field(default=\"dummy\")]\r\n field2: str = Field(default=\"dummy\")\r\n field3: str = \"dummy\"\r\n field4: Annotated[str, Parameter(default=\"dummy\")]\r\n\r\n\r\n@dataclass\r\nclass DataclassBody:\r\n field1: Annotated[str, Parameter(default=\"dummy\")] # default generated, but declared as required\r\n field3: str = Parameter(default=\"dummy\")\r\n field4: str = \"dummy\"\r\n field5: str = dataclasses.field(default=\"dummy\")\r\n\r\n\r\nclass MsgspecBody(Struct):\r\n field1: Annotated[str, Parameter(default=\"dummy\")] # default generated, but declared as required\r\n field2: Annotated[str, msgspec.field(default=\"dummy\")] # no default, marked as required\r\n field3: str = Parameter(default=\"dummy\")\r\n field4: str = \"dummy\"\r\n field5: str = msgspec.field(default=\"dummy\")\r\n\r\n\r\n@post(\"/1\")\r\nasync def pydantic_handler(data: PydanticBody) -> None: ...\r\n\r\n\r\n@post(\"/2\")\r\nasync def dataclass_handler(data: DataclassBody) -> None: ...\r\n\r\n\r\n@post(\"/3\")\r\nasync def msgspec_handler(data: MsgspecBody) -> None: ...\r\n\r\n\r\napp = Litestar([pydantic_handler, dataclass_handler, msgspec_handler])\r\n\r\nprint(json.dumps(app.openapi_schema.to_schema(), indent=4))\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\nThis generates:\r\n\r\n```json\r\n \"components\": {\r\n \"schemas\": {\r\n \"DataclassBody\": {\r\n \"properties\": {\r\n \"field1\": {\r\n \"type\": \"string\",\r\n \"default\": \"dummy\"\r\n },\r\n \"field3\": {\r\n \"type\": \"string\"\r\n },\r\n \"field4\": {\r\n \"type\": \"string\"\r\n },\r\n \"field5\": {\r\n \"type\": \"string\"\r\n }\r\n },\r\n \"type\": \"object\",\r\n \"required\": [\r\n \"field1\"\r\n ],\r\n \"title\": \"DataclassBody\"\r\n },\r\n \"MsgspecBody\": {\r\n \"properties\": {\r\n \"field1\": {\r\n \"type\": \"string\",\r\n \"default\": \"dummy\"\r\n },\r\n \"field2\": {\r\n \"type\": \"string\"\r\n },\r\n \"field3\": {\r\n \"type\": \"string\"\r\n },\r\n \"field4\": {\r\n \"type\": \"string\"\r\n },\r\n \"field5\": {\r\n \"type\": \"string\"\r\n }\r\n },\r\n \"type\": \"object\",\r\n \"required\": [\r\n \"field1\",\r\n \"field2\"\r\n ],\r\n \"title\": \"MsgspecBody\"\r\n },\r\n \"PydanticBody\": {\r\n \"properties\": {\r\n \"field1\": {\r\n \"type\": \"string\"\r\n },\r\n \"field2\": {\r\n \"type\": \"string\"\r\n },\r\n \"field3\": {\r\n \"type\": \"string\"\r\n },\r\n \"field4\": {\r\n \"type\": \"string\"\r\n }\r\n },\r\n \"type\": \"object\",\r\n \"required\": [\r\n \"field4\"\r\n ],\r\n \"title\": \"PydanticBody\"\r\n }\r\n }\r\n```\r\n\r\n### Litestar Version\r\n\r\n2.7.0\r\n\r\n### Platform\r\n\r\n- [X] Linux\r\n- [ ] Mac\r\n- [ ] Windows\r\n- [ ] Other (Please specify in the description above)\r\n\r\n<!-- POLAR PLEDGE BADGE START -->\r\n---\r\n> [!NOTE] \r\n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \r\n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage 
in pledge-based sponsorship.\r\n>\r\n> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)\r\n> * If you would like to see an issue prioritized, make a pledge towards it!\r\n> * We receive the pledge once the issue is completed & verified\r\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\r\n\r\n<a href=\"https://polar.sh/litestar-org/litestar/issues/3201\">\r\n<picture>\r\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/3201/pledge.svg?darkmode=1\">\r\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/3201/pledge.svg\">\r\n</picture>\r\n</a>\r\n<!-- POLAR PLEDGE BADGE END -->\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom msgspec import Struct\nfrom msgspec.structs import fields\n\nfrom litestar.plugins import OpenAPISchemaPlugin\nfrom litestar.types.empty import Empty\nfrom litestar.typing import FieldDefinition\nfrom litestar.utils.predicates import is_optional_union\n\nif TYPE_CHECKING:\n from msgspec.structs import FieldInfo\n\n from litestar._openapi.schema_generation import SchemaCreator\n from litestar.openapi.spec import Schema\n\n\nclass StructSchemaPlugin(OpenAPISchemaPlugin):\n def is_plugin_supported_field(self, field_definition: FieldDefinition) -> bool:\n return not field_definition.is_union and field_definition.is_subclass_of(Struct)\n\n def to_openapi_schema(self, field_definition: FieldDefinition, schema_creator: SchemaCreator) -> Schema:\n def is_field_required(field: FieldInfo) -> bool:\n return field.required or field.default_factory is Empty\n\n type_hints = field_definition.get_type_hints(include_extras=True, resolve_generics=True)\n struct_fields = fields(field_definition.type_)\n\n return schema_creator.create_component_schema(\n field_definition,\n required=sorted(\n [\n field.encode_name\n for field in struct_fields\n if is_field_required(field=field) and not is_optional_union(type_hints[field.name])\n ]\n ),\n property_fields={\n field.encode_name: FieldDefinition.from_kwarg(type_hints[field.name], field.encode_name)\n for field in struct_fields\n },\n )\n", "path": "litestar/_openapi/schema_generation/plugins/struct.py"}, {"content": "from __future__ import annotations\n\nfrom dataclasses import MISSING, fields\nfrom typing import TYPE_CHECKING\n\nfrom litestar.plugins import OpenAPISchemaPlugin\nfrom litestar.typing import FieldDefinition\nfrom litestar.utils.predicates import is_optional_union\n\nif TYPE_CHECKING:\n from litestar._openapi.schema_generation import SchemaCreator\n from litestar.openapi.spec import Schema\n\n\nclass DataclassSchemaPlugin(OpenAPISchemaPlugin):\n def is_plugin_supported_field(self, field_definition: FieldDefinition) -> bool:\n return field_definition.is_dataclass_type\n\n def to_openapi_schema(self, field_definition: FieldDefinition, schema_creator: SchemaCreator) -> Schema:\n type_hints = field_definition.get_type_hints(include_extras=True, resolve_generics=True)\n dataclass_fields = fields(field_definition.type_)\n return schema_creator.create_component_schema(\n field_definition,\n required=sorted(\n field.name\n for field in dataclass_fields\n if (\n field.default is MISSING\n and field.default_factory is MISSING\n and not is_optional_union(type_hints[field.name])\n )\n ),\n property_fields={\n field.name: 
FieldDefinition.from_kwarg(type_hints[field.name], field.name) for field in dataclass_fields\n },\n )\n", "path": "litestar/_openapi/schema_generation/plugins/dataclass.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport msgspec\nfrom msgspec import Struct\nfrom msgspec.structs import fields\n\nfrom litestar.plugins import OpenAPISchemaPlugin\nfrom litestar.types.empty import Empty\nfrom litestar.typing import FieldDefinition\nfrom litestar.utils.predicates import is_optional_union\n\nif TYPE_CHECKING:\n from msgspec.structs import FieldInfo\n\n from litestar._openapi.schema_generation import SchemaCreator\n from litestar.openapi.spec import Schema\n\n\nclass StructSchemaPlugin(OpenAPISchemaPlugin):\n def is_plugin_supported_field(self, field_definition: FieldDefinition) -> bool:\n return not field_definition.is_union and field_definition.is_subclass_of(Struct)\n\n def to_openapi_schema(self, field_definition: FieldDefinition, schema_creator: SchemaCreator) -> Schema:\n def is_field_required(field: FieldInfo) -> bool:\n return field.required or field.default_factory is Empty\n\n type_hints = field_definition.get_type_hints(include_extras=True, resolve_generics=True)\n struct_fields = fields(field_definition.type_)\n\n return schema_creator.create_component_schema(\n field_definition,\n required=sorted(\n [\n field.encode_name\n for field in struct_fields\n if is_field_required(field=field) and not is_optional_union(type_hints[field.name])\n ]\n ),\n property_fields={\n field.encode_name: FieldDefinition.from_kwarg(\n type_hints[field.name],\n field.encode_name,\n default=field.default if field.default not in {msgspec.NODEFAULT, msgspec.UNSET} else Empty,\n )\n for field in struct_fields\n },\n )\n", "path": "litestar/_openapi/schema_generation/plugins/struct.py"}, {"content": "from __future__ import annotations\n\nimport dataclasses\nfrom dataclasses import MISSING, fields\nfrom typing import TYPE_CHECKING\n\nfrom litestar.plugins import OpenAPISchemaPlugin\nfrom litestar.types import Empty\nfrom litestar.typing import FieldDefinition\nfrom litestar.utils.predicates import is_optional_union\n\nif TYPE_CHECKING:\n from litestar._openapi.schema_generation import SchemaCreator\n from litestar.openapi.spec import Schema\n\n\nclass DataclassSchemaPlugin(OpenAPISchemaPlugin):\n def is_plugin_supported_field(self, field_definition: FieldDefinition) -> bool:\n return field_definition.is_dataclass_type\n\n def to_openapi_schema(self, field_definition: FieldDefinition, schema_creator: SchemaCreator) -> Schema:\n type_hints = field_definition.get_type_hints(include_extras=True, resolve_generics=True)\n dataclass_fields = fields(field_definition.type_)\n return schema_creator.create_component_schema(\n field_definition,\n required=sorted(\n field.name\n for field in dataclass_fields\n if (\n field.default is MISSING\n and field.default_factory is MISSING\n and not is_optional_union(type_hints[field.name])\n )\n ),\n property_fields={\n field.name: FieldDefinition.from_kwarg(\n annotation=type_hints[field.name],\n name=field.name,\n default=field.default if field.default is not dataclasses.MISSING else Empty,\n )\n for field in dataclass_fields\n },\n )\n", "path": "litestar/_openapi/schema_generation/plugins/dataclass.py"}]}
2,219
451
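The Litestar patch captured in the record above forwards field defaults into `FieldDefinition.from_kwarg` for both msgspec structs and dataclasses. As a rough, standalone sketch of the default-detection idea it relies on — assuming msgspec's `NODEFAULT`/`UNSET` sentinels and `dataclasses.MISSING` behave as the after_files suggest, and using example classes that are not part of Litestar — one might write:

```python
import dataclasses

import msgspec
from msgspec.structs import fields as struct_fields


class StructExample(msgspec.Struct):
    name: str
    retries: int = 3


@dataclasses.dataclass
class DataclassExample:
    name: str
    retries: int = 3


# msgspec structs: a field only carries a usable default when it is neither
# NODEFAULT nor UNSET -- the same test the patched plugin applies.
for field in struct_fields(StructExample):
    has_default = field.default not in (msgspec.NODEFAULT, msgspec.UNSET)
    print("struct   ", field.encode_name, field.default if has_default else "<required>")

# dataclasses: MISSING marks the absence of a default value.
for field in dataclasses.fields(DataclassExample):
    has_default = field.default is not dataclasses.MISSING
    print("dataclass", field.name, field.default if has_default else "<required>")
```

Run as-is, this prints one line per field: `retries` reports its default of 3 and `name` is reported as required for both the struct and the dataclass, which is exactly the information the patched plugins now pass through to the OpenAPI schema.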
gh_patches_debug_17703
rasdani/github-patches
git_diff
Netflix__lemur-3766
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support emitting logs to stdout **Context** We currently deploy Lemur in a containerized environment and would like to have Lemur emit JSON logs to `stdout` instead of in a rotating log file. Our reason is that it is generally regarded as a best practice when running in a container to write all logs to stdout and have the orchestrator do the collection. This is something we really embrace in our company and our tooling is built with this assumption in mind. **Issue** We tried configuring Lemur with `LOG_FILE = "/dev/stdout"` but the `RotatingFileHandler` throws an exception when attempting a `os.seek(...)` on the file descriptor. Unstructured logs are already emitted on `stderr` but that unfortunately doesn't address our need since splitting those logs is essentially impossible. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lemur/factory.py` Content: ``` 1 """ 2 .. module: lemur.factory 3 :platform: Unix 4 :synopsis: This module contains all the needed functions to allow 5 the factory app creation. 6 7 :copyright: (c) 2018 by Netflix Inc., see AUTHORS for more 8 :license: Apache, see LICENSE for more details. 9 .. moduleauthor:: Kevin Glisson <[email protected]> 10 11 """ 12 import os 13 import importlib 14 import logmatic 15 import errno 16 import pkg_resources 17 import socket 18 19 from logging import Formatter, StreamHandler 20 from logging.handlers import RotatingFileHandler 21 22 from flask import Flask 23 from flask_replicated import FlaskReplicated 24 25 import sentry_sdk 26 from sentry_sdk.integrations.celery import CeleryIntegration 27 from sentry_sdk.integrations.redis import RedisIntegration 28 from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration 29 from sentry_sdk.integrations.flask import FlaskIntegration 30 31 from lemur.certificates.hooks import activate_debug_dump 32 from lemur.common.health import mod as health 33 from lemur.extensions import db, migrate, principal, smtp_mail, metrics, cors 34 35 36 DEFAULT_BLUEPRINTS = (health,) 37 38 API_VERSION = 1 39 40 41 def create_app(app_name=None, blueprints=None, config=None): 42 """ 43 Lemur application factory 44 45 :param config: 46 :param app_name: 47 :param blueprints: 48 :return: 49 """ 50 if not blueprints: 51 blueprints = DEFAULT_BLUEPRINTS 52 else: 53 blueprints = blueprints + DEFAULT_BLUEPRINTS 54 55 if not app_name: 56 app_name = __name__ 57 58 app = Flask(app_name) 59 configure_app(app, config) 60 configure_blueprints(app, blueprints) 61 configure_extensions(app) 62 configure_logging(app) 63 configure_database(app) 64 install_plugins(app) 65 66 @app.teardown_appcontext 67 def teardown(exception=None): 68 if db.session: 69 db.session.remove() 70 71 return app 72 73 74 def from_file(file_path, silent=False): 75 """ 76 Updates the values in the config from a Python file. 
This function 77 behaves as if the file was imported as module with the 78 79 :param file_path: 80 :param silent: 81 """ 82 module_spec = importlib.util.spec_from_file_location("config", file_path) 83 d = importlib.util.module_from_spec(module_spec) 84 85 try: 86 with open(file_path) as config_file: 87 exec( # nosec: config file safe 88 compile(config_file.read(), file_path, "exec"), d.__dict__ 89 ) 90 except IOError as e: 91 if silent and e.errno in (errno.ENOENT, errno.EISDIR): 92 return False 93 e.strerror = "Unable to load configuration file (%s)" % e.strerror 94 raise 95 return d 96 97 98 def configure_app(app, config=None): 99 """ 100 Different ways of configuration 101 102 :param app: 103 :param config: 104 :return: 105 """ 106 # respect the config first 107 if config and config != "None": 108 app.config["CONFIG_PATH"] = config 109 app.config.from_object(from_file(config)) 110 else: 111 try: 112 app.config.from_envvar("LEMUR_CONF") 113 except RuntimeError: 114 # look in default paths 115 if os.path.isfile(os.path.expanduser("~/.lemur/lemur.conf.py")): 116 app.config.from_object( 117 from_file(os.path.expanduser("~/.lemur/lemur.conf.py")) 118 ) 119 else: 120 app.config.from_object( 121 from_file( 122 os.path.join( 123 os.path.dirname(os.path.realpath(__file__)), 124 "default.conf.py", 125 ) 126 ) 127 ) 128 129 # we don't use this 130 app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False 131 132 133 def configure_extensions(app): 134 """ 135 Attaches and configures any needed flask extensions 136 to our app. 137 138 :param app: 139 """ 140 db.init_app(app) 141 migrate.init_app(app, db) 142 principal.init_app(app) 143 smtp_mail.init_app(app) 144 metrics.init_app(app) 145 146 # the legacy Raven[flask] relied on SENTRY_CONFIG 147 if app.config.get("SENTRY_DSN", None) or app.config.get("SENTRY_CONFIG", None): 148 # priority given to SENTRY_DSN 149 sentry_dsn = app.config.get("SENTRY_DSN", None) or app.config["SENTRY_CONFIG"]['dsn'] 150 sentry_sdk.init( 151 dsn=sentry_dsn, 152 integrations=[SqlalchemyIntegration(), 153 CeleryIntegration(), 154 RedisIntegration(), 155 FlaskIntegration()], 156 # associating users to errors 157 send_default_pii=True, 158 shutdown_timeout=60, 159 environment=app.config.get("LEMUR_ENV", ''), 160 ) 161 162 if app.config["CORS"]: 163 app.config["CORS_HEADERS"] = "Content-Type" 164 cors.init_app( 165 app, 166 resources=r"/api/*", 167 headers="Content-Type", 168 origin="*", 169 supports_credentials=True, 170 ) 171 172 173 def configure_blueprints(app, blueprints): 174 """ 175 We prefix our APIs with their given version so that we can support 176 multiple concurrent API versions. 177 178 :param app: 179 :param blueprints: 180 """ 181 for blueprint in blueprints: 182 app.register_blueprint(blueprint, url_prefix="/api/{0}".format(API_VERSION)) 183 184 185 def configure_database(app): 186 if app.config.get("SQLALCHEMY_ENABLE_FLASK_REPLICATED"): 187 FlaskReplicated(app) 188 189 190 def configure_logging(app): 191 """ 192 Sets up application wide logging. 
193 194 :param app: 195 """ 196 handler = RotatingFileHandler( 197 app.config.get("LOG_FILE", "lemur.log"), maxBytes=10000000, backupCount=100 198 ) 199 200 handler.setFormatter( 201 Formatter( 202 "%(asctime)s %(levelname)s: %(message)s " "[in %(pathname)s:%(lineno)d]" 203 ) 204 ) 205 206 if app.config.get("LOG_JSON", False): 207 handler.setFormatter( 208 logmatic.JsonFormatter(extra={"hostname": socket.gethostname()}) 209 ) 210 211 handler.setLevel(app.config.get("LOG_LEVEL", "DEBUG")) 212 app.logger.setLevel(app.config.get("LOG_LEVEL", "DEBUG")) 213 app.logger.addHandler(handler) 214 215 stream_handler = StreamHandler() 216 stream_handler.setLevel(app.config.get("LOG_LEVEL", "DEBUG")) 217 app.logger.addHandler(stream_handler) 218 219 if app.config.get("DEBUG_DUMP", False): 220 activate_debug_dump() 221 222 223 def install_plugins(app): 224 """ 225 Installs new issuers that are not currently bundled with Lemur. 226 227 :param app: 228 :return: 229 """ 230 from lemur.plugins import plugins 231 from lemur.plugins.base import register 232 233 # entry_points={ 234 # 'lemur.plugins': [ 235 # 'verisign = lemur_verisign.plugin:VerisignPlugin' 236 # ], 237 # }, 238 for ep in pkg_resources.iter_entry_points("lemur.plugins"): 239 try: 240 plugin = ep.load() 241 except Exception: 242 import traceback 243 244 app.logger.error( 245 "Failed to load plugin %r:\n%s\n" % (ep.name, traceback.format_exc()) 246 ) 247 else: 248 register(plugin) 249 250 # ensure that we have some way to notify 251 with app.app_context(): 252 slug = app.config.get("LEMUR_DEFAULT_NOTIFICATION_PLUGIN", "email-notification") 253 try: 254 plugins.get(slug) 255 except KeyError: 256 raise Exception( 257 "Unable to location notification plugin: {slug}. Ensure that " 258 "LEMUR_DEFAULT_NOTIFICATION_PLUGIN is set to a valid and installed notification plugin.".format( 259 slug=slug 260 ) 261 ) 262 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lemur/factory.py b/lemur/factory.py --- a/lemur/factory.py +++ b/lemur/factory.py @@ -15,6 +15,7 @@ import errno import pkg_resources import socket +import stat from logging import Formatter, StreamHandler from logging.handlers import RotatingFileHandler @@ -193,9 +194,14 @@ :param app: """ - handler = RotatingFileHandler( - app.config.get("LOG_FILE", "lemur.log"), maxBytes=10000000, backupCount=100 - ) + logfile = app.config.get("LOG_FILE", "lemur.log") + # if the log file is a character special device file (ie. stdout/stderr), + # file rotation will not work and must be disabled. + disable_file_rotation = os.path.exists(logfile) and stat.S_ISCHR(os.stat(logfile).st_mode) + if disable_file_rotation: + handler = StreamHandler(open(logfile, 'a')) + else: + handler = RotatingFileHandler(logfile, maxBytes=10000000, backupCount=100) handler.setFormatter( Formatter(
{"golden_diff": "diff --git a/lemur/factory.py b/lemur/factory.py\n--- a/lemur/factory.py\n+++ b/lemur/factory.py\n@@ -15,6 +15,7 @@\n import errno\n import pkg_resources\n import socket\n+import stat\n \n from logging import Formatter, StreamHandler\n from logging.handlers import RotatingFileHandler\n@@ -193,9 +194,14 @@\n \n :param app:\n \"\"\"\n- handler = RotatingFileHandler(\n- app.config.get(\"LOG_FILE\", \"lemur.log\"), maxBytes=10000000, backupCount=100\n- )\n+ logfile = app.config.get(\"LOG_FILE\", \"lemur.log\")\n+ # if the log file is a character special device file (ie. stdout/stderr),\n+ # file rotation will not work and must be disabled.\n+ disable_file_rotation = os.path.exists(logfile) and stat.S_ISCHR(os.stat(logfile).st_mode)\n+ if disable_file_rotation:\n+ handler = StreamHandler(open(logfile, 'a'))\n+ else:\n+ handler = RotatingFileHandler(logfile, maxBytes=10000000, backupCount=100)\n \n handler.setFormatter(\n Formatter(\n", "issue": "Support emitting logs to stdout\n**Context**\r\nWe currently deploy Lemur in a containerized environment and would like to have Lemur emit JSON logs to `stdout` instead of in a rotating log file. Our reason is that it is generally regarded as a best practice when running in a container to write all logs to stdout and have the orchestrator do the collection. This is something we really embrace in our company and our tooling is built with this assumption in mind.\r\n\r\n**Issue**\r\nWe tried configuring Lemur with `LOG_FILE = \"/dev/stdout\"` but the `RotatingFileHandler` throws an exception when attempting a `os.seek(...)` on the file descriptor. Unstructured logs are already emitted on `stderr` but that unfortunately doesn't address our need since splitting those logs is essentially impossible.\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.factory\n :platform: Unix\n :synopsis: This module contains all the needed functions to allow\n the factory app creation.\n\n :copyright: (c) 2018 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. 
moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nimport os\nimport importlib\nimport logmatic\nimport errno\nimport pkg_resources\nimport socket\n\nfrom logging import Formatter, StreamHandler\nfrom logging.handlers import RotatingFileHandler\n\nfrom flask import Flask\nfrom flask_replicated import FlaskReplicated\n\nimport sentry_sdk\nfrom sentry_sdk.integrations.celery import CeleryIntegration\nfrom sentry_sdk.integrations.redis import RedisIntegration\nfrom sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration\nfrom sentry_sdk.integrations.flask import FlaskIntegration\n\nfrom lemur.certificates.hooks import activate_debug_dump\nfrom lemur.common.health import mod as health\nfrom lemur.extensions import db, migrate, principal, smtp_mail, metrics, cors\n\n\nDEFAULT_BLUEPRINTS = (health,)\n\nAPI_VERSION = 1\n\n\ndef create_app(app_name=None, blueprints=None, config=None):\n \"\"\"\n Lemur application factory\n\n :param config:\n :param app_name:\n :param blueprints:\n :return:\n \"\"\"\n if not blueprints:\n blueprints = DEFAULT_BLUEPRINTS\n else:\n blueprints = blueprints + DEFAULT_BLUEPRINTS\n\n if not app_name:\n app_name = __name__\n\n app = Flask(app_name)\n configure_app(app, config)\n configure_blueprints(app, blueprints)\n configure_extensions(app)\n configure_logging(app)\n configure_database(app)\n install_plugins(app)\n\n @app.teardown_appcontext\n def teardown(exception=None):\n if db.session:\n db.session.remove()\n\n return app\n\n\ndef from_file(file_path, silent=False):\n \"\"\"\n Updates the values in the config from a Python file. This function\n behaves as if the file was imported as module with the\n\n :param file_path:\n :param silent:\n \"\"\"\n module_spec = importlib.util.spec_from_file_location(\"config\", file_path)\n d = importlib.util.module_from_spec(module_spec)\n\n try:\n with open(file_path) as config_file:\n exec( # nosec: config file safe\n compile(config_file.read(), file_path, \"exec\"), d.__dict__\n )\n except IOError as e:\n if silent and e.errno in (errno.ENOENT, errno.EISDIR):\n return False\n e.strerror = \"Unable to load configuration file (%s)\" % e.strerror\n raise\n return d\n\n\ndef configure_app(app, config=None):\n \"\"\"\n Different ways of configuration\n\n :param app:\n :param config:\n :return:\n \"\"\"\n # respect the config first\n if config and config != \"None\":\n app.config[\"CONFIG_PATH\"] = config\n app.config.from_object(from_file(config))\n else:\n try:\n app.config.from_envvar(\"LEMUR_CONF\")\n except RuntimeError:\n # look in default paths\n if os.path.isfile(os.path.expanduser(\"~/.lemur/lemur.conf.py\")):\n app.config.from_object(\n from_file(os.path.expanduser(\"~/.lemur/lemur.conf.py\"))\n )\n else:\n app.config.from_object(\n from_file(\n os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n \"default.conf.py\",\n )\n )\n )\n\n # we don't use this\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n\ndef configure_extensions(app):\n \"\"\"\n Attaches and configures any needed flask extensions\n to our app.\n\n :param app:\n \"\"\"\n db.init_app(app)\n migrate.init_app(app, db)\n principal.init_app(app)\n smtp_mail.init_app(app)\n metrics.init_app(app)\n\n # the legacy Raven[flask] relied on SENTRY_CONFIG\n if app.config.get(\"SENTRY_DSN\", None) or app.config.get(\"SENTRY_CONFIG\", None):\n # priority given to SENTRY_DSN\n sentry_dsn = app.config.get(\"SENTRY_DSN\", None) or app.config[\"SENTRY_CONFIG\"]['dsn']\n sentry_sdk.init(\n dsn=sentry_dsn,\n 
integrations=[SqlalchemyIntegration(),\n CeleryIntegration(),\n RedisIntegration(),\n FlaskIntegration()],\n # associating users to errors\n send_default_pii=True,\n shutdown_timeout=60,\n environment=app.config.get(\"LEMUR_ENV\", ''),\n )\n\n if app.config[\"CORS\"]:\n app.config[\"CORS_HEADERS\"] = \"Content-Type\"\n cors.init_app(\n app,\n resources=r\"/api/*\",\n headers=\"Content-Type\",\n origin=\"*\",\n supports_credentials=True,\n )\n\n\ndef configure_blueprints(app, blueprints):\n \"\"\"\n We prefix our APIs with their given version so that we can support\n multiple concurrent API versions.\n\n :param app:\n :param blueprints:\n \"\"\"\n for blueprint in blueprints:\n app.register_blueprint(blueprint, url_prefix=\"/api/{0}\".format(API_VERSION))\n\n\ndef configure_database(app):\n if app.config.get(\"SQLALCHEMY_ENABLE_FLASK_REPLICATED\"):\n FlaskReplicated(app)\n\n\ndef configure_logging(app):\n \"\"\"\n Sets up application wide logging.\n\n :param app:\n \"\"\"\n handler = RotatingFileHandler(\n app.config.get(\"LOG_FILE\", \"lemur.log\"), maxBytes=10000000, backupCount=100\n )\n\n handler.setFormatter(\n Formatter(\n \"%(asctime)s %(levelname)s: %(message)s \" \"[in %(pathname)s:%(lineno)d]\"\n )\n )\n\n if app.config.get(\"LOG_JSON\", False):\n handler.setFormatter(\n logmatic.JsonFormatter(extra={\"hostname\": socket.gethostname()})\n )\n\n handler.setLevel(app.config.get(\"LOG_LEVEL\", \"DEBUG\"))\n app.logger.setLevel(app.config.get(\"LOG_LEVEL\", \"DEBUG\"))\n app.logger.addHandler(handler)\n\n stream_handler = StreamHandler()\n stream_handler.setLevel(app.config.get(\"LOG_LEVEL\", \"DEBUG\"))\n app.logger.addHandler(stream_handler)\n\n if app.config.get(\"DEBUG_DUMP\", False):\n activate_debug_dump()\n\n\ndef install_plugins(app):\n \"\"\"\n Installs new issuers that are not currently bundled with Lemur.\n\n :param app:\n :return:\n \"\"\"\n from lemur.plugins import plugins\n from lemur.plugins.base import register\n\n # entry_points={\n # 'lemur.plugins': [\n # 'verisign = lemur_verisign.plugin:VerisignPlugin'\n # ],\n # },\n for ep in pkg_resources.iter_entry_points(\"lemur.plugins\"):\n try:\n plugin = ep.load()\n except Exception:\n import traceback\n\n app.logger.error(\n \"Failed to load plugin %r:\\n%s\\n\" % (ep.name, traceback.format_exc())\n )\n else:\n register(plugin)\n\n # ensure that we have some way to notify\n with app.app_context():\n slug = app.config.get(\"LEMUR_DEFAULT_NOTIFICATION_PLUGIN\", \"email-notification\")\n try:\n plugins.get(slug)\n except KeyError:\n raise Exception(\n \"Unable to location notification plugin: {slug}. Ensure that \"\n \"LEMUR_DEFAULT_NOTIFICATION_PLUGIN is set to a valid and installed notification plugin.\".format(\n slug=slug\n )\n )\n", "path": "lemur/factory.py"}], "after_files": [{"content": "\"\"\"\n.. module: lemur.factory\n :platform: Unix\n :synopsis: This module contains all the needed functions to allow\n the factory app creation.\n\n :copyright: (c) 2018 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. 
moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nimport os\nimport importlib\nimport logmatic\nimport errno\nimport pkg_resources\nimport socket\nimport stat\n\nfrom logging import Formatter, StreamHandler\nfrom logging.handlers import RotatingFileHandler\n\nfrom flask import Flask\nfrom flask_replicated import FlaskReplicated\n\nimport sentry_sdk\nfrom sentry_sdk.integrations.celery import CeleryIntegration\nfrom sentry_sdk.integrations.redis import RedisIntegration\nfrom sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration\nfrom sentry_sdk.integrations.flask import FlaskIntegration\n\nfrom lemur.certificates.hooks import activate_debug_dump\nfrom lemur.common.health import mod as health\nfrom lemur.extensions import db, migrate, principal, smtp_mail, metrics, cors\n\n\nDEFAULT_BLUEPRINTS = (health,)\n\nAPI_VERSION = 1\n\n\ndef create_app(app_name=None, blueprints=None, config=None):\n \"\"\"\n Lemur application factory\n\n :param config:\n :param app_name:\n :param blueprints:\n :return:\n \"\"\"\n if not blueprints:\n blueprints = DEFAULT_BLUEPRINTS\n else:\n blueprints = blueprints + DEFAULT_BLUEPRINTS\n\n if not app_name:\n app_name = __name__\n\n app = Flask(app_name)\n configure_app(app, config)\n configure_blueprints(app, blueprints)\n configure_extensions(app)\n configure_logging(app)\n configure_database(app)\n install_plugins(app)\n\n @app.teardown_appcontext\n def teardown(exception=None):\n if db.session:\n db.session.remove()\n\n return app\n\n\ndef from_file(file_path, silent=False):\n \"\"\"\n Updates the values in the config from a Python file. This function\n behaves as if the file was imported as module with the\n\n :param file_path:\n :param silent:\n \"\"\"\n module_spec = importlib.util.spec_from_file_location(\"config\", file_path)\n d = importlib.util.module_from_spec(module_spec)\n\n try:\n with open(file_path) as config_file:\n exec( # nosec: config file safe\n compile(config_file.read(), file_path, \"exec\"), d.__dict__\n )\n except IOError as e:\n if silent and e.errno in (errno.ENOENT, errno.EISDIR):\n return False\n e.strerror = \"Unable to load configuration file (%s)\" % e.strerror\n raise\n return d\n\n\ndef configure_app(app, config=None):\n \"\"\"\n Different ways of configuration\n\n :param app:\n :param config:\n :return:\n \"\"\"\n # respect the config first\n if config and config != \"None\":\n app.config[\"CONFIG_PATH\"] = config\n app.config.from_object(from_file(config))\n else:\n try:\n app.config.from_envvar(\"LEMUR_CONF\")\n except RuntimeError:\n # look in default paths\n if os.path.isfile(os.path.expanduser(\"~/.lemur/lemur.conf.py\")):\n app.config.from_object(\n from_file(os.path.expanduser(\"~/.lemur/lemur.conf.py\"))\n )\n else:\n app.config.from_object(\n from_file(\n os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n \"default.conf.py\",\n )\n )\n )\n\n # we don't use this\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n\ndef configure_extensions(app):\n \"\"\"\n Attaches and configures any needed flask extensions\n to our app.\n\n :param app:\n \"\"\"\n db.init_app(app)\n migrate.init_app(app, db)\n principal.init_app(app)\n smtp_mail.init_app(app)\n metrics.init_app(app)\n\n # the legacy Raven[flask] relied on SENTRY_CONFIG\n if app.config.get(\"SENTRY_DSN\", None) or app.config.get(\"SENTRY_CONFIG\", None):\n # priority given to SENTRY_DSN\n sentry_dsn = app.config.get(\"SENTRY_DSN\", None) or app.config[\"SENTRY_CONFIG\"]['dsn']\n sentry_sdk.init(\n dsn=sentry_dsn,\n 
integrations=[SqlalchemyIntegration(),\n CeleryIntegration(),\n RedisIntegration(),\n FlaskIntegration()],\n # associating users to errors\n send_default_pii=True,\n shutdown_timeout=60,\n environment=app.config.get(\"LEMUR_ENV\", ''),\n )\n\n if app.config[\"CORS\"]:\n app.config[\"CORS_HEADERS\"] = \"Content-Type\"\n cors.init_app(\n app,\n resources=r\"/api/*\",\n headers=\"Content-Type\",\n origin=\"*\",\n supports_credentials=True,\n )\n\n\ndef configure_blueprints(app, blueprints):\n \"\"\"\n We prefix our APIs with their given version so that we can support\n multiple concurrent API versions.\n\n :param app:\n :param blueprints:\n \"\"\"\n for blueprint in blueprints:\n app.register_blueprint(blueprint, url_prefix=\"/api/{0}\".format(API_VERSION))\n\n\ndef configure_database(app):\n if app.config.get(\"SQLALCHEMY_ENABLE_FLASK_REPLICATED\"):\n FlaskReplicated(app)\n\n\ndef configure_logging(app):\n \"\"\"\n Sets up application wide logging.\n\n :param app:\n \"\"\"\n logfile = app.config.get(\"LOG_FILE\", \"lemur.log\")\n # if the log file is a character special device file (ie. stdout/stderr),\n # file rotation will not work and must be disabled.\n disable_file_rotation = os.path.exists(logfile) and stat.S_ISCHR(os.stat(logfile).st_mode)\n if disable_file_rotation:\n handler = StreamHandler(open(logfile, 'a'))\n else:\n handler = RotatingFileHandler(logfile, maxBytes=10000000, backupCount=100)\n\n handler.setFormatter(\n Formatter(\n \"%(asctime)s %(levelname)s: %(message)s \" \"[in %(pathname)s:%(lineno)d]\"\n )\n )\n\n if app.config.get(\"LOG_JSON\", False):\n handler.setFormatter(\n logmatic.JsonFormatter(extra={\"hostname\": socket.gethostname()})\n )\n\n handler.setLevel(app.config.get(\"LOG_LEVEL\", \"DEBUG\"))\n app.logger.setLevel(app.config.get(\"LOG_LEVEL\", \"DEBUG\"))\n app.logger.addHandler(handler)\n\n stream_handler = StreamHandler()\n stream_handler.setLevel(app.config.get(\"LOG_LEVEL\", \"DEBUG\"))\n app.logger.addHandler(stream_handler)\n\n if app.config.get(\"DEBUG_DUMP\", False):\n activate_debug_dump()\n\n\ndef install_plugins(app):\n \"\"\"\n Installs new issuers that are not currently bundled with Lemur.\n\n :param app:\n :return:\n \"\"\"\n from lemur.plugins import plugins\n from lemur.plugins.base import register\n\n # entry_points={\n # 'lemur.plugins': [\n # 'verisign = lemur_verisign.plugin:VerisignPlugin'\n # ],\n # },\n for ep in pkg_resources.iter_entry_points(\"lemur.plugins\"):\n try:\n plugin = ep.load()\n except Exception:\n import traceback\n\n app.logger.error(\n \"Failed to load plugin %r:\\n%s\\n\" % (ep.name, traceback.format_exc())\n )\n else:\n register(plugin)\n\n # ensure that we have some way to notify\n with app.app_context():\n slug = app.config.get(\"LEMUR_DEFAULT_NOTIFICATION_PLUGIN\", \"email-notification\")\n try:\n plugins.get(slug)\n except KeyError:\n raise Exception(\n \"Unable to location notification plugin: {slug}. Ensure that \"\n \"LEMUR_DEFAULT_NOTIFICATION_PLUGIN is set to a valid and installed notification plugin.\".format(\n slug=slug\n )\n )\n", "path": "lemur/factory.py"}]}
2,757
284
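For readers who want the gist of the Lemur fix above outside of the factory module, a minimal sketch of the handler-selection logic — use a plain `StreamHandler` when the configured log file is a character device such as `/dev/stdout`, otherwise keep rotation — could look like the following (`build_log_handler` is an illustrative name, not part of Lemur):

```python
import os
import stat
from logging import Formatter, StreamHandler
from logging.handlers import RotatingFileHandler


def build_log_handler(logfile="lemur.log"):
    # Character special files (e.g. /dev/stdout) cannot seek, so file
    # rotation must be disabled for them.
    is_char_device = os.path.exists(logfile) and stat.S_ISCHR(os.stat(logfile).st_mode)
    if is_char_device:
        handler = StreamHandler(open(logfile, "a"))
    else:
        handler = RotatingFileHandler(logfile, maxBytes=10_000_000, backupCount=100)
    handler.setFormatter(
        Formatter("%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]")
    )
    return handler
```

With `LOG_FILE = "/dev/stdout"` this path avoids the seek failure described in the issue, while ordinary file paths keep the existing rotating behaviour.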
gh_patches_debug_13254
rasdani/github-patches
git_diff
PaddlePaddle__PaddleNLP-1416
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- fix perf and sample code <!-- Demo: https://github.com/PaddlePaddle/PaddleNLP/pull/26 --> ### PR types <!-- One of [ New features | Bug fixes | Function optimization | Performance optimization | Breaking changes | Others ] --> Others ### PR changes <!-- One of [ Models | APIs | Docs | Others ] --> APIs ### Description <!-- Describe what this PR does --> fix perf and sample code --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/language_model/gpt/faster_gpt/export_model.py` Content: ``` 1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import sys 15 import os 16 import numpy as np 17 from attrdict import AttrDict 18 import argparse 19 import time 20 21 import paddle 22 23 import yaml 24 from pprint import pprint 25 26 from paddlenlp.ops import FasterGPT 27 from paddlenlp.transformers import GPTLMHeadModel 28 from paddlenlp.transformers import GPTChineseTokenizer, GPTTokenizer 29 30 from paddlenlp.utils.log import logger 31 32 MODEL_CLASSES = { 33 "gpt-cpm-large-cn": (GPTLMHeadModel, GPTChineseTokenizer), 34 "gpt2-medium-en": (GPTLMHeadModel, GPTTokenizer), 35 } 36 37 38 def parse_args(): 39 parser = argparse.ArgumentParser() 40 parser.add_argument( 41 "--model_name_or_path", 42 default="gpt2-medium-en", 43 type=str, 44 help="The model name to specify the gpt to use. Can be one of ['gpt2-en', 'gpt2-medium-en', 'gpt-cpm-large-cn']. " 45 ) 46 parser.add_argument( 47 "--decoding_lib", 48 default="../../build/lib/libdecoding_op.so", 49 type=str, 50 help="Path of libdecoding_op.so. ") 51 parser.add_argument( 52 "--inference_model_dir", 53 default="./infer_model/", 54 type=str, 55 help="Path to save inference model of gpt. ") 56 parser.add_argument( 57 "--topk", 58 default=4, 59 type=int, 60 help="The number of candidate to procedure beam search. ") 61 parser.add_argument( 62 "--topp", 63 default=0.0, 64 type=float, 65 help="The probability threshold to procedure topp sampling. ") 66 parser.add_argument( 67 "--max_out_len", default=32, type=int, help="Maximum output length. ") 68 parser.add_argument( 69 "--temperature", 70 default=1.0, 71 type=float, 72 help="The temperature to set. ") 73 parser.add_argument( 74 "--use_fp16_decoding", 75 action="store_true", 76 help="Whether to use fp16 decoding to predict. 
") 77 args = parser.parse_args() 78 return args 79 80 81 def do_predict(args): 82 place = "gpu" 83 place = paddle.set_device(place) 84 85 model_class, tokenizer_class = MODEL_CLASSES[args.model_name_or_path] 86 tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path) 87 logger.info('Loading the model parameters, please wait...') 88 model = model_class.from_pretrained( 89 args.model_name_or_path, max_predict_len=args.max_out_len) 90 91 gpt = FasterGPT( 92 model=model, 93 decoding_lib=args.decoding_lib, 94 use_fp16_decoding=args.use_fp16_decoding) 95 96 # Set evaluate mode 97 gpt.eval() 98 99 # Convert dygraph model to static graph model 100 gpt = paddle.jit.to_static( 101 gpt, 102 input_spec=[ 103 # input_ids 104 paddle.static.InputSpec( 105 shape=[None, None], dtype="int32"), 106 # 107 # If it's necessarry to provide mem_seq_len and attention_mask, 108 # the parameters should be: 109 # mem_seq_len 110 # paddle.static.InputSpec(shape=[None, None], dtype="int32"), 111 # attention_mask 112 # paddle.static.InputSpec(shape=[None, None, None], dtype="float16" if args.use_fp16_decoding else "float32"), 113 # 114 None, # mem_seq_len 115 None, # attention_mask 116 args.topk, 117 args.topp, 118 args.max_out_len, 119 tokenizer.eos_token_id, 120 tokenizer.eos_token_id, 121 tokenizer.pad_token_id, 122 args.temperature, 123 ]) 124 125 # Save converted static graph model 126 paddle.jit.save(gpt, os.path.join(args.inference_model_dir, "gpt")) 127 logger.info("GPT has been saved to {}".format(args.inference_model_dir)) 128 129 gpt.save_resources(tokenizer, args.inference_model_dir) 130 131 132 if __name__ == "__main__": 133 args = parse_args() 134 pprint(args) 135 do_predict(args) 136 ``` Path: `paddlenlp/ops/faster_transformer/sample/gpt_export_model_sample.py` Content: ``` 1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import sys 15 import os 16 import numpy as np 17 from attrdict import AttrDict 18 import argparse 19 import time 20 21 import paddle 22 23 import yaml 24 from pprint import pprint 25 26 from paddlenlp.ops import FasterGPT 27 from paddlenlp.transformers import GPTLMHeadModel 28 from paddlenlp.transformers import GPTChineseTokenizer, GPTTokenizer 29 30 from paddlenlp.utils.log import logger 31 32 MODEL_CLASSES = { 33 "gpt-cpm-large-cn": (GPTLMHeadModel, GPTChineseTokenizer), 34 "gpt2-medium-en": (GPTLMHeadModel, GPTTokenizer), 35 } 36 37 38 def parse_args(): 39 parser = argparse.ArgumentParser() 40 parser.add_argument( 41 "--model_name_or_path", 42 default="gpt2-medium-en", 43 type=str, 44 help="The model name to specify the gpt to use. Can be one of ['gpt2-en', 'gpt2-medium-en', 'gpt-cpm-large-cn']. " 45 ) 46 parser.add_argument( 47 "--decoding_lib", 48 default="../../build/lib/libdecoding_op.so", 49 type=str, 50 help="Path of libdecoding_op.so. ") 51 parser.add_argument( 52 "--inference_model_dir", 53 default="./infer_model/", 54 type=str, 55 help="Path to save inference model of gpt. 
") 56 parser.add_argument( 57 "--topk", 58 default=4, 59 type=int, 60 help="The number of candidate to procedure beam search. ") 61 parser.add_argument( 62 "--topp", 63 default=0.0, 64 type=float, 65 help="The probability threshold to procedure topp sampling. ") 66 parser.add_argument( 67 "--max_out_len", default=32, type=int, help="Maximum output length. ") 68 parser.add_argument( 69 "--temperature", 70 default=1.0, 71 type=float, 72 help="The temperature to set. ") 73 parser.add_argument( 74 "--use_fp16_decoding", 75 action="store_true", 76 help="Whether to use fp16 decoding to predict. ") 77 args = parser.parse_args() 78 return args 79 80 81 def do_predict(args): 82 place = "gpu" 83 place = paddle.set_device(place) 84 85 model_class, tokenizer_class = MODEL_CLASSES[args.model_name_or_path] 86 tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path) 87 logger.info('Loading the model parameters, please wait...') 88 model = model_class.from_pretrained( 89 args.model_name_or_path, max_predict_len=args.max_out_len) 90 91 gpt = FasterGPT( 92 model=model, 93 decoding_lib=args.decoding_lib, 94 use_fp16_decoding=args.use_fp16_decoding) 95 96 # Set evaluate mode 97 gpt.eval() 98 99 # Convert dygraph model to static graph model 100 gpt = paddle.jit.to_static( 101 gpt, 102 input_spec=[ 103 # input_ids 104 paddle.static.InputSpec( 105 shape=[None, None], dtype="int32"), 106 # 107 # If it's necessarry to provide mem_seq_len and attention_mask, 108 # the parameters should be: 109 # mem_seq_len 110 # paddle.static.InputSpec(shape=[None, None], dtype="int32"), 111 # attention_mask 112 # paddle.static.InputSpec(shape=[None, None, None], dtype="float16" if args.use_fp16_decoding else "float32"), 113 # 114 None, # mem_seq_len 115 None, # attention_mask 116 args.topk, 117 args.topp, 118 args.max_out_len, 119 tokenizer.eos_token_id, 120 tokenizer.eos_token_id, 121 tokenizer.pad_token_id, 122 args.temperature, 123 ]) 124 125 # Save converted static graph model 126 paddle.jit.save(gpt, os.path.join(args.inference_model_dir, "gpt")) 127 logger.info("GPT has been saved to {}".format(args.inference_model_dir)) 128 129 gpt.save_resources(tokenizer, args.inference_model_dir) 130 131 132 if __name__ == "__main__": 133 args = parse_args() 134 pprint(args) 135 do_predict(args) 136 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/language_model/gpt/faster_gpt/export_model.py b/examples/language_model/gpt/faster_gpt/export_model.py --- a/examples/language_model/gpt/faster_gpt/export_model.py +++ b/examples/language_model/gpt/faster_gpt/export_model.py @@ -119,6 +119,7 @@ tokenizer.eos_token_id, tokenizer.eos_token_id, tokenizer.pad_token_id, + None, # forced_eos_token_id args.temperature, ]) diff --git a/paddlenlp/ops/faster_transformer/sample/gpt_export_model_sample.py b/paddlenlp/ops/faster_transformer/sample/gpt_export_model_sample.py --- a/paddlenlp/ops/faster_transformer/sample/gpt_export_model_sample.py +++ b/paddlenlp/ops/faster_transformer/sample/gpt_export_model_sample.py @@ -119,6 +119,7 @@ tokenizer.eos_token_id, tokenizer.eos_token_id, tokenizer.pad_token_id, + None, # forced_eos_token_id args.temperature, ])
{"golden_diff": "diff --git a/examples/language_model/gpt/faster_gpt/export_model.py b/examples/language_model/gpt/faster_gpt/export_model.py\n--- a/examples/language_model/gpt/faster_gpt/export_model.py\n+++ b/examples/language_model/gpt/faster_gpt/export_model.py\n@@ -119,6 +119,7 @@\n tokenizer.eos_token_id,\n tokenizer.eos_token_id,\n tokenizer.pad_token_id,\n+ None, # forced_eos_token_id\n args.temperature,\n ])\n \ndiff --git a/paddlenlp/ops/faster_transformer/sample/gpt_export_model_sample.py b/paddlenlp/ops/faster_transformer/sample/gpt_export_model_sample.py\n--- a/paddlenlp/ops/faster_transformer/sample/gpt_export_model_sample.py\n+++ b/paddlenlp/ops/faster_transformer/sample/gpt_export_model_sample.py\n@@ -119,6 +119,7 @@\n tokenizer.eos_token_id,\n tokenizer.eos_token_id,\n tokenizer.pad_token_id,\n+ None, # forced_eos_token_id\n args.temperature,\n ])\n", "issue": "fix perf and sample code\n<!-- Demo: https://github.com/PaddlePaddle/PaddleNLP/pull/26 -->\r\n### PR types\r\n<!-- One of [ New features | Bug fixes | Function optimization | Performance optimization | Breaking changes | Others ] -->\r\nOthers\r\n### PR changes\r\n<!-- One of [ Models | APIs | Docs | Others ] -->\r\nAPIs\r\n### Description\r\n<!-- Describe what this PR does -->\r\nfix perf and sample code\n", "before_files": [{"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nimport os\nimport numpy as np\nfrom attrdict import AttrDict\nimport argparse\nimport time\n\nimport paddle\n\nimport yaml\nfrom pprint import pprint\n\nfrom paddlenlp.ops import FasterGPT\nfrom paddlenlp.transformers import GPTLMHeadModel\nfrom paddlenlp.transformers import GPTChineseTokenizer, GPTTokenizer\n\nfrom paddlenlp.utils.log import logger\n\nMODEL_CLASSES = {\n \"gpt-cpm-large-cn\": (GPTLMHeadModel, GPTChineseTokenizer),\n \"gpt2-medium-en\": (GPTLMHeadModel, GPTTokenizer),\n}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--model_name_or_path\",\n default=\"gpt2-medium-en\",\n type=str,\n help=\"The model name to specify the gpt to use. Can be one of ['gpt2-en', 'gpt2-medium-en', 'gpt-cpm-large-cn']. \"\n )\n parser.add_argument(\n \"--decoding_lib\",\n default=\"../../build/lib/libdecoding_op.so\",\n type=str,\n help=\"Path of libdecoding_op.so. \")\n parser.add_argument(\n \"--inference_model_dir\",\n default=\"./infer_model/\",\n type=str,\n help=\"Path to save inference model of gpt. \")\n parser.add_argument(\n \"--topk\",\n default=4,\n type=int,\n help=\"The number of candidate to procedure beam search. \")\n parser.add_argument(\n \"--topp\",\n default=0.0,\n type=float,\n help=\"The probability threshold to procedure topp sampling. \")\n parser.add_argument(\n \"--max_out_len\", default=32, type=int, help=\"Maximum output length. \")\n parser.add_argument(\n \"--temperature\",\n default=1.0,\n type=float,\n help=\"The temperature to set. 
\")\n parser.add_argument(\n \"--use_fp16_decoding\",\n action=\"store_true\",\n help=\"Whether to use fp16 decoding to predict. \")\n args = parser.parse_args()\n return args\n\n\ndef do_predict(args):\n place = \"gpu\"\n place = paddle.set_device(place)\n\n model_class, tokenizer_class = MODEL_CLASSES[args.model_name_or_path]\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)\n logger.info('Loading the model parameters, please wait...')\n model = model_class.from_pretrained(\n args.model_name_or_path, max_predict_len=args.max_out_len)\n\n gpt = FasterGPT(\n model=model,\n decoding_lib=args.decoding_lib,\n use_fp16_decoding=args.use_fp16_decoding)\n\n # Set evaluate mode\n gpt.eval()\n\n # Convert dygraph model to static graph model \n gpt = paddle.jit.to_static(\n gpt,\n input_spec=[\n # input_ids\n paddle.static.InputSpec(\n shape=[None, None], dtype=\"int32\"),\n #\n # If it's necessarry to provide mem_seq_len and attention_mask,\n # the parameters should be:\n # mem_seq_len\n # paddle.static.InputSpec(shape=[None, None], dtype=\"int32\"),\n # attention_mask\n # paddle.static.InputSpec(shape=[None, None, None], dtype=\"float16\" if args.use_fp16_decoding else \"float32\"),\n #\n None, # mem_seq_len\n None, # attention_mask\n args.topk,\n args.topp,\n args.max_out_len,\n tokenizer.eos_token_id,\n tokenizer.eos_token_id,\n tokenizer.pad_token_id,\n args.temperature,\n ])\n\n # Save converted static graph model\n paddle.jit.save(gpt, os.path.join(args.inference_model_dir, \"gpt\"))\n logger.info(\"GPT has been saved to {}\".format(args.inference_model_dir))\n\n gpt.save_resources(tokenizer, args.inference_model_dir)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n pprint(args)\n do_predict(args)\n", "path": "examples/language_model/gpt/faster_gpt/export_model.py"}, {"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nimport os\nimport numpy as np\nfrom attrdict import AttrDict\nimport argparse\nimport time\n\nimport paddle\n\nimport yaml\nfrom pprint import pprint\n\nfrom paddlenlp.ops import FasterGPT\nfrom paddlenlp.transformers import GPTLMHeadModel\nfrom paddlenlp.transformers import GPTChineseTokenizer, GPTTokenizer\n\nfrom paddlenlp.utils.log import logger\n\nMODEL_CLASSES = {\n \"gpt-cpm-large-cn\": (GPTLMHeadModel, GPTChineseTokenizer),\n \"gpt2-medium-en\": (GPTLMHeadModel, GPTTokenizer),\n}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--model_name_or_path\",\n default=\"gpt2-medium-en\",\n type=str,\n help=\"The model name to specify the gpt to use. Can be one of ['gpt2-en', 'gpt2-medium-en', 'gpt-cpm-large-cn']. \"\n )\n parser.add_argument(\n \"--decoding_lib\",\n default=\"../../build/lib/libdecoding_op.so\",\n type=str,\n help=\"Path of libdecoding_op.so. \")\n parser.add_argument(\n \"--inference_model_dir\",\n default=\"./infer_model/\",\n type=str,\n help=\"Path to save inference model of gpt. 
\")\n parser.add_argument(\n \"--topk\",\n default=4,\n type=int,\n help=\"The number of candidate to procedure beam search. \")\n parser.add_argument(\n \"--topp\",\n default=0.0,\n type=float,\n help=\"The probability threshold to procedure topp sampling. \")\n parser.add_argument(\n \"--max_out_len\", default=32, type=int, help=\"Maximum output length. \")\n parser.add_argument(\n \"--temperature\",\n default=1.0,\n type=float,\n help=\"The temperature to set. \")\n parser.add_argument(\n \"--use_fp16_decoding\",\n action=\"store_true\",\n help=\"Whether to use fp16 decoding to predict. \")\n args = parser.parse_args()\n return args\n\n\ndef do_predict(args):\n place = \"gpu\"\n place = paddle.set_device(place)\n\n model_class, tokenizer_class = MODEL_CLASSES[args.model_name_or_path]\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)\n logger.info('Loading the model parameters, please wait...')\n model = model_class.from_pretrained(\n args.model_name_or_path, max_predict_len=args.max_out_len)\n\n gpt = FasterGPT(\n model=model,\n decoding_lib=args.decoding_lib,\n use_fp16_decoding=args.use_fp16_decoding)\n\n # Set evaluate mode\n gpt.eval()\n\n # Convert dygraph model to static graph model \n gpt = paddle.jit.to_static(\n gpt,\n input_spec=[\n # input_ids\n paddle.static.InputSpec(\n shape=[None, None], dtype=\"int32\"),\n #\n # If it's necessarry to provide mem_seq_len and attention_mask,\n # the parameters should be:\n # mem_seq_len\n # paddle.static.InputSpec(shape=[None, None], dtype=\"int32\"),\n # attention_mask\n # paddle.static.InputSpec(shape=[None, None, None], dtype=\"float16\" if args.use_fp16_decoding else \"float32\"),\n #\n None, # mem_seq_len\n None, # attention_mask\n args.topk,\n args.topp,\n args.max_out_len,\n tokenizer.eos_token_id,\n tokenizer.eos_token_id,\n tokenizer.pad_token_id,\n args.temperature,\n ])\n\n # Save converted static graph model\n paddle.jit.save(gpt, os.path.join(args.inference_model_dir, \"gpt\"))\n logger.info(\"GPT has been saved to {}\".format(args.inference_model_dir))\n\n gpt.save_resources(tokenizer, args.inference_model_dir)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n pprint(args)\n do_predict(args)\n", "path": "paddlenlp/ops/faster_transformer/sample/gpt_export_model_sample.py"}], "after_files": [{"content": "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nimport os\nimport numpy as np\nfrom attrdict import AttrDict\nimport argparse\nimport time\n\nimport paddle\n\nimport yaml\nfrom pprint import pprint\n\nfrom paddlenlp.ops import FasterGPT\nfrom paddlenlp.transformers import GPTLMHeadModel\nfrom paddlenlp.transformers import GPTChineseTokenizer, GPTTokenizer\n\nfrom paddlenlp.utils.log import logger\n\nMODEL_CLASSES = {\n \"gpt-cpm-large-cn\": (GPTLMHeadModel, GPTChineseTokenizer),\n \"gpt2-medium-en\": (GPTLMHeadModel, GPTTokenizer),\n}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--model_name_or_path\",\n default=\"gpt2-medium-en\",\n type=str,\n help=\"The model name to specify the gpt to use. Can be one of ['gpt2-en', 'gpt2-medium-en', 'gpt-cpm-large-cn']. \"\n )\n parser.add_argument(\n \"--decoding_lib\",\n default=\"../../build/lib/libdecoding_op.so\",\n type=str,\n help=\"Path of libdecoding_op.so. \")\n parser.add_argument(\n \"--inference_model_dir\",\n default=\"./infer_model/\",\n type=str,\n help=\"Path to save inference model of gpt. \")\n parser.add_argument(\n \"--topk\",\n default=4,\n type=int,\n help=\"The number of candidate to procedure beam search. \")\n parser.add_argument(\n \"--topp\",\n default=0.0,\n type=float,\n help=\"The probability threshold to procedure topp sampling. \")\n parser.add_argument(\n \"--max_out_len\", default=32, type=int, help=\"Maximum output length. \")\n parser.add_argument(\n \"--temperature\",\n default=1.0,\n type=float,\n help=\"The temperature to set. \")\n parser.add_argument(\n \"--use_fp16_decoding\",\n action=\"store_true\",\n help=\"Whether to use fp16 decoding to predict. 
\")\n args = parser.parse_args()\n return args\n\n\ndef do_predict(args):\n place = \"gpu\"\n place = paddle.set_device(place)\n\n model_class, tokenizer_class = MODEL_CLASSES[args.model_name_or_path]\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)\n logger.info('Loading the model parameters, please wait...')\n model = model_class.from_pretrained(\n args.model_name_or_path, max_predict_len=args.max_out_len)\n\n gpt = FasterGPT(\n model=model,\n decoding_lib=args.decoding_lib,\n use_fp16_decoding=args.use_fp16_decoding)\n\n # Set evaluate mode\n gpt.eval()\n\n # Convert dygraph model to static graph model \n gpt = paddle.jit.to_static(\n gpt,\n input_spec=[\n # input_ids\n paddle.static.InputSpec(\n shape=[None, None], dtype=\"int32\"),\n #\n # If it's necessarry to provide mem_seq_len and attention_mask,\n # the parameters should be:\n # mem_seq_len\n # paddle.static.InputSpec(shape=[None, None], dtype=\"int32\"),\n # attention_mask\n # paddle.static.InputSpec(shape=[None, None, None], dtype=\"float16\" if args.use_fp16_decoding else \"float32\"),\n #\n None, # mem_seq_len\n None, # attention_mask\n args.topk,\n args.topp,\n args.max_out_len,\n tokenizer.eos_token_id,\n tokenizer.eos_token_id,\n tokenizer.pad_token_id,\n None, # forced_eos_token_id\n args.temperature,\n ])\n\n # Save converted static graph model\n paddle.jit.save(gpt, os.path.join(args.inference_model_dir, \"gpt\"))\n logger.info(\"GPT has been saved to {}\".format(args.inference_model_dir))\n\n gpt.save_resources(tokenizer, args.inference_model_dir)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n pprint(args)\n do_predict(args)\n", "path": "examples/language_model/gpt/faster_gpt/export_model.py"}, {"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nimport os\nimport numpy as np\nfrom attrdict import AttrDict\nimport argparse\nimport time\n\nimport paddle\n\nimport yaml\nfrom pprint import pprint\n\nfrom paddlenlp.ops import FasterGPT\nfrom paddlenlp.transformers import GPTLMHeadModel\nfrom paddlenlp.transformers import GPTChineseTokenizer, GPTTokenizer\n\nfrom paddlenlp.utils.log import logger\n\nMODEL_CLASSES = {\n \"gpt-cpm-large-cn\": (GPTLMHeadModel, GPTChineseTokenizer),\n \"gpt2-medium-en\": (GPTLMHeadModel, GPTTokenizer),\n}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--model_name_or_path\",\n default=\"gpt2-medium-en\",\n type=str,\n help=\"The model name to specify the gpt to use. Can be one of ['gpt2-en', 'gpt2-medium-en', 'gpt-cpm-large-cn']. \"\n )\n parser.add_argument(\n \"--decoding_lib\",\n default=\"../../build/lib/libdecoding_op.so\",\n type=str,\n help=\"Path of libdecoding_op.so. \")\n parser.add_argument(\n \"--inference_model_dir\",\n default=\"./infer_model/\",\n type=str,\n help=\"Path to save inference model of gpt. \")\n parser.add_argument(\n \"--topk\",\n default=4,\n type=int,\n help=\"The number of candidate to procedure beam search. 
\")\n parser.add_argument(\n \"--topp\",\n default=0.0,\n type=float,\n help=\"The probability threshold to procedure topp sampling. \")\n parser.add_argument(\n \"--max_out_len\", default=32, type=int, help=\"Maximum output length. \")\n parser.add_argument(\n \"--temperature\",\n default=1.0,\n type=float,\n help=\"The temperature to set. \")\n parser.add_argument(\n \"--use_fp16_decoding\",\n action=\"store_true\",\n help=\"Whether to use fp16 decoding to predict. \")\n args = parser.parse_args()\n return args\n\n\ndef do_predict(args):\n place = \"gpu\"\n place = paddle.set_device(place)\n\n model_class, tokenizer_class = MODEL_CLASSES[args.model_name_or_path]\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)\n logger.info('Loading the model parameters, please wait...')\n model = model_class.from_pretrained(\n args.model_name_or_path, max_predict_len=args.max_out_len)\n\n gpt = FasterGPT(\n model=model,\n decoding_lib=args.decoding_lib,\n use_fp16_decoding=args.use_fp16_decoding)\n\n # Set evaluate mode\n gpt.eval()\n\n # Convert dygraph model to static graph model \n gpt = paddle.jit.to_static(\n gpt,\n input_spec=[\n # input_ids\n paddle.static.InputSpec(\n shape=[None, None], dtype=\"int32\"),\n #\n # If it's necessarry to provide mem_seq_len and attention_mask,\n # the parameters should be:\n # mem_seq_len\n # paddle.static.InputSpec(shape=[None, None], dtype=\"int32\"),\n # attention_mask\n # paddle.static.InputSpec(shape=[None, None, None], dtype=\"float16\" if args.use_fp16_decoding else \"float32\"),\n #\n None, # mem_seq_len\n None, # attention_mask\n args.topk,\n args.topp,\n args.max_out_len,\n tokenizer.eos_token_id,\n tokenizer.eos_token_id,\n tokenizer.pad_token_id,\n None, # forced_eos_token_id\n args.temperature,\n ])\n\n # Save converted static graph model\n paddle.jit.save(gpt, os.path.join(args.inference_model_dir, \"gpt\"))\n logger.info(\"GPT has been saved to {}\".format(args.inference_model_dir))\n\n gpt.save_resources(tokenizer, args.inference_model_dir)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n pprint(args)\n do_predict(args)\n", "path": "paddlenlp/ops/faster_transformer/sample/gpt_export_model_sample.py"}]}
3,017
245
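The PaddleNLP diff above only adds a `None` placeholder for `forced_eos_token_id`, which suggests the exported GPT forward gained an extra positional argument and the `input_spec` list had drifted out of step with it. A small, framework-agnostic guard against that kind of drift — `check_input_spec` is a hypothetical helper, not part of PaddleNLP or Paddle — is to compare the spec length against the callable's signature:

```python
import inspect


def check_input_spec(fn, input_spec):
    # Hypothetical guard: fail loudly when a positional input_spec list no
    # longer matches the number of positional parameters of the wrapped call.
    params = [
        p for p in inspect.signature(fn).parameters.values()
        if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
    ]
    if len(input_spec) != len(params):
        names = [p.name for p in params]
        raise ValueError(
            f"input_spec has {len(input_spec)} entries but {fn.__name__} "
            f"expects {len(params)} positional parameters: {names}"
        )
    return True
```

Calling such a check before exporting would have flagged the missing placeholder instead of letting the mismatch surface at inference time.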
gh_patches_debug_17503
rasdani/github-patches
git_diff
voxel51__fiftyone-563
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] numpy.array sample fields trigger server error when serialized ### System information - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 20.04 - **FiftyOne installed from (pip or source)**: source - **FiftyOne version (run `fiftyone --version`)**: v0.5.6 (e86c3570) - does not occur in v0.5.5 - **Python version**: 3.6 ### Commands to reproduce 1. Start server with `python fiftyone/server/main.py` 2. Start app with `yarn dev` 3. Run the code below ### Describe the problem The server fails to serialize the sample (see traceback) and the sample does not display in the app. ### Code to reproduce issue ```python import fiftyone as fo import numpy as np dataset = fo.Dataset() dataset.add_sample(fo.Sample('/path/to/image', field=np.array([1,2,3]))) session = fo.launch_app(remote=True, dataset=dataset) ``` ### Other info / logs Probably introduced in #543, since that changed JSON encoding. Previously, this field was serialized as: ``` "field": { "$binary": "eJyb7BfqGxDJyFDGUK2eklqcXKRupaBuk2mhrqOgnpZfVFKUmBefX5SSChJ3S8wpTgWKF2ckFqQC+RrGOpo6CrUKFAAuRgYIYILSzFAaAOdAG2c=", "$type": "00" } ``` Server traceback: ``` File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner self.run() File "/usr/lib/python3.6/threading.py", line 864, in run self._target(*self._args, **self._kwargs) File ".../lib/python3.6/site-packages/socketio/server.py", line 696, in _handle_event_internal binary=binary)) File ".../lib/python3.6/site-packages/socketio/server.py", line 607, in _send_packet encoded_packet = pkt.encode() File ".../lib/python3.6/site-packages/socketio/packet.py", line 71, in encode encoded_packet += self.json.dumps(data, separators=(',', ':')) File "/home/alan/code/fiftyone/fiftyone/server/json_util.py", line 47, in dumps json_util.dumps(*args, **kwargs), parse_constant=lambda c: c File ".../lib/python3.6/site-packages/bson/json_util.py", line 383, in dumps return json.dumps(_json_convert(obj, json_options), *args, **kwargs) File "/usr/lib/python3.6/json/__init__.py", line 238, in dumps **kw).encode(obj) File ".../lib/python3.6/site-packages/simplejson/encoder.py", line 275, in encode chunks = self.iterencode(o, _one_shot=True) File ".../lib/python3.6/site-packages/simplejson/encoder.py", line 357, in iterencode return _iterencode(o, 0) UnicodeDecodeError: 'utf-8' codec can't decode byte 0x9c in position 1: invalid start byte ``` ### What areas of FiftyOne does this bug affect? - [ ] `App`: FiftyOne application issue - [ ] `Core`: Core `fiftyone` Python library issue - [x] `Server`: Fiftyone server issue --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `fiftyone/server/json_util.py` Content: ``` 1 """ 2 FiftyOne server json utilies. 3 4 | Copyright 2017-2020, Voxel51, Inc. 5 | `voxel51.com <https://voxel51.com/>`_ 6 | 7 """ 8 from bson import ObjectId, json_util 9 from flask.json import JSONEncoder 10 11 from fiftyone.core.sample import Sample, SampleView 12 from fiftyone.core.stages import ViewStage 13 14 15 class FiftyOneJSONEncoder(JSONEncoder): 16 """JSON encoder for the FiftyOne server. 17 18 Any classes with non-standard serialization methods should 19 be accounted for in the `default()` method. 
20 """ 21 22 def default(self, o): # pylint: disable=E0202 23 """Returns the serialized representation of the objects 24 25 Args: 26 o: the object 27 28 Returns: 29 str 30 """ 31 if isinstance(o, (Sample, SampleView)): 32 return o.to_mongo_dict() 33 if issubclass(type(o), ViewStage): 34 return o._serialize() 35 if isinstance(o, ObjectId): 36 return str(o) 37 if isinstance(o, float): 38 return json_util.dumps(o) 39 return super().default(o) 40 41 @staticmethod 42 def dumps(*args, **kwargs): 43 """Defined for overriding the default SocketIO `json` interface""" 44 kwargs["cls"] = FiftyOneJSONEncoder 45 return json_util.dumps( 46 json_util.loads( 47 json_util.dumps(*args, **kwargs), parse_constant=lambda c: c 48 ), 49 **kwargs 50 ) 51 52 @staticmethod 53 def loads(*args, **kwargs): 54 """Defined for overriding the default SocketIO `json` interface""" 55 return json_util.loads(*args, **kwargs) 56 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/fiftyone/server/json_util.py b/fiftyone/server/json_util.py --- a/fiftyone/server/json_util.py +++ b/fiftyone/server/json_util.py @@ -10,6 +10,16 @@ from fiftyone.core.sample import Sample, SampleView from fiftyone.core.stages import ViewStage +import fiftyone.core.utils as fou + + +def _handle_bytes(o): + for k, v in o.items(): + if isinstance(v, bytes): + o[k] = str(fou.deserialize_numpy_array(v).shape) + if isinstance(v, dict): + o[k] = _handle_bytes(v) + return o class FiftyOneJSONEncoder(JSONEncoder): @@ -29,7 +39,7 @@ str """ if isinstance(o, (Sample, SampleView)): - return o.to_mongo_dict() + return _handle_bytes(o.to_mongo_dict()) if issubclass(type(o), ViewStage): return o._serialize() if isinstance(o, ObjectId):
{"golden_diff": "diff --git a/fiftyone/server/json_util.py b/fiftyone/server/json_util.py\n--- a/fiftyone/server/json_util.py\n+++ b/fiftyone/server/json_util.py\n@@ -10,6 +10,16 @@\n \n from fiftyone.core.sample import Sample, SampleView\n from fiftyone.core.stages import ViewStage\n+import fiftyone.core.utils as fou\n+\n+\n+def _handle_bytes(o):\n+ for k, v in o.items():\n+ if isinstance(v, bytes):\n+ o[k] = str(fou.deserialize_numpy_array(v).shape)\n+ if isinstance(v, dict):\n+ o[k] = _handle_bytes(v)\n+ return o\n \n \n class FiftyOneJSONEncoder(JSONEncoder):\n@@ -29,7 +39,7 @@\n str\n \"\"\"\n if isinstance(o, (Sample, SampleView)):\n- return o.to_mongo_dict()\n+ return _handle_bytes(o.to_mongo_dict())\n if issubclass(type(o), ViewStage):\n return o._serialize()\n if isinstance(o, ObjectId):\n", "issue": "[BUG] numpy.array sample fields trigger server error when serialized\n### System information\r\n\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 20.04\r\n- **FiftyOne installed from (pip or source)**: source\r\n- **FiftyOne version (run `fiftyone --version`)**: v0.5.6 (e86c3570) - does not occur in v0.5.5\r\n- **Python version**: 3.6\r\n\r\n### Commands to reproduce\r\n\r\n1. Start server with `python fiftyone/server/main.py`\r\n2. Start app with `yarn dev`\r\n3. Run the code below\r\n\r\n### Describe the problem\r\n\r\nThe server fails to serialize the sample (see traceback) and the sample does not display in the app.\r\n\r\n\r\n### Code to reproduce issue\r\n```python\r\nimport fiftyone as fo\r\nimport numpy as np\r\ndataset = fo.Dataset()\r\ndataset.add_sample(fo.Sample('/path/to/image', field=np.array([1,2,3])))\r\nsession = fo.launch_app(remote=True, dataset=dataset)\r\n```\r\n\r\n### Other info / logs\r\n\r\nProbably introduced in #543, since that changed JSON encoding. 
Previously, this field was serialized as:\r\n```\r\n \"field\": {\r\n \"$binary\": \"eJyb7BfqGxDJyFDGUK2eklqcXKRupaBuk2mhrqOgnpZfVFKUmBefX5SSChJ3S8wpTgWKF2ckFqQC+RrGOpo6CrUKFAAuRgYIYILSzFAaAOdAG2c=\",\r\n \"$type\": \"00\"\r\n }\r\n```\r\n\r\nServer traceback:\r\n```\r\n File \"/usr/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \".../lib/python3.6/site-packages/socketio/server.py\", line 696, in _handle_event_internal\r\n binary=binary))\r\n File \".../lib/python3.6/site-packages/socketio/server.py\", line 607, in _send_packet\r\n encoded_packet = pkt.encode()\r\n File \".../lib/python3.6/site-packages/socketio/packet.py\", line 71, in encode\r\n encoded_packet += self.json.dumps(data, separators=(',', ':'))\r\n File \"/home/alan/code/fiftyone/fiftyone/server/json_util.py\", line 47, in dumps\r\n json_util.dumps(*args, **kwargs), parse_constant=lambda c: c\r\n File \".../lib/python3.6/site-packages/bson/json_util.py\", line 383, in dumps\r\n return json.dumps(_json_convert(obj, json_options), *args, **kwargs)\r\n File \"/usr/lib/python3.6/json/__init__.py\", line 238, in dumps\r\n **kw).encode(obj)\r\n File \".../lib/python3.6/site-packages/simplejson/encoder.py\", line 275, in encode\r\n chunks = self.iterencode(o, _one_shot=True)\r\n File \".../lib/python3.6/site-packages/simplejson/encoder.py\", line 357, in iterencode\r\n return _iterencode(o, 0)\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0x9c in position 1: invalid start byte\r\n```\r\n\r\n### What areas of FiftyOne does this bug affect?\r\n\r\n- [ ] `App`: FiftyOne application issue\r\n- [ ] `Core`: Core `fiftyone` Python library issue\r\n- [x] `Server`: Fiftyone server issue\r\n\n", "before_files": [{"content": "\"\"\"\nFiftyOne server json utilies.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nfrom bson import ObjectId, json_util\nfrom flask.json import JSONEncoder\n\nfrom fiftyone.core.sample import Sample, SampleView\nfrom fiftyone.core.stages import ViewStage\n\n\nclass FiftyOneJSONEncoder(JSONEncoder):\n \"\"\"JSON encoder for the FiftyOne server.\n\n Any classes with non-standard serialization methods should\n be accounted for in the `default()` method.\n \"\"\"\n\n def default(self, o): # pylint: disable=E0202\n \"\"\"Returns the serialized representation of the objects\n\n Args:\n o: the object\n\n Returns:\n str\n \"\"\"\n if isinstance(o, (Sample, SampleView)):\n return o.to_mongo_dict()\n if issubclass(type(o), ViewStage):\n return o._serialize()\n if isinstance(o, ObjectId):\n return str(o)\n if isinstance(o, float):\n return json_util.dumps(o)\n return super().default(o)\n\n @staticmethod\n def dumps(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n kwargs[\"cls\"] = FiftyOneJSONEncoder\n return json_util.dumps(\n json_util.loads(\n json_util.dumps(*args, **kwargs), parse_constant=lambda c: c\n ),\n **kwargs\n )\n\n @staticmethod\n def loads(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n return json_util.loads(*args, **kwargs)\n", "path": "fiftyone/server/json_util.py"}], "after_files": [{"content": "\"\"\"\nFiftyOne server json utilies.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nfrom bson import ObjectId, json_util\nfrom flask.json import JSONEncoder\n\nfrom 
fiftyone.core.sample import Sample, SampleView\nfrom fiftyone.core.stages import ViewStage\nimport fiftyone.core.utils as fou\n\n\ndef _handle_bytes(o):\n for k, v in o.items():\n if isinstance(v, bytes):\n o[k] = str(fou.deserialize_numpy_array(v).shape)\n if isinstance(v, dict):\n o[k] = _handle_bytes(v)\n return o\n\n\nclass FiftyOneJSONEncoder(JSONEncoder):\n \"\"\"JSON encoder for the FiftyOne server.\n\n Any classes with non-standard serialization methods should\n be accounted for in the `default()` method.\n \"\"\"\n\n def default(self, o): # pylint: disable=E0202\n \"\"\"Returns the serialized representation of the objects\n\n Args:\n o: the object\n\n Returns:\n str\n \"\"\"\n if isinstance(o, (Sample, SampleView)):\n return _handle_bytes(o.to_mongo_dict())\n if issubclass(type(o), ViewStage):\n return o._serialize()\n if isinstance(o, ObjectId):\n return str(o)\n if isinstance(o, float):\n return json_util.dumps(o)\n return super().default(o)\n\n @staticmethod\n def dumps(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n kwargs[\"cls\"] = FiftyOneJSONEncoder\n return json_util.dumps(\n json_util.loads(\n json_util.dumps(*args, **kwargs), parse_constant=lambda c: c\n ),\n **kwargs\n )\n\n @staticmethod\n def loads(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n return json_util.loads(*args, **kwargs)\n", "path": "fiftyone/server/json_util.py"}]}
1,552
232
gh_patches_debug_39820
rasdani/github-patches
git_diff
PokemonGoF__PokemonGo-Bot-1472
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- keep_best_iv should order by iv then cp ### Expected Behavior If we have 3 pokemon of a type and wanted to keep the 2 best IV and two pokemon have the same low IV, we would like to keep the one with max CP ### Actual Behavior [20:25:39] Keep 2 best Slowpoke, based on iv [20:25:39] Slowpoke [CP 274] [Potential 0.56] [20:25:39] Slowpoke [CP 172] [Potential 0.51] [20:25:39] Exchange 1 pokemon(s) [20:25:39] Exchanging Slowpoke [CP 392] [Potential 0.51] for candy! ### Steps to Reproduce Farm pokemon and have some luck :) ### Other Information OS: OSX Git Commit: (run 'git log -n 1 --pretty=format:"%H"' and paste it here) 34caa002a464b7665cdcfb531ea3d302632898ac --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pokemongo_bot/cell_workers/pokemon_transfer_worker.py` Content: ``` 1 import json 2 3 from pokemongo_bot.human_behaviour import sleep, action_delay 4 from pokemongo_bot import logger 5 6 class PokemonTransferWorker(object): 7 8 def __init__(self, bot): 9 self.config = bot.config 10 self.pokemon_list = bot.pokemon_list 11 self.api = bot.api 12 self.bot = bot 13 14 def work(self): 15 if not self.config.release_pokemon: 16 return 17 18 pokemon_groups = self._release_pokemon_get_groups() 19 for pokemon_id in pokemon_groups: 20 group = pokemon_groups[pokemon_id] 21 22 if len(group) > 1: 23 pokemon_name = self.pokemon_list[pokemon_id - 1]['Name'] 24 keep_best, keep_best_cp, keep_best_iv = self._validate_keep_best_config(pokemon_name) 25 26 if keep_best: 27 order_criteria = 'cp' 28 limit = keep_best_cp 29 30 if keep_best_iv >= 1: 31 order_criteria = 'iv' 32 limit = keep_best_iv 33 34 best_pokemons = sorted(group, key=lambda x: x[order_criteria], reverse=True)[:limit] 35 36 # remove best pokemons from all pokemons array 37 all_pokemons = group 38 for best_pokemon in best_pokemons: 39 for pokemon in all_pokemons: 40 if best_pokemon['pokemon_data']['id'] == pokemon['pokemon_data']['id']: 41 all_pokemons.remove(pokemon) 42 43 if best_pokemons and all_pokemons: 44 logger.log("Keep {} best {}, based on {}".format(len(best_pokemons), 45 pokemon_name, 46 order_criteria), "green") 47 for best_pokemon in best_pokemons: 48 logger.log("{} [CP {}] [Potential {}]".format(pokemon_name, 49 best_pokemon['cp'], 50 best_pokemon['iv']), 'green') 51 52 logger.log("Exchange {} pokemon(s)".format(len(all_pokemons)), "green") 53 54 for pokemon in all_pokemons: 55 self.release_pokemon(pokemon_name, pokemon['cp'], pokemon['iv'], pokemon['pokemon_data']['id']) 56 else: 57 group = sorted(group, key=lambda x: x['cp'], reverse=True) 58 for item in group: 59 pokemon_cp = item['cp'] 60 pokemon_potential = item['iv'] 61 62 if self.should_release_pokemon(pokemon_name, pokemon_cp, pokemon_potential): 63 self.release_pokemon(pokemon_name, item['cp'], item['iv'], item['pokemon_data']['id']) 64 65 def _release_pokemon_get_groups(self): 66 pokemon_groups = {} 67 self.api.get_player().get_inventory() 68 inventory_req = self.api.call() 69 70 if inventory_req.get('responses', False) is False: 71 return pokemon_groups 72 73 inventory_dict = inventory_req['responses']['GET_INVENTORY']['inventory_delta']['inventory_items'] 74 75 user_web_inventory = 'web/inventory-%s.json' % (self.config.username) 76 with open(user_web_inventory, 'w') as outfile: 77 json.dump(inventory_dict, outfile) 78 79 for pokemon in inventory_dict: 80 try: 81 
reduce(dict.__getitem__, [ 82 "inventory_item_data", "pokemon_data", "pokemon_id" 83 ], pokemon) 84 except KeyError: 85 continue 86 87 pokemon_data = pokemon['inventory_item_data']['pokemon_data'] 88 group_id = pokemon_data['pokemon_id'] 89 group_pokemon_cp = pokemon_data['cp'] 90 group_pokemon_iv = self.get_pokemon_potential(pokemon_data) 91 92 if group_id not in pokemon_groups: 93 pokemon_groups[group_id] = [] 94 95 pokemon_groups[group_id].append({ 96 'cp': group_pokemon_cp, 97 'iv': group_pokemon_iv, 98 'pokemon_data': pokemon_data 99 }) 100 101 return pokemon_groups 102 103 def get_pokemon_potential(self, pokemon_data): 104 total_iv = 0 105 iv_stats = ['individual_attack', 'individual_defense', 'individual_stamina'] 106 for individual_stat in iv_stats: 107 try: 108 total_iv += pokemon_data[individual_stat] 109 except Exception: 110 continue 111 return round((total_iv / 45.0), 2) 112 113 def should_release_pokemon(self, pokemon_name, cp, iv): 114 release_config = self._get_release_config_for(pokemon_name) 115 cp_iv_logic = release_config.get('logic') 116 if not cp_iv_logic: 117 cp_iv_logic = self._get_release_config_for('any').get('logic', 'and') 118 119 release_results = { 120 'cp': False, 121 'iv': False, 122 } 123 124 if release_config.get('never_release', False): 125 return False 126 127 if release_config.get('always_release', False): 128 return True 129 130 release_cp = release_config.get('release_below_cp', 0) 131 if cp < release_cp: 132 release_results['cp'] = True 133 134 release_iv = release_config.get('release_below_iv', 0) 135 if iv < release_iv: 136 release_results['iv'] = True 137 138 logic_to_function = { 139 'or': lambda x, y: x or y, 140 'and': lambda x, y: x and y 141 } 142 143 if logic_to_function[cp_iv_logic](*release_results.values()): 144 logger.log( 145 "Releasing {} with CP {} and IV {}. Matching release rule: CP < {} {} IV < {}. ".format( 146 pokemon_name, 147 cp, 148 iv, 149 release_cp, 150 cp_iv_logic.upper(), 151 release_iv 152 ), 'yellow' 153 ) 154 155 return logic_to_function[cp_iv_logic](*release_results.values()) 156 157 def release_pokemon(self, pokemon_name, cp, iv, pokemon_id): 158 logger.log('Exchanging {} [CP {}] [Potential {}] for candy!'.format(pokemon_name, 159 cp, 160 iv), 'green') 161 self.api.release_pokemon(pokemon_id=pokemon_id) 162 response_dict = self.api.call() 163 action_delay(self.config.action_wait_min, self.config.action_wait_max) 164 165 def _get_release_config_for(self, pokemon): 166 release_config = self.config.release.get(pokemon) 167 if not release_config: 168 release_config = self.config.release.get('any') 169 if not release_config: 170 release_config = {} 171 return release_config 172 173 def _validate_keep_best_config(self, pokemon_name): 174 keep_best = False 175 176 release_config = self._get_release_config_for(pokemon_name) 177 178 keep_best_cp = release_config.get('keep_best_cp', 0) 179 keep_best_iv = release_config.get('keep_best_iv', 0) 180 181 if keep_best_cp or keep_best_iv: 182 keep_best = True 183 try: 184 keep_best_cp = int(keep_best_cp) 185 except ValueError: 186 keep_best_cp = 0 187 188 try: 189 keep_best_iv = int(keep_best_iv) 190 except ValueError: 191 keep_best_iv = 0 192 193 if keep_best_cp > 1 and keep_best_iv > 1: 194 logger.log("keep_best_cp and keep_best_iv can't be > 0 at the same time. Ignore it.", "red") 195 keep_best = False 196 197 if keep_best_cp < 0 or keep_best_iv < 0: 198 logger.log("Keep best can't be < 0. 
Ignore it.", "red") 199 keep_best = False 200 201 if keep_best_cp == 0 and keep_best_iv == 0: 202 keep_best = False 203 204 return keep_best, keep_best_cp, keep_best_iv 205 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pokemongo_bot/cell_workers/pokemon_transfer_worker.py b/pokemongo_bot/cell_workers/pokemon_transfer_worker.py --- a/pokemongo_bot/cell_workers/pokemon_transfer_worker.py +++ b/pokemongo_bot/cell_workers/pokemon_transfer_worker.py @@ -3,6 +3,7 @@ from pokemongo_bot.human_behaviour import sleep, action_delay from pokemongo_bot import logger + class PokemonTransferWorker(object): def __init__(self, bot): @@ -24,21 +25,31 @@ keep_best, keep_best_cp, keep_best_iv = self._validate_keep_best_config(pokemon_name) if keep_best: - order_criteria = 'cp' - limit = keep_best_cp + best_pokemon_ids = set() + order_criteria = 'none' + if keep_best_cp >= 1: + cp_limit = keep_best_cp + best_cp_pokemons = sorted(group, key=lambda x: (x['cp'], x['iv']), reverse=True)[:cp_limit] + best_pokemon_ids = set(pokemon['pokemon_data']['id'] for pokemon in best_cp_pokemons) + order_criteria = 'cp' if keep_best_iv >= 1: - order_criteria = 'iv' - limit = keep_best_iv - - best_pokemons = sorted(group, key=lambda x: x[order_criteria], reverse=True)[:limit] + iv_limit = keep_best_iv + best_iv_pokemons = sorted(group, key=lambda x: (x['iv'], x['cp']), reverse=True)[:iv_limit] + best_pokemon_ids |= set(pokemon['pokemon_data']['id'] for pokemon in best_iv_pokemons) + if order_criteria == 'cp': + order_criteria = 'cp and iv' + else: + order_criteria = 'iv' # remove best pokemons from all pokemons array all_pokemons = group - for best_pokemon in best_pokemons: + best_pokemons = [] + for best_pokemon_id in best_pokemon_ids: for pokemon in all_pokemons: - if best_pokemon['pokemon_data']['id'] == pokemon['pokemon_data']['id']: + if best_pokemon_id == pokemon['pokemon_data']['id']: all_pokemons.remove(pokemon) + best_pokemons.append(pokemon) if best_pokemons and all_pokemons: logger.log("Keep {} best {}, based on {}".format(len(best_pokemons), @@ -190,10 +201,6 @@ except ValueError: keep_best_iv = 0 - if keep_best_cp > 1 and keep_best_iv > 1: - logger.log("keep_best_cp and keep_best_iv can't be > 0 at the same time. Ignore it.", "red") - keep_best = False - if keep_best_cp < 0 or keep_best_iv < 0: logger.log("Keep best can't be < 0. Ignore it.", "red") keep_best = False
{"golden_diff": "diff --git a/pokemongo_bot/cell_workers/pokemon_transfer_worker.py b/pokemongo_bot/cell_workers/pokemon_transfer_worker.py\n--- a/pokemongo_bot/cell_workers/pokemon_transfer_worker.py\n+++ b/pokemongo_bot/cell_workers/pokemon_transfer_worker.py\n@@ -3,6 +3,7 @@\n from pokemongo_bot.human_behaviour import sleep, action_delay\n from pokemongo_bot import logger\n \n+\n class PokemonTransferWorker(object):\n \n def __init__(self, bot):\n@@ -24,21 +25,31 @@\n keep_best, keep_best_cp, keep_best_iv = self._validate_keep_best_config(pokemon_name)\n \n if keep_best:\n- order_criteria = 'cp'\n- limit = keep_best_cp\n+ best_pokemon_ids = set()\n+ order_criteria = 'none'\n+ if keep_best_cp >= 1:\n+ cp_limit = keep_best_cp\n+ best_cp_pokemons = sorted(group, key=lambda x: (x['cp'], x['iv']), reverse=True)[:cp_limit]\n+ best_pokemon_ids = set(pokemon['pokemon_data']['id'] for pokemon in best_cp_pokemons)\n+ order_criteria = 'cp'\n \n if keep_best_iv >= 1:\n- order_criteria = 'iv'\n- limit = keep_best_iv\n-\n- best_pokemons = sorted(group, key=lambda x: x[order_criteria], reverse=True)[:limit]\n+ iv_limit = keep_best_iv\n+ best_iv_pokemons = sorted(group, key=lambda x: (x['iv'], x['cp']), reverse=True)[:iv_limit]\n+ best_pokemon_ids |= set(pokemon['pokemon_data']['id'] for pokemon in best_iv_pokemons)\n+ if order_criteria == 'cp':\n+ order_criteria = 'cp and iv'\n+ else:\n+ order_criteria = 'iv'\n \n # remove best pokemons from all pokemons array\n all_pokemons = group\n- for best_pokemon in best_pokemons:\n+ best_pokemons = []\n+ for best_pokemon_id in best_pokemon_ids:\n for pokemon in all_pokemons:\n- if best_pokemon['pokemon_data']['id'] == pokemon['pokemon_data']['id']:\n+ if best_pokemon_id == pokemon['pokemon_data']['id']:\n all_pokemons.remove(pokemon)\n+ best_pokemons.append(pokemon)\n \n if best_pokemons and all_pokemons:\n logger.log(\"Keep {} best {}, based on {}\".format(len(best_pokemons),\n@@ -190,10 +201,6 @@\n except ValueError:\n keep_best_iv = 0\n \n- if keep_best_cp > 1 and keep_best_iv > 1:\n- logger.log(\"keep_best_cp and keep_best_iv can't be > 0 at the same time. Ignore it.\", \"red\")\n- keep_best = False\n-\n if keep_best_cp < 0 or keep_best_iv < 0:\n logger.log(\"Keep best can't be < 0. 
Ignore it.\", \"red\")\n keep_best = False\n", "issue": "keep_best_iv should order by iv then cp\n### Expected Behavior\n\nIf we have 3 pokemon of a type and wanted to keep the 2 best IV and two pokemon have the same low IV, we would like to keep the one with max CP\n### Actual Behavior\n\n[20:25:39] Keep 2 best Slowpoke, based on iv\n[20:25:39] Slowpoke [CP 274] [Potential 0.56]\n[20:25:39] Slowpoke [CP 172] [Potential 0.51]\n[20:25:39] Exchange 1 pokemon(s)\n[20:25:39] Exchanging Slowpoke [CP 392] [Potential 0.51] for candy!\n### Steps to Reproduce\n\nFarm pokemon and have some luck :)\n### Other Information\n\nOS: OSX \nGit Commit: (run 'git log -n 1 --pretty=format:\"%H\"' and paste it here)\n34caa002a464b7665cdcfb531ea3d302632898ac\n\n", "before_files": [{"content": "import json\n\nfrom pokemongo_bot.human_behaviour import sleep, action_delay\nfrom pokemongo_bot import logger\n\nclass PokemonTransferWorker(object):\n\n def __init__(self, bot):\n self.config = bot.config\n self.pokemon_list = bot.pokemon_list\n self.api = bot.api\n self.bot = bot\n\n def work(self):\n if not self.config.release_pokemon:\n return\n\n pokemon_groups = self._release_pokemon_get_groups()\n for pokemon_id in pokemon_groups:\n group = pokemon_groups[pokemon_id]\n\n if len(group) > 1:\n pokemon_name = self.pokemon_list[pokemon_id - 1]['Name']\n keep_best, keep_best_cp, keep_best_iv = self._validate_keep_best_config(pokemon_name)\n\n if keep_best:\n order_criteria = 'cp'\n limit = keep_best_cp\n\n if keep_best_iv >= 1:\n order_criteria = 'iv'\n limit = keep_best_iv\n\n best_pokemons = sorted(group, key=lambda x: x[order_criteria], reverse=True)[:limit]\n\n # remove best pokemons from all pokemons array\n all_pokemons = group\n for best_pokemon in best_pokemons:\n for pokemon in all_pokemons:\n if best_pokemon['pokemon_data']['id'] == pokemon['pokemon_data']['id']:\n all_pokemons.remove(pokemon)\n\n if best_pokemons and all_pokemons:\n logger.log(\"Keep {} best {}, based on {}\".format(len(best_pokemons),\n pokemon_name,\n order_criteria), \"green\")\n for best_pokemon in best_pokemons:\n logger.log(\"{} [CP {}] [Potential {}]\".format(pokemon_name,\n best_pokemon['cp'],\n best_pokemon['iv']), 'green')\n\n logger.log(\"Exchange {} pokemon(s)\".format(len(all_pokemons)), \"green\")\n\n for pokemon in all_pokemons:\n self.release_pokemon(pokemon_name, pokemon['cp'], pokemon['iv'], pokemon['pokemon_data']['id'])\n else:\n group = sorted(group, key=lambda x: x['cp'], reverse=True)\n for item in group:\n pokemon_cp = item['cp']\n pokemon_potential = item['iv']\n\n if self.should_release_pokemon(pokemon_name, pokemon_cp, pokemon_potential):\n self.release_pokemon(pokemon_name, item['cp'], item['iv'], item['pokemon_data']['id'])\n\n def _release_pokemon_get_groups(self):\n pokemon_groups = {}\n self.api.get_player().get_inventory()\n inventory_req = self.api.call()\n\n if inventory_req.get('responses', False) is False:\n return pokemon_groups\n\n inventory_dict = inventory_req['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']\n\n user_web_inventory = 'web/inventory-%s.json' % (self.config.username)\n with open(user_web_inventory, 'w') as outfile:\n json.dump(inventory_dict, outfile)\n\n for pokemon in inventory_dict:\n try:\n reduce(dict.__getitem__, [\n \"inventory_item_data\", \"pokemon_data\", \"pokemon_id\"\n ], pokemon)\n except KeyError:\n continue\n\n pokemon_data = pokemon['inventory_item_data']['pokemon_data']\n group_id = pokemon_data['pokemon_id']\n group_pokemon_cp = pokemon_data['cp']\n 
group_pokemon_iv = self.get_pokemon_potential(pokemon_data)\n\n if group_id not in pokemon_groups:\n pokemon_groups[group_id] = []\n\n pokemon_groups[group_id].append({\n 'cp': group_pokemon_cp,\n 'iv': group_pokemon_iv,\n 'pokemon_data': pokemon_data\n })\n\n return pokemon_groups\n\n def get_pokemon_potential(self, pokemon_data):\n total_iv = 0\n iv_stats = ['individual_attack', 'individual_defense', 'individual_stamina']\n for individual_stat in iv_stats:\n try:\n total_iv += pokemon_data[individual_stat]\n except Exception:\n continue\n return round((total_iv / 45.0), 2)\n\n def should_release_pokemon(self, pokemon_name, cp, iv):\n release_config = self._get_release_config_for(pokemon_name)\n cp_iv_logic = release_config.get('logic')\n if not cp_iv_logic:\n cp_iv_logic = self._get_release_config_for('any').get('logic', 'and')\n\n release_results = {\n 'cp': False,\n 'iv': False,\n }\n\n if release_config.get('never_release', False):\n return False\n\n if release_config.get('always_release', False):\n return True\n\n release_cp = release_config.get('release_below_cp', 0)\n if cp < release_cp:\n release_results['cp'] = True\n\n release_iv = release_config.get('release_below_iv', 0)\n if iv < release_iv:\n release_results['iv'] = True\n\n logic_to_function = {\n 'or': lambda x, y: x or y,\n 'and': lambda x, y: x and y\n }\n\n if logic_to_function[cp_iv_logic](*release_results.values()):\n logger.log(\n \"Releasing {} with CP {} and IV {}. Matching release rule: CP < {} {} IV < {}. \".format(\n pokemon_name,\n cp,\n iv,\n release_cp,\n cp_iv_logic.upper(),\n release_iv\n ), 'yellow'\n )\n\n return logic_to_function[cp_iv_logic](*release_results.values())\n\n def release_pokemon(self, pokemon_name, cp, iv, pokemon_id):\n logger.log('Exchanging {} [CP {}] [Potential {}] for candy!'.format(pokemon_name,\n cp,\n iv), 'green')\n self.api.release_pokemon(pokemon_id=pokemon_id)\n response_dict = self.api.call()\n action_delay(self.config.action_wait_min, self.config.action_wait_max)\n\n def _get_release_config_for(self, pokemon):\n release_config = self.config.release.get(pokemon)\n if not release_config:\n release_config = self.config.release.get('any')\n if not release_config:\n release_config = {}\n return release_config\n\n def _validate_keep_best_config(self, pokemon_name):\n keep_best = False\n\n release_config = self._get_release_config_for(pokemon_name)\n\n keep_best_cp = release_config.get('keep_best_cp', 0)\n keep_best_iv = release_config.get('keep_best_iv', 0)\n\n if keep_best_cp or keep_best_iv:\n keep_best = True\n try:\n keep_best_cp = int(keep_best_cp)\n except ValueError:\n keep_best_cp = 0\n\n try:\n keep_best_iv = int(keep_best_iv)\n except ValueError:\n keep_best_iv = 0\n\n if keep_best_cp > 1 and keep_best_iv > 1:\n logger.log(\"keep_best_cp and keep_best_iv can't be > 0 at the same time. Ignore it.\", \"red\")\n keep_best = False\n\n if keep_best_cp < 0 or keep_best_iv < 0:\n logger.log(\"Keep best can't be < 0. 
Ignore it.\", \"red\")\n keep_best = False\n\n if keep_best_cp == 0 and keep_best_iv == 0:\n keep_best = False\n\n return keep_best, keep_best_cp, keep_best_iv\n", "path": "pokemongo_bot/cell_workers/pokemon_transfer_worker.py"}], "after_files": [{"content": "import json\n\nfrom pokemongo_bot.human_behaviour import sleep, action_delay\nfrom pokemongo_bot import logger\n\n\nclass PokemonTransferWorker(object):\n\n def __init__(self, bot):\n self.config = bot.config\n self.pokemon_list = bot.pokemon_list\n self.api = bot.api\n self.bot = bot\n\n def work(self):\n if not self.config.release_pokemon:\n return\n\n pokemon_groups = self._release_pokemon_get_groups()\n for pokemon_id in pokemon_groups:\n group = pokemon_groups[pokemon_id]\n\n if len(group) > 1:\n pokemon_name = self.pokemon_list[pokemon_id - 1]['Name']\n keep_best, keep_best_cp, keep_best_iv = self._validate_keep_best_config(pokemon_name)\n\n if keep_best:\n best_pokemon_ids = set()\n order_criteria = 'none'\n if keep_best_cp >= 1:\n cp_limit = keep_best_cp\n best_cp_pokemons = sorted(group, key=lambda x: (x['cp'], x['iv']), reverse=True)[:cp_limit]\n best_pokemon_ids = set(pokemon['pokemon_data']['id'] for pokemon in best_cp_pokemons)\n order_criteria = 'cp'\n\n if keep_best_iv >= 1:\n iv_limit = keep_best_iv\n best_iv_pokemons = sorted(group, key=lambda x: (x['iv'], x['cp']), reverse=True)[:iv_limit]\n best_pokemon_ids |= set(pokemon['pokemon_data']['id'] for pokemon in best_iv_pokemons)\n if order_criteria == 'cp':\n order_criteria = 'cp and iv'\n else:\n order_criteria = 'iv'\n\n # remove best pokemons from all pokemons array\n all_pokemons = group\n best_pokemons = []\n for best_pokemon_id in best_pokemon_ids:\n for pokemon in all_pokemons:\n if best_pokemon_id == pokemon['pokemon_data']['id']:\n all_pokemons.remove(pokemon)\n best_pokemons.append(pokemon)\n\n if best_pokemons and all_pokemons:\n logger.log(\"Keep {} best {}, based on {}\".format(len(best_pokemons),\n pokemon_name,\n order_criteria), \"green\")\n for best_pokemon in best_pokemons:\n logger.log(\"{} [CP {}] [Potential {}]\".format(pokemon_name,\n best_pokemon['cp'],\n best_pokemon['iv']), 'green')\n\n logger.log(\"Exchange {} pokemon(s)\".format(len(all_pokemons)), \"green\")\n\n for pokemon in all_pokemons:\n self.release_pokemon(pokemon_name, pokemon['cp'], pokemon['iv'], pokemon['pokemon_data']['id'])\n else:\n group = sorted(group, key=lambda x: x['cp'], reverse=True)\n for item in group:\n pokemon_cp = item['cp']\n pokemon_potential = item['iv']\n\n if self.should_release_pokemon(pokemon_name, pokemon_cp, pokemon_potential):\n self.release_pokemon(pokemon_name, item['cp'], item['iv'], item['pokemon_data']['id'])\n\n def _release_pokemon_get_groups(self):\n pokemon_groups = {}\n self.api.get_player().get_inventory()\n inventory_req = self.api.call()\n\n if inventory_req.get('responses', False) is False:\n return pokemon_groups\n\n inventory_dict = inventory_req['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']\n\n user_web_inventory = 'web/inventory-%s.json' % (self.config.username)\n with open(user_web_inventory, 'w') as outfile:\n json.dump(inventory_dict, outfile)\n\n for pokemon in inventory_dict:\n try:\n reduce(dict.__getitem__, [\n \"inventory_item_data\", \"pokemon_data\", \"pokemon_id\"\n ], pokemon)\n except KeyError:\n continue\n\n pokemon_data = pokemon['inventory_item_data']['pokemon_data']\n group_id = pokemon_data['pokemon_id']\n group_pokemon_cp = pokemon_data['cp']\n group_pokemon_iv = 
self.get_pokemon_potential(pokemon_data)\n\n if group_id not in pokemon_groups:\n pokemon_groups[group_id] = []\n\n pokemon_groups[group_id].append({\n 'cp': group_pokemon_cp,\n 'iv': group_pokemon_iv,\n 'pokemon_data': pokemon_data\n })\n\n return pokemon_groups\n\n def get_pokemon_potential(self, pokemon_data):\n total_iv = 0\n iv_stats = ['individual_attack', 'individual_defense', 'individual_stamina']\n for individual_stat in iv_stats:\n try:\n total_iv += pokemon_data[individual_stat]\n except Exception:\n continue\n return round((total_iv / 45.0), 2)\n\n def should_release_pokemon(self, pokemon_name, cp, iv):\n release_config = self._get_release_config_for(pokemon_name)\n cp_iv_logic = release_config.get('logic')\n if not cp_iv_logic:\n cp_iv_logic = self._get_release_config_for('any').get('logic', 'and')\n\n release_results = {\n 'cp': False,\n 'iv': False,\n }\n\n if release_config.get('never_release', False):\n return False\n\n if release_config.get('always_release', False):\n return True\n\n release_cp = release_config.get('release_below_cp', 0)\n if cp < release_cp:\n release_results['cp'] = True\n\n release_iv = release_config.get('release_below_iv', 0)\n if iv < release_iv:\n release_results['iv'] = True\n\n logic_to_function = {\n 'or': lambda x, y: x or y,\n 'and': lambda x, y: x and y\n }\n\n if logic_to_function[cp_iv_logic](*release_results.values()):\n logger.log(\n \"Releasing {} with CP {} and IV {}. Matching release rule: CP < {} {} IV < {}. \".format(\n pokemon_name,\n cp,\n iv,\n release_cp,\n cp_iv_logic.upper(),\n release_iv\n ), 'yellow'\n )\n\n return logic_to_function[cp_iv_logic](*release_results.values())\n\n def release_pokemon(self, pokemon_name, cp, iv, pokemon_id):\n logger.log('Exchanging {} [CP {}] [Potential {}] for candy!'.format(pokemon_name,\n cp,\n iv), 'green')\n self.api.release_pokemon(pokemon_id=pokemon_id)\n response_dict = self.api.call()\n action_delay(self.config.action_wait_min, self.config.action_wait_max)\n\n def _get_release_config_for(self, pokemon):\n release_config = self.config.release.get(pokemon)\n if not release_config:\n release_config = self.config.release.get('any')\n if not release_config:\n release_config = {}\n return release_config\n\n def _validate_keep_best_config(self, pokemon_name):\n keep_best = False\n\n release_config = self._get_release_config_for(pokemon_name)\n\n keep_best_cp = release_config.get('keep_best_cp', 0)\n keep_best_iv = release_config.get('keep_best_iv', 0)\n\n if keep_best_cp or keep_best_iv:\n keep_best = True\n try:\n keep_best_cp = int(keep_best_cp)\n except ValueError:\n keep_best_cp = 0\n\n try:\n keep_best_iv = int(keep_best_iv)\n except ValueError:\n keep_best_iv = 0\n\n if keep_best_cp < 0 or keep_best_iv < 0:\n logger.log(\"Keep best can't be < 0. Ignore it.\", \"red\")\n keep_best = False\n\n if keep_best_cp == 0 and keep_best_iv == 0:\n keep_best = False\n\n return keep_best, keep_best_cp, keep_best_iv\n", "path": "pokemongo_bot/cell_workers/pokemon_transfer_worker.py"}]}
2,643
687
gh_patches_debug_2067
rasdani/github-patches
git_diff
elastic__apm-agent-python-1947
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dbapi2 fails to extract table name when using square brackets **Describe the bug**: ... Queries made to tables which requires escaping end up with the wrong span name. The following spans are SELECTs from four different tables, but only two unique span names appear. ![Screenshot 2023-10-31 at 19 13 46](https://github.com/elastic/apm-agent-python/assets/435885/5384fd4d-df13-4828-a9f2-b7e1c309f07f) **To Reproduce** Import package and call [extract_signature](https://github.com/elastic/apm-agent-python/blob/05332cd007560615b4421b1567659ff9f9634088/elasticapm/instrumentation/packages/dbapi2.py#L153): ```python >>> from elasticapm.instrumentation.packages import dbapi2 >>> dbapi2.extract_signature("SELECT username FROM user") 'SELECT FROM user' >>> dbapi2.extract_signature("SELECT username FROM [user]") 'SELECT FROM [' ``` **Environment (please complete the following information)** - OS: [e.g. Linux] - Python version: - Framework and version [e.g. Django 2.1]: - APM Server version: - Agent version: **Additional context** Add any other context about the problem here. - Agent config options <!-- be careful not to post sensitive information --> <details> <summary>Click to expand</summary> ``` replace this line with your agent config options remember to mask any sensitive fields like tokens ``` </details> - `requirements.txt`: <details> <summary>Click to expand</summary> ``` replace this line with your `requirements.txt` ``` </details> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `elasticapm/instrumentation/packages/dbapi2.py` Content: ``` 1 # BSD 3-Clause License 2 # 3 # Copyright (c) 2019, Elasticsearch BV 4 # All rights reserved. 5 # 6 # Redistribution and use in source and binary forms, with or without 7 # modification, are permitted provided that the following conditions are met: 8 # 9 # * Redistributions of source code must retain the above copyright notice, this 10 # list of conditions and the following disclaimer. 11 # 12 # * Redistributions in binary form must reproduce the above copyright notice, 13 # this list of conditions and the following disclaimer in the documentation 14 # and/or other materials provided with the distribution. 15 # 16 # * Neither the name of the copyright holder nor the names of its 17 # contributors may be used to endorse or promote products derived from 18 # this software without specific prior written permission. 19 # 20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 31 """Provides classes to instrument dbapi2 providers 32 33 https://www.python.org/dev/peps/pep-0249/ 34 """ 35 36 import re 37 38 import wrapt 39 40 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule 41 from elasticapm.traces import capture_span 42 from elasticapm.utils.encoding import force_text, shorten 43 44 45 class Literal(object): 46 def __init__(self, literal_type, content) -> None: 47 self.literal_type = literal_type 48 self.content = content 49 50 def __eq__(self, other): 51 return isinstance(other, Literal) and self.literal_type == other.literal_type and self.content == other.content 52 53 def __repr__(self): 54 return "<Literal {}{}{}>".format(self.literal_type, self.content, self.literal_type) 55 56 57 def look_for_table(sql, keyword): 58 tokens = tokenize(sql) 59 table_name = _scan_for_table_with_tokens(tokens, keyword) 60 if isinstance(table_name, Literal): 61 table_name = table_name.content.strip(table_name.literal_type) 62 return table_name 63 64 65 def _scan_for_table_with_tokens(tokens, keyword): 66 seen_keyword = False 67 for idx, lexeme in scan(tokens): 68 if seen_keyword: 69 if lexeme == "(": 70 return _scan_for_table_with_tokens(tokens[idx:], keyword) 71 else: 72 return lexeme 73 74 if isinstance(lexeme, str) and lexeme.upper() == keyword: 75 seen_keyword = True 76 77 78 def tokenize(sql): 79 # split on anything that is not a word character, excluding dots 80 return [t for t in re.split(r"([^\w.])", sql) if t != ""] 81 82 83 def scan(tokens): 84 literal_start_idx = None 85 literal_started = None 86 prev_was_escape = False 87 lexeme = [] 88 89 i = 0 90 while i < len(tokens): 91 token = tokens[i] 92 if literal_start_idx: 93 if prev_was_escape: 94 prev_was_escape = False 95 lexeme.append(token) 96 else: 97 if token == literal_started: 98 if literal_started == "'" and len(tokens) > i + 1 and tokens[i + 1] == "'": # double quotes 99 i += 1 100 lexeme.append("'") 101 else: 102 yield i, Literal(literal_started, "".join(lexeme)) 103 literal_start_idx = None 104 literal_started = None 105 lexeme = [] 106 else: 107 if token == "\\": 108 prev_was_escape = token 109 else: 110 prev_was_escape = False 111 lexeme.append(token) 112 elif literal_start_idx is None: 113 if token in ["'", '"', "`"]: 114 literal_start_idx = i 115 literal_started = token 116 elif token == "$": 117 # Postgres can use arbitrary characters between two $'s as a 118 # literal separation token, e.g.: $fish$ literal $fish$ 119 # This part will detect that and skip over the literal. 120 try: 121 # Closing dollar of the opening quote, 122 # i.e. the second $ in the first $fish$ 123 closing_dollar_idx = tokens.index("$", i + 1) 124 except ValueError: 125 pass 126 else: 127 quote = tokens[i : closing_dollar_idx + 1] 128 length = len(quote) 129 # Opening dollar of the closing quote, 130 # i.e. 
the first $ in the second $fish$ 131 closing_quote_idx = closing_dollar_idx + 1 132 while True: 133 try: 134 closing_quote_idx = tokens.index("$", closing_quote_idx) 135 except ValueError: 136 break 137 if tokens[closing_quote_idx : closing_quote_idx + length] == quote: 138 yield i, Literal( 139 "".join(quote), "".join(tokens[closing_dollar_idx + 1 : closing_quote_idx]) 140 ) 141 i = closing_quote_idx + length 142 break 143 closing_quote_idx += 1 144 else: 145 if token != " ": 146 yield i, token 147 i += 1 148 149 if lexeme: 150 yield i, lexeme 151 152 153 def extract_signature(sql): 154 """ 155 Extracts a minimal signature from a given SQL query 156 :param sql: the SQL statement 157 :return: a string representing the signature 158 """ 159 sql = force_text(sql) 160 sql = sql.strip() 161 first_space = sql.find(" ") 162 if first_space < 0: 163 return sql 164 165 second_space = sql.find(" ", first_space + 1) 166 167 sql_type = sql[0:first_space].upper() 168 169 if sql_type in ["INSERT", "DELETE"]: 170 keyword = "INTO" if sql_type == "INSERT" else "FROM" 171 sql_type = sql_type + " " + keyword 172 173 object_name = look_for_table(sql, keyword) 174 elif sql_type in ["CREATE", "DROP"]: 175 # 2nd word is part of SQL type 176 sql_type = sql_type + sql[first_space:second_space] 177 object_name = "" 178 elif sql_type == "UPDATE": 179 object_name = look_for_table(sql, "UPDATE") 180 elif sql_type == "SELECT": 181 # Name is first table 182 try: 183 sql_type = "SELECT FROM" 184 object_name = look_for_table(sql, "FROM") 185 except Exception: 186 object_name = "" 187 elif sql_type in ["EXEC", "EXECUTE"]: 188 sql_type = "EXECUTE" 189 end = second_space if second_space > first_space else len(sql) 190 object_name = sql[first_space + 1 : end] 191 elif sql_type == "CALL": 192 first_paren = sql.find("(", first_space) 193 end = first_paren if first_paren > first_space else len(sql) 194 procedure_name = sql[first_space + 1 : end].rstrip(";") 195 object_name = procedure_name + "()" 196 else: 197 # No name 198 object_name = "" 199 200 signature = " ".join(filter(bool, [sql_type, object_name])) 201 return signature 202 203 204 QUERY_ACTION = "query" 205 EXEC_ACTION = "exec" 206 PROCEDURE_STATEMENTS = ["EXEC", "EXECUTE", "CALL"] 207 208 209 def extract_action_from_signature(signature, default): 210 if signature.split(" ")[0] in PROCEDURE_STATEMENTS: 211 return EXEC_ACTION 212 return default 213 214 215 class CursorProxy(wrapt.ObjectProxy): 216 provider_name = None 217 DML_QUERIES = ("INSERT", "DELETE", "UPDATE") 218 219 def __init__(self, wrapped, destination_info=None) -> None: 220 super(CursorProxy, self).__init__(wrapped) 221 self._self_destination_info = destination_info or {} 222 223 def callproc(self, procname, params=None): 224 return self._trace_sql(self.__wrapped__.callproc, procname, params, action=EXEC_ACTION) 225 226 def execute(self, sql, params=None): 227 return self._trace_sql(self.__wrapped__.execute, sql, params) 228 229 def executemany(self, sql, param_list): 230 return self._trace_sql(self.__wrapped__.executemany, sql, param_list) 231 232 def _bake_sql(self, sql): 233 """ 234 Method to turn the "sql" argument into a string. 
Most database backends simply return 235 the given object, as it is already a string 236 """ 237 return sql 238 239 def _trace_sql(self, method, sql, params, action=QUERY_ACTION): 240 sql_string = self._bake_sql(sql) 241 if action == EXEC_ACTION: 242 signature = sql_string + "()" 243 else: 244 signature = self.extract_signature(sql_string) 245 action = extract_action_from_signature(signature, action) 246 247 # Truncate sql_string to 10000 characters to prevent large queries from 248 # causing an error to APM server. 249 sql_string = shorten(sql_string, string_length=10000) 250 251 with capture_span( 252 signature, 253 span_type="db", 254 span_subtype=self.provider_name, 255 span_action=action, 256 extra={ 257 "db": {"type": "sql", "statement": sql_string, "instance": getattr(self, "_self_database", None)}, 258 "destination": self._self_destination_info, 259 }, 260 skip_frames=1, 261 leaf=True, 262 ) as span: 263 if params is None: 264 result = method(sql) 265 else: 266 result = method(sql, params) 267 # store "rows affected", but only for DML queries like insert/update/delete 268 if span and self.rowcount not in (-1, None) and signature.startswith(self.DML_QUERIES): 269 span.update_context("db", {"rows_affected": self.rowcount}) 270 return result 271 272 def extract_signature(self, sql): 273 raise NotImplementedError() 274 275 276 class ConnectionProxy(wrapt.ObjectProxy): 277 cursor_proxy = CursorProxy 278 279 def __init__(self, wrapped, destination_info=None) -> None: 280 super(ConnectionProxy, self).__init__(wrapped) 281 self._self_destination_info = destination_info 282 283 def cursor(self, *args, **kwargs): 284 return self.cursor_proxy(self.__wrapped__.cursor(*args, **kwargs), self._self_destination_info) 285 286 287 class DbApi2Instrumentation(AbstractInstrumentedModule): 288 connect_method = None 289 290 def call(self, module, method, wrapped, instance, args, kwargs): 291 return ConnectionProxy(wrapped(*args, **kwargs)) 292 293 def call_if_sampling(self, module, method, wrapped, instance, args, kwargs): 294 # Contrasting to the superclass implementation, we *always* want to 295 # return a proxied connection, even if there is no ongoing elasticapm 296 # transaction yet. This ensures that we instrument the cursor once 297 # the transaction started. 298 return self.call(module, method, wrapped, instance, args, kwargs) 299 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/elasticapm/instrumentation/packages/dbapi2.py b/elasticapm/instrumentation/packages/dbapi2.py --- a/elasticapm/instrumentation/packages/dbapi2.py +++ b/elasticapm/instrumentation/packages/dbapi2.py @@ -76,8 +76,8 @@ def tokenize(sql): - # split on anything that is not a word character, excluding dots - return [t for t in re.split(r"([^\w.])", sql) if t != ""] + # split on anything that is not a word character or a square bracket, excluding dots + return [t for t in re.split(r"([^\w.\[\]])", sql) if t != ""] def scan(tokens):
{"golden_diff": "diff --git a/elasticapm/instrumentation/packages/dbapi2.py b/elasticapm/instrumentation/packages/dbapi2.py\n--- a/elasticapm/instrumentation/packages/dbapi2.py\n+++ b/elasticapm/instrumentation/packages/dbapi2.py\n@@ -76,8 +76,8 @@\n \n \n def tokenize(sql):\n- # split on anything that is not a word character, excluding dots\n- return [t for t in re.split(r\"([^\\w.])\", sql) if t != \"\"]\n+ # split on anything that is not a word character or a square bracket, excluding dots\n+ return [t for t in re.split(r\"([^\\w.\\[\\]])\", sql) if t != \"\"]\n \n \n def scan(tokens):\n", "issue": "dbapi2 fails to extract table name when using square brackets \n**Describe the bug**: ...\r\n\r\nQueries made to tables which requires escaping end up with the wrong span name.\r\n\r\nThe following spans are SELECTs from four different tables, but only two unique span names appear.\r\n![Screenshot 2023-10-31 at 19 13 46](https://github.com/elastic/apm-agent-python/assets/435885/5384fd4d-df13-4828-a9f2-b7e1c309f07f)\r\n\r\n**To Reproduce**\r\n\r\nImport package and call [extract_signature](https://github.com/elastic/apm-agent-python/blob/05332cd007560615b4421b1567659ff9f9634088/elasticapm/instrumentation/packages/dbapi2.py#L153):\r\n\r\n```python\r\n>>> from elasticapm.instrumentation.packages import dbapi2\r\n>>> dbapi2.extract_signature(\"SELECT username FROM user\")\r\n'SELECT FROM user'\r\n>>> dbapi2.extract_signature(\"SELECT username FROM [user]\")\r\n'SELECT FROM ['\r\n```\r\n\r\n**Environment (please complete the following information)**\r\n- OS: [e.g. Linux]\r\n- Python version:\r\n- Framework and version [e.g. Django 2.1]:\r\n- APM Server version:\r\n- Agent version:\r\n\r\n\r\n**Additional context**\r\n\r\nAdd any other context about the problem here.\r\n\r\n- Agent config options <!-- be careful not to post sensitive information -->\r\n <details>\r\n <summary>Click to expand</summary>\r\n\r\n ```\r\n replace this line with your agent config options\r\n remember to mask any sensitive fields like tokens\r\n ```\r\n </details>\r\n- `requirements.txt`:\r\n <details>\r\n <summary>Click to expand</summary>\r\n\r\n ```\r\n replace this line with your `requirements.txt`\r\n ```\r\n </details>\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Provides classes to instrument dbapi2 providers\n\nhttps://www.python.org/dev/peps/pep-0249/\n\"\"\"\n\nimport re\n\nimport wrapt\n\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import capture_span\nfrom elasticapm.utils.encoding import force_text, shorten\n\n\nclass Literal(object):\n def __init__(self, literal_type, content) -> None:\n self.literal_type = literal_type\n self.content = content\n\n def __eq__(self, other):\n return isinstance(other, Literal) and self.literal_type == other.literal_type and self.content == other.content\n\n def __repr__(self):\n return \"<Literal {}{}{}>\".format(self.literal_type, self.content, self.literal_type)\n\n\ndef look_for_table(sql, keyword):\n tokens = tokenize(sql)\n table_name = _scan_for_table_with_tokens(tokens, keyword)\n if isinstance(table_name, Literal):\n table_name = table_name.content.strip(table_name.literal_type)\n return table_name\n\n\ndef _scan_for_table_with_tokens(tokens, keyword):\n seen_keyword = False\n for idx, lexeme in scan(tokens):\n if seen_keyword:\n if lexeme == \"(\":\n return _scan_for_table_with_tokens(tokens[idx:], keyword)\n else:\n return lexeme\n\n if isinstance(lexeme, str) and lexeme.upper() == keyword:\n seen_keyword = True\n\n\ndef tokenize(sql):\n # split on anything that is not a word character, excluding dots\n return [t for t in re.split(r\"([^\\w.])\", sql) if t != \"\"]\n\n\ndef scan(tokens):\n literal_start_idx = None\n literal_started = None\n prev_was_escape = False\n lexeme = []\n\n i = 0\n while i < len(tokens):\n token = tokens[i]\n if literal_start_idx:\n if prev_was_escape:\n prev_was_escape = False\n lexeme.append(token)\n else:\n if token == literal_started:\n if literal_started == \"'\" and len(tokens) > i + 1 and tokens[i + 1] == \"'\": # double quotes\n i += 1\n lexeme.append(\"'\")\n else:\n yield i, Literal(literal_started, \"\".join(lexeme))\n literal_start_idx = None\n literal_started = None\n lexeme = []\n else:\n if token == \"\\\\\":\n prev_was_escape = token\n else:\n prev_was_escape = False\n lexeme.append(token)\n elif literal_start_idx is None:\n if token in [\"'\", '\"', \"`\"]:\n literal_start_idx = i\n literal_started = token\n elif token == \"$\":\n # Postgres can use arbitrary characters between two $'s as a\n # literal separation token, e.g.: $fish$ literal $fish$\n # This part will detect that and skip over the literal.\n try:\n # Closing dollar of the opening quote,\n # i.e. the second $ in the first $fish$\n closing_dollar_idx = tokens.index(\"$\", i + 1)\n except ValueError:\n pass\n else:\n quote = tokens[i : closing_dollar_idx + 1]\n length = len(quote)\n # Opening dollar of the closing quote,\n # i.e. 
the first $ in the second $fish$\n closing_quote_idx = closing_dollar_idx + 1\n while True:\n try:\n closing_quote_idx = tokens.index(\"$\", closing_quote_idx)\n except ValueError:\n break\n if tokens[closing_quote_idx : closing_quote_idx + length] == quote:\n yield i, Literal(\n \"\".join(quote), \"\".join(tokens[closing_dollar_idx + 1 : closing_quote_idx])\n )\n i = closing_quote_idx + length\n break\n closing_quote_idx += 1\n else:\n if token != \" \":\n yield i, token\n i += 1\n\n if lexeme:\n yield i, lexeme\n\n\ndef extract_signature(sql):\n \"\"\"\n Extracts a minimal signature from a given SQL query\n :param sql: the SQL statement\n :return: a string representing the signature\n \"\"\"\n sql = force_text(sql)\n sql = sql.strip()\n first_space = sql.find(\" \")\n if first_space < 0:\n return sql\n\n second_space = sql.find(\" \", first_space + 1)\n\n sql_type = sql[0:first_space].upper()\n\n if sql_type in [\"INSERT\", \"DELETE\"]:\n keyword = \"INTO\" if sql_type == \"INSERT\" else \"FROM\"\n sql_type = sql_type + \" \" + keyword\n\n object_name = look_for_table(sql, keyword)\n elif sql_type in [\"CREATE\", \"DROP\"]:\n # 2nd word is part of SQL type\n sql_type = sql_type + sql[first_space:second_space]\n object_name = \"\"\n elif sql_type == \"UPDATE\":\n object_name = look_for_table(sql, \"UPDATE\")\n elif sql_type == \"SELECT\":\n # Name is first table\n try:\n sql_type = \"SELECT FROM\"\n object_name = look_for_table(sql, \"FROM\")\n except Exception:\n object_name = \"\"\n elif sql_type in [\"EXEC\", \"EXECUTE\"]:\n sql_type = \"EXECUTE\"\n end = second_space if second_space > first_space else len(sql)\n object_name = sql[first_space + 1 : end]\n elif sql_type == \"CALL\":\n first_paren = sql.find(\"(\", first_space)\n end = first_paren if first_paren > first_space else len(sql)\n procedure_name = sql[first_space + 1 : end].rstrip(\";\")\n object_name = procedure_name + \"()\"\n else:\n # No name\n object_name = \"\"\n\n signature = \" \".join(filter(bool, [sql_type, object_name]))\n return signature\n\n\nQUERY_ACTION = \"query\"\nEXEC_ACTION = \"exec\"\nPROCEDURE_STATEMENTS = [\"EXEC\", \"EXECUTE\", \"CALL\"]\n\n\ndef extract_action_from_signature(signature, default):\n if signature.split(\" \")[0] in PROCEDURE_STATEMENTS:\n return EXEC_ACTION\n return default\n\n\nclass CursorProxy(wrapt.ObjectProxy):\n provider_name = None\n DML_QUERIES = (\"INSERT\", \"DELETE\", \"UPDATE\")\n\n def __init__(self, wrapped, destination_info=None) -> None:\n super(CursorProxy, self).__init__(wrapped)\n self._self_destination_info = destination_info or {}\n\n def callproc(self, procname, params=None):\n return self._trace_sql(self.__wrapped__.callproc, procname, params, action=EXEC_ACTION)\n\n def execute(self, sql, params=None):\n return self._trace_sql(self.__wrapped__.execute, sql, params)\n\n def executemany(self, sql, param_list):\n return self._trace_sql(self.__wrapped__.executemany, sql, param_list)\n\n def _bake_sql(self, sql):\n \"\"\"\n Method to turn the \"sql\" argument into a string. 
Most database backends simply return\n the given object, as it is already a string\n \"\"\"\n return sql\n\n def _trace_sql(self, method, sql, params, action=QUERY_ACTION):\n sql_string = self._bake_sql(sql)\n if action == EXEC_ACTION:\n signature = sql_string + \"()\"\n else:\n signature = self.extract_signature(sql_string)\n action = extract_action_from_signature(signature, action)\n\n # Truncate sql_string to 10000 characters to prevent large queries from\n # causing an error to APM server.\n sql_string = shorten(sql_string, string_length=10000)\n\n with capture_span(\n signature,\n span_type=\"db\",\n span_subtype=self.provider_name,\n span_action=action,\n extra={\n \"db\": {\"type\": \"sql\", \"statement\": sql_string, \"instance\": getattr(self, \"_self_database\", None)},\n \"destination\": self._self_destination_info,\n },\n skip_frames=1,\n leaf=True,\n ) as span:\n if params is None:\n result = method(sql)\n else:\n result = method(sql, params)\n # store \"rows affected\", but only for DML queries like insert/update/delete\n if span and self.rowcount not in (-1, None) and signature.startswith(self.DML_QUERIES):\n span.update_context(\"db\", {\"rows_affected\": self.rowcount})\n return result\n\n def extract_signature(self, sql):\n raise NotImplementedError()\n\n\nclass ConnectionProxy(wrapt.ObjectProxy):\n cursor_proxy = CursorProxy\n\n def __init__(self, wrapped, destination_info=None) -> None:\n super(ConnectionProxy, self).__init__(wrapped)\n self._self_destination_info = destination_info\n\n def cursor(self, *args, **kwargs):\n return self.cursor_proxy(self.__wrapped__.cursor(*args, **kwargs), self._self_destination_info)\n\n\nclass DbApi2Instrumentation(AbstractInstrumentedModule):\n connect_method = None\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n return ConnectionProxy(wrapped(*args, **kwargs))\n\n def call_if_sampling(self, module, method, wrapped, instance, args, kwargs):\n # Contrasting to the superclass implementation, we *always* want to\n # return a proxied connection, even if there is no ongoing elasticapm\n # transaction yet. This ensures that we instrument the cursor once\n # the transaction started.\n return self.call(module, method, wrapped, instance, args, kwargs)\n", "path": "elasticapm/instrumentation/packages/dbapi2.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Provides classes to instrument dbapi2 providers\n\nhttps://www.python.org/dev/peps/pep-0249/\n\"\"\"\n\nimport re\n\nimport wrapt\n\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import capture_span\nfrom elasticapm.utils.encoding import force_text, shorten\n\n\nclass Literal(object):\n def __init__(self, literal_type, content) -> None:\n self.literal_type = literal_type\n self.content = content\n\n def __eq__(self, other):\n return isinstance(other, Literal) and self.literal_type == other.literal_type and self.content == other.content\n\n def __repr__(self):\n return \"<Literal {}{}{}>\".format(self.literal_type, self.content, self.literal_type)\n\n\ndef look_for_table(sql, keyword):\n tokens = tokenize(sql)\n table_name = _scan_for_table_with_tokens(tokens, keyword)\n if isinstance(table_name, Literal):\n table_name = table_name.content.strip(table_name.literal_type)\n return table_name\n\n\ndef _scan_for_table_with_tokens(tokens, keyword):\n seen_keyword = False\n for idx, lexeme in scan(tokens):\n if seen_keyword:\n if lexeme == \"(\":\n return _scan_for_table_with_tokens(tokens[idx:], keyword)\n else:\n return lexeme\n\n if isinstance(lexeme, str) and lexeme.upper() == keyword:\n seen_keyword = True\n\n\ndef tokenize(sql):\n # split on anything that is not a word character or a square bracket, excluding dots\n return [t for t in re.split(r\"([^\\w.\\[\\]])\", sql) if t != \"\"]\n\n\ndef scan(tokens):\n literal_start_idx = None\n literal_started = None\n prev_was_escape = False\n lexeme = []\n\n i = 0\n while i < len(tokens):\n token = tokens[i]\n if literal_start_idx:\n if prev_was_escape:\n prev_was_escape = False\n lexeme.append(token)\n else:\n if token == literal_started:\n if literal_started == \"'\" and len(tokens) > i + 1 and tokens[i + 1] == \"'\": # double quotes\n i += 1\n lexeme.append(\"'\")\n else:\n yield i, Literal(literal_started, \"\".join(lexeme))\n literal_start_idx = None\n literal_started = None\n lexeme = []\n else:\n if token == \"\\\\\":\n prev_was_escape = token\n else:\n prev_was_escape = False\n lexeme.append(token)\n elif literal_start_idx is None:\n if token in [\"'\", '\"', \"`\"]:\n literal_start_idx = i\n literal_started = token\n elif token == \"$\":\n # Postgres can use arbitrary characters between two $'s as a\n # literal separation token, e.g.: $fish$ literal $fish$\n # This part will detect that and skip over the literal.\n try:\n # Closing dollar of the opening quote,\n # i.e. the second $ in the first $fish$\n closing_dollar_idx = tokens.index(\"$\", i + 1)\n except ValueError:\n pass\n else:\n quote = tokens[i : closing_dollar_idx + 1]\n length = len(quote)\n # Opening dollar of the closing quote,\n # i.e. 
the first $ in the second $fish$\n closing_quote_idx = closing_dollar_idx + 1\n while True:\n try:\n closing_quote_idx = tokens.index(\"$\", closing_quote_idx)\n except ValueError:\n break\n if tokens[closing_quote_idx : closing_quote_idx + length] == quote:\n yield i, Literal(\n \"\".join(quote), \"\".join(tokens[closing_dollar_idx + 1 : closing_quote_idx])\n )\n i = closing_quote_idx + length\n break\n closing_quote_idx += 1\n else:\n if token != \" \":\n yield i, token\n i += 1\n\n if lexeme:\n yield i, lexeme\n\n\ndef extract_signature(sql):\n \"\"\"\n Extracts a minimal signature from a given SQL query\n :param sql: the SQL statement\n :return: a string representing the signature\n \"\"\"\n sql = force_text(sql)\n sql = sql.strip()\n first_space = sql.find(\" \")\n if first_space < 0:\n return sql\n\n second_space = sql.find(\" \", first_space + 1)\n\n sql_type = sql[0:first_space].upper()\n\n if sql_type in [\"INSERT\", \"DELETE\"]:\n keyword = \"INTO\" if sql_type == \"INSERT\" else \"FROM\"\n sql_type = sql_type + \" \" + keyword\n\n object_name = look_for_table(sql, keyword)\n elif sql_type in [\"CREATE\", \"DROP\"]:\n # 2nd word is part of SQL type\n sql_type = sql_type + sql[first_space:second_space]\n object_name = \"\"\n elif sql_type == \"UPDATE\":\n object_name = look_for_table(sql, \"UPDATE\")\n elif sql_type == \"SELECT\":\n # Name is first table\n try:\n sql_type = \"SELECT FROM\"\n object_name = look_for_table(sql, \"FROM\")\n except Exception:\n object_name = \"\"\n elif sql_type in [\"EXEC\", \"EXECUTE\"]:\n sql_type = \"EXECUTE\"\n end = second_space if second_space > first_space else len(sql)\n object_name = sql[first_space + 1 : end]\n elif sql_type == \"CALL\":\n first_paren = sql.find(\"(\", first_space)\n end = first_paren if first_paren > first_space else len(sql)\n procedure_name = sql[first_space + 1 : end].rstrip(\";\")\n object_name = procedure_name + \"()\"\n else:\n # No name\n object_name = \"\"\n\n signature = \" \".join(filter(bool, [sql_type, object_name]))\n return signature\n\n\nQUERY_ACTION = \"query\"\nEXEC_ACTION = \"exec\"\nPROCEDURE_STATEMENTS = [\"EXEC\", \"EXECUTE\", \"CALL\"]\n\n\ndef extract_action_from_signature(signature, default):\n if signature.split(\" \")[0] in PROCEDURE_STATEMENTS:\n return EXEC_ACTION\n return default\n\n\nclass CursorProxy(wrapt.ObjectProxy):\n provider_name = None\n DML_QUERIES = (\"INSERT\", \"DELETE\", \"UPDATE\")\n\n def __init__(self, wrapped, destination_info=None) -> None:\n super(CursorProxy, self).__init__(wrapped)\n self._self_destination_info = destination_info or {}\n\n def callproc(self, procname, params=None):\n return self._trace_sql(self.__wrapped__.callproc, procname, params, action=EXEC_ACTION)\n\n def execute(self, sql, params=None):\n return self._trace_sql(self.__wrapped__.execute, sql, params)\n\n def executemany(self, sql, param_list):\n return self._trace_sql(self.__wrapped__.executemany, sql, param_list)\n\n def _bake_sql(self, sql):\n \"\"\"\n Method to turn the \"sql\" argument into a string. 
Most database backends simply return\n the given object, as it is already a string\n \"\"\"\n return sql\n\n def _trace_sql(self, method, sql, params, action=QUERY_ACTION):\n sql_string = self._bake_sql(sql)\n if action == EXEC_ACTION:\n signature = sql_string + \"()\"\n else:\n signature = self.extract_signature(sql_string)\n action = extract_action_from_signature(signature, action)\n\n # Truncate sql_string to 10000 characters to prevent large queries from\n # causing an error to APM server.\n sql_string = shorten(sql_string, string_length=10000)\n\n with capture_span(\n signature,\n span_type=\"db\",\n span_subtype=self.provider_name,\n span_action=action,\n extra={\n \"db\": {\"type\": \"sql\", \"statement\": sql_string, \"instance\": getattr(self, \"_self_database\", None)},\n \"destination\": self._self_destination_info,\n },\n skip_frames=1,\n leaf=True,\n ) as span:\n if params is None:\n result = method(sql)\n else:\n result = method(sql, params)\n # store \"rows affected\", but only for DML queries like insert/update/delete\n if span and self.rowcount not in (-1, None) and signature.startswith(self.DML_QUERIES):\n span.update_context(\"db\", {\"rows_affected\": self.rowcount})\n return result\n\n def extract_signature(self, sql):\n raise NotImplementedError()\n\n\nclass ConnectionProxy(wrapt.ObjectProxy):\n cursor_proxy = CursorProxy\n\n def __init__(self, wrapped, destination_info=None) -> None:\n super(ConnectionProxy, self).__init__(wrapped)\n self._self_destination_info = destination_info\n\n def cursor(self, *args, **kwargs):\n return self.cursor_proxy(self.__wrapped__.cursor(*args, **kwargs), self._self_destination_info)\n\n\nclass DbApi2Instrumentation(AbstractInstrumentedModule):\n connect_method = None\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n return ConnectionProxy(wrapped(*args, **kwargs))\n\n def call_if_sampling(self, module, method, wrapped, instance, args, kwargs):\n # Contrasting to the superclass implementation, we *always* want to\n # return a proxied connection, even if there is no ongoing elasticapm\n # transaction yet. This ensures that we instrument the cursor once\n # the transaction started.\n return self.call(module, method, wrapped, instance, args, kwargs)\n", "path": "elasticapm/instrumentation/packages/dbapi2.py"}]}
3919
169
gh_patches_debug_67110
rasdani/github-patches
git_diff
beeware__toga-1499
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Window is always resizable, minimizable and closeable on windows **Describe the bug** Window seems to ignore `resizable`, `minimizable` and `closeable` args on OS Windows: if they're set to `False`, window still can be resized, minimized and closed. **To Reproduce** Steps to reproduce the behavior: 1. Pass `minimizable=False`, `resizable=False` and `closeable=False` to `Window.__init__`. 2. Resize it, minimize it, close it. **Expected behavior** Window should keep its size, and all of titlebar buttons should be hidden (minimize and close due to corresponding args, and fullscreen due to resizable). **Simple example** ```python import toga class App(toga.App): def open_window(self, widget: toga.Widget): # resizable, minimizable and closeable are False window = toga.Window( "window id", "window title", size=(320, 240), resizeable=False, minimizable=False, closeable=False ) window.content = toga.Box() self.windows += window window.show() def startup(self): # resizable and minimizable are False self.main_window = toga.MainWindow( title=self.formal_name, factory=self.factory, resizeable=False, minimizable=False ) box = toga.Box() btn = toga.Button("Open second window", on_press=self.open_window) box.add(btn) self.main_window.content = box self.main_window.show() def main(): App("testing", "testing").main_loop() if __name__ == "__main__": main() ``` **Screenshots** ![resizing](https://user-images.githubusercontent.com/2825638/152692206-4b1f3a1b-0945-4895-a71b-31c379e26fe6.gif) **Environment:** - Operating System: Windows 10 - Python version: 3.10.1 - Software versions: - Toga: 0.3.0.dev30 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/winforms/toga_winforms/window.py` Content: ``` 1 from toga import GROUP_BREAK, SECTION_BREAK 2 3 from .libs import Point, Size, WinForms 4 5 6 class WinFormsViewport: 7 def __init__(self, native, frame): 8 self.native = native 9 self.frame = frame 10 self.baseline_dpi = 96 11 12 @property 13 def width(self): 14 # Treat `native=None` as a 0x0 viewport 15 if self.native is None: 16 return 0 17 return self.native.ClientSize.Width 18 19 @property 20 def height(self): 21 if self.native is None: 22 return 0 23 # Subtract any vertical shift of the frame. This is to allow 24 # for toolbars, or any other viewport-level decoration. 25 return self.native.ClientSize.Height - self.frame.vertical_shift 26 27 @property 28 def dpi(self): 29 if self.native is None: 30 return self.baseline_dpi 31 return self.native.CreateGraphics().DpiX 32 33 34 class Window: 35 def __init__(self, interface, title, position, size): 36 self.interface = interface 37 self.interface._impl = self 38 39 # Winforms close handling is caught on the FormClosing handler. To allow 40 # for async close handling, we need to be able to abort this close 41 # event, and then manually cause the close as part of the async result 42 # handling. However, this then causes an is_closing event, which we need 43 # to ignore. The `_is_closing` flag lets us easily identify if the 44 # window is in the process of closing. 
45 self._is_closing = False 46 47 self.native = WinForms.Form() 48 self.native.interface = self.interface 49 self.native.FormClosing += self.winforms_FormClosing 50 51 self.set_title(title) 52 self.set_size(size) 53 self.set_position(position) 54 55 self.toolbar_native = None 56 self.toolbar_items = None 57 if self.native.interface.resizeable: 58 self.native.Resize += self.winforms_resize 59 else: 60 self.native.FormBorderStyle = self.native.FormBorderStyle.FixedSingle 61 self.native.MaximizeBox = False 62 63 def create_toolbar(self): 64 self.toolbar_native = WinForms.ToolStrip() 65 for cmd in self.interface.toolbar: 66 if cmd == GROUP_BREAK: 67 item = WinForms.ToolStripSeparator() 68 elif cmd == SECTION_BREAK: 69 item = WinForms.ToolStripSeparator() 70 else: 71 if cmd.icon is not None: 72 native_icon = cmd.icon._impl.native 73 item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap()) 74 else: 75 item = WinForms.ToolStripMenuItem(cmd.label) 76 item.Click += cmd._impl.as_handler() 77 cmd._impl.native.append(item) 78 self.toolbar_native.Items.Add(item) 79 80 def get_position(self): 81 return (self.native.Location.X, self.native.Location.Y) 82 83 def set_position(self, position): 84 self.native.Location = Point(*position) 85 86 def get_size(self): 87 return (self.native.ClientSize.Width, self.native.ClientSize.Height) 88 89 def set_size(self, size): 90 self.native.ClientSize = Size(*size) 91 92 def set_app(self, app): 93 if app is None: 94 return 95 icon_impl = app.interface.icon._impl 96 if icon_impl is None: 97 return 98 self.native.Icon = icon_impl.native 99 100 @property 101 def vertical_shift(self): 102 # vertical shift is the toolbar height or 0 103 result = 0 104 try: 105 result += self.native.interface._impl.toolbar_native.Height 106 except AttributeError: 107 pass 108 try: 109 result += self.native.interface._impl.native.MainMenuStrip.Height 110 except AttributeError: 111 pass 112 return result 113 114 def set_content(self, widget): 115 if self.toolbar_native: 116 self.native.Controls.Add(self.toolbar_native) 117 # Create the lookup table of menu items, 118 # then force the creation of the menus. 119 self.native.Controls.Add(widget.native) 120 121 # Set the widget's viewport to be based on the window's content. 122 widget.viewport = WinFormsViewport(native=self.native, frame=self) 123 widget.frame = self 124 125 # Add all children to the content widget. 126 for child in widget.interface.children: 127 child._impl.container = widget 128 129 def get_title(self): 130 return self.native.Text 131 132 def set_title(self, title): 133 self.native.Text = title 134 135 def show(self): 136 # The first render of the content will establish the 137 # minimum possible content size; use that to enforce 138 # a minimum window size. 139 TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight 140 # Now that the content is visible, we can do our initial hinting, 141 # and use that as the basis for setting the minimum window size. 
142 self.interface.content._impl.rehint() 143 self.interface.content.style.layout( 144 self.interface.content, 145 WinFormsViewport(native=None, frame=None), 146 ) 147 self.native.MinimumSize = Size( 148 int(self.interface.content.layout.width), 149 int(self.interface.content.layout.height) + TITLEBAR_HEIGHT 150 ) 151 self.interface.content.refresh() 152 153 if self.interface is not self.interface.app._main_window: 154 self.native.Icon = self.interface.app.icon.bind(self.interface.factory).native 155 self.native.Show() 156 157 def winforms_FormClosing(self, sender, event): 158 # If the app is exiting, or a manual close has been requested, 159 # don't get confirmation; just close. 160 if not self.interface.app._impl._is_exiting and not self._is_closing: 161 if not self.interface.closeable: 162 # Closeability is implemented by shortcutting the close handler. 163 event.Cancel = True 164 elif self.interface.on_close: 165 # If there is an on_close event handler, process it; 166 # but then cancel the close event. If the result of 167 # on_close handling indicates the window should close, 168 # then it will be manually triggered as part of that 169 # result handling. 170 self.interface.on_close(self) 171 event.Cancel = True 172 173 def set_full_screen(self, is_full_screen): 174 self.interface.factory.not_implemented('Window.set_full_screen()') 175 176 def set_on_close(self, handler): 177 pass 178 179 def close(self): 180 self._is_closing = True 181 self.native.Close() 182 183 def winforms_resize(self, sender, args): 184 if self.interface.content: 185 # Re-layout the content 186 self.interface.content.refresh() 187 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/winforms/toga_winforms/window.py b/src/winforms/toga_winforms/window.py --- a/src/winforms/toga_winforms/window.py +++ b/src/winforms/toga_winforms/window.py @@ -48,6 +48,8 @@ self.native.interface = self.interface self.native.FormClosing += self.winforms_FormClosing + self.native.MinimizeBox = self.native.interface.minimizable + self.set_title(title) self.set_size(size) self.set_position(position)
{"golden_diff": "diff --git a/src/winforms/toga_winforms/window.py b/src/winforms/toga_winforms/window.py\n--- a/src/winforms/toga_winforms/window.py\n+++ b/src/winforms/toga_winforms/window.py\n@@ -48,6 +48,8 @@\n self.native.interface = self.interface\n self.native.FormClosing += self.winforms_FormClosing\n \n+ self.native.MinimizeBox = self.native.interface.minimizable\n+\n self.set_title(title)\n self.set_size(size)\n self.set_position(position)\n", "issue": "Window is always resizable, minimizable and closeable on windows\n**Describe the bug**\r\nWindow seems to ignore `resizable`, `minimizable` and `closeable` args on OS Windows: if they're set to `False`, window still can be resized, minimized and closed.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Pass `minimizable=False`, `resizable=False` and `closeable=False` to `Window.__init__`.\r\n2. Resize it, minimize it, close it.\r\n\r\n**Expected behavior**\r\nWindow should keep its size, and all of titlebar buttons should be hidden (minimize and close due to corresponding args, and fullscreen due to resizable).\r\n\r\n**Simple example**\r\n```python\r\nimport toga\r\n\r\n\r\nclass App(toga.App):\r\n def open_window(self, widget: toga.Widget):\r\n # resizable, minimizable and closeable are False\r\n window = toga.Window(\r\n \"window id\", \"window title\", size=(320, 240), resizeable=False, minimizable=False, closeable=False\r\n )\r\n window.content = toga.Box()\r\n self.windows += window\r\n window.show()\r\n\r\n def startup(self):\r\n # resizable and minimizable are False\r\n self.main_window = toga.MainWindow(\r\n title=self.formal_name, factory=self.factory, resizeable=False, minimizable=False\r\n )\r\n\r\n box = toga.Box()\r\n btn = toga.Button(\"Open second window\", on_press=self.open_window)\r\n box.add(btn)\r\n self.main_window.content = box\r\n\r\n self.main_window.show()\r\n\r\n\r\ndef main():\r\n App(\"testing\", \"testing\").main_loop()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n```\r\n\r\n**Screenshots**\r\n![resizing](https://user-images.githubusercontent.com/2825638/152692206-4b1f3a1b-0945-4895-a71b-31c379e26fe6.gif)\r\n\r\n\r\n**Environment:**\r\n - Operating System: Windows 10\r\n - Python version: 3.10.1\r\n - Software versions:\r\n - Toga: 0.3.0.dev30\n", "before_files": [{"content": "from toga import GROUP_BREAK, SECTION_BREAK\n\nfrom .libs import Point, Size, WinForms\n\n\nclass WinFormsViewport:\n def __init__(self, native, frame):\n self.native = native\n self.frame = frame\n self.baseline_dpi = 96\n\n @property\n def width(self):\n # Treat `native=None` as a 0x0 viewport\n if self.native is None:\n return 0\n return self.native.ClientSize.Width\n\n @property\n def height(self):\n if self.native is None:\n return 0\n # Subtract any vertical shift of the frame. This is to allow\n # for toolbars, or any other viewport-level decoration.\n return self.native.ClientSize.Height - self.frame.vertical_shift\n\n @property\n def dpi(self):\n if self.native is None:\n return self.baseline_dpi\n return self.native.CreateGraphics().DpiX\n\n\nclass Window:\n def __init__(self, interface, title, position, size):\n self.interface = interface\n self.interface._impl = self\n\n # Winforms close handling is caught on the FormClosing handler. To allow\n # for async close handling, we need to be able to abort this close\n # event, and then manually cause the close as part of the async result\n # handling. However, this then causes an is_closing event, which we need\n # to ignore. 
The `_is_closing` flag lets us easily identify if the\n # window is in the process of closing.\n self._is_closing = False\n\n self.native = WinForms.Form()\n self.native.interface = self.interface\n self.native.FormClosing += self.winforms_FormClosing\n\n self.set_title(title)\n self.set_size(size)\n self.set_position(position)\n\n self.toolbar_native = None\n self.toolbar_items = None\n if self.native.interface.resizeable:\n self.native.Resize += self.winforms_resize\n else:\n self.native.FormBorderStyle = self.native.FormBorderStyle.FixedSingle\n self.native.MaximizeBox = False\n\n def create_toolbar(self):\n self.toolbar_native = WinForms.ToolStrip()\n for cmd in self.interface.toolbar:\n if cmd == GROUP_BREAK:\n item = WinForms.ToolStripSeparator()\n elif cmd == SECTION_BREAK:\n item = WinForms.ToolStripSeparator()\n else:\n if cmd.icon is not None:\n native_icon = cmd.icon._impl.native\n item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap())\n else:\n item = WinForms.ToolStripMenuItem(cmd.label)\n item.Click += cmd._impl.as_handler()\n cmd._impl.native.append(item)\n self.toolbar_native.Items.Add(item)\n\n def get_position(self):\n return (self.native.Location.X, self.native.Location.Y)\n\n def set_position(self, position):\n self.native.Location = Point(*position)\n\n def get_size(self):\n return (self.native.ClientSize.Width, self.native.ClientSize.Height)\n\n def set_size(self, size):\n self.native.ClientSize = Size(*size)\n\n def set_app(self, app):\n if app is None:\n return\n icon_impl = app.interface.icon._impl\n if icon_impl is None:\n return\n self.native.Icon = icon_impl.native\n\n @property\n def vertical_shift(self):\n # vertical shift is the toolbar height or 0\n result = 0\n try:\n result += self.native.interface._impl.toolbar_native.Height\n except AttributeError:\n pass\n try:\n result += self.native.interface._impl.native.MainMenuStrip.Height\n except AttributeError:\n pass\n return result\n\n def set_content(self, widget):\n if self.toolbar_native:\n self.native.Controls.Add(self.toolbar_native)\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self.native.Controls.Add(widget.native)\n\n # Set the widget's viewport to be based on the window's content.\n widget.viewport = WinFormsViewport(native=self.native, frame=self)\n widget.frame = self\n\n # Add all children to the content widget.\n for child in widget.interface.children:\n child._impl.container = widget\n\n def get_title(self):\n return self.native.Text\n\n def set_title(self, title):\n self.native.Text = title\n\n def show(self):\n # The first render of the content will establish the\n # minimum possible content size; use that to enforce\n # a minimum window size.\n TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight\n # Now that the content is visible, we can do our initial hinting,\n # and use that as the basis for setting the minimum window size.\n self.interface.content._impl.rehint()\n self.interface.content.style.layout(\n self.interface.content,\n WinFormsViewport(native=None, frame=None),\n )\n self.native.MinimumSize = Size(\n int(self.interface.content.layout.width),\n int(self.interface.content.layout.height) + TITLEBAR_HEIGHT\n )\n self.interface.content.refresh()\n\n if self.interface is not self.interface.app._main_window:\n self.native.Icon = self.interface.app.icon.bind(self.interface.factory).native\n self.native.Show()\n\n def winforms_FormClosing(self, sender, event):\n # If the app is exiting, or a manual close has been requested,\n 
# don't get confirmation; just close.\n if not self.interface.app._impl._is_exiting and not self._is_closing:\n if not self.interface.closeable:\n # Closeability is implemented by shortcutting the close handler.\n event.Cancel = True\n elif self.interface.on_close:\n # If there is an on_close event handler, process it;\n # but then cancel the close event. If the result of\n # on_close handling indicates the window should close,\n # then it will be manually triggered as part of that\n # result handling.\n self.interface.on_close(self)\n event.Cancel = True\n\n def set_full_screen(self, is_full_screen):\n self.interface.factory.not_implemented('Window.set_full_screen()')\n\n def set_on_close(self, handler):\n pass\n\n def close(self):\n self._is_closing = True\n self.native.Close()\n\n def winforms_resize(self, sender, args):\n if self.interface.content:\n # Re-layout the content\n self.interface.content.refresh()\n", "path": "src/winforms/toga_winforms/window.py"}], "after_files": [{"content": "from toga import GROUP_BREAK, SECTION_BREAK\n\nfrom .libs import Point, Size, WinForms\n\n\nclass WinFormsViewport:\n def __init__(self, native, frame):\n self.native = native\n self.frame = frame\n self.baseline_dpi = 96\n\n @property\n def width(self):\n # Treat `native=None` as a 0x0 viewport\n if self.native is None:\n return 0\n return self.native.ClientSize.Width\n\n @property\n def height(self):\n if self.native is None:\n return 0\n # Subtract any vertical shift of the frame. This is to allow\n # for toolbars, or any other viewport-level decoration.\n return self.native.ClientSize.Height - self.frame.vertical_shift\n\n @property\n def dpi(self):\n if self.native is None:\n return self.baseline_dpi\n return self.native.CreateGraphics().DpiX\n\n\nclass Window:\n def __init__(self, interface, title, position, size):\n self.interface = interface\n self.interface._impl = self\n\n # Winforms close handling is caught on the FormClosing handler. To allow\n # for async close handling, we need to be able to abort this close\n # event, and then manually cause the close as part of the async result\n # handling. However, this then causes an is_closing event, which we need\n # to ignore. 
The `_is_closing` flag lets us easily identify if the\n # window is in the process of closing.\n self._is_closing = False\n\n self.native = WinForms.Form()\n self.native.interface = self.interface\n self.native.FormClosing += self.winforms_FormClosing\n\n self.native.MinimizeBox = self.native.interface.minimizable\n\n self.set_title(title)\n self.set_size(size)\n self.set_position(position)\n\n self.toolbar_native = None\n self.toolbar_items = None\n if self.native.interface.resizeable:\n self.native.Resize += self.winforms_resize\n else:\n self.native.FormBorderStyle = self.native.FormBorderStyle.FixedSingle\n self.native.MaximizeBox = False\n\n def create_toolbar(self):\n self.toolbar_native = WinForms.ToolStrip()\n for cmd in self.interface.toolbar:\n if cmd == GROUP_BREAK:\n item = WinForms.ToolStripSeparator()\n elif cmd == SECTION_BREAK:\n item = WinForms.ToolStripSeparator()\n else:\n if cmd.icon is not None:\n native_icon = cmd.icon._impl.native\n item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap())\n else:\n item = WinForms.ToolStripMenuItem(cmd.label)\n item.Click += cmd._impl.as_handler()\n cmd._impl.native.append(item)\n self.toolbar_native.Items.Add(item)\n\n def get_position(self):\n return (self.native.Location.X, self.native.Location.Y)\n\n def set_position(self, position):\n self.native.Location = Point(*position)\n\n def get_size(self):\n return (self.native.ClientSize.Width, self.native.ClientSize.Height)\n\n def set_size(self, size):\n self.native.ClientSize = Size(*size)\n\n def set_app(self, app):\n if app is None:\n return\n icon_impl = app.interface.icon._impl\n if icon_impl is None:\n return\n self.native.Icon = icon_impl.native\n\n @property\n def vertical_shift(self):\n # vertical shift is the toolbar height or 0\n result = 0\n try:\n result += self.native.interface._impl.toolbar_native.Height\n except AttributeError:\n pass\n try:\n result += self.native.interface._impl.native.MainMenuStrip.Height\n except AttributeError:\n pass\n return result\n\n def set_content(self, widget):\n if self.toolbar_native:\n self.native.Controls.Add(self.toolbar_native)\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self.native.Controls.Add(widget.native)\n\n # Set the widget's viewport to be based on the window's content.\n widget.viewport = WinFormsViewport(native=self.native, frame=self)\n widget.frame = self\n\n # Add all children to the content widget.\n for child in widget.interface.children:\n child._impl.container = widget\n\n def get_title(self):\n return self.native.Text\n\n def set_title(self, title):\n self.native.Text = title\n\n def show(self):\n # The first render of the content will establish the\n # minimum possible content size; use that to enforce\n # a minimum window size.\n TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight\n # Now that the content is visible, we can do our initial hinting,\n # and use that as the basis for setting the minimum window size.\n self.interface.content._impl.rehint()\n self.interface.content.style.layout(\n self.interface.content,\n WinFormsViewport(native=None, frame=None),\n )\n self.native.MinimumSize = Size(\n int(self.interface.content.layout.width),\n int(self.interface.content.layout.height) + TITLEBAR_HEIGHT\n )\n self.interface.content.refresh()\n\n if self.interface is not self.interface.app._main_window:\n self.native.Icon = self.interface.app.icon.bind(self.interface.factory).native\n self.native.Show()\n\n def winforms_FormClosing(self, sender, event):\n # 
If the app is exiting, or a manual close has been requested,\n # don't get confirmation; just close.\n if not self.interface.app._impl._is_exiting and not self._is_closing:\n if not self.interface.closeable:\n # Closeability is implemented by shortcutting the close handler.\n event.Cancel = True\n elif self.interface.on_close:\n # If there is an on_close event handler, process it;\n # but then cancel the close event. If the result of\n # on_close handling indicates the window should close,\n # then it will be manually triggered as part of that\n # result handling.\n self.interface.on_close(self)\n event.Cancel = True\n\n def set_full_screen(self, is_full_screen):\n self.interface.factory.not_implemented('Window.set_full_screen()')\n\n def set_on_close(self, handler):\n pass\n\n def close(self):\n self._is_closing = True\n self.native.Close()\n\n def winforms_resize(self, sender, args):\n if self.interface.content:\n # Re-layout the content\n self.interface.content.refresh()\n", "path": "src/winforms/toga_winforms/window.py"}]}
2550
113
gh_patches_debug_23487
rasdani/github-patches
git_diff
Cog-Creators__Red-DiscordBot-1205
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [V3 Cleanup] Handle getting coroutine prefixes for [p]cleanup bot ### Type - Bug ### Description Coroutine prefixes aren't implemented for `cleanup bot`. This is kind of important since Red's default prefix is a coroutine. This is pretty easy; we can just use `await bot.get_prefix(ctx.message)`, since this actually just returns a list of server prefixes. [Relevant Sentry issue](https://sentry.io/will-tekulve/bot-development/issues/420947439/) ``` TypeError: argument of type 'coroutine' is not iterable File "discord/ext/commands/core.py", line 62, in wrapped ret = yield from coro(*args, **kwargs) File "cleanup/cleanup.py", line 234, in cleanup_bot if '' in prefixes: Exception in command 'cleanup bot' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `redbot/cogs/cleanup/cleanup.py` Content: ``` 1 import asyncio 2 import re 3 4 import discord 5 from discord.ext import commands 6 7 from redbot.core import checks 8 from redbot.core.bot import Red 9 from redbot.core.i18n import CogI18n 10 from redbot.core.utils.mod import slow_deletion, mass_purge 11 from redbot.cogs.mod.log import log 12 from redbot.core.context import RedContext 13 14 _ = CogI18n("Cleanup", __file__) 15 16 17 class Cleanup: 18 """Commands for cleaning messages""" 19 20 def __init__(self, bot: Red): 21 self.bot = bot 22 23 @commands.group() 24 @checks.mod_or_permissions(manage_messages=True) 25 async def cleanup(self, ctx: RedContext): 26 """Deletes messages.""" 27 if ctx.invoked_subcommand is None: 28 await ctx.send_help() 29 30 @cleanup.command() 31 @commands.guild_only() 32 @commands.bot_has_permissions(manage_messages=True) 33 async def text(self, ctx: commands.Context, text: str, number: int): 34 """Deletes last X messages matching the specified text. 35 36 Example: 37 cleanup text \"test\" 5 38 39 Remember to use double quotes.""" 40 41 channel = ctx.channel 42 author = ctx.author 43 is_bot = self.bot.user.bot 44 45 def check(m): 46 if text in m.content: 47 return True 48 elif m == ctx.message: 49 return True 50 else: 51 return False 52 53 to_delete = [ctx.message] 54 too_old = False 55 tmp = ctx.message 56 57 while not too_old and len(to_delete) - 1 < number: 58 async for message in channel.history(limit=1000, 59 before=tmp): 60 if len(to_delete) - 1 < number and check(message) and\ 61 (ctx.message.created_at - message.created_at).days < 14: 62 to_delete.append(message) 63 elif (ctx.message.created_at - message.created_at).days >= 14: 64 too_old = True 65 break 66 elif len(to_delete) >= number: 67 break 68 tmp = message 69 70 reason = "{}({}) deleted {} messages "\ 71 " containing '{}' in channel {}".format(author.name, 72 author.id, len(to_delete), text, channel.id) 73 log.info(reason) 74 75 if is_bot: 76 await mass_purge(to_delete, channel) 77 else: 78 await slow_deletion(to_delete) 79 80 @cleanup.command() 81 @commands.guild_only() 82 @commands.bot_has_permissions(manage_messages=True) 83 async def user(self, ctx: commands.Context, user: discord.Member or int, number: int): 84 """Deletes last X messages from specified user. 
85 86 Examples: 87 cleanup user @\u200bTwentysix 2 88 cleanup user Red 6""" 89 90 channel = ctx.channel 91 author = ctx.author 92 is_bot = self.bot.user.bot 93 94 def check(m): 95 if isinstance(user, discord.Member) and m.author == user: 96 return True 97 elif m.author.id == user: # Allow finding messages based on an ID 98 return True 99 elif m == ctx.message: 100 return True 101 else: 102 return False 103 104 to_delete = [] 105 too_old = False 106 tmp = ctx.message 107 108 while not too_old and len(to_delete) - 1 < number: 109 async for message in channel.history(limit=1000, 110 before=tmp): 111 if len(to_delete) - 1 < number and check(message) and\ 112 (ctx.message.created_at - message.created_at).days < 14: 113 to_delete.append(message) 114 elif (ctx.message.created_at - message.created_at).days >= 14: 115 too_old = True 116 break 117 elif len(to_delete) >= number: 118 break 119 tmp = message 120 reason = "{}({}) deleted {} messages "\ 121 " made by {}({}) in channel {}"\ 122 "".format(author.name, author.id, len(to_delete), 123 user.name, user.id, channel.name) 124 log.info(reason) 125 126 if is_bot: 127 # For whatever reason the purge endpoint requires manage_messages 128 await mass_purge(to_delete, channel) 129 else: 130 await slow_deletion(to_delete) 131 132 @cleanup.command() 133 @commands.guild_only() 134 @commands.bot_has_permissions(manage_messages=True) 135 async def after(self, ctx: commands.Context, message_id: int): 136 """Deletes all messages after specified message 137 138 To get a message id, enable developer mode in Discord's 139 settings, 'appearance' tab. Then right click a message 140 and copy its id. 141 142 This command only works on bots running as bot accounts. 143 """ 144 145 channel = ctx.channel 146 author = ctx.author 147 is_bot = self.bot.user.bot 148 149 if not is_bot: 150 await ctx.send(_("This command can only be used on bots with " 151 "bot accounts.")) 152 return 153 154 after = await channel.get_message(message_id) 155 156 if not after: 157 await ctx.send(_("Message not found.")) 158 return 159 160 to_delete = [] 161 162 async for message in channel.history(after=after): 163 if (ctx.message.created_at - message.created_at).days < 14: 164 # Only add messages that are less than 165 # 14 days old to the deletion queue 166 to_delete.append(message) 167 168 reason = "{}({}) deleted {} messages in channel {}"\ 169 "".format(author.name, author.id, 170 len(to_delete), channel.name) 171 log.info(reason) 172 173 await mass_purge(to_delete, channel) 174 175 @cleanup.command() 176 @commands.guild_only() 177 @commands.bot_has_permissions(manage_messages=True) 178 async def messages(self, ctx: commands.Context, number: int): 179 """Deletes last X messages. 
180 181 Example: 182 cleanup messages 26""" 183 184 channel = ctx.channel 185 author = ctx.author 186 187 is_bot = self.bot.user.bot 188 189 to_delete = [] 190 tmp = ctx.message 191 192 done = False 193 194 while len(to_delete) - 1 < number and not done: 195 async for message in channel.history(limit=1000, before=tmp): 196 if len(to_delete) - 1 < number and \ 197 (ctx.message.created_at - message.created_at).days < 14: 198 to_delete.append(message) 199 elif (ctx.message.created_at - message.created_at).days >= 14: 200 done = True 201 break 202 tmp = message 203 204 reason = "{}({}) deleted {} messages in channel {}"\ 205 "".format(author.name, author.id, 206 number, channel.name) 207 log.info(reason) 208 209 if is_bot: 210 await mass_purge(to_delete, channel) 211 else: 212 await slow_deletion(to_delete) 213 214 @cleanup.command(name='bot') 215 @commands.guild_only() 216 @commands.bot_has_permissions(manage_messages=True) 217 async def cleanup_bot(self, ctx: commands.Context, number: int): 218 """Cleans up command messages and messages from the bot""" 219 220 channel = ctx.message.channel 221 author = ctx.message.author 222 is_bot = self.bot.user.bot 223 224 prefixes = self.bot.command_prefix 225 if isinstance(prefixes, str): 226 prefixes = [prefixes] 227 elif callable(prefixes): 228 if asyncio.iscoroutine(prefixes): 229 await ctx.send(_('Coroutine prefixes not yet implemented.')) 230 return 231 prefixes = prefixes(self.bot, ctx.message) 232 233 # In case some idiot sets a null prefix 234 if '' in prefixes: 235 prefixes.remove('') 236 237 def check(m): 238 if m.author.id == self.bot.user.id: 239 return True 240 elif m == ctx.message: 241 return True 242 p = discord.utils.find(m.content.startswith, prefixes) 243 if p and len(p) > 0: 244 return m.content[len(p):].startswith(tuple(self.bot.commands)) 245 return False 246 247 to_delete = [ctx.message] 248 too_old = False 249 tmp = ctx.message 250 251 while not too_old and len(to_delete) - 1 < number: 252 async for message in channel.history(limit=1000, before=tmp): 253 if len(to_delete) - 1 < number and check(message) and\ 254 (ctx.message.created_at - message.created_at).days < 14: 255 to_delete.append(message) 256 elif (ctx.message.created_at - message.created_at).days >= 14: 257 too_old = True 258 break 259 elif len(to_delete) >= number: 260 break 261 tmp = message 262 263 reason = "{}({}) deleted {} "\ 264 " command messages in channel {}"\ 265 "".format(author.name, author.id, len(to_delete), 266 channel.name) 267 log.info(reason) 268 269 if is_bot: 270 await mass_purge(to_delete, channel) 271 else: 272 await slow_deletion(to_delete) 273 274 @cleanup.command(name='self') 275 async def cleanup_self(self, ctx: commands.Context, number: int, match_pattern: str = None): 276 """Cleans up messages owned by the bot. 277 278 By default, all messages are cleaned. If a third argument is specified, 279 it is used for pattern matching: If it begins with r( and ends with ), 280 then it is interpreted as a regex, and messages that match it are 281 deleted. Otherwise, it is used in a simple substring test. 
282 283 Some helpful regex flags to include in your pattern: 284 Dots match newlines: (?s); Ignore case: (?i); Both: (?si) 285 """ 286 channel = ctx.channel 287 author = ctx.message.author 288 is_bot = self.bot.user.bot 289 290 # You can always delete your own messages, this is needed to purge 291 can_mass_purge = False 292 if type(author) is discord.Member: 293 me = ctx.guild.me 294 can_mass_purge = channel.permissions_for(me).manage_messages 295 296 use_re = (match_pattern and match_pattern.startswith('r(') and 297 match_pattern.endswith(')')) 298 299 if use_re: 300 match_pattern = match_pattern[1:] # strip 'r' 301 match_re = re.compile(match_pattern) 302 303 def content_match(c): 304 return bool(match_re.match(c)) 305 elif match_pattern: 306 def content_match(c): 307 return match_pattern in c 308 else: 309 def content_match(_): 310 return True 311 312 def check(m): 313 if m.author.id != self.bot.user.id: 314 return False 315 elif content_match(m.content): 316 return True 317 return False 318 319 to_delete = [] 320 # Selfbot convenience, delete trigger message 321 if author == self.bot.user: 322 to_delete.append(ctx.message) 323 number += 1 324 too_old = False 325 tmp = ctx.message 326 while not too_old and len(to_delete) < number: 327 async for message in channel.history(limit=1000, before=tmp): 328 if len(to_delete) < number and check(message) and\ 329 (ctx.message.created_at - message.created_at).days < 14: 330 to_delete.append(message) 331 elif (ctx.message.created_at - message.created_at).days >= 14: 332 # Found a message that is 14 or more days old, stop here 333 too_old = True 334 break 335 elif len(to_delete) >= number: 336 break 337 tmp = message 338 339 if channel.name: 340 channel_name = 'channel ' + channel.name 341 else: 342 channel_name = str(channel) 343 344 reason = "{}({}) deleted {} messages "\ 345 "sent by the bot in {}"\ 346 "".format(author.name, author.id, len(to_delete), 347 channel_name) 348 log.info(reason) 349 350 if is_bot and can_mass_purge: 351 await mass_purge(to_delete, channel) 352 else: 353 await slow_deletion(to_delete) 354 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/redbot/cogs/cleanup/cleanup.py b/redbot/cogs/cleanup/cleanup.py --- a/redbot/cogs/cleanup/cleanup.py +++ b/redbot/cogs/cleanup/cleanup.py @@ -221,14 +221,9 @@ author = ctx.message.author is_bot = self.bot.user.bot - prefixes = self.bot.command_prefix + prefixes = await self.bot.get_prefix(ctx.message) # This returns all server prefixes if isinstance(prefixes, str): prefixes = [prefixes] - elif callable(prefixes): - if asyncio.iscoroutine(prefixes): - await ctx.send(_('Coroutine prefixes not yet implemented.')) - return - prefixes = prefixes(self.bot, ctx.message) # In case some idiot sets a null prefix if '' in prefixes: @@ -241,7 +236,8 @@ return True p = discord.utils.find(m.content.startswith, prefixes) if p and len(p) > 0: - return m.content[len(p):].startswith(tuple(self.bot.commands)) + cmd_name = m.content[len(p):].split(' ')[0] + return bool(self.bot.get_command(cmd_name)) return False to_delete = [ctx.message]
{"golden_diff": "diff --git a/redbot/cogs/cleanup/cleanup.py b/redbot/cogs/cleanup/cleanup.py\n--- a/redbot/cogs/cleanup/cleanup.py\n+++ b/redbot/cogs/cleanup/cleanup.py\n@@ -221,14 +221,9 @@\n author = ctx.message.author\n is_bot = self.bot.user.bot\n \n- prefixes = self.bot.command_prefix\n+ prefixes = await self.bot.get_prefix(ctx.message) # This returns all server prefixes\n if isinstance(prefixes, str):\n prefixes = [prefixes]\n- elif callable(prefixes):\n- if asyncio.iscoroutine(prefixes):\n- await ctx.send(_('Coroutine prefixes not yet implemented.'))\n- return\n- prefixes = prefixes(self.bot, ctx.message)\n \n # In case some idiot sets a null prefix\n if '' in prefixes:\n@@ -241,7 +236,8 @@\n return True\n p = discord.utils.find(m.content.startswith, prefixes)\n if p and len(p) > 0:\n- return m.content[len(p):].startswith(tuple(self.bot.commands))\n+ cmd_name = m.content[len(p):].split(' ')[0]\n+ return bool(self.bot.get_command(cmd_name))\n return False\n \n to_delete = [ctx.message]\n", "issue": "[V3 Cleanup] Handle getting coroutine prefixes for [p]cleanup bot\n### Type\n\n- Bug\n\n### Description\nCoroutine prefixes aren't implemented for `cleanup bot`. This is kind of important since Red's default prefix is a coroutine.\n\nThis is pretty easy; we can just use `await bot.get_prefix(ctx.message)`, since this actually just returns a list of server prefixes.\n\n[Relevant Sentry issue](https://sentry.io/will-tekulve/bot-development/issues/420947439/)\n\n```\nTypeError: argument of type 'coroutine' is not iterable\n File \"discord/ext/commands/core.py\", line 62, in wrapped\n ret = yield from coro(*args, **kwargs)\n File \"cleanup/cleanup.py\", line 234, in cleanup_bot\n if '' in prefixes:\n\nException in command 'cleanup bot'\n```\n", "before_files": [{"content": "import asyncio\nimport re\n\nimport discord\nfrom discord.ext import commands\n\nfrom redbot.core import checks\nfrom redbot.core.bot import Red\nfrom redbot.core.i18n import CogI18n\nfrom redbot.core.utils.mod import slow_deletion, mass_purge\nfrom redbot.cogs.mod.log import log\nfrom redbot.core.context import RedContext\n\n_ = CogI18n(\"Cleanup\", __file__)\n\n\nclass Cleanup:\n \"\"\"Commands for cleaning messages\"\"\"\n\n def __init__(self, bot: Red):\n self.bot = bot\n\n @commands.group()\n @checks.mod_or_permissions(manage_messages=True)\n async def cleanup(self, ctx: RedContext):\n \"\"\"Deletes messages.\"\"\"\n if ctx.invoked_subcommand is None:\n await ctx.send_help()\n\n @cleanup.command()\n @commands.guild_only()\n @commands.bot_has_permissions(manage_messages=True)\n async def text(self, ctx: commands.Context, text: str, number: int):\n \"\"\"Deletes last X messages matching the specified text.\n\n Example:\n cleanup text \\\"test\\\" 5\n\n Remember to use double quotes.\"\"\"\n\n channel = ctx.channel\n author = ctx.author\n is_bot = self.bot.user.bot\n\n def check(m):\n if text in m.content:\n return True\n elif m == ctx.message:\n return True\n else:\n return False\n\n to_delete = [ctx.message]\n too_old = False\n tmp = ctx.message\n\n while not too_old and len(to_delete) - 1 < number:\n async for message in channel.history(limit=1000,\n before=tmp):\n if len(to_delete) - 1 < number and check(message) and\\\n (ctx.message.created_at - message.created_at).days < 14:\n to_delete.append(message)\n elif (ctx.message.created_at - message.created_at).days >= 14:\n too_old = True\n break\n elif len(to_delete) >= number:\n break\n tmp = message\n\n reason = \"{}({}) deleted {} messages \"\\\n \" containing '{}' 
in channel {}\".format(author.name,\n author.id, len(to_delete), text, channel.id)\n log.info(reason)\n\n if is_bot:\n await mass_purge(to_delete, channel)\n else:\n await slow_deletion(to_delete)\n\n @cleanup.command()\n @commands.guild_only()\n @commands.bot_has_permissions(manage_messages=True)\n async def user(self, ctx: commands.Context, user: discord.Member or int, number: int):\n \"\"\"Deletes last X messages from specified user.\n\n Examples:\n cleanup user @\\u200bTwentysix 2\n cleanup user Red 6\"\"\"\n\n channel = ctx.channel\n author = ctx.author\n is_bot = self.bot.user.bot\n\n def check(m):\n if isinstance(user, discord.Member) and m.author == user:\n return True\n elif m.author.id == user: # Allow finding messages based on an ID\n return True\n elif m == ctx.message:\n return True\n else:\n return False\n\n to_delete = []\n too_old = False\n tmp = ctx.message\n\n while not too_old and len(to_delete) - 1 < number:\n async for message in channel.history(limit=1000,\n before=tmp):\n if len(to_delete) - 1 < number and check(message) and\\\n (ctx.message.created_at - message.created_at).days < 14:\n to_delete.append(message)\n elif (ctx.message.created_at - message.created_at).days >= 14:\n too_old = True\n break\n elif len(to_delete) >= number:\n break\n tmp = message\n reason = \"{}({}) deleted {} messages \"\\\n \" made by {}({}) in channel {}\"\\\n \"\".format(author.name, author.id, len(to_delete),\n user.name, user.id, channel.name)\n log.info(reason)\n\n if is_bot:\n # For whatever reason the purge endpoint requires manage_messages\n await mass_purge(to_delete, channel)\n else:\n await slow_deletion(to_delete)\n\n @cleanup.command()\n @commands.guild_only()\n @commands.bot_has_permissions(manage_messages=True)\n async def after(self, ctx: commands.Context, message_id: int):\n \"\"\"Deletes all messages after specified message\n\n To get a message id, enable developer mode in Discord's\n settings, 'appearance' tab. 
Then right click a message\n and copy its id.\n\n This command only works on bots running as bot accounts.\n \"\"\"\n\n channel = ctx.channel\n author = ctx.author\n is_bot = self.bot.user.bot\n\n if not is_bot:\n await ctx.send(_(\"This command can only be used on bots with \"\n \"bot accounts.\"))\n return\n\n after = await channel.get_message(message_id)\n\n if not after:\n await ctx.send(_(\"Message not found.\"))\n return\n\n to_delete = []\n\n async for message in channel.history(after=after):\n if (ctx.message.created_at - message.created_at).days < 14:\n # Only add messages that are less than\n # 14 days old to the deletion queue\n to_delete.append(message)\n\n reason = \"{}({}) deleted {} messages in channel {}\"\\\n \"\".format(author.name, author.id,\n len(to_delete), channel.name)\n log.info(reason)\n\n await mass_purge(to_delete, channel)\n\n @cleanup.command()\n @commands.guild_only()\n @commands.bot_has_permissions(manage_messages=True)\n async def messages(self, ctx: commands.Context, number: int):\n \"\"\"Deletes last X messages.\n\n Example:\n cleanup messages 26\"\"\"\n\n channel = ctx.channel\n author = ctx.author\n\n is_bot = self.bot.user.bot\n\n to_delete = []\n tmp = ctx.message\n\n done = False\n\n while len(to_delete) - 1 < number and not done:\n async for message in channel.history(limit=1000, before=tmp):\n if len(to_delete) - 1 < number and \\\n (ctx.message.created_at - message.created_at).days < 14:\n to_delete.append(message)\n elif (ctx.message.created_at - message.created_at).days >= 14:\n done = True\n break\n tmp = message\n\n reason = \"{}({}) deleted {} messages in channel {}\"\\\n \"\".format(author.name, author.id,\n number, channel.name)\n log.info(reason)\n\n if is_bot:\n await mass_purge(to_delete, channel)\n else:\n await slow_deletion(to_delete)\n\n @cleanup.command(name='bot')\n @commands.guild_only()\n @commands.bot_has_permissions(manage_messages=True)\n async def cleanup_bot(self, ctx: commands.Context, number: int):\n \"\"\"Cleans up command messages and messages from the bot\"\"\"\n\n channel = ctx.message.channel\n author = ctx.message.author\n is_bot = self.bot.user.bot\n\n prefixes = self.bot.command_prefix\n if isinstance(prefixes, str):\n prefixes = [prefixes]\n elif callable(prefixes):\n if asyncio.iscoroutine(prefixes):\n await ctx.send(_('Coroutine prefixes not yet implemented.'))\n return\n prefixes = prefixes(self.bot, ctx.message)\n\n # In case some idiot sets a null prefix\n if '' in prefixes:\n prefixes.remove('')\n\n def check(m):\n if m.author.id == self.bot.user.id:\n return True\n elif m == ctx.message:\n return True\n p = discord.utils.find(m.content.startswith, prefixes)\n if p and len(p) > 0:\n return m.content[len(p):].startswith(tuple(self.bot.commands))\n return False\n\n to_delete = [ctx.message]\n too_old = False\n tmp = ctx.message\n\n while not too_old and len(to_delete) - 1 < number:\n async for message in channel.history(limit=1000, before=tmp):\n if len(to_delete) - 1 < number and check(message) and\\\n (ctx.message.created_at - message.created_at).days < 14:\n to_delete.append(message)\n elif (ctx.message.created_at - message.created_at).days >= 14:\n too_old = True\n break\n elif len(to_delete) >= number:\n break\n tmp = message\n\n reason = \"{}({}) deleted {} \"\\\n \" command messages in channel {}\"\\\n \"\".format(author.name, author.id, len(to_delete),\n channel.name)\n log.info(reason)\n\n if is_bot:\n await mass_purge(to_delete, channel)\n else:\n await slow_deletion(to_delete)\n\n 
@cleanup.command(name='self')\n async def cleanup_self(self, ctx: commands.Context, number: int, match_pattern: str = None):\n \"\"\"Cleans up messages owned by the bot.\n\n By default, all messages are cleaned. If a third argument is specified,\n it is used for pattern matching: If it begins with r( and ends with ),\n then it is interpreted as a regex, and messages that match it are\n deleted. Otherwise, it is used in a simple substring test.\n\n Some helpful regex flags to include in your pattern:\n Dots match newlines: (?s); Ignore case: (?i); Both: (?si)\n \"\"\"\n channel = ctx.channel\n author = ctx.message.author\n is_bot = self.bot.user.bot\n\n # You can always delete your own messages, this is needed to purge\n can_mass_purge = False\n if type(author) is discord.Member:\n me = ctx.guild.me\n can_mass_purge = channel.permissions_for(me).manage_messages\n\n use_re = (match_pattern and match_pattern.startswith('r(') and\n match_pattern.endswith(')'))\n\n if use_re:\n match_pattern = match_pattern[1:] # strip 'r'\n match_re = re.compile(match_pattern)\n\n def content_match(c):\n return bool(match_re.match(c))\n elif match_pattern:\n def content_match(c):\n return match_pattern in c\n else:\n def content_match(_):\n return True\n\n def check(m):\n if m.author.id != self.bot.user.id:\n return False\n elif content_match(m.content):\n return True\n return False\n\n to_delete = []\n # Selfbot convenience, delete trigger message\n if author == self.bot.user:\n to_delete.append(ctx.message)\n number += 1\n too_old = False\n tmp = ctx.message\n while not too_old and len(to_delete) < number:\n async for message in channel.history(limit=1000, before=tmp):\n if len(to_delete) < number and check(message) and\\\n (ctx.message.created_at - message.created_at).days < 14:\n to_delete.append(message)\n elif (ctx.message.created_at - message.created_at).days >= 14:\n # Found a message that is 14 or more days old, stop here\n too_old = True\n break\n elif len(to_delete) >= number:\n break\n tmp = message\n\n if channel.name:\n channel_name = 'channel ' + channel.name\n else:\n channel_name = str(channel)\n\n reason = \"{}({}) deleted {} messages \"\\\n \"sent by the bot in {}\"\\\n \"\".format(author.name, author.id, len(to_delete),\n channel_name)\n log.info(reason)\n\n if is_bot and can_mass_purge:\n await mass_purge(to_delete, channel)\n else:\n await slow_deletion(to_delete)\n", "path": "redbot/cogs/cleanup/cleanup.py"}], "after_files": [{"content": "import asyncio\nimport re\n\nimport discord\nfrom discord.ext import commands\n\nfrom redbot.core import checks\nfrom redbot.core.bot import Red\nfrom redbot.core.i18n import CogI18n\nfrom redbot.core.utils.mod import slow_deletion, mass_purge\nfrom redbot.cogs.mod.log import log\nfrom redbot.core.context import RedContext\n\n_ = CogI18n(\"Cleanup\", __file__)\n\n\nclass Cleanup:\n \"\"\"Commands for cleaning messages\"\"\"\n\n def __init__(self, bot: Red):\n self.bot = bot\n\n @commands.group()\n @checks.mod_or_permissions(manage_messages=True)\n async def cleanup(self, ctx: RedContext):\n \"\"\"Deletes messages.\"\"\"\n if ctx.invoked_subcommand is None:\n await ctx.send_help()\n\n @cleanup.command()\n @commands.guild_only()\n @commands.bot_has_permissions(manage_messages=True)\n async def text(self, ctx: commands.Context, text: str, number: int):\n \"\"\"Deletes last X messages matching the specified text.\n\n Example:\n cleanup text \\\"test\\\" 5\n\n Remember to use double quotes.\"\"\"\n\n channel = ctx.channel\n author = ctx.author\n is_bot = 
self.bot.user.bot\n\n def check(m):\n if text in m.content:\n return True\n elif m == ctx.message:\n return True\n else:\n return False\n\n to_delete = [ctx.message]\n too_old = False\n tmp = ctx.message\n\n while not too_old and len(to_delete) - 1 < number:\n async for message in channel.history(limit=1000,\n before=tmp):\n if len(to_delete) - 1 < number and check(message) and\\\n (ctx.message.created_at - message.created_at).days < 14:\n to_delete.append(message)\n elif (ctx.message.created_at - message.created_at).days >= 14:\n too_old = True\n break\n elif len(to_delete) >= number:\n break\n tmp = message\n\n reason = \"{}({}) deleted {} messages \"\\\n \" containing '{}' in channel {}\".format(author.name,\n author.id, len(to_delete), text, channel.id)\n log.info(reason)\n\n if is_bot:\n await mass_purge(to_delete, channel)\n else:\n await slow_deletion(to_delete)\n\n @cleanup.command()\n @commands.guild_only()\n @commands.bot_has_permissions(manage_messages=True)\n async def user(self, ctx: commands.Context, user: discord.Member or int, number: int):\n \"\"\"Deletes last X messages from specified user.\n\n Examples:\n cleanup user @\\u200bTwentysix 2\n cleanup user Red 6\"\"\"\n\n channel = ctx.channel\n author = ctx.author\n is_bot = self.bot.user.bot\n\n def check(m):\n if isinstance(user, discord.Member) and m.author == user:\n return True\n elif m.author.id == user: # Allow finding messages based on an ID\n return True\n elif m == ctx.message:\n return True\n else:\n return False\n\n to_delete = []\n too_old = False\n tmp = ctx.message\n\n while not too_old and len(to_delete) - 1 < number:\n async for message in channel.history(limit=1000,\n before=tmp):\n if len(to_delete) - 1 < number and check(message) and\\\n (ctx.message.created_at - message.created_at).days < 14:\n to_delete.append(message)\n elif (ctx.message.created_at - message.created_at).days >= 14:\n too_old = True\n break\n elif len(to_delete) >= number:\n break\n tmp = message\n reason = \"{}({}) deleted {} messages \"\\\n \" made by {}({}) in channel {}\"\\\n \"\".format(author.name, author.id, len(to_delete),\n user.name, user.id, channel.name)\n log.info(reason)\n\n if is_bot:\n # For whatever reason the purge endpoint requires manage_messages\n await mass_purge(to_delete, channel)\n else:\n await slow_deletion(to_delete)\n\n @cleanup.command()\n @commands.guild_only()\n @commands.bot_has_permissions(manage_messages=True)\n async def after(self, ctx: commands.Context, message_id: int):\n \"\"\"Deletes all messages after specified message\n\n To get a message id, enable developer mode in Discord's\n settings, 'appearance' tab. 
Then right click a message\n and copy its id.\n\n This command only works on bots running as bot accounts.\n \"\"\"\n\n channel = ctx.channel\n author = ctx.author\n is_bot = self.bot.user.bot\n\n if not is_bot:\n await ctx.send(_(\"This command can only be used on bots with \"\n \"bot accounts.\"))\n return\n\n after = await channel.get_message(message_id)\n\n if not after:\n await ctx.send(_(\"Message not found.\"))\n return\n\n to_delete = []\n\n async for message in channel.history(after=after):\n if (ctx.message.created_at - message.created_at).days < 14:\n # Only add messages that are less than\n # 14 days old to the deletion queue\n to_delete.append(message)\n\n reason = \"{}({}) deleted {} messages in channel {}\"\\\n \"\".format(author.name, author.id,\n len(to_delete), channel.name)\n log.info(reason)\n\n await mass_purge(to_delete, channel)\n\n @cleanup.command()\n @commands.guild_only()\n @commands.bot_has_permissions(manage_messages=True)\n async def messages(self, ctx: commands.Context, number: int):\n \"\"\"Deletes last X messages.\n\n Example:\n cleanup messages 26\"\"\"\n\n channel = ctx.channel\n author = ctx.author\n\n is_bot = self.bot.user.bot\n\n to_delete = []\n tmp = ctx.message\n\n done = False\n\n while len(to_delete) - 1 < number and not done:\n async for message in channel.history(limit=1000, before=tmp):\n if len(to_delete) - 1 < number and \\\n (ctx.message.created_at - message.created_at).days < 14:\n to_delete.append(message)\n elif (ctx.message.created_at - message.created_at).days >= 14:\n done = True\n break\n tmp = message\n\n reason = \"{}({}) deleted {} messages in channel {}\"\\\n \"\".format(author.name, author.id,\n number, channel.name)\n log.info(reason)\n\n if is_bot:\n await mass_purge(to_delete, channel)\n else:\n await slow_deletion(to_delete)\n\n @cleanup.command(name='bot')\n @commands.guild_only()\n @commands.bot_has_permissions(manage_messages=True)\n async def cleanup_bot(self, ctx: commands.Context, number: int):\n \"\"\"Cleans up command messages and messages from the bot\"\"\"\n\n channel = ctx.message.channel\n author = ctx.message.author\n is_bot = self.bot.user.bot\n\n prefixes = await self.bot.get_prefix(ctx.message) # This returns all server prefixes\n if isinstance(prefixes, str):\n prefixes = [prefixes]\n\n # In case some idiot sets a null prefix\n if '' in prefixes:\n prefixes.remove('')\n\n def check(m):\n if m.author.id == self.bot.user.id:\n return True\n elif m == ctx.message:\n return True\n p = discord.utils.find(m.content.startswith, prefixes)\n if p and len(p) > 0:\n cmd_name = m.content[len(p):].split(' ')[0]\n return bool(self.bot.get_command(cmd_name))\n return False\n\n to_delete = [ctx.message]\n too_old = False\n tmp = ctx.message\n\n while not too_old and len(to_delete) - 1 < number:\n async for message in channel.history(limit=1000, before=tmp):\n if len(to_delete) - 1 < number and check(message) and\\\n (ctx.message.created_at - message.created_at).days < 14:\n to_delete.append(message)\n elif (ctx.message.created_at - message.created_at).days >= 14:\n too_old = True\n break\n elif len(to_delete) >= number:\n break\n tmp = message\n\n reason = \"{}({}) deleted {} \"\\\n \" command messages in channel {}\"\\\n \"\".format(author.name, author.id, len(to_delete),\n channel.name)\n log.info(reason)\n\n if is_bot:\n await mass_purge(to_delete, channel)\n else:\n await slow_deletion(to_delete)\n\n @cleanup.command(name='self')\n async def cleanup_self(self, ctx: commands.Context, number: int, match_pattern: str = 
None):\n \"\"\"Cleans up messages owned by the bot.\n\n By default, all messages are cleaned. If a third argument is specified,\n it is used for pattern matching: If it begins with r( and ends with ),\n then it is interpreted as a regex, and messages that match it are\n deleted. Otherwise, it is used in a simple substring test.\n\n Some helpful regex flags to include in your pattern:\n Dots match newlines: (?s); Ignore case: (?i); Both: (?si)\n \"\"\"\n channel = ctx.channel\n author = ctx.message.author\n is_bot = self.bot.user.bot\n\n # You can always delete your own messages, this is needed to purge\n can_mass_purge = False\n if type(author) is discord.Member:\n me = ctx.guild.me\n can_mass_purge = channel.permissions_for(me).manage_messages\n\n use_re = (match_pattern and match_pattern.startswith('r(') and\n match_pattern.endswith(')'))\n\n if use_re:\n match_pattern = match_pattern[1:] # strip 'r'\n match_re = re.compile(match_pattern)\n\n def content_match(c):\n return bool(match_re.match(c))\n elif match_pattern:\n def content_match(c):\n return match_pattern in c\n else:\n def content_match(_):\n return True\n\n def check(m):\n if m.author.id != self.bot.user.id:\n return False\n elif content_match(m.content):\n return True\n return False\n\n to_delete = []\n # Selfbot convenience, delete trigger message\n if author == self.bot.user:\n to_delete.append(ctx.message)\n number += 1\n too_old = False\n tmp = ctx.message\n while not too_old and len(to_delete) < number:\n async for message in channel.history(limit=1000, before=tmp):\n if len(to_delete) < number and check(message) and\\\n (ctx.message.created_at - message.created_at).days < 14:\n to_delete.append(message)\n elif (ctx.message.created_at - message.created_at).days >= 14:\n # Found a message that is 14 or more days old, stop here\n too_old = True\n break\n elif len(to_delete) >= number:\n break\n tmp = message\n\n if channel.name:\n channel_name = 'channel ' + channel.name\n else:\n channel_name = str(channel)\n\n reason = \"{}({}) deleted {} messages \"\\\n \"sent by the bot in {}\"\\\n \"\".format(author.name, author.id, len(to_delete),\n channel_name)\n log.info(reason)\n\n if is_bot and can_mass_purge:\n await mass_purge(to_delete, channel)\n else:\n await slow_deletion(to_delete)\n", "path": "redbot/cogs/cleanup/cleanup.py"}]}
3,950
280
gh_patches_debug_33482
rasdani/github-patches
git_diff
interlegis__sapl-1301
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Migração de documentos Foram retiradas do arquivo sapl/utils.py as duas funções abaixo no seguinte PR https://github.com/interlegis/sapl/commit/71491c33dd3769b66bc98199e6d8027f6862e4e5#diff-248fb73d01307eb54567bfe0ba11d306 def save_texto(sender, instance, **kwargs): update_index = UpdateIndexCommand() update_index.start() def delete_texto(sender, instance, **kwargs): update_index = UpdateIndexCommand() update_index.start() No entanto o migrador de documentos precisa delas https://github.com/interlegis/sapl/blob/master/sapl/legacy/migracao_documentos.py#L17 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sapl/legacy/migracao_documentos.py` Content: ``` 1 import mimetypes 2 import os 3 import re 4 5 import magic 6 from django.db.models.signals import post_delete, post_save 7 8 from sapl.base.models import CasaLegislativa 9 from sapl.materia.models import (DocumentoAcessorio, MateriaLegislativa, 10 Proposicao) 11 from sapl.norma.models import NormaJuridica 12 from sapl.parlamentares.models import Parlamentar 13 from sapl.protocoloadm.models import (DocumentoAcessorioAdministrativo, 14 DocumentoAdministrativo) 15 from sapl.sessao.models import SessaoPlenaria 16 from sapl.settings import MEDIA_ROOT 17 from sapl.utils import delete_texto, save_texto 18 19 # MIGRAÇÃO DE DOCUMENTOS ################################################### 20 EXTENSOES = { 21 'application/msword': '.doc', 22 'application/pdf': '.pdf', 23 'application/vnd.oasis.opendocument.text': '.odt', 24 'application/vnd.openxmlformats-officedocument.wordprocessingml.document': '.docx', # noqa 25 'application/xml': '.xml', 26 'application/zip': '.zip', 27 'image/jpeg': '.jpeg', 28 'image/png': '.png', 29 'text/html': '.html', 30 'text/rtf': '.rtf', 31 'text/x-python': '.py', 32 'text/plain': '.ksh', 33 'text/plain': '.c', 34 'text/plain': '.h', 35 'text/plain': '.txt', 36 'text/plain': '.bat', 37 'text/plain': '.pl', 38 'text/plain': '.asc', 39 'text/plain': '.text', 40 'text/plain': '.pot', 41 'text/plain': '.brf', 42 'text/plain': '.srt', 43 44 # sem extensao 45 'application/octet-stream': '', # binário 46 'inode/x-empty': '', # vazio 47 } 48 49 DOCS = { 50 CasaLegislativa: [( 51 'logotipo', 52 'props_sapl/logo_casa.gif', 53 'casa/logotipo/logo_casa.gif')], 54 Parlamentar: [( 55 'fotografia', 56 'parlamentar/fotos/{}_foto_parlamentar', 57 'public/parlamentar/{0}/{0}_foto_parlamentar{1}')], 58 MateriaLegislativa: [( 59 'texto_original', 60 'materia/{}_texto_integral', 61 'public/materialegislativa/{2}/{0}/{0}_texto_integral{1}')], 62 DocumentoAcessorio: [( 63 'arquivo', 64 'materia/{}', 65 'public/documentoacessorio/{2}/{0}/{0}{1}')], 66 NormaJuridica: [( 67 'texto_integral', 68 'norma_juridica/{}_texto_integral', 69 'public/normajuridica/{2}/{0}/{0}_texto_integral{1}')], 70 SessaoPlenaria: [ 71 ('upload_ata', 72 'ata_sessao/{}_ata_sessao', 73 'public/sessaoplenaria/{0}/ata/{0}_ata_sessao{1}'), 74 ('upload_anexo', 75 'anexo_sessao/{}_texto_anexado', 76 'public/sessaoplenaria/{0}/anexo/{0}_texto_anexado{1}') 77 ], 78 Proposicao: [( 79 'texto_original', 80 'proposicao/{}', 81 'private/proposicao/{0}/{0}{1}')], 82 DocumentoAdministrativo: [( 83 'texto_integral', 84 'administrativo/{}_texto_integral', 85 'private/documentoadministrativo/{0}/{0}_texto_integral{1}') 86 ], 87 DocumentoAcessorioAdministrativo: [( 88 'arquivo', 89 
'administrativo/{}', 90 'private/documentoacessorioadministrativo/{0}/{0}_acessorio_administrativo{1}') 91 ], 92 } 93 94 DOCS = {tipo: [(campo, 95 os.path.join('sapl_documentos', origem), 96 os.path.join('sapl', destino)) 97 for campo, origem, destino in campos] 98 for tipo, campos in DOCS.items()} 99 100 101 def em_media(caminho): 102 return os.path.join(MEDIA_ROOT, caminho) 103 104 105 def mover_documento(origem, destino): 106 origem, destino = [em_media(c) if not os.path.isabs(c) else c 107 for c in (origem, destino)] 108 os.makedirs(os.path.dirname(destino), exist_ok=True) 109 os.rename(origem, destino) 110 111 112 def get_casa_legislativa(): 113 casa = CasaLegislativa.objects.first() 114 if not casa: 115 casa = CasaLegislativa.objects.create(**{k: 'PREENCHER...' for k in [ 116 'codigo', 'nome', 'sigla', 'endereco', 'cep', 'municipio', 'uf', 117 ]}) 118 return casa 119 120 121 def migrar_docs_logo(): 122 print('#### Migrando logotipo da casa ####') 123 [(_, origem, destino)] = DOCS[CasaLegislativa] 124 props_sapl = os.path.dirname(origem) 125 126 # a pasta props_sapl deve conter apenas o origem e metadatas! 127 # Edit: Aparentemente há diretório que contém properties ao invés de 128 # metadata. O assert foi modificado para essa situação. 129 assert set(os.listdir(em_media(props_sapl))) < { 130 'logo_casa.gif', '.metadata', 'logo_casa.gif.metadata', 131 '.properties', 'logo_casa.gif.properties', '.objects'} 132 133 mover_documento(origem, destino) 134 casa = get_casa_legislativa() 135 casa.logotipo = destino 136 casa.save() 137 138 139 def get_extensao(caminho): 140 mime = magic.from_file(caminho, mime=True) 141 try: 142 return EXTENSOES[mime] 143 except KeyError as e: 144 raise Exception('\n'.join([ 145 'Extensão não conhecida para o arquivo:', 146 caminho, 147 'E mimetype:', 148 mime, 149 ' Algumas possibilidades são:', ] + 150 [" '{}': '{}',".format(mime, ext) 151 for ext in mimetypes.guess_all_extensions(mime)] + 152 ['Atualize o código do dicionário EXTENSOES!'] 153 )) from e 154 155 156 def migrar_docs_por_ids(tipo): 157 for campo, base_origem, base_destino in DOCS[tipo]: 158 print('#### Migrando {} de {} ####'.format(campo, tipo.__name__)) 159 160 dir_origem, nome_origem = os.path.split(em_media(base_origem)) 161 pat = re.compile('^{}$'.format(nome_origem.format('(\d+)'))) 162 163 if not os.path.isdir(dir_origem): 164 print(' >>> O diretório {} não existe! 
Abortado.'.format( 165 dir_origem)) 166 continue 167 168 for arq in os.listdir(dir_origem): 169 match = pat.match(arq) 170 if match: 171 # associa documento ao objeto 172 try: 173 origem = os.path.join(dir_origem, match.group(0)) 174 id = match.group(1) 175 obj = tipo.objects.get(pk=id) 176 177 extensao = get_extensao(origem) 178 if hasattr(obj, "ano"): 179 destino = base_destino.format(id, extensao, obj.ano) 180 elif isinstance(obj, DocumentoAcessorio): 181 destino = base_destino.format( 182 id, extensao, obj.materia.ano) 183 else: 184 destino = base_destino.format(id, extensao) 185 mover_documento(origem, destino) 186 187 setattr(obj, campo, destino) 188 obj.save() 189 except tipo.DoesNotExist: 190 msg = ' {} (pk={}) não encontrado para documento em [{}]' 191 print(msg.format( 192 tipo.__name__, id, destino)) 193 194 195 def desconecta_sinais_indexacao(): 196 post_save.disconnect(save_texto, NormaJuridica) 197 post_save.disconnect(save_texto, DocumentoAcessorio) 198 post_save.disconnect(save_texto, MateriaLegislativa) 199 post_delete.disconnect(delete_texto, NormaJuridica) 200 post_delete.disconnect(delete_texto, DocumentoAcessorio) 201 post_delete.disconnect(delete_texto, MateriaLegislativa) 202 203 204 def conecta_sinais_indexacao(): 205 post_save.connect(save_texto, NormaJuridica) 206 post_save.connect(save_texto, DocumentoAcessorio) 207 post_save.connect(save_texto, MateriaLegislativa) 208 post_delete.connect(delete_texto, NormaJuridica) 209 post_delete.connect(delete_texto, DocumentoAcessorio) 210 post_delete.connect(delete_texto, MateriaLegislativa) 211 212 213 def migrar_documentos(): 214 # precisamos excluir os sinais de post_save e post_delete para não que o 215 # computador não trave com a criação de threads desnecessárias 216 desconecta_sinais_indexacao() 217 218 # aqui supomos que uma pasta chamada sapl_documentos está em MEDIA_ROOT 219 # com o conteúdo da pasta de mesmo nome do zope 220 # Os arquivos da pasta serão movidos para a nova estrutura e a pasta será 221 # apagada 222 migrar_docs_logo() 223 for tipo in [ 224 Parlamentar, 225 MateriaLegislativa, 226 DocumentoAcessorio, 227 NormaJuridica, 228 SessaoPlenaria, 229 Proposicao, 230 DocumentoAdministrativo, 231 DocumentoAcessorioAdministrativo, 232 ]: 233 migrar_docs_por_ids(tipo) 234 235 sobrando = [os.path.join(dir, file) 236 for (dir, _, files) in os.walk(em_media('sapl_documentos')) 237 for file in files] 238 if sobrando: 239 print('\n#### Encerrado ####\n\n' 240 '{} documentos sobraram sem ser migrados!!!'.format( 241 len(sobrando))) 242 for doc in sobrando: 243 print(' {}'. format(doc)) 244 # 245 # reconexão dos sinais desligados no inicio da migração de documentos 246 conecta_sinais_indexacao() 247 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sapl/legacy/migracao_documentos.py b/sapl/legacy/migracao_documentos.py --- a/sapl/legacy/migracao_documentos.py +++ b/sapl/legacy/migracao_documentos.py @@ -14,7 +14,6 @@ DocumentoAdministrativo) from sapl.sessao.models import SessaoPlenaria from sapl.settings import MEDIA_ROOT -from sapl.utils import delete_texto, save_texto # MIGRAÇÃO DE DOCUMENTOS ################################################### EXTENSOES = { @@ -192,29 +191,7 @@ tipo.__name__, id, destino)) -def desconecta_sinais_indexacao(): - post_save.disconnect(save_texto, NormaJuridica) - post_save.disconnect(save_texto, DocumentoAcessorio) - post_save.disconnect(save_texto, MateriaLegislativa) - post_delete.disconnect(delete_texto, NormaJuridica) - post_delete.disconnect(delete_texto, DocumentoAcessorio) - post_delete.disconnect(delete_texto, MateriaLegislativa) - - -def conecta_sinais_indexacao(): - post_save.connect(save_texto, NormaJuridica) - post_save.connect(save_texto, DocumentoAcessorio) - post_save.connect(save_texto, MateriaLegislativa) - post_delete.connect(delete_texto, NormaJuridica) - post_delete.connect(delete_texto, DocumentoAcessorio) - post_delete.connect(delete_texto, MateriaLegislativa) - - def migrar_documentos(): - # precisamos excluir os sinais de post_save e post_delete para não que o - # computador não trave com a criação de threads desnecessárias - desconecta_sinais_indexacao() - # aqui supomos que uma pasta chamada sapl_documentos está em MEDIA_ROOT # com o conteúdo da pasta de mesmo nome do zope # Os arquivos da pasta serão movidos para a nova estrutura e a pasta será @@ -241,6 +218,3 @@ len(sobrando))) for doc in sobrando: print(' {}'. format(doc)) - # - # reconexão dos sinais desligados no inicio da migração de documentos - conecta_sinais_indexacao()
{"golden_diff": "diff --git a/sapl/legacy/migracao_documentos.py b/sapl/legacy/migracao_documentos.py\n--- a/sapl/legacy/migracao_documentos.py\n+++ b/sapl/legacy/migracao_documentos.py\n@@ -14,7 +14,6 @@\n DocumentoAdministrativo)\n from sapl.sessao.models import SessaoPlenaria\n from sapl.settings import MEDIA_ROOT\n-from sapl.utils import delete_texto, save_texto\n \n # MIGRA\u00c7\u00c3O DE DOCUMENTOS ###################################################\n EXTENSOES = {\n@@ -192,29 +191,7 @@\n tipo.__name__, id, destino))\n \n \n-def desconecta_sinais_indexacao():\n- post_save.disconnect(save_texto, NormaJuridica)\n- post_save.disconnect(save_texto, DocumentoAcessorio)\n- post_save.disconnect(save_texto, MateriaLegislativa)\n- post_delete.disconnect(delete_texto, NormaJuridica)\n- post_delete.disconnect(delete_texto, DocumentoAcessorio)\n- post_delete.disconnect(delete_texto, MateriaLegislativa)\n-\n-\n-def conecta_sinais_indexacao():\n- post_save.connect(save_texto, NormaJuridica)\n- post_save.connect(save_texto, DocumentoAcessorio)\n- post_save.connect(save_texto, MateriaLegislativa)\n- post_delete.connect(delete_texto, NormaJuridica)\n- post_delete.connect(delete_texto, DocumentoAcessorio)\n- post_delete.connect(delete_texto, MateriaLegislativa)\n-\n-\n def migrar_documentos():\n- # precisamos excluir os sinais de post_save e post_delete para n\u00e3o que o\n- # computador n\u00e3o trave com a cria\u00e7\u00e3o de threads desnecess\u00e1rias\n- desconecta_sinais_indexacao()\n-\n # aqui supomos que uma pasta chamada sapl_documentos est\u00e1 em MEDIA_ROOT\n # com o conte\u00fado da pasta de mesmo nome do zope\n # Os arquivos da pasta ser\u00e3o movidos para a nova estrutura e a pasta ser\u00e1\n@@ -241,6 +218,3 @@\n len(sobrando)))\n for doc in sobrando:\n print(' {}'. 
format(doc))\n- #\n- # reconex\u00e3o dos sinais desligados no inicio da migra\u00e7\u00e3o de documentos\n- conecta_sinais_indexacao()\n", "issue": "Migra\u00e7\u00e3o de documentos\nForam retiradas do arquivo sapl/utils.py as duas fun\u00e7\u00f5es abaixo no seguinte PR\r\nhttps://github.com/interlegis/sapl/commit/71491c33dd3769b66bc98199e6d8027f6862e4e5#diff-248fb73d01307eb54567bfe0ba11d306\r\n\r\ndef save_texto(sender, instance, **kwargs):\r\n update_index = UpdateIndexCommand()\r\n update_index.start()\r\n\r\n\r\ndef delete_texto(sender, instance, **kwargs):\r\n update_index = UpdateIndexCommand()\r\n update_index.start()\r\n\r\nNo entanto o migrador de documentos precisa delas\r\nhttps://github.com/interlegis/sapl/blob/master/sapl/legacy/migracao_documentos.py#L17\n", "before_files": [{"content": "import mimetypes\nimport os\nimport re\n\nimport magic\nfrom django.db.models.signals import post_delete, post_save\n\nfrom sapl.base.models import CasaLegislativa\nfrom sapl.materia.models import (DocumentoAcessorio, MateriaLegislativa,\n Proposicao)\nfrom sapl.norma.models import NormaJuridica\nfrom sapl.parlamentares.models import Parlamentar\nfrom sapl.protocoloadm.models import (DocumentoAcessorioAdministrativo,\n DocumentoAdministrativo)\nfrom sapl.sessao.models import SessaoPlenaria\nfrom sapl.settings import MEDIA_ROOT\nfrom sapl.utils import delete_texto, save_texto\n\n# MIGRA\u00c7\u00c3O DE DOCUMENTOS ###################################################\nEXTENSOES = {\n 'application/msword': '.doc',\n 'application/pdf': '.pdf',\n 'application/vnd.oasis.opendocument.text': '.odt',\n 'application/vnd.openxmlformats-officedocument.wordprocessingml.document': '.docx', # noqa\n 'application/xml': '.xml',\n 'application/zip': '.zip',\n 'image/jpeg': '.jpeg',\n 'image/png': '.png',\n 'text/html': '.html',\n 'text/rtf': '.rtf',\n 'text/x-python': '.py',\n 'text/plain': '.ksh',\n 'text/plain': '.c',\n 'text/plain': '.h',\n 'text/plain': '.txt',\n 'text/plain': '.bat',\n 'text/plain': '.pl',\n 'text/plain': '.asc',\n 'text/plain': '.text',\n 'text/plain': '.pot',\n 'text/plain': '.brf',\n 'text/plain': '.srt',\n\n # sem extensao\n 'application/octet-stream': '', # bin\u00e1rio\n 'inode/x-empty': '', # vazio\n}\n\nDOCS = {\n CasaLegislativa: [(\n 'logotipo',\n 'props_sapl/logo_casa.gif',\n 'casa/logotipo/logo_casa.gif')],\n Parlamentar: [(\n 'fotografia',\n 'parlamentar/fotos/{}_foto_parlamentar',\n 'public/parlamentar/{0}/{0}_foto_parlamentar{1}')],\n MateriaLegislativa: [(\n 'texto_original',\n 'materia/{}_texto_integral',\n 'public/materialegislativa/{2}/{0}/{0}_texto_integral{1}')],\n DocumentoAcessorio: [(\n 'arquivo',\n 'materia/{}',\n 'public/documentoacessorio/{2}/{0}/{0}{1}')],\n NormaJuridica: [(\n 'texto_integral',\n 'norma_juridica/{}_texto_integral',\n 'public/normajuridica/{2}/{0}/{0}_texto_integral{1}')],\n SessaoPlenaria: [\n ('upload_ata',\n 'ata_sessao/{}_ata_sessao',\n 'public/sessaoplenaria/{0}/ata/{0}_ata_sessao{1}'),\n ('upload_anexo',\n 'anexo_sessao/{}_texto_anexado',\n 'public/sessaoplenaria/{0}/anexo/{0}_texto_anexado{1}')\n ],\n Proposicao: [(\n 'texto_original',\n 'proposicao/{}',\n 'private/proposicao/{0}/{0}{1}')],\n DocumentoAdministrativo: [(\n 'texto_integral',\n 'administrativo/{}_texto_integral',\n 'private/documentoadministrativo/{0}/{0}_texto_integral{1}')\n ],\n DocumentoAcessorioAdministrativo: [(\n 'arquivo',\n 'administrativo/{}',\n 'private/documentoacessorioadministrativo/{0}/{0}_acessorio_administrativo{1}')\n ],\n}\n\nDOCS = {tipo: [(campo,\n 
os.path.join('sapl_documentos', origem),\n os.path.join('sapl', destino))\n for campo, origem, destino in campos]\n for tipo, campos in DOCS.items()}\n\n\ndef em_media(caminho):\n return os.path.join(MEDIA_ROOT, caminho)\n\n\ndef mover_documento(origem, destino):\n origem, destino = [em_media(c) if not os.path.isabs(c) else c\n for c in (origem, destino)]\n os.makedirs(os.path.dirname(destino), exist_ok=True)\n os.rename(origem, destino)\n\n\ndef get_casa_legislativa():\n casa = CasaLegislativa.objects.first()\n if not casa:\n casa = CasaLegislativa.objects.create(**{k: 'PREENCHER...' for k in [\n 'codigo', 'nome', 'sigla', 'endereco', 'cep', 'municipio', 'uf',\n ]})\n return casa\n\n\ndef migrar_docs_logo():\n print('#### Migrando logotipo da casa ####')\n [(_, origem, destino)] = DOCS[CasaLegislativa]\n props_sapl = os.path.dirname(origem)\n\n # a pasta props_sapl deve conter apenas o origem e metadatas!\n # Edit: Aparentemente h\u00e1 diret\u00f3rio que cont\u00e9m properties ao inv\u00e9s de\n # metadata. O assert foi modificado para essa situa\u00e7\u00e3o.\n assert set(os.listdir(em_media(props_sapl))) < {\n 'logo_casa.gif', '.metadata', 'logo_casa.gif.metadata',\n '.properties', 'logo_casa.gif.properties', '.objects'}\n\n mover_documento(origem, destino)\n casa = get_casa_legislativa()\n casa.logotipo = destino\n casa.save()\n\n\ndef get_extensao(caminho):\n mime = magic.from_file(caminho, mime=True)\n try:\n return EXTENSOES[mime]\n except KeyError as e:\n raise Exception('\\n'.join([\n 'Extens\u00e3o n\u00e3o conhecida para o arquivo:',\n caminho,\n 'E mimetype:',\n mime,\n ' Algumas possibilidades s\u00e3o:', ] +\n [\" '{}': '{}',\".format(mime, ext)\n for ext in mimetypes.guess_all_extensions(mime)] +\n ['Atualize o c\u00f3digo do dicion\u00e1rio EXTENSOES!']\n )) from e\n\n\ndef migrar_docs_por_ids(tipo):\n for campo, base_origem, base_destino in DOCS[tipo]:\n print('#### Migrando {} de {} ####'.format(campo, tipo.__name__))\n\n dir_origem, nome_origem = os.path.split(em_media(base_origem))\n pat = re.compile('^{}$'.format(nome_origem.format('(\\d+)')))\n\n if not os.path.isdir(dir_origem):\n print(' >>> O diret\u00f3rio {} n\u00e3o existe! 
Abortado.'.format(\n dir_origem))\n continue\n\n for arq in os.listdir(dir_origem):\n match = pat.match(arq)\n if match:\n # associa documento ao objeto\n try:\n origem = os.path.join(dir_origem, match.group(0))\n id = match.group(1)\n obj = tipo.objects.get(pk=id)\n\n extensao = get_extensao(origem)\n if hasattr(obj, \"ano\"):\n destino = base_destino.format(id, extensao, obj.ano)\n elif isinstance(obj, DocumentoAcessorio):\n destino = base_destino.format(\n id, extensao, obj.materia.ano)\n else:\n destino = base_destino.format(id, extensao)\n mover_documento(origem, destino)\n\n setattr(obj, campo, destino)\n obj.save()\n except tipo.DoesNotExist:\n msg = ' {} (pk={}) n\u00e3o encontrado para documento em [{}]'\n print(msg.format(\n tipo.__name__, id, destino))\n\n\ndef desconecta_sinais_indexacao():\n post_save.disconnect(save_texto, NormaJuridica)\n post_save.disconnect(save_texto, DocumentoAcessorio)\n post_save.disconnect(save_texto, MateriaLegislativa)\n post_delete.disconnect(delete_texto, NormaJuridica)\n post_delete.disconnect(delete_texto, DocumentoAcessorio)\n post_delete.disconnect(delete_texto, MateriaLegislativa)\n\n\ndef conecta_sinais_indexacao():\n post_save.connect(save_texto, NormaJuridica)\n post_save.connect(save_texto, DocumentoAcessorio)\n post_save.connect(save_texto, MateriaLegislativa)\n post_delete.connect(delete_texto, NormaJuridica)\n post_delete.connect(delete_texto, DocumentoAcessorio)\n post_delete.connect(delete_texto, MateriaLegislativa)\n\n\ndef migrar_documentos():\n # precisamos excluir os sinais de post_save e post_delete para n\u00e3o que o\n # computador n\u00e3o trave com a cria\u00e7\u00e3o de threads desnecess\u00e1rias\n desconecta_sinais_indexacao()\n\n # aqui supomos que uma pasta chamada sapl_documentos est\u00e1 em MEDIA_ROOT\n # com o conte\u00fado da pasta de mesmo nome do zope\n # Os arquivos da pasta ser\u00e3o movidos para a nova estrutura e a pasta ser\u00e1\n # apagada\n migrar_docs_logo()\n for tipo in [\n Parlamentar,\n MateriaLegislativa,\n DocumentoAcessorio,\n NormaJuridica,\n SessaoPlenaria,\n Proposicao,\n DocumentoAdministrativo,\n \tDocumentoAcessorioAdministrativo,\n ]:\n migrar_docs_por_ids(tipo)\n\n sobrando = [os.path.join(dir, file)\n for (dir, _, files) in os.walk(em_media('sapl_documentos'))\n for file in files]\n if sobrando:\n print('\\n#### Encerrado ####\\n\\n'\n '{} documentos sobraram sem ser migrados!!!'.format(\n len(sobrando)))\n for doc in sobrando:\n print(' {}'. 
format(doc))\n #\n # reconex\u00e3o dos sinais desligados no inicio da migra\u00e7\u00e3o de documentos\n conecta_sinais_indexacao()\n", "path": "sapl/legacy/migracao_documentos.py"}], "after_files": [{"content": "import mimetypes\nimport os\nimport re\n\nimport magic\nfrom django.db.models.signals import post_delete, post_save\n\nfrom sapl.base.models import CasaLegislativa\nfrom sapl.materia.models import (DocumentoAcessorio, MateriaLegislativa,\n Proposicao)\nfrom sapl.norma.models import NormaJuridica\nfrom sapl.parlamentares.models import Parlamentar\nfrom sapl.protocoloadm.models import (DocumentoAcessorioAdministrativo,\n DocumentoAdministrativo)\nfrom sapl.sessao.models import SessaoPlenaria\nfrom sapl.settings import MEDIA_ROOT\n\n# MIGRA\u00c7\u00c3O DE DOCUMENTOS ###################################################\nEXTENSOES = {\n 'application/msword': '.doc',\n 'application/pdf': '.pdf',\n 'application/vnd.oasis.opendocument.text': '.odt',\n 'application/vnd.openxmlformats-officedocument.wordprocessingml.document': '.docx', # noqa\n 'application/xml': '.xml',\n 'application/zip': '.zip',\n 'image/jpeg': '.jpeg',\n 'image/png': '.png',\n 'text/html': '.html',\n 'text/rtf': '.rtf',\n 'text/x-python': '.py',\n 'text/plain': '.ksh',\n 'text/plain': '.c',\n 'text/plain': '.h',\n 'text/plain': '.txt',\n 'text/plain': '.bat',\n 'text/plain': '.pl',\n 'text/plain': '.asc',\n 'text/plain': '.text',\n 'text/plain': '.pot',\n 'text/plain': '.brf',\n 'text/plain': '.srt',\n\n # sem extensao\n 'application/octet-stream': '', # bin\u00e1rio\n 'inode/x-empty': '', # vazio\n}\n\nDOCS = {\n CasaLegislativa: [(\n 'logotipo',\n 'props_sapl/logo_casa.gif',\n 'casa/logotipo/logo_casa.gif')],\n Parlamentar: [(\n 'fotografia',\n 'parlamentar/fotos/{}_foto_parlamentar',\n 'public/parlamentar/{0}/{0}_foto_parlamentar{1}')],\n MateriaLegislativa: [(\n 'texto_original',\n 'materia/{}_texto_integral',\n 'public/materialegislativa/{2}/{0}/{0}_texto_integral{1}')],\n DocumentoAcessorio: [(\n 'arquivo',\n 'materia/{}',\n 'public/documentoacessorio/{2}/{0}/{0}{1}')],\n NormaJuridica: [(\n 'texto_integral',\n 'norma_juridica/{}_texto_integral',\n 'public/normajuridica/{2}/{0}/{0}_texto_integral{1}')],\n SessaoPlenaria: [\n ('upload_ata',\n 'ata_sessao/{}_ata_sessao',\n 'public/sessaoplenaria/{0}/ata/{0}_ata_sessao{1}'),\n ('upload_anexo',\n 'anexo_sessao/{}_texto_anexado',\n 'public/sessaoplenaria/{0}/anexo/{0}_texto_anexado{1}')\n ],\n Proposicao: [(\n 'texto_original',\n 'proposicao/{}',\n 'private/proposicao/{0}/{0}{1}')],\n DocumentoAdministrativo: [(\n 'texto_integral',\n 'administrativo/{}_texto_integral',\n 'private/documentoadministrativo/{0}/{0}_texto_integral{1}')\n ],\n DocumentoAcessorioAdministrativo: [(\n 'arquivo',\n 'administrativo/{}',\n 'private/documentoacessorioadministrativo/{0}/{0}_acessorio_administrativo{1}')\n ],\n}\n\nDOCS = {tipo: [(campo,\n os.path.join('sapl_documentos', origem),\n os.path.join('sapl', destino))\n for campo, origem, destino in campos]\n for tipo, campos in DOCS.items()}\n\n\ndef em_media(caminho):\n return os.path.join(MEDIA_ROOT, caminho)\n\n\ndef mover_documento(origem, destino):\n origem, destino = [em_media(c) if not os.path.isabs(c) else c\n for c in (origem, destino)]\n os.makedirs(os.path.dirname(destino), exist_ok=True)\n os.rename(origem, destino)\n\n\ndef get_casa_legislativa():\n casa = CasaLegislativa.objects.first()\n if not casa:\n casa = CasaLegislativa.objects.create(**{k: 'PREENCHER...' 
for k in [\n 'codigo', 'nome', 'sigla', 'endereco', 'cep', 'municipio', 'uf',\n ]})\n return casa\n\n\ndef migrar_docs_logo():\n print('#### Migrando logotipo da casa ####')\n [(_, origem, destino)] = DOCS[CasaLegislativa]\n props_sapl = os.path.dirname(origem)\n\n # a pasta props_sapl deve conter apenas o origem e metadatas!\n # Edit: Aparentemente h\u00e1 diret\u00f3rio que cont\u00e9m properties ao inv\u00e9s de\n # metadata. O assert foi modificado para essa situa\u00e7\u00e3o.\n assert set(os.listdir(em_media(props_sapl))) < {\n 'logo_casa.gif', '.metadata', 'logo_casa.gif.metadata',\n '.properties', 'logo_casa.gif.properties', '.objects'}\n\n mover_documento(origem, destino)\n casa = get_casa_legislativa()\n casa.logotipo = destino\n casa.save()\n\n\ndef get_extensao(caminho):\n mime = magic.from_file(caminho, mime=True)\n try:\n return EXTENSOES[mime]\n except KeyError as e:\n raise Exception('\\n'.join([\n 'Extens\u00e3o n\u00e3o conhecida para o arquivo:',\n caminho,\n 'E mimetype:',\n mime,\n ' Algumas possibilidades s\u00e3o:', ] +\n [\" '{}': '{}',\".format(mime, ext)\n for ext in mimetypes.guess_all_extensions(mime)] +\n ['Atualize o c\u00f3digo do dicion\u00e1rio EXTENSOES!']\n )) from e\n\n\ndef migrar_docs_por_ids(tipo):\n for campo, base_origem, base_destino in DOCS[tipo]:\n print('#### Migrando {} de {} ####'.format(campo, tipo.__name__))\n\n dir_origem, nome_origem = os.path.split(em_media(base_origem))\n pat = re.compile('^{}$'.format(nome_origem.format('(\\d+)')))\n\n if not os.path.isdir(dir_origem):\n print(' >>> O diret\u00f3rio {} n\u00e3o existe! Abortado.'.format(\n dir_origem))\n continue\n\n for arq in os.listdir(dir_origem):\n match = pat.match(arq)\n if match:\n # associa documento ao objeto\n try:\n origem = os.path.join(dir_origem, match.group(0))\n id = match.group(1)\n obj = tipo.objects.get(pk=id)\n\n extensao = get_extensao(origem)\n if hasattr(obj, \"ano\"):\n destino = base_destino.format(id, extensao, obj.ano)\n elif isinstance(obj, DocumentoAcessorio):\n destino = base_destino.format(\n id, extensao, obj.materia.ano)\n else:\n destino = base_destino.format(id, extensao)\n mover_documento(origem, destino)\n\n setattr(obj, campo, destino)\n obj.save()\n except tipo.DoesNotExist:\n msg = ' {} (pk={}) n\u00e3o encontrado para documento em [{}]'\n print(msg.format(\n tipo.__name__, id, destino))\n\n\ndef migrar_documentos():\n # aqui supomos que uma pasta chamada sapl_documentos est\u00e1 em MEDIA_ROOT\n # com o conte\u00fado da pasta de mesmo nome do zope\n # Os arquivos da pasta ser\u00e3o movidos para a nova estrutura e a pasta ser\u00e1\n # apagada\n migrar_docs_logo()\n for tipo in [\n Parlamentar,\n MateriaLegislativa,\n DocumentoAcessorio,\n NormaJuridica,\n SessaoPlenaria,\n Proposicao,\n DocumentoAdministrativo,\n \tDocumentoAcessorioAdministrativo,\n ]:\n migrar_docs_por_ids(tipo)\n\n sobrando = [os.path.join(dir, file)\n for (dir, _, files) in os.walk(em_media('sapl_documentos'))\n for file in files]\n if sobrando:\n print('\\n#### Encerrado ####\\n\\n'\n '{} documentos sobraram sem ser migrados!!!'.format(\n len(sobrando)))\n for doc in sobrando:\n print(' {}'. format(doc))\n", "path": "sapl/legacy/migracao_documentos.py"}]}
3,326
537
gh_patches_debug_13692
rasdani/github-patches
git_diff
pyca__cryptography-1992
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 1.0 release http://semver.org > Major version zero (0.y.z) is for initial development. Anything may change at any time. The public API should not be considered stable. Should we bump our version number all the way to 1.0 for the next release? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cryptography/__about__.py` Content: ``` 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 from __future__ import absolute_import, division, print_function 6 7 __all__ = [ 8 "__title__", "__summary__", "__uri__", "__version__", "__author__", 9 "__email__", "__license__", "__copyright__", 10 ] 11 12 __title__ = "cryptography" 13 __summary__ = ("cryptography is a package which provides cryptographic recipes" 14 " and primitives to Python developers.") 15 __uri__ = "https://github.com/pyca/cryptography" 16 17 __version__ = "0.10.dev1" 18 19 __author__ = "The cryptography developers" 20 __email__ = "[email protected]" 21 22 __license__ = "BSD or Apache License, Version 2.0" 23 __copyright__ = "Copyright 2013-2015 {0}".format(__author__) 24 ``` Path: `vectors/cryptography_vectors/__about__.py` Content: ``` 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 from __future__ import absolute_import, division, print_function 6 7 __all__ = [ 8 "__title__", "__summary__", "__uri__", "__version__", "__author__", 9 "__email__", "__license__", "__copyright__", 10 ] 11 12 __title__ = "cryptography_vectors" 13 __summary__ = "Test vectors for the cryptography package." 14 15 __uri__ = "https://github.com/pyca/cryptography" 16 17 __version__ = "0.10.dev1" 18 19 __author__ = "The cryptography developers" 20 __email__ = "[email protected]" 21 22 __license__ = "BSD or Apache License, Version 2.0" 23 __copyright__ = "Copyright 2013-2015 %s" % __author__ 24 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py --- a/src/cryptography/__about__.py +++ b/src/cryptography/__about__.py @@ -14,7 +14,7 @@ " and primitives to Python developers.") __uri__ = "https://github.com/pyca/cryptography" -__version__ = "0.10.dev1" +__version__ = "1.0.dev1" __author__ = "The cryptography developers" __email__ = "[email protected]" diff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py --- a/vectors/cryptography_vectors/__about__.py +++ b/vectors/cryptography_vectors/__about__.py @@ -14,7 +14,7 @@ __uri__ = "https://github.com/pyca/cryptography" -__version__ = "0.10.dev1" +__version__ = "1.0.dev1" __author__ = "The cryptography developers" __email__ = "[email protected]"
{"golden_diff": "diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py\n--- a/src/cryptography/__about__.py\n+++ b/src/cryptography/__about__.py\n@@ -14,7 +14,7 @@\n \" and primitives to Python developers.\")\n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"0.10.dev1\"\n+__version__ = \"1.0.dev1\"\n \n __author__ = \"The cryptography developers\"\n __email__ = \"[email protected]\"\ndiff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py\n--- a/vectors/cryptography_vectors/__about__.py\n+++ b/vectors/cryptography_vectors/__about__.py\n@@ -14,7 +14,7 @@\n \n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"0.10.dev1\"\n+__version__ = \"1.0.dev1\"\n \n __author__ = \"The cryptography developers\"\n __email__ = \"[email protected]\"\n", "issue": "1.0 release\nhttp://semver.org\n\n> Major version zero (0.y.z) is for initial development. Anything may change at any time. The public API should not be considered stable.\n\nShould we bump our version number all the way to 1.0 for the next release?\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n__title__ = \"cryptography\"\n__summary__ = (\"cryptography is a package which provides cryptographic recipes\"\n \" and primitives to Python developers.\")\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"0.10.dev1\"\n\n__author__ = \"The cryptography developers\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2015 {0}\".format(__author__)\n", "path": "src/cryptography/__about__.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"0.10.dev1\"\n\n__author__ = \"The cryptography developers\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2015 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n__title__ = \"cryptography\"\n__summary__ = (\"cryptography is a package which provides cryptographic recipes\"\n \" and primitives to Python developers.\")\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"1.0.dev1\"\n\n__author__ = \"The cryptography developers\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2015 {0}\".format(__author__)\n", "path": "src/cryptography/__about__.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"1.0.dev1\"\n\n__author__ = \"The cryptography developers\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2015 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py"}]}
848
245
gh_patches_debug_4441
rasdani/github-patches
git_diff
ytdl-org__youtube-dl-18281
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Problem with joj.sk ## Please follow the guide below - You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly - Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`) - Use the *Preview* tab to see what your issue will actually look like --- ### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.11.23*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. - [x] I've **verified** and **I assure** that I'm running youtube-dl **2018.11.23** ### Before submitting an *issue* make sure you have: - [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections - [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones - [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser ### What is the purpose of your *issue*? - [x] Bug report (encountered problems with youtube-dl) - [ ] Site support request (request for adding support for a new site) - [ ] Feature request (request for a new functionality) - [ ] Question - [ ] Other --- ### The following sections concretize particular purposed issues, you can erase any section (the contents between triple ---) not applicable to your *issue* --- ``` [debug] System config: [] [debug] User config: [] [debug] Custom config: [] [debug] Command-line args: ['https://videoportal.joj.sk/som-mama/epizoda/59636-som-mama', '-v'] [debug] Encodings: locale cp1250, fs mbcs, out cp852, pref cp1250 [debug] youtube-dl version 2018.11.23 [debug] Python version 3.4.4 (CPython) - Windows-10-10.0.17134 [debug] exe versions: ffmpeg N-69406-ga73c411, ffprobe 3.3.2 [debug] Proxy map: {} [generic] 59636-som-mama: Requesting header WARNING: Falling back on generic information extractor. [generic] 59636-som-mama: Downloading webpage [generic] 59636-som-mama: Extracting information [download] Downloading playlist: SOM MAMA | Som mama | Relácie A-Z | Videoportál - najlepšie seriály a relácie TV JOJ [generic] playlist SOM MAMA | Som mama | Relácie A-Z | Videoportál - najlepšie seriály a relácie TV JOJ: Collected 1 video ids (downloading 1 of them) [download] Downloading video 1 of 1 [Joj] 1a2dokc: Downloading webpage [Joj] 1a2dokc: Downloading XML ERROR: No video formats found; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output. 
Traceback (most recent call last): File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmp6bh3thhm\build\youtube_dl\YoutubeDL.py", line 792, in extract_info File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmp6bh3thhm\build\youtube_dl\extractor\common.py", line 508, in extract File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmp6bh3thhm\build\youtube_dl\extractor\joj.py", line 95, in _real_extract File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmp6bh3thhm\build\youtube_dl\extractor\common.py", line 1292, in _sort_formats youtube_dl.utils.ExtractorError: No video formats found; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output. ``` --- ### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**): - Single video: https://videoportal.joj.sk/som-mama/epizoda/59636-som-mama --- ### Description of your *issue*, suggested solution and other information Downloading is not working. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `youtube_dl/extractor/joj.py` Content: ``` 1 # coding: utf-8 2 from __future__ import unicode_literals 3 4 import re 5 6 from .common import InfoExtractor 7 from ..compat import compat_str 8 from ..utils import ( 9 int_or_none, 10 js_to_json, 11 try_get, 12 ) 13 14 15 class JojIE(InfoExtractor): 16 _VALID_URL = r'''(?x) 17 (?: 18 joj:| 19 https?://media\.joj\.sk/embed/ 20 ) 21 (?P<id>[^/?#^]+) 22 ''' 23 _TESTS = [{ 24 'url': 'https://media.joj.sk/embed/a388ec4c-6019-4a4a-9312-b1bee194e932', 25 'info_dict': { 26 'id': 'a388ec4c-6019-4a4a-9312-b1bee194e932', 27 'ext': 'mp4', 28 'title': 'NOVÉ BÝVANIE', 29 'thumbnail': r're:^https?://.*\.jpg$', 30 'duration': 3118, 31 } 32 }, { 33 'url': 'https://media.joj.sk/embed/9i1cxv', 34 'only_matching': True, 35 }, { 36 'url': 'joj:a388ec4c-6019-4a4a-9312-b1bee194e932', 37 'only_matching': True, 38 }, { 39 'url': 'joj:9i1cxv', 40 'only_matching': True, 41 }] 42 43 @staticmethod 44 def _extract_urls(webpage): 45 return [ 46 mobj.group('url') 47 for mobj in re.finditer( 48 r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//media\.joj\.sk/embed/(?:(?!\1).)+)\1', 49 webpage)] 50 51 def _real_extract(self, url): 52 video_id = self._match_id(url) 53 54 webpage = self._download_webpage( 55 'https://media.joj.sk/embed/%s' % video_id, video_id) 56 57 title = self._search_regex( 58 (r'videoTitle\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1', 59 r'<title>(?P<title>[^<]+)'), webpage, 'title', 60 default=None, group='title') or self._og_search_title(webpage) 61 62 bitrates = self._parse_json( 63 self._search_regex( 64 r'(?s)bitrates\s*=\s*({.+?});', webpage, 'bitrates', 65 default='{}'), 66 video_id, transform_source=js_to_json, fatal=False) 67 68 formats = [] 69 for format_url in try_get(bitrates, lambda x: x['mp4'], list) or []: 70 if isinstance(format_url, compat_str): 71 height = self._search_regex( 72 r'(\d+)[pP]\.', format_url, 'height', default=None) 73 formats.append({ 74 'url': format_url, 75 'format_id': '%sp' % height if height else None, 76 'height': int(height), 77 }) 78 if not formats: 79 playlist = self._download_xml( 80 'https://media.joj.sk/services/Video.php?clip=%s' % video_id, 81 video_id) 82 for 
file_el in playlist.findall('./files/file'): 83 path = file_el.get('path') 84 if not path: 85 continue 86 format_id = file_el.get('id') or file_el.get('label') 87 formats.append({ 88 'url': 'http://n16.joj.sk/storage/%s' % path.replace( 89 'dat/', '', 1), 90 'format_id': format_id, 91 'height': int_or_none(self._search_regex( 92 r'(\d+)[pP]', format_id or path, 'height', 93 default=None)), 94 }) 95 self._sort_formats(formats) 96 97 thumbnail = self._og_search_thumbnail(webpage) 98 99 duration = int_or_none(self._search_regex( 100 r'videoDuration\s*:\s*(\d+)', webpage, 'duration', fatal=False)) 101 102 return { 103 'id': video_id, 104 'title': title, 105 'thumbnail': thumbnail, 106 'duration': duration, 107 'formats': formats, 108 } 109 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/youtube_dl/extractor/joj.py b/youtube_dl/extractor/joj.py --- a/youtube_dl/extractor/joj.py +++ b/youtube_dl/extractor/joj.py @@ -61,7 +61,7 @@ bitrates = self._parse_json( self._search_regex( - r'(?s)bitrates\s*=\s*({.+?});', webpage, 'bitrates', + r'(?s)(?:src|bitrates)\s*=\s*({.+?});', webpage, 'bitrates', default='{}'), video_id, transform_source=js_to_json, fatal=False)
{"golden_diff": "diff --git a/youtube_dl/extractor/joj.py b/youtube_dl/extractor/joj.py\n--- a/youtube_dl/extractor/joj.py\n+++ b/youtube_dl/extractor/joj.py\n@@ -61,7 +61,7 @@\n \r\n bitrates = self._parse_json(\r\n self._search_regex(\r\n- r'(?s)bitrates\\s*=\\s*({.+?});', webpage, 'bitrates',\r\n+ r'(?s)(?:src|bitrates)\\s*=\\s*({.+?});', webpage, 'bitrates',\r\n default='{}'),\r\n video_id, transform_source=js_to_json, fatal=False)\n", "issue": "Problem with joj.sk\n## Please follow the guide below\r\n\r\n- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly\r\n- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)\r\n- Use the *Preview* tab to see what your issue will actually look like\r\n\r\n---\r\n\r\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.11.23*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2018.11.23**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n- [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser\r\n\r\n### What is the purpose of your *issue*?\r\n- [x] Bug report (encountered problems with youtube-dl)\r\n- [ ] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n---\r\n\r\n### The following sections concretize particular purposed issues, you can erase any section (the contents between triple ---) not applicable to your *issue*\r\n\r\n---\r\n\r\n```\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: ['https://videoportal.joj.sk/som-mama/epizoda/59636-som-mama', '-v']\r\n[debug] Encodings: locale cp1250, fs mbcs, out cp852, pref cp1250\r\n[debug] youtube-dl version 2018.11.23\r\n[debug] Python version 3.4.4 (CPython) - Windows-10-10.0.17134\r\n[debug] exe versions: ffmpeg N-69406-ga73c411, ffprobe 3.3.2\r\n[debug] Proxy map: {}\r\n[generic] 59636-som-mama: Requesting header\r\nWARNING: Falling back on generic information extractor.\r\n[generic] 59636-som-mama: Downloading webpage\r\n[generic] 59636-som-mama: Extracting information\r\n[download] Downloading playlist: SOM MAMA | Som mama | Rel\u00e1cie A-Z | Videoport\u00e1l - najlep\u0161ie seri\u00e1ly a rel\u00e1cie TV JOJ\r\n[generic] playlist SOM MAMA | Som mama | Rel\u00e1cie A-Z | Videoport\u00e1l - najlep\u0161ie seri\u00e1ly a rel\u00e1cie TV JOJ: Collected 1 video ids (downloading 1 of them)\r\n[download] Downloading video 1 of 1\r\n[Joj] 1a2dokc: Downloading webpage\r\n[Joj] 1a2dokc: Downloading XML\r\nERROR: No video formats found; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. 
Be sure to call youtube-dl with the --verbose flag and include its complete output.\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmp6bh3thhm\\build\\youtube_dl\\YoutubeDL.py\", line 792, in extract_info\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmp6bh3thhm\\build\\youtube_dl\\extractor\\common.py\", line 508, in extract\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmp6bh3thhm\\build\\youtube_dl\\extractor\\joj.py\", line 95, in _real_extract\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmp6bh3thhm\\build\\youtube_dl\\extractor\\common.py\", line 1292, in _sort_formats\r\nyoutube_dl.utils.ExtractorError: No video formats found; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\r\n\r\n```\r\n---\r\n\r\n### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**):\r\n- Single video: https://videoportal.joj.sk/som-mama/epizoda/59636-som-mama\r\n\r\n---\r\n\r\n### Description of your *issue*, suggested solution and other information\r\nDownloading is not working.\n", "before_files": [{"content": "# coding: utf-8\r\nfrom __future__ import unicode_literals\r\n\r\nimport re\r\n\r\nfrom .common import InfoExtractor\r\nfrom ..compat import compat_str\r\nfrom ..utils import (\r\n int_or_none,\r\n js_to_json,\r\n try_get,\r\n)\r\n\r\n\r\nclass JojIE(InfoExtractor):\r\n _VALID_URL = r'''(?x)\r\n (?:\r\n joj:|\r\n https?://media\\.joj\\.sk/embed/\r\n )\r\n (?P<id>[^/?#^]+)\r\n '''\r\n _TESTS = [{\r\n 'url': 'https://media.joj.sk/embed/a388ec4c-6019-4a4a-9312-b1bee194e932',\r\n 'info_dict': {\r\n 'id': 'a388ec4c-6019-4a4a-9312-b1bee194e932',\r\n 'ext': 'mp4',\r\n 'title': 'NOV\u00c9 B\u00ddVANIE',\r\n 'thumbnail': r're:^https?://.*\\.jpg$',\r\n 'duration': 3118,\r\n }\r\n }, {\r\n 'url': 'https://media.joj.sk/embed/9i1cxv',\r\n 'only_matching': True,\r\n }, {\r\n 'url': 'joj:a388ec4c-6019-4a4a-9312-b1bee194e932',\r\n 'only_matching': True,\r\n }, {\r\n 'url': 'joj:9i1cxv',\r\n 'only_matching': True,\r\n }]\r\n\r\n @staticmethod\r\n def _extract_urls(webpage):\r\n return [\r\n mobj.group('url')\r\n for mobj in re.finditer(\r\n r'<iframe\\b[^>]+\\bsrc=([\"\\'])(?P<url>(?:https?:)?//media\\.joj\\.sk/embed/(?:(?!\\1).)+)\\1',\r\n webpage)]\r\n\r\n def _real_extract(self, url):\r\n video_id = self._match_id(url)\r\n\r\n webpage = self._download_webpage(\r\n 'https://media.joj.sk/embed/%s' % video_id, video_id)\r\n\r\n title = self._search_regex(\r\n (r'videoTitle\\s*:\\s*([\"\\'])(?P<title>(?:(?!\\1).)+)\\1',\r\n r'<title>(?P<title>[^<]+)'), webpage, 'title',\r\n default=None, group='title') or self._og_search_title(webpage)\r\n\r\n bitrates = self._parse_json(\r\n self._search_regex(\r\n r'(?s)bitrates\\s*=\\s*({.+?});', webpage, 'bitrates',\r\n default='{}'),\r\n video_id, transform_source=js_to_json, fatal=False)\r\n\r\n formats = []\r\n for format_url in try_get(bitrates, lambda x: x['mp4'], list) or []:\r\n if isinstance(format_url, compat_str):\r\n height = self._search_regex(\r\n r'(\\d+)[pP]\\.', format_url, 'height', default=None)\r\n formats.append({\r\n 'url': format_url,\r\n 'format_id': '%sp' % height if height else None,\r\n 'height': 
int(height),\r\n })\r\n if not formats:\r\n playlist = self._download_xml(\r\n 'https://media.joj.sk/services/Video.php?clip=%s' % video_id,\r\n video_id)\r\n for file_el in playlist.findall('./files/file'):\r\n path = file_el.get('path')\r\n if not path:\r\n continue\r\n format_id = file_el.get('id') or file_el.get('label')\r\n formats.append({\r\n 'url': 'http://n16.joj.sk/storage/%s' % path.replace(\r\n 'dat/', '', 1),\r\n 'format_id': format_id,\r\n 'height': int_or_none(self._search_regex(\r\n r'(\\d+)[pP]', format_id or path, 'height',\r\n default=None)),\r\n })\r\n self._sort_formats(formats)\r\n\r\n thumbnail = self._og_search_thumbnail(webpage)\r\n\r\n duration = int_or_none(self._search_regex(\r\n r'videoDuration\\s*:\\s*(\\d+)', webpage, 'duration', fatal=False))\r\n\r\n return {\r\n 'id': video_id,\r\n 'title': title,\r\n 'thumbnail': thumbnail,\r\n 'duration': duration,\r\n 'formats': formats,\r\n }\r\n", "path": "youtube_dl/extractor/joj.py"}], "after_files": [{"content": "# coding: utf-8\r\nfrom __future__ import unicode_literals\r\n\r\nimport re\r\n\r\nfrom .common import InfoExtractor\r\nfrom ..compat import compat_str\r\nfrom ..utils import (\r\n int_or_none,\r\n js_to_json,\r\n try_get,\r\n)\r\n\r\n\r\nclass JojIE(InfoExtractor):\r\n _VALID_URL = r'''(?x)\r\n (?:\r\n joj:|\r\n https?://media\\.joj\\.sk/embed/\r\n )\r\n (?P<id>[^/?#^]+)\r\n '''\r\n _TESTS = [{\r\n 'url': 'https://media.joj.sk/embed/a388ec4c-6019-4a4a-9312-b1bee194e932',\r\n 'info_dict': {\r\n 'id': 'a388ec4c-6019-4a4a-9312-b1bee194e932',\r\n 'ext': 'mp4',\r\n 'title': 'NOV\u00c9 B\u00ddVANIE',\r\n 'thumbnail': r're:^https?://.*\\.jpg$',\r\n 'duration': 3118,\r\n }\r\n }, {\r\n 'url': 'https://media.joj.sk/embed/9i1cxv',\r\n 'only_matching': True,\r\n }, {\r\n 'url': 'joj:a388ec4c-6019-4a4a-9312-b1bee194e932',\r\n 'only_matching': True,\r\n }, {\r\n 'url': 'joj:9i1cxv',\r\n 'only_matching': True,\r\n }]\r\n\r\n @staticmethod\r\n def _extract_urls(webpage):\r\n return [\r\n mobj.group('url')\r\n for mobj in re.finditer(\r\n r'<iframe\\b[^>]+\\bsrc=([\"\\'])(?P<url>(?:https?:)?//media\\.joj\\.sk/embed/(?:(?!\\1).)+)\\1',\r\n webpage)]\r\n\r\n def _real_extract(self, url):\r\n video_id = self._match_id(url)\r\n\r\n webpage = self._download_webpage(\r\n 'https://media.joj.sk/embed/%s' % video_id, video_id)\r\n\r\n title = self._search_regex(\r\n (r'videoTitle\\s*:\\s*([\"\\'])(?P<title>(?:(?!\\1).)+)\\1',\r\n r'<title>(?P<title>[^<]+)'), webpage, 'title',\r\n default=None, group='title') or self._og_search_title(webpage)\r\n\r\n bitrates = self._parse_json(\r\n self._search_regex(\r\n r'(?s)(?:src|bitrates)\\s*=\\s*({.+?});', webpage, 'bitrates',\r\n default='{}'),\r\n video_id, transform_source=js_to_json, fatal=False)\r\n\r\n formats = []\r\n for format_url in try_get(bitrates, lambda x: x['mp4'], list) or []:\r\n if isinstance(format_url, compat_str):\r\n height = self._search_regex(\r\n r'(\\d+)[pP]\\.', format_url, 'height', default=None)\r\n formats.append({\r\n 'url': format_url,\r\n 'format_id': '%sp' % height if height else None,\r\n 'height': int(height),\r\n })\r\n if not formats:\r\n playlist = self._download_xml(\r\n 'https://media.joj.sk/services/Video.php?clip=%s' % video_id,\r\n video_id)\r\n for file_el in playlist.findall('./files/file'):\r\n path = file_el.get('path')\r\n if not path:\r\n continue\r\n format_id = file_el.get('id') or file_el.get('label')\r\n formats.append({\r\n 'url': 'http://n16.joj.sk/storage/%s' % path.replace(\r\n 'dat/', '', 1),\r\n 'format_id': format_id,\r\n 
'height': int_or_none(self._search_regex(\r\n r'(\\d+)[pP]', format_id or path, 'height',\r\n default=None)),\r\n })\r\n self._sort_formats(formats)\r\n\r\n thumbnail = self._og_search_thumbnail(webpage)\r\n\r\n duration = int_or_none(self._search_regex(\r\n r'videoDuration\\s*:\\s*(\\d+)', webpage, 'duration', fatal=False))\r\n\r\n return {\r\n 'id': video_id,\r\n 'title': title,\r\n 'thumbnail': thumbnail,\r\n 'duration': duration,\r\n 'formats': formats,\r\n }\r\n", "path": "youtube_dl/extractor/joj.py"}]}
2,701
149
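The joj.sk record above boils down to a one-line regex change: the extractor stopped finding formats, presumably because the embed page now defines the stream map as `src = {...}` rather than `bitrates = {...}`, and the golden diff widens the pattern to accept either name. A minimal, self-contained sketch of that behaviour, using made-up page snippets rather than real joj.sk markup:

```python
import re

# Hypothetical embed-page snippets; actual joj.sk markup may differ.
OLD_PAGE = "var bitrates = {'mp4': ['//n.joj.sk/storage/clip-720p.mp4']};"
NEW_PAGE = "var src = {'mp4': ['//n.joj.sk/storage/clip-720p.mp4']};"

OLD_RE = r"(?s)bitrates\s*=\s*({.+?});"          # pattern before the patch
NEW_RE = r"(?s)(?:src|bitrates)\s*=\s*({.+?});"  # pattern after the patch

for name, page in (("old markup", OLD_PAGE), ("new markup", NEW_PAGE)):
    print(name, bool(re.search(OLD_RE, page)), bool(re.search(NEW_RE, page)))
# old markup True True
# new markup False True  <- the widened pattern still captures the JSON blob,
#                           which _parse_json then turns into format URLs.
```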
gh_patches_debug_18574
rasdani/github-patches
git_diff
urllib3__urllib3-1819
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TestSNI:test_hostname_in_first_request_packet hangs My system supports IPv6 and resolves `localhost` to `127.0.0.1`. When I run the test suite on my system, `TestSNI:test_hostname_in_first_request_packet` hangs. This also fail about 80 more tests with `MaxRetryError`. ### Expected Result 1. The test should pass. 2. The test should never hang. ### Actual Result The test uses `_start_server` to create a `SocketServerThread` that binds on an IPv6 address by default (if possible): ```python 70 HAS_IPV6_AND_DNS = _has_ipv6("localhost") ... 90 USE_IPV6 = HAS_IPV6_AND_DNS ... 101 if self.USE_IPV6: 102 sock = socket.socket(socket.AF_INET6) ``` but the client connection might still resolve to IPv4 address and use it: ```python 61 for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): 62 af, socktype, proto, canonname, sa = res 63 sock = None 64 try: 65 sock = socket.socket(af, socktype, proto) ... 103 if HAS_IPV6: 104 family = socket.AF_UNSPEC ``` The test waits until an event is set, which is never, because the server socket never gets accepted. ### Reproduction Steps ``` $ cat noxfile-single.patch 30a31,32 > "-o", "log_cli=true", > "-k", "test_hostname_in_first_request_packet", $ patch noxfile.py noxfile-single.patch patching file noxfile.py $ nox --sessions test-2.7 nox > Running session test-2.7 nox > Creating virtual environment (virtualenv) using python2.7 in .nox/test-2-7 nox > pip install -r dev-requirements.txt nox > pip install .[socks,secure,brotli] nox > pip --version pip 20.0.2 from /home/user/dev/urllib3/.nox/test-2-7/lib/python2.7/site-packages/pip (python 2.7) nox > python --version Python 2.7.17 nox > python -c import struct; print(struct.calcsize('P') * 8) 64 nox > python -m OpenSSL.debug pyOpenSSL: 19.1.0 cryptography: 2.8 cffi: 1.14.0 cryptography's compiled against OpenSSL: OpenSSL 1.1.1d 10 Sep 2019 cryptography's linked OpenSSL: OpenSSL 1.1.1d 10 Sep 2019 Pythons's OpenSSL: OpenSSL 1.1.1 11 Sep 2018 Python executable: /home/user/dev/urllib3/.nox/test-2-7/bin/python Python version: 2.7.17 (default, Nov 7 2019, 10:07:09) [GCC 7.4.0] Platform: linux2 sys.path: ['', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/usr/lib/python2.7/lib-old', '/usr/lib/python2.7/lib-dynload', '/home/user/dev/urllib3/.nox/test-2-7/lib/python2.7/site-packages'] nox > coverage run --parallel-mode -m pytest -o log_cli=true -k test_hostname_in_first_request_packet -r a --tb=native --no-success-flaky-report test/ ==================================== test session starts ==================================== platform linux2 -- Python 2.7.17, pytest-4.6.6, py-1.8.1, pluggy-0.13.1 rootdir: /home/user/dev/urllib3, inifile: setup.cfg plugins: flaky-3.6.1, timeout-1.3.3 collected 1342 items / 1339 deselected / 3 selected test/contrib/test_pyopenssl.py::TestSNI::test_hostname_in_first_request_packet [hangs] ``` ### System Information ``` $ git rev-parse --short HEAD eee53a69 $ cat /etc/hosts 127.0.0.1 localhost 127.0.1.1 ubuntu # The following lines are desirable for IPv6 capable hosts ::1 ip6-localhost ip6-loopback fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters ``` ### Fix I can add a timeout to the test, but that's a solution to the root problem. 
I suggest that `ServerSocketThread` treat a system where `localhost` does not resolve to `::1` as having insane IPv6 configuration and use the current IPv4 fallback mechanism. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dummyserver/server.py` Content: ``` 1 #!/usr/bin/env python 2 3 """ 4 Dummy server used for unit testing. 5 """ 6 from __future__ import print_function 7 8 import logging 9 import os 10 import random 11 import string 12 import sys 13 import threading 14 import socket 15 import warnings 16 import ssl 17 from datetime import datetime 18 19 from urllib3.exceptions import HTTPWarning 20 21 from cryptography.hazmat.backends import default_backend 22 from cryptography.hazmat.primitives import serialization 23 import tornado.httpserver 24 import tornado.ioloop 25 import tornado.netutil 26 import tornado.web 27 import trustme 28 29 30 log = logging.getLogger(__name__) 31 32 CERTS_PATH = os.path.join(os.path.dirname(__file__), "certs") 33 DEFAULT_CERTS = { 34 "certfile": os.path.join(CERTS_PATH, "server.crt"), 35 "keyfile": os.path.join(CERTS_PATH, "server.key"), 36 "cert_reqs": ssl.CERT_OPTIONAL, 37 "ca_certs": os.path.join(CERTS_PATH, "cacert.pem"), 38 } 39 DEFAULT_CA = os.path.join(CERTS_PATH, "cacert.pem") 40 DEFAULT_CA_KEY = os.path.join(CERTS_PATH, "cacert.key") 41 42 43 def _has_ipv6(host): 44 """ Returns True if the system can bind an IPv6 address. """ 45 sock = None 46 has_ipv6 = False 47 48 if socket.has_ipv6: 49 # has_ipv6 returns true if cPython was compiled with IPv6 support. 50 # It does not tell us if the system has IPv6 support enabled. To 51 # determine that we must bind to an IPv6 address. 52 # https://github.com/urllib3/urllib3/pull/611 53 # https://bugs.python.org/issue658327 54 try: 55 sock = socket.socket(socket.AF_INET6) 56 sock.bind((host, 0)) 57 has_ipv6 = True 58 except Exception: 59 pass 60 61 if sock: 62 sock.close() 63 return has_ipv6 64 65 66 # Some systems may have IPv6 support but DNS may not be configured 67 # properly. We can not count that localhost will resolve to ::1 on all 68 # systems. See https://github.com/urllib3/urllib3/pull/611 and 69 # https://bugs.python.org/issue18792 70 HAS_IPV6_AND_DNS = _has_ipv6("localhost") 71 HAS_IPV6 = _has_ipv6("::1") 72 73 74 # Different types of servers we have: 75 76 77 class NoIPv6Warning(HTTPWarning): 78 "IPv6 is not available" 79 pass 80 81 82 class SocketServerThread(threading.Thread): 83 """ 84 :param socket_handler: Callable which receives a socket argument for one 85 request. 86 :param ready_event: Event which gets set when the socket handler is 87 ready to receive requests. 88 """ 89 90 USE_IPV6 = HAS_IPV6_AND_DNS 91 92 def __init__(self, socket_handler, host="localhost", port=8081, ready_event=None): 93 threading.Thread.__init__(self) 94 self.daemon = True 95 96 self.socket_handler = socket_handler 97 self.host = host 98 self.ready_event = ready_event 99 100 def _start_server(self): 101 if self.USE_IPV6: 102 sock = socket.socket(socket.AF_INET6) 103 else: 104 warnings.warn("No IPv6 support. 
Falling back to IPv4.", NoIPv6Warning) 105 sock = socket.socket(socket.AF_INET) 106 if sys.platform != "win32": 107 sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 108 sock.bind((self.host, 0)) 109 self.port = sock.getsockname()[1] 110 111 # Once listen() returns, the server socket is ready 112 sock.listen(1) 113 114 if self.ready_event: 115 self.ready_event.set() 116 117 self.socket_handler(sock) 118 sock.close() 119 120 def run(self): 121 self.server = self._start_server() 122 123 124 def run_tornado_app(app, io_loop, certs, scheme, host): 125 assert io_loop == tornado.ioloop.IOLoop.current() 126 127 # We can't use fromtimestamp(0) because of CPython issue 29097, so we'll 128 # just construct the datetime object directly. 129 app.last_req = datetime(1970, 1, 1) 130 131 if scheme == "https": 132 http_server = tornado.httpserver.HTTPServer(app, ssl_options=certs) 133 else: 134 http_server = tornado.httpserver.HTTPServer(app) 135 136 sockets = tornado.netutil.bind_sockets(None, address=host) 137 port = sockets[0].getsockname()[1] 138 http_server.add_sockets(sockets) 139 return http_server, port 140 141 142 def run_loop_in_thread(io_loop): 143 t = threading.Thread(target=io_loop.start) 144 t.start() 145 return t 146 147 148 def get_unreachable_address(): 149 while True: 150 host = "".join(random.choice(string.ascii_lowercase) for _ in range(60)) 151 sockaddr = (host, 54321) 152 153 # check if we are really "lucky" and hit an actual server 154 try: 155 s = socket.create_connection(sockaddr) 156 except socket.error: 157 return sockaddr 158 else: 159 s.close() 160 161 162 if __name__ == "__main__": 163 # For debugging dummyserver itself - python -m dummyserver.server 164 from .testcase import TestingApp 165 166 host = "127.0.0.1" 167 168 io_loop = tornado.ioloop.IOLoop.current() 169 app = tornado.web.Application([(r".*", TestingApp)]) 170 server, port = run_tornado_app(app, io_loop, None, "http", host) 171 server_thread = run_loop_in_thread(io_loop) 172 173 print("Listening on http://{host}:{port}".format(host=host, port=port)) 174 175 176 def encrypt_key_pem(private_key_pem, password): 177 private_key = serialization.load_pem_private_key( 178 private_key_pem.bytes(), password=None, backend=default_backend() 179 ) 180 encrypted_key = private_key.private_bytes( 181 serialization.Encoding.PEM, 182 serialization.PrivateFormat.TraditionalOpenSSL, 183 serialization.BestAvailableEncryption(password), 184 ) 185 return trustme.Blob(encrypted_key) 186 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dummyserver/server.py b/dummyserver/server.py --- a/dummyserver/server.py +++ b/dummyserver/server.py @@ -40,6 +40,20 @@ DEFAULT_CA_KEY = os.path.join(CERTS_PATH, "cacert.key") +def _resolves_to_ipv6(host): + """ Returns True if the system resolves host to an IPv6 address by default. """ + resolves_to_ipv6 = False + try: + for res in socket.getaddrinfo(host, None, socket.AF_UNSPEC): + af, _, _, _, _ = res + if af == socket.AF_INET6: + resolves_to_ipv6 = True + except socket.gaierror: + pass + + return resolves_to_ipv6 + + def _has_ipv6(host): """ Returns True if the system can bind an IPv6 address. """ sock = None @@ -54,7 +68,7 @@ try: sock = socket.socket(socket.AF_INET6) sock.bind((host, 0)) - has_ipv6 = True + has_ipv6 = _resolves_to_ipv6("localhost") except Exception: pass
{"golden_diff": "diff --git a/dummyserver/server.py b/dummyserver/server.py\n--- a/dummyserver/server.py\n+++ b/dummyserver/server.py\n@@ -40,6 +40,20 @@\n DEFAULT_CA_KEY = os.path.join(CERTS_PATH, \"cacert.key\")\n \n \n+def _resolves_to_ipv6(host):\n+ \"\"\" Returns True if the system resolves host to an IPv6 address by default. \"\"\"\n+ resolves_to_ipv6 = False\n+ try:\n+ for res in socket.getaddrinfo(host, None, socket.AF_UNSPEC):\n+ af, _, _, _, _ = res\n+ if af == socket.AF_INET6:\n+ resolves_to_ipv6 = True\n+ except socket.gaierror:\n+ pass\n+\n+ return resolves_to_ipv6\n+\n+\n def _has_ipv6(host):\n \"\"\" Returns True if the system can bind an IPv6 address. \"\"\"\n sock = None\n@@ -54,7 +68,7 @@\n try:\n sock = socket.socket(socket.AF_INET6)\n sock.bind((host, 0))\n- has_ipv6 = True\n+ has_ipv6 = _resolves_to_ipv6(\"localhost\")\n except Exception:\n pass\n", "issue": "TestSNI:test_hostname_in_first_request_packet hangs\nMy system supports IPv6 and resolves `localhost` to `127.0.0.1`.\r\nWhen I run the test suite on my system, `TestSNI:test_hostname_in_first_request_packet` hangs.\r\nThis also fail about 80 more tests with `MaxRetryError`.\r\n\r\n### Expected Result\r\n1. The test should pass.\r\n2. The test should never hang.\r\n\r\n### Actual Result\r\nThe test uses `_start_server` to create a `SocketServerThread` that binds on an IPv6 address by default (if possible):\r\n```python\r\n 70 HAS_IPV6_AND_DNS = _has_ipv6(\"localhost\")\r\n...\r\n 90 USE_IPV6 = HAS_IPV6_AND_DNS\r\n...\r\n101 if self.USE_IPV6:\r\n102 sock = socket.socket(socket.AF_INET6)\r\n```\r\nbut the client connection might still resolve to IPv4 address and use it:\r\n```python\r\n 61 for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):\r\n 62 af, socktype, proto, canonname, sa = res\r\n 63 sock = None\r\n 64 try:\r\n 65 sock = socket.socket(af, socktype, proto)\r\n...\r\n103 if HAS_IPV6:\r\n104 family = socket.AF_UNSPEC\r\n```\r\nThe test waits until an event is set, which is never, because the server socket never gets accepted.\r\n\r\n### Reproduction Steps\r\n```\r\n$ cat noxfile-single.patch\r\n30a31,32\r\n> \"-o\", \"log_cli=true\",\r\n> \"-k\", \"test_hostname_in_first_request_packet\",\r\n$ patch noxfile.py noxfile-single.patch\r\npatching file noxfile.py\r\n$ nox --sessions test-2.7\r\nnox > Running session test-2.7\r\nnox > Creating virtual environment (virtualenv) using python2.7 in .nox/test-2-7\r\nnox > pip install -r dev-requirements.txt\r\nnox > pip install .[socks,secure,brotli]\r\nnox > pip --version\r\npip 20.0.2 from /home/user/dev/urllib3/.nox/test-2-7/lib/python2.7/site-packages/pip (python 2.7)\r\nnox > python --version\r\nPython 2.7.17\r\nnox > python -c import struct; print(struct.calcsize('P') * 8)\r\n64\r\nnox > python -m OpenSSL.debug\r\npyOpenSSL: 19.1.0\r\ncryptography: 2.8\r\ncffi: 1.14.0\r\ncryptography's compiled against OpenSSL: OpenSSL 1.1.1d 10 Sep 2019\r\ncryptography's linked OpenSSL: OpenSSL 1.1.1d 10 Sep 2019\r\nPythons's OpenSSL: OpenSSL 1.1.1 11 Sep 2018\r\nPython executable: /home/user/dev/urllib3/.nox/test-2-7/bin/python\r\nPython version: 2.7.17 (default, Nov 7 2019, 10:07:09)\r\n[GCC 7.4.0]\r\nPlatform: linux2\r\nsys.path: ['', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/usr/lib/python2.7/lib-old', '/usr/lib/python2.7/lib-dynload', '/home/user/dev/urllib3/.nox/test-2-7/lib/python2.7/site-packages']\r\nnox > coverage run --parallel-mode -m pytest -o log_cli=true -k 
test_hostname_in_first_request_packet -r a --tb=native --no-success-flaky-report test/\r\n==================================== test session starts ====================================\r\nplatform linux2 -- Python 2.7.17, pytest-4.6.6, py-1.8.1, pluggy-0.13.1\r\nrootdir: /home/user/dev/urllib3, inifile: setup.cfg\r\nplugins: flaky-3.6.1, timeout-1.3.3\r\ncollected 1342 items / 1339 deselected / 3 selected\r\n\r\ntest/contrib/test_pyopenssl.py::TestSNI::test_hostname_in_first_request_packet\r\n[hangs]\r\n```\r\n\r\n### System Information\r\n```\r\n$ git rev-parse --short HEAD\r\neee53a69\r\n$ cat /etc/hosts\r\n127.0.0.1 localhost\r\n127.0.1.1 ubuntu\r\n\r\n# The following lines are desirable for IPv6 capable hosts\r\n::1 ip6-localhost ip6-loopback\r\nfe00::0 ip6-localnet\r\nff00::0 ip6-mcastprefix\r\nff02::1 ip6-allnodes\r\nff02::2 ip6-allrouters\r\n```\r\n\r\n### Fix\r\nI can add a timeout to the test, but that's a solution to the root problem.\r\nI suggest that `ServerSocketThread` treat a system where `localhost` does not resolve to `::1` as having insane IPv6 configuration and use the current IPv4 fallback mechanism.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nDummy server used for unit testing.\n\"\"\"\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport random\nimport string\nimport sys\nimport threading\nimport socket\nimport warnings\nimport ssl\nfrom datetime import datetime\n\nfrom urllib3.exceptions import HTTPWarning\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.netutil\nimport tornado.web\nimport trustme\n\n\nlog = logging.getLogger(__name__)\n\nCERTS_PATH = os.path.join(os.path.dirname(__file__), \"certs\")\nDEFAULT_CERTS = {\n \"certfile\": os.path.join(CERTS_PATH, \"server.crt\"),\n \"keyfile\": os.path.join(CERTS_PATH, \"server.key\"),\n \"cert_reqs\": ssl.CERT_OPTIONAL,\n \"ca_certs\": os.path.join(CERTS_PATH, \"cacert.pem\"),\n}\nDEFAULT_CA = os.path.join(CERTS_PATH, \"cacert.pem\")\nDEFAULT_CA_KEY = os.path.join(CERTS_PATH, \"cacert.key\")\n\n\ndef _has_ipv6(host):\n \"\"\" Returns True if the system can bind an IPv6 address. \"\"\"\n sock = None\n has_ipv6 = False\n\n if socket.has_ipv6:\n # has_ipv6 returns true if cPython was compiled with IPv6 support.\n # It does not tell us if the system has IPv6 support enabled. To\n # determine that we must bind to an IPv6 address.\n # https://github.com/urllib3/urllib3/pull/611\n # https://bugs.python.org/issue658327\n try:\n sock = socket.socket(socket.AF_INET6)\n sock.bind((host, 0))\n has_ipv6 = True\n except Exception:\n pass\n\n if sock:\n sock.close()\n return has_ipv6\n\n\n# Some systems may have IPv6 support but DNS may not be configured\n# properly. We can not count that localhost will resolve to ::1 on all\n# systems. 
See https://github.com/urllib3/urllib3/pull/611 and\n# https://bugs.python.org/issue18792\nHAS_IPV6_AND_DNS = _has_ipv6(\"localhost\")\nHAS_IPV6 = _has_ipv6(\"::1\")\n\n\n# Different types of servers we have:\n\n\nclass NoIPv6Warning(HTTPWarning):\n \"IPv6 is not available\"\n pass\n\n\nclass SocketServerThread(threading.Thread):\n \"\"\"\n :param socket_handler: Callable which receives a socket argument for one\n request.\n :param ready_event: Event which gets set when the socket handler is\n ready to receive requests.\n \"\"\"\n\n USE_IPV6 = HAS_IPV6_AND_DNS\n\n def __init__(self, socket_handler, host=\"localhost\", port=8081, ready_event=None):\n threading.Thread.__init__(self)\n self.daemon = True\n\n self.socket_handler = socket_handler\n self.host = host\n self.ready_event = ready_event\n\n def _start_server(self):\n if self.USE_IPV6:\n sock = socket.socket(socket.AF_INET6)\n else:\n warnings.warn(\"No IPv6 support. Falling back to IPv4.\", NoIPv6Warning)\n sock = socket.socket(socket.AF_INET)\n if sys.platform != \"win32\":\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((self.host, 0))\n self.port = sock.getsockname()[1]\n\n # Once listen() returns, the server socket is ready\n sock.listen(1)\n\n if self.ready_event:\n self.ready_event.set()\n\n self.socket_handler(sock)\n sock.close()\n\n def run(self):\n self.server = self._start_server()\n\n\ndef run_tornado_app(app, io_loop, certs, scheme, host):\n assert io_loop == tornado.ioloop.IOLoop.current()\n\n # We can't use fromtimestamp(0) because of CPython issue 29097, so we'll\n # just construct the datetime object directly.\n app.last_req = datetime(1970, 1, 1)\n\n if scheme == \"https\":\n http_server = tornado.httpserver.HTTPServer(app, ssl_options=certs)\n else:\n http_server = tornado.httpserver.HTTPServer(app)\n\n sockets = tornado.netutil.bind_sockets(None, address=host)\n port = sockets[0].getsockname()[1]\n http_server.add_sockets(sockets)\n return http_server, port\n\n\ndef run_loop_in_thread(io_loop):\n t = threading.Thread(target=io_loop.start)\n t.start()\n return t\n\n\ndef get_unreachable_address():\n while True:\n host = \"\".join(random.choice(string.ascii_lowercase) for _ in range(60))\n sockaddr = (host, 54321)\n\n # check if we are really \"lucky\" and hit an actual server\n try:\n s = socket.create_connection(sockaddr)\n except socket.error:\n return sockaddr\n else:\n s.close()\n\n\nif __name__ == \"__main__\":\n # For debugging dummyserver itself - python -m dummyserver.server\n from .testcase import TestingApp\n\n host = \"127.0.0.1\"\n\n io_loop = tornado.ioloop.IOLoop.current()\n app = tornado.web.Application([(r\".*\", TestingApp)])\n server, port = run_tornado_app(app, io_loop, None, \"http\", host)\n server_thread = run_loop_in_thread(io_loop)\n\n print(\"Listening on http://{host}:{port}\".format(host=host, port=port))\n\n\ndef encrypt_key_pem(private_key_pem, password):\n private_key = serialization.load_pem_private_key(\n private_key_pem.bytes(), password=None, backend=default_backend()\n )\n encrypted_key = private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.TraditionalOpenSSL,\n serialization.BestAvailableEncryption(password),\n )\n return trustme.Blob(encrypted_key)\n", "path": "dummyserver/server.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nDummy server used for unit testing.\n\"\"\"\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport random\nimport string\nimport sys\nimport 
threading\nimport socket\nimport warnings\nimport ssl\nfrom datetime import datetime\n\nfrom urllib3.exceptions import HTTPWarning\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.netutil\nimport tornado.web\nimport trustme\n\n\nlog = logging.getLogger(__name__)\n\nCERTS_PATH = os.path.join(os.path.dirname(__file__), \"certs\")\nDEFAULT_CERTS = {\n \"certfile\": os.path.join(CERTS_PATH, \"server.crt\"),\n \"keyfile\": os.path.join(CERTS_PATH, \"server.key\"),\n \"cert_reqs\": ssl.CERT_OPTIONAL,\n \"ca_certs\": os.path.join(CERTS_PATH, \"cacert.pem\"),\n}\nDEFAULT_CA = os.path.join(CERTS_PATH, \"cacert.pem\")\nDEFAULT_CA_KEY = os.path.join(CERTS_PATH, \"cacert.key\")\n\n\ndef _resolves_to_ipv6(host):\n \"\"\" Returns True if the system resolves host to an IPv6 address by default. \"\"\"\n resolves_to_ipv6 = False\n try:\n for res in socket.getaddrinfo(host, None, socket.AF_UNSPEC):\n af, _, _, _, _ = res\n if af == socket.AF_INET6:\n resolves_to_ipv6 = True\n except socket.gaierror:\n pass\n\n return resolves_to_ipv6\n\n\ndef _has_ipv6(host):\n \"\"\" Returns True if the system can bind an IPv6 address. \"\"\"\n sock = None\n has_ipv6 = False\n\n if socket.has_ipv6:\n # has_ipv6 returns true if cPython was compiled with IPv6 support.\n # It does not tell us if the system has IPv6 support enabled. To\n # determine that we must bind to an IPv6 address.\n # https://github.com/urllib3/urllib3/pull/611\n # https://bugs.python.org/issue658327\n try:\n sock = socket.socket(socket.AF_INET6)\n sock.bind((host, 0))\n has_ipv6 = _resolves_to_ipv6(\"localhost\")\n except Exception:\n pass\n\n if sock:\n sock.close()\n return has_ipv6\n\n\n# Some systems may have IPv6 support but DNS may not be configured\n# properly. We can not count that localhost will resolve to ::1 on all\n# systems. See https://github.com/urllib3/urllib3/pull/611 and\n# https://bugs.python.org/issue18792\nHAS_IPV6_AND_DNS = _has_ipv6(\"localhost\")\nHAS_IPV6 = _has_ipv6(\"::1\")\n\n\n# Different types of servers we have:\n\n\nclass NoIPv6Warning(HTTPWarning):\n \"IPv6 is not available\"\n pass\n\n\nclass SocketServerThread(threading.Thread):\n \"\"\"\n :param socket_handler: Callable which receives a socket argument for one\n request.\n :param ready_event: Event which gets set when the socket handler is\n ready to receive requests.\n \"\"\"\n\n USE_IPV6 = HAS_IPV6_AND_DNS\n\n def __init__(self, socket_handler, host=\"localhost\", port=8081, ready_event=None):\n threading.Thread.__init__(self)\n self.daemon = True\n\n self.socket_handler = socket_handler\n self.host = host\n self.ready_event = ready_event\n\n def _start_server(self):\n if self.USE_IPV6:\n sock = socket.socket(socket.AF_INET6)\n else:\n warnings.warn(\"No IPv6 support. 
Falling back to IPv4.\", NoIPv6Warning)\n sock = socket.socket(socket.AF_INET)\n if sys.platform != \"win32\":\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((self.host, 0))\n self.port = sock.getsockname()[1]\n\n # Once listen() returns, the server socket is ready\n sock.listen(1)\n\n if self.ready_event:\n self.ready_event.set()\n\n self.socket_handler(sock)\n sock.close()\n\n def run(self):\n self.server = self._start_server()\n\n\ndef run_tornado_app(app, io_loop, certs, scheme, host):\n assert io_loop == tornado.ioloop.IOLoop.current()\n\n # We can't use fromtimestamp(0) because of CPython issue 29097, so we'll\n # just construct the datetime object directly.\n app.last_req = datetime(1970, 1, 1)\n\n if scheme == \"https\":\n http_server = tornado.httpserver.HTTPServer(app, ssl_options=certs)\n else:\n http_server = tornado.httpserver.HTTPServer(app)\n\n sockets = tornado.netutil.bind_sockets(None, address=host)\n port = sockets[0].getsockname()[1]\n http_server.add_sockets(sockets)\n return http_server, port\n\n\ndef run_loop_in_thread(io_loop):\n t = threading.Thread(target=io_loop.start)\n t.start()\n return t\n\n\ndef get_unreachable_address():\n while True:\n host = \"\".join(random.choice(string.ascii_lowercase) for _ in range(60))\n sockaddr = (host, 54321)\n\n # check if we are really \"lucky\" and hit an actual server\n try:\n s = socket.create_connection(sockaddr)\n except socket.error:\n return sockaddr\n else:\n s.close()\n\n\nif __name__ == \"__main__\":\n # For debugging dummyserver itself - python -m dummyserver.server\n from .testcase import TestingApp\n\n host = \"127.0.0.1\"\n\n io_loop = tornado.ioloop.IOLoop.current()\n app = tornado.web.Application([(r\".*\", TestingApp)])\n server, port = run_tornado_app(app, io_loop, None, \"http\", host)\n server_thread = run_loop_in_thread(io_loop)\n\n print(\"Listening on http://{host}:{port}\".format(host=host, port=port))\n\n\ndef encrypt_key_pem(private_key_pem, password):\n private_key = serialization.load_pem_private_key(\n private_key_pem.bytes(), password=None, backend=default_backend()\n )\n encrypted_key = private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.TraditionalOpenSSL,\n serialization.BestAvailableEncryption(password),\n )\n return trustme.Blob(encrypted_key)\n", "path": "dummyserver/server.py"}]}
3,215
267
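The urllib3 record above resolves the hang by only letting the dummy test server bind an IPv6 socket when `localhost` actually resolves to an IPv6 address, so server and client agree on an address family. A standalone sketch of that check; the helper mirrors the one added in the diff but is reproduced here purely for illustration:

```python
import socket

def resolves_to_ipv6(host):
    # True if getaddrinfo() returns at least one AF_INET6 result for `host`.
    # This is the condition the patch requires before the test server is
    # allowed to listen on an AF_INET6 socket.
    try:
        return any(
            info[0] == socket.AF_INET6
            for info in socket.getaddrinfo(host, None, socket.AF_UNSPEC)
        )
    except socket.gaierror:
        return False

# On a machine whose /etc/hosts maps localhost only to 127.0.0.1 this prints
# False, so the server falls back to AF_INET and the client's own
# getaddrinfo()-driven connect can reach it instead of hanging.
print(resolves_to_ipv6("localhost"))
```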
gh_patches_debug_31586
rasdani/github-patches
git_diff
PennyLaneAI__pennylane-1405
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Hamiltonians created via addition produce incorrect results with `ExpvalCost(optimize=True)` Good afternoon, I was having problem with vqe when I set the flag: optmize = True . I noticed that there were errors when entering Hamiltonians of the form Z1@Z2 + Z2@Z1. We know it is the same as 2.Z1.Z2 but the optimizer seems to have some kind of problem with this kind of Hamiltonians (vqe with optimize = False works correctly in this situation). Although by preprocessing the Hamiltonian we avoid the problem, I was writing to take the problem into account. Greetings! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pennylane/grouping/group_observables.py` Content: ``` 1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """ 15 This module contains the high-level Pauli-word-partitioning functionality used in measurement optimization. 16 """ 17 18 from pennylane.wires import Wires 19 from pennylane.grouping.utils import ( 20 observables_to_binary_matrix, 21 binary_to_pauli, 22 are_identical_pauli_words, 23 qwc_complement_adj_matrix, 24 ) 25 from pennylane.grouping.graph_colouring import largest_first, recursive_largest_first 26 import numpy as np 27 28 GROUPING_TYPES = frozenset(["qwc", "commuting", "anticommuting"]) 29 GRAPH_COLOURING_METHODS = {"lf": largest_first, "rlf": recursive_largest_first} 30 31 32 class PauliGroupingStrategy: # pylint: disable=too-many-instance-attributes 33 """ 34 Class for partitioning a list of Pauli words according to some binary symmetric relation. 35 36 Partitions are defined by the binary symmetric relation of interest, e.g., all Pauli words in a 37 partition being mutually commuting. The partitioning is accomplished by formulating the list of 38 Pauli words as a graph where nodes represent Pauli words and edges between nodes denotes that 39 the two corresponding Pauli words satisfy the symmetric binary relation. 40 41 Obtaining the fewest number of partitions such that all Pauli terms within a partition mutually 42 satisfy the binary relation can then be accomplished by finding a partition of the graph nodes 43 such that each partition is a fully connected subgraph (a "clique"). The problem of finding the 44 optimal partitioning, i.e., the fewest number of cliques, is the minimum clique cover (MCC) 45 problem. The solution of MCC may be found by graph colouring on the complementary graph. Both 46 MCC and graph colouring are NP-Hard, so heuristic graph colouring algorithms are employed to 47 find approximate solutions in polynomial time. 
48 49 Args: 50 observables (list[Observable]): a list of Pauli words to be partitioned according to a 51 grouping strategy 52 grouping_type (str): the binary relation used to define partitions of 53 the Pauli words, can be ``'qwc'`` (qubit-wise commuting), ``'commuting'``, or 54 ``'anticommuting'``. 55 graph_colourer (str): the heuristic algorithm to employ for graph 56 colouring, can be ``'lf'`` (Largest First) or ``'rlf'`` (Recursive 57 Largest First) 58 59 Raises: 60 ValueError: if arguments specified for ``grouping_type`` or 61 ``graph_colourer`` are not recognized 62 """ 63 64 def __init__(self, observables, grouping_type="qwc", graph_colourer="rlf"): 65 66 if grouping_type.lower() not in GROUPING_TYPES: 67 raise ValueError( 68 "Grouping type must be one of: {}, instead got {}.".format( 69 GROUPING_TYPES, grouping_type 70 ) 71 ) 72 73 self.grouping_type = grouping_type.lower() 74 75 if graph_colourer.lower() not in GRAPH_COLOURING_METHODS.keys(): 76 raise ValueError( 77 "Graph colouring method must be one of: {}, instead got {}.".format( 78 list(GRAPH_COLOURING_METHODS.keys()), graph_colourer 79 ) 80 ) 81 82 self.graph_colourer = GRAPH_COLOURING_METHODS[graph_colourer.lower()] 83 self.observables = observables 84 self._wire_map = None 85 self._n_qubits = None 86 self.binary_observables = None 87 self.adj_matrix = None 88 self.grouped_paulis = None 89 90 def binary_repr(self, n_qubits=None, wire_map=None): 91 """Converts the list of Pauli words to a binary matrix. 92 93 Args: 94 n_qubits (int): number of qubits to specify dimension of binary vector representation 95 wire_map (dict): dictionary containing all wire labels used in the Pauli word as keys, 96 and unique integer labels as their values 97 98 Returns: 99 array[int]: a column matrix of the Pauli words in binary vector representation 100 """ 101 102 if wire_map is None: 103 self._wire_map = { 104 wire: c 105 for c, wire in enumerate( 106 Wires.all_wires([obs.wires for obs in self.observables]).tolist() 107 ) 108 } 109 110 else: 111 self._wire_map = wire_map 112 113 self._n_qubits = n_qubits 114 115 return observables_to_binary_matrix(self.observables, n_qubits, self._wire_map) 116 117 def complement_adj_matrix_for_operator(self): 118 """Constructs the adjacency matrix for the complement of the Pauli graph. 119 120 The adjacency matrix for an undirected graph of N vertices is an N by N symmetric binary 121 matrix, where matrix elements of 1 denote an edge, and matrix elements of 0 denote no edge. 122 123 Returns: 124 array[int]: the square and symmetric adjacency matrix 125 """ 126 127 if self.binary_observables is None: 128 self.binary_observables = self.binary_repr() 129 130 n_qubits = int(np.shape(self.binary_observables)[1] / 2) 131 132 if self.grouping_type == "qwc": 133 adj = qwc_complement_adj_matrix(self.binary_observables) 134 135 elif self.grouping_type in frozenset(["commuting", "anticommuting"]): 136 symplectic_form = np.block( 137 [ 138 [np.zeros((n_qubits, n_qubits)), np.eye(n_qubits)], 139 [np.eye(n_qubits), np.zeros((n_qubits, n_qubits))], 140 ] 141 ) 142 mat_prod = ( 143 self.binary_observables @ symplectic_form @ np.transpose(self.binary_observables) 144 ) 145 146 if self.grouping_type == "commuting": 147 148 adj = mat_prod % 2 149 150 elif self.grouping_type == "anticommuting": 151 152 adj = (mat_prod + 1) % 2 153 np.fill_diagonal(adj, 0) 154 155 return adj 156 157 def colour_pauli_graph(self): 158 """ 159 Runs the graph colouring heuristic algorithm to obtain the partitioned Pauli words. 
160 161 Returns: 162 list[list[Observable]]: a list of the obtained groupings. Each grouping is itself a 163 list of Pauli word ``Observable`` instances 164 """ 165 166 if self.adj_matrix is None: 167 self.adj_matrix = self.complement_adj_matrix_for_operator() 168 169 coloured_binary_paulis = self.graph_colourer(self.binary_observables, self.adj_matrix) 170 171 self.grouped_paulis = [ 172 [binary_to_pauli(pauli_word, wire_map=self._wire_map) for pauli_word in grouping] 173 for grouping in coloured_binary_paulis.values() 174 ] 175 176 return self.grouped_paulis 177 178 179 def group_observables(observables, coefficients=None, grouping_type="qwc", method="rlf"): 180 """Partitions a list of observables (Pauli operations and tensor products thereof) into 181 groupings according to a binary relation (qubit-wise commuting, fully-commuting, or 182 anticommuting). 183 184 Partitions are found by 1) mapping the list of observables to a graph where vertices represent 185 observables and edges encode the binary relation, then 2) solving minimum clique cover for the 186 graph using graph-colouring heuristic algorithms. 187 188 Args: 189 observables (list[Observable]): a list of Pauli word ``Observable`` instances (Pauli 190 operation instances and :class:`~.Tensor` instances thereof) 191 coefficients (list[float]): A list of float coefficients. If not specified, 192 output ``partitioned_coeffs`` is not returned. 193 grouping_type (str): The type of binary relation between Pauli words. 194 Can be ``'qwc'``, ``'commuting'``, or ``'anticommuting'``. 195 method (str): the graph coloring heuristic to use in solving minimum clique cover, which 196 can be ``'lf'`` (Largest First) or ``'rlf'`` (Recursive Largest First) 197 198 Returns: 199 tuple: 200 201 * list[list[Observable]]: A list of the obtained groupings. Each grouping 202 is itself a list of Pauli word ``Observable`` instances. 203 * list[list[float]]: A list of coefficient groupings. Each coefficient 204 grouping is itself a list of the grouping's corresponding coefficients. This is only 205 output if coefficients are specified. 206 207 Raises: 208 IndexError: if the input list of coefficients is not of the same length as the input list 209 of Pauli words 210 211 **Example** 212 213 >>> obs = [qml.PauliY(0), qml.PauliX(0) @ qml.PauliX(1), qml.PauliZ(1)] 214 >>> coeffs = [1.43, 4.21, 0.97] 215 >>> obs_groupings, coeffs_groupings = group_observables(obs, coeffs, 'anticommuting', 'lf') 216 >>> obs_groupings 217 [[PauliZ(wires=[1]), PauliX(wires=[0]) @ PauliX(wires=[1])], 218 [PauliY(wires=[0])]] 219 >>> coeffs_groupings 220 [[0.97, 4.21], [1.43]] 221 """ 222 223 if coefficients is not None: 224 if len(coefficients) != len(observables): 225 raise IndexError( 226 "The coefficients list must be the same length as the observables list." 
227 ) 228 229 pauli_grouping = PauliGroupingStrategy( 230 observables, grouping_type=grouping_type, graph_colourer=method 231 ) 232 partitioned_paulis = pauli_grouping.colour_pauli_graph() 233 234 if coefficients is None: 235 return partitioned_paulis 236 237 partitioned_coeffs = [[0] * len(g) for g in partitioned_paulis] 238 239 for i, partition in enumerate(partitioned_paulis): 240 for j, pauli_word in enumerate(partition): 241 for observable in observables: 242 if are_identical_pauli_words(pauli_word, observable): 243 partitioned_coeffs[i][j] = coefficients[observables.index(observable)] 244 break 245 246 return partitioned_paulis, partitioned_coeffs 247 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pennylane/grouping/group_observables.py b/pennylane/grouping/group_observables.py --- a/pennylane/grouping/group_observables.py +++ b/pennylane/grouping/group_observables.py @@ -15,15 +15,18 @@ This module contains the high-level Pauli-word-partitioning functionality used in measurement optimization. """ -from pennylane.wires import Wires +from copy import copy + +import numpy as np + +from pennylane.grouping.graph_colouring import largest_first, recursive_largest_first from pennylane.grouping.utils import ( - observables_to_binary_matrix, - binary_to_pauli, are_identical_pauli_words, + binary_to_pauli, + observables_to_binary_matrix, qwc_complement_adj_matrix, ) -from pennylane.grouping.graph_colouring import largest_first, recursive_largest_first -import numpy as np +from pennylane.wires import Wires GROUPING_TYPES = frozenset(["qwc", "commuting", "anticommuting"]) GRAPH_COLOURING_METHODS = {"lf": largest_first, "rlf": recursive_largest_first} @@ -236,11 +239,16 @@ partitioned_coeffs = [[0] * len(g) for g in partitioned_paulis] + observables = copy(observables) + coefficients = copy(coefficients) for i, partition in enumerate(partitioned_paulis): for j, pauli_word in enumerate(partition): for observable in observables: if are_identical_pauli_words(pauli_word, observable): - partitioned_coeffs[i][j] = coefficients[observables.index(observable)] + ind = observables.index(observable) + partitioned_coeffs[i][j] = coefficients[ind] + observables.pop(ind) + coefficients.pop(ind) break return partitioned_paulis, partitioned_coeffs
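The diff just above fixes the coefficient re-association in `group_observables`: when a Hamiltonian contains two observables that are identical Pauli words (e.g. `Z1@Z2` and `Z2@Z1`), `observables.index(observable)` keeps returning the first copy, so one coefficient can be picked up twice while the other is dropped; consuming matched entries restores a one-to-one pairing. A toy sketch of the idea, deliberately using plain strings and a hand-rolled equality test rather than the PennyLane API:

```python
def regroup_coeffs(partition, observables, coefficients, same):
    # Pop each matched observable/coefficient pair so a duplicate Pauli word
    # later in the partition picks up the *second* copy's coefficient.
    observables, coefficients = list(observables), list(coefficients)
    out = []
    for pauli_word in partition:
        for obs in observables:
            if same(pauli_word, obs):
                ind = observables.index(obs)
                out.append(coefficients[ind])
                observables.pop(ind)
                coefficients.pop(ind)
                break
    return out

same = lambda a, b: set(a.split("@")) == set(b.split("@"))
print(regroup_coeffs(["Z1@Z2", "Z2@Z1"], ["Z1@Z2", "Z2@Z1"], [1.5, 0.5], same))
# [1.5, 0.5] -- the buggy version (no popping) would return [1.5, 1.5],
# the kind of incorrect weighting behind the issue report.
```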
{"golden_diff": "diff --git a/pennylane/grouping/group_observables.py b/pennylane/grouping/group_observables.py\n--- a/pennylane/grouping/group_observables.py\n+++ b/pennylane/grouping/group_observables.py\n@@ -15,15 +15,18 @@\n This module contains the high-level Pauli-word-partitioning functionality used in measurement optimization.\n \"\"\"\n \n-from pennylane.wires import Wires\n+from copy import copy\n+\n+import numpy as np\n+\n+from pennylane.grouping.graph_colouring import largest_first, recursive_largest_first\n from pennylane.grouping.utils import (\n- observables_to_binary_matrix,\n- binary_to_pauli,\n are_identical_pauli_words,\n+ binary_to_pauli,\n+ observables_to_binary_matrix,\n qwc_complement_adj_matrix,\n )\n-from pennylane.grouping.graph_colouring import largest_first, recursive_largest_first\n-import numpy as np\n+from pennylane.wires import Wires\n \n GROUPING_TYPES = frozenset([\"qwc\", \"commuting\", \"anticommuting\"])\n GRAPH_COLOURING_METHODS = {\"lf\": largest_first, \"rlf\": recursive_largest_first}\n@@ -236,11 +239,16 @@\n \n partitioned_coeffs = [[0] * len(g) for g in partitioned_paulis]\n \n+ observables = copy(observables)\n+ coefficients = copy(coefficients)\n for i, partition in enumerate(partitioned_paulis):\n for j, pauli_word in enumerate(partition):\n for observable in observables:\n if are_identical_pauli_words(pauli_word, observable):\n- partitioned_coeffs[i][j] = coefficients[observables.index(observable)]\n+ ind = observables.index(observable)\n+ partitioned_coeffs[i][j] = coefficients[ind]\n+ observables.pop(ind)\n+ coefficients.pop(ind)\n break\n \n return partitioned_paulis, partitioned_coeffs\n", "issue": "Hamiltonians created via addition produce incorrect results with `ExpvalCost(optimize=True)`\nGood afternoon, \r\nI was having problem with vqe when I set the flag: optmize = True . I noticed that there were errors when entering Hamiltonians of the form Z1@Z2 + Z2@Z1. 
We know it is the same as 2.Z1.Z2 but the optimizer seems to have some kind of problem with this kind of Hamiltonians (vqe with optimize = False works correctly in this situation).\r\nAlthough by preprocessing the Hamiltonian we avoid the problem, I was writing to take the problem into account.\r\nGreetings!\n", "before_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains the high-level Pauli-word-partitioning functionality used in measurement optimization.\n\"\"\"\n\nfrom pennylane.wires import Wires\nfrom pennylane.grouping.utils import (\n observables_to_binary_matrix,\n binary_to_pauli,\n are_identical_pauli_words,\n qwc_complement_adj_matrix,\n)\nfrom pennylane.grouping.graph_colouring import largest_first, recursive_largest_first\nimport numpy as np\n\nGROUPING_TYPES = frozenset([\"qwc\", \"commuting\", \"anticommuting\"])\nGRAPH_COLOURING_METHODS = {\"lf\": largest_first, \"rlf\": recursive_largest_first}\n\n\nclass PauliGroupingStrategy: # pylint: disable=too-many-instance-attributes\n \"\"\"\n Class for partitioning a list of Pauli words according to some binary symmetric relation.\n\n Partitions are defined by the binary symmetric relation of interest, e.g., all Pauli words in a\n partition being mutually commuting. The partitioning is accomplished by formulating the list of\n Pauli words as a graph where nodes represent Pauli words and edges between nodes denotes that\n the two corresponding Pauli words satisfy the symmetric binary relation.\n\n Obtaining the fewest number of partitions such that all Pauli terms within a partition mutually\n satisfy the binary relation can then be accomplished by finding a partition of the graph nodes\n such that each partition is a fully connected subgraph (a \"clique\"). The problem of finding the\n optimal partitioning, i.e., the fewest number of cliques, is the minimum clique cover (MCC)\n problem. The solution of MCC may be found by graph colouring on the complementary graph. 
Both\n MCC and graph colouring are NP-Hard, so heuristic graph colouring algorithms are employed to\n find approximate solutions in polynomial time.\n\n Args:\n observables (list[Observable]): a list of Pauli words to be partitioned according to a\n grouping strategy\n grouping_type (str): the binary relation used to define partitions of\n the Pauli words, can be ``'qwc'`` (qubit-wise commuting), ``'commuting'``, or\n ``'anticommuting'``.\n graph_colourer (str): the heuristic algorithm to employ for graph\n colouring, can be ``'lf'`` (Largest First) or ``'rlf'`` (Recursive\n Largest First)\n\n Raises:\n ValueError: if arguments specified for ``grouping_type`` or\n ``graph_colourer`` are not recognized\n \"\"\"\n\n def __init__(self, observables, grouping_type=\"qwc\", graph_colourer=\"rlf\"):\n\n if grouping_type.lower() not in GROUPING_TYPES:\n raise ValueError(\n \"Grouping type must be one of: {}, instead got {}.\".format(\n GROUPING_TYPES, grouping_type\n )\n )\n\n self.grouping_type = grouping_type.lower()\n\n if graph_colourer.lower() not in GRAPH_COLOURING_METHODS.keys():\n raise ValueError(\n \"Graph colouring method must be one of: {}, instead got {}.\".format(\n list(GRAPH_COLOURING_METHODS.keys()), graph_colourer\n )\n )\n\n self.graph_colourer = GRAPH_COLOURING_METHODS[graph_colourer.lower()]\n self.observables = observables\n self._wire_map = None\n self._n_qubits = None\n self.binary_observables = None\n self.adj_matrix = None\n self.grouped_paulis = None\n\n def binary_repr(self, n_qubits=None, wire_map=None):\n \"\"\"Converts the list of Pauli words to a binary matrix.\n\n Args:\n n_qubits (int): number of qubits to specify dimension of binary vector representation\n wire_map (dict): dictionary containing all wire labels used in the Pauli word as keys,\n and unique integer labels as their values\n\n Returns:\n array[int]: a column matrix of the Pauli words in binary vector representation\n \"\"\"\n\n if wire_map is None:\n self._wire_map = {\n wire: c\n for c, wire in enumerate(\n Wires.all_wires([obs.wires for obs in self.observables]).tolist()\n )\n }\n\n else:\n self._wire_map = wire_map\n\n self._n_qubits = n_qubits\n\n return observables_to_binary_matrix(self.observables, n_qubits, self._wire_map)\n\n def complement_adj_matrix_for_operator(self):\n \"\"\"Constructs the adjacency matrix for the complement of the Pauli graph.\n\n The adjacency matrix for an undirected graph of N vertices is an N by N symmetric binary\n matrix, where matrix elements of 1 denote an edge, and matrix elements of 0 denote no edge.\n\n Returns:\n array[int]: the square and symmetric adjacency matrix\n \"\"\"\n\n if self.binary_observables is None:\n self.binary_observables = self.binary_repr()\n\n n_qubits = int(np.shape(self.binary_observables)[1] / 2)\n\n if self.grouping_type == \"qwc\":\n adj = qwc_complement_adj_matrix(self.binary_observables)\n\n elif self.grouping_type in frozenset([\"commuting\", \"anticommuting\"]):\n symplectic_form = np.block(\n [\n [np.zeros((n_qubits, n_qubits)), np.eye(n_qubits)],\n [np.eye(n_qubits), np.zeros((n_qubits, n_qubits))],\n ]\n )\n mat_prod = (\n self.binary_observables @ symplectic_form @ np.transpose(self.binary_observables)\n )\n\n if self.grouping_type == \"commuting\":\n\n adj = mat_prod % 2\n\n elif self.grouping_type == \"anticommuting\":\n\n adj = (mat_prod + 1) % 2\n np.fill_diagonal(adj, 0)\n\n return adj\n\n def colour_pauli_graph(self):\n \"\"\"\n Runs the graph colouring heuristic algorithm to obtain the partitioned Pauli words.\n\n 
Returns:\n list[list[Observable]]: a list of the obtained groupings. Each grouping is itself a\n list of Pauli word ``Observable`` instances\n \"\"\"\n\n if self.adj_matrix is None:\n self.adj_matrix = self.complement_adj_matrix_for_operator()\n\n coloured_binary_paulis = self.graph_colourer(self.binary_observables, self.adj_matrix)\n\n self.grouped_paulis = [\n [binary_to_pauli(pauli_word, wire_map=self._wire_map) for pauli_word in grouping]\n for grouping in coloured_binary_paulis.values()\n ]\n\n return self.grouped_paulis\n\n\ndef group_observables(observables, coefficients=None, grouping_type=\"qwc\", method=\"rlf\"):\n \"\"\"Partitions a list of observables (Pauli operations and tensor products thereof) into\n groupings according to a binary relation (qubit-wise commuting, fully-commuting, or\n anticommuting).\n\n Partitions are found by 1) mapping the list of observables to a graph where vertices represent\n observables and edges encode the binary relation, then 2) solving minimum clique cover for the\n graph using graph-colouring heuristic algorithms.\n\n Args:\n observables (list[Observable]): a list of Pauli word ``Observable`` instances (Pauli\n operation instances and :class:`~.Tensor` instances thereof)\n coefficients (list[float]): A list of float coefficients. If not specified,\n output ``partitioned_coeffs`` is not returned.\n grouping_type (str): The type of binary relation between Pauli words.\n Can be ``'qwc'``, ``'commuting'``, or ``'anticommuting'``.\n method (str): the graph coloring heuristic to use in solving minimum clique cover, which\n can be ``'lf'`` (Largest First) or ``'rlf'`` (Recursive Largest First)\n\n Returns:\n tuple:\n\n * list[list[Observable]]: A list of the obtained groupings. Each grouping\n is itself a list of Pauli word ``Observable`` instances.\n * list[list[float]]: A list of coefficient groupings. Each coefficient\n grouping is itself a list of the grouping's corresponding coefficients. 
This is only\n output if coefficients are specified.\n\n Raises:\n IndexError: if the input list of coefficients is not of the same length as the input list\n of Pauli words\n\n **Example**\n\n >>> obs = [qml.PauliY(0), qml.PauliX(0) @ qml.PauliX(1), qml.PauliZ(1)]\n >>> coeffs = [1.43, 4.21, 0.97]\n >>> obs_groupings, coeffs_groupings = group_observables(obs, coeffs, 'anticommuting', 'lf')\n >>> obs_groupings\n [[PauliZ(wires=[1]), PauliX(wires=[0]) @ PauliX(wires=[1])],\n [PauliY(wires=[0])]]\n >>> coeffs_groupings\n [[0.97, 4.21], [1.43]]\n \"\"\"\n\n if coefficients is not None:\n if len(coefficients) != len(observables):\n raise IndexError(\n \"The coefficients list must be the same length as the observables list.\"\n )\n\n pauli_grouping = PauliGroupingStrategy(\n observables, grouping_type=grouping_type, graph_colourer=method\n )\n partitioned_paulis = pauli_grouping.colour_pauli_graph()\n\n if coefficients is None:\n return partitioned_paulis\n\n partitioned_coeffs = [[0] * len(g) for g in partitioned_paulis]\n\n for i, partition in enumerate(partitioned_paulis):\n for j, pauli_word in enumerate(partition):\n for observable in observables:\n if are_identical_pauli_words(pauli_word, observable):\n partitioned_coeffs[i][j] = coefficients[observables.index(observable)]\n break\n\n return partitioned_paulis, partitioned_coeffs\n", "path": "pennylane/grouping/group_observables.py"}], "after_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains the high-level Pauli-word-partitioning functionality used in measurement optimization.\n\"\"\"\n\nfrom copy import copy\n\nimport numpy as np\n\nfrom pennylane.grouping.graph_colouring import largest_first, recursive_largest_first\nfrom pennylane.grouping.utils import (\n are_identical_pauli_words,\n binary_to_pauli,\n observables_to_binary_matrix,\n qwc_complement_adj_matrix,\n)\nfrom pennylane.wires import Wires\n\nGROUPING_TYPES = frozenset([\"qwc\", \"commuting\", \"anticommuting\"])\nGRAPH_COLOURING_METHODS = {\"lf\": largest_first, \"rlf\": recursive_largest_first}\n\n\nclass PauliGroupingStrategy: # pylint: disable=too-many-instance-attributes\n \"\"\"\n Class for partitioning a list of Pauli words according to some binary symmetric relation.\n\n Partitions are defined by the binary symmetric relation of interest, e.g., all Pauli words in a\n partition being mutually commuting. The partitioning is accomplished by formulating the list of\n Pauli words as a graph where nodes represent Pauli words and edges between nodes denotes that\n the two corresponding Pauli words satisfy the symmetric binary relation.\n\n Obtaining the fewest number of partitions such that all Pauli terms within a partition mutually\n satisfy the binary relation can then be accomplished by finding a partition of the graph nodes\n such that each partition is a fully connected subgraph (a \"clique\"). 
The problem of finding the\n optimal partitioning, i.e., the fewest number of cliques, is the minimum clique cover (MCC)\n problem. The solution of MCC may be found by graph colouring on the complementary graph. Both\n MCC and graph colouring are NP-Hard, so heuristic graph colouring algorithms are employed to\n find approximate solutions in polynomial time.\n\n Args:\n observables (list[Observable]): a list of Pauli words to be partitioned according to a\n grouping strategy\n grouping_type (str): the binary relation used to define partitions of\n the Pauli words, can be ``'qwc'`` (qubit-wise commuting), ``'commuting'``, or\n ``'anticommuting'``.\n graph_colourer (str): the heuristic algorithm to employ for graph\n colouring, can be ``'lf'`` (Largest First) or ``'rlf'`` (Recursive\n Largest First)\n\n Raises:\n ValueError: if arguments specified for ``grouping_type`` or\n ``graph_colourer`` are not recognized\n \"\"\"\n\n def __init__(self, observables, grouping_type=\"qwc\", graph_colourer=\"rlf\"):\n\n if grouping_type.lower() not in GROUPING_TYPES:\n raise ValueError(\n \"Grouping type must be one of: {}, instead got {}.\".format(\n GROUPING_TYPES, grouping_type\n )\n )\n\n self.grouping_type = grouping_type.lower()\n\n if graph_colourer.lower() not in GRAPH_COLOURING_METHODS.keys():\n raise ValueError(\n \"Graph colouring method must be one of: {}, instead got {}.\".format(\n list(GRAPH_COLOURING_METHODS.keys()), graph_colourer\n )\n )\n\n self.graph_colourer = GRAPH_COLOURING_METHODS[graph_colourer.lower()]\n self.observables = observables\n self._wire_map = None\n self._n_qubits = None\n self.binary_observables = None\n self.adj_matrix = None\n self.grouped_paulis = None\n\n def binary_repr(self, n_qubits=None, wire_map=None):\n \"\"\"Converts the list of Pauli words to a binary matrix.\n\n Args:\n n_qubits (int): number of qubits to specify dimension of binary vector representation\n wire_map (dict): dictionary containing all wire labels used in the Pauli word as keys,\n and unique integer labels as their values\n\n Returns:\n array[int]: a column matrix of the Pauli words in binary vector representation\n \"\"\"\n\n if wire_map is None:\n self._wire_map = {\n wire: c\n for c, wire in enumerate(\n Wires.all_wires([obs.wires for obs in self.observables]).tolist()\n )\n }\n\n else:\n self._wire_map = wire_map\n\n self._n_qubits = n_qubits\n\n return observables_to_binary_matrix(self.observables, n_qubits, self._wire_map)\n\n def complement_adj_matrix_for_operator(self):\n \"\"\"Constructs the adjacency matrix for the complement of the Pauli graph.\n\n The adjacency matrix for an undirected graph of N vertices is an N by N symmetric binary\n matrix, where matrix elements of 1 denote an edge, and matrix elements of 0 denote no edge.\n\n Returns:\n array[int]: the square and symmetric adjacency matrix\n \"\"\"\n\n if self.binary_observables is None:\n self.binary_observables = self.binary_repr()\n\n n_qubits = int(np.shape(self.binary_observables)[1] / 2)\n\n if self.grouping_type == \"qwc\":\n adj = qwc_complement_adj_matrix(self.binary_observables)\n\n elif self.grouping_type in frozenset([\"commuting\", \"anticommuting\"]):\n symplectic_form = np.block(\n [\n [np.zeros((n_qubits, n_qubits)), np.eye(n_qubits)],\n [np.eye(n_qubits), np.zeros((n_qubits, n_qubits))],\n ]\n )\n mat_prod = (\n self.binary_observables @ symplectic_form @ np.transpose(self.binary_observables)\n )\n\n if self.grouping_type == \"commuting\":\n\n adj = mat_prod % 2\n\n elif self.grouping_type == 
\"anticommuting\":\n\n adj = (mat_prod + 1) % 2\n np.fill_diagonal(adj, 0)\n\n return adj\n\n def colour_pauli_graph(self):\n \"\"\"\n Runs the graph colouring heuristic algorithm to obtain the partitioned Pauli words.\n\n Returns:\n list[list[Observable]]: a list of the obtained groupings. Each grouping is itself a\n list of Pauli word ``Observable`` instances\n \"\"\"\n\n if self.adj_matrix is None:\n self.adj_matrix = self.complement_adj_matrix_for_operator()\n\n coloured_binary_paulis = self.graph_colourer(self.binary_observables, self.adj_matrix)\n\n self.grouped_paulis = [\n [binary_to_pauli(pauli_word, wire_map=self._wire_map) for pauli_word in grouping]\n for grouping in coloured_binary_paulis.values()\n ]\n\n return self.grouped_paulis\n\n\ndef group_observables(observables, coefficients=None, grouping_type=\"qwc\", method=\"rlf\"):\n \"\"\"Partitions a list of observables (Pauli operations and tensor products thereof) into\n groupings according to a binary relation (qubit-wise commuting, fully-commuting, or\n anticommuting).\n\n Partitions are found by 1) mapping the list of observables to a graph where vertices represent\n observables and edges encode the binary relation, then 2) solving minimum clique cover for the\n graph using graph-colouring heuristic algorithms.\n\n Args:\n observables (list[Observable]): a list of Pauli word ``Observable`` instances (Pauli\n operation instances and :class:`~.Tensor` instances thereof)\n coefficients (list[float]): A list of float coefficients. If not specified,\n output ``partitioned_coeffs`` is not returned.\n grouping_type (str): The type of binary relation between Pauli words.\n Can be ``'qwc'``, ``'commuting'``, or ``'anticommuting'``.\n method (str): the graph coloring heuristic to use in solving minimum clique cover, which\n can be ``'lf'`` (Largest First) or ``'rlf'`` (Recursive Largest First)\n\n Returns:\n tuple:\n\n * list[list[Observable]]: A list of the obtained groupings. Each grouping\n is itself a list of Pauli word ``Observable`` instances.\n * list[list[float]]: A list of coefficient groupings. Each coefficient\n grouping is itself a list of the grouping's corresponding coefficients. 
This is only\n output if coefficients are specified.\n\n Raises:\n IndexError: if the input list of coefficients is not of the same length as the input list\n of Pauli words\n\n **Example**\n\n >>> obs = [qml.PauliY(0), qml.PauliX(0) @ qml.PauliX(1), qml.PauliZ(1)]\n >>> coeffs = [1.43, 4.21, 0.97]\n >>> obs_groupings, coeffs_groupings = group_observables(obs, coeffs, 'anticommuting', 'lf')\n >>> obs_groupings\n [[PauliZ(wires=[1]), PauliX(wires=[0]) @ PauliX(wires=[1])],\n [PauliY(wires=[0])]]\n >>> coeffs_groupings\n [[0.97, 4.21], [1.43]]\n \"\"\"\n\n if coefficients is not None:\n if len(coefficients) != len(observables):\n raise IndexError(\n \"The coefficients list must be the same length as the observables list.\"\n )\n\n pauli_grouping = PauliGroupingStrategy(\n observables, grouping_type=grouping_type, graph_colourer=method\n )\n partitioned_paulis = pauli_grouping.colour_pauli_graph()\n\n if coefficients is None:\n return partitioned_paulis\n\n partitioned_coeffs = [[0] * len(g) for g in partitioned_paulis]\n\n observables = copy(observables)\n coefficients = copy(coefficients)\n for i, partition in enumerate(partitioned_paulis):\n for j, pauli_word in enumerate(partition):\n for observable in observables:\n if are_identical_pauli_words(pauli_word, observable):\n ind = observables.index(observable)\n partitioned_coeffs[i][j] = coefficients[ind]\n observables.pop(ind)\n coefficients.pop(ind)\n break\n\n return partitioned_paulis, partitioned_coeffs\n", "path": "pennylane/grouping/group_observables.py"}]}
3,399
444
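The golden diff in the record above pairs each partitioned Pauli word back to its coefficient by working on copies of the input lists and popping every matched observable, so duplicate observables no longer all resolve to the first matching coefficient. A minimal sketch of that match-and-pop pattern, with plain equality standing in for `are_identical_pauli_words` and hypothetical names throughout:

```python
# Sketch only: the same match-and-pop pairing used in the patched group_observables,
# with plain equality standing in for are_identical_pauli_words.
from copy import copy

def pair_coefficients(partitions, items, coeffs):
    items, coeffs = copy(items), copy(coeffs)        # work on copies, as the patch does
    grouped = [[0] * len(p) for p in partitions]
    for i, partition in enumerate(partitions):
        for j, element in enumerate(partition):
            for candidate in items:
                if element == candidate:
                    ind = items.index(candidate)
                    grouped[i][j] = coeffs[ind]
                    items.pop(ind)                   # a duplicate entry now maps to the *next* coefficient
                    coeffs.pop(ind)
                    break
    return grouped

print(pair_coefficients([["X", "Z"], ["X"]], ["X", "Z", "X"], [0.1, 0.2, 0.3]))
# [[0.1, 0.2], [0.3]] -- without the pops, both "X" entries would receive 0.1
```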
gh_patches_debug_20125
rasdani/github-patches
git_diff
rucio__rucio-1084
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- remove unused file from common Motivation ---------- lib/rucio/common/client.py is probably unused and can be removed Modification ------------ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/rucio/common/client.py` Content: ``` 1 # Copyright European Organization for Nuclear Research (CERN) 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # You may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Authors: 8 # - Vincent Garonne, <[email protected]>, 2012 9 10 import httplib 11 12 13 class BaseClient(object): 14 15 """A base client class""" 16 17 DEFAULT_PORT = 80 18 19 OK_RESPONSE_CODES = ( 20 httplib.OK, 21 httplib.CREATED, 22 httplib.ACCEPTED, 23 httplib.NO_CONTENT, 24 ) 25 26 def __init__(self, host, port=None, use_ssl=False, auth_tok=None, creds=None): 27 """ 28 Creates a new client to some service. 29 30 :param host: The host where service resides 31 :param port: The port where service resides 32 :param use_ssl: Should we use HTTPS? 33 :param auth_tok: The auth token to pass to the server 34 :param creds: The credentials to pass to the auth plugin 35 """ 36 self.host = host 37 self.port = port or self.DEFAULT_PORT 38 self.use_ssl = use_ssl 39 self.auth_tok = auth_tok 40 self.creds = creds or {} 41 self.connection = None 42 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/rucio/common/client.py b/lib/rucio/common/client.py deleted file mode 100644 --- a/lib/rucio/common/client.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright European Organization for Nuclear Research (CERN) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Authors: -# - Vincent Garonne, <[email protected]>, 2012 - -import httplib - - -class BaseClient(object): - - """A base client class""" - - DEFAULT_PORT = 80 - - OK_RESPONSE_CODES = ( - httplib.OK, - httplib.CREATED, - httplib.ACCEPTED, - httplib.NO_CONTENT, - ) - - def __init__(self, host, port=None, use_ssl=False, auth_tok=None, creds=None): - """ - Creates a new client to some service. - - :param host: The host where service resides - :param port: The port where service resides - :param use_ssl: Should we use HTTPS? - :param auth_tok: The auth token to pass to the server - :param creds: The credentials to pass to the auth plugin - """ - self.host = host - self.port = port or self.DEFAULT_PORT - self.use_ssl = use_ssl - self.auth_tok = auth_tok - self.creds = creds or {} - self.connection = None
{"golden_diff": "diff --git a/lib/rucio/common/client.py b/lib/rucio/common/client.py\ndeleted file mode 100644\n--- a/lib/rucio/common/client.py\n+++ /dev/null\n@@ -1,41 +0,0 @@\n-# Copyright European Organization for Nuclear Research (CERN)\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# You may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Authors:\n-# - Vincent Garonne, <[email protected]>, 2012\n-\n-import httplib\n-\n-\n-class BaseClient(object):\n-\n- \"\"\"A base client class\"\"\"\n-\n- DEFAULT_PORT = 80\n-\n- OK_RESPONSE_CODES = (\n- httplib.OK,\n- httplib.CREATED,\n- httplib.ACCEPTED,\n- httplib.NO_CONTENT,\n- )\n-\n- def __init__(self, host, port=None, use_ssl=False, auth_tok=None, creds=None):\n- \"\"\"\n- Creates a new client to some service.\n-\n- :param host: The host where service resides\n- :param port: The port where service resides\n- :param use_ssl: Should we use HTTPS?\n- :param auth_tok: The auth token to pass to the server\n- :param creds: The credentials to pass to the auth plugin\n- \"\"\"\n- self.host = host\n- self.port = port or self.DEFAULT_PORT\n- self.use_ssl = use_ssl\n- self.auth_tok = auth_tok\n- self.creds = creds or {}\n- self.connection = None\n", "issue": "remove unused file from common\nMotivation\r\n----------\r\nlib/rucio/common/client.py is probably unused and can be removed\r\n\r\nModification\r\n------------\r\n\n", "before_files": [{"content": "# Copyright European Organization for Nuclear Research (CERN)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n#\n# Authors:\n# - Vincent Garonne, <[email protected]>, 2012\n\nimport httplib\n\n\nclass BaseClient(object):\n\n \"\"\"A base client class\"\"\"\n\n DEFAULT_PORT = 80\n\n OK_RESPONSE_CODES = (\n httplib.OK,\n httplib.CREATED,\n httplib.ACCEPTED,\n httplib.NO_CONTENT,\n )\n\n def __init__(self, host, port=None, use_ssl=False, auth_tok=None, creds=None):\n \"\"\"\n Creates a new client to some service.\n\n :param host: The host where service resides\n :param port: The port where service resides\n :param use_ssl: Should we use HTTPS?\n :param auth_tok: The auth token to pass to the server\n :param creds: The credentials to pass to the auth plugin\n \"\"\"\n self.host = host\n self.port = port or self.DEFAULT_PORT\n self.use_ssl = use_ssl\n self.auth_tok = auth_tok\n self.creds = creds or {}\n self.connection = None\n", "path": "lib/rucio/common/client.py"}], "after_files": [{"content": null, "path": "lib/rucio/common/client.py"}]}
664
386
gh_patches_debug_22868
rasdani/github-patches
git_diff
microsoft__Qcodes-1110
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Creation of db files when import qcodes It's not a bug I guess, but I think its still unwanted. ### Steps to reproduce Set any directory to the current working directory Execute a python script/program that just imports the qcodes module ### Expected behaviour No side-effects of the file system ### Actual behaviour An experiment.db file is created in the current working directory. I think it's better if measurement databases are only created when we actually call a function that initialized an experiment. I use the qcodes module to load my experimental data from a lot of different working directories, like folders where I work on papers, during the analyses, etc... --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `qcodes/dataset/database.py` Content: ``` 1 # high-level interface to the database 2 3 from qcodes.dataset.sqlite_base import connect as _connect 4 from qcodes.dataset.sqlite_base import init_db as _init_db 5 import qcodes.config 6 7 8 def get_DB_location() -> str: 9 return qcodes.config["core"]["db_location"] 10 11 12 def get_DB_debug() -> bool: 13 return bool(qcodes.config["core"]["db_debug"]) 14 15 16 def initialise_database() -> None: 17 """ 18 Initialise a database in the location specified by the config object 19 If the database already exists, nothing happens 20 21 Args: 22 config: An instance of the config object 23 """ 24 conn = _connect(get_DB_location(), get_DB_debug()) 25 # init is actually idempotent so it's safe to always call! 26 _init_db(conn) 27 conn.close() 28 del conn 29 ``` Path: `qcodes/__init__.py` Content: ``` 1 """Set up the main qcodes namespace.""" 2 3 # flake8: noqa (we don't need the "<...> imported but unused" error) 4 5 # config 6 7 from qcodes.config import Config 8 from qcodes.utils.helpers import add_to_spyder_UMR_excludelist 9 10 # we dont want spyder to reload qcodes as this will overwrite the default station 11 # instrument list and running monitor 12 add_to_spyder_UMR_excludelist('qcodes') 13 config = Config() # type: Config 14 15 from qcodes.version import __version__ 16 17 plotlib = config.gui.plotlib 18 if plotlib in {'QT', 'all'}: 19 try: 20 from qcodes.plots.pyqtgraph import QtPlot 21 except Exception: 22 print('pyqtgraph plotting not supported, ' 23 'try "from qcodes.plots.pyqtgraph import QtPlot" ' 24 'to see the full error') 25 26 if plotlib in {'matplotlib', 'all'}: 27 try: 28 from qcodes.plots.qcmatplotlib import MatPlot 29 except Exception: 30 print('matplotlib plotting not supported, ' 31 'try "from qcodes.plots.qcmatplotlib import MatPlot" ' 32 'to see the full error') 33 34 35 from qcodes.station import Station 36 from qcodes.loops import Loop, active_loop, active_data_set 37 from qcodes.measure import Measure 38 from qcodes.actions import Task, Wait, BreakIf 39 haswebsockets = True 40 try: 41 import websockets 42 except ImportError: 43 haswebsockets = False 44 if haswebsockets: 45 from qcodes.monitor.monitor import Monitor 46 47 from qcodes.data.data_set import DataSet, new_data, load_data 48 from qcodes.data.location import FormatLocation 49 from qcodes.data.data_array import DataArray 50 from qcodes.data.format import Formatter 51 from qcodes.data.gnuplot_format import GNUPlotFormat 52 from qcodes.data.hdf5_format import HDF5Format 53 from qcodes.data.io import DiskIO 54 55 from qcodes.instrument.base import Instrument 56 
from qcodes.instrument.ip import IPInstrument 57 from qcodes.instrument.visa import VisaInstrument 58 from qcodes.instrument.channel import InstrumentChannel, ChannelList 59 60 from qcodes.instrument.function import Function 61 from qcodes.instrument.parameter import ( 62 Parameter, 63 ArrayParameter, 64 MultiParameter, 65 StandardParameter, 66 ManualParameter, 67 combine, 68 CombinedParameter) 69 from qcodes.instrument.sweep_values import SweepFixedValues, SweepValues 70 71 from qcodes.utils import validators 72 from qcodes.utils.zmq_helpers import Publisher 73 from qcodes.instrument_drivers.test import test_instruments, test_instrument 74 75 from qcodes.dataset.data_set import new_data_set, load_by_counter, load_by_id 76 from qcodes.dataset.experiment_container import new_experiment, load_experiment, load_experiment_by_name, \ 77 load_last_experiment, experiments 78 from qcodes.dataset.sqlite_settings import SQLiteSettings 79 from qcodes.dataset.param_spec import ParamSpec 80 # TODO: do we want this? 81 from qcodes.dataset.sqlite_base import connect as _connect 82 from qcodes.dataset.sqlite_base import init_db as _init_db 83 84 _c = _connect(config["core"]["db_location"], config["core"]["db_debug"]) 85 # init is actually idempotent so it's safe to always call! 86 _init_db(_c) 87 _c.close() 88 del _c 89 90 try: 91 get_ipython() # type: ignore # Check if we are in iPython 92 from qcodes.utils.magic import register_magic_class 93 _register_magic = config.core.get('register_magic', False) 94 if _register_magic is not False: 95 register_magic_class(magic_commands=_register_magic) 96 except NameError: 97 pass 98 except RuntimeError as e: 99 print(e) 100 101 # ensure to close all instruments when interpreter is closed 102 import atexit 103 atexit.register(Instrument.close_all) 104 105 def test(**kwargs): 106 """ 107 Run QCoDeS tests. This requires the test requirements given 108 in test_requirements.txt to be installed. 109 All arguments are forwarded to pytest.main 110 """ 111 try: 112 import pytest 113 except ImportError: 114 print("Need pytest to run tests") 115 return 116 args = ['--pyargs', 'qcodes.tests'] 117 retcode = pytest.main(args, **kwargs) 118 return retcode 119 120 121 test.__test__ = False # type: ignore # Don't try to run this method as a test 122 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/qcodes/__init__.py b/qcodes/__init__.py --- a/qcodes/__init__.py +++ b/qcodes/__init__.py @@ -77,15 +77,6 @@ load_last_experiment, experiments from qcodes.dataset.sqlite_settings import SQLiteSettings from qcodes.dataset.param_spec import ParamSpec -# TODO: do we want this? -from qcodes.dataset.sqlite_base import connect as _connect -from qcodes.dataset.sqlite_base import init_db as _init_db - -_c = _connect(config["core"]["db_location"], config["core"]["db_debug"]) -# init is actually idempotent so it's safe to always call! -_init_db(_c) -_c.close() -del _c try: get_ipython() # type: ignore # Check if we are in iPython diff --git a/qcodes/dataset/database.py b/qcodes/dataset/database.py --- a/qcodes/dataset/database.py +++ b/qcodes/dataset/database.py @@ -1,12 +1,14 @@ # high-level interface to the database +from os.path import expanduser + from qcodes.dataset.sqlite_base import connect as _connect from qcodes.dataset.sqlite_base import init_db as _init_db import qcodes.config def get_DB_location() -> str: - return qcodes.config["core"]["db_location"] + return expanduser(qcodes.config["core"]["db_location"]) def get_DB_debug() -> bool:
{"golden_diff": "diff --git a/qcodes/__init__.py b/qcodes/__init__.py\n--- a/qcodes/__init__.py\n+++ b/qcodes/__init__.py\n@@ -77,15 +77,6 @@\n load_last_experiment, experiments\n from qcodes.dataset.sqlite_settings import SQLiteSettings\n from qcodes.dataset.param_spec import ParamSpec\n-# TODO: do we want this?\n-from qcodes.dataset.sqlite_base import connect as _connect\n-from qcodes.dataset.sqlite_base import init_db as _init_db\n-\n-_c = _connect(config[\"core\"][\"db_location\"], config[\"core\"][\"db_debug\"])\n-# init is actually idempotent so it's safe to always call!\n-_init_db(_c)\n-_c.close()\n-del _c\n \n try:\n get_ipython() # type: ignore # Check if we are in iPython\ndiff --git a/qcodes/dataset/database.py b/qcodes/dataset/database.py\n--- a/qcodes/dataset/database.py\n+++ b/qcodes/dataset/database.py\n@@ -1,12 +1,14 @@\n # high-level interface to the database\n \n+from os.path import expanduser\n+\n from qcodes.dataset.sqlite_base import connect as _connect\n from qcodes.dataset.sqlite_base import init_db as _init_db\n import qcodes.config\n \n \n def get_DB_location() -> str:\n- return qcodes.config[\"core\"][\"db_location\"]\n+ return expanduser(qcodes.config[\"core\"][\"db_location\"])\n \n \n def get_DB_debug() -> bool:\n", "issue": "Creation of db files when import qcodes\nIt's not a bug I guess, but I think its still unwanted. \r\n\r\n### Steps to reproduce\r\nSet any directory to the current working directory\r\nExecute a python script/program that just imports the qcodes module\r\n\r\n### Expected behaviour\r\nNo side-effects of the file system\r\n\r\n### Actual behaviour\r\nAn experiment.db file is created in the current working directory. \r\n\r\nI think it's better if measurement databases are only created when we actually call a function that initialized an experiment. \r\nI use the qcodes module to load my experimental data from a lot of different working directories, like folders where I work on papers, during the analyses, etc... 
\n", "before_files": [{"content": "# high-level interface to the database\n\nfrom qcodes.dataset.sqlite_base import connect as _connect\nfrom qcodes.dataset.sqlite_base import init_db as _init_db\nimport qcodes.config\n\n\ndef get_DB_location() -> str:\n return qcodes.config[\"core\"][\"db_location\"]\n\n\ndef get_DB_debug() -> bool:\n return bool(qcodes.config[\"core\"][\"db_debug\"])\n\n\ndef initialise_database() -> None:\n \"\"\"\n Initialise a database in the location specified by the config object\n If the database already exists, nothing happens\n\n Args:\n config: An instance of the config object\n \"\"\"\n conn = _connect(get_DB_location(), get_DB_debug())\n # init is actually idempotent so it's safe to always call!\n _init_db(conn)\n conn.close()\n del conn\n", "path": "qcodes/dataset/database.py"}, {"content": "\"\"\"Set up the main qcodes namespace.\"\"\"\n\n# flake8: noqa (we don't need the \"<...> imported but unused\" error)\n\n# config\n\nfrom qcodes.config import Config\nfrom qcodes.utils.helpers import add_to_spyder_UMR_excludelist\n\n# we dont want spyder to reload qcodes as this will overwrite the default station\n# instrument list and running monitor\nadd_to_spyder_UMR_excludelist('qcodes')\nconfig = Config() # type: Config\n\nfrom qcodes.version import __version__\n\nplotlib = config.gui.plotlib\nif plotlib in {'QT', 'all'}:\n try:\n from qcodes.plots.pyqtgraph import QtPlot\n except Exception:\n print('pyqtgraph plotting not supported, '\n 'try \"from qcodes.plots.pyqtgraph import QtPlot\" '\n 'to see the full error')\n\nif plotlib in {'matplotlib', 'all'}:\n try:\n from qcodes.plots.qcmatplotlib import MatPlot\n except Exception:\n print('matplotlib plotting not supported, '\n 'try \"from qcodes.plots.qcmatplotlib import MatPlot\" '\n 'to see the full error')\n\n\nfrom qcodes.station import Station\nfrom qcodes.loops import Loop, active_loop, active_data_set\nfrom qcodes.measure import Measure\nfrom qcodes.actions import Task, Wait, BreakIf\nhaswebsockets = True\ntry:\n import websockets\nexcept ImportError:\n haswebsockets = False\nif haswebsockets:\n from qcodes.monitor.monitor import Monitor\n\nfrom qcodes.data.data_set import DataSet, new_data, load_data\nfrom qcodes.data.location import FormatLocation\nfrom qcodes.data.data_array import DataArray\nfrom qcodes.data.format import Formatter\nfrom qcodes.data.gnuplot_format import GNUPlotFormat\nfrom qcodes.data.hdf5_format import HDF5Format\nfrom qcodes.data.io import DiskIO\n\nfrom qcodes.instrument.base import Instrument\nfrom qcodes.instrument.ip import IPInstrument\nfrom qcodes.instrument.visa import VisaInstrument\nfrom qcodes.instrument.channel import InstrumentChannel, ChannelList\n\nfrom qcodes.instrument.function import Function\nfrom qcodes.instrument.parameter import (\n Parameter,\n ArrayParameter,\n MultiParameter,\n StandardParameter,\n ManualParameter,\n combine,\n CombinedParameter)\nfrom qcodes.instrument.sweep_values import SweepFixedValues, SweepValues\n\nfrom qcodes.utils import validators\nfrom qcodes.utils.zmq_helpers import Publisher\nfrom qcodes.instrument_drivers.test import test_instruments, test_instrument\n\nfrom qcodes.dataset.data_set import new_data_set, load_by_counter, load_by_id\nfrom qcodes.dataset.experiment_container import new_experiment, load_experiment, load_experiment_by_name, \\\n load_last_experiment, experiments\nfrom qcodes.dataset.sqlite_settings import SQLiteSettings\nfrom qcodes.dataset.param_spec import ParamSpec\n# TODO: do we want this?\nfrom 
qcodes.dataset.sqlite_base import connect as _connect\nfrom qcodes.dataset.sqlite_base import init_db as _init_db\n\n_c = _connect(config[\"core\"][\"db_location\"], config[\"core\"][\"db_debug\"])\n# init is actually idempotent so it's safe to always call!\n_init_db(_c)\n_c.close()\ndel _c\n\ntry:\n get_ipython() # type: ignore # Check if we are in iPython\n from qcodes.utils.magic import register_magic_class\n _register_magic = config.core.get('register_magic', False)\n if _register_magic is not False:\n register_magic_class(magic_commands=_register_magic)\nexcept NameError:\n pass\nexcept RuntimeError as e:\n print(e)\n\n# ensure to close all instruments when interpreter is closed\nimport atexit\natexit.register(Instrument.close_all)\n\ndef test(**kwargs):\n \"\"\"\n Run QCoDeS tests. This requires the test requirements given\n in test_requirements.txt to be installed.\n All arguments are forwarded to pytest.main\n \"\"\"\n try:\n import pytest\n except ImportError:\n print(\"Need pytest to run tests\")\n return\n args = ['--pyargs', 'qcodes.tests']\n retcode = pytest.main(args, **kwargs)\n return retcode\n\n\ntest.__test__ = False # type: ignore # Don't try to run this method as a test\n", "path": "qcodes/__init__.py"}], "after_files": [{"content": "# high-level interface to the database\n\nfrom os.path import expanduser\n\nfrom qcodes.dataset.sqlite_base import connect as _connect\nfrom qcodes.dataset.sqlite_base import init_db as _init_db\nimport qcodes.config\n\n\ndef get_DB_location() -> str:\n return expanduser(qcodes.config[\"core\"][\"db_location\"])\n\n\ndef get_DB_debug() -> bool:\n return bool(qcodes.config[\"core\"][\"db_debug\"])\n\n\ndef initialise_database() -> None:\n \"\"\"\n Initialise a database in the location specified by the config object\n If the database already exists, nothing happens\n\n Args:\n config: An instance of the config object\n \"\"\"\n conn = _connect(get_DB_location(), get_DB_debug())\n # init is actually idempotent so it's safe to always call!\n _init_db(conn)\n conn.close()\n del conn\n", "path": "qcodes/dataset/database.py"}, {"content": "\"\"\"Set up the main qcodes namespace.\"\"\"\n\n# flake8: noqa (we don't need the \"<...> imported but unused\" error)\n\n# config\n\nfrom qcodes.config import Config\nfrom qcodes.utils.helpers import add_to_spyder_UMR_excludelist\n\n# we dont want spyder to reload qcodes as this will overwrite the default station\n# instrument list and running monitor\nadd_to_spyder_UMR_excludelist('qcodes')\nconfig = Config() # type: Config\n\nfrom qcodes.version import __version__\n\nplotlib = config.gui.plotlib\nif plotlib in {'QT', 'all'}:\n try:\n from qcodes.plots.pyqtgraph import QtPlot\n except Exception:\n print('pyqtgraph plotting not supported, '\n 'try \"from qcodes.plots.pyqtgraph import QtPlot\" '\n 'to see the full error')\n\nif plotlib in {'matplotlib', 'all'}:\n try:\n from qcodes.plots.qcmatplotlib import MatPlot\n except Exception:\n print('matplotlib plotting not supported, '\n 'try \"from qcodes.plots.qcmatplotlib import MatPlot\" '\n 'to see the full error')\n\n\nfrom qcodes.station import Station\nfrom qcodes.loops import Loop, active_loop, active_data_set\nfrom qcodes.measure import Measure\nfrom qcodes.actions import Task, Wait, BreakIf\nhaswebsockets = True\ntry:\n import websockets\nexcept ImportError:\n haswebsockets = False\nif haswebsockets:\n from qcodes.monitor.monitor import Monitor\n\nfrom qcodes.data.data_set import DataSet, new_data, load_data\nfrom qcodes.data.location import 
FormatLocation\nfrom qcodes.data.data_array import DataArray\nfrom qcodes.data.format import Formatter\nfrom qcodes.data.gnuplot_format import GNUPlotFormat\nfrom qcodes.data.hdf5_format import HDF5Format\nfrom qcodes.data.io import DiskIO\n\nfrom qcodes.instrument.base import Instrument\nfrom qcodes.instrument.ip import IPInstrument\nfrom qcodes.instrument.visa import VisaInstrument\nfrom qcodes.instrument.channel import InstrumentChannel, ChannelList\n\nfrom qcodes.instrument.function import Function\nfrom qcodes.instrument.parameter import (\n Parameter,\n ArrayParameter,\n MultiParameter,\n StandardParameter,\n ManualParameter,\n combine,\n CombinedParameter)\nfrom qcodes.instrument.sweep_values import SweepFixedValues, SweepValues\n\nfrom qcodes.utils import validators\nfrom qcodes.utils.zmq_helpers import Publisher\nfrom qcodes.instrument_drivers.test import test_instruments, test_instrument\n\nfrom qcodes.dataset.data_set import new_data_set, load_by_counter, load_by_id\nfrom qcodes.dataset.experiment_container import new_experiment, load_experiment, load_experiment_by_name, \\\n load_last_experiment, experiments\nfrom qcodes.dataset.sqlite_settings import SQLiteSettings\nfrom qcodes.dataset.param_spec import ParamSpec\n\ntry:\n get_ipython() # type: ignore # Check if we are in iPython\n from qcodes.utils.magic import register_magic_class\n _register_magic = config.core.get('register_magic', False)\n if _register_magic is not False:\n register_magic_class(magic_commands=_register_magic)\nexcept NameError:\n pass\nexcept RuntimeError as e:\n print(e)\n\n# ensure to close all instruments when interpreter is closed\nimport atexit\natexit.register(Instrument.close_all)\n\ndef test(**kwargs):\n \"\"\"\n Run QCoDeS tests. This requires the test requirements given\n in test_requirements.txt to be installed.\n All arguments are forwarded to pytest.main\n \"\"\"\n try:\n import pytest\n except ImportError:\n print(\"Need pytest to run tests\")\n return\n args = ['--pyargs', 'qcodes.tests']\n retcode = pytest.main(args, **kwargs)\n return retcode\n\n\ntest.__test__ = False # type: ignore # Don't try to run this method as a test\n", "path": "qcodes/__init__.py"}]}
1,828
321
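The patch in the record above removes all database creation from import time and routes the configured path through `expanduser`, so a `.db` file only appears when `initialise_database()` is called explicitly. A minimal sketch of that import-side-effect-free pattern, using hypothetical names in place of the qcodes config plumbing:

```python
# Sketch only (hypothetical module, not qcodes itself): nothing touches the filesystem at import.
import sqlite3
from os.path import expanduser

DB_LOCATION = "~/experiments.db"   # placeholder for the config["core"]["db_location"] entry

def get_db_location() -> str:
    return expanduser(DB_LOCATION)          # "~"-style locations resolve outside the CWD

def initialise_database() -> None:
    conn = sqlite3.connect(get_db_location())
    # idempotent, mirroring init_db: safe to call repeatedly
    conn.execute("CREATE TABLE IF NOT EXISTS experiments (id INTEGER PRIMARY KEY)")
    conn.close()

# Importing this module has no side effects; the .db file is created only when a
# script explicitly calls initialise_database().
```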
gh_patches_debug_7596
rasdani/github-patches
git_diff
elastic__apm-agent-python-1725
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Elastic APM 6.13.0 agent errors when capturing long elasticsearch transactions **Describe the bug**: **To Reproduce** When capturing transactions to elasticsearch, Elastic APM throws errors when executing long transactions (e.g. async_scan), eventually causing the asynchronous event loop to cancel all tasks. ```text 2022-10-31T09:05:05.200-04:00 | Traceback (most recent call last): -- | --   | 2022-10-31T09:05:05.200-04:00 | File "/usr/local/lib/python3.9/site-packages/elasticsearch/_async/helpers.py", line 490, in async_scan   | 2022-10-31T09:05:05.200-04:00 | resp = await scroll_client.scroll(   | 2022-10-31T09:05:05.200-04:00 | File "/usr/local/lib/python3.9/site-packages/elasticsearch/_async/client/__init__.py", line 3350, in scroll   | 2022-10-31T09:05:05.200-04:00 | return await self.perform_request( # type: ignore[return-value]   | 2022-10-31T09:05:05.200-04:00 | File "/usr/local/lib/python3.9/site-packages/elasticsearch/_async/client/_base.py", line 286, in perform_request   | 2022-10-31T09:05:05.200-04:00 | meta, resp_body = await self.transport.perform_request(   | 2022-10-31T09:05:05.200-04:00 | File "/usr/local/lib/python3.9/site-packages/elasticapm/instrumentation/packages/asyncio/elasticsearch.py", line 105, in call   | 2022-10-31T09:05:05.200-04:00 | result_data = await wrapped(*args, **kwargs)   | 2022-10-31T09:05:05.200-04:00 | File "/usr/local/lib/python3.9/site-packages/elastic_transport/_async_transport.py", line 272, in perform_request   | 2022-10-31T09:05:05.200-04:00 | resp.meta.status,   | 2022-10-31T09:05:05.200-04:00 | AttributeError: 'coroutine' object has no attribute 'meta' ``` Pinning elastic-apm~6.12.0 will solve the issue and it appears that it was introduced when Elasticsearch 8.0 support was added in [PR#1642](https://github.com/elastic/apm-agent-python/pull/1642/files) but I can't determine why because you guys did access the meta attribute safely with hasattr()? Please let me know if there is additional context I can add! **Environment (please complete the following information)** - OS: [e.g. Linux] debian:bullseye-slim (python:3.9-slim container image) x86 - Python version: 3.9 - Framework and version [e.g. Django 2.1]: N/A AWS Lambda Function - APM Server version: 8.4.2 - Agent version: 6.13.0 - AWS Lambda Runtime: Custom container built w/ apm-lambda-extension. ```shell $ pip3 freeze | grep elastic elastic-apm==6.13.0 elastic-transport==8.4.0 elasticsearch==8.4.3 ``` **Additional context** Add any other context about the problem here. - Agent config options <!-- be careful not to post sensitive information --> <details> <summary>Click to expand</summary> ``` ELASTIC_APM_LOG_LEVEL: info ELASTIC_APM_DATA_RECEIVER_TIMEOUT_SECONDS: 15 ELASTIC_APM_DATA_FORWARDER_TIMEOUT_SECONDS: 3 ``` </details> - `requirements.txt`: Poetry in use (`tool.poetry.dependencies` in replace) <details> <summary>Click to expand</summary> ``` [tool.poetry.dependencies] python = "^3.9" elasticsearch = "^8" elastic-apm = "^6.13" pandas = "^1.4.3" python-dotenv = "^0.21.0" requests = "^2.28.1" python-dateutil = "^2.8.2" boto3 = "^1.25.4" psycopg2-binary = "^2.9.3" aiohttp = "^3.8.1" validators = "^0.20.0" ``` </details> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `elasticapm/instrumentation/packages/asyncio/elasticsearch.py` Content: ``` 1 # BSD 3-Clause License 2 # 3 # Copyright (c) 2019, Elasticsearch BV 4 # All rights reserved. 5 # 6 # Redistribution and use in source and binary forms, with or without 7 # modification, are permitted provided that the following conditions are met: 8 # 9 # * Redistributions of source code must retain the above copyright notice, this 10 # list of conditions and the following disclaimer. 11 # 12 # * Redistributions in binary form must reproduce the above copyright notice, 13 # this list of conditions and the following disclaimer in the documentation 14 # and/or other materials provided with the distribution. 15 # 16 # * Neither the name of the copyright holder nor the names of its 17 # contributors may be used to endorse or promote products derived from 18 # this software without specific prior written permission. 19 # 20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 31 import elasticapm 32 from elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule 33 from elasticapm.instrumentation.packages.elasticsearch import ( 34 ElasticsearchConnectionInstrumentation, 35 ElasticsearchTransportInstrumentation, 36 ) 37 from elasticapm.traces import DroppedSpan, execution_context 38 39 40 class ElasticSearchAsyncConnection(ElasticsearchConnectionInstrumentation, AsyncAbstractInstrumentedModule): 41 name = "elasticsearch_connection" 42 43 def get_instrument_list(self): 44 try: 45 import elastic_transport # noqa: F401 46 47 return [ 48 ("elastic_transport._node._http_aiohttp", "AiohttpHttpNode.perform_request"), 49 ] 50 except ImportError: 51 return [ 52 ("elasticsearch_async.connection", "AIOHttpConnection.perform_request"), 53 ("elasticsearch._async.http_aiohttp", "AIOHttpConnection.perform_request"), 54 ] 55 56 async def call(self, module, method, wrapped, instance, args, kwargs): 57 span = execution_context.get_span() 58 if not span or isinstance(span, DroppedSpan): 59 return wrapped(*args, **kwargs) 60 61 self._update_context_by_request_data(span.context, instance, args, kwargs) 62 63 result = await wrapped(*args, **kwargs) 64 if hasattr(result, "meta"): # elasticsearch-py 8.x+ 65 status_code = result.meta.status 66 else: 67 status_code = result[0] 68 69 span.context["http"] = {"status_code": status_code} 70 71 return result 72 73 74 class ElasticsearchAsyncTransportInstrumentation( 75 ElasticsearchTransportInstrumentation, AsyncAbstractInstrumentedModule 76 ): 77 name = "elasticsearch_connection" 78 79 instrument_list = [ 80 ("elasticsearch._async.transport", "AsyncTransport.perform_request"), 81 ] 82 83 def get_instrument_list(self): 84 try: 85 import elastic_transport # noqa: F401 86 87 return [ 88 ("elastic_transport", 
"AsyncTransport.perform_request"), 89 ] 90 except ImportError: 91 return [ 92 ("elasticsearch._async.transport", "AsyncTransport.perform_request"), 93 ] 94 95 async def call(self, module, method, wrapped, instance, args, kwargs): 96 async with elasticapm.async_capture_span( 97 self._get_signature(args, kwargs), 98 span_type="db", 99 span_subtype="elasticsearch", 100 span_action="query", 101 extra={}, 102 skip_frames=2, 103 leaf=True, 104 ) as span: 105 result_data = await wrapped(*args, **kwargs) 106 107 hits = self._get_hits(result_data) 108 if hits: 109 span.context["db"]["rows_affected"] = hits 110 111 return result_data 112 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/elasticapm/instrumentation/packages/asyncio/elasticsearch.py b/elasticapm/instrumentation/packages/asyncio/elasticsearch.py --- a/elasticapm/instrumentation/packages/asyncio/elasticsearch.py +++ b/elasticapm/instrumentation/packages/asyncio/elasticsearch.py @@ -56,7 +56,7 @@ async def call(self, module, method, wrapped, instance, args, kwargs): span = execution_context.get_span() if not span or isinstance(span, DroppedSpan): - return wrapped(*args, **kwargs) + return await wrapped(*args, **kwargs) self._update_context_by_request_data(span.context, instance, args, kwargs)
{"golden_diff": "diff --git a/elasticapm/instrumentation/packages/asyncio/elasticsearch.py b/elasticapm/instrumentation/packages/asyncio/elasticsearch.py\n--- a/elasticapm/instrumentation/packages/asyncio/elasticsearch.py\n+++ b/elasticapm/instrumentation/packages/asyncio/elasticsearch.py\n@@ -56,7 +56,7 @@\n async def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if not span or isinstance(span, DroppedSpan):\n- return wrapped(*args, **kwargs)\n+ return await wrapped(*args, **kwargs)\n \n self._update_context_by_request_data(span.context, instance, args, kwargs)\n", "issue": "Elastic APM 6.13.0 agent errors when capturing long elasticsearch transactions\n**Describe the bug**: \r\n\r\n**To Reproduce**\r\n\r\nWhen capturing transactions to elasticsearch, Elastic APM throws errors when executing long transactions (e.g. async_scan), eventually causing the asynchronous event loop to cancel all tasks. \r\n\r\n```text\r\n2022-10-31T09:05:05.200-04:00 | Traceback (most recent call last):\r\n-- | --\r\n\u00a0 | 2022-10-31T09:05:05.200-04:00 | File \"/usr/local/lib/python3.9/site-packages/elasticsearch/_async/helpers.py\", line 490, in async_scan\r\n\u00a0 | 2022-10-31T09:05:05.200-04:00 | resp = await scroll_client.scroll(\r\n\u00a0 | 2022-10-31T09:05:05.200-04:00 | File \"/usr/local/lib/python3.9/site-packages/elasticsearch/_async/client/__init__.py\", line 3350, in scroll\r\n\u00a0 | 2022-10-31T09:05:05.200-04:00 | return await self.perform_request( # type: ignore[return-value]\r\n\u00a0 | 2022-10-31T09:05:05.200-04:00 | File \"/usr/local/lib/python3.9/site-packages/elasticsearch/_async/client/_base.py\", line 286, in perform_request\r\n\u00a0 | 2022-10-31T09:05:05.200-04:00 | meta, resp_body = await self.transport.perform_request(\r\n\u00a0 | 2022-10-31T09:05:05.200-04:00 | File \"/usr/local/lib/python3.9/site-packages/elasticapm/instrumentation/packages/asyncio/elasticsearch.py\", line 105, in call\r\n\u00a0 | 2022-10-31T09:05:05.200-04:00 | result_data = await wrapped(*args, **kwargs)\r\n\u00a0 | 2022-10-31T09:05:05.200-04:00 | File \"/usr/local/lib/python3.9/site-packages/elastic_transport/_async_transport.py\", line 272, in perform_request\r\n\u00a0 | 2022-10-31T09:05:05.200-04:00 | resp.meta.status,\r\n\u00a0 | 2022-10-31T09:05:05.200-04:00 | AttributeError: 'coroutine' object has no attribute 'meta'\r\n```\r\n\r\nPinning elastic-apm~6.12.0 will solve the issue and it appears that it was introduced when Elasticsearch 8.0 support was added in [PR#1642](https://github.com/elastic/apm-agent-python/pull/1642/files) but I can't determine why because you guys did access the meta attribute safely with hasattr()? \r\n\r\nPlease let me know if there is additional context I can add!\r\n\r\n**Environment (please complete the following information)**\r\n- OS: [e.g. Linux] debian:bullseye-slim (python:3.9-slim container image) x86\r\n- Python version: 3.9\r\n- Framework and version [e.g. 
Django 2.1]: N/A AWS Lambda Function\r\n- APM Server version: 8.4.2\r\n- Agent version: 6.13.0\r\n- AWS Lambda Runtime: Custom container built w/ apm-lambda-extension.\r\n\r\n```shell\r\n$ pip3 freeze | grep elastic\r\nelastic-apm==6.13.0\r\nelastic-transport==8.4.0\r\nelasticsearch==8.4.3\r\n```\r\n\r\n**Additional context**\r\n\r\nAdd any other context about the problem here.\r\n\r\n- Agent config options <!-- be careful not to post sensitive information -->\r\n <details>\r\n <summary>Click to expand</summary>\r\n\r\n ```\r\n ELASTIC_APM_LOG_LEVEL: info\r\n ELASTIC_APM_DATA_RECEIVER_TIMEOUT_SECONDS: 15\r\n ELASTIC_APM_DATA_FORWARDER_TIMEOUT_SECONDS: 3\r\n ```\r\n </details>\r\n- `requirements.txt`: Poetry in use (`tool.poetry.dependencies` in replace)\r\n <details>\r\n <summary>Click to expand</summary>\r\n\r\n ```\r\n [tool.poetry.dependencies]\r\n python = \"^3.9\"\r\n elasticsearch = \"^8\"\r\n elastic-apm = \"^6.13\"\r\n pandas = \"^1.4.3\"\r\n python-dotenv = \"^0.21.0\"\r\n requests = \"^2.28.1\"\r\n python-dateutil = \"^2.8.2\"\r\n boto3 = \"^1.25.4\"\r\n psycopg2-binary = \"^2.9.3\"\r\n aiohttp = \"^3.8.1\"\r\n validators = \"^0.20.0\"\r\n ```\r\n </details>\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport elasticapm\nfrom elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule\nfrom elasticapm.instrumentation.packages.elasticsearch import (\n ElasticsearchConnectionInstrumentation,\n ElasticsearchTransportInstrumentation,\n)\nfrom elasticapm.traces import DroppedSpan, execution_context\n\n\nclass ElasticSearchAsyncConnection(ElasticsearchConnectionInstrumentation, AsyncAbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n def get_instrument_list(self):\n try:\n import elastic_transport # noqa: F401\n\n return [\n (\"elastic_transport._node._http_aiohttp\", \"AiohttpHttpNode.perform_request\"),\n ]\n except ImportError:\n return [\n (\"elasticsearch_async.connection\", \"AIOHttpConnection.perform_request\"),\n (\"elasticsearch._async.http_aiohttp\", \"AIOHttpConnection.perform_request\"),\n ]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if not span or isinstance(span, DroppedSpan):\n return wrapped(*args, **kwargs)\n\n self._update_context_by_request_data(span.context, instance, args, kwargs)\n\n result = await wrapped(*args, **kwargs)\n if hasattr(result, \"meta\"): # elasticsearch-py 8.x+\n status_code = result.meta.status\n else:\n status_code = result[0]\n\n span.context[\"http\"] = {\"status_code\": status_code}\n\n return result\n\n\nclass ElasticsearchAsyncTransportInstrumentation(\n ElasticsearchTransportInstrumentation, AsyncAbstractInstrumentedModule\n):\n name = \"elasticsearch_connection\"\n\n instrument_list = [\n (\"elasticsearch._async.transport\", \"AsyncTransport.perform_request\"),\n ]\n\n def get_instrument_list(self):\n try:\n import elastic_transport # noqa: F401\n\n return [\n (\"elastic_transport\", \"AsyncTransport.perform_request\"),\n ]\n except ImportError:\n return [\n (\"elasticsearch._async.transport\", \"AsyncTransport.perform_request\"),\n ]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n async with elasticapm.async_capture_span(\n self._get_signature(args, kwargs),\n span_type=\"db\",\n span_subtype=\"elasticsearch\",\n span_action=\"query\",\n extra={},\n skip_frames=2,\n leaf=True,\n ) as span:\n result_data = await wrapped(*args, **kwargs)\n\n hits = self._get_hits(result_data)\n if hits:\n span.context[\"db\"][\"rows_affected\"] = hits\n\n return result_data\n", "path": "elasticapm/instrumentation/packages/asyncio/elasticsearch.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the 
documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport elasticapm\nfrom elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule\nfrom elasticapm.instrumentation.packages.elasticsearch import (\n ElasticsearchConnectionInstrumentation,\n ElasticsearchTransportInstrumentation,\n)\nfrom elasticapm.traces import DroppedSpan, execution_context\n\n\nclass ElasticSearchAsyncConnection(ElasticsearchConnectionInstrumentation, AsyncAbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n def get_instrument_list(self):\n try:\n import elastic_transport # noqa: F401\n\n return [\n (\"elastic_transport._node._http_aiohttp\", \"AiohttpHttpNode.perform_request\"),\n ]\n except ImportError:\n return [\n (\"elasticsearch_async.connection\", \"AIOHttpConnection.perform_request\"),\n (\"elasticsearch._async.http_aiohttp\", \"AIOHttpConnection.perform_request\"),\n ]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if not span or isinstance(span, DroppedSpan):\n return await wrapped(*args, **kwargs)\n\n self._update_context_by_request_data(span.context, instance, args, kwargs)\n\n result = await wrapped(*args, **kwargs)\n if hasattr(result, \"meta\"): # elasticsearch-py 8.x+\n status_code = result.meta.status\n else:\n status_code = result[0]\n\n span.context[\"http\"] = {\"status_code\": status_code}\n\n return result\n\n\nclass ElasticsearchAsyncTransportInstrumentation(\n ElasticsearchTransportInstrumentation, AsyncAbstractInstrumentedModule\n):\n name = \"elasticsearch_connection\"\n\n instrument_list = [\n (\"elasticsearch._async.transport\", \"AsyncTransport.perform_request\"),\n ]\n\n def get_instrument_list(self):\n try:\n import elastic_transport # noqa: F401\n\n return [\n (\"elastic_transport\", \"AsyncTransport.perform_request\"),\n ]\n except ImportError:\n return [\n (\"elasticsearch._async.transport\", \"AsyncTransport.perform_request\"),\n ]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n async with elasticapm.async_capture_span(\n self._get_signature(args, kwargs),\n span_type=\"db\",\n span_subtype=\"elasticsearch\",\n span_action=\"query\",\n extra={},\n skip_frames=2,\n leaf=True,\n ) as span:\n result_data = await wrapped(*args, **kwargs)\n\n hits = self._get_hits(result_data)\n if hits:\n span.context[\"db\"][\"rows_affected\"] = hits\n\n return result_data\n", "path": "elasticapm/instrumentation/packages/asyncio/elasticsearch.py"}]}
2,590
157
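The one-line fix in the record above is an instance of a common asyncio pitfall: an async wrapper whose early-return path returns the un-awaited coroutine, so the caller receives a coroutine object instead of the response (hence `'coroutine' object has no attribute 'meta'`). A runnable sketch with made-up names:

```python
# Sketch only: why `return wrapped(...)` inside an async wrapper breaks the caller.
import asyncio

class Response:
    status = 200

async def wrapped():
    return Response()

async def call_buggy():
    return wrapped()            # forgets to await -> hands back a coroutine object

async def call_fixed():
    return await wrapped()      # the early-return path must await as well

async def main():
    broken = await call_buggy()
    print(type(broken))         # <class 'coroutine'> -- attribute lookups like .meta fail here
    broken.close()              # avoid the "never awaited" RuntimeWarning
    print((await call_fixed()).status)  # 200

asyncio.run(main())
```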
gh_patches_debug_9123
rasdani/github-patches
git_diff
Gallopsled__pwntools-1618
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- aarch64.linux.pushstr_array is broken There's also a missing colon after the `if` statement, and it seems this was never never finished since there's no `%endif`. https://github.com/Gallopsled/pwntools/blob/813749493288fd7330b7b44b81a7f2f7a8a02dac/pwnlib/shellcraft/templates/aarch64/pushstr_array.asm#L76-L79 ``` >>> shellcraft.pushstr_array(['hello', 'world']) --------------------------------------------------------------------------- KeyError Traceback (most recent call last) ~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lookup.py in get_template(self, uri) 248 if self.filesystem_checks: --> 249 return self._check(uri, self._collection[uri]) 250 else: KeyError: 'aarch64/pushstr_array.asm' During handling of the above exception, another exception occurred: CompileException Traceback (most recent call last) <ipython-input-2-b064486e8883> in <module> ----> 1 shellcraft.pushstr_array(['hello', 'world']) ~/pwntools/pwnlib/shellcraft/__init__.py in __getattr__(self, key) 84 for m in self._context_modules(): 85 try: ---> 86 return getattr(m, key) 87 except AttributeError: 88 pass ~/pwntools/pwnlib/shellcraft/__init__.py in __getattr__(self, key) 78 # This function lazy-loads the shellcodes 79 if key in self._shellcodes: ---> 80 real = internal.make_function(key, self._shellcodes[key], self._dir) 81 setattr(self, key, real) 82 return real ~/pwntools/pwnlib/shellcraft/internal.py in make_function(funcname, filename, directory) 112 import inspect 113 path = os.path.join(directory, filename) --> 114 template = lookup_template(path) 115 116 args, varargs, keywords, defaults = inspect.getargspec(template.module.render_body) ~/pwntools/pwnlib/shellcraft/internal.py in lookup_template(filename) 80 81 if filename not in loaded: ---> 82 loaded[filename] = lookup.get_template(filename) 83 84 return loaded[filename] ~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lookup.py in get_template(self, uri) 258 srcfile = posixpath.normpath(posixpath.join(dir_, u)) 259 if os.path.isfile(srcfile): --> 260 return self._load(srcfile, uri) 261 else: 262 raise exceptions.TopLevelLookupException( ~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lookup.py in _load(self, filename, uri) 320 else: 321 module_filename = None --> 322 self._collection[uri] = template = Template( 323 uri=uri, 324 filename=posixpath.normpath(filename), ~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in __init__(self, text, filename, uri, format_exceptions, error_handler, lookup, output_encoding, encoding_errors, module_directory, cache_args, cache_impl, cache_enabled, cache_type, cache_dir, cache_url, module_filename, input_encoding, disable_unicode, module_writer, bytestring_passthrough, default_filters, buffer_filters, strict_undefined, imports, future_imports, enable_loop, preprocessor, lexer_cls, include_error_handler) 346 else: 347 path = None --> 348 module = self._compile_from_file(path, filename) 349 else: 350 raise exceptions.RuntimeException( ~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in _compile_from_file(self, path, filename) 428 # in memory 429 data = util.read_file(filename) --> 430 code, module = _compile_text(self, data, filename) 431 self._source = None 432 self._code = code ~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in _compile_text(template, text, filename) 731 def _compile_text(template, text, filename): 732 identifier 
= template.module_id --> 733 source, lexer = _compile( 734 template, 735 text, ~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in _compile(template, text, filename, generate_magic_comment) 710 preprocessor=template.preprocessor, 711 ) --> 712 node = lexer.parse() 713 source = codegen.compile( 714 node, ~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lexer.py in parse(self) 262 if self.match_expression(): 263 continue --> 264 if self.match_control_line(): 265 continue 266 if self.match_comment(): ~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lexer.py in match_control_line(self) 474 **self.exception_kwargs 475 ) --> 476 self.append_node(parsetree.ControlLine, keyword, isend, text) 477 else: 478 self.append_node(parsetree.Comment, text) ~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lexer.py in append_node(self, nodecls, *args, **kwargs) 150 kwargs.setdefault("pos", self.matched_charpos) 151 kwargs["filename"] = self.filename --> 152 node = nodecls(*args, **kwargs) 153 if len(self.tag): 154 self.tag[-1].nodes.append(node) ~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/parsetree.py in __init__(self, keyword, isend, text, **kwargs) 91 self._undeclared_identifiers = [] 92 else: ---> 93 code = ast.PythonFragment(text, **self.exception_kwargs) 94 self._declared_identifiers = code.declared_identifiers 95 self._undeclared_identifiers = code.undeclared_identifiers ~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/ast.py in __init__(self, code, **exception_kwargs) 87 m = re.match(r"^(\w+)(?:\s+(.*?))?:\s*(#|$)", code.strip(), re.S) 88 if not m: ---> 89 raise exceptions.CompileException( 90 "Fragment '%s' is not a partial control statement" % code, 91 **exception_kwargs CompileException: Fragment 'if len(array[-1] != 'sp')' is not a partial control statement in file '/Users/heapcrash/pwntools/pwnlib/shellcraft/templates/aarch64/pushstr_array.asm' at line: 76 char: 1 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `pwnlib/version.py` Content: ``` 1 __version__ = '4.1.5' 2 ``` Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 from __future__ import print_function 3 4 import glob 5 import os 6 import platform 7 import subprocess 8 import sys 9 import traceback 10 from distutils.command.install import INSTALL_SCHEMES 11 from distutils.sysconfig import get_python_inc 12 from distutils.util import convert_path 13 14 from setuptools import find_packages 15 from setuptools import setup 16 17 # Get all template files 18 templates = [] 19 for dirpath, dirnames, filenames in os.walk(convert_path('pwnlib/shellcraft/templates'), followlinks=True): 20 for f in filenames: 21 templates.append(os.path.relpath(os.path.join(dirpath, f), 'pwnlib')) 22 23 # This makes pwntools-LICENSE.txt appear with the package folders 24 for scheme in INSTALL_SCHEMES.values(): 25 scheme['data'] = scheme['purelib'] 26 27 console_scripts = ['pwn=pwnlib.commandline.main:main'] 28 29 # Find all of the ancillary console scripts 30 # We have a magic flag --include-all-scripts 31 flag = '--only-use-pwn-command' 32 if flag in sys.argv: 33 sys.argv.remove(flag) 34 else: 35 flag = False 36 37 for filename in glob.glob('pwnlib/commandline/*'): 38 filename = os.path.basename(filename) 39 filename, ext = os.path.splitext(filename) 40 41 if ext != '.py' or '__init__' in filename: 42 continue 43 44 script = '%s=pwnlib.commandline.common:main' % filename 45 if not flag: 46 console_scripts.append(script) 47 48 install_requires = ['paramiko>=1.15.2', 49 'mako>=1.0.0', 50 'pyelftools>=0.2.4', 51 'capstone>=3.0.5rc2', # See Gallopsled/pwntools#971, Gallopsled/pwntools#1160 52 'ropgadget>=5.3', 53 'pyserial>=2.7', 54 'requests>=2.0', 55 'pip>=6.0.8', 56 'pygments>=2.0', 57 'pysocks', 58 'python-dateutil', 59 'packaging', 60 'psutil>=3.3.0', 61 'intervaltree>=3.0', 62 'sortedcontainers', 63 'unicorn>=1.0.2rc1,<1.0.2rc4', # see unicorn-engine/unicorn#1100, unicorn-engine/unicorn#1170, Gallopsled/pwntools#1538 64 'six>=1.12.0', 65 ] 66 67 # Check that the user has installed the Python development headers 68 PythonH = os.path.join(get_python_inc(), 'Python.h') 69 if not os.path.exists(PythonH): 70 print("You must install the Python development headers!", file=sys.stderr) 71 print("$ apt-get install python-dev", file=sys.stderr) 72 sys.exit(-1) 73 74 # Convert README.md to reStructuredText for PyPI 75 long_description = '' 76 try: 77 long_description = subprocess.check_output(['pandoc', 'README.md', '--to=rst'], universal_newlines=True) 78 except Exception as e: 79 print("Failed to convert README.md through pandoc, proceeding anyway", file=sys.stderr) 80 traceback.print_exc() 81 82 setup( 83 name = 'pwntools', 84 python_requires = '>=2.7', 85 packages = find_packages(), 86 version = '4.1.5', 87 data_files = [('', 88 glob.glob('*.md') + glob.glob('*.txt')), 89 ], 90 package_data = { 91 'pwnlib': [ 92 'data/crcsums.txt', 93 'data/useragents/useragents.txt', 94 'data/binutils/*', 95 'data/includes/*.h', 96 'data/includes/*/*.h', 97 'data/templates/*.mako', 98 ] + templates, 99 }, 100 entry_points = {'console_scripts': console_scripts}, 101 scripts = glob.glob("bin/*"), 102 description = "Pwntools CTF framework and exploit development library.", 103 long_description = long_description, 104 author = "Gallopsled et al.", 105 author_email = "[email protected]", 106 url = 'https://pwntools.com', 107 download_url = "https://github.com/Gallopsled/pwntools/releases", 108 install_requires = install_requires, 109 license = "Mostly MIT, some 
GPL/BSD, see LICENSE-pwntools.txt", 110 keywords = 'pwntools exploit ctf capture the flag binary wargame overflow stack heap defcon', 111 classifiers = [ 112 'Development Status :: 5 - Production/Stable', 113 'Environment :: Console', 114 'Intended Audience :: Developers', 115 'Intended Audience :: Science/Research', 116 'Intended Audience :: System Administrators', 117 'License :: OSI Approved :: MIT License', 118 'Natural Language :: English', 119 'Operating System :: POSIX :: Linux', 120 'Programming Language :: Python :: 2.7', 121 'Topic :: Security', 122 'Topic :: Software Development :: Assemblers', 123 'Topic :: Software Development :: Debuggers', 124 'Topic :: Software Development :: Disassemblers', 125 'Topic :: Software Development :: Embedded Systems', 126 'Topic :: Software Development :: Libraries :: Python Modules', 127 'Topic :: System :: System Shells', 128 'Topic :: Utilities', 129 ] 130 ) 131 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwnlib/version.py b/pwnlib/version.py --- a/pwnlib/version.py +++ b/pwnlib/version.py @@ -1 +1 @@ -__version__ = '4.1.5' +__version__ = '4.1.6' diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -83,7 +83,7 @@ name = 'pwntools', python_requires = '>=2.7', packages = find_packages(), - version = '4.1.5', + version = '4.1.6', data_files = [('', glob.glob('*.md') + glob.glob('*.txt')), ],
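The traceback quoted in the issue above comes from Mako's lexer: a `%`-prefixed control line has to be a partial Python control statement, meaning it needs a trailing colon and a matching `% endif`, and the quoted fragment also wraps the comparison inside `len()`. The sketch below only illustrates that Mako rule; the template text and the `array` name are placeholders, not the actual `pushstr_array.asm` contents or its eventual fix:

```python
# Minimal illustration of the Mako control-line rule behind the CompileException
# shown in the issue; the assembly body and the `array` variable are placeholders.
from mako.template import Template
from mako import exceptions

# A well-formed control line: "% if <expression>:" ... "% endif"
well_formed = Template(
    "% if array and array[-1] != 'sp':\n"
    "    add sp, sp, ${8 * len(array)}\n"
    "% endif\n"
)

# The fragment from the issue: no colon and no closing tag, so Mako rejects it
# while compiling the template text.
try:
    Template("% if len(array[-1] != 'sp')\n")
except exceptions.CompileException as error:
    print(error)  # Fragment '...' is not a partial control statement
```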
{"golden_diff": "diff --git a/pwnlib/version.py b/pwnlib/version.py\n--- a/pwnlib/version.py\n+++ b/pwnlib/version.py\n@@ -1 +1 @@\n-__version__ = '4.1.5'\n+__version__ = '4.1.6'\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -83,7 +83,7 @@\n name = 'pwntools',\n python_requires = '>=2.7',\n packages = find_packages(),\n- version = '4.1.5',\n+ version = '4.1.6',\n data_files = [('',\n glob.glob('*.md') + glob.glob('*.txt')),\n ],\n", "issue": "aarch64.linux.pushstr_array is broken\nThere's also a missing colon after the `if` statement, and it seems this was never never finished since there's no `%endif`.\r\n\r\nhttps://github.com/Gallopsled/pwntools/blob/813749493288fd7330b7b44b81a7f2f7a8a02dac/pwnlib/shellcraft/templates/aarch64/pushstr_array.asm#L76-L79\r\n\r\n\r\n```\r\n>>> shellcraft.pushstr_array(['hello', 'world'])\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lookup.py in get_template(self, uri)\r\n 248 if self.filesystem_checks:\r\n--> 249 return self._check(uri, self._collection[uri])\r\n 250 else:\r\n\r\nKeyError: 'aarch64/pushstr_array.asm'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nCompileException Traceback (most recent call last)\r\n<ipython-input-2-b064486e8883> in <module>\r\n----> 1 shellcraft.pushstr_array(['hello', 'world'])\r\n\r\n~/pwntools/pwnlib/shellcraft/__init__.py in __getattr__(self, key)\r\n 84 for m in self._context_modules():\r\n 85 try:\r\n---> 86 return getattr(m, key)\r\n 87 except AttributeError:\r\n 88 pass\r\n\r\n~/pwntools/pwnlib/shellcraft/__init__.py in __getattr__(self, key)\r\n 78 # This function lazy-loads the shellcodes\r\n 79 if key in self._shellcodes:\r\n---> 80 real = internal.make_function(key, self._shellcodes[key], self._dir)\r\n 81 setattr(self, key, real)\r\n 82 return real\r\n\r\n~/pwntools/pwnlib/shellcraft/internal.py in make_function(funcname, filename, directory)\r\n 112 import inspect\r\n 113 path = os.path.join(directory, filename)\r\n--> 114 template = lookup_template(path)\r\n 115\r\n 116 args, varargs, keywords, defaults = inspect.getargspec(template.module.render_body)\r\n\r\n~/pwntools/pwnlib/shellcraft/internal.py in lookup_template(filename)\r\n 80\r\n 81 if filename not in loaded:\r\n---> 82 loaded[filename] = lookup.get_template(filename)\r\n 83\r\n 84 return loaded[filename]\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lookup.py in get_template(self, uri)\r\n 258 srcfile = posixpath.normpath(posixpath.join(dir_, u))\r\n 259 if os.path.isfile(srcfile):\r\n--> 260 return self._load(srcfile, uri)\r\n 261 else:\r\n 262 raise exceptions.TopLevelLookupException(\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lookup.py in _load(self, filename, uri)\r\n 320 else:\r\n 321 module_filename = None\r\n--> 322 self._collection[uri] = template = Template(\r\n 323 uri=uri,\r\n 324 filename=posixpath.normpath(filename),\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in __init__(self, text, filename, uri, format_exceptions, error_handler, lookup, output_encoding, encoding_errors, module_directory, cache_args, cache_impl, cache_enabled, cache_type, cache_dir, cache_url, module_filename, input_encoding, disable_unicode, module_writer, bytestring_passthrough, default_filters, buffer_filters, strict_undefined, imports, future_imports, enable_loop, preprocessor, lexer_cls, 
include_error_handler)\r\n 346 else:\r\n 347 path = None\r\n--> 348 module = self._compile_from_file(path, filename)\r\n 349 else:\r\n 350 raise exceptions.RuntimeException(\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in _compile_from_file(self, path, filename)\r\n 428 # in memory\r\n 429 data = util.read_file(filename)\r\n--> 430 code, module = _compile_text(self, data, filename)\r\n 431 self._source = None\r\n 432 self._code = code\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in _compile_text(template, text, filename)\r\n 731 def _compile_text(template, text, filename):\r\n 732 identifier = template.module_id\r\n--> 733 source, lexer = _compile(\r\n 734 template,\r\n 735 text,\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in _compile(template, text, filename, generate_magic_comment)\r\n 710 preprocessor=template.preprocessor,\r\n 711 )\r\n--> 712 node = lexer.parse()\r\n 713 source = codegen.compile(\r\n 714 node,\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lexer.py in parse(self)\r\n 262 if self.match_expression():\r\n 263 continue\r\n--> 264 if self.match_control_line():\r\n 265 continue\r\n 266 if self.match_comment():\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lexer.py in match_control_line(self)\r\n 474 **self.exception_kwargs\r\n 475 )\r\n--> 476 self.append_node(parsetree.ControlLine, keyword, isend, text)\r\n 477 else:\r\n 478 self.append_node(parsetree.Comment, text)\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lexer.py in append_node(self, nodecls, *args, **kwargs)\r\n 150 kwargs.setdefault(\"pos\", self.matched_charpos)\r\n 151 kwargs[\"filename\"] = self.filename\r\n--> 152 node = nodecls(*args, **kwargs)\r\n 153 if len(self.tag):\r\n 154 self.tag[-1].nodes.append(node)\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/parsetree.py in __init__(self, keyword, isend, text, **kwargs)\r\n 91 self._undeclared_identifiers = []\r\n 92 else:\r\n---> 93 code = ast.PythonFragment(text, **self.exception_kwargs)\r\n 94 self._declared_identifiers = code.declared_identifiers\r\n 95 self._undeclared_identifiers = code.undeclared_identifiers\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/ast.py in __init__(self, code, **exception_kwargs)\r\n 87 m = re.match(r\"^(\\w+)(?:\\s+(.*?))?:\\s*(#|$)\", code.strip(), re.S)\r\n 88 if not m:\r\n---> 89 raise exceptions.CompileException(\r\n 90 \"Fragment '%s' is not a partial control statement\" % code,\r\n 91 **exception_kwargs\r\n\r\nCompileException: Fragment 'if len(array[-1] != 'sp')' is not a partial control statement in file '/Users/heapcrash/pwntools/pwnlib/shellcraft/templates/aarch64/pushstr_array.asm' at line: 76 char: 1\r\n```\n", "before_files": [{"content": "__version__ = '4.1.5'\n", "path": "pwnlib/version.py"}, {"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport glob\nimport os\nimport platform\nimport subprocess\nimport sys\nimport traceback\nfrom distutils.command.install import INSTALL_SCHEMES\nfrom distutils.sysconfig import get_python_inc\nfrom distutils.util import convert_path\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Get all template files\ntemplates = []\nfor dirpath, dirnames, filenames in os.walk(convert_path('pwnlib/shellcraft/templates'), followlinks=True):\n for f in filenames:\n templates.append(os.path.relpath(os.path.join(dirpath, f), 'pwnlib'))\n\n# This makes 
pwntools-LICENSE.txt appear with the package folders\nfor scheme in INSTALL_SCHEMES.values():\n scheme['data'] = scheme['purelib']\n\nconsole_scripts = ['pwn=pwnlib.commandline.main:main']\n\n# Find all of the ancillary console scripts\n# We have a magic flag --include-all-scripts\nflag = '--only-use-pwn-command'\nif flag in sys.argv:\n sys.argv.remove(flag)\nelse:\n flag = False\n\nfor filename in glob.glob('pwnlib/commandline/*'):\n filename = os.path.basename(filename)\n filename, ext = os.path.splitext(filename)\n\n if ext != '.py' or '__init__' in filename:\n continue\n\n script = '%s=pwnlib.commandline.common:main' % filename\n if not flag:\n console_scripts.append(script)\n\ninstall_requires = ['paramiko>=1.15.2',\n 'mako>=1.0.0',\n 'pyelftools>=0.2.4',\n 'capstone>=3.0.5rc2', # See Gallopsled/pwntools#971, Gallopsled/pwntools#1160\n 'ropgadget>=5.3',\n 'pyserial>=2.7',\n 'requests>=2.0',\n 'pip>=6.0.8',\n 'pygments>=2.0',\n 'pysocks',\n 'python-dateutil',\n 'packaging',\n 'psutil>=3.3.0',\n 'intervaltree>=3.0',\n 'sortedcontainers',\n 'unicorn>=1.0.2rc1,<1.0.2rc4', # see unicorn-engine/unicorn#1100, unicorn-engine/unicorn#1170, Gallopsled/pwntools#1538\n 'six>=1.12.0',\n]\n\n# Check that the user has installed the Python development headers\nPythonH = os.path.join(get_python_inc(), 'Python.h')\nif not os.path.exists(PythonH):\n print(\"You must install the Python development headers!\", file=sys.stderr)\n print(\"$ apt-get install python-dev\", file=sys.stderr)\n sys.exit(-1)\n\n# Convert README.md to reStructuredText for PyPI\nlong_description = ''\ntry:\n long_description = subprocess.check_output(['pandoc', 'README.md', '--to=rst'], universal_newlines=True)\nexcept Exception as e:\n print(\"Failed to convert README.md through pandoc, proceeding anyway\", file=sys.stderr)\n traceback.print_exc()\n\nsetup(\n name = 'pwntools',\n python_requires = '>=2.7',\n packages = find_packages(),\n version = '4.1.5',\n data_files = [('',\n glob.glob('*.md') + glob.glob('*.txt')),\n ],\n package_data = {\n 'pwnlib': [\n 'data/crcsums.txt',\n 'data/useragents/useragents.txt',\n 'data/binutils/*',\n 'data/includes/*.h',\n 'data/includes/*/*.h',\n 'data/templates/*.mako',\n ] + templates,\n },\n entry_points = {'console_scripts': console_scripts},\n scripts = glob.glob(\"bin/*\"),\n description = \"Pwntools CTF framework and exploit development library.\",\n long_description = long_description,\n author = \"Gallopsled et al.\",\n author_email = \"[email protected]\",\n url = 'https://pwntools.com',\n download_url = \"https://github.com/Gallopsled/pwntools/releases\",\n install_requires = install_requires,\n license = \"Mostly MIT, some GPL/BSD, see LICENSE-pwntools.txt\",\n keywords = 'pwntools exploit ctf capture the flag binary wargame overflow stack heap defcon',\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Assemblers',\n 'Topic :: Software Development :: Debuggers',\n 'Topic :: Software Development :: Disassemblers',\n 'Topic :: Software Development :: Embedded Systems',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: System :: System Shells',\n 'Topic :: Utilities',\n 
]\n)\n", "path": "setup.py"}], "after_files": [{"content": "__version__ = '4.1.6'\n", "path": "pwnlib/version.py"}, {"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport glob\nimport os\nimport platform\nimport subprocess\nimport sys\nimport traceback\nfrom distutils.command.install import INSTALL_SCHEMES\nfrom distutils.sysconfig import get_python_inc\nfrom distutils.util import convert_path\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Get all template files\ntemplates = []\nfor dirpath, dirnames, filenames in os.walk(convert_path('pwnlib/shellcraft/templates'), followlinks=True):\n for f in filenames:\n templates.append(os.path.relpath(os.path.join(dirpath, f), 'pwnlib'))\n\n# This makes pwntools-LICENSE.txt appear with the package folders\nfor scheme in INSTALL_SCHEMES.values():\n scheme['data'] = scheme['purelib']\n\nconsole_scripts = ['pwn=pwnlib.commandline.main:main']\n\n# Find all of the ancillary console scripts\n# We have a magic flag --include-all-scripts\nflag = '--only-use-pwn-command'\nif flag in sys.argv:\n sys.argv.remove(flag)\nelse:\n flag = False\n\nfor filename in glob.glob('pwnlib/commandline/*'):\n filename = os.path.basename(filename)\n filename, ext = os.path.splitext(filename)\n\n if ext != '.py' or '__init__' in filename:\n continue\n\n script = '%s=pwnlib.commandline.common:main' % filename\n if not flag:\n console_scripts.append(script)\n\ninstall_requires = ['paramiko>=1.15.2',\n 'mako>=1.0.0',\n 'pyelftools>=0.2.4',\n 'capstone>=3.0.5rc2', # See Gallopsled/pwntools#971, Gallopsled/pwntools#1160\n 'ropgadget>=5.3',\n 'pyserial>=2.7',\n 'requests>=2.0',\n 'pip>=6.0.8',\n 'pygments>=2.0',\n 'pysocks',\n 'python-dateutil',\n 'packaging',\n 'psutil>=3.3.0',\n 'intervaltree>=3.0',\n 'sortedcontainers',\n 'unicorn>=1.0.2rc1,<1.0.2rc4', # see unicorn-engine/unicorn#1100, unicorn-engine/unicorn#1170, Gallopsled/pwntools#1538\n 'six>=1.12.0',\n]\n\n# Check that the user has installed the Python development headers\nPythonH = os.path.join(get_python_inc(), 'Python.h')\nif not os.path.exists(PythonH):\n print(\"You must install the Python development headers!\", file=sys.stderr)\n print(\"$ apt-get install python-dev\", file=sys.stderr)\n sys.exit(-1)\n\n# Convert README.md to reStructuredText for PyPI\nlong_description = ''\ntry:\n long_description = subprocess.check_output(['pandoc', 'README.md', '--to=rst'], universal_newlines=True)\nexcept Exception as e:\n print(\"Failed to convert README.md through pandoc, proceeding anyway\", file=sys.stderr)\n traceback.print_exc()\n\nsetup(\n name = 'pwntools',\n python_requires = '>=2.7',\n packages = find_packages(),\n version = '4.1.6',\n data_files = [('',\n glob.glob('*.md') + glob.glob('*.txt')),\n ],\n package_data = {\n 'pwnlib': [\n 'data/crcsums.txt',\n 'data/useragents/useragents.txt',\n 'data/binutils/*',\n 'data/includes/*.h',\n 'data/includes/*/*.h',\n 'data/templates/*.mako',\n ] + templates,\n },\n entry_points = {'console_scripts': console_scripts},\n scripts = glob.glob(\"bin/*\"),\n description = \"Pwntools CTF framework and exploit development library.\",\n long_description = long_description,\n author = \"Gallopsled et al.\",\n author_email = \"[email protected]\",\n url = 'https://pwntools.com',\n download_url = \"https://github.com/Gallopsled/pwntools/releases\",\n install_requires = install_requires,\n license = \"Mostly MIT, some GPL/BSD, see LICENSE-pwntools.txt\",\n keywords = 'pwntools exploit ctf capture the flag binary wargame overflow stack 
heap defcon',\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Assemblers',\n 'Topic :: Software Development :: Debuggers',\n 'Topic :: Software Development :: Disassemblers',\n 'Topic :: Software Development :: Embedded Systems',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: System :: System Shells',\n 'Topic :: Utilities',\n ]\n)\n", "path": "setup.py"}]}
3,563
168
gh_patches_debug_36784
rasdani/github-patches
git_diff
zestedesavoir__zds-site-4294
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [v23] Renommer un contenu l'envoit deux fois dans les flux rss En jetant un œil aux [flux rss des billets](https://beta.zestedesavoir.com/tribunes/flux/rss/), on se rend compte que le billet `Un gros Troll de plus sur Javascript` est présent 2 fois. C'est du au fait que j'ai publié la première fois le billet avec un autre nom. A la deuxième publication, le nom a été modifié et ça apparait deux fois inutilement dans les flux rss. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `zds/tutorialv2/managers.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 from django.conf import settings 4 from django.db import models 5 from django.db.models import Count, F 6 7 from zds.utils.models import Tag 8 from django.utils.translation import ugettext_lazy as _ 9 10 11 class PublishedContentManager(models.Manager): 12 """ 13 Custom published content manager. 14 """ 15 16 def last_contents_of_a_member_loaded(self, author, _type=None): 17 """ 18 Get contents published by author depends on settings.ZDS_APP['content']['user_page_number'] 19 20 :param author: 21 :param _type: subtype to filter request 22 :return: 23 :rtype: django.db.models.QuerySet 24 """ 25 26 queryset = self.prefetch_related('content') \ 27 .prefetch_related('content__authors') \ 28 .prefetch_related('content__subcategory') \ 29 .filter(content__authors__in=[author]) \ 30 .filter(must_redirect=False) 31 32 if _type: 33 queryset = queryset.filter(content_type=_type) 34 35 public_contents = queryset.order_by('-publication_date').all()[:settings.ZDS_APP['content']['user_page_number']] 36 return public_contents 37 38 def last_tutorials_of_a_member_loaded(self, author): 39 return self.last_contents_of_a_member_loaded(author, _type='TUTORIAL') 40 41 def last_articles_of_a_member_loaded(self, author): 42 return self.last_contents_of_a_member_loaded(author, _type='ARTICLE') 43 44 def last_opinions_of_a_member_loaded(self, author): 45 return self.last_contents_of_a_member_loaded(author, _type='OPINION') 46 47 def get_contents_count(self): 48 """ 49 :rtype: int 50 """ 51 return self.filter(must_redirect=False) \ 52 .count() 53 54 def get_top_tags(self, displayed_types, limit=-1): 55 """ 56 Retrieve all most rated tags. 57 58 :param displayed_types: 59 :param limit: if ``-1`` or ``0`` => no limit. Else just takes the provided number of elements. 60 :return: 61 """ 62 published = self.filter( 63 must_redirect=False, 64 content__type__in=displayed_types).values('content__tags').distinct() 65 tags_pk = [tag['content__tags'] for tag in published] 66 queryset = Tag.objects\ 67 .filter(pk__in=tags_pk, publishablecontent__public_version__isnull=False, 68 publishablecontent__type__in=displayed_types) \ 69 .annotate(num_content=Count('publishablecontent')) \ 70 .order_by('-num_content', 'title') 71 if limit > 0: 72 queryset = queryset[:limit] 73 return queryset 74 75 def transfer_paternity(self, unsubscribed_user, replacement_author): 76 """ 77 erase or transfer the paternity of all published content owned by a user. 
78 if a content has more than one author, the unregistering author just leave its redaction\ 79 else just mark ``replacement_author`` as the new author 80 81 """ 82 for published in self.filter(authors__in=[unsubscribed_user]): 83 if published.authors.count() == 1: 84 published.authors.add(replacement_author) 85 published.authors.remove(unsubscribed_user) 86 published.save() 87 88 89 class PublishableContentManager(models.Manager): 90 """...""" 91 92 def transfer_paternity(self, unregistered_user, replacement_author, gallery_class): 93 """ 94 Erases or transfers the paternity of all publishable content owned by a user. \ 95 If a content has more than one author, the unregistering author simply leaves its author list, \ 96 otherwise their published content are sent to ``replacement_author``, \ 97 unpublished content are deleted and their beta topics closed. 98 99 :param unregistered_user: the user to be unregistered 100 :param replacement_author: the new author 101 :param gallery_class: the class to link tutorial with gallery (perhaps overkill :p) 102 """ 103 for content in self.filter(authors__in=[unregistered_user]): 104 # we delete content only if not published with only one author 105 if not content.in_public() and content.authors.count() == 1: 106 if content.in_beta() and content.beta_topic: 107 beta_topic = content.beta_topic 108 beta_topic.is_locked = True 109 beta_topic.save() 110 first_post = beta_topic.first_post() 111 first_post.update_content(_(u"# Le tutoriel présenté par ce topic n'existe plus.")) 112 first_post.save() 113 content.delete() 114 else: 115 if content.authors.count() == 1: 116 content.authors.add(replacement_author) 117 external_gallery = gallery_class() 118 external_gallery.user = replacement_author 119 external_gallery.gallery = content.gallery 120 external_gallery.mode = 'W' 121 external_gallery.save() 122 gallery_class.objects.filter(user=unregistered_user).filter(gallery=content.gallery).delete() 123 124 content.authors.remove(unregistered_user) 125 # we add a sentence to the content's introduction stating it was written by a former member. 126 versioned = content.load_version() 127 title = versioned.title 128 introduction = _(u'[[i]]\n|Ce contenu a été rédigé par {} qui a quitté le site.\n\n')\ 129 .format(unregistered_user.username) + versioned.get_introduction() 130 conclusion = versioned.get_conclusion() 131 sha = versioned.repo_update(title, introduction, conclusion, 132 commit_message='Author unsubscribed', 133 do_commit=True, update_slug=True) 134 content.sha_draft = sha 135 content.save() 136 137 def get_last_tutorials(self): 138 """ 139 This depends on settings.ZDS_APP['tutorial']['home_number'] parameter 140 141 :return: lit of last published content 142 :rtype: list 143 """ 144 home_number = settings.ZDS_APP['tutorial']['home_number'] 145 all_contents = self.filter(type='TUTORIAL') \ 146 .filter(public_version__isnull=False) \ 147 .prefetch_related('authors') \ 148 .prefetch_related('authors__profile') \ 149 .select_related('last_note') \ 150 .select_related('public_version') \ 151 .prefetch_related('subcategory') \ 152 .prefetch_related('tags') \ 153 .order_by('-public_version__publication_date')[:home_number] 154 published = [] 155 for content in all_contents: 156 content.public_version.content = content 157 published.append(content.public_version) 158 return published 159 160 def get_last_articles(self): 161 """ 162 ..attention: 163 this one uses a raw subquery for historical reasons. 
It will hopefully be replaced one day by an 164 ORM primitive. 165 166 :return: list of last articles expanded with 'count_note' property that prefetches number of comments 167 :rtype: list 168 """ 169 sub_query = 'SELECT COUNT(*) FROM {} WHERE {}={}'.format( 170 'tutorialv2_contentreaction', 171 'tutorialv2_contentreaction.related_content_id', 172 'tutorialv2_publishedcontent.content_pk', 173 ) 174 home_number = settings.ZDS_APP['article']['home_number'] 175 all_contents = self.filter(type='ARTICLE') \ 176 .filter(public_version__isnull=False) \ 177 .prefetch_related('authors') \ 178 .prefetch_related('authors__profile') \ 179 .select_related('last_note') \ 180 .select_related('public_version') \ 181 .prefetch_related('subcategory') \ 182 .prefetch_related('tags') \ 183 .extra(select={'count_note': sub_query}) \ 184 .order_by('-public_version__publication_date')[:home_number] 185 published = [] 186 for content in all_contents: 187 content.public_version.content = content 188 published.append(content.public_version) 189 return published 190 191 def get_last_opinions(self): 192 """ 193 This depends on settings.ZDS_APP['opinions']['home_number'] parameter. 194 195 :return: list of last opinions 196 :rtype: list 197 """ 198 home_number = settings.ZDS_APP['opinions']['home_number'] 199 all_contents = self.filter(type='OPINION') \ 200 .filter(public_version__isnull=False, sha_picked=F('sha_public')) \ 201 .prefetch_related('authors') \ 202 .prefetch_related('authors__profile') \ 203 .select_related('last_note') \ 204 .select_related('public_version') \ 205 .prefetch_related('subcategory') \ 206 .prefetch_related('tags') \ 207 .order_by('-picked_date')[:home_number] 208 published = [] 209 for content in all_contents: 210 content.public_version.content = content 211 published.append(content.public_version) 212 return published 213 ``` Path: `zds/tutorialv2/feeds.py` Content: ``` 1 # coding: utf-8 2 3 from django.contrib.syndication.views import Feed 4 from django.conf import settings 5 6 from django.utils.feedgenerator import Atom1Feed 7 8 from zds.tutorialv2.models.models_database import PublishedContent 9 from zds.settings import ZDS_APP 10 11 12 class LastContentFeedRSS(Feed): 13 """ 14 RSS feed for any type of content. 15 """ 16 title = u'Contenus sur {}'.format(settings.ZDS_APP['site']['litteral_name']) 17 description = u'Les derniers contenus parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name']) 18 link = '' 19 content_type = None 20 21 def items(self): 22 """ 23 :return: The last (typically 5) contents (sorted by publication date). 24 If `self.type` is not `None`, the contents will only be of this type. 
25 """ 26 contents = PublishedContent.objects\ 27 .prefetch_related('content')\ 28 .prefetch_related('content__authors') 29 30 if self.content_type is not None: 31 contents = contents.filter(content_type=self.content_type) 32 33 return contents.order_by('-publication_date')[:ZDS_APP['content']['feed_length']] 34 35 def item_title(self, item): 36 return item.content.title 37 38 def item_pubdate(self, item): 39 return item.publication_date 40 41 def item_description(self, item): 42 return item.content.description 43 44 def item_author_name(self, item): 45 authors_list = item.content.authors.all() 46 authors = [] 47 for authors_obj in authors_list: 48 authors.append(authors_obj.username) 49 authors = ', '.join(authors) 50 return authors 51 52 def item_link(self, item): 53 return item.get_absolute_url_online() 54 55 56 class LastContentFeedATOM(LastContentFeedRSS): 57 feed_type = Atom1Feed 58 subtitle = LastContentFeedRSS.description 59 60 61 class LastTutorialsFeedRSS(LastContentFeedRSS): 62 """ 63 Redefinition of `LastContentFeedRSS` for tutorials only 64 """ 65 content_type = 'TUTORIAL' 66 link = '/tutoriels/' 67 title = u'Tutoriels sur {}'.format(settings.ZDS_APP['site']['litteral_name']) 68 description = u'Les derniers tutoriels parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name']) 69 70 71 class LastTutorialsFeedATOM(LastTutorialsFeedRSS): 72 feed_type = Atom1Feed 73 subtitle = LastTutorialsFeedRSS.description 74 75 76 class LastArticlesFeedRSS(LastContentFeedRSS): 77 """ 78 Redefinition of `LastContentFeedRSS` for articles only 79 """ 80 content_type = 'ARTICLE' 81 link = '/articles/' 82 title = u'Articles sur {}'.format(settings.ZDS_APP['site']['litteral_name']) 83 description = u'Les derniers articles parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name']) 84 85 86 class LastArticlesFeedATOM(LastArticlesFeedRSS): 87 feed_type = Atom1Feed 88 subtitle = LastArticlesFeedRSS.description 89 90 91 class LastOpinionsFeedRSS(LastContentFeedRSS): 92 """ 93 Redefinition of `LastContentFeedRSS` for opinions only 94 """ 95 content_type = 'OPINION' 96 link = '/tribunes/' 97 title = u'Tribunes sur {}'.format(settings.ZDS_APP['site']['litteral_name']) 98 description = u'Les derniers billets des tribunes parus sur {}.'.format( 99 settings.ZDS_APP['site']['litteral_name']) 100 101 102 class LastOpinionsFeedATOM(LastOpinionsFeedRSS): 103 feed_type = Atom1Feed 104 subtitle = LastOpinionsFeedRSS.description 105 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/zds/tutorialv2/feeds.py b/zds/tutorialv2/feeds.py --- a/zds/tutorialv2/feeds.py +++ b/zds/tutorialv2/feeds.py @@ -23,14 +23,9 @@ :return: The last (typically 5) contents (sorted by publication date). If `self.type` is not `None`, the contents will only be of this type. """ - contents = PublishedContent.objects\ - .prefetch_related('content')\ - .prefetch_related('content__authors') + contents = PublishedContent.objects.published_contents(self.content_type)[:ZDS_APP['content']['feed_length']] - if self.content_type is not None: - contents = contents.filter(content_type=self.content_type) - - return contents.order_by('-publication_date')[:ZDS_APP['content']['feed_length']] + return contents def item_title(self, item): return item.content.title diff --git a/zds/tutorialv2/managers.py b/zds/tutorialv2/managers.py --- a/zds/tutorialv2/managers.py +++ b/zds/tutorialv2/managers.py @@ -13,26 +13,37 @@ Custom published content manager. """ - def last_contents_of_a_member_loaded(self, author, _type=None): + def published_contents(self, _type=None): """ - Get contents published by author depends on settings.ZDS_APP['content']['user_page_number'] + Get contents published order by date. - :param author: - :param _type: subtype to filter request :return: :rtype: django.db.models.QuerySet """ - queryset = self.prefetch_related('content') \ .prefetch_related('content__authors') \ .prefetch_related('content__subcategory') \ - .filter(content__authors__in=[author]) \ - .filter(must_redirect=False) + .filter(must_redirect=False) \ + .order_by('-publication_date') if _type: queryset = queryset.filter(content_type=_type) - public_contents = queryset.order_by('-publication_date').all()[:settings.ZDS_APP['content']['user_page_number']] + return queryset + + def last_contents_of_a_member_loaded(self, author, _type=None): + """ + Get contents published by author depends on settings.ZDS_APP['content']['user_page_number'] + + :param author: + :param _type: subtype to filter request + :return: + :rtype: django.db.models.QuerySet + """ + queryset = self.published_contents(_type) \ + .filter(content__authors__in=[author]) + + public_contents = queryset.all()[:settings.ZDS_APP['content']['user_page_number']] return public_contents def last_tutorials_of_a_member_loaded(self, author):
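In the patch above, the feed no longer builds its own queryset: it goes through the new `PublishedContent.objects.published_contents()` helper, which applies the `must_redirect=False` filter that the per-author helpers were already using. Assuming, as the field name suggests, that renaming a publication keeps the old entry around with `must_redirect=True` so its old URL still redirects, excluding those rows is what removes the duplicate from the RSS feeds. A rough Python equivalent of the query the patched opinion feed ends up running, with the limit hard-coded to 5 in place of `ZDS_APP['content']['feed_length']`:

```python
# Sketch of the effective feed query after the patch; the model and field names
# come from the files shown above, the limit of 5 stands in for the configured value.
from zds.tutorialv2.models.models_database import PublishedContent

def latest_opinions(limit=5):
    return (PublishedContent.objects
            .prefetch_related('content', 'content__authors', 'content__subcategory')
            .filter(must_redirect=False, content_type='OPINION')  # skip redirect-only rows
            .order_by('-publication_date')[:limit])
```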
{"golden_diff": "diff --git a/zds/tutorialv2/feeds.py b/zds/tutorialv2/feeds.py\n--- a/zds/tutorialv2/feeds.py\n+++ b/zds/tutorialv2/feeds.py\n@@ -23,14 +23,9 @@\n :return: The last (typically 5) contents (sorted by publication date).\n If `self.type` is not `None`, the contents will only be of this type.\n \"\"\"\n- contents = PublishedContent.objects\\\n- .prefetch_related('content')\\\n- .prefetch_related('content__authors')\n+ contents = PublishedContent.objects.published_contents(self.content_type)[:ZDS_APP['content']['feed_length']]\n \n- if self.content_type is not None:\n- contents = contents.filter(content_type=self.content_type)\n-\n- return contents.order_by('-publication_date')[:ZDS_APP['content']['feed_length']]\n+ return contents\n \n def item_title(self, item):\n return item.content.title\ndiff --git a/zds/tutorialv2/managers.py b/zds/tutorialv2/managers.py\n--- a/zds/tutorialv2/managers.py\n+++ b/zds/tutorialv2/managers.py\n@@ -13,26 +13,37 @@\n Custom published content manager.\n \"\"\"\n \n- def last_contents_of_a_member_loaded(self, author, _type=None):\n+ def published_contents(self, _type=None):\n \"\"\"\n- Get contents published by author depends on settings.ZDS_APP['content']['user_page_number']\n+ Get contents published order by date.\n \n- :param author:\n- :param _type: subtype to filter request\n :return:\n :rtype: django.db.models.QuerySet\n \"\"\"\n-\n queryset = self.prefetch_related('content') \\\n .prefetch_related('content__authors') \\\n .prefetch_related('content__subcategory') \\\n- .filter(content__authors__in=[author]) \\\n- .filter(must_redirect=False)\n+ .filter(must_redirect=False) \\\n+ .order_by('-publication_date')\n \n if _type:\n queryset = queryset.filter(content_type=_type)\n \n- public_contents = queryset.order_by('-publication_date').all()[:settings.ZDS_APP['content']['user_page_number']]\n+ return queryset\n+\n+ def last_contents_of_a_member_loaded(self, author, _type=None):\n+ \"\"\"\n+ Get contents published by author depends on settings.ZDS_APP['content']['user_page_number']\n+\n+ :param author:\n+ :param _type: subtype to filter request\n+ :return:\n+ :rtype: django.db.models.QuerySet\n+ \"\"\"\n+ queryset = self.published_contents(_type) \\\n+ .filter(content__authors__in=[author])\n+\n+ public_contents = queryset.all()[:settings.ZDS_APP['content']['user_page_number']]\n return public_contents\n \n def last_tutorials_of_a_member_loaded(self, author):\n", "issue": "[v23] Renommer un contenu l'envoit deux fois dans les flux rss\nEn jetant un \u0153il aux [flux rss des billets](https://beta.zestedesavoir.com/tribunes/flux/rss/), on se rend compte que le billet `Un gros Troll de plus sur Javascript` est pr\u00e9sent 2 fois.\r\n\r\nC'est du au fait que j'ai publi\u00e9 la premi\u00e8re fois le billet avec un autre nom. 
A la deuxi\u00e8me publication, le nom a \u00e9t\u00e9 modifi\u00e9 et \u00e7a apparait deux fois inutilement dans les flux rss.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.db.models import Count, F\n\nfrom zds.utils.models import Tag\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass PublishedContentManager(models.Manager):\n \"\"\"\n Custom published content manager.\n \"\"\"\n\n def last_contents_of_a_member_loaded(self, author, _type=None):\n \"\"\"\n Get contents published by author depends on settings.ZDS_APP['content']['user_page_number']\n\n :param author:\n :param _type: subtype to filter request\n :return:\n :rtype: django.db.models.QuerySet\n \"\"\"\n\n queryset = self.prefetch_related('content') \\\n .prefetch_related('content__authors') \\\n .prefetch_related('content__subcategory') \\\n .filter(content__authors__in=[author]) \\\n .filter(must_redirect=False)\n\n if _type:\n queryset = queryset.filter(content_type=_type)\n\n public_contents = queryset.order_by('-publication_date').all()[:settings.ZDS_APP['content']['user_page_number']]\n return public_contents\n\n def last_tutorials_of_a_member_loaded(self, author):\n return self.last_contents_of_a_member_loaded(author, _type='TUTORIAL')\n\n def last_articles_of_a_member_loaded(self, author):\n return self.last_contents_of_a_member_loaded(author, _type='ARTICLE')\n\n def last_opinions_of_a_member_loaded(self, author):\n return self.last_contents_of_a_member_loaded(author, _type='OPINION')\n\n def get_contents_count(self):\n \"\"\"\n :rtype: int\n \"\"\"\n return self.filter(must_redirect=False) \\\n .count()\n\n def get_top_tags(self, displayed_types, limit=-1):\n \"\"\"\n Retrieve all most rated tags.\n\n :param displayed_types:\n :param limit: if ``-1``\u00a0or ``0`` => no limit. Else just takes the provided number of elements.\n :return:\n \"\"\"\n published = self.filter(\n must_redirect=False,\n content__type__in=displayed_types).values('content__tags').distinct()\n tags_pk = [tag['content__tags'] for tag in published]\n queryset = Tag.objects\\\n .filter(pk__in=tags_pk, publishablecontent__public_version__isnull=False,\n publishablecontent__type__in=displayed_types) \\\n .annotate(num_content=Count('publishablecontent')) \\\n .order_by('-num_content', 'title')\n if limit > 0:\n queryset = queryset[:limit]\n return queryset\n\n def transfer_paternity(self, unsubscribed_user, replacement_author):\n \"\"\"\n erase or transfer the paternity of all published content owned by a user.\n if a content has more than one author, the unregistering author just leave its redaction\\\n else just mark ``replacement_author`` as the new author\n\n \"\"\"\n for published in self.filter(authors__in=[unsubscribed_user]):\n if published.authors.count() == 1:\n published.authors.add(replacement_author)\n published.authors.remove(unsubscribed_user)\n published.save()\n\n\nclass PublishableContentManager(models.Manager):\n \"\"\"...\"\"\"\n\n def transfer_paternity(self, unregistered_user, replacement_author, gallery_class):\n \"\"\"\n Erases or transfers the paternity of all publishable content owned by a user. 
\\\n If a content has more than one author, the unregistering author simply leaves its author list, \\\n otherwise their published content are sent to ``replacement_author``, \\\n unpublished content are deleted and their beta topics closed.\n\n :param unregistered_user: the user to be unregistered\n :param replacement_author: the new author\n :param gallery_class: the class to link tutorial with gallery (perhaps overkill :p)\n \"\"\"\n for content in self.filter(authors__in=[unregistered_user]):\n # we delete content only if not published with only one author\n if not content.in_public() and content.authors.count() == 1:\n if content.in_beta() and content.beta_topic:\n beta_topic = content.beta_topic\n beta_topic.is_locked = True\n beta_topic.save()\n first_post = beta_topic.first_post()\n first_post.update_content(_(u\"# Le tutoriel pr\u00e9sent\u00e9 par ce topic n'existe plus.\"))\n first_post.save()\n content.delete()\n else:\n if content.authors.count() == 1:\n content.authors.add(replacement_author)\n external_gallery = gallery_class()\n external_gallery.user = replacement_author\n external_gallery.gallery = content.gallery\n external_gallery.mode = 'W'\n external_gallery.save()\n gallery_class.objects.filter(user=unregistered_user).filter(gallery=content.gallery).delete()\n\n content.authors.remove(unregistered_user)\n # we add a sentence to the content's introduction stating it was written by a former member.\n versioned = content.load_version()\n title = versioned.title\n introduction = _(u'[[i]]\\n|Ce contenu a \u00e9t\u00e9 r\u00e9dig\u00e9 par {} qui a quitt\u00e9 le site.\\n\\n')\\\n .format(unregistered_user.username) + versioned.get_introduction()\n conclusion = versioned.get_conclusion()\n sha = versioned.repo_update(title, introduction, conclusion,\n commit_message='Author unsubscribed',\n do_commit=True, update_slug=True)\n content.sha_draft = sha\n content.save()\n\n def get_last_tutorials(self):\n \"\"\"\n This depends on settings.ZDS_APP['tutorial']['home_number'] parameter\n\n :return: lit of last published content\n :rtype: list\n \"\"\"\n home_number = settings.ZDS_APP['tutorial']['home_number']\n all_contents = self.filter(type='TUTORIAL') \\\n .filter(public_version__isnull=False) \\\n .prefetch_related('authors') \\\n .prefetch_related('authors__profile') \\\n .select_related('last_note') \\\n .select_related('public_version') \\\n .prefetch_related('subcategory') \\\n .prefetch_related('tags') \\\n .order_by('-public_version__publication_date')[:home_number]\n published = []\n for content in all_contents:\n content.public_version.content = content\n published.append(content.public_version)\n return published\n\n def get_last_articles(self):\n \"\"\"\n ..attention:\n this one uses a raw subquery for historical reasons. 
It will hopefully be replaced one day by an\n ORM primitive.\n\n :return: list of last articles expanded with 'count_note' property that prefetches number of comments\n :rtype: list\n \"\"\"\n sub_query = 'SELECT COUNT(*) FROM {} WHERE {}={}'.format(\n 'tutorialv2_contentreaction',\n 'tutorialv2_contentreaction.related_content_id',\n 'tutorialv2_publishedcontent.content_pk',\n )\n home_number = settings.ZDS_APP['article']['home_number']\n all_contents = self.filter(type='ARTICLE') \\\n .filter(public_version__isnull=False) \\\n .prefetch_related('authors') \\\n .prefetch_related('authors__profile') \\\n .select_related('last_note') \\\n .select_related('public_version') \\\n .prefetch_related('subcategory') \\\n .prefetch_related('tags') \\\n .extra(select={'count_note': sub_query}) \\\n .order_by('-public_version__publication_date')[:home_number]\n published = []\n for content in all_contents:\n content.public_version.content = content\n published.append(content.public_version)\n return published\n\n def get_last_opinions(self):\n \"\"\"\n This depends on settings.ZDS_APP['opinions']['home_number'] parameter.\n\n :return: list of last opinions\n :rtype: list\n \"\"\"\n home_number = settings.ZDS_APP['opinions']['home_number']\n all_contents = self.filter(type='OPINION') \\\n .filter(public_version__isnull=False, sha_picked=F('sha_public')) \\\n .prefetch_related('authors') \\\n .prefetch_related('authors__profile') \\\n .select_related('last_note') \\\n .select_related('public_version') \\\n .prefetch_related('subcategory') \\\n .prefetch_related('tags') \\\n .order_by('-picked_date')[:home_number]\n published = []\n for content in all_contents:\n content.public_version.content = content\n published.append(content.public_version)\n return published\n", "path": "zds/tutorialv2/managers.py"}, {"content": "# coding: utf-8\n\nfrom django.contrib.syndication.views import Feed\nfrom django.conf import settings\n\nfrom django.utils.feedgenerator import Atom1Feed\n\nfrom zds.tutorialv2.models.models_database import PublishedContent\nfrom zds.settings import ZDS_APP\n\n\nclass LastContentFeedRSS(Feed):\n \"\"\"\n RSS feed for any type of content.\n \"\"\"\n title = u'Contenus sur {}'.format(settings.ZDS_APP['site']['litteral_name'])\n description = u'Les derniers contenus parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name'])\n link = ''\n content_type = None\n\n def items(self):\n \"\"\"\n :return: The last (typically 5) contents (sorted by publication date).\n If `self.type` is not `None`, the contents will only be of this type.\n \"\"\"\n contents = PublishedContent.objects\\\n .prefetch_related('content')\\\n .prefetch_related('content__authors')\n\n if self.content_type is not None:\n contents = contents.filter(content_type=self.content_type)\n\n return contents.order_by('-publication_date')[:ZDS_APP['content']['feed_length']]\n\n def item_title(self, item):\n return item.content.title\n\n def item_pubdate(self, item):\n return item.publication_date\n\n def item_description(self, item):\n return item.content.description\n\n def item_author_name(self, item):\n authors_list = item.content.authors.all()\n authors = []\n for authors_obj in authors_list:\n authors.append(authors_obj.username)\n authors = ', '.join(authors)\n return authors\n\n def item_link(self, item):\n return item.get_absolute_url_online()\n\n\nclass LastContentFeedATOM(LastContentFeedRSS):\n feed_type = Atom1Feed\n subtitle = LastContentFeedRSS.description\n\n\nclass LastTutorialsFeedRSS(LastContentFeedRSS):\n 
\"\"\"\n Redefinition of `LastContentFeedRSS` for tutorials only\n \"\"\"\n content_type = 'TUTORIAL'\n link = '/tutoriels/'\n title = u'Tutoriels sur {}'.format(settings.ZDS_APP['site']['litteral_name'])\n description = u'Les derniers tutoriels parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name'])\n\n\nclass LastTutorialsFeedATOM(LastTutorialsFeedRSS):\n feed_type = Atom1Feed\n subtitle = LastTutorialsFeedRSS.description\n\n\nclass LastArticlesFeedRSS(LastContentFeedRSS):\n \"\"\"\n Redefinition of `LastContentFeedRSS` for articles only\n \"\"\"\n content_type = 'ARTICLE'\n link = '/articles/'\n title = u'Articles sur {}'.format(settings.ZDS_APP['site']['litteral_name'])\n description = u'Les derniers articles parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name'])\n\n\nclass LastArticlesFeedATOM(LastArticlesFeedRSS):\n feed_type = Atom1Feed\n subtitle = LastArticlesFeedRSS.description\n\n\nclass LastOpinionsFeedRSS(LastContentFeedRSS):\n \"\"\"\n Redefinition of `LastContentFeedRSS` for opinions only\n \"\"\"\n content_type = 'OPINION'\n link = '/tribunes/'\n title = u'Tribunes sur {}'.format(settings.ZDS_APP['site']['litteral_name'])\n description = u'Les derniers billets des tribunes parus sur {}.'.format(\n settings.ZDS_APP['site']['litteral_name'])\n\n\nclass LastOpinionsFeedATOM(LastOpinionsFeedRSS):\n feed_type = Atom1Feed\n subtitle = LastOpinionsFeedRSS.description\n", "path": "zds/tutorialv2/feeds.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.db.models import Count, F\n\nfrom zds.utils.models import Tag\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass PublishedContentManager(models.Manager):\n \"\"\"\n Custom published content manager.\n \"\"\"\n\n def published_contents(self, _type=None):\n \"\"\"\n Get contents published order by date.\n\n :return:\n :rtype: django.db.models.QuerySet\n \"\"\"\n queryset = self.prefetch_related('content') \\\n .prefetch_related('content__authors') \\\n .prefetch_related('content__subcategory') \\\n .filter(must_redirect=False) \\\n .order_by('-publication_date')\n\n if _type:\n queryset = queryset.filter(content_type=_type)\n\n return queryset\n\n def last_contents_of_a_member_loaded(self, author, _type=None):\n \"\"\"\n Get contents published by author depends on settings.ZDS_APP['content']['user_page_number']\n\n :param author:\n :param _type: subtype to filter request\n :return:\n :rtype: django.db.models.QuerySet\n \"\"\"\n queryset = self.published_contents(_type) \\\n .filter(content__authors__in=[author])\n\n public_contents = queryset.all()[:settings.ZDS_APP['content']['user_page_number']]\n return public_contents\n\n def last_tutorials_of_a_member_loaded(self, author):\n return self.last_contents_of_a_member_loaded(author, _type='TUTORIAL')\n\n def last_articles_of_a_member_loaded(self, author):\n return self.last_contents_of_a_member_loaded(author, _type='ARTICLE')\n\n def last_opinions_of_a_member_loaded(self, author):\n return self.last_contents_of_a_member_loaded(author, _type='OPINION')\n\n def get_contents_count(self):\n \"\"\"\n :rtype: int\n \"\"\"\n return self.filter(must_redirect=False) \\\n .count()\n\n def get_top_tags(self, displayed_types, limit=-1):\n \"\"\"\n Retrieve all most rated tags.\n\n :param displayed_types:\n :param limit: if ``-1``\u00a0or ``0`` => no limit. 
Else just takes the provided number of elements.\n :return:\n \"\"\"\n published = self.filter(\n must_redirect=False,\n content__type__in=displayed_types).values('content__tags').distinct()\n tags_pk = [tag['content__tags'] for tag in published]\n queryset = Tag.objects\\\n .filter(pk__in=tags_pk, publishablecontent__public_version__isnull=False,\n publishablecontent__type__in=displayed_types) \\\n .annotate(num_content=Count('publishablecontent')) \\\n .order_by('-num_content', 'title')\n if limit > 0:\n queryset = queryset[:limit]\n return queryset\n\n def transfer_paternity(self, unsubscribed_user, replacement_author):\n \"\"\"\n erase or transfer the paternity of all published content owned by a user.\n if a content has more than one author, the unregistering author just leave its redaction\\\n else just mark ``replacement_author`` as the new author\n\n \"\"\"\n for published in self.filter(authors__in=[unsubscribed_user]):\n if published.authors.count() == 1:\n published.authors.add(replacement_author)\n published.authors.remove(unsubscribed_user)\n published.save()\n\n\nclass PublishableContentManager(models.Manager):\n \"\"\"...\"\"\"\n\n def transfer_paternity(self, unregistered_user, replacement_author, gallery_class):\n \"\"\"\n Erases or transfers the paternity of all publishable content owned by a user. \\\n If a content has more than one author, the unregistering author simply leaves its author list, \\\n otherwise their published content are sent to ``replacement_author``, \\\n unpublished content are deleted and their beta topics closed.\n\n :param unregistered_user: the user to be unregistered\n :param replacement_author: the new author\n :param gallery_class: the class to link tutorial with gallery (perhaps overkill :p)\n \"\"\"\n for content in self.filter(authors__in=[unregistered_user]):\n # we delete content only if not published with only one author\n if not content.in_public() and content.authors.count() == 1:\n if content.in_beta() and content.beta_topic:\n beta_topic = content.beta_topic\n beta_topic.is_locked = True\n beta_topic.save()\n first_post = beta_topic.first_post()\n first_post.update_content(_(u\"# Le tutoriel pr\u00e9sent\u00e9 par ce topic n'existe plus.\"))\n first_post.save()\n content.delete()\n else:\n if content.authors.count() == 1:\n content.authors.add(replacement_author)\n external_gallery = gallery_class()\n external_gallery.user = replacement_author\n external_gallery.gallery = content.gallery\n external_gallery.mode = 'W'\n external_gallery.save()\n gallery_class.objects.filter(user=unregistered_user).filter(gallery=content.gallery).delete()\n\n content.authors.remove(unregistered_user)\n # we add a sentence to the content's introduction stating it was written by a former member.\n versioned = content.load_version()\n title = versioned.title\n introduction = _(u'[[i]]\\n|Ce contenu a \u00e9t\u00e9 r\u00e9dig\u00e9 par {} qui a quitt\u00e9 le site.\\n\\n')\\\n .format(unregistered_user.username) + versioned.get_introduction()\n conclusion = versioned.get_conclusion()\n sha = versioned.repo_update(title, introduction, conclusion,\n commit_message='Author unsubscribed',\n do_commit=True, update_slug=True)\n content.sha_draft = sha\n content.save()\n\n def get_last_tutorials(self):\n \"\"\"\n This depends on settings.ZDS_APP['tutorial']['home_number'] parameter\n\n :return: lit of last published content\n :rtype: list\n \"\"\"\n home_number = settings.ZDS_APP['tutorial']['home_number']\n all_contents = self.filter(type='TUTORIAL') \\\n 
.filter(public_version__isnull=False) \\\n .prefetch_related('authors') \\\n .prefetch_related('authors__profile') \\\n .select_related('last_note') \\\n .select_related('public_version') \\\n .prefetch_related('subcategory') \\\n .prefetch_related('tags') \\\n .order_by('-public_version__publication_date')[:home_number]\n published = []\n for content in all_contents:\n content.public_version.content = content\n published.append(content.public_version)\n return published\n\n def get_last_articles(self):\n \"\"\"\n ..attention:\n this one uses a raw subquery for historical reasons. It will hopefully be replaced one day by an\n ORM primitive.\n\n :return: list of last articles expanded with 'count_note' property that prefetches number of comments\n :rtype: list\n \"\"\"\n sub_query = 'SELECT COUNT(*) FROM {} WHERE {}={}'.format(\n 'tutorialv2_contentreaction',\n 'tutorialv2_contentreaction.related_content_id',\n 'tutorialv2_publishedcontent.content_pk',\n )\n home_number = settings.ZDS_APP['article']['home_number']\n all_contents = self.filter(type='ARTICLE') \\\n .filter(public_version__isnull=False) \\\n .prefetch_related('authors') \\\n .prefetch_related('authors__profile') \\\n .select_related('last_note') \\\n .select_related('public_version') \\\n .prefetch_related('subcategory') \\\n .prefetch_related('tags') \\\n .extra(select={'count_note': sub_query}) \\\n .order_by('-public_version__publication_date')[:home_number]\n published = []\n for content in all_contents:\n content.public_version.content = content\n published.append(content.public_version)\n return published\n\n def get_last_opinions(self):\n \"\"\"\n This depends on settings.ZDS_APP['opinions']['home_number'] parameter.\n\n :return: list of last opinions\n :rtype: list\n \"\"\"\n home_number = settings.ZDS_APP['opinions']['home_number']\n all_contents = self.filter(type='OPINION') \\\n .filter(public_version__isnull=False, sha_picked=F('sha_public')) \\\n .prefetch_related('authors') \\\n .prefetch_related('authors__profile') \\\n .select_related('last_note') \\\n .select_related('public_version') \\\n .prefetch_related('subcategory') \\\n .prefetch_related('tags') \\\n .order_by('-picked_date')[:home_number]\n published = []\n for content in all_contents:\n content.public_version.content = content\n published.append(content.public_version)\n return published\n", "path": "zds/tutorialv2/managers.py"}, {"content": "# coding: utf-8\n\nfrom django.contrib.syndication.views import Feed\nfrom django.conf import settings\n\nfrom django.utils.feedgenerator import Atom1Feed\n\nfrom zds.tutorialv2.models.models_database import PublishedContent\nfrom zds.settings import ZDS_APP\n\n\nclass LastContentFeedRSS(Feed):\n \"\"\"\n RSS feed for any type of content.\n \"\"\"\n title = u'Contenus sur {}'.format(settings.ZDS_APP['site']['litteral_name'])\n description = u'Les derniers contenus parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name'])\n link = ''\n content_type = None\n\n def items(self):\n \"\"\"\n :return: The last (typically 5) contents (sorted by publication date).\n If `self.type` is not `None`, the contents will only be of this type.\n \"\"\"\n contents = PublishedContent.objects.published_contents(self.content_type)[:ZDS_APP['content']['feed_length']]\n\n return contents\n\n def item_title(self, item):\n return item.content.title\n\n def item_pubdate(self, item):\n return item.publication_date\n\n def item_description(self, item):\n return item.content.description\n\n def item_author_name(self, item):\n 
authors_list = item.content.authors.all()\n authors = []\n for authors_obj in authors_list:\n authors.append(authors_obj.username)\n authors = ', '.join(authors)\n return authors\n\n def item_link(self, item):\n return item.get_absolute_url_online()\n\n\nclass LastContentFeedATOM(LastContentFeedRSS):\n feed_type = Atom1Feed\n subtitle = LastContentFeedRSS.description\n\n\nclass LastTutorialsFeedRSS(LastContentFeedRSS):\n \"\"\"\n Redefinition of `LastContentFeedRSS` for tutorials only\n \"\"\"\n content_type = 'TUTORIAL'\n link = '/tutoriels/'\n title = u'Tutoriels sur {}'.format(settings.ZDS_APP['site']['litteral_name'])\n description = u'Les derniers tutoriels parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name'])\n\n\nclass LastTutorialsFeedATOM(LastTutorialsFeedRSS):\n feed_type = Atom1Feed\n subtitle = LastTutorialsFeedRSS.description\n\n\nclass LastArticlesFeedRSS(LastContentFeedRSS):\n \"\"\"\n Redefinition of `LastContentFeedRSS` for articles only\n \"\"\"\n content_type = 'ARTICLE'\n link = '/articles/'\n title = u'Articles sur {}'.format(settings.ZDS_APP['site']['litteral_name'])\n description = u'Les derniers articles parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name'])\n\n\nclass LastArticlesFeedATOM(LastArticlesFeedRSS):\n feed_type = Atom1Feed\n subtitle = LastArticlesFeedRSS.description\n\n\nclass LastOpinionsFeedRSS(LastContentFeedRSS):\n \"\"\"\n Redefinition of `LastContentFeedRSS` for opinions only\n \"\"\"\n content_type = 'OPINION'\n link = '/tribunes/'\n title = u'Tribunes sur {}'.format(settings.ZDS_APP['site']['litteral_name'])\n description = u'Les derniers billets des tribunes parus sur {}.'.format(\n settings.ZDS_APP['site']['litteral_name'])\n\n\nclass LastOpinionsFeedATOM(LastOpinionsFeedRSS):\n feed_type = Atom1Feed\n subtitle = LastOpinionsFeedRSS.description\n", "path": "zds/tutorialv2/feeds.py"}]}
num_tokens: 3,748
num_tokens_diff: 636

problem_id: gh_patches_debug_41491
source: rasdani/github-patches
task_type: git_diff
in_source_id: TheAlgorithms__Python-9208
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Making binary tree traversals lazy. ### Feature description Hi, In [binary_tree_traversals.py](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_tree_traversals.py) file, there are different kinds of traversals such as `preorder`, `inorder`, `postorder` and etc. Although the implementations are pretty clean one-liner like: ```python # preorder return [root.data, *preorder(root.left), *preorder(root.right)] if root else [] ``` It isn't memory friendly. We can use generators instead not to load all the nodes into the memory: ```python # preorder if not root: return [] yield root.data yield from preorder(root.left) yield from preorder(root.right) ``` Shall we go ahead and change them? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `data_structures/binary_tree/binary_tree_traversals.py` Content: ``` 1 # https://en.wikipedia.org/wiki/Tree_traversal 2 from __future__ import annotations 3 4 from collections import deque 5 from collections.abc import Sequence 6 from dataclasses import dataclass 7 from typing import Any 8 9 10 @dataclass 11 class Node: 12 data: int 13 left: Node | None = None 14 right: Node | None = None 15 16 17 def make_tree() -> Node | None: 18 r""" 19 The below tree 20 1 21 / \ 22 2 3 23 / \ 24 4 5 25 """ 26 tree = Node(1) 27 tree.left = Node(2) 28 tree.right = Node(3) 29 tree.left.left = Node(4) 30 tree.left.right = Node(5) 31 return tree 32 33 34 def preorder(root: Node | None) -> list[int]: 35 """ 36 Pre-order traversal visits root node, left subtree, right subtree. 37 >>> preorder(make_tree()) 38 [1, 2, 4, 5, 3] 39 """ 40 return [root.data, *preorder(root.left), *preorder(root.right)] if root else [] 41 42 43 def postorder(root: Node | None) -> list[int]: 44 """ 45 Post-order traversal visits left subtree, right subtree, root node. 46 >>> postorder(make_tree()) 47 [4, 5, 2, 3, 1] 48 """ 49 return postorder(root.left) + postorder(root.right) + [root.data] if root else [] 50 51 52 def inorder(root: Node | None) -> list[int]: 53 """ 54 In-order traversal visits left subtree, root node, right subtree. 55 >>> inorder(make_tree()) 56 [4, 2, 5, 1, 3] 57 """ 58 return [*inorder(root.left), root.data, *inorder(root.right)] if root else [] 59 60 61 def reverse_inorder(root: Node | None) -> list[int]: 62 """ 63 Reverse in-order traversal visits right subtree, root node, left subtree. 64 >>> reverse_inorder(make_tree()) 65 [3, 1, 5, 2, 4] 66 """ 67 return ( 68 [*reverse_inorder(root.right), root.data, *reverse_inorder(root.left)] 69 if root 70 else [] 71 ) 72 73 74 def height(root: Node | None) -> int: 75 """ 76 Recursive function for calculating the height of the binary tree. 77 >>> height(None) 78 0 79 >>> height(make_tree()) 80 3 81 """ 82 return (max(height(root.left), height(root.right)) + 1) if root else 0 83 84 85 def level_order(root: Node | None) -> Sequence[Node | None]: 86 """ 87 Returns a list of nodes value from a whole binary tree in Level Order Traverse. 88 Level Order traverse: Visit nodes of the tree level-by-level. 
89 """ 90 output: list[Any] = [] 91 92 if root is None: 93 return output 94 95 process_queue = deque([root]) 96 97 while process_queue: 98 node = process_queue.popleft() 99 output.append(node.data) 100 101 if node.left: 102 process_queue.append(node.left) 103 if node.right: 104 process_queue.append(node.right) 105 return output 106 107 108 def get_nodes_from_left_to_right( 109 root: Node | None, level: int 110 ) -> Sequence[Node | None]: 111 """ 112 Returns a list of nodes value from a particular level: 113 Left to right direction of the binary tree. 114 """ 115 output: list[Any] = [] 116 117 def populate_output(root: Node | None, level: int) -> None: 118 if not root: 119 return 120 if level == 1: 121 output.append(root.data) 122 elif level > 1: 123 populate_output(root.left, level - 1) 124 populate_output(root.right, level - 1) 125 126 populate_output(root, level) 127 return output 128 129 130 def get_nodes_from_right_to_left( 131 root: Node | None, level: int 132 ) -> Sequence[Node | None]: 133 """ 134 Returns a list of nodes value from a particular level: 135 Right to left direction of the binary tree. 136 """ 137 output: list[Any] = [] 138 139 def populate_output(root: Node | None, level: int) -> None: 140 if root is None: 141 return 142 if level == 1: 143 output.append(root.data) 144 elif level > 1: 145 populate_output(root.right, level - 1) 146 populate_output(root.left, level - 1) 147 148 populate_output(root, level) 149 return output 150 151 152 def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]: 153 """ 154 ZigZag traverse: 155 Returns a list of nodes value from left to right and right to left, alternatively. 156 """ 157 if root is None: 158 return [] 159 160 output: list[Sequence[Node | None]] = [] 161 162 flag = 0 163 height_tree = height(root) 164 165 for h in range(1, height_tree + 1): 166 if not flag: 167 output.append(get_nodes_from_left_to_right(root, h)) 168 flag = 1 169 else: 170 output.append(get_nodes_from_right_to_left(root, h)) 171 flag = 0 172 173 return output 174 175 176 def main() -> None: # Main function for testing. 177 # Create binary tree. 178 root = make_tree() 179 180 # All Traversals of the binary are as follows: 181 print(f"In-order Traversal: {inorder(root)}") 182 print(f"Reverse In-order Traversal: {reverse_inorder(root)}") 183 print(f"Pre-order Traversal: {preorder(root)}") 184 print(f"Post-order Traversal: {postorder(root)}", "\n") 185 186 print(f"Height of Tree: {height(root)}", "\n") 187 188 print("Complete Level Order Traversal: ") 189 print(level_order(root), "\n") 190 191 print("Level-wise order Traversal: ") 192 193 for level in range(1, height(root) + 1): 194 print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level)) 195 196 print("\nZigZag order Traversal: ") 197 print(zigzag(root)) 198 199 200 if __name__ == "__main__": 201 import doctest 202 203 doctest.testmod() 204 main() 205 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -1,12 +1,12 @@ -# https://en.wikipedia.org/wiki/Tree_traversal from __future__ import annotations from collections import deque -from collections.abc import Sequence +from collections.abc import Generator, Sequence from dataclasses import dataclass from typing import Any +# https://en.wikipedia.org/wiki/Tree_traversal @dataclass class Node: data: int @@ -31,44 +31,56 @@ return tree -def preorder(root: Node | None) -> list[int]: +def preorder(root: Node | None) -> Generator[int, None, None]: """ Pre-order traversal visits root node, left subtree, right subtree. - >>> preorder(make_tree()) + >>> list(preorder(make_tree())) [1, 2, 4, 5, 3] """ - return [root.data, *preorder(root.left), *preorder(root.right)] if root else [] + if not root: + return + yield root.data + yield from preorder(root.left) + yield from preorder(root.right) -def postorder(root: Node | None) -> list[int]: +def postorder(root: Node | None) -> Generator[int, None, None]: """ Post-order traversal visits left subtree, right subtree, root node. - >>> postorder(make_tree()) + >>> list(postorder(make_tree())) [4, 5, 2, 3, 1] """ - return postorder(root.left) + postorder(root.right) + [root.data] if root else [] + if not root: + return + yield from postorder(root.left) + yield from postorder(root.right) + yield root.data -def inorder(root: Node | None) -> list[int]: +def inorder(root: Node | None) -> Generator[int, None, None]: """ In-order traversal visits left subtree, root node, right subtree. - >>> inorder(make_tree()) + >>> list(inorder(make_tree())) [4, 2, 5, 1, 3] """ - return [*inorder(root.left), root.data, *inorder(root.right)] if root else [] + if not root: + return + yield from inorder(root.left) + yield root.data + yield from inorder(root.right) -def reverse_inorder(root: Node | None) -> list[int]: +def reverse_inorder(root: Node | None) -> Generator[int, None, None]: """ Reverse in-order traversal visits right subtree, root node, left subtree. - >>> reverse_inorder(make_tree()) + >>> list(reverse_inorder(make_tree())) [3, 1, 5, 2, 4] """ - return ( - [*reverse_inorder(root.right), root.data, *reverse_inorder(root.left)] - if root - else [] - ) + if not root: + return + yield from reverse_inorder(root.right) + yield root.data + yield from reverse_inorder(root.left) def height(root: Node | None) -> int: @@ -178,10 +190,10 @@ root = make_tree() # All Traversals of the binary are as follows: - print(f"In-order Traversal: {inorder(root)}") - print(f"Reverse In-order Traversal: {reverse_inorder(root)}") - print(f"Pre-order Traversal: {preorder(root)}") - print(f"Post-order Traversal: {postorder(root)}", "\n") + print(f"In-order Traversal: {list(inorder(root))}") + print(f"Reverse In-order Traversal: {list(reverse_inorder(root))}") + print(f"Pre-order Traversal: {list(preorder(root))}") + print(f"Post-order Traversal: {list(postorder(root))}", "\n") print(f"Height of Tree: {height(root)}", "\n")
{"golden_diff": "diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py\n--- a/data_structures/binary_tree/binary_tree_traversals.py\n+++ b/data_structures/binary_tree/binary_tree_traversals.py\n@@ -1,12 +1,12 @@\n-# https://en.wikipedia.org/wiki/Tree_traversal\r\n from __future__ import annotations\r\n \r\n from collections import deque\r\n-from collections.abc import Sequence\r\n+from collections.abc import Generator, Sequence\r\n from dataclasses import dataclass\r\n from typing import Any\r\n \r\n \r\n+# https://en.wikipedia.org/wiki/Tree_traversal\r\n @dataclass\r\n class Node:\r\n data: int\r\n@@ -31,44 +31,56 @@\n return tree\r\n \r\n \r\n-def preorder(root: Node | None) -> list[int]:\r\n+def preorder(root: Node | None) -> Generator[int, None, None]:\r\n \"\"\"\r\n Pre-order traversal visits root node, left subtree, right subtree.\r\n- >>> preorder(make_tree())\r\n+ >>> list(preorder(make_tree()))\r\n [1, 2, 4, 5, 3]\r\n \"\"\"\r\n- return [root.data, *preorder(root.left), *preorder(root.right)] if root else []\r\n+ if not root:\r\n+ return\r\n+ yield root.data\r\n+ yield from preorder(root.left)\r\n+ yield from preorder(root.right)\r\n \r\n \r\n-def postorder(root: Node | None) -> list[int]:\r\n+def postorder(root: Node | None) -> Generator[int, None, None]:\r\n \"\"\"\r\n Post-order traversal visits left subtree, right subtree, root node.\r\n- >>> postorder(make_tree())\r\n+ >>> list(postorder(make_tree()))\r\n [4, 5, 2, 3, 1]\r\n \"\"\"\r\n- return postorder(root.left) + postorder(root.right) + [root.data] if root else []\r\n+ if not root:\r\n+ return\r\n+ yield from postorder(root.left)\r\n+ yield from postorder(root.right)\r\n+ yield root.data\r\n \r\n \r\n-def inorder(root: Node | None) -> list[int]:\r\n+def inorder(root: Node | None) -> Generator[int, None, None]:\r\n \"\"\"\r\n In-order traversal visits left subtree, root node, right subtree.\r\n- >>> inorder(make_tree())\r\n+ >>> list(inorder(make_tree()))\r\n [4, 2, 5, 1, 3]\r\n \"\"\"\r\n- return [*inorder(root.left), root.data, *inorder(root.right)] if root else []\r\n+ if not root:\r\n+ return\r\n+ yield from inorder(root.left)\r\n+ yield root.data\r\n+ yield from inorder(root.right)\r\n \r\n \r\n-def reverse_inorder(root: Node | None) -> list[int]:\r\n+def reverse_inorder(root: Node | None) -> Generator[int, None, None]:\r\n \"\"\"\r\n Reverse in-order traversal visits right subtree, root node, left subtree.\r\n- >>> reverse_inorder(make_tree())\r\n+ >>> list(reverse_inorder(make_tree()))\r\n [3, 1, 5, 2, 4]\r\n \"\"\"\r\n- return (\r\n- [*reverse_inorder(root.right), root.data, *reverse_inorder(root.left)]\r\n- if root\r\n- else []\r\n- )\r\n+ if not root:\r\n+ return\r\n+ yield from reverse_inorder(root.right)\r\n+ yield root.data\r\n+ yield from reverse_inorder(root.left)\r\n \r\n \r\n def height(root: Node | None) -> int:\r\n@@ -178,10 +190,10 @@\n root = make_tree()\r\n \r\n # All Traversals of the binary are as follows:\r\n- print(f\"In-order Traversal: {inorder(root)}\")\r\n- print(f\"Reverse In-order Traversal: {reverse_inorder(root)}\")\r\n- print(f\"Pre-order Traversal: {preorder(root)}\")\r\n- print(f\"Post-order Traversal: {postorder(root)}\", \"\\n\")\r\n+ print(f\"In-order Traversal: {list(inorder(root))}\")\r\n+ print(f\"Reverse In-order Traversal: {list(reverse_inorder(root))}\")\r\n+ print(f\"Pre-order Traversal: {list(preorder(root))}\")\r\n+ print(f\"Post-order Traversal: {list(postorder(root))}\", \"\\n\")\r\n \r\n print(f\"Height 
of Tree: {height(root)}\", \"\\n\")\n", "issue": "Making binary tree traversals lazy.\n### Feature description\n\nHi,\r\nIn [binary_tree_traversals.py](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_tree_traversals.py) file, there are different kinds of traversals such as `preorder`, `inorder`, `postorder` and etc.\r\n\r\nAlthough the implementations are pretty clean one-liner like:\r\n```python\r\n# preorder\r\nreturn [root.data, *preorder(root.left), *preorder(root.right)] if root else []\r\n```\r\nIt isn't memory friendly. We can use generators instead not to load all the nodes into the memory:\r\n\r\n```python\r\n# preorder\r\n if not root:\r\n return []\r\n yield root.data\r\n yield from preorder(root.left)\r\n yield from preorder(root.right)\r\n```\r\nShall we go ahead and change them?\n", "before_files": [{"content": "# https://en.wikipedia.org/wiki/Tree_traversal\r\nfrom __future__ import annotations\r\n\r\nfrom collections import deque\r\nfrom collections.abc import Sequence\r\nfrom dataclasses import dataclass\r\nfrom typing import Any\r\n\r\n\r\n@dataclass\r\nclass Node:\r\n data: int\r\n left: Node | None = None\r\n right: Node | None = None\r\n\r\n\r\ndef make_tree() -> Node | None:\r\n r\"\"\"\r\n The below tree\r\n 1\r\n / \\\r\n 2 3\r\n / \\\r\n 4 5\r\n \"\"\"\r\n tree = Node(1)\r\n tree.left = Node(2)\r\n tree.right = Node(3)\r\n tree.left.left = Node(4)\r\n tree.left.right = Node(5)\r\n return tree\r\n\r\n\r\ndef preorder(root: Node | None) -> list[int]:\r\n \"\"\"\r\n Pre-order traversal visits root node, left subtree, right subtree.\r\n >>> preorder(make_tree())\r\n [1, 2, 4, 5, 3]\r\n \"\"\"\r\n return [root.data, *preorder(root.left), *preorder(root.right)] if root else []\r\n\r\n\r\ndef postorder(root: Node | None) -> list[int]:\r\n \"\"\"\r\n Post-order traversal visits left subtree, right subtree, root node.\r\n >>> postorder(make_tree())\r\n [4, 5, 2, 3, 1]\r\n \"\"\"\r\n return postorder(root.left) + postorder(root.right) + [root.data] if root else []\r\n\r\n\r\ndef inorder(root: Node | None) -> list[int]:\r\n \"\"\"\r\n In-order traversal visits left subtree, root node, right subtree.\r\n >>> inorder(make_tree())\r\n [4, 2, 5, 1, 3]\r\n \"\"\"\r\n return [*inorder(root.left), root.data, *inorder(root.right)] if root else []\r\n\r\n\r\ndef reverse_inorder(root: Node | None) -> list[int]:\r\n \"\"\"\r\n Reverse in-order traversal visits right subtree, root node, left subtree.\r\n >>> reverse_inorder(make_tree())\r\n [3, 1, 5, 2, 4]\r\n \"\"\"\r\n return (\r\n [*reverse_inorder(root.right), root.data, *reverse_inorder(root.left)]\r\n if root\r\n else []\r\n )\r\n\r\n\r\ndef height(root: Node | None) -> int:\r\n \"\"\"\r\n Recursive function for calculating the height of the binary tree.\r\n >>> height(None)\r\n 0\r\n >>> height(make_tree())\r\n 3\r\n \"\"\"\r\n return (max(height(root.left), height(root.right)) + 1) if root else 0\r\n\r\n\r\ndef level_order(root: Node | None) -> Sequence[Node | None]:\r\n \"\"\"\r\n Returns a list of nodes value from a whole binary tree in Level Order Traverse.\r\n Level Order traverse: Visit nodes of the tree level-by-level.\r\n \"\"\"\r\n output: list[Any] = []\r\n\r\n if root is None:\r\n return output\r\n\r\n process_queue = deque([root])\r\n\r\n while process_queue:\r\n node = process_queue.popleft()\r\n output.append(node.data)\r\n\r\n if node.left:\r\n process_queue.append(node.left)\r\n if node.right:\r\n process_queue.append(node.right)\r\n return output\r\n\r\n\r\ndef 
get_nodes_from_left_to_right(\r\n root: Node | None, level: int\r\n) -> Sequence[Node | None]:\r\n \"\"\"\r\n Returns a list of nodes value from a particular level:\r\n Left to right direction of the binary tree.\r\n \"\"\"\r\n output: list[Any] = []\r\n\r\n def populate_output(root: Node | None, level: int) -> None:\r\n if not root:\r\n return\r\n if level == 1:\r\n output.append(root.data)\r\n elif level > 1:\r\n populate_output(root.left, level - 1)\r\n populate_output(root.right, level - 1)\r\n\r\n populate_output(root, level)\r\n return output\r\n\r\n\r\ndef get_nodes_from_right_to_left(\r\n root: Node | None, level: int\r\n) -> Sequence[Node | None]:\r\n \"\"\"\r\n Returns a list of nodes value from a particular level:\r\n Right to left direction of the binary tree.\r\n \"\"\"\r\n output: list[Any] = []\r\n\r\n def populate_output(root: Node | None, level: int) -> None:\r\n if root is None:\r\n return\r\n if level == 1:\r\n output.append(root.data)\r\n elif level > 1:\r\n populate_output(root.right, level - 1)\r\n populate_output(root.left, level - 1)\r\n\r\n populate_output(root, level)\r\n return output\r\n\r\n\r\ndef zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:\r\n \"\"\"\r\n ZigZag traverse:\r\n Returns a list of nodes value from left to right and right to left, alternatively.\r\n \"\"\"\r\n if root is None:\r\n return []\r\n\r\n output: list[Sequence[Node | None]] = []\r\n\r\n flag = 0\r\n height_tree = height(root)\r\n\r\n for h in range(1, height_tree + 1):\r\n if not flag:\r\n output.append(get_nodes_from_left_to_right(root, h))\r\n flag = 1\r\n else:\r\n output.append(get_nodes_from_right_to_left(root, h))\r\n flag = 0\r\n\r\n return output\r\n\r\n\r\ndef main() -> None: # Main function for testing.\r\n # Create binary tree.\r\n root = make_tree()\r\n\r\n # All Traversals of the binary are as follows:\r\n print(f\"In-order Traversal: {inorder(root)}\")\r\n print(f\"Reverse In-order Traversal: {reverse_inorder(root)}\")\r\n print(f\"Pre-order Traversal: {preorder(root)}\")\r\n print(f\"Post-order Traversal: {postorder(root)}\", \"\\n\")\r\n\r\n print(f\"Height of Tree: {height(root)}\", \"\\n\")\r\n\r\n print(\"Complete Level Order Traversal: \")\r\n print(level_order(root), \"\\n\")\r\n\r\n print(\"Level-wise order Traversal: \")\r\n\r\n for level in range(1, height(root) + 1):\r\n print(f\"Level {level}:\", get_nodes_from_left_to_right(root, level=level))\r\n\r\n print(\"\\nZigZag order Traversal: \")\r\n print(zigzag(root))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import doctest\r\n\r\n doctest.testmod()\r\n main()\r\n", "path": "data_structures/binary_tree/binary_tree_traversals.py"}], "after_files": [{"content": "from __future__ import annotations\r\n\r\nfrom collections import deque\r\nfrom collections.abc import Generator, Sequence\r\nfrom dataclasses import dataclass\r\nfrom typing import Any\r\n\r\n\r\n# https://en.wikipedia.org/wiki/Tree_traversal\r\n@dataclass\r\nclass Node:\r\n data: int\r\n left: Node | None = None\r\n right: Node | None = None\r\n\r\n\r\ndef make_tree() -> Node | None:\r\n r\"\"\"\r\n The below tree\r\n 1\r\n / \\\r\n 2 3\r\n / \\\r\n 4 5\r\n \"\"\"\r\n tree = Node(1)\r\n tree.left = Node(2)\r\n tree.right = Node(3)\r\n tree.left.left = Node(4)\r\n tree.left.right = Node(5)\r\n return tree\r\n\r\n\r\ndef preorder(root: Node | None) -> Generator[int, None, None]:\r\n \"\"\"\r\n Pre-order traversal visits root node, left subtree, right subtree.\r\n >>> list(preorder(make_tree()))\r\n [1, 2, 4, 5, 3]\r\n \"\"\"\r\n if not 
root:\r\n return\r\n yield root.data\r\n yield from preorder(root.left)\r\n yield from preorder(root.right)\r\n\r\n\r\ndef postorder(root: Node | None) -> Generator[int, None, None]:\r\n \"\"\"\r\n Post-order traversal visits left subtree, right subtree, root node.\r\n >>> list(postorder(make_tree()))\r\n [4, 5, 2, 3, 1]\r\n \"\"\"\r\n if not root:\r\n return\r\n yield from postorder(root.left)\r\n yield from postorder(root.right)\r\n yield root.data\r\n\r\n\r\ndef inorder(root: Node | None) -> Generator[int, None, None]:\r\n \"\"\"\r\n In-order traversal visits left subtree, root node, right subtree.\r\n >>> list(inorder(make_tree()))\r\n [4, 2, 5, 1, 3]\r\n \"\"\"\r\n if not root:\r\n return\r\n yield from inorder(root.left)\r\n yield root.data\r\n yield from inorder(root.right)\r\n\r\n\r\ndef reverse_inorder(root: Node | None) -> Generator[int, None, None]:\r\n \"\"\"\r\n Reverse in-order traversal visits right subtree, root node, left subtree.\r\n >>> list(reverse_inorder(make_tree()))\r\n [3, 1, 5, 2, 4]\r\n \"\"\"\r\n if not root:\r\n return\r\n yield from reverse_inorder(root.right)\r\n yield root.data\r\n yield from reverse_inorder(root.left)\r\n\r\n\r\ndef height(root: Node | None) -> int:\r\n \"\"\"\r\n Recursive function for calculating the height of the binary tree.\r\n >>> height(None)\r\n 0\r\n >>> height(make_tree())\r\n 3\r\n \"\"\"\r\n return (max(height(root.left), height(root.right)) + 1) if root else 0\r\n\r\n\r\ndef level_order(root: Node | None) -> Sequence[Node | None]:\r\n \"\"\"\r\n Returns a list of nodes value from a whole binary tree in Level Order Traverse.\r\n Level Order traverse: Visit nodes of the tree level-by-level.\r\n \"\"\"\r\n output: list[Any] = []\r\n\r\n if root is None:\r\n return output\r\n\r\n process_queue = deque([root])\r\n\r\n while process_queue:\r\n node = process_queue.popleft()\r\n output.append(node.data)\r\n\r\n if node.left:\r\n process_queue.append(node.left)\r\n if node.right:\r\n process_queue.append(node.right)\r\n return output\r\n\r\n\r\ndef get_nodes_from_left_to_right(\r\n root: Node | None, level: int\r\n) -> Sequence[Node | None]:\r\n \"\"\"\r\n Returns a list of nodes value from a particular level:\r\n Left to right direction of the binary tree.\r\n \"\"\"\r\n output: list[Any] = []\r\n\r\n def populate_output(root: Node | None, level: int) -> None:\r\n if not root:\r\n return\r\n if level == 1:\r\n output.append(root.data)\r\n elif level > 1:\r\n populate_output(root.left, level - 1)\r\n populate_output(root.right, level - 1)\r\n\r\n populate_output(root, level)\r\n return output\r\n\r\n\r\ndef get_nodes_from_right_to_left(\r\n root: Node | None, level: int\r\n) -> Sequence[Node | None]:\r\n \"\"\"\r\n Returns a list of nodes value from a particular level:\r\n Right to left direction of the binary tree.\r\n \"\"\"\r\n output: list[Any] = []\r\n\r\n def populate_output(root: Node | None, level: int) -> None:\r\n if root is None:\r\n return\r\n if level == 1:\r\n output.append(root.data)\r\n elif level > 1:\r\n populate_output(root.right, level - 1)\r\n populate_output(root.left, level - 1)\r\n\r\n populate_output(root, level)\r\n return output\r\n\r\n\r\ndef zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:\r\n \"\"\"\r\n ZigZag traverse:\r\n Returns a list of nodes value from left to right and right to left, alternatively.\r\n \"\"\"\r\n if root is None:\r\n return []\r\n\r\n output: list[Sequence[Node | None]] = []\r\n\r\n flag = 0\r\n height_tree = height(root)\r\n\r\n for h in range(1, height_tree + 
1):\r\n if not flag:\r\n output.append(get_nodes_from_left_to_right(root, h))\r\n flag = 1\r\n else:\r\n output.append(get_nodes_from_right_to_left(root, h))\r\n flag = 0\r\n\r\n return output\r\n\r\n\r\ndef main() -> None: # Main function for testing.\r\n # Create binary tree.\r\n root = make_tree()\r\n\r\n # All Traversals of the binary are as follows:\r\n print(f\"In-order Traversal: {list(inorder(root))}\")\r\n print(f\"Reverse In-order Traversal: {list(reverse_inorder(root))}\")\r\n print(f\"Pre-order Traversal: {list(preorder(root))}\")\r\n print(f\"Post-order Traversal: {list(postorder(root))}\", \"\\n\")\r\n\r\n print(f\"Height of Tree: {height(root)}\", \"\\n\")\r\n\r\n print(\"Complete Level Order Traversal: \")\r\n print(level_order(root), \"\\n\")\r\n\r\n print(\"Level-wise order Traversal: \")\r\n\r\n for level in range(1, height(root) + 1):\r\n print(f\"Level {level}:\", get_nodes_from_left_to_right(root, level=level))\r\n\r\n print(\"\\nZigZag order Traversal: \")\r\n print(zigzag(root))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import doctest\r\n\r\n doctest.testmod()\r\n main()\r\n", "path": "data_structures/binary_tree/binary_tree_traversals.py"}]}
num_tokens: 2,313
num_tokens_diff: 947

problem_id: gh_patches_debug_29675
source: rasdani/github-patches
task_type: git_diff
in_source_id: opensearch-project__opensearch-build-147
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Script to build each component from manifest and assemble bundle. This script should read a manifest and output all artifacts ready for upload. Example. /build/opensearch-dashboards-min-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz <- min bundle /build/opensearch-dashboards-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz. <-- full bundle /build/opensearch-sql-1.0.0.0-rc1.zip /build/org/opensearch... <- maven artifacts Input: to the script should be a manifest file location - format defined here #111 Output: all required artifacts are written to ./build - [x] Clone each component repository defined in the manifest - [x] Build each component. This includes plugin zips and maven artifacts and place under a new folder with build id. Note: We need to know if the version of a particular component is already published to maven central. If this is the case we do not need to rebuild and include that artifact. - [ ] Assemble the bundle itself and add to the /build directory. This is dependent on being able to write manifests - #134 To make assembling maven artifacts easier, each repo can be published to maven local and copied from that location into /build. All artifacts will be under org/opensearch. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/bundle-build/lib/component.py` Content: ``` 1 import os 2 import tempfile 3 import subprocess 4 from lib.git import GitRepository 5 6 class Component: 7 def __init__(self, data): 8 self._name = data['name'] 9 self._repository = data['repository'] 10 self._ref = data['ref'] 11 12 def name(self): 13 return self._name 14 15 def repository(self): 16 return self._repository 17 18 def git_repository(self): 19 return self._git_repository 20 21 def ref(self): 22 return self._ref 23 24 def checkout(self): 25 self._git_repository = GitRepository(self.repository(), self.ref()) 26 27 # script overridden in this repo 28 def custom_component_script_path(self): 29 dirname = os.path.dirname(os.path.abspath(__file__)) 30 return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/components', self.name(), 'build.sh')) 31 32 # script inside the component repo 33 def component_script_path(self): 34 dirname = self.git_repository().dir() 35 return os.path.realpath(os.path.join(dirname, 'scripts/build.sh')) 36 37 # default gradle script 38 def default_script_path(self): 39 dirname = os.path.dirname(os.path.abspath(__file__)) 40 return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/standard-gradle-build/build.sh')) 41 42 def build_script(self): 43 paths = [self.component_script_path(), self.custom_component_script_path(), self.default_script_path()] 44 return next(filter(lambda path: os.path.exists(path), paths), None) 45 46 def build(self, version, arch): 47 build_script = f'{self.build_script()} {version} {arch}' 48 print(f'Running {build_script} ...') 49 self.git_repository().execute(build_script) 50 51 def artifacts_path(self): 52 dirname = self.git_repository().dir() 53 return os.path.realpath(os.path.join(dirname, 'artifacts')) 54 55 def export(self, dest): 56 artifacts_path = self.artifacts_path() 57 if os.path.exists(artifacts_path): 58 print(f'Publishing artifacts from {artifacts_path} into {dest} ...') 59 self.git_repository().execute(f'cp -r "{artifacts_path}/"* "{dest}"') 60 else: 61 print(f'No artifacts found in {artifacts_path}, 
skipping.') 62 63 def dict(self): 64 return { 65 'name': self.name(), 66 'repository': self.repository(), 67 'ref': self.ref(), 68 'sha': self.git_repository().sha() 69 } 70 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/bundle-build/lib/component.py b/tools/bundle-build/lib/component.py --- a/tools/bundle-build/lib/component.py +++ b/tools/bundle-build/lib/component.py @@ -21,6 +21,9 @@ def ref(self): return self._ref + def artifacts(self): + return self._artifacts + def checkout(self): self._git_repository = GitRepository(self.repository(), self.ref()) @@ -57,13 +60,29 @@ if os.path.exists(artifacts_path): print(f'Publishing artifacts from {artifacts_path} into {dest} ...') self.git_repository().execute(f'cp -r "{artifacts_path}/"* "{dest}"') + self.set_artifacts() else: print(f'No artifacts found in {artifacts_path}, skipping.') + def set_artifacts(self): + self._artifacts = {key: self.file_paths(key) for key in ["maven", "plugins", "bundle", "libs"] if self.file_paths(key)} + + def file_paths(self, dir_name): + artifacts_path = self.artifacts_path() + sub_dir = os.path.join(artifacts_path, dir_name) + file_paths = [] + if os.path.exists(sub_dir): + for dir, dirs, files in os.walk(sub_dir): + for file_name in files: + path = os.path.relpath(os.path.join(dir, file_name), artifacts_path) + file_paths.append(path) + return file_paths + def dict(self): return { 'name': self.name(), 'repository': self.repository(), 'ref': self.ref(), - 'sha': self.git_repository().sha() + 'sha': self.git_repository().sha(), + 'artifacts': self.artifacts() }
{"golden_diff": "diff --git a/tools/bundle-build/lib/component.py b/tools/bundle-build/lib/component.py\n--- a/tools/bundle-build/lib/component.py\n+++ b/tools/bundle-build/lib/component.py\n@@ -21,6 +21,9 @@\n def ref(self):\n return self._ref\n \n+ def artifacts(self):\n+ return self._artifacts\n+\n def checkout(self):\n self._git_repository = GitRepository(self.repository(), self.ref())\n \n@@ -57,13 +60,29 @@\n if os.path.exists(artifacts_path):\n print(f'Publishing artifacts from {artifacts_path} into {dest} ...')\n self.git_repository().execute(f'cp -r \"{artifacts_path}/\"* \"{dest}\"')\n+ self.set_artifacts()\n else:\n print(f'No artifacts found in {artifacts_path}, skipping.')\n \n+ def set_artifacts(self):\n+ self._artifacts = {key: self.file_paths(key) for key in [\"maven\", \"plugins\", \"bundle\", \"libs\"] if self.file_paths(key)}\n+\n+ def file_paths(self, dir_name):\n+ artifacts_path = self.artifacts_path()\n+ sub_dir = os.path.join(artifacts_path, dir_name)\n+ file_paths = []\n+ if os.path.exists(sub_dir):\n+ for dir, dirs, files in os.walk(sub_dir):\n+ for file_name in files:\n+ path = os.path.relpath(os.path.join(dir, file_name), artifacts_path)\n+ file_paths.append(path)\n+ return file_paths\n+\n def dict(self):\n return {\n 'name': self.name(),\n 'repository': self.repository(),\n 'ref': self.ref(),\n- 'sha': self.git_repository().sha()\n+ 'sha': self.git_repository().sha(),\n+ 'artifacts': self.artifacts()\n }\n", "issue": "Script to build each component from manifest and assemble bundle.\nThis script should read a manifest and output all artifacts ready for upload.\r\nExample.\r\n/build/opensearch-dashboards-min-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz <- min bundle\r\n/build/opensearch-dashboards-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz. <-- full bundle\r\n/build/opensearch-sql-1.0.0.0-rc1.zip\r\n/build/org/opensearch... <- maven artifacts\r\n\r\nInput: to the script should be a manifest file location - format defined here #111 \r\nOutput: all required artifacts are written to ./build\r\n\r\n- [x] Clone each component repository defined in the manifest\r\n- [x] Build each component. This includes plugin zips and maven artifacts and place under a new folder with build id. Note: We need to know if the version of a particular component is already published to maven central. If this is the case we do not need to rebuild and include that artifact.\r\n- [ ] Assemble the bundle itself and add to the /build directory. This is dependent on being able to write manifests - #134 \r\n\r\nTo make assembling maven artifacts easier, each repo can be published to maven local and copied from that location into /build. 
All artifacts will be under org/opensearch.\r\n\n", "before_files": [{"content": "import os\nimport tempfile\nimport subprocess\nfrom lib.git import GitRepository\n\nclass Component:\n def __init__(self, data):\n self._name = data['name']\n self._repository = data['repository']\n self._ref = data['ref']\n\n def name(self):\n return self._name\n\n def repository(self):\n return self._repository\n\n def git_repository(self):\n return self._git_repository\n\n def ref(self):\n return self._ref\n\n def checkout(self):\n self._git_repository = GitRepository(self.repository(), self.ref())\n\n # script overridden in this repo\n def custom_component_script_path(self):\n dirname = os.path.dirname(os.path.abspath(__file__)) \n return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/components', self.name(), 'build.sh'))\n\n # script inside the component repo\n def component_script_path(self):\n dirname = self.git_repository().dir() \n return os.path.realpath(os.path.join(dirname, 'scripts/build.sh'))\n\n # default gradle script\n def default_script_path(self):\n dirname = os.path.dirname(os.path.abspath(__file__)) \n return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/standard-gradle-build/build.sh'))\n\n def build_script(self):\n paths = [self.component_script_path(), self.custom_component_script_path(), self.default_script_path()]\n return next(filter(lambda path: os.path.exists(path), paths), None)\n\n def build(self, version, arch):\n build_script = f'{self.build_script()} {version} {arch}' \n print(f'Running {build_script} ...')\n self.git_repository().execute(build_script)\n\n def artifacts_path(self):\n dirname = self.git_repository().dir()\n return os.path.realpath(os.path.join(dirname, 'artifacts'))\n\n def export(self, dest):\n artifacts_path = self.artifacts_path()\n if os.path.exists(artifacts_path):\n print(f'Publishing artifacts from {artifacts_path} into {dest} ...')\n self.git_repository().execute(f'cp -r \"{artifacts_path}/\"* \"{dest}\"')\n else:\n print(f'No artifacts found in {artifacts_path}, skipping.')\n\n def dict(self):\n return {\n 'name': self.name(),\n 'repository': self.repository(),\n 'ref': self.ref(),\n 'sha': self.git_repository().sha()\n }\n", "path": "tools/bundle-build/lib/component.py"}], "after_files": [{"content": "import os\nimport tempfile\nimport subprocess\nfrom lib.git import GitRepository\n\nclass Component:\n def __init__(self, data):\n self._name = data['name']\n self._repository = data['repository']\n self._ref = data['ref']\n\n def name(self):\n return self._name\n\n def repository(self):\n return self._repository\n\n def git_repository(self):\n return self._git_repository\n\n def ref(self):\n return self._ref\n\n def artifacts(self):\n return self._artifacts\n\n def checkout(self):\n self._git_repository = GitRepository(self.repository(), self.ref())\n\n # script overridden in this repo\n def custom_component_script_path(self):\n dirname = os.path.dirname(os.path.abspath(__file__)) \n return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/components', self.name(), 'build.sh'))\n\n # script inside the component repo\n def component_script_path(self):\n dirname = self.git_repository().dir() \n return os.path.realpath(os.path.join(dirname, 'scripts/build.sh'))\n\n # default gradle script\n def default_script_path(self):\n dirname = os.path.dirname(os.path.abspath(__file__)) \n return os.path.realpath(os.path.join(dirname, 
'../../../scripts/bundle-build/standard-gradle-build/build.sh'))\n\n def build_script(self):\n paths = [self.component_script_path(), self.custom_component_script_path(), self.default_script_path()]\n return next(filter(lambda path: os.path.exists(path), paths), None)\n\n def build(self, version, arch):\n build_script = f'{self.build_script()} {version} {arch}' \n print(f'Running {build_script} ...')\n self.git_repository().execute(build_script)\n\n def artifacts_path(self):\n dirname = self.git_repository().dir()\n return os.path.realpath(os.path.join(dirname, 'artifacts'))\n\n def export(self, dest):\n artifacts_path = self.artifacts_path()\n if os.path.exists(artifacts_path):\n print(f'Publishing artifacts from {artifacts_path} into {dest} ...')\n self.git_repository().execute(f'cp -r \"{artifacts_path}/\"* \"{dest}\"')\n self.set_artifacts()\n else:\n print(f'No artifacts found in {artifacts_path}, skipping.')\n\n def set_artifacts(self):\n self._artifacts = {key: self.file_paths(key) for key in [\"maven\", \"plugins\", \"bundle\", \"libs\"] if self.file_paths(key)}\n\n def file_paths(self, dir_name):\n artifacts_path = self.artifacts_path()\n sub_dir = os.path.join(artifacts_path, dir_name)\n file_paths = []\n if os.path.exists(sub_dir):\n for dir, dirs, files in os.walk(sub_dir):\n for file_name in files:\n path = os.path.relpath(os.path.join(dir, file_name), artifacts_path)\n file_paths.append(path)\n return file_paths\n\n def dict(self):\n return {\n 'name': self.name(),\n 'repository': self.repository(),\n 'ref': self.ref(),\n 'sha': self.git_repository().sha(),\n 'artifacts': self.artifacts()\n }\n", "path": "tools/bundle-build/lib/component.py"}]}
num_tokens: 1,211
num_tokens_diff: 403

problem_id: gh_patches_debug_7646
source: rasdani/github-patches
task_type: git_diff
in_source_id: lnbits__lnbits-194
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- LNURLp links give errors on WalletofSatoshi and BlueWallet Using this LNURLp link: https://lnbits.com/lnurlp/212 BlueWallet Error: "Alert: Bad response from server" Wallet of Satoshi Error: "Error: Could not complete payment, please try again." --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lnbits/extensions/lnurlp/views_api.py` Content: ``` 1 from quart import g, jsonify, request 2 from http import HTTPStatus 3 from lnurl.exceptions import InvalidUrl as LnurlInvalidUrl # type: ignore 4 5 from lnbits.core.crud import get_user 6 from lnbits.decorators import api_check_wallet_key, api_validate_post_request 7 from lnbits.utils.exchange_rates import currencies, get_fiat_rate_satoshis 8 9 from . import lnurlp_ext 10 from .crud import ( 11 create_pay_link, 12 get_pay_link, 13 get_pay_links, 14 update_pay_link, 15 delete_pay_link, 16 ) 17 18 19 @lnurlp_ext.route("/api/v1/currencies", methods=["GET"]) 20 async def api_list_currencies_available(): 21 return jsonify(list(currencies.keys())) 22 23 24 @lnurlp_ext.route("/api/v1/links", methods=["GET"]) 25 @api_check_wallet_key("invoice") 26 async def api_links(): 27 wallet_ids = [g.wallet.id] 28 29 if "all_wallets" in request.args: 30 wallet_ids = (await get_user(g.wallet.user)).wallet_ids 31 32 try: 33 return ( 34 jsonify( 35 [ 36 {**link._asdict(), **{"lnurl": link.lnurl}} 37 for link in await get_pay_links(wallet_ids) 38 ] 39 ), 40 HTTPStatus.OK, 41 ) 42 except LnurlInvalidUrl: 43 return ( 44 jsonify( 45 { 46 "message": "LNURLs need to be delivered over a publically accessible `https` domain or Tor." 
47 } 48 ), 49 HTTPStatus.UPGRADE_REQUIRED, 50 ) 51 52 53 @lnurlp_ext.route("/api/v1/links/<link_id>", methods=["GET"]) 54 @api_check_wallet_key("invoice") 55 async def api_link_retrieve(link_id): 56 link = await get_pay_link(link_id) 57 58 if not link: 59 return jsonify({"message": "Pay link does not exist."}), HTTPStatus.NOT_FOUND 60 61 if link.wallet != g.wallet.id: 62 return jsonify({"message": "Not your pay link."}), HTTPStatus.FORBIDDEN 63 64 return jsonify({**link._asdict(), **{"lnurl": link.lnurl}}), HTTPStatus.OK 65 66 67 @lnurlp_ext.route("/api/v1/links", methods=["POST"]) 68 @lnurlp_ext.route("/api/v1/links/<link_id>", methods=["PUT"]) 69 @api_check_wallet_key("invoice") 70 @api_validate_post_request( 71 schema={ 72 "description": {"type": "string", "empty": False, "required": True}, 73 "min": {"type": "number", "min": 0.01, "required": True}, 74 "max": {"type": "number", "min": 0.01, "required": True}, 75 "currency": {"type": "string", "nullable": True, "required": False}, 76 "comment_chars": {"type": "integer", "required": True, "min": 0, "max": 800}, 77 "webhook_url": {"type": "string", "required": False}, 78 "success_text": {"type": "string", "required": False}, 79 "success_url": {"type": "string", "required": False}, 80 } 81 ) 82 async def api_link_create_or_update(link_id=None): 83 if g.data["min"] > g.data["max"]: 84 return jsonify({"message": "Min is greater than max."}), HTTPStatus.BAD_REQUEST 85 86 if g.data.get("currency") == None and ( 87 round(g.data["min"]) != g.data["min"] or round(g.data["max"]) != g.data["max"] 88 ): 89 return jsonify({"message": "Must use full satoshis."}), HTTPStatus.BAD_REQUEST 90 91 if link_id: 92 link = await get_pay_link(link_id) 93 94 if not link: 95 return ( 96 jsonify({"message": "Pay link does not exist."}), 97 HTTPStatus.NOT_FOUND, 98 ) 99 100 if link.wallet != g.wallet.id: 101 return jsonify({"message": "Not your pay link."}), HTTPStatus.FORBIDDEN 102 103 link = await update_pay_link(link_id, **g.data) 104 else: 105 link = await create_pay_link(wallet_id=g.wallet.id, **g.data) 106 107 return ( 108 jsonify({**link._asdict(), **{"lnurl": link.lnurl}}), 109 HTTPStatus.OK if link_id else HTTPStatus.CREATED, 110 ) 111 112 113 @lnurlp_ext.route("/api/v1/links/<link_id>", methods=["DELETE"]) 114 @api_check_wallet_key("invoice") 115 async def api_link_delete(link_id): 116 link = await get_pay_link(link_id) 117 118 if not link: 119 return jsonify({"message": "Pay link does not exist."}), HTTPStatus.NOT_FOUND 120 121 if link.wallet != g.wallet.id: 122 return jsonify({"message": "Not your pay link."}), HTTPStatus.FORBIDDEN 123 124 await delete_pay_link(link_id) 125 126 return "", HTTPStatus.NO_CONTENT 127 128 129 @lnurlp_ext.route("/api/v1/rate/<currency>", methods=["GET"]) 130 async def api_check_fiat_rate(currency): 131 try: 132 rate = await get_fiat_rate_satoshis(currency) 133 except AssertionError: 134 rate = None 135 136 return jsonify({"rate": rate}), HTTPStatus.OK 137 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lnbits/extensions/lnurlp/views_api.py b/lnbits/extensions/lnurlp/views_api.py --- a/lnbits/extensions/lnurlp/views_api.py +++ b/lnbits/extensions/lnurlp/views_api.py @@ -87,6 +87,9 @@ round(g.data["min"]) != g.data["min"] or round(g.data["max"]) != g.data["max"] ): return jsonify({"message": "Must use full satoshis."}), HTTPStatus.BAD_REQUEST + + if g.data["success_url"][:8] != "https://": + return jsonify({"message": "Success URL must be secure https://..."}), HTTPStatus.BAD_REQUEST if link_id: link = await get_pay_link(link_id)
{"golden_diff": "diff --git a/lnbits/extensions/lnurlp/views_api.py b/lnbits/extensions/lnurlp/views_api.py\n--- a/lnbits/extensions/lnurlp/views_api.py\n+++ b/lnbits/extensions/lnurlp/views_api.py\n@@ -87,6 +87,9 @@\n round(g.data[\"min\"]) != g.data[\"min\"] or round(g.data[\"max\"]) != g.data[\"max\"]\n ):\n return jsonify({\"message\": \"Must use full satoshis.\"}), HTTPStatus.BAD_REQUEST\n+ \n+ if g.data[\"success_url\"][:8] != \"https://\":\n+ return jsonify({\"message\": \"Success URL must be secure https://...\"}), HTTPStatus.BAD_REQUEST\n \n if link_id:\n link = await get_pay_link(link_id)\n", "issue": "LNURLp links give errors on WalletofSatoshi and BlueWallet\nUsing this LNURLp link: https://lnbits.com/lnurlp/212\r\n\r\nBlueWallet Error: \"Alert: Bad response from server\"\r\nWallet of Satoshi Error: \"Error: Could not complete payment, please try again.\"\n", "before_files": [{"content": "from quart import g, jsonify, request\nfrom http import HTTPStatus\nfrom lnurl.exceptions import InvalidUrl as LnurlInvalidUrl # type: ignore\n\nfrom lnbits.core.crud import get_user\nfrom lnbits.decorators import api_check_wallet_key, api_validate_post_request\nfrom lnbits.utils.exchange_rates import currencies, get_fiat_rate_satoshis\n\nfrom . import lnurlp_ext\nfrom .crud import (\n create_pay_link,\n get_pay_link,\n get_pay_links,\n update_pay_link,\n delete_pay_link,\n)\n\n\n@lnurlp_ext.route(\"/api/v1/currencies\", methods=[\"GET\"])\nasync def api_list_currencies_available():\n return jsonify(list(currencies.keys()))\n\n\n@lnurlp_ext.route(\"/api/v1/links\", methods=[\"GET\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_links():\n wallet_ids = [g.wallet.id]\n\n if \"all_wallets\" in request.args:\n wallet_ids = (await get_user(g.wallet.user)).wallet_ids\n\n try:\n return (\n jsonify(\n [\n {**link._asdict(), **{\"lnurl\": link.lnurl}}\n for link in await get_pay_links(wallet_ids)\n ]\n ),\n HTTPStatus.OK,\n )\n except LnurlInvalidUrl:\n return (\n jsonify(\n {\n \"message\": \"LNURLs need to be delivered over a publically accessible `https` domain or Tor.\"\n }\n ),\n HTTPStatus.UPGRADE_REQUIRED,\n )\n\n\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"GET\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_link_retrieve(link_id):\n link = await get_pay_link(link_id)\n\n if not link:\n return jsonify({\"message\": \"Pay link does not exist.\"}), HTTPStatus.NOT_FOUND\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n return jsonify({**link._asdict(), **{\"lnurl\": link.lnurl}}), HTTPStatus.OK\n\n\n@lnurlp_ext.route(\"/api/v1/links\", methods=[\"POST\"])\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"PUT\"])\n@api_check_wallet_key(\"invoice\")\n@api_validate_post_request(\n schema={\n \"description\": {\"type\": \"string\", \"empty\": False, \"required\": True},\n \"min\": {\"type\": \"number\", \"min\": 0.01, \"required\": True},\n \"max\": {\"type\": \"number\", \"min\": 0.01, \"required\": True},\n \"currency\": {\"type\": \"string\", \"nullable\": True, \"required\": False},\n \"comment_chars\": {\"type\": \"integer\", \"required\": True, \"min\": 0, \"max\": 800},\n \"webhook_url\": {\"type\": \"string\", \"required\": False},\n \"success_text\": {\"type\": \"string\", \"required\": False},\n \"success_url\": {\"type\": \"string\", \"required\": False},\n }\n)\nasync def api_link_create_or_update(link_id=None):\n if g.data[\"min\"] > g.data[\"max\"]:\n return jsonify({\"message\": \"Min 
is greater than max.\"}), HTTPStatus.BAD_REQUEST\n\n if g.data.get(\"currency\") == None and (\n round(g.data[\"min\"]) != g.data[\"min\"] or round(g.data[\"max\"]) != g.data[\"max\"]\n ):\n return jsonify({\"message\": \"Must use full satoshis.\"}), HTTPStatus.BAD_REQUEST\n\n if link_id:\n link = await get_pay_link(link_id)\n\n if not link:\n return (\n jsonify({\"message\": \"Pay link does not exist.\"}),\n HTTPStatus.NOT_FOUND,\n )\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n link = await update_pay_link(link_id, **g.data)\n else:\n link = await create_pay_link(wallet_id=g.wallet.id, **g.data)\n\n return (\n jsonify({**link._asdict(), **{\"lnurl\": link.lnurl}}),\n HTTPStatus.OK if link_id else HTTPStatus.CREATED,\n )\n\n\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"DELETE\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_link_delete(link_id):\n link = await get_pay_link(link_id)\n\n if not link:\n return jsonify({\"message\": \"Pay link does not exist.\"}), HTTPStatus.NOT_FOUND\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n await delete_pay_link(link_id)\n\n return \"\", HTTPStatus.NO_CONTENT\n\n\n@lnurlp_ext.route(\"/api/v1/rate/<currency>\", methods=[\"GET\"])\nasync def api_check_fiat_rate(currency):\n try:\n rate = await get_fiat_rate_satoshis(currency)\n except AssertionError:\n rate = None\n\n return jsonify({\"rate\": rate}), HTTPStatus.OK\n", "path": "lnbits/extensions/lnurlp/views_api.py"}], "after_files": [{"content": "from quart import g, jsonify, request\nfrom http import HTTPStatus\nfrom lnurl.exceptions import InvalidUrl as LnurlInvalidUrl # type: ignore\n\nfrom lnbits.core.crud import get_user\nfrom lnbits.decorators import api_check_wallet_key, api_validate_post_request\nfrom lnbits.utils.exchange_rates import currencies, get_fiat_rate_satoshis\n\nfrom . 
import lnurlp_ext\nfrom .crud import (\n create_pay_link,\n get_pay_link,\n get_pay_links,\n update_pay_link,\n delete_pay_link,\n)\n\n\n@lnurlp_ext.route(\"/api/v1/currencies\", methods=[\"GET\"])\nasync def api_list_currencies_available():\n return jsonify(list(currencies.keys()))\n\n\n@lnurlp_ext.route(\"/api/v1/links\", methods=[\"GET\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_links():\n wallet_ids = [g.wallet.id]\n\n if \"all_wallets\" in request.args:\n wallet_ids = (await get_user(g.wallet.user)).wallet_ids\n\n try:\n return (\n jsonify(\n [\n {**link._asdict(), **{\"lnurl\": link.lnurl}}\n for link in await get_pay_links(wallet_ids)\n ]\n ),\n HTTPStatus.OK,\n )\n except LnurlInvalidUrl:\n return (\n jsonify(\n {\n \"message\": \"LNURLs need to be delivered over a publically accessible `https` domain or Tor.\"\n }\n ),\n HTTPStatus.UPGRADE_REQUIRED,\n )\n\n\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"GET\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_link_retrieve(link_id):\n link = await get_pay_link(link_id)\n\n if not link:\n return jsonify({\"message\": \"Pay link does not exist.\"}), HTTPStatus.NOT_FOUND\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n return jsonify({**link._asdict(), **{\"lnurl\": link.lnurl}}), HTTPStatus.OK\n\n\n@lnurlp_ext.route(\"/api/v1/links\", methods=[\"POST\"])\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"PUT\"])\n@api_check_wallet_key(\"invoice\")\n@api_validate_post_request(\n schema={\n \"description\": {\"type\": \"string\", \"empty\": False, \"required\": True},\n \"min\": {\"type\": \"number\", \"min\": 0.01, \"required\": True},\n \"max\": {\"type\": \"number\", \"min\": 0.01, \"required\": True},\n \"currency\": {\"type\": \"string\", \"nullable\": True, \"required\": False},\n \"comment_chars\": {\"type\": \"integer\", \"required\": True, \"min\": 0, \"max\": 800},\n \"webhook_url\": {\"type\": \"string\", \"required\": False},\n \"success_text\": {\"type\": \"string\", \"required\": False},\n \"success_url\": {\"type\": \"string\", \"required\": False},\n }\n)\nasync def api_link_create_or_update(link_id=None):\n if g.data[\"min\"] > g.data[\"max\"]:\n return jsonify({\"message\": \"Min is greater than max.\"}), HTTPStatus.BAD_REQUEST\n\n if g.data.get(\"currency\") == None and (\n round(g.data[\"min\"]) != g.data[\"min\"] or round(g.data[\"max\"]) != g.data[\"max\"]\n ):\n return jsonify({\"message\": \"Must use full satoshis.\"}), HTTPStatus.BAD_REQUEST\n \n if g.data[\"success_url\"][:8] != \"https://\":\n return jsonify({\"message\": \"Success URL must be secure https://...\"}), HTTPStatus.BAD_REQUEST\n\n if link_id:\n link = await get_pay_link(link_id)\n\n if not link:\n return (\n jsonify({\"message\": \"Pay link does not exist.\"}),\n HTTPStatus.NOT_FOUND,\n )\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n link = await update_pay_link(link_id, **g.data)\n else:\n link = await create_pay_link(wallet_id=g.wallet.id, **g.data)\n\n return (\n jsonify({**link._asdict(), **{\"lnurl\": link.lnurl}}),\n HTTPStatus.OK if link_id else HTTPStatus.CREATED,\n )\n\n\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"DELETE\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_link_delete(link_id):\n link = await get_pay_link(link_id)\n\n if not link:\n return jsonify({\"message\": \"Pay link does not exist.\"}), HTTPStatus.NOT_FOUND\n\n if link.wallet != 
g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n await delete_pay_link(link_id)\n\n return \"\", HTTPStatus.NO_CONTENT\n\n\n@lnurlp_ext.route(\"/api/v1/rate/<currency>\", methods=[\"GET\"])\nasync def api_check_fiat_rate(currency):\n try:\n rate = await get_fiat_rate_satoshis(currency)\n except AssertionError:\n rate = None\n\n return jsonify({\"rate\": rate}), HTTPStatus.OK\n", "path": "lnbits/extensions/lnurlp/views_api.py"}]}
1,753
173
gh_patches_debug_37208
rasdani/github-patches
git_diff
pymedusa__Medusa-3990
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Growl Not Registering Medusa Medusa Info: | Branch: master Commit: d6eb72d4e1019e5d1286ae6ac1d7e4b318229ebe Version: v0.2.1 Database: 44.9 Python Version: | 2.7.13 (v2.7.13:a06454b1afa1, Dec 17 2016, 20:53:40) [MSC v.1500 64 bit (AMD64)] SSL Version: | OpenSSL 1.0.2j 26 Sep 2016 OS: | Windows-7-6.1.7601-SP1 Locale: | en_GB.cp1252 **What you did: Input ip:port to register gowl **What happened: Nothing! **What you expected: Successful registration. **Logs:** ``` 2018-04-07 08:09:55 ERROR Thread-298 :: [d6eb72d] Exception generated: 'module' object has no attribute 'GNTPRegister' Traceback (most recent call last): File "C:\Medusa\Medusa\medusa\server\web\core\base.py", line 283, in async_call result = function(**kwargs) File "C:\Medusa\Medusa\medusa\server\web\home\handler.py", line 354, in testGrowl result = notifiers.growl_notifier.test_notify(host, password) File "C:\Medusa\Medusa\medusa\notifiers\growl.py", line 20, in test_notify self._sendRegistration(host, password) File "C:\Medusa\Medusa\medusa\notifiers\growl.py", line 172, in _sendRegistration register = gntp.GNTPRegister() AttributeError: 'module' object has no attribute 'GNTPRegister' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `medusa/notifiers/growl.py` Content: ``` 1 # coding=utf-8 2 3 from __future__ import print_function 4 from __future__ import unicode_literals 5 6 import logging 7 import socket 8 from builtins import object 9 10 import gntp 11 12 from medusa import app, common 13 from medusa.helper.exceptions import ex 14 from medusa.logger.adapters.style import BraceAdapter 15 16 log = BraceAdapter(logging.getLogger(__name__)) 17 log.logger.addHandler(logging.NullHandler()) 18 19 20 class Notifier(object): 21 def test_notify(self, host, password): 22 self._sendRegistration(host, password) 23 return self._sendGrowl('Test Growl', 'Testing Growl settings from Medusa', 'Test', host, password, 24 force=True) 25 26 def notify_snatch(self, ep_name, is_proper): 27 if app.GROWL_NOTIFY_ONSNATCH: 28 self._sendGrowl(common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]], ep_name) 29 30 def notify_download(self, ep_name): 31 if app.GROWL_NOTIFY_ONDOWNLOAD: 32 self._sendGrowl(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name) 33 34 def notify_subtitle_download(self, ep_name, lang): 35 if app.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD: 36 self._sendGrowl(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], ep_name + ': ' + lang) 37 38 def notify_git_update(self, new_version='??'): 39 update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT] 40 title = common.notifyStrings[common.NOTIFY_GIT_UPDATE] 41 self._sendGrowl(title, update_text + new_version) 42 43 def notify_login(self, ipaddress=''): 44 update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT] 45 title = common.notifyStrings[common.NOTIFY_LOGIN] 46 self._sendGrowl(title, update_text.format(ipaddress)) 47 48 def _send_growl(self, options, message=None): 49 50 # Send Notification 51 notice = gntp.GNTPNotice() 52 53 # Required 54 notice.add_header('Application-Name', options['app']) 55 notice.add_header('Notification-Name', options['name']) 56 notice.add_header('Notification-Title', options['title']) 57 58 if options['password']: 59 notice.set_password(options['password']) 60 61 # Optional 62 if options['sticky']: 63 
notice.add_header('Notification-Sticky', options['sticky']) 64 if options['priority']: 65 notice.add_header('Notification-Priority', options['priority']) 66 if options['icon']: 67 notice.add_header('Notification-Icon', app.LOGO_URL) 68 69 if message: 70 notice.add_header('Notification-Text', message) 71 72 response = self._send(options['host'], options['port'], notice.encode(), options['debug']) 73 return True if isinstance(response, gntp.GNTPOK) else False 74 75 @staticmethod 76 def _send(host, port, data, debug=False): 77 if debug: 78 print('<Sending>\n', data, '\n</Sending>') 79 80 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 81 s.connect((host, port)) 82 s.send(data) 83 response = gntp.parse_gntp(s.recv(1024)) 84 s.close() 85 86 if debug: 87 print('<Received>\n', response, '\n</Received>') 88 89 return response 90 91 def _sendGrowl(self, title='Medusa Notification', message=None, name=None, host=None, password=None, 92 force=False): 93 if not app.USE_GROWL and not force: 94 return False 95 96 if name is None: 97 name = title 98 99 if host is None: 100 hostParts = app.GROWL_HOST.split(':') 101 else: 102 hostParts = host.split(':') 103 104 if len(hostParts) != 2 or hostParts[1] == '': 105 port = 23053 106 else: 107 port = int(hostParts[1]) 108 109 growlHosts = [(hostParts[0], port)] 110 111 opts = { 112 'name': name, 113 'title': title, 114 'app': 'Medusa', 115 'sticky': None, 116 'priority': None, 117 'debug': False 118 } 119 120 if password is None: 121 opts['password'] = app.GROWL_PASSWORD 122 else: 123 opts['password'] = password 124 125 opts['icon'] = True 126 127 for pc in growlHosts: 128 opts['host'] = pc[0] 129 opts['port'] = pc[1] 130 log.debug( 131 u'GROWL: Sending growl to {host}:{port} - {msg!r}', 132 {'msg': message, 'host': opts['host'], 'port': opts['port']} 133 ) 134 try: 135 if self._send_growl(opts, message): 136 return True 137 else: 138 if self._sendRegistration(host, password): 139 return self._send_growl(opts, message) 140 else: 141 return False 142 except Exception as error: 143 log.warning( 144 u'GROWL: Unable to send growl to {host}:{port} - {msg!r}', 145 {'msg': ex(error), 'host': opts['host'], 'port': opts['port']} 146 ) 147 return False 148 149 def _sendRegistration(self, host=None, password=None): 150 opts = {} 151 152 if host is None: 153 hostParts = app.GROWL_HOST.split(':') 154 else: 155 hostParts = host.split(':') 156 157 if len(hostParts) != 2 or hostParts[1] == '': 158 port = 23053 159 else: 160 port = int(hostParts[1]) 161 162 opts['host'] = hostParts[0] 163 opts['port'] = port 164 165 if password is None: 166 opts['password'] = app.GROWL_PASSWORD 167 else: 168 opts['password'] = password 169 170 opts['app'] = 'Medusa' 171 opts['debug'] = False 172 173 # Send Registration 174 register = gntp.GNTPRegister() 175 register.add_header('Application-Name', opts['app']) 176 register.add_header('Application-Icon', app.LOGO_URL) 177 178 register.add_notification('Test', True) 179 register.add_notification(common.notifyStrings[common.NOTIFY_SNATCH], True) 180 register.add_notification(common.notifyStrings[common.NOTIFY_DOWNLOAD], True) 181 register.add_notification(common.notifyStrings[common.NOTIFY_GIT_UPDATE], True) 182 183 if opts['password']: 184 register.set_password(opts['password']) 185 186 try: 187 return self._send(opts['host'], opts['port'], register.encode(), opts['debug']) 188 except Exception as error: 189 log.warning( 190 u'GROWL: Unable to send growl to {host}:{port} - {msg!r}', 191 {'msg': ex(error), 'host': opts['host'], 'port': 
opts['port']} 192 ) 193 return False 194 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/medusa/notifiers/growl.py b/medusa/notifiers/growl.py --- a/medusa/notifiers/growl.py +++ b/medusa/notifiers/growl.py @@ -25,7 +25,10 @@ def notify_snatch(self, ep_name, is_proper): if app.GROWL_NOTIFY_ONSNATCH: - self._sendGrowl(common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]], ep_name) + self._sendGrowl( + common.notifyStrings[ + (common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper] + ], ep_name) def notify_download(self, ep_name): if app.GROWL_NOTIFY_ONDOWNLOAD: @@ -47,16 +50,13 @@ def _send_growl(self, options, message=None): - # Send Notification - notice = gntp.GNTPNotice() - - # Required - notice.add_header('Application-Name', options['app']) - notice.add_header('Notification-Name', options['name']) - notice.add_header('Notification-Title', options['title']) - - if options['password']: - notice.set_password(options['password']) + # Initialize Notification + notice = gntp.core.GNTPNotice( + app=options['app'], + name=options['name'], + title=options['title'], + password=options['password'], + ) # Optional if options['sticky']: @@ -70,7 +70,7 @@ notice.add_header('Notification-Text', message) response = self._send(options['host'], options['port'], notice.encode(), options['debug']) - return True if isinstance(response, gntp.GNTPOK) else False + return True if isinstance(response, gntp.core.GNTPOK) else False @staticmethod def _send(host, port, data, debug=False): @@ -80,7 +80,7 @@ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) s.send(data) - response = gntp.parse_gntp(s.recv(1024)) + response = gntp.core.parse_gntp(s.recv(1024)) s.close() if debug: @@ -171,7 +171,7 @@ opts['debug'] = False # Send Registration - register = gntp.GNTPRegister() + register = gntp.core.GNTPRegister() register.add_header('Application-Name', opts['app']) register.add_header('Application-Icon', app.LOGO_URL)
{"golden_diff": "diff --git a/medusa/notifiers/growl.py b/medusa/notifiers/growl.py\n--- a/medusa/notifiers/growl.py\n+++ b/medusa/notifiers/growl.py\n@@ -25,7 +25,10 @@\n \n def notify_snatch(self, ep_name, is_proper):\n if app.GROWL_NOTIFY_ONSNATCH:\n- self._sendGrowl(common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]], ep_name)\n+ self._sendGrowl(\n+ common.notifyStrings[\n+ (common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]\n+ ], ep_name)\n \n def notify_download(self, ep_name):\n if app.GROWL_NOTIFY_ONDOWNLOAD:\n@@ -47,16 +50,13 @@\n \n def _send_growl(self, options, message=None):\n \n- # Send Notification\n- notice = gntp.GNTPNotice()\n-\n- # Required\n- notice.add_header('Application-Name', options['app'])\n- notice.add_header('Notification-Name', options['name'])\n- notice.add_header('Notification-Title', options['title'])\n-\n- if options['password']:\n- notice.set_password(options['password'])\n+ # Initialize Notification\n+ notice = gntp.core.GNTPNotice(\n+ app=options['app'],\n+ name=options['name'],\n+ title=options['title'],\n+ password=options['password'],\n+ )\n \n # Optional\n if options['sticky']:\n@@ -70,7 +70,7 @@\n notice.add_header('Notification-Text', message)\n \n response = self._send(options['host'], options['port'], notice.encode(), options['debug'])\n- return True if isinstance(response, gntp.GNTPOK) else False\n+ return True if isinstance(response, gntp.core.GNTPOK) else False\n \n @staticmethod\n def _send(host, port, data, debug=False):\n@@ -80,7 +80,7 @@\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n s.send(data)\n- response = gntp.parse_gntp(s.recv(1024))\n+ response = gntp.core.parse_gntp(s.recv(1024))\n s.close()\n \n if debug:\n@@ -171,7 +171,7 @@\n opts['debug'] = False\n \n # Send Registration\n- register = gntp.GNTPRegister()\n+ register = gntp.core.GNTPRegister()\n register.add_header('Application-Name', opts['app'])\n register.add_header('Application-Icon', app.LOGO_URL)\n", "issue": "Growl Not Registering Medusa\nMedusa Info: | Branch: master Commit: d6eb72d4e1019e5d1286ae6ac1d7e4b318229ebe Version: v0.2.1 Database: 44.9\r\nPython Version: | 2.7.13 (v2.7.13:a06454b1afa1, Dec 17 2016, 20:53:40) [MSC v.1500 64 bit (AMD64)]\r\nSSL Version: | OpenSSL 1.0.2j 26 Sep 2016\r\nOS: | Windows-7-6.1.7601-SP1\r\nLocale: | en_GB.cp1252\r\n\r\n**What you did: Input ip:port to register gowl\r\n**What happened: Nothing!\r\n**What you expected: Successful registration.\r\n**Logs:**\r\n```\r\n2018-04-07 08:09:55 ERROR Thread-298 :: [d6eb72d] Exception generated: 'module' object has no attribute 'GNTPRegister'\r\nTraceback (most recent call last):\r\n File \"C:\\Medusa\\Medusa\\medusa\\server\\web\\core\\base.py\", line 283, in async_call\r\n result = function(**kwargs)\r\n File \"C:\\Medusa\\Medusa\\medusa\\server\\web\\home\\handler.py\", line 354, in testGrowl\r\n result = notifiers.growl_notifier.test_notify(host, password)\r\n File \"C:\\Medusa\\Medusa\\medusa\\notifiers\\growl.py\", line 20, in test_notify\r\n self._sendRegistration(host, password)\r\n File \"C:\\Medusa\\Medusa\\medusa\\notifiers\\growl.py\", line 172, in _sendRegistration\r\n register = gntp.GNTPRegister()\r\nAttributeError: 'module' object has no attribute 'GNTPRegister'\r\n```\r\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport socket\nfrom builtins import object\n\nimport gntp\n\nfrom medusa import app, 
common\nfrom medusa.helper.exceptions import ex\nfrom medusa.logger.adapters.style import BraceAdapter\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n def test_notify(self, host, password):\n self._sendRegistration(host, password)\n return self._sendGrowl('Test Growl', 'Testing Growl settings from Medusa', 'Test', host, password,\n force=True)\n\n def notify_snatch(self, ep_name, is_proper):\n if app.GROWL_NOTIFY_ONSNATCH:\n self._sendGrowl(common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]], ep_name)\n\n def notify_download(self, ep_name):\n if app.GROWL_NOTIFY_ONDOWNLOAD:\n self._sendGrowl(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name)\n\n def notify_subtitle_download(self, ep_name, lang):\n if app.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD:\n self._sendGrowl(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], ep_name + ': ' + lang)\n\n def notify_git_update(self, new_version='??'):\n update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]\n title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]\n self._sendGrowl(title, update_text + new_version)\n\n def notify_login(self, ipaddress=''):\n update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]\n title = common.notifyStrings[common.NOTIFY_LOGIN]\n self._sendGrowl(title, update_text.format(ipaddress))\n\n def _send_growl(self, options, message=None):\n\n # Send Notification\n notice = gntp.GNTPNotice()\n\n # Required\n notice.add_header('Application-Name', options['app'])\n notice.add_header('Notification-Name', options['name'])\n notice.add_header('Notification-Title', options['title'])\n\n if options['password']:\n notice.set_password(options['password'])\n\n # Optional\n if options['sticky']:\n notice.add_header('Notification-Sticky', options['sticky'])\n if options['priority']:\n notice.add_header('Notification-Priority', options['priority'])\n if options['icon']:\n notice.add_header('Notification-Icon', app.LOGO_URL)\n\n if message:\n notice.add_header('Notification-Text', message)\n\n response = self._send(options['host'], options['port'], notice.encode(), options['debug'])\n return True if isinstance(response, gntp.GNTPOK) else False\n\n @staticmethod\n def _send(host, port, data, debug=False):\n if debug:\n print('<Sending>\\n', data, '\\n</Sending>')\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n s.send(data)\n response = gntp.parse_gntp(s.recv(1024))\n s.close()\n\n if debug:\n print('<Received>\\n', response, '\\n</Received>')\n\n return response\n\n def _sendGrowl(self, title='Medusa Notification', message=None, name=None, host=None, password=None,\n force=False):\n if not app.USE_GROWL and not force:\n return False\n\n if name is None:\n name = title\n\n if host is None:\n hostParts = app.GROWL_HOST.split(':')\n else:\n hostParts = host.split(':')\n\n if len(hostParts) != 2 or hostParts[1] == '':\n port = 23053\n else:\n port = int(hostParts[1])\n\n growlHosts = [(hostParts[0], port)]\n\n opts = {\n 'name': name,\n 'title': title,\n 'app': 'Medusa',\n 'sticky': None,\n 'priority': None,\n 'debug': False\n }\n\n if password is None:\n opts['password'] = app.GROWL_PASSWORD\n else:\n opts['password'] = password\n\n opts['icon'] = True\n\n for pc in growlHosts:\n opts['host'] = pc[0]\n opts['port'] = pc[1]\n log.debug(\n u'GROWL: Sending growl to {host}:{port} - {msg!r}',\n {'msg': message, 'host': opts['host'], 'port': opts['port']}\n )\n try:\n if 
self._send_growl(opts, message):\n return True\n else:\n if self._sendRegistration(host, password):\n return self._send_growl(opts, message)\n else:\n return False\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',\n {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}\n )\n return False\n\n def _sendRegistration(self, host=None, password=None):\n opts = {}\n\n if host is None:\n hostParts = app.GROWL_HOST.split(':')\n else:\n hostParts = host.split(':')\n\n if len(hostParts) != 2 or hostParts[1] == '':\n port = 23053\n else:\n port = int(hostParts[1])\n\n opts['host'] = hostParts[0]\n opts['port'] = port\n\n if password is None:\n opts['password'] = app.GROWL_PASSWORD\n else:\n opts['password'] = password\n\n opts['app'] = 'Medusa'\n opts['debug'] = False\n\n # Send Registration\n register = gntp.GNTPRegister()\n register.add_header('Application-Name', opts['app'])\n register.add_header('Application-Icon', app.LOGO_URL)\n\n register.add_notification('Test', True)\n register.add_notification(common.notifyStrings[common.NOTIFY_SNATCH], True)\n register.add_notification(common.notifyStrings[common.NOTIFY_DOWNLOAD], True)\n register.add_notification(common.notifyStrings[common.NOTIFY_GIT_UPDATE], True)\n\n if opts['password']:\n register.set_password(opts['password'])\n\n try:\n return self._send(opts['host'], opts['port'], register.encode(), opts['debug'])\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',\n {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}\n )\n return False\n", "path": "medusa/notifiers/growl.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport socket\nfrom builtins import object\n\nimport gntp\n\nfrom medusa import app, common\nfrom medusa.helper.exceptions import ex\nfrom medusa.logger.adapters.style import BraceAdapter\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n def test_notify(self, host, password):\n self._sendRegistration(host, password)\n return self._sendGrowl('Test Growl', 'Testing Growl settings from Medusa', 'Test', host, password,\n force=True)\n\n def notify_snatch(self, ep_name, is_proper):\n if app.GROWL_NOTIFY_ONSNATCH:\n self._sendGrowl(\n common.notifyStrings[\n (common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]\n ], ep_name)\n\n def notify_download(self, ep_name):\n if app.GROWL_NOTIFY_ONDOWNLOAD:\n self._sendGrowl(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name)\n\n def notify_subtitle_download(self, ep_name, lang):\n if app.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD:\n self._sendGrowl(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], ep_name + ': ' + lang)\n\n def notify_git_update(self, new_version='??'):\n update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]\n title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]\n self._sendGrowl(title, update_text + new_version)\n\n def notify_login(self, ipaddress=''):\n update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]\n title = common.notifyStrings[common.NOTIFY_LOGIN]\n self._sendGrowl(title, update_text.format(ipaddress))\n\n def _send_growl(self, options, message=None):\n\n # Initialize Notification\n notice = gntp.core.GNTPNotice(\n app=options['app'],\n name=options['name'],\n title=options['title'],\n password=options['password'],\n )\n\n # Optional\n if 
options['sticky']:\n notice.add_header('Notification-Sticky', options['sticky'])\n if options['priority']:\n notice.add_header('Notification-Priority', options['priority'])\n if options['icon']:\n notice.add_header('Notification-Icon', app.LOGO_URL)\n\n if message:\n notice.add_header('Notification-Text', message)\n\n response = self._send(options['host'], options['port'], notice.encode(), options['debug'])\n return True if isinstance(response, gntp.core.GNTPOK) else False\n\n @staticmethod\n def _send(host, port, data, debug=False):\n if debug:\n print('<Sending>\\n', data, '\\n</Sending>')\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n s.send(data)\n response = gntp.core.parse_gntp(s.recv(1024))\n s.close()\n\n if debug:\n print('<Received>\\n', response, '\\n</Received>')\n\n return response\n\n def _sendGrowl(self, title='Medusa Notification', message=None, name=None, host=None, password=None,\n force=False):\n if not app.USE_GROWL and not force:\n return False\n\n if name is None:\n name = title\n\n if host is None:\n hostParts = app.GROWL_HOST.split(':')\n else:\n hostParts = host.split(':')\n\n if len(hostParts) != 2 or hostParts[1] == '':\n port = 23053\n else:\n port = int(hostParts[1])\n\n growlHosts = [(hostParts[0], port)]\n\n opts = {\n 'name': name,\n 'title': title,\n 'app': 'Medusa',\n 'sticky': None,\n 'priority': None,\n 'debug': False\n }\n\n if password is None:\n opts['password'] = app.GROWL_PASSWORD\n else:\n opts['password'] = password\n\n opts['icon'] = True\n\n for pc in growlHosts:\n opts['host'] = pc[0]\n opts['port'] = pc[1]\n log.debug(\n u'GROWL: Sending growl to {host}:{port} - {msg!r}',\n {'msg': message, 'host': opts['host'], 'port': opts['port']}\n )\n try:\n if self._send_growl(opts, message):\n return True\n else:\n if self._sendRegistration(host, password):\n return self._send_growl(opts, message)\n else:\n return False\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',\n {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}\n )\n return False\n\n def _sendRegistration(self, host=None, password=None):\n opts = {}\n\n if host is None:\n hostParts = app.GROWL_HOST.split(':')\n else:\n hostParts = host.split(':')\n\n if len(hostParts) != 2 or hostParts[1] == '':\n port = 23053\n else:\n port = int(hostParts[1])\n\n opts['host'] = hostParts[0]\n opts['port'] = port\n\n if password is None:\n opts['password'] = app.GROWL_PASSWORD\n else:\n opts['password'] = password\n\n opts['app'] = 'Medusa'\n opts['debug'] = False\n\n # Send Registration\n register = gntp.core.GNTPRegister()\n register.add_header('Application-Name', opts['app'])\n register.add_header('Application-Icon', app.LOGO_URL)\n\n register.add_notification('Test', True)\n register.add_notification(common.notifyStrings[common.NOTIFY_SNATCH], True)\n register.add_notification(common.notifyStrings[common.NOTIFY_DOWNLOAD], True)\n register.add_notification(common.notifyStrings[common.NOTIFY_GIT_UPDATE], True)\n\n if opts['password']:\n register.set_password(opts['password'])\n\n try:\n return self._send(opts['host'], opts['port'], register.encode(), opts['debug'])\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',\n {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}\n )\n return False\n", "path": "medusa/notifiers/growl.py"}]}
2,695
610
gh_patches_debug_37245
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-2351
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Consider using binary search for ExplicitBucketHistogram https://github.com/open-telemetry/opentelemetry-python/blob/dfb5c66ae310001bb40326f6286345b7fa252aba/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py#L129-L134 Could use some benchmarks, but this is likely to perform better with binary search (builtin `bisect` module) above a certain number of buckets. Also, using a list instead of `OrderedDict` may be faster --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from abc import ABC, abstractmethod 16 from collections import OrderedDict 17 from logging import getLogger 18 from math import inf 19 from threading import Lock 20 from typing import Generic, Optional, Sequence, TypeVar 21 22 from opentelemetry.sdk._metrics.measurement import Measurement 23 from opentelemetry.sdk._metrics.point import ( 24 AggregationTemporality, 25 Gauge, 26 Histogram, 27 PointT, 28 Sum, 29 ) 30 from opentelemetry.util._time import _time_ns 31 32 _PointVarT = TypeVar("_PointVarT", bound=PointT) 33 34 _logger = getLogger(__name__) 35 36 37 class _InstrumentMonotonicityAwareAggregation: 38 def __init__(self, instrument_is_monotonic: bool): 39 self._instrument_is_monotonic = instrument_is_monotonic 40 super().__init__() 41 42 43 class Aggregation(ABC, Generic[_PointVarT]): 44 def __init__(self): 45 self._lock = Lock() 46 47 @abstractmethod 48 def aggregate(self, measurement: Measurement) -> None: 49 pass 50 51 @abstractmethod 52 def collect(self) -> Optional[_PointVarT]: 53 pass 54 55 56 class SynchronousSumAggregation( 57 _InstrumentMonotonicityAwareAggregation, Aggregation[Sum] 58 ): 59 def __init__(self, instrument_is_monotonic: bool): 60 super().__init__(instrument_is_monotonic) 61 self._value = 0 62 self._start_time_unix_nano = _time_ns() 63 64 def aggregate(self, measurement: Measurement) -> None: 65 with self._lock: 66 self._value = self._value + measurement.value 67 68 def collect(self) -> Optional[Sum]: 69 """ 70 Atomically return a point for the current value of the metric and 71 reset the aggregation value. 
72 """ 73 now = _time_ns() 74 75 with self._lock: 76 value = self._value 77 start_time_unix_nano = self._start_time_unix_nano 78 79 self._value = 0 80 self._start_time_unix_nano = now + 1 81 82 return Sum( 83 aggregation_temporality=AggregationTemporality.DELTA, 84 is_monotonic=self._instrument_is_monotonic, 85 start_time_unix_nano=start_time_unix_nano, 86 time_unix_nano=now, 87 value=value, 88 ) 89 90 91 class AsynchronousSumAggregation( 92 _InstrumentMonotonicityAwareAggregation, Aggregation[Sum] 93 ): 94 def __init__(self, instrument_is_monotonic: bool): 95 super().__init__(instrument_is_monotonic) 96 self._value = None 97 self._start_time_unix_nano = _time_ns() 98 99 def aggregate(self, measurement: Measurement) -> None: 100 with self._lock: 101 self._value = measurement.value 102 103 def collect(self) -> Optional[Sum]: 104 """ 105 Atomically return a point for the current value of the metric. 106 """ 107 if self._value is None: 108 return None 109 110 return Sum( 111 start_time_unix_nano=self._start_time_unix_nano, 112 time_unix_nano=_time_ns(), 113 value=self._value, 114 aggregation_temporality=AggregationTemporality.CUMULATIVE, 115 is_monotonic=self._instrument_is_monotonic, 116 ) 117 118 119 class LastValueAggregation(Aggregation[Gauge]): 120 def __init__(self): 121 super().__init__() 122 self._value = None 123 124 def aggregate(self, measurement: Measurement): 125 with self._lock: 126 self._value = measurement.value 127 128 def collect(self) -> Optional[Gauge]: 129 """ 130 Atomically return a point for the current value of the metric. 131 """ 132 if self._value is None: 133 return None 134 135 return Gauge( 136 time_unix_nano=_time_ns(), 137 value=self._value, 138 ) 139 140 141 class ExplicitBucketHistogramAggregation(Aggregation[Histogram]): 142 def __init__( 143 self, 144 boundaries: Sequence[int] = ( 145 0, 146 5, 147 10, 148 25, 149 50, 150 75, 151 100, 152 250, 153 500, 154 1000, 155 ), 156 record_min_max: bool = True, 157 ): 158 super().__init__() 159 self._value = OrderedDict([(key, 0) for key in (*boundaries, inf)]) 160 self._min = inf 161 self._max = -inf 162 self._sum = 0 163 self._record_min_max = record_min_max 164 self._start_time_unix_nano = _time_ns() 165 self._boundaries = boundaries 166 167 def aggregate(self, measurement: Measurement) -> None: 168 169 value = measurement.value 170 171 if self._record_min_max: 172 self._min = min(self._min, value) 173 self._max = max(self._max, value) 174 175 self._sum += value 176 177 for key in self._value.keys(): 178 179 if value < key: 180 self._value[key] = self._value[key] + 1 181 182 break 183 184 def collect(self) -> Optional[Histogram]: 185 """ 186 Atomically return a point for the current value of the metric. 187 """ 188 now = _time_ns() 189 190 with self._lock: 191 value = self._value 192 start_time_unix_nano = self._start_time_unix_nano 193 194 self._value = OrderedDict( 195 [(key, 0) for key in (*self._boundaries, inf)] 196 ) 197 self._start_time_unix_nano = now + 1 198 199 return Histogram( 200 start_time_unix_nano=start_time_unix_nano, 201 time_unix_nano=now, 202 bucket_counts=tuple(value.values()), 203 explicit_bounds=self._boundaries, 204 aggregation_temporality=AggregationTemporality.DELTA, 205 ) 206 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py --- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py @@ -13,7 +13,7 @@ # limitations under the License. from abc import ABC, abstractmethod -from collections import OrderedDict +from bisect import bisect_left from logging import getLogger from math import inf from threading import Lock @@ -156,13 +156,14 @@ record_min_max: bool = True, ): super().__init__() - self._value = OrderedDict([(key, 0) for key in (*boundaries, inf)]) + # pylint: disable=unnecessary-comprehension + self._boundaries = [boundary for boundary in (*boundaries, inf)] + self.value = [0 for _ in range(len(self._boundaries))] self._min = inf self._max = -inf self._sum = 0 self._record_min_max = record_min_max self._start_time_unix_nano = _time_ns() - self._boundaries = boundaries def aggregate(self, measurement: Measurement) -> None: @@ -174,12 +175,7 @@ self._sum += value - for key in self._value.keys(): - - if value < key: - self._value[key] = self._value[key] + 1 - - break + self.value[bisect_left(self._boundaries, value)] += 1 def collect(self) -> Optional[Histogram]: """ @@ -188,18 +184,16 @@ now = _time_ns() with self._lock: - value = self._value + value = self.value start_time_unix_nano = self._start_time_unix_nano - self._value = OrderedDict( - [(key, 0) for key in (*self._boundaries, inf)] - ) + self.value = [0 for _ in range(len(self._boundaries))] self._start_time_unix_nano = now + 1 return Histogram( start_time_unix_nano=start_time_unix_nano, time_unix_nano=now, - bucket_counts=tuple(value.values()), + bucket_counts=tuple(value), explicit_bounds=self._boundaries, aggregation_temporality=AggregationTemporality.DELTA, )
{"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n@@ -13,7 +13,7 @@\n # limitations under the License.\n \n from abc import ABC, abstractmethod\n-from collections import OrderedDict\n+from bisect import bisect_left\n from logging import getLogger\n from math import inf\n from threading import Lock\n@@ -156,13 +156,14 @@\n record_min_max: bool = True,\n ):\n super().__init__()\n- self._value = OrderedDict([(key, 0) for key in (*boundaries, inf)])\n+ # pylint: disable=unnecessary-comprehension\n+ self._boundaries = [boundary for boundary in (*boundaries, inf)]\n+ self.value = [0 for _ in range(len(self._boundaries))]\n self._min = inf\n self._max = -inf\n self._sum = 0\n self._record_min_max = record_min_max\n self._start_time_unix_nano = _time_ns()\n- self._boundaries = boundaries\n \n def aggregate(self, measurement: Measurement) -> None:\n \n@@ -174,12 +175,7 @@\n \n self._sum += value\n \n- for key in self._value.keys():\n-\n- if value < key:\n- self._value[key] = self._value[key] + 1\n-\n- break\n+ self.value[bisect_left(self._boundaries, value)] += 1\n \n def collect(self) -> Optional[Histogram]:\n \"\"\"\n@@ -188,18 +184,16 @@\n now = _time_ns()\n \n with self._lock:\n- value = self._value\n+ value = self.value\n start_time_unix_nano = self._start_time_unix_nano\n \n- self._value = OrderedDict(\n- [(key, 0) for key in (*self._boundaries, inf)]\n- )\n+ self.value = [0 for _ in range(len(self._boundaries))]\n self._start_time_unix_nano = now + 1\n \n return Histogram(\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n- bucket_counts=tuple(value.values()),\n+ bucket_counts=tuple(value),\n explicit_bounds=self._boundaries,\n aggregation_temporality=AggregationTemporality.DELTA,\n )\n", "issue": "Consider using binary search for ExplicitBucketHistogram\nhttps://github.com/open-telemetry/opentelemetry-python/blob/dfb5c66ae310001bb40326f6286345b7fa252aba/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py#L129-L134\r\n\r\nCould use some benchmarks, but this is likely to perform better with binary search (builtin `bisect` module) above a certain number of buckets. 
Also, using a list instead of `OrderedDict` may be faster\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\nfrom collections import OrderedDict\nfrom logging import getLogger\nfrom math import inf\nfrom threading import Lock\nfrom typing import Generic, Optional, Sequence, TypeVar\n\nfrom opentelemetry.sdk._metrics.measurement import Measurement\nfrom opentelemetry.sdk._metrics.point import (\n AggregationTemporality,\n Gauge,\n Histogram,\n PointT,\n Sum,\n)\nfrom opentelemetry.util._time import _time_ns\n\n_PointVarT = TypeVar(\"_PointVarT\", bound=PointT)\n\n_logger = getLogger(__name__)\n\n\nclass _InstrumentMonotonicityAwareAggregation:\n def __init__(self, instrument_is_monotonic: bool):\n self._instrument_is_monotonic = instrument_is_monotonic\n super().__init__()\n\n\nclass Aggregation(ABC, Generic[_PointVarT]):\n def __init__(self):\n self._lock = Lock()\n\n @abstractmethod\n def aggregate(self, measurement: Measurement) -> None:\n pass\n\n @abstractmethod\n def collect(self) -> Optional[_PointVarT]:\n pass\n\n\nclass SynchronousSumAggregation(\n _InstrumentMonotonicityAwareAggregation, Aggregation[Sum]\n):\n def __init__(self, instrument_is_monotonic: bool):\n super().__init__(instrument_is_monotonic)\n self._value = 0\n self._start_time_unix_nano = _time_ns()\n\n def aggregate(self, measurement: Measurement) -> None:\n with self._lock:\n self._value = self._value + measurement.value\n\n def collect(self) -> Optional[Sum]:\n \"\"\"\n Atomically return a point for the current value of the metric and\n reset the aggregation value.\n \"\"\"\n now = _time_ns()\n\n with self._lock:\n value = self._value\n start_time_unix_nano = self._start_time_unix_nano\n\n self._value = 0\n self._start_time_unix_nano = now + 1\n\n return Sum(\n aggregation_temporality=AggregationTemporality.DELTA,\n is_monotonic=self._instrument_is_monotonic,\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n value=value,\n )\n\n\nclass AsynchronousSumAggregation(\n _InstrumentMonotonicityAwareAggregation, Aggregation[Sum]\n):\n def __init__(self, instrument_is_monotonic: bool):\n super().__init__(instrument_is_monotonic)\n self._value = None\n self._start_time_unix_nano = _time_ns()\n\n def aggregate(self, measurement: Measurement) -> None:\n with self._lock:\n self._value = measurement.value\n\n def collect(self) -> Optional[Sum]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n if self._value is None:\n return None\n\n return Sum(\n start_time_unix_nano=self._start_time_unix_nano,\n time_unix_nano=_time_ns(),\n value=self._value,\n aggregation_temporality=AggregationTemporality.CUMULATIVE,\n is_monotonic=self._instrument_is_monotonic,\n )\n\n\nclass LastValueAggregation(Aggregation[Gauge]):\n def __init__(self):\n super().__init__()\n self._value = None\n\n def aggregate(self, measurement: Measurement):\n with self._lock:\n self._value = 
measurement.value\n\n def collect(self) -> Optional[Gauge]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n if self._value is None:\n return None\n\n return Gauge(\n time_unix_nano=_time_ns(),\n value=self._value,\n )\n\n\nclass ExplicitBucketHistogramAggregation(Aggregation[Histogram]):\n def __init__(\n self,\n boundaries: Sequence[int] = (\n 0,\n 5,\n 10,\n 25,\n 50,\n 75,\n 100,\n 250,\n 500,\n 1000,\n ),\n record_min_max: bool = True,\n ):\n super().__init__()\n self._value = OrderedDict([(key, 0) for key in (*boundaries, inf)])\n self._min = inf\n self._max = -inf\n self._sum = 0\n self._record_min_max = record_min_max\n self._start_time_unix_nano = _time_ns()\n self._boundaries = boundaries\n\n def aggregate(self, measurement: Measurement) -> None:\n\n value = measurement.value\n\n if self._record_min_max:\n self._min = min(self._min, value)\n self._max = max(self._max, value)\n\n self._sum += value\n\n for key in self._value.keys():\n\n if value < key:\n self._value[key] = self._value[key] + 1\n\n break\n\n def collect(self) -> Optional[Histogram]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n now = _time_ns()\n\n with self._lock:\n value = self._value\n start_time_unix_nano = self._start_time_unix_nano\n\n self._value = OrderedDict(\n [(key, 0) for key in (*self._boundaries, inf)]\n )\n self._start_time_unix_nano = now + 1\n\n return Histogram(\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n bucket_counts=tuple(value.values()),\n explicit_bounds=self._boundaries,\n aggregation_temporality=AggregationTemporality.DELTA,\n )\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\nfrom bisect import bisect_left\nfrom logging import getLogger\nfrom math import inf\nfrom threading import Lock\nfrom typing import Generic, Optional, Sequence, TypeVar\n\nfrom opentelemetry.sdk._metrics.measurement import Measurement\nfrom opentelemetry.sdk._metrics.point import (\n AggregationTemporality,\n Gauge,\n Histogram,\n PointT,\n Sum,\n)\nfrom opentelemetry.util._time import _time_ns\n\n_PointVarT = TypeVar(\"_PointVarT\", bound=PointT)\n\n_logger = getLogger(__name__)\n\n\nclass _InstrumentMonotonicityAwareAggregation:\n def __init__(self, instrument_is_monotonic: bool):\n self._instrument_is_monotonic = instrument_is_monotonic\n super().__init__()\n\n\nclass Aggregation(ABC, Generic[_PointVarT]):\n def __init__(self):\n self._lock = Lock()\n\n @abstractmethod\n def aggregate(self, measurement: Measurement) -> None:\n pass\n\n @abstractmethod\n def collect(self) -> Optional[_PointVarT]:\n pass\n\n\nclass SynchronousSumAggregation(\n _InstrumentMonotonicityAwareAggregation, Aggregation[Sum]\n):\n def __init__(self, instrument_is_monotonic: bool):\n super().__init__(instrument_is_monotonic)\n self._value = 0\n 
self._start_time_unix_nano = _time_ns()\n\n def aggregate(self, measurement: Measurement) -> None:\n with self._lock:\n self._value = self._value + measurement.value\n\n def collect(self) -> Optional[Sum]:\n \"\"\"\n Atomically return a point for the current value of the metric and\n reset the aggregation value.\n \"\"\"\n now = _time_ns()\n\n with self._lock:\n value = self._value\n start_time_unix_nano = self._start_time_unix_nano\n\n self._value = 0\n self._start_time_unix_nano = now + 1\n\n return Sum(\n aggregation_temporality=AggregationTemporality.DELTA,\n is_monotonic=self._instrument_is_monotonic,\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n value=value,\n )\n\n\nclass AsynchronousSumAggregation(\n _InstrumentMonotonicityAwareAggregation, Aggregation[Sum]\n):\n def __init__(self, instrument_is_monotonic: bool):\n super().__init__(instrument_is_monotonic)\n self._value = None\n self._start_time_unix_nano = _time_ns()\n\n def aggregate(self, measurement: Measurement) -> None:\n with self._lock:\n self._value = measurement.value\n\n def collect(self) -> Optional[Sum]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n if self._value is None:\n return None\n\n return Sum(\n start_time_unix_nano=self._start_time_unix_nano,\n time_unix_nano=_time_ns(),\n value=self._value,\n aggregation_temporality=AggregationTemporality.CUMULATIVE,\n is_monotonic=self._instrument_is_monotonic,\n )\n\n\nclass LastValueAggregation(Aggregation[Gauge]):\n def __init__(self):\n super().__init__()\n self._value = None\n\n def aggregate(self, measurement: Measurement):\n with self._lock:\n self._value = measurement.value\n\n def collect(self) -> Optional[Gauge]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n if self._value is None:\n return None\n\n return Gauge(\n time_unix_nano=_time_ns(),\n value=self._value,\n )\n\n\nclass ExplicitBucketHistogramAggregation(Aggregation[Histogram]):\n def __init__(\n self,\n boundaries: Sequence[int] = (\n 0,\n 5,\n 10,\n 25,\n 50,\n 75,\n 100,\n 250,\n 500,\n 1000,\n ),\n record_min_max: bool = True,\n ):\n super().__init__()\n # pylint: disable=unnecessary-comprehension\n self._boundaries = [boundary for boundary in (*boundaries, inf)]\n self.value = [0 for _ in range(len(self._boundaries))]\n self._min = inf\n self._max = -inf\n self._sum = 0\n self._record_min_max = record_min_max\n self._start_time_unix_nano = _time_ns()\n\n def aggregate(self, measurement: Measurement) -> None:\n\n value = measurement.value\n\n if self._record_min_max:\n self._min = min(self._min, value)\n self._max = max(self._max, value)\n\n self._sum += value\n\n self.value[bisect_left(self._boundaries, value)] += 1\n\n def collect(self) -> Optional[Histogram]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n now = _time_ns()\n\n with self._lock:\n value = self.value\n start_time_unix_nano = self._start_time_unix_nano\n\n self.value = [0 for _ in range(len(self._boundaries))]\n self._start_time_unix_nano = now + 1\n\n return Histogram(\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n bucket_counts=tuple(value),\n explicit_bounds=self._boundaries,\n aggregation_temporality=AggregationTemporality.DELTA,\n )\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py"}]}
2,292
576
gh_patches_debug_20567
rasdani/github-patches
git_diff
pantsbuild__pants-13467
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pants package does not build missing docker images if previous build was cached. **Describe the bug** Pant's caching of build targets does not take into consideration that the final target does not exist. Take this example: https://www.pantsbuild.org/v2.8/docs/docker#example ``` $ ./pants package src/docker/hw/Dockerfile [...] 18:07:29.66 [INFO] Completed: Building src.python.hw/bin.pex 18:07:31.83 [INFO] Completed: Building docker image helloworld:latest 18:07:31.83 [INFO] Built docker image: helloworld:latest $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE helloworld latest abcdefabcdef 6 seconds ago 420MB $ docker rmi helloworld:latest $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE $ ./pants package src/docker/hw/Dockerfile 19:07:31.83 [INFO] Built docker image: helloworld:latest $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE ``` If you did the equivalent commands for the `helloworld.pex` files, `pants package` would replace the missing file in the `dist/` folder. **Pants version** 2.8rc1 **OS** Linux --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/python/pants/backend/docker/util_rules/docker_binary.py` Content: ``` 1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 from __future__ import annotations 5 6 from dataclasses import dataclass 7 from typing import Mapping 8 9 from pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs 10 from pants.engine.fs import Digest 11 from pants.engine.process import ( 12 BinaryNotFoundError, 13 BinaryPath, 14 BinaryPathRequest, 15 BinaryPaths, 16 BinaryPathTest, 17 Process, 18 SearchPath, 19 ) 20 from pants.engine.rules import Get, collect_rules, rule 21 from pants.util.logging import LogLevel 22 from pants.util.strutil import pluralize 23 24 25 class DockerBinary(BinaryPath): 26 """The `docker` binary.""" 27 28 DEFAULT_SEARCH_PATH = SearchPath(("/usr/bin", "/bin", "/usr/local/bin")) 29 30 def build_image( 31 self, 32 tags: tuple[str, ...], 33 digest: Digest, 34 dockerfile: str | None = None, 35 build_args: DockerBuildArgs | None = None, 36 env: Mapping[str, str] | None = None, 37 ) -> Process: 38 args = [self.path, "build"] 39 40 for tag in tags: 41 args.extend(["-t", tag]) 42 43 if build_args: 44 for build_arg in build_args: 45 args.extend(["--build-arg", build_arg]) 46 47 if dockerfile: 48 args.extend(["-f", dockerfile]) 49 50 # Add build context root. 51 args.append(".") 52 53 return Process( 54 argv=tuple(args), 55 description=( 56 f"Building docker image {tags[0]}" 57 + (f" +{pluralize(len(tags)-1, 'additional tag')}." 
if len(tags) > 1 else ".") 58 ), 59 env=env, 60 input_digest=digest, 61 ) 62 63 def push_image(self, tags: tuple[str, ...]) -> Process | None: 64 if not tags: 65 return None 66 67 return Process( 68 argv=(self.path, "push", *tags), description="Pushing docker image {tags[0]}" 69 ) 70 71 72 @dataclass(frozen=True) 73 class DockerBinaryRequest: 74 search_path: SearchPath = DockerBinary.DEFAULT_SEARCH_PATH 75 76 77 @rule(desc="Finding the `docker` binary", level=LogLevel.DEBUG) 78 async def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary: 79 request = BinaryPathRequest( 80 binary_name="docker", 81 search_path=docker_request.search_path, 82 test=BinaryPathTest(args=["-v"]), 83 ) 84 paths = await Get(BinaryPaths, BinaryPathRequest, request) 85 first_path = paths.first_path 86 if not first_path: 87 raise BinaryNotFoundError.from_request(request, rationale="interact with the docker daemon") 88 return DockerBinary(first_path.path, first_path.fingerprint) 89 90 91 @rule 92 async def get_docker() -> DockerBinary: 93 return await Get(DockerBinary, DockerBinaryRequest()) 94 95 96 def rules(): 97 return collect_rules() 98 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/python/pants/backend/docker/util_rules/docker_binary.py b/src/python/pants/backend/docker/util_rules/docker_binary.py --- a/src/python/pants/backend/docker/util_rules/docker_binary.py +++ b/src/python/pants/backend/docker/util_rules/docker_binary.py @@ -15,6 +15,7 @@ BinaryPaths, BinaryPathTest, Process, + ProcessCacheScope, SearchPath, ) from pants.engine.rules import Get, collect_rules, rule @@ -58,6 +59,7 @@ ), env=env, input_digest=digest, + cache_scope=ProcessCacheScope.PER_SESSION, ) def push_image(self, tags: tuple[str, ...]) -> Process | None: @@ -65,7 +67,9 @@ return None return Process( - argv=(self.path, "push", *tags), description="Pushing docker image {tags[0]}" + argv=(self.path, "push", *tags), + cache_scope=ProcessCacheScope.PER_SESSION, + description=f"Pushing docker image {tags[0]}", )
{"golden_diff": "diff --git a/src/python/pants/backend/docker/util_rules/docker_binary.py b/src/python/pants/backend/docker/util_rules/docker_binary.py\n--- a/src/python/pants/backend/docker/util_rules/docker_binary.py\n+++ b/src/python/pants/backend/docker/util_rules/docker_binary.py\n@@ -15,6 +15,7 @@\n BinaryPaths,\n BinaryPathTest,\n Process,\n+ ProcessCacheScope,\n SearchPath,\n )\n from pants.engine.rules import Get, collect_rules, rule\n@@ -58,6 +59,7 @@\n ),\n env=env,\n input_digest=digest,\n+ cache_scope=ProcessCacheScope.PER_SESSION,\n )\n \n def push_image(self, tags: tuple[str, ...]) -> Process | None:\n@@ -65,7 +67,9 @@\n return None\n \n return Process(\n- argv=(self.path, \"push\", *tags), description=\"Pushing docker image {tags[0]}\"\n+ argv=(self.path, \"push\", *tags),\n+ cache_scope=ProcessCacheScope.PER_SESSION,\n+ description=f\"Pushing docker image {tags[0]}\",\n )\n", "issue": "pants package does not build missing docker images if previous build was cached.\n**Describe the bug**\r\nPant's caching of build targets does not take into consideration that the final target does not exist.\r\n\r\nTake this example: https://www.pantsbuild.org/v2.8/docs/docker#example\r\n\r\n```\r\n$ ./pants package src/docker/hw/Dockerfile\r\n[...]\r\n18:07:29.66 [INFO] Completed: Building src.python.hw/bin.pex\r\n18:07:31.83 [INFO] Completed: Building docker image helloworld:latest\r\n18:07:31.83 [INFO] Built docker image: helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\nhelloworld latest abcdefabcdef 6 seconds ago 420MB\r\n\r\n$ docker rmi helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\n\r\n$ ./pants package src/docker/hw/Dockerfile\r\n19:07:31.83 [INFO] Built docker image: helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\n```\r\nIf you did the equivalent commands for the `helloworld.pex` files, `pants package` would replace the missing file in the `dist/` folder.\r\n\r\n**Pants version**\r\n2.8rc1\r\n\r\n**OS**\r\nLinux\r\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Mapping\n\nfrom pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs\nfrom pants.engine.fs import Digest\nfrom pants.engine.process import (\n BinaryNotFoundError,\n BinaryPath,\n BinaryPathRequest,\n BinaryPaths,\n BinaryPathTest,\n Process,\n SearchPath,\n)\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\nclass DockerBinary(BinaryPath):\n \"\"\"The `docker` binary.\"\"\"\n\n DEFAULT_SEARCH_PATH = SearchPath((\"/usr/bin\", \"/bin\", \"/usr/local/bin\"))\n\n def build_image(\n self,\n tags: tuple[str, ...],\n digest: Digest,\n dockerfile: str | None = None,\n build_args: DockerBuildArgs | None = None,\n env: Mapping[str, str] | None = None,\n ) -> Process:\n args = [self.path, \"build\"]\n\n for tag in tags:\n args.extend([\"-t\", tag])\n\n if build_args:\n for build_arg in build_args:\n args.extend([\"--build-arg\", build_arg])\n\n if dockerfile:\n args.extend([\"-f\", dockerfile])\n\n # Add build context root.\n args.append(\".\")\n\n return Process(\n argv=tuple(args),\n description=(\n f\"Building docker image {tags[0]}\"\n + (f\" +{pluralize(len(tags)-1, 'additional tag')}.\" if len(tags) 
> 1 else \".\")\n ),\n env=env,\n input_digest=digest,\n )\n\n def push_image(self, tags: tuple[str, ...]) -> Process | None:\n if not tags:\n return None\n\n return Process(\n argv=(self.path, \"push\", *tags), description=\"Pushing docker image {tags[0]}\"\n )\n\n\n@dataclass(frozen=True)\nclass DockerBinaryRequest:\n search_path: SearchPath = DockerBinary.DEFAULT_SEARCH_PATH\n\n\n@rule(desc=\"Finding the `docker` binary\", level=LogLevel.DEBUG)\nasync def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary:\n request = BinaryPathRequest(\n binary_name=\"docker\",\n search_path=docker_request.search_path,\n test=BinaryPathTest(args=[\"-v\"]),\n )\n paths = await Get(BinaryPaths, BinaryPathRequest, request)\n first_path = paths.first_path\n if not first_path:\n raise BinaryNotFoundError.from_request(request, rationale=\"interact with the docker daemon\")\n return DockerBinary(first_path.path, first_path.fingerprint)\n\n\n@rule\nasync def get_docker() -> DockerBinary:\n return await Get(DockerBinary, DockerBinaryRequest())\n\n\ndef rules():\n return collect_rules()\n", "path": "src/python/pants/backend/docker/util_rules/docker_binary.py"}], "after_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Mapping\n\nfrom pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs\nfrom pants.engine.fs import Digest\nfrom pants.engine.process import (\n BinaryNotFoundError,\n BinaryPath,\n BinaryPathRequest,\n BinaryPaths,\n BinaryPathTest,\n Process,\n ProcessCacheScope,\n SearchPath,\n)\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\nclass DockerBinary(BinaryPath):\n \"\"\"The `docker` binary.\"\"\"\n\n DEFAULT_SEARCH_PATH = SearchPath((\"/usr/bin\", \"/bin\", \"/usr/local/bin\"))\n\n def build_image(\n self,\n tags: tuple[str, ...],\n digest: Digest,\n dockerfile: str | None = None,\n build_args: DockerBuildArgs | None = None,\n env: Mapping[str, str] | None = None,\n ) -> Process:\n args = [self.path, \"build\"]\n\n for tag in tags:\n args.extend([\"-t\", tag])\n\n if build_args:\n for build_arg in build_args:\n args.extend([\"--build-arg\", build_arg])\n\n if dockerfile:\n args.extend([\"-f\", dockerfile])\n\n # Add build context root.\n args.append(\".\")\n\n return Process(\n argv=tuple(args),\n description=(\n f\"Building docker image {tags[0]}\"\n + (f\" +{pluralize(len(tags)-1, 'additional tag')}.\" if len(tags) > 1 else \".\")\n ),\n env=env,\n input_digest=digest,\n cache_scope=ProcessCacheScope.PER_SESSION,\n )\n\n def push_image(self, tags: tuple[str, ...]) -> Process | None:\n if not tags:\n return None\n\n return Process(\n argv=(self.path, \"push\", *tags),\n cache_scope=ProcessCacheScope.PER_SESSION,\n description=f\"Pushing docker image {tags[0]}\",\n )\n\n\n@dataclass(frozen=True)\nclass DockerBinaryRequest:\n search_path: SearchPath = DockerBinary.DEFAULT_SEARCH_PATH\n\n\n@rule(desc=\"Finding the `docker` binary\", level=LogLevel.DEBUG)\nasync def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary:\n request = BinaryPathRequest(\n binary_name=\"docker\",\n search_path=docker_request.search_path,\n test=BinaryPathTest(args=[\"-v\"]),\n )\n paths = await Get(BinaryPaths, BinaryPathRequest, request)\n first_path = paths.first_path\n if not 
first_path:\n raise BinaryNotFoundError.from_request(request, rationale=\"interact with the docker daemon\")\n return DockerBinary(first_path.path, first_path.fingerprint)\n\n\n@rule\nasync def get_docker() -> DockerBinary:\n return await Get(DockerBinary, DockerBinaryRequest())\n\n\ndef rules():\n return collect_rules()\n", "path": "src/python/pants/backend/docker/util_rules/docker_binary.py"}]}
1,400
245
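The Pants record above fixes stale `docker build`/`docker push` results by giving both processes `cache_scope=ProcessCacheScope.PER_SESSION`. A minimal sketch of the failure mode, using a stand-in cache and build step rather than any real Pants API:

```python
# Illustrative stand-ins only -- not Pants internals. A result cache that only
# remembers return values cannot notice that the real artifact (the docker
# image in the daemon) has since been deleted.

class NaiveCache:
    def __init__(self) -> None:
        self._results: dict[str, str] = {}

    def run(self, key: str, step) -> str:
        if key in self._results:          # hit: the side effect is skipped
            return self._results[key]
        result = step()                   # miss: the image is actually built
        self._results[key] = result
        return result


def build_image(tag: str) -> str:
    print(f"docker build -t {tag} .")     # stands in for the real subprocess
    return tag


cache = NaiveCache()
cache.run("helloworld:latest", lambda: build_image("helloworld:latest"))  # builds
cache.run("helloworld:latest", lambda: build_image("helloworld:latest"))  # skipped,
# even if `docker rmi helloworld:latest` ran in between -- the situation the
# per-session cache scope in the patch is meant to avoid.
```

Scoping the process cache to the session keeps deduplication within a single run while forcing the side-effecting docker steps to execute again on the next invocation, so a deleted image gets rebuilt.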
gh_patches_debug_17413
rasdani/github-patches
git_diff
pyodide__pyodide-3853
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Proxied JS method.apply(context, list) fail while method(*list) doesn't ## 🐛 Bug While working on [this PyScript issue](https://github.com/pyscript/pyscript/pull/1459) I've noticed that `context.method.apply(context, list)` doesn't work while `context.method(*list)` does. I don't mind using the latter as that's also more Pythonic but that might surprise JS developers using Pyodide proxies that mimic JS APIs. ### To Reproduce ```python import js classList = js.document.body.classList classList.add.apply(classList, ["a", "b"]) ``` ### Expected behavior The method should be invoked with *n* arguments as by JS specs. ### Environment - Pyodide Version<!-- (e.g. 1.8.1) -->: latest - Browser version<!-- (e.g. Chrome 95.0.4638.54) -->: any - Any other relevant information: nope <!-- If you are building Pyodide by yourself, please also include these information: --> <!-- - Commit hash of Pyodide git repository: - Build environment<!--(e.g. Ubuntu 18.04, pyodide/pyodide-env:19 docker)- ->: --> ### Additional context Happy to have it as won't fix but at least there's a related issue that explain the *gotcha*. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py` Content: ``` 1 from sphinx.application import Sphinx 2 from sphinx.ext.intersphinx import InventoryAdapter 3 4 DATA = { 5 "js:function": { 6 "setTimeout": "API/", 7 "clearTimeout": "API/", 8 "setInterval": "API/", 9 "clearInterval": "API/", 10 "fetch": "API/", 11 "eval": "$global/", 12 "Object.fromEntries": "$global/", 13 "Reflect.ownKeys": "$global/", 14 "Array.from": "$global/", 15 "Atomics.wait": "$global/", 16 }, 17 "js:class": { 18 "Array": "$global/", 19 "NodeList": "API/", 20 "HTMLCollection": "API/", 21 "Generator": "$global/", 22 "AsyncGenerator": "$global/", 23 "Date": "$global/", 24 "ArrayBuffer": "$global/", 25 "SharedArrayBuffer": "$global/", 26 "TypedArray": "$global/", 27 "TextEncoder": "$global/", 28 "TextDecoder": "$global/", 29 "DataView": "$global/", 30 "Uint8Array": "$global/", 31 "Map": "$global/", 32 "Set": "$global/", 33 # the JavaScript domain has no exception type for some reason... 
34 "Error": "$global/", 35 "Function": "$global/", 36 "Promise": "$global/", 37 "FileSystemDirectoryHandle": "API/", 38 }, 39 "js:method": { 40 "Iterator.next": "$reference/Iteration_protocols#next", 41 "AsyncIterator.next": "$reference/Iteration_protocols#next_2", 42 "Generator.next": "$global/", 43 "Generator.throw": "$global/", 44 "Generator.return": "$global/", 45 "AsyncGenerator.next": "$global/", 46 "AsyncGenerator.throw": "$global/", 47 "AsyncGenerator.return": "$global/", 48 "Response.clone": "API/", 49 "Response.arrayBuffer": "API/", 50 "EventTarget.addEventListener": "API/", 51 "EventTarget.removeEventListener": "API/", 52 "Promise.then": "$global/", 53 "Promise.catch": "$global/", 54 "Promise.finally": "$global/", 55 "Function.apply": "$global/", 56 "Function.bind": "$global/", 57 "Function.call": "$global/", 58 }, 59 "js:data": { 60 "Iterable": "$reference/Iteration_protocols#the_iterable_protocol", 61 "IteratorResult": "$reference/Iteration_protocols#next", 62 "Iterator": "$reference/Iteration_protocols#the_iterator_protocol", 63 "AsyncIterator": "$reference/Iteration_protocols#the_async_iterator_and_async_iterable_protocols", 64 "Symbol.asyncIterator": "$global/", 65 "Symbol.iterator": "$global/", 66 "Symbol.toStringTag": "$global/", 67 "FinalizationRegistry": "$global/", 68 "globalThis": "$global/", 69 "NaN": "$global/", 70 "undefined": "$global/", 71 "BigInt": "$global/", 72 "Number": "$global/", 73 "String": "$global/", 74 "Boolean": "$global/", 75 "Object": "$global/", 76 "Number.MAX_SAFE_INTEGER": "$global/", 77 "null": "$reference/Operators/", 78 "Response": "API/", 79 "TypedArray.BYTES_PER_ELEMENT": "$global/", 80 }, 81 "js:attribute": { 82 "TypedArray.byteLength": "$global/", 83 "Response.type": "API/", 84 "Response.url": "API/", 85 "Response.statusText": "API/", 86 "Response.bodyUsed": "API/", 87 "Response.ok": "API/", 88 "Response.redirected": "API/", 89 "Response.status": "API/", 90 }, 91 "std:label": {"async function": "$reference/Statements/async_function"}, 92 } 93 94 JSDATA = set(DATA["js:data"].keys()) 95 JSDATA.update([x.lower() for x in JSDATA]) 96 JSDATA.add("void") 97 JSDATA.add("any") 98 JSCLASS = set(DATA["js:class"].keys()) 99 100 # Each entry is a four tuple: 101 # (project_name, project_version, url, link_text) 102 # 103 # If link_text is "-" the original name of the xref will be used as the link 104 # text which is good enough for us. 
105 PROJECT_NAME = "MDN docs" 106 PROJECT_VERSION = "" # MDN docs are not really versioned 107 USE_NAME_AS_LINK_TEXT = "-" 108 109 INVDATA: dict[str, dict[str, tuple[str, str, str, str]]] = {} 110 for type, entries in DATA.items(): 111 type_values = INVDATA.setdefault(type, {}) 112 for key, value in entries.items(): 113 value = value.replace("$reference", "JavaScript/Reference") 114 value = value.replace("$global", "JavaScript/Reference/Global_Objects") 115 if value.endswith("/"): 116 value += key.replace(".", "/") 117 url = f"https://developer.mozilla.org/en-US/docs/Web/{value}" 118 type_values[key] = (PROJECT_NAME, PROJECT_VERSION, url, USE_NAME_AS_LINK_TEXT) 119 type_values[key.lower()] = ( 120 PROJECT_NAME, 121 PROJECT_VERSION, 122 url, 123 USE_NAME_AS_LINK_TEXT, 124 ) 125 126 for key, url in [ 127 ("void", "https://www.typescriptlang.org/docs/handbook/2/functions.html#void"), 128 ("any", "https://www.typescriptlang.org/docs/handbook/2/everyday-types.html#any"), 129 ]: 130 INVDATA["js:data"][key] = ( 131 "typescript docs", 132 "", 133 url, 134 "-", 135 ) 136 137 for key in ["stdin", "stdout", "stderr"]: 138 INVDATA["js:data"][f"process.{key}"] = ( 139 "node docs", 140 "", 141 f"https://nodejs.org/api/process.html#process{key}", 142 "-", 143 ) 144 145 146 def add_mdn_xrefs(app: Sphinx) -> None: 147 """Add cross referencing to Mozilla Developer Network documentation""" 148 inventories = InventoryAdapter(app.builder.env) 149 inventories.named_inventory["mdn"] = INVDATA 150 for type, objects in INVDATA.items(): 151 inventories.main_inventory.setdefault(type, {}).update(objects) 152 153 154 __all__ = ["add_mdn_xrefs"] 155 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py b/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py --- a/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py +++ b/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py @@ -55,6 +55,25 @@ "Function.apply": "$global/", "Function.bind": "$global/", "Function.call": "$global/", + "Array.join": "$global/", + "Array.slice": "$global/", + "Array.lastIndexOf": "$global/", + "Array.indexOf": "$global/", + "Array.forEach": "$global/", + "Array.map": "$global/", + "Array.filter": "$global/", + "Array.reduce": "$global/", + "Array.reduceRight": "$global/", + "Array.some": "$global/", + "Array.every": "$global/", + "Array.at": "$global/", + "Array.concat": "$global/", + "Array.includes": "$global/", + "Array.entries": "$global/", + "Array.keys": "$global/", + "Array.values": "$global/", + "Array.find": "$global/", + "Array.findIndex": "$global/", }, "js:data": { "Iterable": "$reference/Iteration_protocols#the_iterable_protocol",
{"golden_diff": "diff --git a/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py b/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py\n--- a/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py\n+++ b/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py\n@@ -55,6 +55,25 @@\n \"Function.apply\": \"$global/\",\n \"Function.bind\": \"$global/\",\n \"Function.call\": \"$global/\",\n+ \"Array.join\": \"$global/\",\n+ \"Array.slice\": \"$global/\",\n+ \"Array.lastIndexOf\": \"$global/\",\n+ \"Array.indexOf\": \"$global/\",\n+ \"Array.forEach\": \"$global/\",\n+ \"Array.map\": \"$global/\",\n+ \"Array.filter\": \"$global/\",\n+ \"Array.reduce\": \"$global/\",\n+ \"Array.reduceRight\": \"$global/\",\n+ \"Array.some\": \"$global/\",\n+ \"Array.every\": \"$global/\",\n+ \"Array.at\": \"$global/\",\n+ \"Array.concat\": \"$global/\",\n+ \"Array.includes\": \"$global/\",\n+ \"Array.entries\": \"$global/\",\n+ \"Array.keys\": \"$global/\",\n+ \"Array.values\": \"$global/\",\n+ \"Array.find\": \"$global/\",\n+ \"Array.findIndex\": \"$global/\",\n },\n \"js:data\": {\n \"Iterable\": \"$reference/Iteration_protocols#the_iterable_protocol\",\n", "issue": "Proxied JS method.apply(context, list) fail while method(*list) doesn't\n## \ud83d\udc1b Bug\r\n\r\nWhile working on [this PyScript issue](https://github.com/pyscript/pyscript/pull/1459) I've noticed that `context.method.apply(context, list)` doesn't work while `context.method(*list)` does.\r\n\r\nI don't mind using the latter as that's also more Pythonic but that might surprise JS developers using Pyodide proxies that mimic JS APIs.\r\n\r\n### To Reproduce\r\n\r\n```python\r\nimport js\r\nclassList = js.document.body.classList\r\nclassList.add.apply(classList, [\"a\", \"b\"])\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe method should be invoked with *n* arguments as by JS specs.\r\n\r\n### Environment\r\n\r\n- Pyodide Version<!-- (e.g. 1.8.1) -->: latest\r\n- Browser version<!-- (e.g. Chrome 95.0.4638.54) -->: any\r\n- Any other relevant information: nope\r\n\r\n<!-- If you are building Pyodide by yourself, please also include these information: -->\r\n\r\n<!--\r\n- Commit hash of Pyodide git repository:\r\n- Build environment<!--(e.g. 
Ubuntu 18.04, pyodide/pyodide-env:19 docker)- ->:\r\n-->\r\n\r\n### Additional context\r\n\r\nHappy to have it as won't fix but at least there's a related issue that explain the *gotcha*.\r\n\n", "before_files": [{"content": "from sphinx.application import Sphinx\nfrom sphinx.ext.intersphinx import InventoryAdapter\n\nDATA = {\n \"js:function\": {\n \"setTimeout\": \"API/\",\n \"clearTimeout\": \"API/\",\n \"setInterval\": \"API/\",\n \"clearInterval\": \"API/\",\n \"fetch\": \"API/\",\n \"eval\": \"$global/\",\n \"Object.fromEntries\": \"$global/\",\n \"Reflect.ownKeys\": \"$global/\",\n \"Array.from\": \"$global/\",\n \"Atomics.wait\": \"$global/\",\n },\n \"js:class\": {\n \"Array\": \"$global/\",\n \"NodeList\": \"API/\",\n \"HTMLCollection\": \"API/\",\n \"Generator\": \"$global/\",\n \"AsyncGenerator\": \"$global/\",\n \"Date\": \"$global/\",\n \"ArrayBuffer\": \"$global/\",\n \"SharedArrayBuffer\": \"$global/\",\n \"TypedArray\": \"$global/\",\n \"TextEncoder\": \"$global/\",\n \"TextDecoder\": \"$global/\",\n \"DataView\": \"$global/\",\n \"Uint8Array\": \"$global/\",\n \"Map\": \"$global/\",\n \"Set\": \"$global/\",\n # the JavaScript domain has no exception type for some reason...\n \"Error\": \"$global/\",\n \"Function\": \"$global/\",\n \"Promise\": \"$global/\",\n \"FileSystemDirectoryHandle\": \"API/\",\n },\n \"js:method\": {\n \"Iterator.next\": \"$reference/Iteration_protocols#next\",\n \"AsyncIterator.next\": \"$reference/Iteration_protocols#next_2\",\n \"Generator.next\": \"$global/\",\n \"Generator.throw\": \"$global/\",\n \"Generator.return\": \"$global/\",\n \"AsyncGenerator.next\": \"$global/\",\n \"AsyncGenerator.throw\": \"$global/\",\n \"AsyncGenerator.return\": \"$global/\",\n \"Response.clone\": \"API/\",\n \"Response.arrayBuffer\": \"API/\",\n \"EventTarget.addEventListener\": \"API/\",\n \"EventTarget.removeEventListener\": \"API/\",\n \"Promise.then\": \"$global/\",\n \"Promise.catch\": \"$global/\",\n \"Promise.finally\": \"$global/\",\n \"Function.apply\": \"$global/\",\n \"Function.bind\": \"$global/\",\n \"Function.call\": \"$global/\",\n },\n \"js:data\": {\n \"Iterable\": \"$reference/Iteration_protocols#the_iterable_protocol\",\n \"IteratorResult\": \"$reference/Iteration_protocols#next\",\n \"Iterator\": \"$reference/Iteration_protocols#the_iterator_protocol\",\n \"AsyncIterator\": \"$reference/Iteration_protocols#the_async_iterator_and_async_iterable_protocols\",\n \"Symbol.asyncIterator\": \"$global/\",\n \"Symbol.iterator\": \"$global/\",\n \"Symbol.toStringTag\": \"$global/\",\n \"FinalizationRegistry\": \"$global/\",\n \"globalThis\": \"$global/\",\n \"NaN\": \"$global/\",\n \"undefined\": \"$global/\",\n \"BigInt\": \"$global/\",\n \"Number\": \"$global/\",\n \"String\": \"$global/\",\n \"Boolean\": \"$global/\",\n \"Object\": \"$global/\",\n \"Number.MAX_SAFE_INTEGER\": \"$global/\",\n \"null\": \"$reference/Operators/\",\n \"Response\": \"API/\",\n \"TypedArray.BYTES_PER_ELEMENT\": \"$global/\",\n },\n \"js:attribute\": {\n \"TypedArray.byteLength\": \"$global/\",\n \"Response.type\": \"API/\",\n \"Response.url\": \"API/\",\n \"Response.statusText\": \"API/\",\n \"Response.bodyUsed\": \"API/\",\n \"Response.ok\": \"API/\",\n \"Response.redirected\": \"API/\",\n \"Response.status\": \"API/\",\n },\n \"std:label\": {\"async function\": \"$reference/Statements/async_function\"},\n}\n\nJSDATA = set(DATA[\"js:data\"].keys())\nJSDATA.update([x.lower() for x in JSDATA])\nJSDATA.add(\"void\")\nJSDATA.add(\"any\")\nJSCLASS = 
set(DATA[\"js:class\"].keys())\n\n# Each entry is a four tuple:\n# (project_name, project_version, url, link_text)\n#\n# If link_text is \"-\" the original name of the xref will be used as the link\n# text which is good enough for us.\nPROJECT_NAME = \"MDN docs\"\nPROJECT_VERSION = \"\" # MDN docs are not really versioned\nUSE_NAME_AS_LINK_TEXT = \"-\"\n\nINVDATA: dict[str, dict[str, tuple[str, str, str, str]]] = {}\nfor type, entries in DATA.items():\n type_values = INVDATA.setdefault(type, {})\n for key, value in entries.items():\n value = value.replace(\"$reference\", \"JavaScript/Reference\")\n value = value.replace(\"$global\", \"JavaScript/Reference/Global_Objects\")\n if value.endswith(\"/\"):\n value += key.replace(\".\", \"/\")\n url = f\"https://developer.mozilla.org/en-US/docs/Web/{value}\"\n type_values[key] = (PROJECT_NAME, PROJECT_VERSION, url, USE_NAME_AS_LINK_TEXT)\n type_values[key.lower()] = (\n PROJECT_NAME,\n PROJECT_VERSION,\n url,\n USE_NAME_AS_LINK_TEXT,\n )\n\nfor key, url in [\n (\"void\", \"https://www.typescriptlang.org/docs/handbook/2/functions.html#void\"),\n (\"any\", \"https://www.typescriptlang.org/docs/handbook/2/everyday-types.html#any\"),\n]:\n INVDATA[\"js:data\"][key] = (\n \"typescript docs\",\n \"\",\n url,\n \"-\",\n )\n\nfor key in [\"stdin\", \"stdout\", \"stderr\"]:\n INVDATA[\"js:data\"][f\"process.{key}\"] = (\n \"node docs\",\n \"\",\n f\"https://nodejs.org/api/process.html#process{key}\",\n \"-\",\n )\n\n\ndef add_mdn_xrefs(app: Sphinx) -> None:\n \"\"\"Add cross referencing to Mozilla Developer Network documentation\"\"\"\n inventories = InventoryAdapter(app.builder.env)\n inventories.named_inventory[\"mdn\"] = INVDATA\n for type, objects in INVDATA.items():\n inventories.main_inventory.setdefault(type, {}).update(objects)\n\n\n__all__ = [\"add_mdn_xrefs\"]\n", "path": "docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py"}], "after_files": [{"content": "from sphinx.application import Sphinx\nfrom sphinx.ext.intersphinx import InventoryAdapter\n\nDATA = {\n \"js:function\": {\n \"setTimeout\": \"API/\",\n \"clearTimeout\": \"API/\",\n \"setInterval\": \"API/\",\n \"clearInterval\": \"API/\",\n \"fetch\": \"API/\",\n \"eval\": \"$global/\",\n \"Object.fromEntries\": \"$global/\",\n \"Reflect.ownKeys\": \"$global/\",\n \"Array.from\": \"$global/\",\n \"Atomics.wait\": \"$global/\",\n },\n \"js:class\": {\n \"Array\": \"$global/\",\n \"NodeList\": \"API/\",\n \"HTMLCollection\": \"API/\",\n \"Generator\": \"$global/\",\n \"AsyncGenerator\": \"$global/\",\n \"Date\": \"$global/\",\n \"ArrayBuffer\": \"$global/\",\n \"SharedArrayBuffer\": \"$global/\",\n \"TypedArray\": \"$global/\",\n \"TextEncoder\": \"$global/\",\n \"TextDecoder\": \"$global/\",\n \"DataView\": \"$global/\",\n \"Uint8Array\": \"$global/\",\n \"Map\": \"$global/\",\n \"Set\": \"$global/\",\n # the JavaScript domain has no exception type for some reason...\n \"Error\": \"$global/\",\n \"Function\": \"$global/\",\n \"Promise\": \"$global/\",\n \"FileSystemDirectoryHandle\": \"API/\",\n },\n \"js:method\": {\n \"Iterator.next\": \"$reference/Iteration_protocols#next\",\n \"AsyncIterator.next\": \"$reference/Iteration_protocols#next_2\",\n \"Generator.next\": \"$global/\",\n \"Generator.throw\": \"$global/\",\n \"Generator.return\": \"$global/\",\n \"AsyncGenerator.next\": \"$global/\",\n \"AsyncGenerator.throw\": \"$global/\",\n \"AsyncGenerator.return\": \"$global/\",\n \"Response.clone\": \"API/\",\n \"Response.arrayBuffer\": \"API/\",\n \"EventTarget.addEventListener\": \"API/\",\n 
\"EventTarget.removeEventListener\": \"API/\",\n \"Promise.then\": \"$global/\",\n \"Promise.catch\": \"$global/\",\n \"Promise.finally\": \"$global/\",\n \"Function.apply\": \"$global/\",\n \"Function.bind\": \"$global/\",\n \"Function.call\": \"$global/\",\n \"Array.join\": \"$global/\",\n \"Array.slice\": \"$global/\",\n \"Array.lastIndexOf\": \"$global/\",\n \"Array.indexOf\": \"$global/\",\n \"Array.forEach\": \"$global/\",\n \"Array.map\": \"$global/\",\n \"Array.filter\": \"$global/\",\n \"Array.reduce\": \"$global/\",\n \"Array.reduceRight\": \"$global/\",\n \"Array.some\": \"$global/\",\n \"Array.every\": \"$global/\",\n \"Array.at\": \"$global/\",\n \"Array.concat\": \"$global/\",\n \"Array.includes\": \"$global/\",\n \"Array.entries\": \"$global/\",\n \"Array.keys\": \"$global/\",\n \"Array.values\": \"$global/\",\n \"Array.find\": \"$global/\",\n \"Array.findIndex\": \"$global/\",\n },\n \"js:data\": {\n \"Iterable\": \"$reference/Iteration_protocols#the_iterable_protocol\",\n \"IteratorResult\": \"$reference/Iteration_protocols#next\",\n \"Iterator\": \"$reference/Iteration_protocols#the_iterator_protocol\",\n \"AsyncIterator\": \"$reference/Iteration_protocols#the_async_iterator_and_async_iterable_protocols\",\n \"Symbol.asyncIterator\": \"$global/\",\n \"Symbol.iterator\": \"$global/\",\n \"Symbol.toStringTag\": \"$global/\",\n \"FinalizationRegistry\": \"$global/\",\n \"globalThis\": \"$global/\",\n \"NaN\": \"$global/\",\n \"undefined\": \"$global/\",\n \"BigInt\": \"$global/\",\n \"Number\": \"$global/\",\n \"String\": \"$global/\",\n \"Boolean\": \"$global/\",\n \"Object\": \"$global/\",\n \"Number.MAX_SAFE_INTEGER\": \"$global/\",\n \"null\": \"$reference/Operators/\",\n \"Response\": \"API/\",\n \"TypedArray.BYTES_PER_ELEMENT\": \"$global/\",\n },\n \"js:attribute\": {\n \"TypedArray.byteLength\": \"$global/\",\n \"Response.type\": \"API/\",\n \"Response.url\": \"API/\",\n \"Response.statusText\": \"API/\",\n \"Response.bodyUsed\": \"API/\",\n \"Response.ok\": \"API/\",\n \"Response.redirected\": \"API/\",\n \"Response.status\": \"API/\",\n },\n \"std:label\": {\"async function\": \"$reference/Statements/async_function\"},\n}\n\nJSDATA = set(DATA[\"js:data\"].keys())\nJSDATA.update([x.lower() for x in JSDATA])\nJSDATA.add(\"void\")\nJSDATA.add(\"any\")\nJSCLASS = set(DATA[\"js:class\"].keys())\n\n# Each entry is a four tuple:\n# (project_name, project_version, url, link_text)\n#\n# If link_text is \"-\" the original name of the xref will be used as the link\n# text which is good enough for us.\nPROJECT_NAME = \"MDN docs\"\nPROJECT_VERSION = \"\" # MDN docs are not really versioned\nUSE_NAME_AS_LINK_TEXT = \"-\"\n\nINVDATA: dict[str, dict[str, tuple[str, str, str, str]]] = {}\nfor type, entries in DATA.items():\n type_values = INVDATA.setdefault(type, {})\n for key, value in entries.items():\n value = value.replace(\"$reference\", \"JavaScript/Reference\")\n value = value.replace(\"$global\", \"JavaScript/Reference/Global_Objects\")\n if value.endswith(\"/\"):\n value += key.replace(\".\", \"/\")\n url = f\"https://developer.mozilla.org/en-US/docs/Web/{value}\"\n type_values[key] = (PROJECT_NAME, PROJECT_VERSION, url, USE_NAME_AS_LINK_TEXT)\n type_values[key.lower()] = (\n PROJECT_NAME,\n PROJECT_VERSION,\n url,\n USE_NAME_AS_LINK_TEXT,\n )\n\nfor key, url in [\n (\"void\", \"https://www.typescriptlang.org/docs/handbook/2/functions.html#void\"),\n (\"any\", \"https://www.typescriptlang.org/docs/handbook/2/everyday-types.html#any\"),\n]:\n INVDATA[\"js:data\"][key] = (\n 
\"typescript docs\",\n \"\",\n url,\n \"-\",\n )\n\nfor key in [\"stdin\", \"stdout\", \"stderr\"]:\n INVDATA[\"js:data\"][f\"process.{key}\"] = (\n \"node docs\",\n \"\",\n f\"https://nodejs.org/api/process.html#process{key}\",\n \"-\",\n )\n\n\ndef add_mdn_xrefs(app: Sphinx) -> None:\n \"\"\"Add cross referencing to Mozilla Developer Network documentation\"\"\"\n inventories = InventoryAdapter(app.builder.env)\n inventories.named_inventory[\"mdn\"] = INVDATA\n for type, objects in INVDATA.items():\n inventories.main_inventory.setdefault(type, {}).update(objects)\n\n\n__all__ = [\"add_mdn_xrefs\"]\n", "path": "docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py"}]}
2,214
309
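The golden diff for the Pyodide record above only touches documentation cross-references; the gotcha described in the issue itself is easiest to see in code. The sketch below assumes a Pyodide runtime in the browser, where the `js` module and `document` are available:

```python
import js  # only available inside a Pyodide runtime

class_list = js.document.body.classList
names = ["a", "b"]

# JS-style dispatch through the proxied Function.apply was reported to fail,
# because the Python list is not unpacked into separate JS arguments:
# class_list.add.apply(class_list, names)

# Unpacking on the Python side behaves as expected:
class_list.add(*names)
```

The spread call is also what the issue author settles on as the more Pythonic form; the `.apply` variant matters mainly because JS developers will reach for it first.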
gh_patches_debug_23656
rasdani/github-patches
git_diff
OpenMined__PySyft-4991
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Does the framework support IPv6 networks? Is this framework suitable for IPv6 network environment? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/syft/grid/example_nodes/network.py` Content: ``` 1 """ 2 The purpose of this application is to allow us to dev and test PySyft 3 functionality on an actual local network. This is NOT meant to be run in 4 production (that's the *actual* grid's job). 5 6 For example: 7 $ python src/syft/grid/example_nodes/network.py 8 9 """ 10 # stdlib 11 import os 12 13 # third party 14 import flask 15 from flask import Flask 16 from flask import Response 17 from nacl.encoding import HexEncoder 18 19 # syft absolute 20 from syft.core.common.message import SignedImmediateSyftMessageWithReply 21 from syft.core.common.message import SignedImmediateSyftMessageWithoutReply 22 from syft.core.common.serde.deserialize import _deserialize 23 from syft.core.node.network.network import Network 24 from syft.grid.services.signaling_service import PullSignalingService 25 from syft.grid.services.signaling_service import PushSignalingService 26 from syft.grid.services.signaling_service import RegisterDuetPeerService 27 28 app = Flask(__name__) 29 30 network = Network(name="om-net") 31 32 network.immediate_services_without_reply.append(PushSignalingService) 33 network.immediate_services_with_reply.append(PullSignalingService) 34 network.immediate_services_with_reply.append(RegisterDuetPeerService) 35 network._register_services() # re-register all services including SignalingService 36 37 38 @app.route("/metadata") 39 def get_metadata() -> flask.Response: 40 metadata = network.get_metadata_for_client() 41 metadata_proto = metadata.serialize() 42 r = Response( 43 response=metadata_proto.SerializeToString(), 44 status=200, 45 ) 46 r.headers["Content-Type"] = "application/octet-stream" 47 return r 48 49 50 @app.route("/", methods=["POST"]) 51 def process_network_msgs() -> flask.Response: 52 data = flask.request.get_data() 53 obj_msg = _deserialize(blob=data, from_bytes=True) 54 if isinstance(obj_msg, SignedImmediateSyftMessageWithReply): 55 print( 56 f"Signaling server SignedImmediateSyftMessageWithReply: {obj_msg.message} watch" 57 ) 58 reply = network.recv_immediate_msg_with_reply(msg=obj_msg) 59 r = Response(response=reply.serialize(to_bytes=True), status=200) 60 r.headers["Content-Type"] = "application/octet-stream" 61 return r 62 elif isinstance(obj_msg, SignedImmediateSyftMessageWithoutReply): 63 print( 64 f"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch" 65 ) 66 network.recv_immediate_msg_without_reply(msg=obj_msg) 67 r = Response(status=200) 68 return r 69 else: 70 print( 71 f"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch" 72 ) 73 network.recv_eventual_msg_without_reply(msg=obj_msg) 74 r = Response(status=200) 75 return r 76 77 78 def run() -> None: 79 global network 80 print("====================================") 81 print("========== NODE ROOT KEY ===========") 82 print("====================================") 83 # this signing_key is to aid in local development and is not used in the real 84 # PyGrid implementation 85 PORT = os.getenv("PORT", 5000) 86 print(f"Starting Node on PORT: {PORT}") 87 print(network.signing_key.encode(encoder=HexEncoder).decode("utf-8"), "\n") 88 app.run(host="0.0.0.0", 
port=int(PORT)) # nosec 89 90 91 run() 92 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/syft/grid/example_nodes/network.py b/src/syft/grid/example_nodes/network.py --- a/src/syft/grid/example_nodes/network.py +++ b/src/syft/grid/example_nodes/network.py @@ -9,6 +9,7 @@ """ # stdlib import os +import sys # third party import flask @@ -77,15 +78,25 @@ def run() -> None: global network - print("====================================") - print("========== NODE ROOT KEY ===========") - print("====================================") + + IP_MODE = os.getenv("IP_MODE", "IPV4") # default to ipv4 + if len(sys.argv) > 1: + IP_MODE = sys.argv[1] + + IP_MODE = "IPV6" if IP_MODE == "IPV6" else "IPV4" # this signing_key is to aid in local development and is not used in the real # PyGrid implementation + HOST = "0.0.0.0" if IP_MODE == "IPV4" else "::" # nosec PORT = os.getenv("PORT", 5000) - print(f"Starting Node on PORT: {PORT}") + + print("====================================") + print("========== NODE ROOT KEY ===========") + print("====================================") print(network.signing_key.encode(encoder=HexEncoder).decode("utf-8"), "\n") - app.run(host="0.0.0.0", port=int(PORT)) # nosec + + print(f"Using {IP_MODE} and listening on port {PORT}") + + app.run(host=HOST, port=int(PORT)) run()
{"golden_diff": "diff --git a/src/syft/grid/example_nodes/network.py b/src/syft/grid/example_nodes/network.py\n--- a/src/syft/grid/example_nodes/network.py\n+++ b/src/syft/grid/example_nodes/network.py\n@@ -9,6 +9,7 @@\n \"\"\"\n # stdlib\n import os\n+import sys\n \n # third party\n import flask\n@@ -77,15 +78,25 @@\n \n def run() -> None:\n global network\n- print(\"====================================\")\n- print(\"========== NODE ROOT KEY ===========\")\n- print(\"====================================\")\n+\n+ IP_MODE = os.getenv(\"IP_MODE\", \"IPV4\") # default to ipv4\n+ if len(sys.argv) > 1:\n+ IP_MODE = sys.argv[1]\n+\n+ IP_MODE = \"IPV6\" if IP_MODE == \"IPV6\" else \"IPV4\"\n # this signing_key is to aid in local development and is not used in the real\n # PyGrid implementation\n+ HOST = \"0.0.0.0\" if IP_MODE == \"IPV4\" else \"::\" # nosec\n PORT = os.getenv(\"PORT\", 5000)\n- print(f\"Starting Node on PORT: {PORT}\")\n+\n+ print(\"====================================\")\n+ print(\"========== NODE ROOT KEY ===========\")\n+ print(\"====================================\")\n print(network.signing_key.encode(encoder=HexEncoder).decode(\"utf-8\"), \"\\n\")\n- app.run(host=\"0.0.0.0\", port=int(PORT)) # nosec\n+\n+ print(f\"Using {IP_MODE} and listening on port {PORT}\")\n+\n+ app.run(host=HOST, port=int(PORT))\n \n \n run()\n", "issue": "Does the framework support IPv6 networks?\nIs this framework suitable for IPv6 network environment? \n", "before_files": [{"content": "\"\"\"\nThe purpose of this application is to allow us to dev and test PySyft\nfunctionality on an actual local network. This is NOT meant to be run in\nproduction (that's the *actual* grid's job).\n\nFor example:\n$ python src/syft/grid/example_nodes/network.py\n\n\"\"\"\n# stdlib\nimport os\n\n# third party\nimport flask\nfrom flask import Flask\nfrom flask import Response\nfrom nacl.encoding import HexEncoder\n\n# syft absolute\nfrom syft.core.common.message import SignedImmediateSyftMessageWithReply\nfrom syft.core.common.message import SignedImmediateSyftMessageWithoutReply\nfrom syft.core.common.serde.deserialize import _deserialize\nfrom syft.core.node.network.network import Network\nfrom syft.grid.services.signaling_service import PullSignalingService\nfrom syft.grid.services.signaling_service import PushSignalingService\nfrom syft.grid.services.signaling_service import RegisterDuetPeerService\n\napp = Flask(__name__)\n\nnetwork = Network(name=\"om-net\")\n\nnetwork.immediate_services_without_reply.append(PushSignalingService)\nnetwork.immediate_services_with_reply.append(PullSignalingService)\nnetwork.immediate_services_with_reply.append(RegisterDuetPeerService)\nnetwork._register_services() # re-register all services including SignalingService\n\n\[email protected](\"/metadata\")\ndef get_metadata() -> flask.Response:\n metadata = network.get_metadata_for_client()\n metadata_proto = metadata.serialize()\n r = Response(\n response=metadata_proto.SerializeToString(),\n status=200,\n )\n r.headers[\"Content-Type\"] = \"application/octet-stream\"\n return r\n\n\[email protected](\"/\", methods=[\"POST\"])\ndef process_network_msgs() -> flask.Response:\n data = flask.request.get_data()\n obj_msg = _deserialize(blob=data, from_bytes=True)\n if isinstance(obj_msg, SignedImmediateSyftMessageWithReply):\n print(\n f\"Signaling server SignedImmediateSyftMessageWithReply: {obj_msg.message} watch\"\n )\n reply = network.recv_immediate_msg_with_reply(msg=obj_msg)\n r = Response(response=reply.serialize(to_bytes=True), 
status=200)\n r.headers[\"Content-Type\"] = \"application/octet-stream\"\n return r\n elif isinstance(obj_msg, SignedImmediateSyftMessageWithoutReply):\n print(\n f\"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch\"\n )\n network.recv_immediate_msg_without_reply(msg=obj_msg)\n r = Response(status=200)\n return r\n else:\n print(\n f\"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch\"\n )\n network.recv_eventual_msg_without_reply(msg=obj_msg)\n r = Response(status=200)\n return r\n\n\ndef run() -> None:\n global network\n print(\"====================================\")\n print(\"========== NODE ROOT KEY ===========\")\n print(\"====================================\")\n # this signing_key is to aid in local development and is not used in the real\n # PyGrid implementation\n PORT = os.getenv(\"PORT\", 5000)\n print(f\"Starting Node on PORT: {PORT}\")\n print(network.signing_key.encode(encoder=HexEncoder).decode(\"utf-8\"), \"\\n\")\n app.run(host=\"0.0.0.0\", port=int(PORT)) # nosec\n\n\nrun()\n", "path": "src/syft/grid/example_nodes/network.py"}], "after_files": [{"content": "\"\"\"\nThe purpose of this application is to allow us to dev and test PySyft\nfunctionality on an actual local network. This is NOT meant to be run in\nproduction (that's the *actual* grid's job).\n\nFor example:\n$ python src/syft/grid/example_nodes/network.py\n\n\"\"\"\n# stdlib\nimport os\nimport sys\n\n# third party\nimport flask\nfrom flask import Flask\nfrom flask import Response\nfrom nacl.encoding import HexEncoder\n\n# syft absolute\nfrom syft.core.common.message import SignedImmediateSyftMessageWithReply\nfrom syft.core.common.message import SignedImmediateSyftMessageWithoutReply\nfrom syft.core.common.serde.deserialize import _deserialize\nfrom syft.core.node.network.network import Network\nfrom syft.grid.services.signaling_service import PullSignalingService\nfrom syft.grid.services.signaling_service import PushSignalingService\nfrom syft.grid.services.signaling_service import RegisterDuetPeerService\n\napp = Flask(__name__)\n\nnetwork = Network(name=\"om-net\")\n\nnetwork.immediate_services_without_reply.append(PushSignalingService)\nnetwork.immediate_services_with_reply.append(PullSignalingService)\nnetwork.immediate_services_with_reply.append(RegisterDuetPeerService)\nnetwork._register_services() # re-register all services including SignalingService\n\n\[email protected](\"/metadata\")\ndef get_metadata() -> flask.Response:\n metadata = network.get_metadata_for_client()\n metadata_proto = metadata.serialize()\n r = Response(\n response=metadata_proto.SerializeToString(),\n status=200,\n )\n r.headers[\"Content-Type\"] = \"application/octet-stream\"\n return r\n\n\[email protected](\"/\", methods=[\"POST\"])\ndef process_network_msgs() -> flask.Response:\n data = flask.request.get_data()\n obj_msg = _deserialize(blob=data, from_bytes=True)\n if isinstance(obj_msg, SignedImmediateSyftMessageWithReply):\n print(\n f\"Signaling server SignedImmediateSyftMessageWithReply: {obj_msg.message} watch\"\n )\n reply = network.recv_immediate_msg_with_reply(msg=obj_msg)\n r = Response(response=reply.serialize(to_bytes=True), status=200)\n r.headers[\"Content-Type\"] = \"application/octet-stream\"\n return r\n elif isinstance(obj_msg, SignedImmediateSyftMessageWithoutReply):\n print(\n f\"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch\"\n )\n network.recv_immediate_msg_without_reply(msg=obj_msg)\n r = 
Response(status=200)\n return r\n else:\n print(\n f\"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch\"\n )\n network.recv_eventual_msg_without_reply(msg=obj_msg)\n r = Response(status=200)\n return r\n\n\ndef run() -> None:\n global network\n\n IP_MODE = os.getenv(\"IP_MODE\", \"IPV4\") # default to ipv4\n if len(sys.argv) > 1:\n IP_MODE = sys.argv[1]\n\n IP_MODE = \"IPV6\" if IP_MODE == \"IPV6\" else \"IPV4\"\n # this signing_key is to aid in local development and is not used in the real\n # PyGrid implementation\n HOST = \"0.0.0.0\" if IP_MODE == \"IPV4\" else \"::\" # nosec\n PORT = os.getenv(\"PORT\", 5000)\n\n print(\"====================================\")\n print(\"========== NODE ROOT KEY ===========\")\n print(\"====================================\")\n print(network.signing_key.encode(encoder=HexEncoder).decode(\"utf-8\"), \"\\n\")\n\n print(f\"Using {IP_MODE} and listening on port {PORT}\")\n\n app.run(host=HOST, port=int(PORT))\n\n\nrun()\n", "path": "src/syft/grid/example_nodes/network.py"}]}
1,180
384
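The PySyft patch above amounts to choosing the wildcard bind address from an `IP_MODE` switch. A standalone sketch of just that selection logic, with no Flask or Syft dependencies:

```python
import os
import sys

# Mirrors the patched run(): IPV6 binds the IPv6 wildcard "::", anything else
# falls back to IPv4's "0.0.0.0"; the mode comes from argv[1] or IP_MODE.
ip_mode = os.getenv("IP_MODE", "IPV4")
if len(sys.argv) > 1:
    ip_mode = sys.argv[1]
ip_mode = "IPV6" if ip_mode == "IPV6" else "IPV4"

host = "::" if ip_mode == "IPV6" else "0.0.0.0"
port = int(os.getenv("PORT", 5000))

print(f"Using {ip_mode} and listening on {host}:{port}")
# app.run(host=host, port=port)  # what the example node then hands to Flask
```

Binding `"::"` is what lets the example node accept IPv6 connections; apart from the startup banner, the rest of the node is unchanged by the diff.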
gh_patches_debug_19544
rasdani/github-patches
git_diff
mabel-dev__opteryx-1688
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 🪲 [CI] MyPy test failure ### Thank you for taking the time to report a problem with Opteryx. _To help us to respond to your request we ask that you try to provide the below detail about the bug._ **Describe the bug** _A clear and specific description of what the bug is. What the error, incorrect or unexpected behaviour was._ **Expected behaviour** _A clear and concise description of what you expected to happen._ **Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._ ~~~sql ~~~ **Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opteryx/planner/views/__init__.py` Content: ``` 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import orjson 14 15 from opteryx.managers.expression import NodeType 16 from opteryx.third_party.travers import Graph 17 18 19 def _load_views(): 20 try: 21 with open("views.json", "rb") as defs: 22 return orjson.loads(defs.read()) 23 except Exception as err: 24 print(f"[OPTERYX] Unable to open views definition file. {err}") 25 return {} 26 27 28 VIEWS = _load_views() 29 30 31 def is_view(view_name: str) -> bool: 32 return view_name in VIEWS 33 34 35 def view_as_plan(view_name: str) -> Graph: 36 from opteryx.planner.logical_planner import do_logical_planning_phase 37 from opteryx.third_party import sqloxide 38 from opteryx.utils.sql import clean_statement 39 from opteryx.utils.sql import remove_comments 40 41 operation = VIEWS.get(view_name)["statement"] 42 43 clean_sql = clean_statement(remove_comments(operation)) 44 parsed_statements = sqloxide.parse_sql(clean_sql, dialect="mysql") 45 logical_plan, _, _ = next(do_logical_planning_phase(parsed_statements)) 46 47 return logical_plan 48 ``` Path: `opteryx/__version__.py` Content: ``` 1 __build__ = 522 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 """ 16 Store the version here so: 17 1) we don't load dependencies by storing it in __init__.py 18 2) we can import it in setup.py for the same reason 19 """ 20 from enum import Enum # isort: skip 21 22 23 class VersionStatus(Enum): 24 ALPHA = "alpha" 25 BETA = "beta" 26 RELEASE = "release" 27 28 29 _major = 0 30 _minor = 16 31 _revision = 0 32 _status = VersionStatus.ALPHA 33 34 __author__ = "@joocer" 35 __version__ = f"{_major}.{_minor}.{_revision}" + ( 36 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else "" 37 ) 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opteryx/__version__.py b/opteryx/__version__.py --- a/opteryx/__version__.py +++ b/opteryx/__version__.py @@ -1,4 +1,4 @@ -__build__ = 522 +__build__ = 523 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/opteryx/planner/views/__init__.py b/opteryx/planner/views/__init__.py --- a/opteryx/planner/views/__init__.py +++ b/opteryx/planner/views/__init__.py @@ -12,8 +12,7 @@ import orjson -from opteryx.managers.expression import NodeType -from opteryx.third_party.travers import Graph +from opteryx.planner.logical_planner import LogicalPlan def _load_views(): @@ -32,7 +31,7 @@ return view_name in VIEWS -def view_as_plan(view_name: str) -> Graph: +def view_as_plan(view_name: str) -> LogicalPlan: from opteryx.planner.logical_planner import do_logical_planning_phase from opteryx.third_party import sqloxide from opteryx.utils.sql import clean_statement
{"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 522\n+__build__ = 523\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\ndiff --git a/opteryx/planner/views/__init__.py b/opteryx/planner/views/__init__.py\n--- a/opteryx/planner/views/__init__.py\n+++ b/opteryx/planner/views/__init__.py\n@@ -12,8 +12,7 @@\n \n import orjson\n \n-from opteryx.managers.expression import NodeType\n-from opteryx.third_party.travers import Graph\n+from opteryx.planner.logical_planner import LogicalPlan\n \n \n def _load_views():\n@@ -32,7 +31,7 @@\n return view_name in VIEWS\n \n \n-def view_as_plan(view_name: str) -> Graph:\n+def view_as_plan(view_name: str) -> LogicalPlan:\n from opteryx.planner.logical_planner import do_logical_planning_phase\n from opteryx.third_party import sqloxide\n from opteryx.utils.sql import clean_statement\n", "issue": "\ud83e\udeb2 [CI] MyPy test failure\n### Thank you for taking the time to report a problem with Opteryx.\r\n_To help us to respond to your request we ask that you try to provide the below detail about the bug._\r\n\r\n**Describe the bug** _A clear and specific description of what the bug is. What the error, incorrect or unexpected behaviour was._\r\n\r\n\r\n**Expected behaviour** _A clear and concise description of what you expected to happen._\r\n\r\n\r\n**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._\r\n\r\n~~~sql\r\n\r\n~~~\r\n\r\n**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport orjson\n\nfrom opteryx.managers.expression import NodeType\nfrom opteryx.third_party.travers import Graph\n\n\ndef _load_views():\n try:\n with open(\"views.json\", \"rb\") as defs:\n return orjson.loads(defs.read())\n except Exception as err:\n print(f\"[OPTERYX] Unable to open views definition file. 
{err}\")\n return {}\n\n\nVIEWS = _load_views()\n\n\ndef is_view(view_name: str) -> bool:\n return view_name in VIEWS\n\n\ndef view_as_plan(view_name: str) -> Graph:\n from opteryx.planner.logical_planner import do_logical_planning_phase\n from opteryx.third_party import sqloxide\n from opteryx.utils.sql import clean_statement\n from opteryx.utils.sql import remove_comments\n\n operation = VIEWS.get(view_name)[\"statement\"]\n\n clean_sql = clean_statement(remove_comments(operation))\n parsed_statements = sqloxide.parse_sql(clean_sql, dialect=\"mysql\")\n logical_plan, _, _ = next(do_logical_planning_phase(parsed_statements))\n\n return logical_plan\n", "path": "opteryx/planner/views/__init__.py"}, {"content": "__build__ = 522\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 16\n_revision = 0\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport orjson\n\nfrom opteryx.planner.logical_planner import LogicalPlan\n\n\ndef _load_views():\n try:\n with open(\"views.json\", \"rb\") as defs:\n return orjson.loads(defs.read())\n except Exception as err:\n print(f\"[OPTERYX] Unable to open views definition file. 
{err}\")\n return {}\n\n\nVIEWS = _load_views()\n\n\ndef is_view(view_name: str) -> bool:\n return view_name in VIEWS\n\n\ndef view_as_plan(view_name: str) -> LogicalPlan:\n from opteryx.planner.logical_planner import do_logical_planning_phase\n from opteryx.third_party import sqloxide\n from opteryx.utils.sql import clean_statement\n from opteryx.utils.sql import remove_comments\n\n operation = VIEWS.get(view_name)[\"statement\"]\n\n clean_sql = clean_statement(remove_comments(operation))\n parsed_statements = sqloxide.parse_sql(clean_sql, dialect=\"mysql\")\n logical_plan, _, _ = next(do_logical_planning_phase(parsed_statements))\n\n return logical_plan\n", "path": "opteryx/planner/views/__init__.py"}, {"content": "__build__ = 523\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 16\n_revision = 0\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}]}
1,251
300
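The Opteryx fix above swaps the `view_as_plan` return annotation from the third-party `Graph` to `LogicalPlan` and drops the now-unused imports, which is presumably what the CI mypy failure was about (the issue body is just the blank template). A generic illustration of why the precise return type matters to a checker — the classes below are placeholders, not the real Opteryx types, and the subclass relationship is assumed purely for the example:

```python
# Placeholder types for illustration only -- not Opteryx classes.
class Graph:
    pass


class LogicalPlan(Graph):  # assumed specialisation, for the sake of the example
    def optimise(self) -> "LogicalPlan":
        return self


def view_as_plan(view_name: str) -> LogicalPlan:  # precise return type
    return LogicalPlan()


plan = view_as_plan("example.view")
plan.optimise()  # accepted by mypy; with a looser `-> Graph` annotation the
                 # checker would reject this call, since Graph has no optimise()
```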
gh_patches_debug_37940
rasdani/github-patches
git_diff
deepset-ai__haystack-6753
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- feat: Add split by `page` to `DocumentSplitter` **Is your feature request related to a problem? Please describe.** There are some cases where we would like to be able to split the contents of a PDF by page. Either to keep all text from a single page as a document to help preserve context or to be able to perform two sets of chunking (i.e. split by page, followed up by split by sentence). I would not say this is a common set up, but I believe we can straightforwardly extend the `DocumentSplitter` to have this flexibility. **Describe the solution you'd like** Add a new `split_by` value of `page` that would split on `"\f"`. **Describe alternatives you've considered** Split up the source file (e.g. a PDF) into individual pages before feeding it into the Haystack pipeline. Definitely doable, but less elegant than having the `DocumentSplitter` being able to handle this. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `haystack/components/preprocessors/document_splitter.py` Content: ``` 1 from copy import deepcopy 2 from typing import List, Literal 3 4 from more_itertools import windowed 5 6 from haystack import component, Document 7 8 9 @component 10 class DocumentSplitter: 11 """ 12 Splits a list of text documents into a list of text documents with shorter texts. 13 This is useful for splitting documents with long texts that otherwise would not fit into the maximum text length of language models. 14 """ 15 16 def __init__( 17 self, split_by: Literal["word", "sentence", "passage"] = "word", split_length: int = 200, split_overlap: int = 0 18 ): 19 """ 20 :param split_by: The unit by which the document should be split. Choose from "word" for splitting by " ", 21 "sentence" for splitting by ".", or "passage" for splitting by "\\n\\n". 22 :param split_length: The maximum number of units in each split. 23 :param split_overlap: The number of units that each split should overlap. 24 """ 25 26 self.split_by = split_by 27 if split_by not in ["word", "sentence", "passage"]: 28 raise ValueError("split_by must be one of 'word', 'sentence' or 'passage'.") 29 if split_length <= 0: 30 raise ValueError("split_length must be greater than 0.") 31 self.split_length = split_length 32 if split_overlap < 0: 33 raise ValueError("split_overlap must be greater than or equal to 0.") 34 self.split_overlap = split_overlap 35 36 @component.output_types(documents=List[Document]) 37 def run(self, documents: List[Document]): 38 """ 39 Splits the documents by split_by after split_length units with an overlap of split_overlap units. 40 Returns a list of documents with the split texts. 41 A metadata field "source_id" is added to each document to keep track of the original document that was split. 42 Other metadata are copied from the original document. 43 :param documents: The documents to split. 44 :return: A list of documents with the split texts. 45 """ 46 47 if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)): 48 raise TypeError("DocumentSplitter expects a List of Documents as input.") 49 50 split_docs = [] 51 for doc in documents: 52 if doc.content is None: 53 raise ValueError( 54 f"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None." 
55 ) 56 units = self._split_into_units(doc.content, self.split_by) 57 text_splits = self._concatenate_units(units, self.split_length, self.split_overlap) 58 metadata = deepcopy(doc.meta) 59 metadata["source_id"] = doc.id 60 split_docs += [Document(content=txt, meta=metadata) for txt in text_splits] 61 return {"documents": split_docs} 62 63 def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage"]) -> List[str]: 64 if split_by == "passage": 65 split_at = "\n\n" 66 elif split_by == "sentence": 67 split_at = "." 68 elif split_by == "word": 69 split_at = " " 70 else: 71 raise NotImplementedError( 72 "DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options." 73 ) 74 units = text.split(split_at) 75 # Add the delimiter back to all units except the last one 76 for i in range(len(units) - 1): 77 units[i] += split_at 78 return units 79 80 def _concatenate_units(self, elements: List[str], split_length: int, split_overlap: int) -> List[str]: 81 """ 82 Concatenates the elements into parts of split_length units. 83 """ 84 text_splits = [] 85 segments = windowed(elements, n=split_length, step=split_length - split_overlap) 86 for seg in segments: 87 current_units = [unit for unit in seg if unit is not None] 88 txt = "".join(current_units) 89 if len(txt) > 0: 90 text_splits.append(txt) 91 return text_splits 92 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/haystack/components/preprocessors/document_splitter.py b/haystack/components/preprocessors/document_splitter.py --- a/haystack/components/preprocessors/document_splitter.py +++ b/haystack/components/preprocessors/document_splitter.py @@ -14,18 +14,21 @@ """ def __init__( - self, split_by: Literal["word", "sentence", "passage"] = "word", split_length: int = 200, split_overlap: int = 0 + self, + split_by: Literal["word", "sentence", "page", "passage"] = "word", + split_length: int = 200, + split_overlap: int = 0, ): """ :param split_by: The unit by which the document should be split. Choose from "word" for splitting by " ", - "sentence" for splitting by ".", or "passage" for splitting by "\\n\\n". + "sentence" for splitting by ".", "page" for splitting by "\f" or "passage" for splitting by "\\n\\n". :param split_length: The maximum number of units in each split. :param split_overlap: The number of units that each split should overlap. """ self.split_by = split_by - if split_by not in ["word", "sentence", "passage"]: - raise ValueError("split_by must be one of 'word', 'sentence' or 'passage'.") + if split_by not in ["word", "sentence", "page", "passage"]: + raise ValueError("split_by must be one of 'word', 'sentence', 'page' or 'passage'.") if split_length <= 0: raise ValueError("split_length must be greater than 0.") self.split_length = split_length @@ -60,8 +63,10 @@ split_docs += [Document(content=txt, meta=metadata) for txt in text_splits] return {"documents": split_docs} - def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage"]) -> List[str]: - if split_by == "passage": + def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage", "page"]) -> List[str]: + if split_by == "page": + split_at = "\f" + elif split_by == "passage": split_at = "\n\n" elif split_by == "sentence": split_at = "." @@ -69,7 +74,7 @@ split_at = " " else: raise NotImplementedError( - "DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options." + "DocumentSplitter only supports 'word', 'sentence', 'page' or 'passage' split_by options." ) units = text.split(split_at) # Add the delimiter back to all units except the last one
{"golden_diff": "diff --git a/haystack/components/preprocessors/document_splitter.py b/haystack/components/preprocessors/document_splitter.py\n--- a/haystack/components/preprocessors/document_splitter.py\n+++ b/haystack/components/preprocessors/document_splitter.py\n@@ -14,18 +14,21 @@\n \"\"\"\n \n def __init__(\n- self, split_by: Literal[\"word\", \"sentence\", \"passage\"] = \"word\", split_length: int = 200, split_overlap: int = 0\n+ self,\n+ split_by: Literal[\"word\", \"sentence\", \"page\", \"passage\"] = \"word\",\n+ split_length: int = 200,\n+ split_overlap: int = 0,\n ):\n \"\"\"\n :param split_by: The unit by which the document should be split. Choose from \"word\" for splitting by \" \",\n- \"sentence\" for splitting by \".\", or \"passage\" for splitting by \"\\\\n\\\\n\".\n+ \"sentence\" for splitting by \".\", \"page\" for splitting by \"\\f\" or \"passage\" for splitting by \"\\\\n\\\\n\".\n :param split_length: The maximum number of units in each split.\n :param split_overlap: The number of units that each split should overlap.\n \"\"\"\n \n self.split_by = split_by\n- if split_by not in [\"word\", \"sentence\", \"passage\"]:\n- raise ValueError(\"split_by must be one of 'word', 'sentence' or 'passage'.\")\n+ if split_by not in [\"word\", \"sentence\", \"page\", \"passage\"]:\n+ raise ValueError(\"split_by must be one of 'word', 'sentence', 'page' or 'passage'.\")\n if split_length <= 0:\n raise ValueError(\"split_length must be greater than 0.\")\n self.split_length = split_length\n@@ -60,8 +63,10 @@\n split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]\n return {\"documents\": split_docs}\n \n- def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\"]) -> List[str]:\n- if split_by == \"passage\":\n+ def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\", \"page\"]) -> List[str]:\n+ if split_by == \"page\":\n+ split_at = \"\\f\"\n+ elif split_by == \"passage\":\n split_at = \"\\n\\n\"\n elif split_by == \"sentence\":\n split_at = \".\"\n@@ -69,7 +74,7 @@\n split_at = \" \"\n else:\n raise NotImplementedError(\n- \"DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options.\"\n+ \"DocumentSplitter only supports 'word', 'sentence', 'page' or 'passage' split_by options.\"\n )\n units = text.split(split_at)\n # Add the delimiter back to all units except the last one\n", "issue": "feat: Add split by `page` to `DocumentSplitter`\n**Is your feature request related to a problem? Please describe.**\r\nThere are some cases where we would like to be able to split the contents of a PDF by page. Either to keep all text from a single page as a document to help preserve context or to be able to perform two sets of chunking (i.e. split by page, followed up by split by sentence). I would not say this is a common set up, but I believe we can straightforwardly extend the `DocumentSplitter` to have this flexibility. \r\n\r\n**Describe the solution you'd like**\r\nAdd a new `split_by` value of `page` that would split on `\"\\f\"`.\r\n\r\n**Describe alternatives you've considered**\r\nSplit up the source file (e.g. a PDF) into individual pages before feeding it into the Haystack pipeline. Definitely doable, but less elegant than having the `DocumentSplitter` being able to handle this. 
\r\n\n", "before_files": [{"content": "from copy import deepcopy\nfrom typing import List, Literal\n\nfrom more_itertools import windowed\n\nfrom haystack import component, Document\n\n\n@component\nclass DocumentSplitter:\n \"\"\"\n Splits a list of text documents into a list of text documents with shorter texts.\n This is useful for splitting documents with long texts that otherwise would not fit into the maximum text length of language models.\n \"\"\"\n\n def __init__(\n self, split_by: Literal[\"word\", \"sentence\", \"passage\"] = \"word\", split_length: int = 200, split_overlap: int = 0\n ):\n \"\"\"\n :param split_by: The unit by which the document should be split. Choose from \"word\" for splitting by \" \",\n \"sentence\" for splitting by \".\", or \"passage\" for splitting by \"\\\\n\\\\n\".\n :param split_length: The maximum number of units in each split.\n :param split_overlap: The number of units that each split should overlap.\n \"\"\"\n\n self.split_by = split_by\n if split_by not in [\"word\", \"sentence\", \"passage\"]:\n raise ValueError(\"split_by must be one of 'word', 'sentence' or 'passage'.\")\n if split_length <= 0:\n raise ValueError(\"split_length must be greater than 0.\")\n self.split_length = split_length\n if split_overlap < 0:\n raise ValueError(\"split_overlap must be greater than or equal to 0.\")\n self.split_overlap = split_overlap\n\n @component.output_types(documents=List[Document])\n def run(self, documents: List[Document]):\n \"\"\"\n Splits the documents by split_by after split_length units with an overlap of split_overlap units.\n Returns a list of documents with the split texts.\n A metadata field \"source_id\" is added to each document to keep track of the original document that was split.\n Other metadata are copied from the original document.\n :param documents: The documents to split.\n :return: A list of documents with the split texts.\n \"\"\"\n\n if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):\n raise TypeError(\"DocumentSplitter expects a List of Documents as input.\")\n\n split_docs = []\n for doc in documents:\n if doc.content is None:\n raise ValueError(\n f\"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None.\"\n )\n units = self._split_into_units(doc.content, self.split_by)\n text_splits = self._concatenate_units(units, self.split_length, self.split_overlap)\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]\n return {\"documents\": split_docs}\n\n def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\"]) -> List[str]:\n if split_by == \"passage\":\n split_at = \"\\n\\n\"\n elif split_by == \"sentence\":\n split_at = \".\"\n elif split_by == \"word\":\n split_at = \" \"\n else:\n raise NotImplementedError(\n \"DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options.\"\n )\n units = text.split(split_at)\n # Add the delimiter back to all units except the last one\n for i in range(len(units) - 1):\n units[i] += split_at\n return units\n\n def _concatenate_units(self, elements: List[str], split_length: int, split_overlap: int) -> List[str]:\n \"\"\"\n Concatenates the elements into parts of split_length units.\n \"\"\"\n text_splits = []\n segments = windowed(elements, n=split_length, step=split_length - split_overlap)\n for seg in segments:\n current_units = [unit for unit in seg if unit is 
not None]\n txt = \"\".join(current_units)\n if len(txt) > 0:\n text_splits.append(txt)\n return text_splits\n", "path": "haystack/components/preprocessors/document_splitter.py"}], "after_files": [{"content": "from copy import deepcopy\nfrom typing import List, Literal\n\nfrom more_itertools import windowed\n\nfrom haystack import component, Document\n\n\n@component\nclass DocumentSplitter:\n \"\"\"\n Splits a list of text documents into a list of text documents with shorter texts.\n This is useful for splitting documents with long texts that otherwise would not fit into the maximum text length of language models.\n \"\"\"\n\n def __init__(\n self,\n split_by: Literal[\"word\", \"sentence\", \"page\", \"passage\"] = \"word\",\n split_length: int = 200,\n split_overlap: int = 0,\n ):\n \"\"\"\n :param split_by: The unit by which the document should be split. Choose from \"word\" for splitting by \" \",\n \"sentence\" for splitting by \".\", \"page\" for splitting by \"\\f\" or \"passage\" for splitting by \"\\\\n\\\\n\".\n :param split_length: The maximum number of units in each split.\n :param split_overlap: The number of units that each split should overlap.\n \"\"\"\n\n self.split_by = split_by\n if split_by not in [\"word\", \"sentence\", \"page\", \"passage\"]:\n raise ValueError(\"split_by must be one of 'word', 'sentence', 'page' or 'passage'.\")\n if split_length <= 0:\n raise ValueError(\"split_length must be greater than 0.\")\n self.split_length = split_length\n if split_overlap < 0:\n raise ValueError(\"split_overlap must be greater than or equal to 0.\")\n self.split_overlap = split_overlap\n\n @component.output_types(documents=List[Document])\n def run(self, documents: List[Document]):\n \"\"\"\n Splits the documents by split_by after split_length units with an overlap of split_overlap units.\n Returns a list of documents with the split texts.\n A metadata field \"source_id\" is added to each document to keep track of the original document that was split.\n Other metadata are copied from the original document.\n :param documents: The documents to split.\n :return: A list of documents with the split texts.\n \"\"\"\n\n if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):\n raise TypeError(\"DocumentSplitter expects a List of Documents as input.\")\n\n split_docs = []\n for doc in documents:\n if doc.content is None:\n raise ValueError(\n f\"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None.\"\n )\n units = self._split_into_units(doc.content, self.split_by)\n text_splits = self._concatenate_units(units, self.split_length, self.split_overlap)\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]\n return {\"documents\": split_docs}\n\n def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\", \"page\"]) -> List[str]:\n if split_by == \"page\":\n split_at = \"\\f\"\n elif split_by == \"passage\":\n split_at = \"\\n\\n\"\n elif split_by == \"sentence\":\n split_at = \".\"\n elif split_by == \"word\":\n split_at = \" \"\n else:\n raise NotImplementedError(\n \"DocumentSplitter only supports 'word', 'sentence', 'page' or 'passage' split_by options.\"\n )\n units = text.split(split_at)\n # Add the delimiter back to all units except the last one\n for i in range(len(units) - 1):\n units[i] += split_at\n return units\n\n def _concatenate_units(self, elements: List[str], 
split_length: int, split_overlap: int) -> List[str]:\n \"\"\"\n Concatenates the elements into parts of split_length units.\n \"\"\"\n text_splits = []\n segments = windowed(elements, n=split_length, step=split_length - split_overlap)\n for seg in segments:\n current_units = [unit for unit in seg if unit is not None]\n txt = \"\".join(current_units)\n if len(txt) > 0:\n text_splits.append(txt)\n return text_splits\n", "path": "haystack/components/preprocessors/document_splitter.py"}]}
1,521
667
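To make the `split_by="page"` behaviour added in the golden diff of the record above easier to picture, here is a minimal usage sketch against the patched `DocumentSplitter`. The sample text, the `split_length=1` choice, and the expected output shown in comments are illustrative assumptions, not part of the record.

```python
from haystack import Document
from haystack.components.preprocessors.document_splitter import DocumentSplitter

# With split_length=1, every form-feed-delimited page becomes its own Document.
splitter = DocumentSplitter(split_by="page", split_length=1, split_overlap=0)

doc = Document(content="Text of page one.\fText of page two.\fText of page three.")
result = splitter.run(documents=[doc])

for d in result["documents"]:
    print(repr(d.content))

# Expected output: the form feed is kept on every unit except the last,
# because _split_into_units re-appends the delimiter to all but the final unit.
# 'Text of page one.\x0c'
# 'Text of page two.\x0c'
# 'Text of page three.'
```

Splitting one page at a time like this is the first half of the "split by page, followed up by split by sentence" workflow the issue describes; the page-level documents returned here can be passed to a second `DocumentSplitter(split_by="sentence", ...)` afterwards.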
gh_patches_debug_1503
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-11075
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Build: support Ruby under `build.tools` We should add support for Ruby on `build.tools`. It will be useful for doctools like Jekyll. Work required: - [x] Update the documentation - [x] Install asdf-ruby (https://github.com/asdf-vm/asdf-ruby) on Docker images - [x] Compile latest Ruby version and upload it S3 (happening at https://app.circleci.com/pipelines/github/readthedocs/readthedocs-docker-images/289/workflows/f1bc7c62-02d8-4353-ac94-972eb58b0675/jobs/503) - [x] Update `settings.py` to add this tool and version - [x] Update config v2 to accept this value - [x] Create a branch on `test-builds` for this use case > **Note**: we had a support request for this at https://github.com/readthedocs/readthedocs.org/issues/9599#issuecomment-1560011462 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `readthedocs/builds/constants_docker.py` Content: ``` 1 """ 2 Define constants here to allow import them without any external dependency. 3 4 There are situations where we want to have access to these values without Django installed 5 (e.g. common/dockerfiles/tasks.py) 6 7 Note these constants where previously defined as Django settings in ``readthedocs/settings/base.py``. 8 """ 9 10 DOCKER_DEFAULT_IMAGE = "readthedocs/build" 11 12 # Adding a new tool/version to this setting requires: 13 # 14 # - a mapping between the expected version in the config file, to the full 15 # version installed via asdf (found via ``asdf list all <tool>``) 16 # 17 # - running the script ``./scripts/compile_version_upload.sh`` in 18 # development and production environments to compile and cache the new 19 # tool/version 20 # 21 # Note that when updating this options, you should also update the file: 22 # readthedocs/rtd_tests/fixtures/spec/v2/schema.json 23 RTD_DOCKER_BUILD_SETTINGS = { 24 # Mapping of build.os options to docker image. 25 "os": { 26 "ubuntu-20.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04", 27 "ubuntu-22.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04", 28 }, 29 # Mapping of build.tools options to specific versions. 30 "tools": { 31 "python": { 32 "2.7": "2.7.18", 33 "3.6": "3.6.15", 34 "3.7": "3.7.17", 35 "3.8": "3.8.18", 36 "3.9": "3.9.18", 37 "3.10": "3.10.13", 38 "3.11": "3.11.6", 39 "3.12": "3.12.0", 40 # Always point to the latest stable release. 41 "3": "3.12.0", 42 "miniconda3-4.7": "miniconda3-4.7.12", 43 "mambaforge-4.10": "mambaforge-4.10.3-10", 44 "mambaforge-22.9": "mambaforge-22.9.0-3", 45 }, 46 "nodejs": { 47 "14": "14.20.1", 48 "16": "16.18.1", 49 "18": "18.16.1", # LTS 50 "19": "19.0.1", 51 "20": "20.3.1", 52 }, 53 "rust": { 54 "1.55": "1.55.0", 55 "1.61": "1.61.0", 56 "1.64": "1.64.0", 57 "1.70": "1.70.0", 58 }, 59 "golang": { 60 "1.17": "1.17.13", 61 "1.18": "1.18.10", 62 "1.19": "1.19.10", 63 "1.20": "1.20.5", 64 }, 65 }, 66 } 67 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py --- a/readthedocs/builds/constants_docker.py +++ b/readthedocs/builds/constants_docker.py @@ -50,6 +50,9 @@ "19": "19.0.1", "20": "20.3.1", }, + "ruby": { + "3.3": "3.3.0", + }, "rust": { "1.55": "1.55.0", "1.61": "1.61.0",
{"golden_diff": "diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py\n--- a/readthedocs/builds/constants_docker.py\n+++ b/readthedocs/builds/constants_docker.py\n@@ -50,6 +50,9 @@\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n+ \"ruby\": {\n+ \"3.3\": \"3.3.0\",\n+ },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n", "issue": "Build: support Ruby under `build.tools` \nWe should add support for Ruby on `build.tools`. It will be useful for doctools like Jekyll.\r\n\r\nWork required:\r\n\r\n- [x] Update the documentation\r\n- [x] Install asdf-ruby (https://github.com/asdf-vm/asdf-ruby) on Docker images\r\n- [x] Compile latest Ruby version and upload it S3 (happening at https://app.circleci.com/pipelines/github/readthedocs/readthedocs-docker-images/289/workflows/f1bc7c62-02d8-4353-ac94-972eb58b0675/jobs/503)\r\n- [x] Update `settings.py` to add this tool and version\r\n- [x] Update config v2 to accept this value\r\n- [x] Create a branch on `test-builds` for this use case\r\n\r\n\r\n> **Note**: we had a support request for this at https://github.com/readthedocs/readthedocs.org/issues/9599#issuecomment-1560011462\n", "before_files": [{"content": "\"\"\"\nDefine constants here to allow import them without any external dependency.\n\nThere are situations where we want to have access to these values without Django installed\n(e.g. common/dockerfiles/tasks.py)\n\nNote these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.\n\"\"\"\n\nDOCKER_DEFAULT_IMAGE = \"readthedocs/build\"\n\n# Adding a new tool/version to this setting requires:\n#\n# - a mapping between the expected version in the config file, to the full\n# version installed via asdf (found via ``asdf list all <tool>``)\n#\n# - running the script ``./scripts/compile_version_upload.sh`` in\n# development and production environments to compile and cache the new\n# tool/version\n#\n# Note that when updating this options, you should also update the file:\n# readthedocs/rtd_tests/fixtures/spec/v2/schema.json\nRTD_DOCKER_BUILD_SETTINGS = {\n # Mapping of build.os options to docker image.\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n \"python\": {\n \"2.7\": \"2.7.18\",\n \"3.6\": \"3.6.15\",\n \"3.7\": \"3.7.17\",\n \"3.8\": \"3.8.18\",\n \"3.9\": \"3.9.18\",\n \"3.10\": \"3.10.13\",\n \"3.11\": \"3.11.6\",\n \"3.12\": \"3.12.0\",\n # Always point to the latest stable release.\n \"3\": \"3.12.0\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n \"mambaforge-22.9\": \"mambaforge-22.9.0-3\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n \"16\": \"16.18.1\",\n \"18\": \"18.16.1\", # LTS\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n \"1.64\": \"1.64.0\",\n \"1.70\": \"1.70.0\",\n },\n \"golang\": {\n \"1.17\": \"1.17.13\",\n \"1.18\": \"1.18.10\",\n \"1.19\": \"1.19.10\",\n \"1.20\": \"1.20.5\",\n },\n },\n}\n", "path": "readthedocs/builds/constants_docker.py"}], "after_files": [{"content": "\"\"\"\nDefine constants here to allow import them without any external dependency.\n\nThere are situations where we want to have access to these values without Django installed\n(e.g. 
common/dockerfiles/tasks.py)\n\nNote these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.\n\"\"\"\n\nDOCKER_DEFAULT_IMAGE = \"readthedocs/build\"\n\n# Adding a new tool/version to this setting requires:\n#\n# - a mapping between the expected version in the config file, to the full\n# version installed via asdf (found via ``asdf list all <tool>``)\n#\n# - running the script ``./scripts/compile_version_upload.sh`` in\n# development and production environments to compile and cache the new\n# tool/version\n#\n# Note that when updating this options, you should also update the file:\n# readthedocs/rtd_tests/fixtures/spec/v2/schema.json\nRTD_DOCKER_BUILD_SETTINGS = {\n # Mapping of build.os options to docker image.\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n \"python\": {\n \"2.7\": \"2.7.18\",\n \"3.6\": \"3.6.15\",\n \"3.7\": \"3.7.17\",\n \"3.8\": \"3.8.18\",\n \"3.9\": \"3.9.18\",\n \"3.10\": \"3.10.13\",\n \"3.11\": \"3.11.6\",\n \"3.12\": \"3.12.0\",\n # Always point to the latest stable release.\n \"3\": \"3.12.0\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n \"mambaforge-22.9\": \"mambaforge-22.9.0-3\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n \"16\": \"16.18.1\",\n \"18\": \"18.16.1\", # LTS\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n \"ruby\": {\n \"3.3\": \"3.3.0\",\n },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n \"1.64\": \"1.64.0\",\n \"1.70\": \"1.70.0\",\n },\n \"golang\": {\n \"1.17\": \"1.17.13\",\n \"1.18\": \"1.18.10\",\n \"1.19\": \"1.19.10\",\n \"1.20\": \"1.20.5\",\n },\n },\n}\n", "path": "readthedocs/builds/constants_docker.py"}]}
1,347
146
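For the Read the Docs record above, a short sketch of how the new `ruby` entry in `RTD_DOCKER_BUILD_SETTINGS` resolves once the golden diff is applied. The `resolve_tool_version` helper and its error message are assumptions made for illustration; only the settings dictionary itself comes from the patched `readthedocs/builds/constants_docker.py`.

```python
from readthedocs.builds.constants_docker import RTD_DOCKER_BUILD_SETTINGS


def resolve_tool_version(tool: str, version: str) -> str:
    """Map a user-facing build.tools version to the full asdf-installed version."""
    try:
        return RTD_DOCKER_BUILD_SETTINGS["tools"][tool][version]
    except KeyError as exc:
        raise ValueError(f"unsupported build tool/version: {tool} {version}") from exc


# With the golden diff applied, Ruby resolves like any other supported tool:
assert resolve_tool_version("ruby", "3.3") == "3.3.0"
assert resolve_tool_version("python", "3") == "3.12.0"
```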