Dataset schema (column name / dtype / observed min / observed max):
  problem_id          stringlengths    18       22
  source              stringclasses    1 value
  task_type           stringclasses    1 value
  in_source_id        stringlengths    13       58
  prompt              stringlengths    1.1k     25.4k
  golden_diff         stringlengths    145      5.13k
  verification_info   stringlengths    582      39.1k
  num_tokens          int64            271      4.1k
  num_tokens_diff     int64            47       1.02k
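The schema above describes nine columns per row. Below is a minimal sketch of loading and inspecting one row with the Hugging Face `datasets` library; the Hub repo id (taken from the `source` value, `rasdani/github-patches`), the split name, and the idea that `verification_info` parses as JSON are assumptions made for illustration, not facts confirmed by this page.

```python
# Sketch only: load the dataset and look at one row.
# Assumptions (not confirmed here): the rows live on the Hugging Face Hub under
# the repo id "rasdani/github-patches", the split is "train", and
# verification_info is a JSON-encoded string.
import json
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"], row["task_type"], row["in_source_id"])
print(row["prompt"][:300])        # issue text plus file context handed to the model
print(row["golden_diff"][:300])   # reference patch in `git diff` format

info = json.loads(row["verification_info"])
print(sorted(info.keys()))        # e.g. golden_diff, issue, before_files, after_files
print(row["num_tokens"], row["num_tokens_diff"])
```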
problem_id: gh_patches_debug_13121
source: rasdani/github-patches
task_type: git_diff
in_source_id: googleapis__google-api-python-client-1281
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- errors.py: Add support for JSON content that has `errors` instead of `detail` or `details` Thanks for stopping by to let us know something could be better! **PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. **Is your feature request related to a problem? Please describe.** We have been using [errors.py](https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/errors.py) to raise HttpError errors. Unfortunately, many of our instances have content that matches the following format: ``` { error: { errors: { } } } ``` versus what [_get_reason](https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/errors.py#L64) is looking for i.e. only one of `detail`, `details`, or `message` This results us seeing the errors being logged but not the details: ``` <HttpError 503 when requesting url returned "Internal error. Please try again."> ``` **Describe the solution you'd like** The solution involves modifying [_get_reason](https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/errors.py#L64) to also support `error_detail_keyword` with an additional value of `errors`. This modifies the order to: `detail`, `details`, `errors`, `message` **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context or screenshots about the feature request here. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `googleapiclient/errors.py` Content: ``` 1 # Copyright 2014 Google Inc. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Errors for the library. 16 17 All exceptions defined by the library 18 should be defined in this file. 
19 """ 20 from __future__ import absolute_import 21 22 __author__ = "[email protected] (Joe Gregorio)" 23 24 import json 25 26 from googleapiclient import _helpers as util 27 28 29 class Error(Exception): 30 """Base error for this module.""" 31 32 pass 33 34 35 class HttpError(Error): 36 """HTTP data was invalid or unexpected.""" 37 38 @util.positional(3) 39 def __init__(self, resp, content, uri=None): 40 self.resp = resp 41 if not isinstance(content, bytes): 42 raise TypeError("HTTP content should be bytes") 43 self.content = content 44 self.uri = uri 45 self.error_details = "" 46 self._get_reason() 47 48 @property 49 def status_code(self): 50 """Return the HTTP status code from the response content.""" 51 return self.resp.status 52 53 def _get_reason(self): 54 """Calculate the reason for the error from the response content.""" 55 reason = self.resp.reason 56 try: 57 try: 58 data = json.loads(self.content.decode("utf-8")) 59 except json.JSONDecodeError: 60 # In case it is not json 61 data = self.content.decode("utf-8") 62 if isinstance(data, dict): 63 reason = data["error"]["message"] 64 error_detail_keyword = next((kw for kw in ["detail", "details", "message"] if kw in data["error"]), "") 65 if error_detail_keyword: 66 self.error_details = data["error"][error_detail_keyword] 67 elif isinstance(data, list) and len(data) > 0: 68 first_error = data[0] 69 reason = first_error["error"]["message"] 70 if "details" in first_error["error"]: 71 self.error_details = first_error["error"]["details"] 72 else: 73 self.error_details = data 74 except (ValueError, KeyError, TypeError): 75 pass 76 if reason is None: 77 reason = "" 78 return reason 79 80 def __repr__(self): 81 reason = self._get_reason() 82 if self.error_details: 83 return '<HttpError %s when requesting %s returned "%s". 
Details: "%s">' % ( 84 self.resp.status, 85 self.uri, 86 reason.strip(), 87 self.error_details, 88 ) 89 elif self.uri: 90 return '<HttpError %s when requesting %s returned "%s">' % ( 91 self.resp.status, 92 self.uri, 93 self._get_reason().strip(), 94 ) 95 else: 96 return '<HttpError %s "%s">' % (self.resp.status, self._get_reason()) 97 98 __str__ = __repr__ 99 100 101 class InvalidJsonError(Error): 102 """The JSON returned could not be parsed.""" 103 104 pass 105 106 107 class UnknownFileType(Error): 108 """File type unknown or unexpected.""" 109 110 pass 111 112 113 class UnknownLinkType(Error): 114 """Link type unknown or unexpected.""" 115 116 pass 117 118 119 class UnknownApiNameOrVersion(Error): 120 """No API with that name and version exists.""" 121 122 pass 123 124 125 class UnacceptableMimeTypeError(Error): 126 """That is an unacceptable mimetype for this operation.""" 127 128 pass 129 130 131 class MediaUploadSizeError(Error): 132 """Media is larger than the method can accept.""" 133 134 pass 135 136 137 class ResumableUploadError(HttpError): 138 """Error occurred during resumable upload.""" 139 140 pass 141 142 143 class InvalidChunkSizeError(Error): 144 """The given chunksize is not valid.""" 145 146 pass 147 148 149 class InvalidNotificationError(Error): 150 """The channel Notification is invalid.""" 151 152 pass 153 154 155 class BatchError(HttpError): 156 """Error occurred during batch operations.""" 157 158 @util.positional(2) 159 def __init__(self, reason, resp=None, content=None): 160 self.resp = resp 161 self.content = content 162 self.reason = reason 163 164 def __repr__(self): 165 if getattr(self.resp, "status", None) is None: 166 return '<BatchError "%s">' % (self.reason) 167 else: 168 return '<BatchError %s "%s">' % (self.resp.status, self.reason) 169 170 __str__ = __repr__ 171 172 173 class UnexpectedMethodError(Error): 174 """Exception raised by RequestMockBuilder on unexpected calls.""" 175 176 @util.positional(1) 177 def __init__(self, methodId=None): 178 """Constructor for an UnexpectedMethodError.""" 179 super(UnexpectedMethodError, self).__init__( 180 "Received unexpected call %s" % methodId 181 ) 182 183 184 class UnexpectedBodyError(Error): 185 """Exception raised by RequestMockBuilder on unexpected bodies.""" 186 187 def __init__(self, expected, provided): 188 """Constructor for an UnexpectedMethodError.""" 189 super(UnexpectedBodyError, self).__init__( 190 "Expected: [%s] - Provided: [%s]" % (expected, provided) 191 ) 192 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/googleapiclient/errors.py b/googleapiclient/errors.py --- a/googleapiclient/errors.py +++ b/googleapiclient/errors.py @@ -61,7 +61,7 @@ data = self.content.decode("utf-8") if isinstance(data, dict): reason = data["error"]["message"] - error_detail_keyword = next((kw for kw in ["detail", "details", "message"] if kw in data["error"]), "") + error_detail_keyword = next((kw for kw in ["detail", "details", "errors", "message"] if kw in data["error"]), "") if error_detail_keyword: self.error_details = data["error"][error_detail_keyword] elif isinstance(data, list) and len(data) > 0:
{"golden_diff": "diff --git a/googleapiclient/errors.py b/googleapiclient/errors.py\n--- a/googleapiclient/errors.py\n+++ b/googleapiclient/errors.py\n@@ -61,7 +61,7 @@\n data = self.content.decode(\"utf-8\")\n if isinstance(data, dict):\n reason = data[\"error\"][\"message\"]\n- error_detail_keyword = next((kw for kw in [\"detail\", \"details\", \"message\"] if kw in data[\"error\"]), \"\")\n+ error_detail_keyword = next((kw for kw in [\"detail\", \"details\", \"errors\", \"message\"] if kw in data[\"error\"]), \"\")\n if error_detail_keyword:\n self.error_details = data[\"error\"][error_detail_keyword]\n elif isinstance(data, list) and len(data) > 0:\n", "issue": "errors.py: Add support for JSON content that has `errors` instead of `detail` or `details`\nThanks for stopping by to let us know something could be better!\r\n\r\n**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response.\r\n\r\n **Is your feature request related to a problem? Please describe.**\r\n\r\nWe have been using [errors.py](https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/errors.py) to raise HttpError errors. Unfortunately, many of our instances have content that matches the following format:\r\n```\r\n{\r\n error: {\r\n errors: {\r\n }\r\n }\r\n}\r\n```\r\nversus what [_get_reason](https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/errors.py#L64) is looking for i.e. only one of `detail`, `details`, or `message`\r\nThis results us seeing the errors being logged but not the details:\r\n\r\n```\r\n<HttpError 503 when requesting url returned \"Internal error. Please try again.\">\r\n```\r\n\r\n **Describe the solution you'd like**\r\n\r\nThe solution involves modifying [_get_reason](https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/errors.py#L64) to also support `error_detail_keyword` with an additional value of `errors`. This modifies the order to: `detail`, `details`, `errors`, `message`\r\n\r\n **Describe alternatives you've considered**\r\nA clear and concise description of any alternative solutions or features you've considered.\r\n **Additional context**\r\nAdd any other context or screenshots about the feature request here.\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Errors for the library.\n\nAll exceptions defined by the library\nshould be defined in this file.\n\"\"\"\nfrom __future__ import absolute_import\n\n__author__ = \"[email protected] (Joe Gregorio)\"\n\nimport json\n\nfrom googleapiclient import _helpers as util\n\n\nclass Error(Exception):\n \"\"\"Base error for this module.\"\"\"\n\n pass\n\n\nclass HttpError(Error):\n \"\"\"HTTP data was invalid or unexpected.\"\"\"\n\n @util.positional(3)\n def __init__(self, resp, content, uri=None):\n self.resp = resp\n if not isinstance(content, bytes):\n raise TypeError(\"HTTP content should be bytes\")\n self.content = content\n self.uri = uri\n self.error_details = \"\"\n self._get_reason()\n\n @property\n def status_code(self):\n \"\"\"Return the HTTP status code from the response content.\"\"\"\n return self.resp.status\n\n def _get_reason(self):\n \"\"\"Calculate the reason for the error from the response content.\"\"\"\n reason = self.resp.reason\n try:\n try:\n data = json.loads(self.content.decode(\"utf-8\"))\n except json.JSONDecodeError:\n # In case it is not json\n data = self.content.decode(\"utf-8\")\n if isinstance(data, dict):\n reason = data[\"error\"][\"message\"]\n error_detail_keyword = next((kw for kw in [\"detail\", \"details\", \"message\"] if kw in data[\"error\"]), \"\")\n if error_detail_keyword:\n self.error_details = data[\"error\"][error_detail_keyword]\n elif isinstance(data, list) and len(data) > 0:\n first_error = data[0]\n reason = first_error[\"error\"][\"message\"]\n if \"details\" in first_error[\"error\"]:\n self.error_details = first_error[\"error\"][\"details\"]\n else:\n self.error_details = data\n except (ValueError, KeyError, TypeError):\n pass\n if reason is None:\n reason = \"\"\n return reason\n\n def __repr__(self):\n reason = self._get_reason()\n if self.error_details:\n return '<HttpError %s when requesting %s returned \"%s\". 
Details: \"%s\">' % (\n self.resp.status,\n self.uri,\n reason.strip(),\n self.error_details,\n )\n elif self.uri:\n return '<HttpError %s when requesting %s returned \"%s\">' % (\n self.resp.status,\n self.uri,\n self._get_reason().strip(),\n )\n else:\n return '<HttpError %s \"%s\">' % (self.resp.status, self._get_reason())\n\n __str__ = __repr__\n\n\nclass InvalidJsonError(Error):\n \"\"\"The JSON returned could not be parsed.\"\"\"\n\n pass\n\n\nclass UnknownFileType(Error):\n \"\"\"File type unknown or unexpected.\"\"\"\n\n pass\n\n\nclass UnknownLinkType(Error):\n \"\"\"Link type unknown or unexpected.\"\"\"\n\n pass\n\n\nclass UnknownApiNameOrVersion(Error):\n \"\"\"No API with that name and version exists.\"\"\"\n\n pass\n\n\nclass UnacceptableMimeTypeError(Error):\n \"\"\"That is an unacceptable mimetype for this operation.\"\"\"\n\n pass\n\n\nclass MediaUploadSizeError(Error):\n \"\"\"Media is larger than the method can accept.\"\"\"\n\n pass\n\n\nclass ResumableUploadError(HttpError):\n \"\"\"Error occurred during resumable upload.\"\"\"\n\n pass\n\n\nclass InvalidChunkSizeError(Error):\n \"\"\"The given chunksize is not valid.\"\"\"\n\n pass\n\n\nclass InvalidNotificationError(Error):\n \"\"\"The channel Notification is invalid.\"\"\"\n\n pass\n\n\nclass BatchError(HttpError):\n \"\"\"Error occurred during batch operations.\"\"\"\n\n @util.positional(2)\n def __init__(self, reason, resp=None, content=None):\n self.resp = resp\n self.content = content\n self.reason = reason\n\n def __repr__(self):\n if getattr(self.resp, \"status\", None) is None:\n return '<BatchError \"%s\">' % (self.reason)\n else:\n return '<BatchError %s \"%s\">' % (self.resp.status, self.reason)\n\n __str__ = __repr__\n\n\nclass UnexpectedMethodError(Error):\n \"\"\"Exception raised by RequestMockBuilder on unexpected calls.\"\"\"\n\n @util.positional(1)\n def __init__(self, methodId=None):\n \"\"\"Constructor for an UnexpectedMethodError.\"\"\"\n super(UnexpectedMethodError, self).__init__(\n \"Received unexpected call %s\" % methodId\n )\n\n\nclass UnexpectedBodyError(Error):\n \"\"\"Exception raised by RequestMockBuilder on unexpected bodies.\"\"\"\n\n def __init__(self, expected, provided):\n \"\"\"Constructor for an UnexpectedMethodError.\"\"\"\n super(UnexpectedBodyError, self).__init__(\n \"Expected: [%s] - Provided: [%s]\" % (expected, provided)\n )\n", "path": "googleapiclient/errors.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Errors for the library.\n\nAll exceptions defined by the library\nshould be defined in this file.\n\"\"\"\nfrom __future__ import absolute_import\n\n__author__ = \"[email protected] (Joe Gregorio)\"\n\nimport json\n\nfrom googleapiclient import _helpers as util\n\n\nclass Error(Exception):\n \"\"\"Base error for this module.\"\"\"\n\n pass\n\n\nclass HttpError(Error):\n \"\"\"HTTP data was invalid or unexpected.\"\"\"\n\n @util.positional(3)\n def __init__(self, resp, content, uri=None):\n self.resp = resp\n if not isinstance(content, bytes):\n raise TypeError(\"HTTP content should be bytes\")\n self.content = content\n self.uri = uri\n self.error_details = \"\"\n self._get_reason()\n\n @property\n def status_code(self):\n \"\"\"Return the HTTP status code from the response content.\"\"\"\n return self.resp.status\n\n def _get_reason(self):\n \"\"\"Calculate the reason for the error from the response content.\"\"\"\n reason = self.resp.reason\n try:\n try:\n data = json.loads(self.content.decode(\"utf-8\"))\n except json.JSONDecodeError:\n # In case it is not json\n data = self.content.decode(\"utf-8\")\n if isinstance(data, dict):\n reason = data[\"error\"][\"message\"]\n error_detail_keyword = next((kw for kw in [\"detail\", \"details\", \"errors\", \"message\"] if kw in data[\"error\"]), \"\")\n if error_detail_keyword:\n self.error_details = data[\"error\"][error_detail_keyword]\n elif isinstance(data, list) and len(data) > 0:\n first_error = data[0]\n reason = first_error[\"error\"][\"message\"]\n if \"details\" in first_error[\"error\"]:\n self.error_details = first_error[\"error\"][\"details\"]\n else:\n self.error_details = data\n except (ValueError, KeyError, TypeError):\n pass\n if reason is None:\n reason = \"\"\n return reason\n\n def __repr__(self):\n reason = self._get_reason()\n if self.error_details:\n return '<HttpError %s when requesting %s returned \"%s\". 
Details: \"%s\">' % (\n self.resp.status,\n self.uri,\n reason.strip(),\n self.error_details,\n )\n elif self.uri:\n return '<HttpError %s when requesting %s returned \"%s\">' % (\n self.resp.status,\n self.uri,\n self._get_reason().strip(),\n )\n else:\n return '<HttpError %s \"%s\">' % (self.resp.status, self._get_reason())\n\n __str__ = __repr__\n\n\nclass InvalidJsonError(Error):\n \"\"\"The JSON returned could not be parsed.\"\"\"\n\n pass\n\n\nclass UnknownFileType(Error):\n \"\"\"File type unknown or unexpected.\"\"\"\n\n pass\n\n\nclass UnknownLinkType(Error):\n \"\"\"Link type unknown or unexpected.\"\"\"\n\n pass\n\n\nclass UnknownApiNameOrVersion(Error):\n \"\"\"No API with that name and version exists.\"\"\"\n\n pass\n\n\nclass UnacceptableMimeTypeError(Error):\n \"\"\"That is an unacceptable mimetype for this operation.\"\"\"\n\n pass\n\n\nclass MediaUploadSizeError(Error):\n \"\"\"Media is larger than the method can accept.\"\"\"\n\n pass\n\n\nclass ResumableUploadError(HttpError):\n \"\"\"Error occurred during resumable upload.\"\"\"\n\n pass\n\n\nclass InvalidChunkSizeError(Error):\n \"\"\"The given chunksize is not valid.\"\"\"\n\n pass\n\n\nclass InvalidNotificationError(Error):\n \"\"\"The channel Notification is invalid.\"\"\"\n\n pass\n\n\nclass BatchError(HttpError):\n \"\"\"Error occurred during batch operations.\"\"\"\n\n @util.positional(2)\n def __init__(self, reason, resp=None, content=None):\n self.resp = resp\n self.content = content\n self.reason = reason\n\n def __repr__(self):\n if getattr(self.resp, \"status\", None) is None:\n return '<BatchError \"%s\">' % (self.reason)\n else:\n return '<BatchError %s \"%s\">' % (self.resp.status, self.reason)\n\n __str__ = __repr__\n\n\nclass UnexpectedMethodError(Error):\n \"\"\"Exception raised by RequestMockBuilder on unexpected calls.\"\"\"\n\n @util.positional(1)\n def __init__(self, methodId=None):\n \"\"\"Constructor for an UnexpectedMethodError.\"\"\"\n super(UnexpectedMethodError, self).__init__(\n \"Received unexpected call %s\" % methodId\n )\n\n\nclass UnexpectedBodyError(Error):\n \"\"\"Exception raised by RequestMockBuilder on unexpected bodies.\"\"\"\n\n def __init__(self, expected, provided):\n \"\"\"Constructor for an UnexpectedMethodError.\"\"\"\n super(UnexpectedBodyError, self).__init__(\n \"Expected: [%s] - Provided: [%s]\" % (expected, provided)\n )\n", "path": "googleapiclient/errors.py"}]}
num_tokens: 2,261
num_tokens_diff: 170
problem_id: gh_patches_debug_3921
source: rasdani/github-patches
task_type: git_diff
in_source_id: bokeh__bokeh-5427
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- embed/animated fails with "did not find model" `Error rendering Bokeh items Error: Did not find model ID_HERE in session`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/embed/animated.py` Content: ``` 1 """ To view this example, first start a Bokeh server: 2 3 bokeh serve --allow-websocket-origin=localhost:8000 4 5 And then load the example into the Bokeh server by 6 running the script: 7 8 python animated.py 9 10 in this directory. Finally, start a simple web server 11 by running: 12 13 python -m SimpleHTTPServer (python 2) 14 15 or 16 17 python -m http.server (python 3) 18 19 in this directory. Navigate to 20 21 http://localhost:8000/animated.html 22 23 """ 24 from __future__ import print_function 25 26 from numpy import pi, cos, sin, linspace, roll 27 28 from bokeh.client import push_session 29 from bokeh.embed import autoload_server 30 from bokeh.plotting import figure, curdoc 31 32 M = 5 33 N = M*10 + 1 34 r_base = 8 35 theta = linspace(0, 2*pi, N) 36 r_x = linspace(0, 6*pi, N-1) 37 rmin = r_base - cos(r_x) - 1 38 rmax = r_base + sin(r_x) + 1 39 40 colors = ["FFFFCC", "#C7E9B4", "#7FCDBB", "#41B6C4", "#2C7FB8", 41 "#253494", "#2C7FB8", "#41B6C4", "#7FCDBB", "#C7E9B4"] * 5 42 43 # figure() function auto-adds the figure to curdoc() 44 p = figure(x_range=(-11, 11), y_range=(-11, 11)) 45 r = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:], 46 fill_color=colors, line_color="white") 47 48 # open a session which will keep our local doc in sync with server 49 session = push_session(curdoc()) 50 51 html = """ 52 <html> 53 <head></head> 54 <body> 55 %s 56 </body> 57 </html> 58 """ % autoload_server(p, session_id=session.id) 59 60 with open("animated.html", "w+") as f: 61 f.write(html) 62 63 print(__doc__) 64 65 ds = r.data_source 66 67 def update(): 68 rmin = roll(ds.data["inner_radius"], 1) 69 rmax = roll(ds.data["outer_radius"], -1) 70 ds.data.update(inner_radius=rmin, outer_radius=rmax) 71 72 curdoc().add_periodic_callback(update, 30) 73 74 session.loop_until_closed() # run forever 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/embed/animated.py b/examples/embed/animated.py --- a/examples/embed/animated.py +++ b/examples/embed/animated.py @@ -45,6 +45,9 @@ r = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:], fill_color=colors, line_color="white") +# add the plot to curdoc +curdoc().add_root(p) + # open a session which will keep our local doc in sync with server session = push_session(curdoc())
{"golden_diff": "diff --git a/examples/embed/animated.py b/examples/embed/animated.py\n--- a/examples/embed/animated.py\n+++ b/examples/embed/animated.py\n@@ -45,6 +45,9 @@\n r = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],\n fill_color=colors, line_color=\"white\")\n \n+# add the plot to curdoc\n+curdoc().add_root(p)\n+\n # open a session which will keep our local doc in sync with server\n session = push_session(curdoc())\n", "issue": "embed/animated fails with \"did not find model\"\n`Error rendering Bokeh items Error: Did not find model ID_HERE in session`.\n\n", "before_files": [{"content": "\"\"\" To view this example, first start a Bokeh server:\n\n bokeh serve --allow-websocket-origin=localhost:8000\n\nAnd then load the example into the Bokeh server by\nrunning the script:\n\n python animated.py\n\nin this directory. Finally, start a simple web server\nby running:\n\n python -m SimpleHTTPServer (python 2)\n\nor\n\n python -m http.server (python 3)\n\nin this directory. Navigate to\n\n http://localhost:8000/animated.html\n\n\"\"\"\nfrom __future__ import print_function\n\nfrom numpy import pi, cos, sin, linspace, roll\n\nfrom bokeh.client import push_session\nfrom bokeh.embed import autoload_server\nfrom bokeh.plotting import figure, curdoc\n\nM = 5\nN = M*10 + 1\nr_base = 8\ntheta = linspace(0, 2*pi, N)\nr_x = linspace(0, 6*pi, N-1)\nrmin = r_base - cos(r_x) - 1\nrmax = r_base + sin(r_x) + 1\n\ncolors = [\"FFFFCC\", \"#C7E9B4\", \"#7FCDBB\", \"#41B6C4\", \"#2C7FB8\",\n \"#253494\", \"#2C7FB8\", \"#41B6C4\", \"#7FCDBB\", \"#C7E9B4\"] * 5\n\n# figure() function auto-adds the figure to curdoc()\np = figure(x_range=(-11, 11), y_range=(-11, 11))\nr = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],\n fill_color=colors, line_color=\"white\")\n\n# open a session which will keep our local doc in sync with server\nsession = push_session(curdoc())\n\nhtml = \"\"\"\n<html>\n <head></head>\n <body>\n %s\n </body>\n</html>\n\"\"\" % autoload_server(p, session_id=session.id)\n\nwith open(\"animated.html\", \"w+\") as f:\n f.write(html)\n\nprint(__doc__)\n\nds = r.data_source\n\ndef update():\n rmin = roll(ds.data[\"inner_radius\"], 1)\n rmax = roll(ds.data[\"outer_radius\"], -1)\n ds.data.update(inner_radius=rmin, outer_radius=rmax)\n\ncurdoc().add_periodic_callback(update, 30)\n\nsession.loop_until_closed() # run forever\n", "path": "examples/embed/animated.py"}], "after_files": [{"content": "\"\"\" To view this example, first start a Bokeh server:\n\n bokeh serve --allow-websocket-origin=localhost:8000\n\nAnd then load the example into the Bokeh server by\nrunning the script:\n\n python animated.py\n\nin this directory. Finally, start a simple web server\nby running:\n\n python -m SimpleHTTPServer (python 2)\n\nor\n\n python -m http.server (python 3)\n\nin this directory. 
Navigate to\n\n http://localhost:8000/animated.html\n\n\"\"\"\nfrom __future__ import print_function\n\nfrom numpy import pi, cos, sin, linspace, roll\n\nfrom bokeh.client import push_session\nfrom bokeh.embed import autoload_server\nfrom bokeh.plotting import figure, curdoc\n\nM = 5\nN = M*10 + 1\nr_base = 8\ntheta = linspace(0, 2*pi, N)\nr_x = linspace(0, 6*pi, N-1)\nrmin = r_base - cos(r_x) - 1\nrmax = r_base + sin(r_x) + 1\n\ncolors = [\"FFFFCC\", \"#C7E9B4\", \"#7FCDBB\", \"#41B6C4\", \"#2C7FB8\",\n \"#253494\", \"#2C7FB8\", \"#41B6C4\", \"#7FCDBB\", \"#C7E9B4\"] * 5\n\n# figure() function auto-adds the figure to curdoc()\np = figure(x_range=(-11, 11), y_range=(-11, 11))\nr = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],\n fill_color=colors, line_color=\"white\")\n\n# add the plot to curdoc\ncurdoc().add_root(p)\n\n# open a session which will keep our local doc in sync with server\nsession = push_session(curdoc())\n\nhtml = \"\"\"\n<html>\n <head></head>\n <body>\n %s\n </body>\n</html>\n\"\"\" % autoload_server(p, session_id=session.id)\n\nwith open(\"animated.html\", \"w+\") as f:\n f.write(html)\n\nprint(__doc__)\n\nds = r.data_source\n\ndef update():\n rmin = roll(ds.data[\"inner_radius\"], 1)\n rmax = roll(ds.data[\"outer_radius\"], -1)\n ds.data.update(inner_radius=rmin, outer_radius=rmax)\n\ncurdoc().add_periodic_callback(update, 30)\n\nsession.loop_until_closed() # run forever\n", "path": "examples/embed/animated.py"}]}
num_tokens: 997
num_tokens_diff: 124
problem_id: gh_patches_debug_18219
source: rasdani/github-patches
task_type: git_diff
in_source_id: google-deepmind__optax-111
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Implement log(cosh()) loss function log-cosh is a doubly differentiable alternative to the huber loss. A naive implementation is prone to overflow (since cosh has an e^x term), so I think it'd be a useful addition to the library. Plus, it's implemented in other libraries, such as [TensorFlow](https://www.tensorflow.org/api_docs/python/tf/keras/losses/log_cosh). If this sounds like a relevant addition, I'd be happy to contribute it! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `optax/_src/loss.py` Content: ``` 1 # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 """Standard losses used in optimisation. 16 17 We provide implementations of the most canonical losses used in deep 18 learning. These operate transparently on batches, and do not perform any 19 reduction over the batch dimensions, leaving it to the user to, for instance, 20 mean or sum losses across batch dimensions. 21 """ 22 23 from typing import Optional 24 25 import chex 26 import jax 27 import jax.numpy as jnp 28 29 from optax._src import utils 30 31 32 def l2_loss( 33 predictions: chex.Array, 34 targets: Optional[chex.Array] = None, 35 ) -> chex.Array: 36 """Calculates the L2 loss for a set of predictions. 37 38 Note: the 0.5 term is standard in "Pattern Recognition and Machine Learning" 39 by Bishop, but not "The Elements of Statistical Learning" by Tibshirani. 40 41 References: 42 [Chris Bishop, 2006](https://bit.ly/3eeP0ga) 43 44 Args: 45 predictions: a vector of arbitrary shape. 46 targets: a vector of shape compatible with predictions; if not provides 47 then it is assumed to be zero. 48 49 Returns: 50 the squared error loss. 51 """ 52 chex.assert_type([predictions], float) 53 errors = (predictions - targets) if (targets is not None) else predictions 54 return 0.5 * (errors)**2 55 56 57 def huber_loss( 58 predictions: chex.Array, 59 targets: Optional[chex.Array] = None, 60 delta: float = 1.) -> chex.Array: 61 """Huber loss, similar to L2 loss close to zero, L1 loss away from zero. 62 63 If gradient descent is applied to the `huber loss`, it is equivalent to 64 clipping gradients of an `l2_loss` to `[-delta, delta]` in the backward pass. 65 66 References: 67 [Huber, 1964](www.projecteuclid.org/download/pdf_1/euclid.aoms/1177703732) 68 69 Args: 70 predictions: a vector of arbitrary shape. 71 targets: a vector of shape compatible with predictions; if not provides 72 then it is assumed to be zero. 73 delta: the bounds for the huber loss transformation, defaults at 1. 74 75 Returns: 76 a vector of same shape of `x`. 
77 """ 78 chex.assert_type([predictions], float) 79 errors = (predictions - targets) if (targets is not None) else predictions 80 # 0.5 * err^2 if |err| <= d 81 # 0.5 * d^2 + d * (|err| - d) if |err| > d 82 abs_errors = jnp.abs(errors) 83 quadratic = jnp.minimum(abs_errors, delta) 84 # Same as max(abs_x - delta, 0) but avoids potentially doubling gradient. 85 linear = abs_errors - quadratic 86 return 0.5 * quadratic ** 2 + delta * linear 87 88 89 def smooth_labels( 90 labels: chex.Array, 91 alpha: float, 92 ) -> jnp.ndarray: 93 """Apply label smoothing. 94 95 Label smoothing is often used in combination with a cross-entropy loss. 96 Smoothed labels favour small logit gaps, and it has been shown that this can 97 provide better model calibration by preventing overconfident predictions. 98 99 References: 100 [Müller et al, 2019](https://arxiv.org/pdf/1906.02629.pdf) 101 102 Args: 103 labels: one hot labels to be smoothed. 104 alpha: the smoothing factor, the greedy category with be assigned 105 probability `(1-alpha) + alpha / num_categories` 106 107 Returns: 108 a smoothed version of the one hot input labels. 109 110 """ 111 chex.assert_type([labels], float) 112 num_categories = labels.shape[-1] 113 return (1.0 - alpha) * labels + alpha / num_categories 114 115 116 def sigmoid_binary_cross_entropy(logits, labels): 117 """Computes sigmoid cross entropy given logits and multiple class labels. 118 119 Measures the probability error in discrete classification tasks in which 120 each class is an independent binary prediction and different classes are 121 not mutually exclusive. This may be used for multilabel image classification 122 for instance a model may predict that an image contains both a cat and a dog. 123 124 References: 125 [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html) 126 127 Args: 128 logits: unnormalized log probabilities. 129 labels: the probability for that class. 130 131 Returns: 132 a sigmoid cross entropy loss. 133 """ 134 chex.assert_equal_shape([logits, labels]) 135 chex.assert_type([logits, labels], float) 136 log_p = jax.nn.log_sigmoid(logits) 137 # log(1 - sigmoid(x)) = log_sigmoid(-x), the latter more numerically stable 138 log_not_p = jax.nn.log_sigmoid(-logits) 139 return -labels * log_p - (1. - labels) * log_not_p 140 141 142 def softmax_cross_entropy( 143 logits: chex.Array, 144 labels: chex.Array, 145 ) -> chex.Array: 146 """Computes the softmax cross entropy between sets of logits and labels. 147 148 Measures the probability error in discrete classification tasks in which 149 the classes are mutually exclusive (each entry is in exactly one class). 150 For example, each CIFAR-10 image is labeled with one and only one label: 151 an image can be a dog or a truck, but not both. 152 153 References: 154 [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html) 155 156 Args: 157 logits: unnormalized log probabilities. 158 labels: a valid probability distribution (non-negative, sum to 1), e.g a 159 one hot encoding of which class is the correct one for each input. 160 161 Returns: 162 the cross entropy loss. 163 """ 164 chex.assert_equal_shape([logits, labels]) 165 chex.assert_type([logits, labels], float) 166 return -jnp.sum(labels * jax.nn.log_softmax(logits, axis=-1), axis=-1) 167 168 169 def cosine_similarity( 170 predictions: chex.Array, 171 targets: chex.Array, 172 epsilon: float = 0., 173 ) -> chex.Array: 174 r"""Computes the cosine similarity between targets and predictions. 
175 176 The cosine **similarity** is a measure of similarity between vectors defined 177 as the cosine of the angle between them, which is also the inner product of 178 those vectors normalized to have unit norm. 179 180 References: 181 [Wikipedia, 2021](https://en.wikipedia.org/wiki/Cosine_similarity) 182 183 Args: 184 predictions: The predicted vector. 185 targets: Ground truth target vector. 186 epsilon: minimum norm for terms in the denominator of the cosine similarity. 187 188 Returns: 189 cosine similarity values. 190 """ 191 chex.assert_equal_shape([targets, predictions]) 192 chex.assert_type([targets, predictions], float) 193 # vectorize norm fn, to treat all dimensions except the last as batch dims. 194 batched_norm_fn = jnp.vectorize( 195 utils.safe_norm, signature='(k)->()', excluded={1}) 196 # normalise the last dimension of targets and predictions. 197 unit_targets = targets / jnp.expand_dims( 198 batched_norm_fn(targets, epsilon), axis=-1) 199 unit_predictions = predictions / jnp.expand_dims( 200 batched_norm_fn(predictions, epsilon), axis=-1) 201 # return cosine similarity. 202 return jnp.sum(unit_targets * unit_predictions, axis=-1) 203 204 205 def cosine_distance( 206 predictions: chex.Array, 207 targets: chex.Array, 208 epsilon: float = 0., 209 ) -> chex.Array: 210 r"""Computes the cosine distance between targets and predictions. 211 212 The cosine **distance**, implemented here, measures the **dissimilarity** 213 of two vectors as the opposite of cosine **similarity**: `1 - cos(\theta)`. 214 215 References: 216 [Wikipedia, 2021](https://en.wikipedia.org/wiki/Cosine_similarity) 217 218 Args: 219 predictions: The predicted vector. 220 targets: Ground truth target vector. 221 epsilon: minimum norm for terms in the denominator of the cosine similarity. 222 223 Returns: 224 cosine similarity values. 225 """ 226 chex.assert_equal_shape([targets, predictions]) 227 chex.assert_type([targets, predictions], float) 228 # cosine distance = 1 - cosine similarity. 229 return 1. - cosine_similarity(predictions, targets, epsilon) 230 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/optax/_src/loss.py b/optax/_src/loss.py --- a/optax/_src/loss.py +++ b/optax/_src/loss.py @@ -227,3 +227,29 @@ chex.assert_type([targets, predictions], float) # cosine distance = 1 - cosine similarity. return 1. - cosine_similarity(predictions, targets, epsilon) + + +def log_cosh( + predictions: chex.Array, + targets: Optional[chex.Array] = None, +) -> chex.Array: + """Calculates the log-cosh loss for a set of predictions. + + log(cosh(x)) is approximately `(x**2) / 2` for small x and `abs(x) - log(2)` + for large x. It is a twice differentiable alternative to the Huber loss. + + References: + [Chen et al, 2019](https://openreview.net/pdf?id=rkglvsC9Ym) + + Args: + predictions: a vector of arbitrary shape. + targets: a vector of shape compatible with predictions; if not provided + then it is assumed to be zero. + + Returns: + the log-cosh loss. + """ + chex.assert_type([predictions], float) + errors = (predictions - targets) if (targets is not None) else predictions + # log(cosh(x)) = log((exp(x) + exp(-x))/2) = log(exp(x) + exp(-x)) - log(2) + return jnp.logaddexp(errors, -errors) - jnp.log(2.0).astype(errors.dtype)
{"golden_diff": "diff --git a/optax/_src/loss.py b/optax/_src/loss.py\n--- a/optax/_src/loss.py\n+++ b/optax/_src/loss.py\n@@ -227,3 +227,29 @@\n chex.assert_type([targets, predictions], float)\n # cosine distance = 1 - cosine similarity.\n return 1. - cosine_similarity(predictions, targets, epsilon)\n+\n+\n+def log_cosh(\n+ predictions: chex.Array,\n+ targets: Optional[chex.Array] = None,\n+) -> chex.Array:\n+ \"\"\"Calculates the log-cosh loss for a set of predictions.\n+\n+ log(cosh(x)) is approximately `(x**2) / 2` for small x and `abs(x) - log(2)`\n+ for large x. It is a twice differentiable alternative to the Huber loss.\n+\n+ References:\n+ [Chen et al, 2019](https://openreview.net/pdf?id=rkglvsC9Ym)\n+\n+ Args:\n+ predictions: a vector of arbitrary shape.\n+ targets: a vector of shape compatible with predictions; if not provided\n+ then it is assumed to be zero.\n+\n+ Returns:\n+ the log-cosh loss.\n+ \"\"\"\n+ chex.assert_type([predictions], float)\n+ errors = (predictions - targets) if (targets is not None) else predictions\n+ # log(cosh(x)) = log((exp(x) + exp(-x))/2) = log(exp(x) + exp(-x)) - log(2)\n+ return jnp.logaddexp(errors, -errors) - jnp.log(2.0).astype(errors.dtype)\n", "issue": "Implement log(cosh()) loss function\nlog-cosh is a doubly differentiable alternative to the huber loss. A naive implementation is prone to overflow (since cosh has an e^x term), so I think it'd be a useful addition to the library. Plus, it's implemented in other libraries, such as [TensorFlow](https://www.tensorflow.org/api_docs/python/tf/keras/losses/log_cosh).\r\n\r\nIf this sounds like a relevant addition, I'd be happy to contribute it!\n", "before_files": [{"content": "# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Standard losses used in optimisation.\n\nWe provide implementations of the most canonical losses used in deep\nlearning. 
These operate transparently on batches, and do not perform any\nreduction over the batch dimensions, leaving it to the user to, for instance,\nmean or sum losses across batch dimensions.\n\"\"\"\n\nfrom typing import Optional\n\nimport chex\nimport jax\nimport jax.numpy as jnp\n\nfrom optax._src import utils\n\n\ndef l2_loss(\n predictions: chex.Array,\n targets: Optional[chex.Array] = None,\n) -> chex.Array:\n \"\"\"Calculates the L2 loss for a set of predictions.\n\n Note: the 0.5 term is standard in \"Pattern Recognition and Machine Learning\"\n by Bishop, but not \"The Elements of Statistical Learning\" by Tibshirani.\n\n References:\n [Chris Bishop, 2006](https://bit.ly/3eeP0ga)\n\n Args:\n predictions: a vector of arbitrary shape.\n targets: a vector of shape compatible with predictions; if not provides\n then it is assumed to be zero.\n\n Returns:\n the squared error loss.\n \"\"\"\n chex.assert_type([predictions], float)\n errors = (predictions - targets) if (targets is not None) else predictions\n return 0.5 * (errors)**2\n\n\ndef huber_loss(\n predictions: chex.Array,\n targets: Optional[chex.Array] = None,\n delta: float = 1.) -> chex.Array:\n \"\"\"Huber loss, similar to L2 loss close to zero, L1 loss away from zero.\n\n If gradient descent is applied to the `huber loss`, it is equivalent to\n clipping gradients of an `l2_loss` to `[-delta, delta]` in the backward pass.\n\n References:\n [Huber, 1964](www.projecteuclid.org/download/pdf_1/euclid.aoms/1177703732)\n\n Args:\n predictions: a vector of arbitrary shape.\n targets: a vector of shape compatible with predictions; if not provides\n then it is assumed to be zero.\n delta: the bounds for the huber loss transformation, defaults at 1.\n\n Returns:\n a vector of same shape of `x`.\n \"\"\"\n chex.assert_type([predictions], float)\n errors = (predictions - targets) if (targets is not None) else predictions\n # 0.5 * err^2 if |err| <= d\n # 0.5 * d^2 + d * (|err| - d) if |err| > d\n abs_errors = jnp.abs(errors)\n quadratic = jnp.minimum(abs_errors, delta)\n # Same as max(abs_x - delta, 0) but avoids potentially doubling gradient.\n linear = abs_errors - quadratic\n return 0.5 * quadratic ** 2 + delta * linear\n\n\ndef smooth_labels(\n labels: chex.Array,\n alpha: float,\n) -> jnp.ndarray:\n \"\"\"Apply label smoothing.\n\n Label smoothing is often used in combination with a cross-entropy loss.\n Smoothed labels favour small logit gaps, and it has been shown that this can\n provide better model calibration by preventing overconfident predictions.\n\n References:\n [M\u00fcller et al, 2019](https://arxiv.org/pdf/1906.02629.pdf)\n\n Args:\n labels: one hot labels to be smoothed.\n alpha: the smoothing factor, the greedy category with be assigned\n probability `(1-alpha) + alpha / num_categories`\n\n Returns:\n a smoothed version of the one hot input labels.\n\n \"\"\"\n chex.assert_type([labels], float)\n num_categories = labels.shape[-1]\n return (1.0 - alpha) * labels + alpha / num_categories\n\n\ndef sigmoid_binary_cross_entropy(logits, labels):\n \"\"\"Computes sigmoid cross entropy given logits and multiple class labels.\n\n Measures the probability error in discrete classification tasks in which\n each class is an independent binary prediction and different classes are\n not mutually exclusive. 
This may be used for multilabel image classification\n for instance a model may predict that an image contains both a cat and a dog.\n\n References:\n [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)\n\n Args:\n logits: unnormalized log probabilities.\n labels: the probability for that class.\n\n Returns:\n a sigmoid cross entropy loss.\n \"\"\"\n chex.assert_equal_shape([logits, labels])\n chex.assert_type([logits, labels], float)\n log_p = jax.nn.log_sigmoid(logits)\n # log(1 - sigmoid(x)) = log_sigmoid(-x), the latter more numerically stable\n log_not_p = jax.nn.log_sigmoid(-logits)\n return -labels * log_p - (1. - labels) * log_not_p\n\n\ndef softmax_cross_entropy(\n logits: chex.Array,\n labels: chex.Array,\n) -> chex.Array:\n \"\"\"Computes the softmax cross entropy between sets of logits and labels.\n\n Measures the probability error in discrete classification tasks in which\n the classes are mutually exclusive (each entry is in exactly one class).\n For example, each CIFAR-10 image is labeled with one and only one label:\n an image can be a dog or a truck, but not both.\n\n References:\n [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)\n\n Args:\n logits: unnormalized log probabilities.\n labels: a valid probability distribution (non-negative, sum to 1), e.g a\n one hot encoding of which class is the correct one for each input.\n\n Returns:\n the cross entropy loss.\n \"\"\"\n chex.assert_equal_shape([logits, labels])\n chex.assert_type([logits, labels], float)\n return -jnp.sum(labels * jax.nn.log_softmax(logits, axis=-1), axis=-1)\n\n\ndef cosine_similarity(\n predictions: chex.Array,\n targets: chex.Array,\n epsilon: float = 0.,\n) -> chex.Array:\n r\"\"\"Computes the cosine similarity between targets and predictions.\n\n The cosine **similarity** is a measure of similarity between vectors defined\n as the cosine of the angle between them, which is also the inner product of\n those vectors normalized to have unit norm.\n\n References:\n [Wikipedia, 2021](https://en.wikipedia.org/wiki/Cosine_similarity)\n\n Args:\n predictions: The predicted vector.\n targets: Ground truth target vector.\n epsilon: minimum norm for terms in the denominator of the cosine similarity.\n\n Returns:\n cosine similarity values.\n \"\"\"\n chex.assert_equal_shape([targets, predictions])\n chex.assert_type([targets, predictions], float)\n # vectorize norm fn, to treat all dimensions except the last as batch dims.\n batched_norm_fn = jnp.vectorize(\n utils.safe_norm, signature='(k)->()', excluded={1})\n # normalise the last dimension of targets and predictions.\n unit_targets = targets / jnp.expand_dims(\n batched_norm_fn(targets, epsilon), axis=-1)\n unit_predictions = predictions / jnp.expand_dims(\n batched_norm_fn(predictions, epsilon), axis=-1)\n # return cosine similarity.\n return jnp.sum(unit_targets * unit_predictions, axis=-1)\n\n\ndef cosine_distance(\n predictions: chex.Array,\n targets: chex.Array,\n epsilon: float = 0.,\n) -> chex.Array:\n r\"\"\"Computes the cosine distance between targets and predictions.\n\n The cosine **distance**, implemented here, measures the **dissimilarity**\n of two vectors as the opposite of cosine **similarity**: `1 - cos(\\theta)`.\n\n References:\n [Wikipedia, 2021](https://en.wikipedia.org/wiki/Cosine_similarity)\n\n Args:\n predictions: The predicted vector.\n targets: Ground truth target vector.\n epsilon: minimum norm for terms in the denominator of the cosine similarity.\n\n Returns:\n cosine 
similarity values.\n \"\"\"\n chex.assert_equal_shape([targets, predictions])\n chex.assert_type([targets, predictions], float)\n # cosine distance = 1 - cosine similarity.\n return 1. - cosine_similarity(predictions, targets, epsilon)\n", "path": "optax/_src/loss.py"}], "after_files": [{"content": "# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Standard losses used in optimisation.\n\nWe provide implementations of the most canonical losses used in deep\nlearning. These operate transparently on batches, and do not perform any\nreduction over the batch dimensions, leaving it to the user to, for instance,\nmean or sum losses across batch dimensions.\n\"\"\"\n\nfrom typing import Optional\n\nimport chex\nimport jax\nimport jax.numpy as jnp\n\nfrom optax._src import utils\n\n\ndef l2_loss(\n predictions: chex.Array,\n targets: Optional[chex.Array] = None,\n) -> chex.Array:\n \"\"\"Calculates the L2 loss for a set of predictions.\n\n Note: the 0.5 term is standard in \"Pattern Recognition and Machine Learning\"\n by Bishop, but not \"The Elements of Statistical Learning\" by Tibshirani.\n\n References:\n [Chris Bishop, 2006](https://bit.ly/3eeP0ga)\n\n Args:\n predictions: a vector of arbitrary shape.\n targets: a vector of shape compatible with predictions; if not provides\n then it is assumed to be zero.\n\n Returns:\n the squared error loss.\n \"\"\"\n chex.assert_type([predictions], float)\n errors = (predictions - targets) if (targets is not None) else predictions\n return 0.5 * (errors)**2\n\n\ndef huber_loss(\n predictions: chex.Array,\n targets: Optional[chex.Array] = None,\n delta: float = 1.) 
-> chex.Array:\n \"\"\"Huber loss, similar to L2 loss close to zero, L1 loss away from zero.\n\n If gradient descent is applied to the `huber loss`, it is equivalent to\n clipping gradients of an `l2_loss` to `[-delta, delta]` in the backward pass.\n\n References:\n [Huber, 1964](www.projecteuclid.org/download/pdf_1/euclid.aoms/1177703732)\n\n Args:\n predictions: a vector of arbitrary shape.\n targets: a vector of shape compatible with predictions; if not provides\n then it is assumed to be zero.\n delta: the bounds for the huber loss transformation, defaults at 1.\n\n Returns:\n a vector of same shape of `x`.\n \"\"\"\n chex.assert_type([predictions], float)\n errors = (predictions - targets) if (targets is not None) else predictions\n # 0.5 * err^2 if |err| <= d\n # 0.5 * d^2 + d * (|err| - d) if |err| > d\n abs_errors = jnp.abs(errors)\n quadratic = jnp.minimum(abs_errors, delta)\n # Same as max(abs_x - delta, 0) but avoids potentially doubling gradient.\n linear = abs_errors - quadratic\n return 0.5 * quadratic ** 2 + delta * linear\n\n\ndef smooth_labels(\n labels: chex.Array,\n alpha: float,\n) -> jnp.ndarray:\n \"\"\"Apply label smoothing.\n\n Label smoothing is often used in combination with a cross-entropy loss.\n Smoothed labels favour small logit gaps, and it has been shown that this can\n provide better model calibration by preventing overconfident predictions.\n\n References:\n [M\u00fcller et al, 2019](https://arxiv.org/pdf/1906.02629.pdf)\n\n Args:\n labels: one hot labels to be smoothed.\n alpha: the smoothing factor, the greedy category with be assigned\n probability `(1-alpha) + alpha / num_categories`\n\n Returns:\n a smoothed version of the one hot input labels.\n\n \"\"\"\n chex.assert_type([labels], float)\n num_categories = labels.shape[-1]\n return (1.0 - alpha) * labels + alpha / num_categories\n\n\ndef sigmoid_binary_cross_entropy(logits, labels):\n \"\"\"Computes sigmoid cross entropy given logits and multiple class labels.\n\n Measures the probability error in discrete classification tasks in which\n each class is an independent binary prediction and different classes are\n not mutually exclusive. This may be used for multilabel image classification\n for instance a model may predict that an image contains both a cat and a dog.\n\n References:\n [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)\n\n Args:\n logits: unnormalized log probabilities.\n labels: the probability for that class.\n\n Returns:\n a sigmoid cross entropy loss.\n \"\"\"\n chex.assert_equal_shape([logits, labels])\n chex.assert_type([logits, labels], float)\n log_p = jax.nn.log_sigmoid(logits)\n # log(1 - sigmoid(x)) = log_sigmoid(-x), the latter more numerically stable\n log_not_p = jax.nn.log_sigmoid(-logits)\n return -labels * log_p - (1. 
- labels) * log_not_p\n\n\ndef softmax_cross_entropy(\n logits: chex.Array,\n labels: chex.Array,\n) -> chex.Array:\n \"\"\"Computes the softmax cross entropy between sets of logits and labels.\n\n Measures the probability error in discrete classification tasks in which\n the classes are mutually exclusive (each entry is in exactly one class).\n For example, each CIFAR-10 image is labeled with one and only one label:\n an image can be a dog or a truck, but not both.\n\n References:\n [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)\n\n Args:\n logits: unnormalized log probabilities.\n labels: a valid probability distribution (non-negative, sum to 1), e.g a\n one hot encoding of which class is the correct one for each input.\n\n Returns:\n the cross entropy loss.\n \"\"\"\n chex.assert_equal_shape([logits, labels])\n chex.assert_type([logits, labels], float)\n return -jnp.sum(labels * jax.nn.log_softmax(logits, axis=-1), axis=-1)\n\n\ndef cosine_similarity(\n predictions: chex.Array,\n targets: chex.Array,\n epsilon: float = 0.,\n) -> chex.Array:\n r\"\"\"Computes the cosine similarity between targets and predictions.\n\n The cosine **similarity** is a measure of similarity between vectors defined\n as the cosine of the angle between them, which is also the inner product of\n those vectors normalized to have unit norm.\n\n References:\n [Wikipedia, 2021](https://en.wikipedia.org/wiki/Cosine_similarity)\n\n Args:\n predictions: The predicted vector.\n targets: Ground truth target vector.\n epsilon: minimum norm for terms in the denominator of the cosine similarity.\n\n Returns:\n cosine similarity values.\n \"\"\"\n chex.assert_equal_shape([targets, predictions])\n chex.assert_type([targets, predictions], float)\n # vectorize norm fn, to treat all dimensions except the last as batch dims.\n batched_norm_fn = jnp.vectorize(\n utils.safe_norm, signature='(k)->()', excluded={1})\n # normalise the last dimension of targets and predictions.\n unit_targets = targets / jnp.expand_dims(\n batched_norm_fn(targets, epsilon), axis=-1)\n unit_predictions = predictions / jnp.expand_dims(\n batched_norm_fn(predictions, epsilon), axis=-1)\n # return cosine similarity.\n return jnp.sum(unit_targets * unit_predictions, axis=-1)\n\n\ndef cosine_distance(\n predictions: chex.Array,\n targets: chex.Array,\n epsilon: float = 0.,\n) -> chex.Array:\n r\"\"\"Computes the cosine distance between targets and predictions.\n\n The cosine **distance**, implemented here, measures the **dissimilarity**\n of two vectors as the opposite of cosine **similarity**: `1 - cos(\\theta)`.\n\n References:\n [Wikipedia, 2021](https://en.wikipedia.org/wiki/Cosine_similarity)\n\n Args:\n predictions: The predicted vector.\n targets: Ground truth target vector.\n epsilon: minimum norm for terms in the denominator of the cosine similarity.\n\n Returns:\n cosine similarity values.\n \"\"\"\n chex.assert_equal_shape([targets, predictions])\n chex.assert_type([targets, predictions], float)\n # cosine distance = 1 - cosine similarity.\n return 1. - cosine_similarity(predictions, targets, epsilon)\n\n\ndef log_cosh(\n predictions: chex.Array,\n targets: Optional[chex.Array] = None,\n) -> chex.Array:\n \"\"\"Calculates the log-cosh loss for a set of predictions.\n\n log(cosh(x)) is approximately `(x**2) / 2` for small x and `abs(x) - log(2)`\n for large x. 
It is a twice differentiable alternative to the Huber loss.\n\n References:\n [Chen et al, 2019](https://openreview.net/pdf?id=rkglvsC9Ym)\n\n Args:\n predictions: a vector of arbitrary shape.\n targets: a vector of shape compatible with predictions; if not provided\n then it is assumed to be zero.\n\n Returns:\n the log-cosh loss.\n \"\"\"\n chex.assert_type([predictions], float)\n errors = (predictions - targets) if (targets is not None) else predictions\n # log(cosh(x)) = log((exp(x) + exp(-x))/2) = log(exp(x) + exp(-x)) - log(2)\n return jnp.logaddexp(errors, -errors) - jnp.log(2.0).astype(errors.dtype)\n", "path": "optax/_src/loss.py"}]}
2,945
378
gh_patches_debug_43313
rasdani/github-patches
git_diff
microsoft__hi-ml-161
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DatasetConfig with empty target_folder creates invalid pathOneCompute argument target_folder=="" will turn into PosixPath("."), but that creates problems on AML. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `hi-ml-azure/src/health_azure/datasets.py` Content: ``` 1 # ------------------------------------------------------------------------------------------ 2 # Copyright (c) Microsoft Corporation. All rights reserved. 3 # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. 4 # ------------------------------------------------------------------------------------------ 5 import logging 6 import tempfile 7 from pathlib import Path 8 from typing import List, Optional, Tuple, Union 9 10 from azureml.core import Dataset, Datastore, Workspace 11 from azureml.data import FileDataset, OutputFileDatasetConfig 12 from azureml.data.dataset_consumption_config import DatasetConsumptionConfig 13 from azureml.dataprep.fuse.daemon import MountContext 14 15 from health_azure.utils import PathOrString, get_workspace 16 17 18 def get_datastore(workspace: Workspace, datastore_name: str) -> Datastore: 19 """ 20 Retrieves a datastore of a given name from an AzureML workspace. The datastore_name argument can be omitted if 21 the workspace only contains a single datastore. Raises a ValueError if there is no datastore of the given name. 22 23 :param workspace: The AzureML workspace to read from. 24 :param datastore_name: The name of the datastore to retrieve. 25 :return: An AzureML datastore. 26 """ 27 datastores = workspace.datastores 28 existing_stores = list(datastores.keys()) 29 if not datastore_name: 30 if len(existing_stores) == 1: 31 return datastores[existing_stores[0]] 32 raise ValueError("No datastore name provided. This is only possible if the workspace has a single datastore. " 33 f"However, the workspace has {len(existing_stores)} datastores: {existing_stores}") 34 if datastore_name in datastores: 35 return datastores[datastore_name] 36 raise ValueError(f"Datastore {datastore_name} was not found in the workspace. Existing datastores: " 37 f"{existing_stores}") 38 39 40 def get_or_create_dataset(workspace: Workspace, datastore_name: str, dataset_name: str) -> FileDataset: 41 """ 42 Looks in the AzureML datastore for a dataset of the given name. If there is no such dataset, a dataset is 43 created and registered, assuming that the files are in a folder that has the same name as the dataset. 
44 For example, if dataset_name is 'foo', then the 'foo' dataset should be pointing to the folder 45 <container_root>/datasets/dataset_name/ 46 """ 47 if not dataset_name: 48 raise ValueError("No dataset name provided.") 49 try: 50 logging.info(f"Trying to retrieve AzureML Dataset '{dataset_name}'") 51 azureml_dataset = Dataset.get_by_name(workspace, name=dataset_name) 52 logging.info("Dataset found.") 53 except Exception: 54 logging.info(f"Retrieving datastore '{datastore_name}' from AzureML workspace") 55 datastore = get_datastore(workspace, datastore_name) 56 logging.info(f"Creating a new dataset from data in folder '{dataset_name}' in the datastore") 57 # Ensure that there is a / at the end of the file path, otherwise folder that share a prefix could create 58 # trouble (for example, folders foo and foo_bar exist, and I'm trying to create a dataset from "foo") 59 azureml_dataset = Dataset.File.from_files(path=(datastore, dataset_name + "/")) 60 logging.info("Registering the dataset for future use.") 61 azureml_dataset.register(workspace, name=dataset_name) 62 return azureml_dataset 63 64 65 def _input_dataset_key(index: int) -> str: 66 return f"INPUT_{index}" 67 68 69 def _output_dataset_key(index: int) -> str: 70 return f"OUTPUT_{index}" 71 72 73 class DatasetConfig: 74 """ 75 Contains information to use AzureML datasets as inputs or outputs. 76 """ 77 78 def __init__(self, 79 name: str, 80 datastore: str = "", 81 version: Optional[int] = None, 82 use_mounting: Optional[bool] = None, 83 target_folder: Optional[PathOrString] = None, 84 local_folder: Optional[PathOrString] = None): 85 """ 86 :param name: The name of the dataset, as it was registered in the AzureML workspace. For output datasets, 87 this will be the name given to the newly created dataset. 88 :param datastore: The name of the AzureML datastore that holds the dataset. This can be empty if the AzureML 89 workspace has only a single datastore, or if the default datastore should be used. 90 :param version: The version of the dataset that should be used. This is only used for input datasets. 91 If the version is not specified, the latest version will be used. 92 :param use_mounting: If True, the dataset will be "mounted", that is, individual files will be read 93 or written on-demand over the network. If False, the dataset will be fully downloaded before the job starts, 94 respectively fully uploaded at job end for output datasets. 95 Defaults: False (downloading) for datasets that are script inputs, True (mounting) for datasets that are 96 script outputs. 97 :param target_folder: The folder into which the dataset should be downloaded or mounted. If left empty, a 98 random folder on /tmp will be chosen. 99 :param local_folder: The folder on the local machine at which the dataset is available. This 100 is used only for runs outside of AzureML. If this is empty then the target_folder will be used to 101 mount or download the dataset. 102 """ 103 # This class would be a good candidate for a dataclass, but having an explicit constructor makes 104 # documentation tools in the editor work nicer. 
105 name = name.strip() 106 if not name: 107 raise ValueError("The name of the dataset must be a non-empty string.") 108 self.name = name 109 self.datastore = datastore 110 self.version = version 111 self.use_mounting = use_mounting 112 self.target_folder = Path(target_folder) if target_folder is not None else None 113 self.local_folder = Path(local_folder) if local_folder is not None else None 114 115 def to_input_dataset_local(self, workspace: Optional[Workspace]) -> Tuple[Optional[Path], Optional[MountContext]]: 116 """ 117 Return a local path to the dataset when outside of an AzureML run. 118 If local_folder is supplied, then this is assumed to be a local dataset, and this is returned. 119 Otherwise the dataset is mounted or downloaded to either the target folder or a temporary folder and that is 120 returned. 121 122 :param workspace: The AzureML workspace to read from. 123 :return: Pair of optional path to dataset and optional mountcontext. 124 """ 125 status = f"Dataset {self.name} will be " 126 127 if self.local_folder is not None: 128 status += f"obtained from local folder {str(self.local_folder)}" 129 print(status) 130 return self.local_folder, None 131 132 if workspace is None: 133 status += "'None' - neither local_folder nor workspace available" 134 print(status) 135 return None, None 136 137 azureml_dataset = get_or_create_dataset(workspace=workspace, 138 dataset_name=self.name, 139 datastore_name=self.datastore) 140 141 target_path = self.target_folder or Path(tempfile.mkdtemp()) 142 use_mounting = self.use_mounting if self.use_mounting is not None else False 143 if use_mounting: 144 status += "mounted at " 145 mount_context = azureml_dataset.mount(mount_point=str(target_path)) 146 result = target_path, mount_context 147 else: 148 status += "downloaded to " 149 azureml_dataset.download(target_path=str(target_path), overwrite=False) 150 result = target_path, None 151 if self.target_folder is not None: 152 status += f"{str(self.target_folder)}." 153 else: 154 status += f"a randomly chosen folder: {target_path}." 155 print(status) 156 return result 157 158 def to_input_dataset(self, 159 workspace: Workspace, 160 dataset_index: int) -> DatasetConsumptionConfig: 161 """ 162 Creates a configuration for using an AzureML dataset inside of an AzureML run. This will make the AzureML 163 dataset with given name available as a named input, using INPUT_0 as the key for dataset index 0. 164 165 :param workspace: The AzureML workspace to read from. 166 :param dataset_index: Suffix for using datasets as named inputs, the dataset will be marked INPUT_{index} 167 """ 168 status = f"Dataset {self.name} (index {dataset_index}) will be " 169 azureml_dataset = get_or_create_dataset(workspace=workspace, 170 dataset_name=self.name, 171 datastore_name=self.datastore) 172 named_input = azureml_dataset.as_named_input(_input_dataset_key(index=dataset_index)) 173 path_on_compute = str(self.target_folder) if self.target_folder is not None else None 174 use_mounting = False if self.use_mounting is None else self.use_mounting 175 if use_mounting: 176 status += "mounted at " 177 result = named_input.as_mount(path_on_compute) 178 else: 179 status += "downloaded to " 180 result = named_input.as_download(path_on_compute) 181 if path_on_compute: 182 status += f"{path_on_compute}." 183 else: 184 status += "a randomly chosen folder." 
185 print(status) 186 return result 187 188 def to_output_dataset(self, 189 workspace: Workspace, 190 dataset_index: int) -> OutputFileDatasetConfig: 191 """ 192 Creates a configuration to write a script output to an AzureML dataset. The name and datastore of this new 193 dataset will be taken from the present object. 194 195 :param workspace: The AzureML workspace to read from. 196 :param dataset_index: Suffix for using datasets as named inputs, the dataset will be marked OUTPUT_{index} 197 :return: 198 """ 199 status = f"Output dataset {self.name} (index {dataset_index}) will be " 200 datastore = get_datastore(workspace, self.datastore) 201 dataset = OutputFileDatasetConfig(name=_output_dataset_key(index=dataset_index), 202 destination=(datastore, self.name + "/")) 203 # TODO: Can we get tags into here too? 204 dataset = dataset.register_on_complete(name=self.name) 205 if self.target_folder is not None: 206 raise ValueError("Output datasets can't have a target_folder set.") 207 use_mounting = True if self.use_mounting is None else self.use_mounting 208 if use_mounting: 209 status += "uploaded while the job runs." 210 result = dataset.as_mount() 211 else: 212 status += "uploaded when the job completes." 213 result = dataset.as_upload() 214 logging.info(status) 215 return result 216 217 218 StrOrDatasetConfig = Union[str, DatasetConfig] 219 220 221 def _replace_string_datasets(datasets: List[StrOrDatasetConfig], 222 default_datastore_name: str) -> List[DatasetConfig]: 223 """ 224 Processes a list of input or output datasets. All entries in the list that are strings are turned into 225 DatasetConfig objects, using the string as the dataset name, and pointing to the default datastore. 226 227 :param datasets: A list of datasets, each given either as a string or a DatasetConfig object. 228 :param default_datastore_name: The datastore to use for all datasets that are only specified via their name. 229 :return: A list of DatasetConfig objects, in the same order as the input list. 230 """ 231 return [DatasetConfig(name=d, datastore=default_datastore_name) if isinstance(d, str) else d 232 for d in datasets] 233 234 235 def find_workspace_for_local_datasets(aml_workspace: Optional[Workspace], 236 workspace_config_path: Optional[Path], 237 dataset_configs: List[DatasetConfig]) -> Optional[Workspace]: 238 """ 239 If any of the dataset_configs require an AzureML workspace then try to get one, otherwise return None. 240 241 :param aml_workspace: There are two optional parameters used to glean an existing AzureML Workspace. The simplest is 242 to pass it in as a parameter. 243 :param workspace_config_path: The 2nd option is to specify the path to the config.json file downloaded from the 244 Azure portal from which we can retrieve the existing Workspace. 245 :param dataset_configs: List of DatasetConfig describing the input datasets. 246 :return: Workspace if required, None otherwise. 247 """ 248 workspace: Workspace = None 249 # Check whether an attempt will be made to mount or download a dataset when running locally. 250 # If so, try to get the AzureML workspace. 
251 if any(dc.local_folder is None for dc in dataset_configs): 252 try: 253 workspace = get_workspace(aml_workspace, workspace_config_path) 254 logging.info(f"Found workspace for datasets: {workspace.name}") 255 except Exception: 256 logging.info("Could not find workspace for datasets.") 257 return workspace 258 259 260 def setup_local_datasets(aml_workspace: Optional[Workspace], 261 workspace_config_path: Optional[Path], 262 dataset_configs: List[DatasetConfig]) -> Tuple[List[Optional[Path]], List[MountContext]]: 263 """ 264 When running outside of AzureML, setup datasets to be used locally. 265 266 For each DatasetConfig, if local_folder is supplied, then this is assumed to be a local dataset, and this is 267 used. Otherwise the dataset is mounted or downloaded to either the target folder or a temporary folder and that is 268 used. 269 270 :param aml_workspace: There are two optional parameters used to glean an existing AzureML Workspace. The simplest is 271 to pass it in as a parameter. 272 :param workspace_config_path: The 2nd option is to specify the path to the config.json file downloaded from the 273 Azure portal from which we can retrieve the existing Workspace. 274 :param dataset_configs: List of DatasetConfig describing the input datasets. 275 :return: Pair of: list of optional paths to the input datasets, list of mountcontexts, one for each mounted dataset. 276 """ 277 workspace = find_workspace_for_local_datasets(aml_workspace, workspace_config_path, dataset_configs) 278 279 mounted_input_datasets: List[Optional[Path]] = [] 280 mount_contexts: List[MountContext] = [] 281 282 for d in dataset_configs: 283 target_path, mount_context = d.to_input_dataset_local(workspace) 284 285 mounted_input_datasets.append(target_path) 286 287 if mount_context is not None: 288 mount_context.start() 289 mount_contexts.append(mount_context) 290 291 return mounted_input_datasets, mount_contexts 292 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/hi-ml-azure/src/health_azure/datasets.py b/hi-ml-azure/src/health_azure/datasets.py --- a/hi-ml-azure/src/health_azure/datasets.py +++ b/hi-ml-azure/src/health_azure/datasets.py @@ -95,7 +95,7 @@ Defaults: False (downloading) for datasets that are script inputs, True (mounting) for datasets that are script outputs. :param target_folder: The folder into which the dataset should be downloaded or mounted. If left empty, a - random folder on /tmp will be chosen. + random folder on /tmp will be chosen. Do NOT use "." as the target_folder. :param local_folder: The folder on the local machine at which the dataset is available. This is used only for runs outside of AzureML. If this is empty then the target_folder will be used to mount or download the dataset. @@ -109,8 +109,11 @@ self.datastore = datastore self.version = version self.use_mounting = use_mounting - self.target_folder = Path(target_folder) if target_folder is not None else None - self.local_folder = Path(local_folder) if local_folder is not None else None + # If target_folder is "" then convert to None + self.target_folder = Path(target_folder) if target_folder else None + if str(self.target_folder) == ".": + raise ValueError("Can't mount or download a dataset to the current working directory.") + self.local_folder = Path(local_folder) if local_folder else None def to_input_dataset_local(self, workspace: Optional[Workspace]) -> Tuple[Optional[Path], Optional[MountContext]]: """ @@ -137,7 +140,6 @@ azureml_dataset = get_or_create_dataset(workspace=workspace, dataset_name=self.name, datastore_name=self.datastore) - target_path = self.target_folder or Path(tempfile.mkdtemp()) use_mounting = self.use_mounting if self.use_mounting is not None else False if use_mounting: @@ -170,7 +172,9 @@ dataset_name=self.name, datastore_name=self.datastore) named_input = azureml_dataset.as_named_input(_input_dataset_key(index=dataset_index)) - path_on_compute = str(self.target_folder) if self.target_folder is not None else None + # If running on windows then self.target_folder may be a WindowsPath, make sure it is + # in posix format for Azure. + path_on_compute = self.target_folder.as_posix() if self.target_folder is not None else None use_mounting = False if self.use_mounting is None else self.use_mounting if use_mounting: status += "mounted at " @@ -202,7 +206,7 @@ destination=(datastore, self.name + "/")) # TODO: Can we get tags into here too? dataset = dataset.register_on_complete(name=self.name) - if self.target_folder is not None: + if self.target_folder: raise ValueError("Output datasets can't have a target_folder set.") use_mounting = True if self.use_mounting is None else self.use_mounting if use_mounting:
{"golden_diff": "diff --git a/hi-ml-azure/src/health_azure/datasets.py b/hi-ml-azure/src/health_azure/datasets.py\n--- a/hi-ml-azure/src/health_azure/datasets.py\n+++ b/hi-ml-azure/src/health_azure/datasets.py\n@@ -95,7 +95,7 @@\n Defaults: False (downloading) for datasets that are script inputs, True (mounting) for datasets that are\n script outputs.\n :param target_folder: The folder into which the dataset should be downloaded or mounted. If left empty, a\n- random folder on /tmp will be chosen.\n+ random folder on /tmp will be chosen. Do NOT use \".\" as the target_folder.\n :param local_folder: The folder on the local machine at which the dataset is available. This\n is used only for runs outside of AzureML. If this is empty then the target_folder will be used to\n mount or download the dataset.\n@@ -109,8 +109,11 @@\n self.datastore = datastore\n self.version = version\n self.use_mounting = use_mounting\n- self.target_folder = Path(target_folder) if target_folder is not None else None\n- self.local_folder = Path(local_folder) if local_folder is not None else None\n+ # If target_folder is \"\" then convert to None\n+ self.target_folder = Path(target_folder) if target_folder else None\n+ if str(self.target_folder) == \".\":\n+ raise ValueError(\"Can't mount or download a dataset to the current working directory.\")\n+ self.local_folder = Path(local_folder) if local_folder else None\n \n def to_input_dataset_local(self, workspace: Optional[Workspace]) -> Tuple[Optional[Path], Optional[MountContext]]:\n \"\"\"\n@@ -137,7 +140,6 @@\n azureml_dataset = get_or_create_dataset(workspace=workspace,\n dataset_name=self.name,\n datastore_name=self.datastore)\n-\n target_path = self.target_folder or Path(tempfile.mkdtemp())\n use_mounting = self.use_mounting if self.use_mounting is not None else False\n if use_mounting:\n@@ -170,7 +172,9 @@\n dataset_name=self.name,\n datastore_name=self.datastore)\n named_input = azureml_dataset.as_named_input(_input_dataset_key(index=dataset_index))\n- path_on_compute = str(self.target_folder) if self.target_folder is not None else None\n+ # If running on windows then self.target_folder may be a WindowsPath, make sure it is\n+ # in posix format for Azure.\n+ path_on_compute = self.target_folder.as_posix() if self.target_folder is not None else None\n use_mounting = False if self.use_mounting is None else self.use_mounting\n if use_mounting:\n status += \"mounted at \"\n@@ -202,7 +206,7 @@\n destination=(datastore, self.name + \"/\"))\n # TODO: Can we get tags into here too?\n dataset = dataset.register_on_complete(name=self.name)\n- if self.target_folder is not None:\n+ if self.target_folder:\n raise ValueError(\"Output datasets can't have a target_folder set.\")\n use_mounting = True if self.use_mounting is None else self.use_mounting\n if use_mounting:\n", "issue": "DatasetConfig with empty target_folder creates invalid pathOneCompute argument\ntarget_folder==\"\" will turn into PosixPath(\".\"), but that creates problems on AML.\n", "before_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). 
See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nimport logging\nimport tempfile\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple, Union\n\nfrom azureml.core import Dataset, Datastore, Workspace\nfrom azureml.data import FileDataset, OutputFileDatasetConfig\nfrom azureml.data.dataset_consumption_config import DatasetConsumptionConfig\nfrom azureml.dataprep.fuse.daemon import MountContext\n\nfrom health_azure.utils import PathOrString, get_workspace\n\n\ndef get_datastore(workspace: Workspace, datastore_name: str) -> Datastore:\n \"\"\"\n Retrieves a datastore of a given name from an AzureML workspace. The datastore_name argument can be omitted if\n the workspace only contains a single datastore. Raises a ValueError if there is no datastore of the given name.\n\n :param workspace: The AzureML workspace to read from.\n :param datastore_name: The name of the datastore to retrieve.\n :return: An AzureML datastore.\n \"\"\"\n datastores = workspace.datastores\n existing_stores = list(datastores.keys())\n if not datastore_name:\n if len(existing_stores) == 1:\n return datastores[existing_stores[0]]\n raise ValueError(\"No datastore name provided. This is only possible if the workspace has a single datastore. \"\n f\"However, the workspace has {len(existing_stores)} datastores: {existing_stores}\")\n if datastore_name in datastores:\n return datastores[datastore_name]\n raise ValueError(f\"Datastore {datastore_name} was not found in the workspace. Existing datastores: \"\n f\"{existing_stores}\")\n\n\ndef get_or_create_dataset(workspace: Workspace, datastore_name: str, dataset_name: str) -> FileDataset:\n \"\"\"\n Looks in the AzureML datastore for a dataset of the given name. 
If there is no such dataset, a dataset is\n created and registered, assuming that the files are in a folder that has the same name as the dataset.\n For example, if dataset_name is 'foo', then the 'foo' dataset should be pointing to the folder\n <container_root>/datasets/dataset_name/\n \"\"\"\n if not dataset_name:\n raise ValueError(\"No dataset name provided.\")\n try:\n logging.info(f\"Trying to retrieve AzureML Dataset '{dataset_name}'\")\n azureml_dataset = Dataset.get_by_name(workspace, name=dataset_name)\n logging.info(\"Dataset found.\")\n except Exception:\n logging.info(f\"Retrieving datastore '{datastore_name}' from AzureML workspace\")\n datastore = get_datastore(workspace, datastore_name)\n logging.info(f\"Creating a new dataset from data in folder '{dataset_name}' in the datastore\")\n # Ensure that there is a / at the end of the file path, otherwise folder that share a prefix could create\n # trouble (for example, folders foo and foo_bar exist, and I'm trying to create a dataset from \"foo\")\n azureml_dataset = Dataset.File.from_files(path=(datastore, dataset_name + \"/\"))\n logging.info(\"Registering the dataset for future use.\")\n azureml_dataset.register(workspace, name=dataset_name)\n return azureml_dataset\n\n\ndef _input_dataset_key(index: int) -> str:\n return f\"INPUT_{index}\"\n\n\ndef _output_dataset_key(index: int) -> str:\n return f\"OUTPUT_{index}\"\n\n\nclass DatasetConfig:\n \"\"\"\n Contains information to use AzureML datasets as inputs or outputs.\n \"\"\"\n\n def __init__(self,\n name: str,\n datastore: str = \"\",\n version: Optional[int] = None,\n use_mounting: Optional[bool] = None,\n target_folder: Optional[PathOrString] = None,\n local_folder: Optional[PathOrString] = None):\n \"\"\"\n :param name: The name of the dataset, as it was registered in the AzureML workspace. For output datasets,\n this will be the name given to the newly created dataset.\n :param datastore: The name of the AzureML datastore that holds the dataset. This can be empty if the AzureML\n workspace has only a single datastore, or if the default datastore should be used.\n :param version: The version of the dataset that should be used. This is only used for input datasets.\n If the version is not specified, the latest version will be used.\n :param use_mounting: If True, the dataset will be \"mounted\", that is, individual files will be read\n or written on-demand over the network. If False, the dataset will be fully downloaded before the job starts,\n respectively fully uploaded at job end for output datasets.\n Defaults: False (downloading) for datasets that are script inputs, True (mounting) for datasets that are\n script outputs.\n :param target_folder: The folder into which the dataset should be downloaded or mounted. If left empty, a\n random folder on /tmp will be chosen.\n :param local_folder: The folder on the local machine at which the dataset is available. This\n is used only for runs outside of AzureML. 
If this is empty then the target_folder will be used to\n mount or download the dataset.\n \"\"\"\n # This class would be a good candidate for a dataclass, but having an explicit constructor makes\n # documentation tools in the editor work nicer.\n name = name.strip()\n if not name:\n raise ValueError(\"The name of the dataset must be a non-empty string.\")\n self.name = name\n self.datastore = datastore\n self.version = version\n self.use_mounting = use_mounting\n self.target_folder = Path(target_folder) if target_folder is not None else None\n self.local_folder = Path(local_folder) if local_folder is not None else None\n\n def to_input_dataset_local(self, workspace: Optional[Workspace]) -> Tuple[Optional[Path], Optional[MountContext]]:\n \"\"\"\n Return a local path to the dataset when outside of an AzureML run.\n If local_folder is supplied, then this is assumed to be a local dataset, and this is returned.\n Otherwise the dataset is mounted or downloaded to either the target folder or a temporary folder and that is\n returned.\n\n :param workspace: The AzureML workspace to read from.\n :return: Pair of optional path to dataset and optional mountcontext.\n \"\"\"\n status = f\"Dataset {self.name} will be \"\n\n if self.local_folder is not None:\n status += f\"obtained from local folder {str(self.local_folder)}\"\n print(status)\n return self.local_folder, None\n\n if workspace is None:\n status += \"'None' - neither local_folder nor workspace available\"\n print(status)\n return None, None\n\n azureml_dataset = get_or_create_dataset(workspace=workspace,\n dataset_name=self.name,\n datastore_name=self.datastore)\n\n target_path = self.target_folder or Path(tempfile.mkdtemp())\n use_mounting = self.use_mounting if self.use_mounting is not None else False\n if use_mounting:\n status += \"mounted at \"\n mount_context = azureml_dataset.mount(mount_point=str(target_path))\n result = target_path, mount_context\n else:\n status += \"downloaded to \"\n azureml_dataset.download(target_path=str(target_path), overwrite=False)\n result = target_path, None\n if self.target_folder is not None:\n status += f\"{str(self.target_folder)}.\"\n else:\n status += f\"a randomly chosen folder: {target_path}.\"\n print(status)\n return result\n\n def to_input_dataset(self,\n workspace: Workspace,\n dataset_index: int) -> DatasetConsumptionConfig:\n \"\"\"\n Creates a configuration for using an AzureML dataset inside of an AzureML run. 
This will make the AzureML\n dataset with given name available as a named input, using INPUT_0 as the key for dataset index 0.\n\n :param workspace: The AzureML workspace to read from.\n :param dataset_index: Suffix for using datasets as named inputs, the dataset will be marked INPUT_{index}\n \"\"\"\n status = f\"Dataset {self.name} (index {dataset_index}) will be \"\n azureml_dataset = get_or_create_dataset(workspace=workspace,\n dataset_name=self.name,\n datastore_name=self.datastore)\n named_input = azureml_dataset.as_named_input(_input_dataset_key(index=dataset_index))\n path_on_compute = str(self.target_folder) if self.target_folder is not None else None\n use_mounting = False if self.use_mounting is None else self.use_mounting\n if use_mounting:\n status += \"mounted at \"\n result = named_input.as_mount(path_on_compute)\n else:\n status += \"downloaded to \"\n result = named_input.as_download(path_on_compute)\n if path_on_compute:\n status += f\"{path_on_compute}.\"\n else:\n status += \"a randomly chosen folder.\"\n print(status)\n return result\n\n def to_output_dataset(self,\n workspace: Workspace,\n dataset_index: int) -> OutputFileDatasetConfig:\n \"\"\"\n Creates a configuration to write a script output to an AzureML dataset. The name and datastore of this new\n dataset will be taken from the present object.\n\n :param workspace: The AzureML workspace to read from.\n :param dataset_index: Suffix for using datasets as named inputs, the dataset will be marked OUTPUT_{index}\n :return:\n \"\"\"\n status = f\"Output dataset {self.name} (index {dataset_index}) will be \"\n datastore = get_datastore(workspace, self.datastore)\n dataset = OutputFileDatasetConfig(name=_output_dataset_key(index=dataset_index),\n destination=(datastore, self.name + \"/\"))\n # TODO: Can we get tags into here too?\n dataset = dataset.register_on_complete(name=self.name)\n if self.target_folder is not None:\n raise ValueError(\"Output datasets can't have a target_folder set.\")\n use_mounting = True if self.use_mounting is None else self.use_mounting\n if use_mounting:\n status += \"uploaded while the job runs.\"\n result = dataset.as_mount()\n else:\n status += \"uploaded when the job completes.\"\n result = dataset.as_upload()\n logging.info(status)\n return result\n\n\nStrOrDatasetConfig = Union[str, DatasetConfig]\n\n\ndef _replace_string_datasets(datasets: List[StrOrDatasetConfig],\n default_datastore_name: str) -> List[DatasetConfig]:\n \"\"\"\n Processes a list of input or output datasets. All entries in the list that are strings are turned into\n DatasetConfig objects, using the string as the dataset name, and pointing to the default datastore.\n\n :param datasets: A list of datasets, each given either as a string or a DatasetConfig object.\n :param default_datastore_name: The datastore to use for all datasets that are only specified via their name.\n :return: A list of DatasetConfig objects, in the same order as the input list.\n \"\"\"\n return [DatasetConfig(name=d, datastore=default_datastore_name) if isinstance(d, str) else d\n for d in datasets]\n\n\ndef find_workspace_for_local_datasets(aml_workspace: Optional[Workspace],\n workspace_config_path: Optional[Path],\n dataset_configs: List[DatasetConfig]) -> Optional[Workspace]:\n \"\"\"\n If any of the dataset_configs require an AzureML workspace then try to get one, otherwise return None.\n\n :param aml_workspace: There are two optional parameters used to glean an existing AzureML Workspace. 
The simplest is\n to pass it in as a parameter.\n :param workspace_config_path: The 2nd option is to specify the path to the config.json file downloaded from the\n Azure portal from which we can retrieve the existing Workspace.\n :param dataset_configs: List of DatasetConfig describing the input datasets.\n :return: Workspace if required, None otherwise.\n \"\"\"\n workspace: Workspace = None\n # Check whether an attempt will be made to mount or download a dataset when running locally.\n # If so, try to get the AzureML workspace.\n if any(dc.local_folder is None for dc in dataset_configs):\n try:\n workspace = get_workspace(aml_workspace, workspace_config_path)\n logging.info(f\"Found workspace for datasets: {workspace.name}\")\n except Exception:\n logging.info(\"Could not find workspace for datasets.\")\n return workspace\n\n\ndef setup_local_datasets(aml_workspace: Optional[Workspace],\n workspace_config_path: Optional[Path],\n dataset_configs: List[DatasetConfig]) -> Tuple[List[Optional[Path]], List[MountContext]]:\n \"\"\"\n When running outside of AzureML, setup datasets to be used locally.\n\n For each DatasetConfig, if local_folder is supplied, then this is assumed to be a local dataset, and this is\n used. Otherwise the dataset is mounted or downloaded to either the target folder or a temporary folder and that is\n used.\n\n :param aml_workspace: There are two optional parameters used to glean an existing AzureML Workspace. The simplest is\n to pass it in as a parameter.\n :param workspace_config_path: The 2nd option is to specify the path to the config.json file downloaded from the\n Azure portal from which we can retrieve the existing Workspace.\n :param dataset_configs: List of DatasetConfig describing the input datasets.\n :return: Pair of: list of optional paths to the input datasets, list of mountcontexts, one for each mounted dataset.\n \"\"\"\n workspace = find_workspace_for_local_datasets(aml_workspace, workspace_config_path, dataset_configs)\n\n mounted_input_datasets: List[Optional[Path]] = []\n mount_contexts: List[MountContext] = []\n\n for d in dataset_configs:\n target_path, mount_context = d.to_input_dataset_local(workspace)\n\n mounted_input_datasets.append(target_path)\n\n if mount_context is not None:\n mount_context.start()\n mount_contexts.append(mount_context)\n\n return mounted_input_datasets, mount_contexts\n", "path": "hi-ml-azure/src/health_azure/datasets.py"}], "after_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nimport logging\nimport tempfile\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple, Union\n\nfrom azureml.core import Dataset, Datastore, Workspace\nfrom azureml.data import FileDataset, OutputFileDatasetConfig\nfrom azureml.data.dataset_consumption_config import DatasetConsumptionConfig\nfrom azureml.dataprep.fuse.daemon import MountContext\n\nfrom health_azure.utils import PathOrString, get_workspace\n\n\ndef get_datastore(workspace: Workspace, datastore_name: str) -> Datastore:\n \"\"\"\n Retrieves a datastore of a given name from an AzureML workspace. The datastore_name argument can be omitted if\n the workspace only contains a single datastore. 
Raises a ValueError if there is no datastore of the given name.\n\n :param workspace: The AzureML workspace to read from.\n :param datastore_name: The name of the datastore to retrieve.\n :return: An AzureML datastore.\n \"\"\"\n datastores = workspace.datastores\n existing_stores = list(datastores.keys())\n if not datastore_name:\n if len(existing_stores) == 1:\n return datastores[existing_stores[0]]\n raise ValueError(\"No datastore name provided. This is only possible if the workspace has a single datastore. \"\n f\"However, the workspace has {len(existing_stores)} datastores: {existing_stores}\")\n if datastore_name in datastores:\n return datastores[datastore_name]\n raise ValueError(f\"Datastore {datastore_name} was not found in the workspace. Existing datastores: \"\n f\"{existing_stores}\")\n\n\ndef get_or_create_dataset(workspace: Workspace, datastore_name: str, dataset_name: str) -> FileDataset:\n \"\"\"\n Looks in the AzureML datastore for a dataset of the given name. If there is no such dataset, a dataset is\n created and registered, assuming that the files are in a folder that has the same name as the dataset.\n For example, if dataset_name is 'foo', then the 'foo' dataset should be pointing to the folder\n <container_root>/datasets/dataset_name/\n \"\"\"\n if not dataset_name:\n raise ValueError(\"No dataset name provided.\")\n try:\n logging.info(f\"Trying to retrieve AzureML Dataset '{dataset_name}'\")\n azureml_dataset = Dataset.get_by_name(workspace, name=dataset_name)\n logging.info(\"Dataset found.\")\n except Exception:\n logging.info(f\"Retrieving datastore '{datastore_name}' from AzureML workspace\")\n datastore = get_datastore(workspace, datastore_name)\n logging.info(f\"Creating a new dataset from data in folder '{dataset_name}' in the datastore\")\n # Ensure that there is a / at the end of the file path, otherwise folder that share a prefix could create\n # trouble (for example, folders foo and foo_bar exist, and I'm trying to create a dataset from \"foo\")\n azureml_dataset = Dataset.File.from_files(path=(datastore, dataset_name + \"/\"))\n logging.info(\"Registering the dataset for future use.\")\n azureml_dataset.register(workspace, name=dataset_name)\n return azureml_dataset\n\n\ndef _input_dataset_key(index: int) -> str:\n return f\"INPUT_{index}\"\n\n\ndef _output_dataset_key(index: int) -> str:\n return f\"OUTPUT_{index}\"\n\n\nclass DatasetConfig:\n \"\"\"\n Contains information to use AzureML datasets as inputs or outputs.\n \"\"\"\n\n def __init__(self,\n name: str,\n datastore: str = \"\",\n version: Optional[int] = None,\n use_mounting: Optional[bool] = None,\n target_folder: Optional[PathOrString] = None,\n local_folder: Optional[PathOrString] = None):\n \"\"\"\n :param name: The name of the dataset, as it was registered in the AzureML workspace. For output datasets,\n this will be the name given to the newly created dataset.\n :param datastore: The name of the AzureML datastore that holds the dataset. This can be empty if the AzureML\n workspace has only a single datastore, or if the default datastore should be used.\n :param version: The version of the dataset that should be used. This is only used for input datasets.\n If the version is not specified, the latest version will be used.\n :param use_mounting: If True, the dataset will be \"mounted\", that is, individual files will be read\n or written on-demand over the network. 
If False, the dataset will be fully downloaded before the job starts,\n respectively fully uploaded at job end for output datasets.\n Defaults: False (downloading) for datasets that are script inputs, True (mounting) for datasets that are\n script outputs.\n :param target_folder: The folder into which the dataset should be downloaded or mounted. If left empty, a\n random folder on /tmp will be chosen. Do NOT use \".\" as the target_folder.\n :param local_folder: The folder on the local machine at which the dataset is available. This\n is used only for runs outside of AzureML. If this is empty then the target_folder will be used to\n mount or download the dataset.\n \"\"\"\n # This class would be a good candidate for a dataclass, but having an explicit constructor makes\n # documentation tools in the editor work nicer.\n name = name.strip()\n if not name:\n raise ValueError(\"The name of the dataset must be a non-empty string.\")\n self.name = name\n self.datastore = datastore\n self.version = version\n self.use_mounting = use_mounting\n # If target_folder is \"\" then convert to None\n self.target_folder = Path(target_folder) if target_folder else None\n if str(self.target_folder) == \".\":\n raise ValueError(\"Can't mount or download a dataset to the current working directory.\")\n self.local_folder = Path(local_folder) if local_folder else None\n\n def to_input_dataset_local(self, workspace: Optional[Workspace]) -> Tuple[Optional[Path], Optional[MountContext]]:\n \"\"\"\n Return a local path to the dataset when outside of an AzureML run.\n If local_folder is supplied, then this is assumed to be a local dataset, and this is returned.\n Otherwise the dataset is mounted or downloaded to either the target folder or a temporary folder and that is\n returned.\n\n :param workspace: The AzureML workspace to read from.\n :return: Pair of optional path to dataset and optional mountcontext.\n \"\"\"\n status = f\"Dataset {self.name} will be \"\n\n if self.local_folder is not None:\n status += f\"obtained from local folder {str(self.local_folder)}\"\n print(status)\n return self.local_folder, None\n\n if workspace is None:\n status += \"'None' - neither local_folder nor workspace available\"\n print(status)\n return None, None\n\n azureml_dataset = get_or_create_dataset(workspace=workspace,\n dataset_name=self.name,\n datastore_name=self.datastore)\n target_path = self.target_folder or Path(tempfile.mkdtemp())\n use_mounting = self.use_mounting if self.use_mounting is not None else False\n if use_mounting:\n status += \"mounted at \"\n mount_context = azureml_dataset.mount(mount_point=str(target_path))\n result = target_path, mount_context\n else:\n status += \"downloaded to \"\n azureml_dataset.download(target_path=str(target_path), overwrite=False)\n result = target_path, None\n if self.target_folder is not None:\n status += f\"{str(self.target_folder)}.\"\n else:\n status += f\"a randomly chosen folder: {target_path}.\"\n print(status)\n return result\n\n def to_input_dataset(self,\n workspace: Workspace,\n dataset_index: int) -> DatasetConsumptionConfig:\n \"\"\"\n Creates a configuration for using an AzureML dataset inside of an AzureML run. 
This will make the AzureML\n dataset with given name available as a named input, using INPUT_0 as the key for dataset index 0.\n\n :param workspace: The AzureML workspace to read from.\n :param dataset_index: Suffix for using datasets as named inputs, the dataset will be marked INPUT_{index}\n \"\"\"\n status = f\"Dataset {self.name} (index {dataset_index}) will be \"\n azureml_dataset = get_or_create_dataset(workspace=workspace,\n dataset_name=self.name,\n datastore_name=self.datastore)\n named_input = azureml_dataset.as_named_input(_input_dataset_key(index=dataset_index))\n # If running on windows then self.target_folder may be a WindowsPath, make sure it is\n # in posix format for Azure.\n path_on_compute = self.target_folder.as_posix() if self.target_folder is not None else None\n use_mounting = False if self.use_mounting is None else self.use_mounting\n if use_mounting:\n status += \"mounted at \"\n result = named_input.as_mount(path_on_compute)\n else:\n status += \"downloaded to \"\n result = named_input.as_download(path_on_compute)\n if path_on_compute:\n status += f\"{path_on_compute}.\"\n else:\n status += \"a randomly chosen folder.\"\n print(status)\n return result\n\n def to_output_dataset(self,\n workspace: Workspace,\n dataset_index: int) -> OutputFileDatasetConfig:\n \"\"\"\n Creates a configuration to write a script output to an AzureML dataset. The name and datastore of this new\n dataset will be taken from the present object.\n\n :param workspace: The AzureML workspace to read from.\n :param dataset_index: Suffix for using datasets as named inputs, the dataset will be marked OUTPUT_{index}\n :return:\n \"\"\"\n status = f\"Output dataset {self.name} (index {dataset_index}) will be \"\n datastore = get_datastore(workspace, self.datastore)\n dataset = OutputFileDatasetConfig(name=_output_dataset_key(index=dataset_index),\n destination=(datastore, self.name + \"/\"))\n # TODO: Can we get tags into here too?\n dataset = dataset.register_on_complete(name=self.name)\n if self.target_folder:\n raise ValueError(\"Output datasets can't have a target_folder set.\")\n use_mounting = True if self.use_mounting is None else self.use_mounting\n if use_mounting:\n status += \"uploaded while the job runs.\"\n result = dataset.as_mount()\n else:\n status += \"uploaded when the job completes.\"\n result = dataset.as_upload()\n logging.info(status)\n return result\n\n\nStrOrDatasetConfig = Union[str, DatasetConfig]\n\n\ndef _replace_string_datasets(datasets: List[StrOrDatasetConfig],\n default_datastore_name: str) -> List[DatasetConfig]:\n \"\"\"\n Processes a list of input or output datasets. 
All entries in the list that are strings are turned into\n DatasetConfig objects, using the string as the dataset name, and pointing to the default datastore.\n\n :param datasets: A list of datasets, each given either as a string or a DatasetConfig object.\n :param default_datastore_name: The datastore to use for all datasets that are only specified via their name.\n :return: A list of DatasetConfig objects, in the same order as the input list.\n \"\"\"\n return [DatasetConfig(name=d, datastore=default_datastore_name) if isinstance(d, str) else d\n for d in datasets]\n\n\ndef find_workspace_for_local_datasets(aml_workspace: Optional[Workspace],\n workspace_config_path: Optional[Path],\n dataset_configs: List[DatasetConfig]) -> Optional[Workspace]:\n \"\"\"\n If any of the dataset_configs require an AzureML workspace then try to get one, otherwise return None.\n\n :param aml_workspace: There are two optional parameters used to glean an existing AzureML Workspace. The simplest is\n to pass it in as a parameter.\n :param workspace_config_path: The 2nd option is to specify the path to the config.json file downloaded from the\n Azure portal from which we can retrieve the existing Workspace.\n :param dataset_configs: List of DatasetConfig describing the input datasets.\n :return: Workspace if required, None otherwise.\n \"\"\"\n workspace: Workspace = None\n # Check whether an attempt will be made to mount or download a dataset when running locally.\n # If so, try to get the AzureML workspace.\n if any(dc.local_folder is None for dc in dataset_configs):\n try:\n workspace = get_workspace(aml_workspace, workspace_config_path)\n logging.info(f\"Found workspace for datasets: {workspace.name}\")\n except Exception:\n logging.info(\"Could not find workspace for datasets.\")\n return workspace\n\n\ndef setup_local_datasets(aml_workspace: Optional[Workspace],\n workspace_config_path: Optional[Path],\n dataset_configs: List[DatasetConfig]) -> Tuple[List[Optional[Path]], List[MountContext]]:\n \"\"\"\n When running outside of AzureML, setup datasets to be used locally.\n\n For each DatasetConfig, if local_folder is supplied, then this is assumed to be a local dataset, and this is\n used. Otherwise the dataset is mounted or downloaded to either the target folder or a temporary folder and that is\n used.\n\n :param aml_workspace: There are two optional parameters used to glean an existing AzureML Workspace. The simplest is\n to pass it in as a parameter.\n :param workspace_config_path: The 2nd option is to specify the path to the config.json file downloaded from the\n Azure portal from which we can retrieve the existing Workspace.\n :param dataset_configs: List of DatasetConfig describing the input datasets.\n :return: Pair of: list of optional paths to the input datasets, list of mountcontexts, one for each mounted dataset.\n \"\"\"\n workspace = find_workspace_for_local_datasets(aml_workspace, workspace_config_path, dataset_configs)\n\n mounted_input_datasets: List[Optional[Path]] = []\n mount_contexts: List[MountContext] = []\n\n for d in dataset_configs:\n target_path, mount_context = d.to_input_dataset_local(workspace)\n\n mounted_input_datasets.append(target_path)\n\n if mount_context is not None:\n mount_context.start()\n mount_contexts.append(mount_context)\n\n return mounted_input_datasets, mount_contexts\n", "path": "hi-ml-azure/src/health_azure/datasets.py"}]}
4,074
733
gh_patches_debug_40839
rasdani/github-patches
git_diff
googleapis__google-auth-library-python-147
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Stop reading Cloud SDK's configuration to obtain the project ID Instead, we should subprocess out to `gcloud config config-helper --format json` Example output: ``` { "configuration": { "active_configuration": "default", "properties": { "core": { "account": "[ELIDED]", "disable_usage_reporting": "False", "project": "[ELIDED]" } } }, "credential": { "access_token": "[ELIDED]", "token_expiry": "2017-03-23T23:09:49Z" }, "sentinels": { "config_sentinel": "/Users/jonwayne/.config/gcloud/config_sentinel" } } ``` Note: we should *not* use the auth token provided here at all. We should keep our existing logic of reading `~/.config/gcloud/application_default_credentials.json` because those credentials are populated exclusively by `gcloud auth application-default login` and intended for Application Default Credentials. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `google/auth/_cloud_sdk.py` Content: ``` 1 # Copyright 2015 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Helpers for reading the Google Cloud SDK's configuration.""" 16 17 import io 18 import os 19 20 import six 21 from six.moves import configparser 22 23 from google.auth import environment_vars 24 import google.oauth2.credentials 25 26 # The Google OAuth 2.0 token endpoint. Used for authorized user credentials. 27 _GOOGLE_OAUTH2_TOKEN_ENDPOINT = 'https://accounts.google.com/o/oauth2/token' 28 29 # The ~/.config subdirectory containing gcloud credentials. 30 _CONFIG_DIRECTORY = 'gcloud' 31 # Windows systems store config at %APPDATA%\gcloud 32 _WINDOWS_CONFIG_ROOT_ENV_VAR = 'APPDATA' 33 # The name of the file in the Cloud SDK config that contains default 34 # credentials. 35 _CREDENTIALS_FILENAME = 'application_default_credentials.json' 36 # The config section and key for the project ID in the cloud SDK config. 37 _PROJECT_CONFIG_SECTION = 'core' 38 _PROJECT_CONFIG_KEY = 'project' 39 40 41 def get_config_path(): 42 """Returns the absolute path the the Cloud SDK's configuration directory. 43 44 Returns: 45 str: The Cloud SDK config path. 46 """ 47 # If the path is explicitly set, return that. 48 try: 49 return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR] 50 except KeyError: 51 pass 52 53 # Non-windows systems store this at ~/.config/gcloud 54 if os.name != 'nt': 55 return os.path.join( 56 os.path.expanduser('~'), '.config', _CONFIG_DIRECTORY) 57 # Windows systems store config at %APPDATA%\gcloud 58 else: 59 try: 60 return os.path.join( 61 os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR], 62 _CONFIG_DIRECTORY) 63 except KeyError: 64 # This should never happen unless someone is really 65 # messing with things, but we'll cover the case anyway. 
66 drive = os.environ.get('SystemDrive', 'C:') 67 return os.path.join( 68 drive, '\\', _CONFIG_DIRECTORY) 69 70 71 def get_application_default_credentials_path(): 72 """Gets the path to the application default credentials file. 73 74 The path may or may not exist. 75 76 Returns: 77 str: The full path to application default credentials. 78 """ 79 config_path = get_config_path() 80 return os.path.join(config_path, _CREDENTIALS_FILENAME) 81 82 83 def _get_active_config(config_path): 84 """Gets the active config for the Cloud SDK. 85 86 Args: 87 config_path (str): The Cloud SDK's config path. 88 89 Returns: 90 str: The active configuration name. 91 """ 92 active_config_filename = os.path.join(config_path, 'active_config') 93 94 if not os.path.isfile(active_config_filename): 95 return 'default' 96 97 with io.open(active_config_filename, 'r', encoding='utf-8') as file_obj: 98 active_config_name = file_obj.read().strip() 99 100 return active_config_name 101 102 103 def _get_config_file(config_path, config_name): 104 """Returns the full path to a configuration's config file. 105 106 Args: 107 config_path (str): The Cloud SDK's config path. 108 config_name (str): The configuration name. 109 110 Returns: 111 str: The config file path. 112 """ 113 return os.path.join( 114 config_path, 'configurations', 'config_{}'.format(config_name)) 115 116 117 def get_project_id(): 118 """Gets the project ID from the Cloud SDK's configuration. 119 120 Returns: 121 Optional[str]: The project ID. 122 """ 123 config_path = get_config_path() 124 active_config = _get_active_config(config_path) 125 config_file = _get_config_file(config_path, active_config) 126 127 if not os.path.isfile(config_file): 128 return None 129 130 config = configparser.RawConfigParser() 131 132 try: 133 config.read(config_file) 134 135 if config.has_section(_PROJECT_CONFIG_SECTION): 136 return config.get( 137 _PROJECT_CONFIG_SECTION, _PROJECT_CONFIG_KEY) 138 139 except configparser.Error: 140 return None 141 142 143 def load_authorized_user_credentials(info): 144 """Loads an authorized user credential. 145 146 Args: 147 info (Mapping[str, str]): The loaded file's data. 148 149 Returns: 150 google.oauth2.credentials.Credentials: The constructed credentials. 151 152 Raises: 153 ValueError: if the info is in the wrong format or missing data. 154 """ 155 keys_needed = set(('refresh_token', 'client_id', 'client_secret')) 156 missing = keys_needed.difference(six.iterkeys(info)) 157 158 if missing: 159 raise ValueError( 160 'Authorized user info was not in the expected format, missing ' 161 'fields {}.'.format(', '.join(missing))) 162 163 return google.oauth2.credentials.Credentials( 164 None, # No access token, must be refreshed. 165 refresh_token=info['refresh_token'], 166 token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT, 167 client_id=info['client_id'], 168 client_secret=info['client_secret']) 169 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/google/auth/_cloud_sdk.py b/google/auth/_cloud_sdk.py --- a/google/auth/_cloud_sdk.py +++ b/google/auth/_cloud_sdk.py @@ -14,11 +14,11 @@ """Helpers for reading the Google Cloud SDK's configuration.""" -import io +import json import os +import subprocess import six -from six.moves import configparser from google.auth import environment_vars import google.oauth2.credentials @@ -33,9 +33,9 @@ # The name of the file in the Cloud SDK config that contains default # credentials. _CREDENTIALS_FILENAME = 'application_default_credentials.json' -# The config section and key for the project ID in the cloud SDK config. -_PROJECT_CONFIG_SECTION = 'core' -_PROJECT_CONFIG_KEY = 'project' +# The command to get the Cloud SDK configuration +_CLOUD_SDK_CONFIG_COMMAND = ( + 'gcloud', 'config', 'config-helper', '--format', 'json') def get_config_path(): @@ -80,66 +80,6 @@ return os.path.join(config_path, _CREDENTIALS_FILENAME) -def _get_active_config(config_path): - """Gets the active config for the Cloud SDK. - - Args: - config_path (str): The Cloud SDK's config path. - - Returns: - str: The active configuration name. - """ - active_config_filename = os.path.join(config_path, 'active_config') - - if not os.path.isfile(active_config_filename): - return 'default' - - with io.open(active_config_filename, 'r', encoding='utf-8') as file_obj: - active_config_name = file_obj.read().strip() - - return active_config_name - - -def _get_config_file(config_path, config_name): - """Returns the full path to a configuration's config file. - - Args: - config_path (str): The Cloud SDK's config path. - config_name (str): The configuration name. - - Returns: - str: The config file path. - """ - return os.path.join( - config_path, 'configurations', 'config_{}'.format(config_name)) - - -def get_project_id(): - """Gets the project ID from the Cloud SDK's configuration. - - Returns: - Optional[str]: The project ID. - """ - config_path = get_config_path() - active_config = _get_active_config(config_path) - config_file = _get_config_file(config_path, active_config) - - if not os.path.isfile(config_file): - return None - - config = configparser.RawConfigParser() - - try: - config.read(config_file) - - if config.has_section(_PROJECT_CONFIG_SECTION): - return config.get( - _PROJECT_CONFIG_SECTION, _PROJECT_CONFIG_KEY) - - except configparser.Error: - return None - - def load_authorized_user_credentials(info): """Loads an authorized user credential. @@ -166,3 +106,28 @@ token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT, client_id=info['client_id'], client_secret=info['client_secret']) + + +def get_project_id(): + """Gets the project ID from the Cloud SDK. + + Returns: + Optional[str]: The project ID. + """ + + try: + output = subprocess.check_output( + _CLOUD_SDK_CONFIG_COMMAND, + stderr=subprocess.STDOUT) + except (subprocess.CalledProcessError, OSError, IOError): + return None + + try: + configuration = json.loads(output.decode('utf-8')) + except ValueError: + return None + + try: + return configuration['configuration']['properties']['core']['project'] + except KeyError: + return None
{"golden_diff": "diff --git a/google/auth/_cloud_sdk.py b/google/auth/_cloud_sdk.py\n--- a/google/auth/_cloud_sdk.py\n+++ b/google/auth/_cloud_sdk.py\n@@ -14,11 +14,11 @@\n \n \"\"\"Helpers for reading the Google Cloud SDK's configuration.\"\"\"\n \n-import io\n+import json\n import os\n+import subprocess\n \n import six\n-from six.moves import configparser\n \n from google.auth import environment_vars\n import google.oauth2.credentials\n@@ -33,9 +33,9 @@\n # The name of the file in the Cloud SDK config that contains default\n # credentials.\n _CREDENTIALS_FILENAME = 'application_default_credentials.json'\n-# The config section and key for the project ID in the cloud SDK config.\n-_PROJECT_CONFIG_SECTION = 'core'\n-_PROJECT_CONFIG_KEY = 'project'\n+# The command to get the Cloud SDK configuration\n+_CLOUD_SDK_CONFIG_COMMAND = (\n+ 'gcloud', 'config', 'config-helper', '--format', 'json')\n \n \n def get_config_path():\n@@ -80,66 +80,6 @@\n return os.path.join(config_path, _CREDENTIALS_FILENAME)\n \n \n-def _get_active_config(config_path):\n- \"\"\"Gets the active config for the Cloud SDK.\n-\n- Args:\n- config_path (str): The Cloud SDK's config path.\n-\n- Returns:\n- str: The active configuration name.\n- \"\"\"\n- active_config_filename = os.path.join(config_path, 'active_config')\n-\n- if not os.path.isfile(active_config_filename):\n- return 'default'\n-\n- with io.open(active_config_filename, 'r', encoding='utf-8') as file_obj:\n- active_config_name = file_obj.read().strip()\n-\n- return active_config_name\n-\n-\n-def _get_config_file(config_path, config_name):\n- \"\"\"Returns the full path to a configuration's config file.\n-\n- Args:\n- config_path (str): The Cloud SDK's config path.\n- config_name (str): The configuration name.\n-\n- Returns:\n- str: The config file path.\n- \"\"\"\n- return os.path.join(\n- config_path, 'configurations', 'config_{}'.format(config_name))\n-\n-\n-def get_project_id():\n- \"\"\"Gets the project ID from the Cloud SDK's configuration.\n-\n- Returns:\n- Optional[str]: The project ID.\n- \"\"\"\n- config_path = get_config_path()\n- active_config = _get_active_config(config_path)\n- config_file = _get_config_file(config_path, active_config)\n-\n- if not os.path.isfile(config_file):\n- return None\n-\n- config = configparser.RawConfigParser()\n-\n- try:\n- config.read(config_file)\n-\n- if config.has_section(_PROJECT_CONFIG_SECTION):\n- return config.get(\n- _PROJECT_CONFIG_SECTION, _PROJECT_CONFIG_KEY)\n-\n- except configparser.Error:\n- return None\n-\n-\n def load_authorized_user_credentials(info):\n \"\"\"Loads an authorized user credential.\n \n@@ -166,3 +106,28 @@\n token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,\n client_id=info['client_id'],\n client_secret=info['client_secret'])\n+\n+\n+def get_project_id():\n+ \"\"\"Gets the project ID from the Cloud SDK.\n+\n+ Returns:\n+ Optional[str]: The project ID.\n+ \"\"\"\n+\n+ try:\n+ output = subprocess.check_output(\n+ _CLOUD_SDK_CONFIG_COMMAND,\n+ stderr=subprocess.STDOUT)\n+ except (subprocess.CalledProcessError, OSError, IOError):\n+ return None\n+\n+ try:\n+ configuration = json.loads(output.decode('utf-8'))\n+ except ValueError:\n+ return None\n+\n+ try:\n+ return configuration['configuration']['properties']['core']['project']\n+ except KeyError:\n+ return None\n", "issue": "Stop reading Cloud SDK's configuration to obtain the project ID\nInstead, we should subprocess out to `gcloud config config-helper --format json`\r\n\r\nExample output:\r\n\r\n```\r\n{\r\n \"configuration\": {\r\n 
\"active_configuration\": \"default\",\r\n \"properties\": {\r\n \"core\": {\r\n \"account\": \"[ELIDED]\",\r\n \"disable_usage_reporting\": \"False\",\r\n \"project\": \"[ELIDED]\"\r\n }\r\n }\r\n },\r\n \"credential\": {\r\n \"access_token\": \"[ELIDED]\",\r\n \"token_expiry\": \"2017-03-23T23:09:49Z\"\r\n },\r\n \"sentinels\": {\r\n \"config_sentinel\": \"/Users/jonwayne/.config/gcloud/config_sentinel\"\r\n }\r\n}\r\n```\r\n\r\nNote: we should *not* use the auth token provided here at all. We should keep our existing logic of reading `~/.config/gcloud/application_default_credentials.json` because those credentials are populated exclusively by `gcloud auth application-default login` and intended for Application Default Credentials.\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helpers for reading the Google Cloud SDK's configuration.\"\"\"\n\nimport io\nimport os\n\nimport six\nfrom six.moves import configparser\n\nfrom google.auth import environment_vars\nimport google.oauth2.credentials\n\n# The Google OAuth 2.0 token endpoint. Used for authorized user credentials.\n_GOOGLE_OAUTH2_TOKEN_ENDPOINT = 'https://accounts.google.com/o/oauth2/token'\n\n# The ~/.config subdirectory containing gcloud credentials.\n_CONFIG_DIRECTORY = 'gcloud'\n# Windows systems store config at %APPDATA%\\gcloud\n_WINDOWS_CONFIG_ROOT_ENV_VAR = 'APPDATA'\n# The name of the file in the Cloud SDK config that contains default\n# credentials.\n_CREDENTIALS_FILENAME = 'application_default_credentials.json'\n# The config section and key for the project ID in the cloud SDK config.\n_PROJECT_CONFIG_SECTION = 'core'\n_PROJECT_CONFIG_KEY = 'project'\n\n\ndef get_config_path():\n \"\"\"Returns the absolute path the the Cloud SDK's configuration directory.\n\n Returns:\n str: The Cloud SDK config path.\n \"\"\"\n # If the path is explicitly set, return that.\n try:\n return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]\n except KeyError:\n pass\n\n # Non-windows systems store this at ~/.config/gcloud\n if os.name != 'nt':\n return os.path.join(\n os.path.expanduser('~'), '.config', _CONFIG_DIRECTORY)\n # Windows systems store config at %APPDATA%\\gcloud\n else:\n try:\n return os.path.join(\n os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR],\n _CONFIG_DIRECTORY)\n except KeyError:\n # This should never happen unless someone is really\n # messing with things, but we'll cover the case anyway.\n drive = os.environ.get('SystemDrive', 'C:')\n return os.path.join(\n drive, '\\\\', _CONFIG_DIRECTORY)\n\n\ndef get_application_default_credentials_path():\n \"\"\"Gets the path to the application default credentials file.\n\n The path may or may not exist.\n\n Returns:\n str: The full path to application default credentials.\n \"\"\"\n config_path = get_config_path()\n return os.path.join(config_path, _CREDENTIALS_FILENAME)\n\n\ndef _get_active_config(config_path):\n \"\"\"Gets the active config for the Cloud SDK.\n\n Args:\n config_path (str): The Cloud SDK's config 
path.\n\n Returns:\n str: The active configuration name.\n \"\"\"\n active_config_filename = os.path.join(config_path, 'active_config')\n\n if not os.path.isfile(active_config_filename):\n return 'default'\n\n with io.open(active_config_filename, 'r', encoding='utf-8') as file_obj:\n active_config_name = file_obj.read().strip()\n\n return active_config_name\n\n\ndef _get_config_file(config_path, config_name):\n \"\"\"Returns the full path to a configuration's config file.\n\n Args:\n config_path (str): The Cloud SDK's config path.\n config_name (str): The configuration name.\n\n Returns:\n str: The config file path.\n \"\"\"\n return os.path.join(\n config_path, 'configurations', 'config_{}'.format(config_name))\n\n\ndef get_project_id():\n \"\"\"Gets the project ID from the Cloud SDK's configuration.\n\n Returns:\n Optional[str]: The project ID.\n \"\"\"\n config_path = get_config_path()\n active_config = _get_active_config(config_path)\n config_file = _get_config_file(config_path, active_config)\n\n if not os.path.isfile(config_file):\n return None\n\n config = configparser.RawConfigParser()\n\n try:\n config.read(config_file)\n\n if config.has_section(_PROJECT_CONFIG_SECTION):\n return config.get(\n _PROJECT_CONFIG_SECTION, _PROJECT_CONFIG_KEY)\n\n except configparser.Error:\n return None\n\n\ndef load_authorized_user_credentials(info):\n \"\"\"Loads an authorized user credential.\n\n Args:\n info (Mapping[str, str]): The loaded file's data.\n\n Returns:\n google.oauth2.credentials.Credentials: The constructed credentials.\n\n Raises:\n ValueError: if the info is in the wrong format or missing data.\n \"\"\"\n keys_needed = set(('refresh_token', 'client_id', 'client_secret'))\n missing = keys_needed.difference(six.iterkeys(info))\n\n if missing:\n raise ValueError(\n 'Authorized user info was not in the expected format, missing '\n 'fields {}.'.format(', '.join(missing)))\n\n return google.oauth2.credentials.Credentials(\n None, # No access token, must be refreshed.\n refresh_token=info['refresh_token'],\n token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,\n client_id=info['client_id'],\n client_secret=info['client_secret'])\n", "path": "google/auth/_cloud_sdk.py"}], "after_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helpers for reading the Google Cloud SDK's configuration.\"\"\"\n\nimport json\nimport os\nimport subprocess\n\nimport six\n\nfrom google.auth import environment_vars\nimport google.oauth2.credentials\n\n# The Google OAuth 2.0 token endpoint. 
Used for authorized user credentials.\n_GOOGLE_OAUTH2_TOKEN_ENDPOINT = 'https://accounts.google.com/o/oauth2/token'\n\n# The ~/.config subdirectory containing gcloud credentials.\n_CONFIG_DIRECTORY = 'gcloud'\n# Windows systems store config at %APPDATA%\\gcloud\n_WINDOWS_CONFIG_ROOT_ENV_VAR = 'APPDATA'\n# The name of the file in the Cloud SDK config that contains default\n# credentials.\n_CREDENTIALS_FILENAME = 'application_default_credentials.json'\n# The command to get the Cloud SDK configuration\n_CLOUD_SDK_CONFIG_COMMAND = (\n 'gcloud', 'config', 'config-helper', '--format', 'json')\n\n\ndef get_config_path():\n \"\"\"Returns the absolute path the the Cloud SDK's configuration directory.\n\n Returns:\n str: The Cloud SDK config path.\n \"\"\"\n # If the path is explicitly set, return that.\n try:\n return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]\n except KeyError:\n pass\n\n # Non-windows systems store this at ~/.config/gcloud\n if os.name != 'nt':\n return os.path.join(\n os.path.expanduser('~'), '.config', _CONFIG_DIRECTORY)\n # Windows systems store config at %APPDATA%\\gcloud\n else:\n try:\n return os.path.join(\n os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR],\n _CONFIG_DIRECTORY)\n except KeyError:\n # This should never happen unless someone is really\n # messing with things, but we'll cover the case anyway.\n drive = os.environ.get('SystemDrive', 'C:')\n return os.path.join(\n drive, '\\\\', _CONFIG_DIRECTORY)\n\n\ndef get_application_default_credentials_path():\n \"\"\"Gets the path to the application default credentials file.\n\n The path may or may not exist.\n\n Returns:\n str: The full path to application default credentials.\n \"\"\"\n config_path = get_config_path()\n return os.path.join(config_path, _CREDENTIALS_FILENAME)\n\n\ndef load_authorized_user_credentials(info):\n \"\"\"Loads an authorized user credential.\n\n Args:\n info (Mapping[str, str]): The loaded file's data.\n\n Returns:\n google.oauth2.credentials.Credentials: The constructed credentials.\n\n Raises:\n ValueError: if the info is in the wrong format or missing data.\n \"\"\"\n keys_needed = set(('refresh_token', 'client_id', 'client_secret'))\n missing = keys_needed.difference(six.iterkeys(info))\n\n if missing:\n raise ValueError(\n 'Authorized user info was not in the expected format, missing '\n 'fields {}.'.format(', '.join(missing)))\n\n return google.oauth2.credentials.Credentials(\n None, # No access token, must be refreshed.\n refresh_token=info['refresh_token'],\n token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,\n client_id=info['client_id'],\n client_secret=info['client_secret'])\n\n\ndef get_project_id():\n \"\"\"Gets the project ID from the Cloud SDK.\n\n Returns:\n Optional[str]: The project ID.\n \"\"\"\n\n try:\n output = subprocess.check_output(\n _CLOUD_SDK_CONFIG_COMMAND,\n stderr=subprocess.STDOUT)\n except (subprocess.CalledProcessError, OSError, IOError):\n return None\n\n try:\n configuration = json.loads(output.decode('utf-8'))\n except ValueError:\n return None\n\n try:\n return configuration['configuration']['properties']['core']['project']\n except KeyError:\n return None\n", "path": "google/auth/_cloud_sdk.py"}]}
2,033
837
gh_patches_debug_36535
rasdani/github-patches
git_diff
pyro-ppl__pyro-2123
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Predictive class fails for non-scalar sites I usually use raw poutines to for prediction: ```py vectorize = pyro.plate("_vectorize", num_samples, dim=-2) trace = poutine.trace(vectorize(guide)).get_trace() samples = poutine.replay(vectorize(model), trace)() ``` When instead I try the new `Predictive` class, this fails on models with nontrivial plate structure: ```py predictive = Predictive(model, guide=guide, return_sites=["x", "y"], num_samples=num_samples, parallel=True) samples = predictive.get_samples() ``` I believe this is due to reshaping of posterior samples without regard to their event shape. I don't know why `_predictive()` even needs to reshape samples. I have added a failing test in this branch. Feel free to take over the branch and create a PR: https://github.com/pyro-ppl/pyro/pull/new/fix-preditive-shapes --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyro/infer/predictive.py` Content: ``` 1 from functools import reduce 2 import warnings 3 4 import torch 5 6 import pyro 7 import pyro.poutine as poutine 8 from pyro.poutine.util import prune_subsample_sites 9 10 11 def _guess_max_plate_nesting(model, args, kwargs): 12 """ 13 Guesses max_plate_nesting by running the model once 14 without enumeration. This optimistically assumes static model 15 structure. 16 """ 17 with poutine.block(): 18 model_trace = poutine.trace(model).get_trace(*args, **kwargs) 19 sites = [site for site in model_trace.nodes.values() 20 if site["type"] == "sample"] 21 22 dims = [frame.dim 23 for site in sites 24 for frame in site["cond_indep_stack"] 25 if frame.vectorized] 26 max_plate_nesting = -min(dims) if dims else 0 27 return max_plate_nesting 28 29 30 def _predictive_sequential(model, posterior_samples, model_args, model_kwargs, 31 num_samples, sample_sites, return_trace=False): 32 collected = [] 33 samples = [{k: v[i] for k, v in posterior_samples.items()} for i in range(num_samples)] 34 for i in range(num_samples): 35 trace = poutine.trace(poutine.condition(model, samples[i])).get_trace(*model_args, **model_kwargs) 36 if return_trace: 37 collected.append(trace) 38 else: 39 collected.append({site: trace.nodes[site]['value'] for site in sample_sites}) 40 41 return collected if return_trace else {site: torch.stack([s[site] for s in collected]) 42 for site in sample_sites} 43 44 45 def _predictive(model, posterior_samples, num_samples, return_sites=None, 46 return_trace=False, parallel=False, model_args=(), model_kwargs={}): 47 max_plate_nesting = _guess_max_plate_nesting(model, model_args, model_kwargs) 48 vectorize = pyro.plate("_num_predictive_samples", num_samples, dim=-max_plate_nesting-1) 49 model_trace = prune_subsample_sites(poutine.trace(model).get_trace(*model_args, **model_kwargs)) 50 reshaped_samples = {} 51 52 for name, sample in posterior_samples.items(): 53 sample_shape = sample.shape[1:] 54 sample = sample.reshape((num_samples,) + (1,) * (max_plate_nesting - len(sample_shape)) + sample_shape) 55 reshaped_samples[name] = sample 56 57 if return_trace: 58 trace = poutine.trace(poutine.condition(vectorize(model), reshaped_samples))\ 59 .get_trace(*model_args, **model_kwargs) 60 return trace 61 62 return_site_shapes = {} 63 for site in model_trace.stochastic_nodes + model_trace.observation_nodes: 64 site_shape = (num_samples,) + model_trace.nodes[site]['value'].shape 65 if 
isinstance(return_sites, (list, tuple, set)): 66 if site in return_sites: 67 return_site_shapes[site] = site_shape 68 else: 69 if (return_sites is not None) or (site not in reshaped_samples): 70 return_site_shapes[site] = site_shape 71 72 # handle _RETURN site 73 if isinstance(return_sites, (list, tuple, set)) and '_RETURN' in return_sites: 74 value = model_trace.nodes['_RETURN']['value'] 75 shape = (num_samples,) + value.shape if torch.is_tensor(value) else None 76 return_site_shapes['_RETURN'] = shape 77 78 if not parallel: 79 return _predictive_sequential(model, posterior_samples, model_args, model_kwargs, num_samples, 80 return_site_shapes.keys(), return_trace=False) 81 82 trace = poutine.trace(poutine.condition(vectorize(model), reshaped_samples))\ 83 .get_trace(*model_args, **model_kwargs) 84 predictions = {} 85 for site, shape in return_site_shapes.items(): 86 value = trace.nodes[site]['value'] 87 if site == '_RETURN' and shape is None: 88 predictions[site] = value 89 continue 90 if value.numel() < reduce((lambda x, y: x * y), shape): 91 predictions[site] = value.expand(shape) 92 else: 93 predictions[site] = value.reshape(shape) 94 95 return predictions 96 97 98 class Predictive: 99 """ 100 This class is used to construct predictive distribution. The predictive distribution is obtained 101 by running model conditioned on latent samples from `posterior_samples`. 102 103 .. warning:: 104 The interface for the :class:`Predictive` class is experimental, and 105 might change in the future. 106 107 :param model: Python callable containing Pyro primitives. 108 :param dict posterior_samples: dictionary of samples from the posterior. 109 :param callable guide: optional guide to get posterior samples of sites not present 110 in `posterior_samples`. 111 :param int num_samples: number of samples to draw from the predictive distribution. 112 This argument has no effect if ``posterior_samples`` is non-empty, in which case, 113 the leading dimension size of samples in ``posterior_samples`` is used. 114 :param return_sites: sites to return; by default only sample sites not present 115 in `posterior_samples` are returned. 116 :type return_sites: list, tuple, or set 117 :param bool parallel: predict in parallel by wrapping the existing model 118 in an outermost `plate` messenger. Note that this requires that the model has 119 all batch dims correctly annotated via :class:`~pyro.plate`. Default is `False`. 120 """ 121 def __init__(self, model, posterior_samples=None, guide=None, num_samples=None, 122 return_sites=None, parallel=False): 123 if posterior_samples is None and num_samples is None: 124 raise ValueError("Either posterior_samples or num_samples must be specified.") 125 126 posterior_samples = {} if posterior_samples is None else posterior_samples 127 128 for name, sample in posterior_samples.items(): 129 batch_size = sample.shape[0] 130 if num_samples is None: 131 num_samples = batch_size 132 elif num_samples != batch_size: 133 warnings.warn("Sample's leading dimension size {} is different from the " 134 "provided {} num_samples argument. Defaulting to {}." 
135 .format(batch_size, num_samples, batch_size), UserWarning) 136 num_samples = batch_size 137 138 if num_samples is None: 139 raise ValueError("No sample sites in posterior samples to infer `num_samples`.") 140 141 if return_sites is not None: 142 assert isinstance(return_sites, (list, tuple, set)) 143 144 self.model = model 145 self.posterior_samples = {} if posterior_samples is None else posterior_samples 146 self.num_samples = num_samples 147 self.guide = guide 148 self.return_sites = return_sites 149 self.parallel = parallel 150 151 def get_samples(self, *args, **kwargs): 152 """ 153 Returns dict of samples from the predictive distribution. By default, only sample sites not 154 contained in `posterior_samples` are returned. This can be modified by changing the 155 `return_sites` keyword argument of this :class:`Predictive` instance. 156 157 :param args: model arguments. 158 :param kwargs: model keyword arguments. 159 """ 160 posterior_samples = self.posterior_samples 161 if self.guide is not None: 162 # use return_sites='' as a special signal to return all sites 163 posterior_samples = _predictive(self.guide, posterior_samples, self.num_samples, 164 return_sites='', parallel=self.parallel, 165 model_args=args, model_kwargs=kwargs) 166 return _predictive(self.model, posterior_samples, self.num_samples, 167 return_sites=self.return_sites, parallel=self.parallel, 168 model_args=args, model_kwargs=kwargs) 169 170 def get_vectorized_trace(self, *args, **kwargs): 171 """ 172 Returns a single vectorized `trace` from the predictive distribution. Note that this 173 requires that the model has all batch dims correctly annotated via :class:`~pyro.plate`. 174 175 :param args: model arguments. 176 :param kwargs: model keyword arguments. 177 """ 178 posterior_samples = self.posterior_samples 179 if self.guide is not None: 180 # use return_sites='' as a special signal to return all sites 181 posterior_samples = _predictive(self.guide, posterior_samples, self.num_samples, 182 return_sites='', parallel=self.parallel, 183 model_args=args, model_kwargs=kwargs) 184 return _predictive(self.model, posterior_samples, self.num_samples, 185 return_trace=True, model_args=args, model_kwargs=kwargs) 186 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
```diff
diff --git a/pyro/infer/predictive.py b/pyro/infer/predictive.py
--- a/pyro/infer/predictive.py
+++ b/pyro/infer/predictive.py
@@ -28,7 +28,7 @@


 def _predictive_sequential(model, posterior_samples, model_args, model_kwargs,
-                           num_samples, sample_sites, return_trace=False):
+                           num_samples, return_site_shapes, return_trace=False):
     collected = []
     samples = [{k: v[i] for k, v in posterior_samples.items()} for i in range(num_samples)]
     for i in range(num_samples):
@@ -36,10 +36,13 @@
         if return_trace:
             collected.append(trace)
         else:
-            collected.append({site: trace.nodes[site]['value'] for site in sample_sites})
+            collected.append({site: trace.nodes[site]['value'] for site in return_site_shapes})

-    return collected if return_trace else {site: torch.stack([s[site] for s in collected])
-                                           for site in sample_sites}
+    if return_trace:
+        return collected
+    else:
+        return {site: torch.stack([s[site] for s in collected]).reshape(shape)
+                for site, shape in return_site_shapes.items()}


 def _predictive(model, posterior_samples, num_samples, return_sites=None,
@@ -61,7 +64,8 @@

     return_site_shapes = {}
     for site in model_trace.stochastic_nodes + model_trace.observation_nodes:
-        site_shape = (num_samples,) + model_trace.nodes[site]['value'].shape
+        append_ndim = max_plate_nesting - len(model_trace.nodes[site]["fn"].batch_shape)
+        site_shape = (num_samples,) + (1,) * append_ndim + model_trace.nodes[site]['value'].shape
         if isinstance(return_sites, (list, tuple, set)):
             if site in return_sites:
                 return_site_shapes[site] = site_shape
@@ -77,7 +81,7 @@

     if not parallel:
         return _predictive_sequential(model, posterior_samples, model_args, model_kwargs, num_samples,
-                                      return_site_shapes.keys(), return_trace=False)
+                                      return_site_shapes, return_trace=False)

     trace = poutine.trace(poutine.condition(vectorize(model), reshaped_samples))\
         .get_trace(*model_args, **model_kwargs)
```
{"golden_diff": "diff --git a/pyro/infer/predictive.py b/pyro/infer/predictive.py\n--- a/pyro/infer/predictive.py\n+++ b/pyro/infer/predictive.py\n@@ -28,7 +28,7 @@\n \n \n def _predictive_sequential(model, posterior_samples, model_args, model_kwargs,\n- num_samples, sample_sites, return_trace=False):\n+ num_samples, return_site_shapes, return_trace=False):\n collected = []\n samples = [{k: v[i] for k, v in posterior_samples.items()} for i in range(num_samples)]\n for i in range(num_samples):\n@@ -36,10 +36,13 @@\n if return_trace:\n collected.append(trace)\n else:\n- collected.append({site: trace.nodes[site]['value'] for site in sample_sites})\n+ collected.append({site: trace.nodes[site]['value'] for site in return_site_shapes})\n \n- return collected if return_trace else {site: torch.stack([s[site] for s in collected])\n- for site in sample_sites}\n+ if return_trace:\n+ return collected\n+ else:\n+ return {site: torch.stack([s[site] for s in collected]).reshape(shape)\n+ for site, shape in return_site_shapes.items()}\n \n \n def _predictive(model, posterior_samples, num_samples, return_sites=None,\n@@ -61,7 +64,8 @@\n \n return_site_shapes = {}\n for site in model_trace.stochastic_nodes + model_trace.observation_nodes:\n- site_shape = (num_samples,) + model_trace.nodes[site]['value'].shape\n+ append_ndim = max_plate_nesting - len(model_trace.nodes[site][\"fn\"].batch_shape)\n+ site_shape = (num_samples,) + (1,) * append_ndim + model_trace.nodes[site]['value'].shape\n if isinstance(return_sites, (list, tuple, set)):\n if site in return_sites:\n return_site_shapes[site] = site_shape\n@@ -77,7 +81,7 @@\n \n if not parallel:\n return _predictive_sequential(model, posterior_samples, model_args, model_kwargs, num_samples,\n- return_site_shapes.keys(), return_trace=False)\n+ return_site_shapes, return_trace=False)\n \n trace = poutine.trace(poutine.condition(vectorize(model), reshaped_samples))\\\n .get_trace(*model_args, **model_kwargs)\n", "issue": "Predictive class fails for non-scalar sites\nI usually use raw poutines to for prediction:\r\n```py\r\nvectorize = pyro.plate(\"_vectorize\", num_samples, dim=-2)\r\ntrace = poutine.trace(vectorize(guide)).get_trace()\r\nsamples = poutine.replay(vectorize(model), trace)()\r\n```\r\nWhen instead I try the new `Predictive` class, this fails on models with nontrivial plate structure:\r\n```py\r\npredictive = Predictive(model, guide=guide, return_sites=[\"x\", \"y\"],\r\n num_samples=num_samples, parallel=True)\r\nsamples = predictive.get_samples()\r\n```\r\nI believe this is due to reshaping of posterior samples without regard to their event shape. I don't know why `_predictive()` even needs to reshape samples.\r\n\r\nI have added a failing test in this branch. Feel free to take over the branch and create a PR:\r\nhttps://github.com/pyro-ppl/pyro/pull/new/fix-preditive-shapes\n", "before_files": [{"content": "from functools import reduce\nimport warnings\n\nimport torch\n\nimport pyro\nimport pyro.poutine as poutine\nfrom pyro.poutine.util import prune_subsample_sites\n\n\ndef _guess_max_plate_nesting(model, args, kwargs):\n \"\"\"\n Guesses max_plate_nesting by running the model once\n without enumeration. 
This optimistically assumes static model\n structure.\n \"\"\"\n with poutine.block():\n model_trace = poutine.trace(model).get_trace(*args, **kwargs)\n sites = [site for site in model_trace.nodes.values()\n if site[\"type\"] == \"sample\"]\n\n dims = [frame.dim\n for site in sites\n for frame in site[\"cond_indep_stack\"]\n if frame.vectorized]\n max_plate_nesting = -min(dims) if dims else 0\n return max_plate_nesting\n\n\ndef _predictive_sequential(model, posterior_samples, model_args, model_kwargs,\n num_samples, sample_sites, return_trace=False):\n collected = []\n samples = [{k: v[i] for k, v in posterior_samples.items()} for i in range(num_samples)]\n for i in range(num_samples):\n trace = poutine.trace(poutine.condition(model, samples[i])).get_trace(*model_args, **model_kwargs)\n if return_trace:\n collected.append(trace)\n else:\n collected.append({site: trace.nodes[site]['value'] for site in sample_sites})\n\n return collected if return_trace else {site: torch.stack([s[site] for s in collected])\n for site in sample_sites}\n\n\ndef _predictive(model, posterior_samples, num_samples, return_sites=None,\n return_trace=False, parallel=False, model_args=(), model_kwargs={}):\n max_plate_nesting = _guess_max_plate_nesting(model, model_args, model_kwargs)\n vectorize = pyro.plate(\"_num_predictive_samples\", num_samples, dim=-max_plate_nesting-1)\n model_trace = prune_subsample_sites(poutine.trace(model).get_trace(*model_args, **model_kwargs))\n reshaped_samples = {}\n\n for name, sample in posterior_samples.items():\n sample_shape = sample.shape[1:]\n sample = sample.reshape((num_samples,) + (1,) * (max_plate_nesting - len(sample_shape)) + sample_shape)\n reshaped_samples[name] = sample\n\n if return_trace:\n trace = poutine.trace(poutine.condition(vectorize(model), reshaped_samples))\\\n .get_trace(*model_args, **model_kwargs)\n return trace\n\n return_site_shapes = {}\n for site in model_trace.stochastic_nodes + model_trace.observation_nodes:\n site_shape = (num_samples,) + model_trace.nodes[site]['value'].shape\n if isinstance(return_sites, (list, tuple, set)):\n if site in return_sites:\n return_site_shapes[site] = site_shape\n else:\n if (return_sites is not None) or (site not in reshaped_samples):\n return_site_shapes[site] = site_shape\n\n # handle _RETURN site\n if isinstance(return_sites, (list, tuple, set)) and '_RETURN' in return_sites:\n value = model_trace.nodes['_RETURN']['value']\n shape = (num_samples,) + value.shape if torch.is_tensor(value) else None\n return_site_shapes['_RETURN'] = shape\n\n if not parallel:\n return _predictive_sequential(model, posterior_samples, model_args, model_kwargs, num_samples,\n return_site_shapes.keys(), return_trace=False)\n\n trace = poutine.trace(poutine.condition(vectorize(model), reshaped_samples))\\\n .get_trace(*model_args, **model_kwargs)\n predictions = {}\n for site, shape in return_site_shapes.items():\n value = trace.nodes[site]['value']\n if site == '_RETURN' and shape is None:\n predictions[site] = value\n continue\n if value.numel() < reduce((lambda x, y: x * y), shape):\n predictions[site] = value.expand(shape)\n else:\n predictions[site] = value.reshape(shape)\n\n return predictions\n\n\nclass Predictive:\n \"\"\"\n This class is used to construct predictive distribution. The predictive distribution is obtained\n by running model conditioned on latent samples from `posterior_samples`.\n\n .. 
warning::\n The interface for the :class:`Predictive` class is experimental, and\n might change in the future.\n\n :param model: Python callable containing Pyro primitives.\n :param dict posterior_samples: dictionary of samples from the posterior.\n :param callable guide: optional guide to get posterior samples of sites not present\n in `posterior_samples`.\n :param int num_samples: number of samples to draw from the predictive distribution.\n This argument has no effect if ``posterior_samples`` is non-empty, in which case,\n the leading dimension size of samples in ``posterior_samples`` is used.\n :param return_sites: sites to return; by default only sample sites not present\n in `posterior_samples` are returned.\n :type return_sites: list, tuple, or set\n :param bool parallel: predict in parallel by wrapping the existing model\n in an outermost `plate` messenger. Note that this requires that the model has\n all batch dims correctly annotated via :class:`~pyro.plate`. Default is `False`.\n \"\"\"\n def __init__(self, model, posterior_samples=None, guide=None, num_samples=None,\n return_sites=None, parallel=False):\n if posterior_samples is None and num_samples is None:\n raise ValueError(\"Either posterior_samples or num_samples must be specified.\")\n\n posterior_samples = {} if posterior_samples is None else posterior_samples\n\n for name, sample in posterior_samples.items():\n batch_size = sample.shape[0]\n if num_samples is None:\n num_samples = batch_size\n elif num_samples != batch_size:\n warnings.warn(\"Sample's leading dimension size {} is different from the \"\n \"provided {} num_samples argument. Defaulting to {}.\"\n .format(batch_size, num_samples, batch_size), UserWarning)\n num_samples = batch_size\n\n if num_samples is None:\n raise ValueError(\"No sample sites in posterior samples to infer `num_samples`.\")\n\n if return_sites is not None:\n assert isinstance(return_sites, (list, tuple, set))\n\n self.model = model\n self.posterior_samples = {} if posterior_samples is None else posterior_samples\n self.num_samples = num_samples\n self.guide = guide\n self.return_sites = return_sites\n self.parallel = parallel\n\n def get_samples(self, *args, **kwargs):\n \"\"\"\n Returns dict of samples from the predictive distribution. By default, only sample sites not\n contained in `posterior_samples` are returned. This can be modified by changing the\n `return_sites` keyword argument of this :class:`Predictive` instance.\n\n :param args: model arguments.\n :param kwargs: model keyword arguments.\n \"\"\"\n posterior_samples = self.posterior_samples\n if self.guide is not None:\n # use return_sites='' as a special signal to return all sites\n posterior_samples = _predictive(self.guide, posterior_samples, self.num_samples,\n return_sites='', parallel=self.parallel,\n model_args=args, model_kwargs=kwargs)\n return _predictive(self.model, posterior_samples, self.num_samples,\n return_sites=self.return_sites, parallel=self.parallel,\n model_args=args, model_kwargs=kwargs)\n\n def get_vectorized_trace(self, *args, **kwargs):\n \"\"\"\n Returns a single vectorized `trace` from the predictive distribution. 
Note that this\n requires that the model has all batch dims correctly annotated via :class:`~pyro.plate`.\n\n :param args: model arguments.\n :param kwargs: model keyword arguments.\n \"\"\"\n posterior_samples = self.posterior_samples\n if self.guide is not None:\n # use return_sites='' as a special signal to return all sites\n posterior_samples = _predictive(self.guide, posterior_samples, self.num_samples,\n return_sites='', parallel=self.parallel,\n model_args=args, model_kwargs=kwargs)\n return _predictive(self.model, posterior_samples, self.num_samples,\n return_trace=True, model_args=args, model_kwargs=kwargs)\n", "path": "pyro/infer/predictive.py"}], "after_files": [{"content": "from functools import reduce\nimport warnings\n\nimport torch\n\nimport pyro\nimport pyro.poutine as poutine\nfrom pyro.poutine.util import prune_subsample_sites\n\n\ndef _guess_max_plate_nesting(model, args, kwargs):\n \"\"\"\n Guesses max_plate_nesting by running the model once\n without enumeration. This optimistically assumes static model\n structure.\n \"\"\"\n with poutine.block():\n model_trace = poutine.trace(model).get_trace(*args, **kwargs)\n sites = [site for site in model_trace.nodes.values()\n if site[\"type\"] == \"sample\"]\n\n dims = [frame.dim\n for site in sites\n for frame in site[\"cond_indep_stack\"]\n if frame.vectorized]\n max_plate_nesting = -min(dims) if dims else 0\n return max_plate_nesting\n\n\ndef _predictive_sequential(model, posterior_samples, model_args, model_kwargs,\n num_samples, return_site_shapes, return_trace=False):\n collected = []\n samples = [{k: v[i] for k, v in posterior_samples.items()} for i in range(num_samples)]\n for i in range(num_samples):\n trace = poutine.trace(poutine.condition(model, samples[i])).get_trace(*model_args, **model_kwargs)\n if return_trace:\n collected.append(trace)\n else:\n collected.append({site: trace.nodes[site]['value'] for site in return_site_shapes})\n\n if return_trace:\n return collected\n else:\n return {site: torch.stack([s[site] for s in collected]).reshape(shape)\n for site, shape in return_site_shapes.items()}\n\n\ndef _predictive(model, posterior_samples, num_samples, return_sites=None,\n return_trace=False, parallel=False, model_args=(), model_kwargs={}):\n max_plate_nesting = _guess_max_plate_nesting(model, model_args, model_kwargs)\n vectorize = pyro.plate(\"_num_predictive_samples\", num_samples, dim=-max_plate_nesting-1)\n model_trace = prune_subsample_sites(poutine.trace(model).get_trace(*model_args, **model_kwargs))\n reshaped_samples = {}\n\n for name, sample in posterior_samples.items():\n sample_shape = sample.shape[1:]\n sample = sample.reshape((num_samples,) + (1,) * (max_plate_nesting - len(sample_shape)) + sample_shape)\n reshaped_samples[name] = sample\n\n if return_trace:\n trace = poutine.trace(poutine.condition(vectorize(model), reshaped_samples))\\\n .get_trace(*model_args, **model_kwargs)\n return trace\n\n return_site_shapes = {}\n for site in model_trace.stochastic_nodes + model_trace.observation_nodes:\n append_ndim = max_plate_nesting - len(model_trace.nodes[site][\"fn\"].batch_shape)\n site_shape = (num_samples,) + (1,) * append_ndim + model_trace.nodes[site]['value'].shape\n if isinstance(return_sites, (list, tuple, set)):\n if site in return_sites:\n return_site_shapes[site] = site_shape\n else:\n if (return_sites is not None) or (site not in reshaped_samples):\n return_site_shapes[site] = site_shape\n\n # handle _RETURN site\n if isinstance(return_sites, (list, tuple, set)) and '_RETURN' in 
return_sites:\n value = model_trace.nodes['_RETURN']['value']\n shape = (num_samples,) + value.shape if torch.is_tensor(value) else None\n return_site_shapes['_RETURN'] = shape\n\n if not parallel:\n return _predictive_sequential(model, posterior_samples, model_args, model_kwargs, num_samples,\n return_site_shapes, return_trace=False)\n\n trace = poutine.trace(poutine.condition(vectorize(model), reshaped_samples))\\\n .get_trace(*model_args, **model_kwargs)\n predictions = {}\n for site, shape in return_site_shapes.items():\n value = trace.nodes[site]['value']\n if site == '_RETURN' and shape is None:\n predictions[site] = value\n continue\n if value.numel() < reduce((lambda x, y: x * y), shape):\n predictions[site] = value.expand(shape)\n else:\n predictions[site] = value.reshape(shape)\n\n return predictions\n\n\nclass Predictive:\n \"\"\"\n This class is used to construct predictive distribution. The predictive distribution is obtained\n by running model conditioned on latent samples from `posterior_samples`.\n\n .. warning::\n The interface for the :class:`Predictive` class is experimental, and\n might change in the future.\n\n :param model: Python callable containing Pyro primitives.\n :param dict posterior_samples: dictionary of samples from the posterior.\n :param callable guide: optional guide to get posterior samples of sites not present\n in `posterior_samples`.\n :param int num_samples: number of samples to draw from the predictive distribution.\n This argument has no effect if ``posterior_samples`` is non-empty, in which case,\n the leading dimension size of samples in ``posterior_samples`` is used.\n :param return_sites: sites to return; by default only sample sites not present\n in `posterior_samples` are returned.\n :type return_sites: list, tuple, or set\n :param bool parallel: predict in parallel by wrapping the existing model\n in an outermost `plate` messenger. Note that this requires that the model has\n all batch dims correctly annotated via :class:`~pyro.plate`. Default is `False`.\n \"\"\"\n def __init__(self, model, posterior_samples=None, guide=None, num_samples=None,\n return_sites=None, parallel=False):\n if posterior_samples is None and num_samples is None:\n raise ValueError(\"Either posterior_samples or num_samples must be specified.\")\n\n posterior_samples = {} if posterior_samples is None else posterior_samples\n\n for name, sample in posterior_samples.items():\n batch_size = sample.shape[0]\n if num_samples is None:\n num_samples = batch_size\n elif num_samples != batch_size:\n warnings.warn(\"Sample's leading dimension size {} is different from the \"\n \"provided {} num_samples argument. Defaulting to {}.\"\n .format(batch_size, num_samples, batch_size), UserWarning)\n num_samples = batch_size\n\n if num_samples is None:\n raise ValueError(\"No sample sites in posterior samples to infer `num_samples`.\")\n\n if return_sites is not None:\n assert isinstance(return_sites, (list, tuple, set))\n\n self.model = model\n self.posterior_samples = {} if posterior_samples is None else posterior_samples\n self.num_samples = num_samples\n self.guide = guide\n self.return_sites = return_sites\n self.parallel = parallel\n\n def get_samples(self, *args, **kwargs):\n \"\"\"\n Returns dict of samples from the predictive distribution. By default, only sample sites not\n contained in `posterior_samples` are returned. 
This can be modified by changing the\n `return_sites` keyword argument of this :class:`Predictive` instance.\n\n :param args: model arguments.\n :param kwargs: model keyword arguments.\n \"\"\"\n posterior_samples = self.posterior_samples\n if self.guide is not None:\n # use return_sites='' as a special signal to return all sites\n posterior_samples = _predictive(self.guide, posterior_samples, self.num_samples,\n return_sites='', parallel=self.parallel,\n model_args=args, model_kwargs=kwargs)\n return _predictive(self.model, posterior_samples, self.num_samples,\n return_sites=self.return_sites, parallel=self.parallel,\n model_args=args, model_kwargs=kwargs)\n\n def get_vectorized_trace(self, *args, **kwargs):\n \"\"\"\n Returns a single vectorized `trace` from the predictive distribution. Note that this\n requires that the model has all batch dims correctly annotated via :class:`~pyro.plate`.\n\n :param args: model arguments.\n :param kwargs: model keyword arguments.\n \"\"\"\n posterior_samples = self.posterior_samples\n if self.guide is not None:\n # use return_sites='' as a special signal to return all sites\n posterior_samples = _predictive(self.guide, posterior_samples, self.num_samples,\n return_sites='', parallel=self.parallel,\n model_args=args, model_kwargs=kwargs)\n return _predictive(self.model, posterior_samples, self.num_samples,\n return_trace=True, model_args=args, model_kwargs=kwargs)\n", "path": "pyro/infer/predictive.py"}]}
2,696
529
gh_patches_debug_25173
rasdani/github-patches
git_diff
Azure__azure-cli-extensions-3626
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- az postgres up does not create the resource group ## Describe the bug **Tutorial link:** [Deploy a Django web app with PostgreSQL in Azure App Service](https://docs.microsoft.com/en-us/azure/app-service/tutorial-python-postgresql-app?tabs=cmd%2Cclone) It is expected that the `az postgres up` command creates the resource group if it does not already exist, as mentioned in the [doc](https://docs.microsoft.com/en-us/azure/app-service/tutorial-python-postgresql-app?tabs=cmd%2Cclone#3-create-postgres-database-in-azure). Running this command on Azure CLI version **2.22.1** and `db-up` extension version **0.2.3** works as expected. ![image](https://user-images.githubusercontent.com/44050820/121654952-de2b3200-cabb-11eb-9d67-de07c7d20097.png) However, this does not work with the latest Azure CLI version **2.24.2** and `db-up` extension version **0.2.3**. **Command Name** `az postgres up Extension Name: db-up. Version: 0.2.3.` **Errors:** ``` (ResourceGroupNotFound) Resource group 'DjangoPostgres-tutorial-rg-2242' could not be found. ``` ## To Reproduce: Steps to reproduce the behavior. Note that argument values have been redacted, as they may contain sensitive information. - Install Azure CLI version `2.24.2`. - `az postgres up --resource-group {} --location {} --sku-name {} --server-name {} --database-name {} --admin-user {} --admin-password {} --ssl-enforcement {}` ## Expected Behavior The command should create the specified resource group and proceed with provisioning the postgres db server, as shown in the screenshot above. ## Environment Summary ``` Windows-10-10.0.19041-SP0 Python 3.8.9 Installer: MSI azure-cli 2.24.2 azure-cli-ml 1.24.0 Extensions: azure-cli-ml 1.24.0 azure-devops 0.18.0 azure-iot 0.10.9 db-up 0.2.3 mesh 0.10.6 resource-graph 1.1.0 scheduled-query 0.2.0 spring-cloud 1.1.1 ``` ## Additional Context GitHub Issue: https://github.com/MicrosoftDocs/azure-docs/issues/76722 <!--Please don't remove this:--> <!--auto-generated--> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/db-up/setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 # -------------------------------------------------------------------------------------------- 4 # Copyright (c) Microsoft Corporation. All rights reserved. 5 # Licensed under the MIT License. See License.txt in the project root for license information. 
6 # -------------------------------------------------------------------------------------------- 7 8 from codecs import open 9 from setuptools import setup, find_packages 10 11 VERSION = "0.2.3" 12 13 CLASSIFIERS = [ 14 'Development Status :: 4 - Beta', 15 'Intended Audience :: Developers', 16 'Intended Audience :: System Administrators', 17 'Programming Language :: Python', 18 'Programming Language :: Python :: 3', 19 'Programming Language :: Python :: 3.6', 20 'Programming Language :: Python :: 3.7', 21 'Programming Language :: Python :: 3.8', 22 'License :: OSI Approved :: MIT License', 23 ] 24 25 DEPENDENCIES = [ 26 'Cython==0.29.17', 27 'mysql-connector-python==8.0.14', 28 'psycopg2-binary==2.8.5' 29 ] 30 31 setup( 32 name='db-up', 33 version=VERSION, 34 description='Additional commands to simplify Azure Database workflows.', 35 long_description='An Azure CLI Extension to provide additional DB commands.', 36 license='MIT', 37 author='Microsoft Corporation', 38 author_email='[email protected]', 39 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/db-up', 40 classifiers=CLASSIFIERS, 41 package_data={'azext_db_up': ['azext_metadata.json', 'random_name/*']}, 42 packages=find_packages(exclude=["tests"]), 43 install_requires=DEPENDENCIES 44 ) 45 ``` Path: `src/db-up/azext_db_up/_validators.py` Content: ``` 1 # -------------------------------------------------------------------------------------------- 2 # Copyright (c) Microsoft Corporation. All rights reserved. 3 # Licensed under the MIT License. See License.txt in the project root for license information. 4 # -------------------------------------------------------------------------------------------- 5 6 # pylint: disable=import-error 7 import uuid 8 from six.moves import configparser 9 from azure.cli.core.commands.validators import get_default_location_from_resource_group 10 from azure.mgmt.resource.resources.models import ResourceGroup 11 from knack.log import get_logger 12 from knack.util import CLIError 13 from msrestazure.azure_exceptions import CloudError 14 from msrest.exceptions import ValidationError 15 from azext_db_up._client_factory import resource_client_factory 16 from azext_db_up.random_name.generate import generate_username 17 from azext_db_up.util import create_random_resource_name, get_config_value, set_config_value, remove_config_value 18 19 logger = get_logger(__name__) 20 21 DEFAULT_LOCATION = 'westus2' 22 DEFAULT_DATABASE_NAME = 'sampledb' 23 24 25 def db_up_namespace_processor(db_type): 26 return lambda cmd, namespace: _process_db_up_namespace(cmd, namespace, db_type=db_type) 27 28 29 def db_down_namespace_processor(db_type): 30 return lambda cmd, namespace: _process_db_down_namespace(namespace, db_type=db_type) 31 32 33 # pylint: disable=bare-except 34 def _process_db_up_namespace(cmd, namespace, db_type=None): 35 # populate from cache if existing when no resource group name provided 36 resource_client = resource_client_factory(cmd.cli_ctx) 37 if namespace.resource_group_name is None: 38 _set_value(db_type, namespace, 'resource_group_name', 'group', cache=False) 39 try: 40 resource_client.resource_groups.get(namespace.resource_group_name) 41 except: # Clear resource group name information when it is invalid 42 namespace.resource_group_name = None 43 44 # populate from cache if existing when no location provided 45 if namespace.location is None: 46 _set_value(db_type, namespace, 'location', 'location', cache=False) 47 # generate smart defaults when namespace.location is None 48 if 
_get_value(db_type, namespace, 'location', 'location') is None: 49 try: 50 get_default_location_from_resource_group(cmd, namespace) 51 except (CLIError, ValidationError): 52 namespace.location = 'eastus' 53 _set_value(db_type, namespace, 'location', 'location', default=namespace.location) 54 55 # When resource group name in namespace is different from what in cache, reset it and create new server name 56 if namespace.resource_group_name != get_config_value(db_type, 'group', None): 57 set_config_value(db_type, 'group', namespace.resource_group_name) 58 if namespace.server_name is None: 59 namespace.server_name = create_random_resource_name('server') 60 set_config_value(db_type, 'server', namespace.server_name) 61 62 # When no resource group name in namespace and cache, create new resource group with random name 63 create_resource_group = True 64 if namespace.resource_group_name is None: # No resource group provided and in cache 65 namespace.resource_group_name = create_random_resource_name('group') 66 else: 67 try: 68 resource_client.resource_groups.get(namespace.resource_group_name) 69 create_resource_group = False 70 except CloudError: # throw exception when resource group name is invalid 71 pass 72 73 if create_resource_group: 74 # create new resource group 75 params = ResourceGroup(location=namespace.location) 76 logger.warning('Creating Resource Group \'%s\'...', namespace.resource_group_name) 77 resource_client.resource_groups.create_or_update(namespace.resource_group_name, params) 78 _set_value(db_type, namespace, 'resource_group_name', 'group', default=namespace.resource_group_name) 79 80 _set_value(db_type, namespace, 'server_name', 'server', default=create_random_resource_name('server')) 81 _set_value(db_type, namespace, 'administrator_login', 'login', default=generate_username()) 82 if namespace.generate_password: 83 namespace.administrator_login_password = str(uuid.uuid4()) 84 del namespace.generate_password 85 _set_value(db_type, namespace, 'database_name', 'database', default=DEFAULT_DATABASE_NAME) 86 87 88 def _process_db_down_namespace(namespace, db_type=None): 89 # populate from cache if existing 90 if namespace.resource_group_name is None: 91 namespace.resource_group_name = _get_value(db_type, namespace, 'resource_group_name', 'group') 92 remove_config_value(db_type, 'group') 93 if namespace.server_name is None and not namespace.delete_group: 94 namespace.server_name = _get_value(db_type, namespace, 'server_name', 'server') 95 remove_config_value(db_type, 'server') 96 remove_config_value(db_type, 'login') 97 remove_config_value(db_type, 'database') 98 remove_config_value(db_type, 'location') 99 100 # put resource group info back in config if user does not want to delete it 101 if not namespace.delete_group and namespace.resource_group_name: 102 _set_value(db_type, namespace, 'resource_group_name', 'group') 103 104 # error handling 105 if namespace.delete_group and not namespace.resource_group_name: 106 raise CLIError("Please specify the resource group name to delete.") 107 if not namespace.delete_group and not namespace.resource_group_name and not namespace.server_name: 108 raise CLIError("Please specify the {} server name to delete and its resource group name if you only want to " 109 "delete the specific {} server.".format(db_type, db_type)) 110 111 112 def _set_value(db_type, namespace, attribute, option, default=None, cache=True): 113 if getattr(namespace, attribute) is None: 114 try: 115 if get_config_value(db_type, option): 116 setattr(namespace, attribute, 
get_config_value(db_type, option)) 117 else: 118 setattr(namespace, attribute, default) 119 except (configparser.NoSectionError, configparser.NoOptionError): 120 if default is not None: 121 setattr(namespace, attribute, default) 122 if cache: 123 set_config_value(db_type, option, getattr(namespace, attribute)) 124 125 126 def _get_value(db_type, namespace, attribute, option): 127 return getattr(namespace, attribute, None) or get_config_value(db_type, option, None) 128 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
```diff
diff --git a/src/db-up/azext_db_up/_validators.py b/src/db-up/azext_db_up/_validators.py
--- a/src/db-up/azext_db_up/_validators.py
+++ b/src/db-up/azext_db_up/_validators.py
@@ -48,7 +48,7 @@
     if _get_value(db_type, namespace, 'location', 'location') is None:
         try:
             get_default_location_from_resource_group(cmd, namespace)
-        except (CLIError, ValidationError):
+        except Exception:
             namespace.location = 'eastus'
     _set_value(db_type, namespace, 'location', 'location', default=namespace.location)

@@ -67,7 +67,7 @@
         try:
             resource_client.resource_groups.get(namespace.resource_group_name)
             create_resource_group = False
-        except CloudError:  # throw exception when resource group name is invalid
+        except Exception:  # throw exception when resource group name is invalid
             pass

     if create_resource_group:
diff --git a/src/db-up/setup.py b/src/db-up/setup.py
--- a/src/db-up/setup.py
+++ b/src/db-up/setup.py
@@ -8,7 +8,7 @@
 from codecs import open
 from setuptools import setup, find_packages

-VERSION = "0.2.3"
+VERSION = "0.2.4"

 CLASSIFIERS = [
     'Development Status :: 4 - Beta',
```
{"golden_diff": "diff --git a/src/db-up/azext_db_up/_validators.py b/src/db-up/azext_db_up/_validators.py\n--- a/src/db-up/azext_db_up/_validators.py\n+++ b/src/db-up/azext_db_up/_validators.py\n@@ -48,7 +48,7 @@\n if _get_value(db_type, namespace, 'location', 'location') is None:\n try:\n get_default_location_from_resource_group(cmd, namespace)\n- except (CLIError, ValidationError):\n+ except Exception:\n namespace.location = 'eastus'\n _set_value(db_type, namespace, 'location', 'location', default=namespace.location)\n \n@@ -67,7 +67,7 @@\n try:\n resource_client.resource_groups.get(namespace.resource_group_name)\n create_resource_group = False\n- except CloudError: # throw exception when resource group name is invalid\n+ except Exception: # throw exception when resource group name is invalid\n pass\n \n if create_resource_group:\ndiff --git a/src/db-up/setup.py b/src/db-up/setup.py\n--- a/src/db-up/setup.py\n+++ b/src/db-up/setup.py\n@@ -8,7 +8,7 @@\n from codecs import open\n from setuptools import setup, find_packages\n \n-VERSION = \"0.2.3\"\n+VERSION = \"0.2.4\"\n \n CLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n", "issue": "az postgres up does not create the resource group\n## Describe the bug\r\n**Tutorial link:** [Deploy a Django web app with PostgreSQL in Azure App Service](https://docs.microsoft.com/en-us/azure/app-service/tutorial-python-postgresql-app?tabs=cmd%2Cclone)\r\n\r\nIt is expected that the `az postgres up` command creates the resource group if it does not already exist, as mentioned in the [doc](https://docs.microsoft.com/en-us/azure/app-service/tutorial-python-postgresql-app?tabs=cmd%2Cclone#3-create-postgres-database-in-azure).\r\n\r\nRunning this command on Azure CLI version **2.22.1** and `db-up` extension version **0.2.3** works as expected.\r\n![image](https://user-images.githubusercontent.com/44050820/121654952-de2b3200-cabb-11eb-9d67-de07c7d20097.png)\r\n\r\nHowever, this does not work with the latest Azure CLI version **2.24.2** and `db-up` extension version **0.2.3**.\r\n\r\n**Command Name**\r\n`az postgres up\r\nExtension Name: db-up. Version: 0.2.3.`\r\n\r\n**Errors:**\r\n```\r\n(ResourceGroupNotFound) Resource group 'DjangoPostgres-tutorial-rg-2242' could not be found.\r\n```\r\n\r\n## To Reproduce:\r\nSteps to reproduce the behavior. Note that argument values have been redacted, as they may contain sensitive information.\r\n\r\n- Install Azure CLI version `2.24.2`.\r\n- `az postgres up --resource-group {} --location {} --sku-name {} --server-name {} --database-name {} --admin-user {} --admin-password {} --ssl-enforcement {}`\r\n\r\n## Expected Behavior\r\nThe command should create the specified resource group and proceed with provisioning the postgres db server, as shown in the screenshot above.\r\n\r\n## Environment Summary\r\n```\r\nWindows-10-10.0.19041-SP0\r\nPython 3.8.9\r\nInstaller: MSI\r\n\r\nazure-cli 2.24.2\r\nazure-cli-ml 1.24.0\r\n\r\nExtensions:\r\nazure-cli-ml 1.24.0\r\nazure-devops 0.18.0\r\nazure-iot 0.10.9\r\ndb-up 0.2.3\r\nmesh 0.10.6\r\nresource-graph 1.1.0\r\nscheduled-query 0.2.0\r\nspring-cloud 1.1.1\r\n\r\n```\r\n## Additional Context\r\nGitHub Issue: https://github.com/MicrosoftDocs/azure-docs/issues/76722\r\n\r\n<!--Please don't remove this:-->\r\n<!--auto-generated-->\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.2.3\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = [\n 'Cython==0.29.17',\n 'mysql-connector-python==8.0.14',\n 'psycopg2-binary==2.8.5'\n]\n\nsetup(\n name='db-up',\n version=VERSION,\n description='Additional commands to simplify Azure Database workflows.',\n long_description='An Azure CLI Extension to provide additional DB commands.',\n license='MIT',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/db-up',\n classifiers=CLASSIFIERS,\n package_data={'azext_db_up': ['azext_metadata.json', 'random_name/*']},\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=DEPENDENCIES\n)\n", "path": "src/db-up/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n# pylint: disable=import-error\nimport uuid\nfrom six.moves import configparser\nfrom azure.cli.core.commands.validators import get_default_location_from_resource_group\nfrom azure.mgmt.resource.resources.models import ResourceGroup\nfrom knack.log import get_logger\nfrom knack.util import CLIError\nfrom msrestazure.azure_exceptions import CloudError\nfrom msrest.exceptions import ValidationError\nfrom azext_db_up._client_factory import resource_client_factory\nfrom azext_db_up.random_name.generate import generate_username\nfrom azext_db_up.util import create_random_resource_name, get_config_value, set_config_value, remove_config_value\n\nlogger = get_logger(__name__)\n\nDEFAULT_LOCATION = 'westus2'\nDEFAULT_DATABASE_NAME = 'sampledb'\n\n\ndef db_up_namespace_processor(db_type):\n return lambda cmd, namespace: _process_db_up_namespace(cmd, namespace, db_type=db_type)\n\n\ndef db_down_namespace_processor(db_type):\n return lambda cmd, namespace: _process_db_down_namespace(namespace, db_type=db_type)\n\n\n# pylint: disable=bare-except\ndef _process_db_up_namespace(cmd, namespace, db_type=None):\n # populate from cache if existing when no resource group name provided\n resource_client = resource_client_factory(cmd.cli_ctx)\n if namespace.resource_group_name is None:\n _set_value(db_type, namespace, 'resource_group_name', 'group', cache=False)\n try:\n resource_client.resource_groups.get(namespace.resource_group_name)\n except: # Clear resource group name information when it is invalid\n namespace.resource_group_name = None\n\n # populate from cache if existing when no location provided\n if namespace.location is None:\n _set_value(db_type, namespace, 'location', 'location', cache=False)\n # generate smart defaults when namespace.location is None\n if _get_value(db_type, namespace, 'location', 'location') is None:\n try:\n 
get_default_location_from_resource_group(cmd, namespace)\n except (CLIError, ValidationError):\n namespace.location = 'eastus'\n _set_value(db_type, namespace, 'location', 'location', default=namespace.location)\n\n # When resource group name in namespace is different from what in cache, reset it and create new server name\n if namespace.resource_group_name != get_config_value(db_type, 'group', None):\n set_config_value(db_type, 'group', namespace.resource_group_name)\n if namespace.server_name is None:\n namespace.server_name = create_random_resource_name('server')\n set_config_value(db_type, 'server', namespace.server_name)\n\n # When no resource group name in namespace and cache, create new resource group with random name\n create_resource_group = True\n if namespace.resource_group_name is None: # No resource group provided and in cache\n namespace.resource_group_name = create_random_resource_name('group')\n else:\n try:\n resource_client.resource_groups.get(namespace.resource_group_name)\n create_resource_group = False\n except CloudError: # throw exception when resource group name is invalid\n pass\n\n if create_resource_group:\n # create new resource group\n params = ResourceGroup(location=namespace.location)\n logger.warning('Creating Resource Group \\'%s\\'...', namespace.resource_group_name)\n resource_client.resource_groups.create_or_update(namespace.resource_group_name, params)\n _set_value(db_type, namespace, 'resource_group_name', 'group', default=namespace.resource_group_name)\n\n _set_value(db_type, namespace, 'server_name', 'server', default=create_random_resource_name('server'))\n _set_value(db_type, namespace, 'administrator_login', 'login', default=generate_username())\n if namespace.generate_password:\n namespace.administrator_login_password = str(uuid.uuid4())\n del namespace.generate_password\n _set_value(db_type, namespace, 'database_name', 'database', default=DEFAULT_DATABASE_NAME)\n\n\ndef _process_db_down_namespace(namespace, db_type=None):\n # populate from cache if existing\n if namespace.resource_group_name is None:\n namespace.resource_group_name = _get_value(db_type, namespace, 'resource_group_name', 'group')\n remove_config_value(db_type, 'group')\n if namespace.server_name is None and not namespace.delete_group:\n namespace.server_name = _get_value(db_type, namespace, 'server_name', 'server')\n remove_config_value(db_type, 'server')\n remove_config_value(db_type, 'login')\n remove_config_value(db_type, 'database')\n remove_config_value(db_type, 'location')\n\n # put resource group info back in config if user does not want to delete it\n if not namespace.delete_group and namespace.resource_group_name:\n _set_value(db_type, namespace, 'resource_group_name', 'group')\n\n # error handling\n if namespace.delete_group and not namespace.resource_group_name:\n raise CLIError(\"Please specify the resource group name to delete.\")\n if not namespace.delete_group and not namespace.resource_group_name and not namespace.server_name:\n raise CLIError(\"Please specify the {} server name to delete and its resource group name if you only want to \"\n \"delete the specific {} server.\".format(db_type, db_type))\n\n\ndef _set_value(db_type, namespace, attribute, option, default=None, cache=True):\n if getattr(namespace, attribute) is None:\n try:\n if get_config_value(db_type, option):\n setattr(namespace, attribute, get_config_value(db_type, option))\n else:\n setattr(namespace, attribute, default)\n except (configparser.NoSectionError, configparser.NoOptionError):\n if 
default is not None:\n setattr(namespace, attribute, default)\n if cache:\n set_config_value(db_type, option, getattr(namespace, attribute))\n\n\ndef _get_value(db_type, namespace, attribute, option):\n return getattr(namespace, attribute, None) or get_config_value(db_type, option, None)\n", "path": "src/db-up/azext_db_up/_validators.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.2.4\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = [\n 'Cython==0.29.17',\n 'mysql-connector-python==8.0.14',\n 'psycopg2-binary==2.8.5'\n]\n\nsetup(\n name='db-up',\n version=VERSION,\n description='Additional commands to simplify Azure Database workflows.',\n long_description='An Azure CLI Extension to provide additional DB commands.',\n license='MIT',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/db-up',\n classifiers=CLASSIFIERS,\n package_data={'azext_db_up': ['azext_metadata.json', 'random_name/*']},\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=DEPENDENCIES\n)\n", "path": "src/db-up/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n# pylint: disable=import-error\nimport uuid\nfrom six.moves import configparser\nfrom azure.cli.core.commands.validators import get_default_location_from_resource_group\nfrom azure.mgmt.resource.resources.models import ResourceGroup\nfrom knack.log import get_logger\nfrom knack.util import CLIError\nfrom msrestazure.azure_exceptions import CloudError\nfrom msrest.exceptions import ValidationError\nfrom azext_db_up._client_factory import resource_client_factory\nfrom azext_db_up.random_name.generate import generate_username\nfrom azext_db_up.util import create_random_resource_name, get_config_value, set_config_value, remove_config_value\n\nlogger = get_logger(__name__)\n\nDEFAULT_LOCATION = 'westus2'\nDEFAULT_DATABASE_NAME = 'sampledb'\n\n\ndef db_up_namespace_processor(db_type):\n return lambda cmd, namespace: _process_db_up_namespace(cmd, namespace, db_type=db_type)\n\n\ndef db_down_namespace_processor(db_type):\n return lambda cmd, namespace: _process_db_down_namespace(namespace, db_type=db_type)\n\n\n# pylint: disable=bare-except\ndef _process_db_up_namespace(cmd, namespace, db_type=None):\n # populate from cache if existing when no resource group name provided\n resource_client = resource_client_factory(cmd.cli_ctx)\n if namespace.resource_group_name is None:\n _set_value(db_type, namespace, 'resource_group_name', 'group', cache=False)\n try:\n resource_client.resource_groups.get(namespace.resource_group_name)\n except: # Clear resource group name information when it is invalid\n namespace.resource_group_name = None\n\n # populate from cache if existing when no location provided\n if namespace.location is None:\n _set_value(db_type, namespace, 'location', 'location', cache=False)\n # generate smart defaults when namespace.location is None\n if _get_value(db_type, namespace, 'location', 'location') is None:\n try:\n get_default_location_from_resource_group(cmd, namespace)\n except Exception:\n namespace.location = 'eastus'\n _set_value(db_type, namespace, 'location', 'location', default=namespace.location)\n\n # When resource group name in namespace is different from what in cache, reset it and create new server name\n if namespace.resource_group_name != get_config_value(db_type, 'group', None):\n set_config_value(db_type, 'group', namespace.resource_group_name)\n if namespace.server_name is None:\n namespace.server_name = create_random_resource_name('server')\n set_config_value(db_type, 'server', namespace.server_name)\n\n # When no resource group name in namespace and cache, create new resource group with random name\n create_resource_group = True\n if namespace.resource_group_name is None: # No resource group provided and in cache\n namespace.resource_group_name = create_random_resource_name('group')\n else:\n try:\n resource_client.resource_groups.get(namespace.resource_group_name)\n create_resource_group = False\n except Exception: # throw exception when resource group name is invalid\n pass\n\n if create_resource_group:\n # create new resource group\n params = ResourceGroup(location=namespace.location)\n logger.warning('Creating Resource Group \\'%s\\'...', namespace.resource_group_name)\n resource_client.resource_groups.create_or_update(namespace.resource_group_name, params)\n _set_value(db_type, namespace, 'resource_group_name', 'group', default=namespace.resource_group_name)\n\n _set_value(db_type, namespace, 
'server_name', 'server', default=create_random_resource_name('server'))\n _set_value(db_type, namespace, 'administrator_login', 'login', default=generate_username())\n if namespace.generate_password:\n namespace.administrator_login_password = str(uuid.uuid4())\n del namespace.generate_password\n _set_value(db_type, namespace, 'database_name', 'database', default=DEFAULT_DATABASE_NAME)\n\n\ndef _process_db_down_namespace(namespace, db_type=None):\n # populate from cache if existing\n if namespace.resource_group_name is None:\n namespace.resource_group_name = _get_value(db_type, namespace, 'resource_group_name', 'group')\n remove_config_value(db_type, 'group')\n if namespace.server_name is None and not namespace.delete_group:\n namespace.server_name = _get_value(db_type, namespace, 'server_name', 'server')\n remove_config_value(db_type, 'server')\n remove_config_value(db_type, 'login')\n remove_config_value(db_type, 'database')\n remove_config_value(db_type, 'location')\n\n # put resource group info back in config if user does not want to delete it\n if not namespace.delete_group and namespace.resource_group_name:\n _set_value(db_type, namespace, 'resource_group_name', 'group')\n\n # error handling\n if namespace.delete_group and not namespace.resource_group_name:\n raise CLIError(\"Please specify the resource group name to delete.\")\n if not namespace.delete_group and not namespace.resource_group_name and not namespace.server_name:\n raise CLIError(\"Please specify the {} server name to delete and its resource group name if you only want to \"\n \"delete the specific {} server.\".format(db_type, db_type))\n\n\ndef _set_value(db_type, namespace, attribute, option, default=None, cache=True):\n if getattr(namespace, attribute) is None:\n try:\n if get_config_value(db_type, option):\n setattr(namespace, attribute, get_config_value(db_type, option))\n else:\n setattr(namespace, attribute, default)\n except (configparser.NoSectionError, configparser.NoOptionError):\n if default is not None:\n setattr(namespace, attribute, default)\n if cache:\n set_config_value(db_type, option, getattr(namespace, attribute))\n\n\ndef _get_value(db_type, namespace, attribute, option):\n return getattr(namespace, attribute, None) or get_config_value(db_type, option, None)\n", "path": "src/db-up/azext_db_up/_validators.py"}]}
2,844
310
gh_patches_debug_67107
rasdani/github-patches
git_diff
beeware__toga-1078
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- toga-winforms\windows.py openFileDialog needs an "s" **Describe the bug** When using multiselect = True in winforms the returned value is a single file because if dialog.ShowDialog() == WinForms.DialogResult.OK: return dialog.FileName should be if dialog.ShowDialog() == WinForms.DialogResult.OK: return dialog.FileName**s** **To Reproduce** fname = self.main_window.open_file_dialog( title="Open the file", multiselect=True ) **Expected behavior** returns a list of files (fname) which is true on a Mac. On Windows it is the name of the first file as a single string . With suggested fix return selection but it still needs to be coerced into a list. **Environment:** - Operating System: Mac OS 10.15.6 & Windows 10 - Python version: 3.8 - Software versions: - Briefcase: 0.3.3 - Toga: 0.3.0 dev23 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/winforms/toga_winforms/window.py` Content: ``` 1 from toga import GROUP_BREAK, SECTION_BREAK 2 3 from .libs import Size, WinForms 4 5 6 class WinFormsViewport: 7 def __init__(self, native, frame): 8 self.native = native 9 self.frame = frame 10 self.baseline_dpi = 96 11 12 @property 13 def width(self): 14 # Treat `native=None` as a 0x0 viewport 15 if self.native is None: 16 return 0 17 return self.native.ClientSize.Width 18 19 @property 20 def height(self): 21 if self.native is None: 22 return 0 23 # Subtract any vertical shift of the frame. This is to allow 24 # for toolbars, or any other viewport-level decoration. 25 return self.native.ClientSize.Height - self.frame.vertical_shift 26 27 @property 28 def dpi(self): 29 if self.native is None: 30 return self.baseline_dpi 31 return self.native.CreateGraphics().DpiX 32 33 34 class Window: 35 def __init__(self, interface): 36 self.interface = interface 37 self.interface._impl = self 38 self.create() 39 40 def create(self): 41 self.native = WinForms.Form(self) 42 self.native.ClientSize = Size(*self.interface._size) 43 self.native.interface = self.interface 44 self.native.Resize += self.winforms_resize 45 self.toolbar_native = None 46 self.toolbar_items = None 47 48 def create_toolbar(self): 49 self.toolbar_native = WinForms.ToolStrip() 50 for cmd in self.interface.toolbar: 51 if cmd == GROUP_BREAK: 52 item = WinForms.ToolStripSeparator() 53 elif cmd == SECTION_BREAK: 54 item = WinForms.ToolStripSeparator() 55 else: 56 if cmd.icon is not None: 57 native_icon = cmd.icon._impl.native 58 item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap()) 59 else: 60 item = WinForms.ToolStripMenuItem(cmd.label) 61 item.Click += cmd._impl.as_handler() 62 cmd._impl.native.append(item) 63 self.toolbar_native.Items.Add(item) 64 65 def set_position(self, position): 66 pass 67 68 def set_size(self, size): 69 self.native.ClientSize = Size(*self.interface._size) 70 71 def set_app(self, app): 72 if app is None: 73 return 74 icon_impl = app.interface.icon._impl 75 if icon_impl is None: 76 return 77 self.native.Icon = icon_impl.native 78 79 @property 80 def vertical_shift(self): 81 # vertical shift is the toolbar height or 0 82 result = 0 83 try: 84 result += self.native.interface._impl.toolbar_native.Height 85 except AttributeError: 86 pass 87 try: 88 result += self.native.interface._impl.native.MainMenuStrip.Height 89 except AttributeError: 90 pass 91 return result 92 93 def set_content(self, widget): 94 if 
self.toolbar_native: 95 self.native.Controls.Add(self.toolbar_native) 96 # Create the lookup table of menu items, 97 # then force the creation of the menus. 98 self.native.Controls.Add(widget.native) 99 100 # Set the widget's viewport to be based on the window's content. 101 widget.viewport = WinFormsViewport(native=self.native, frame=self) 102 widget.frame = self 103 104 # Add all children to the content widget. 105 for child in widget.interface.children: 106 child._impl.container = widget 107 108 def set_title(self, title): 109 self.native.Text = title 110 111 def show(self): 112 # The first render of the content will establish the 113 # minimum possible content size; use that to enforce 114 # a minimum window size. 115 TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight 116 # Now that the content is visible, we can do our initial hinting, 117 # and use that as the basis for setting the minimum window size. 118 self.interface.content._impl.rehint() 119 self.interface.content.style.layout( 120 self.interface.content, 121 WinFormsViewport(native=None, frame=None), 122 ) 123 self.native.MinimumSize = Size( 124 int(self.interface.content.layout.width), 125 int(self.interface.content.layout.height) + TITLEBAR_HEIGHT 126 ) 127 self.interface.content.refresh() 128 129 self.native.Show() 130 131 def winforms_FormClosing(self, event, handler): 132 if self.interface.app.on_exit: 133 self.interface.app.on_exit(self.interface.app) 134 135 def set_full_screen(self, is_full_screen): 136 self.interface.factory.not_implemented('Window.set_full_screen()') 137 138 def on_close(self): 139 pass 140 141 def close(self): 142 self.native.Close() 143 144 def winforms_resize(self, sender, args): 145 if self.interface.content: 146 # Re-layout the content 147 self.interface.content.refresh() 148 149 def info_dialog(self, title, message): 150 return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK) 151 152 def question_dialog(self, title, message): 153 result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.YesNo) 154 return result 155 156 def confirm_dialog(self, title, message): 157 result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OKCancel) 158 # this returns 1 (DialogResult.OK enum) for OK and 2 for Cancel 159 return True if result == WinForms.DialogResult.OK else False 160 161 def error_dialog(self, title, message): 162 return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK, 163 WinForms.MessageBoxIcon.Error) 164 165 def stack_trace_dialog(self, title, message, content, retry=False): 166 pass 167 168 def save_file_dialog(self, title, suggested_filename, file_types): 169 dialog = WinForms.SaveFileDialog() 170 dialog.Title = title 171 if suggested_filename is not None: 172 dialog.FileName = suggested_filename 173 if file_types is not None: 174 dialog.Filter = self.build_filter(file_types) 175 if dialog.ShowDialog() == WinForms.DialogResult.OK: 176 return dialog.FileName 177 else: 178 raise ValueError("No filename provided in the save file dialog") 179 180 def open_file_dialog(self, title, initial_directory, file_types, multiselect): 181 dialog = WinForms.OpenFileDialog() 182 dialog.Title = title 183 if initial_directory is not None: 184 dialog.InitialDirectory = initial_directory 185 if file_types is not None: 186 dialog.Filter = self.build_filter(file_types) 187 if multiselect: 188 dialog.Multiselect = True 189 if dialog.ShowDialog() == WinForms.DialogResult.OK: 190 return dialog.FileName 191 else: 192 raise 
ValueError("No filename provided in the open file dialog") 193 194 def select_folder_dialog(self, title, initial_directory, multiselect): 195 dialog = WinForms.FolderBrowserDialog() 196 dialog.Title = title 197 if initial_directory is not None: 198 dialog.InitialDirectory = initial_directory 199 200 if dialog.ShowDialog() == WinForms.DialogResult.OK: 201 return [dialog.SelectedPath] 202 else: 203 raise ValueError("No folder provided in the select folder dialog") 204 205 def build_filter(self, file_types): 206 file_string = "{0} files (*.{0})|*.{0}" 207 return '|'.join([file_string.format(ext) for ext in file_types]) + \ 208 "|All files (*.*)|*.*" 209 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/winforms/toga_winforms/window.py b/src/winforms/toga_winforms/window.py --- a/src/winforms/toga_winforms/window.py +++ b/src/winforms/toga_winforms/window.py @@ -187,7 +187,7 @@ if multiselect: dialog.Multiselect = True if dialog.ShowDialog() == WinForms.DialogResult.OK: - return dialog.FileName + return dialog.FileNames if multiselect else dialog.FileName else: raise ValueError("No filename provided in the open file dialog")
{"golden_diff": "diff --git a/src/winforms/toga_winforms/window.py b/src/winforms/toga_winforms/window.py\n--- a/src/winforms/toga_winforms/window.py\n+++ b/src/winforms/toga_winforms/window.py\n@@ -187,7 +187,7 @@\n if multiselect:\n dialog.Multiselect = True\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n- return dialog.FileName\n+ return dialog.FileNames if multiselect else dialog.FileName\n else:\n raise ValueError(\"No filename provided in the open file dialog\")\n", "issue": "toga-winforms\\windows.py openFileDialog needs an \"s\"\n**Describe the bug**\r\nWhen using multiselect = True in winforms the returned value is a single file because \r\n\r\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\r\n return dialog.FileName\r\n\r\nshould be \r\n\r\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\r\n return dialog.FileName**s**\r\n\r\n \r\n\r\n**To Reproduce**\r\n\r\n fname = self.main_window.open_file_dialog(\r\n title=\"Open the file\",\r\n multiselect=True\r\n )\r\n\r\n\r\n**Expected behavior**\r\nreturns a list of files (fname) which is true on a Mac. On Windows it is the name of the first file as a single string . With suggested fix return selection but it still needs to be coerced into a list.\r\n\r\n\r\n**Environment:**\r\n - Operating System: Mac OS 10.15.6 & Windows 10\r\n - Python version: 3.8\r\n - Software versions:\r\n - Briefcase: 0.3.3\r\n - Toga: 0.3.0 dev23\r\n\n", "before_files": [{"content": "from toga import GROUP_BREAK, SECTION_BREAK\n\nfrom .libs import Size, WinForms\n\n\nclass WinFormsViewport:\n def __init__(self, native, frame):\n self.native = native\n self.frame = frame\n self.baseline_dpi = 96\n\n @property\n def width(self):\n # Treat `native=None` as a 0x0 viewport\n if self.native is None:\n return 0\n return self.native.ClientSize.Width\n\n @property\n def height(self):\n if self.native is None:\n return 0\n # Subtract any vertical shift of the frame. 
This is to allow\n # for toolbars, or any other viewport-level decoration.\n return self.native.ClientSize.Height - self.frame.vertical_shift\n\n @property\n def dpi(self):\n if self.native is None:\n return self.baseline_dpi\n return self.native.CreateGraphics().DpiX\n\n\nclass Window:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n self.create()\n\n def create(self):\n self.native = WinForms.Form(self)\n self.native.ClientSize = Size(*self.interface._size)\n self.native.interface = self.interface\n self.native.Resize += self.winforms_resize\n self.toolbar_native = None\n self.toolbar_items = None\n\n def create_toolbar(self):\n self.toolbar_native = WinForms.ToolStrip()\n for cmd in self.interface.toolbar:\n if cmd == GROUP_BREAK:\n item = WinForms.ToolStripSeparator()\n elif cmd == SECTION_BREAK:\n item = WinForms.ToolStripSeparator()\n else:\n if cmd.icon is not None:\n native_icon = cmd.icon._impl.native\n item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap())\n else:\n item = WinForms.ToolStripMenuItem(cmd.label)\n item.Click += cmd._impl.as_handler()\n cmd._impl.native.append(item)\n self.toolbar_native.Items.Add(item)\n\n def set_position(self, position):\n pass\n\n def set_size(self, size):\n self.native.ClientSize = Size(*self.interface._size)\n\n def set_app(self, app):\n if app is None:\n return\n icon_impl = app.interface.icon._impl\n if icon_impl is None:\n return\n self.native.Icon = icon_impl.native\n\n @property\n def vertical_shift(self):\n # vertical shift is the toolbar height or 0\n result = 0\n try:\n result += self.native.interface._impl.toolbar_native.Height\n except AttributeError:\n pass\n try:\n result += self.native.interface._impl.native.MainMenuStrip.Height\n except AttributeError:\n pass\n return result\n\n def set_content(self, widget):\n if self.toolbar_native:\n self.native.Controls.Add(self.toolbar_native)\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self.native.Controls.Add(widget.native)\n\n # Set the widget's viewport to be based on the window's content.\n widget.viewport = WinFormsViewport(native=self.native, frame=self)\n widget.frame = self\n\n # Add all children to the content widget.\n for child in widget.interface.children:\n child._impl.container = widget\n\n def set_title(self, title):\n self.native.Text = title\n\n def show(self):\n # The first render of the content will establish the\n # minimum possible content size; use that to enforce\n # a minimum window size.\n TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight\n # Now that the content is visible, we can do our initial hinting,\n # and use that as the basis for setting the minimum window size.\n self.interface.content._impl.rehint()\n self.interface.content.style.layout(\n self.interface.content,\n WinFormsViewport(native=None, frame=None),\n )\n self.native.MinimumSize = Size(\n int(self.interface.content.layout.width),\n int(self.interface.content.layout.height) + TITLEBAR_HEIGHT\n )\n self.interface.content.refresh()\n\n self.native.Show()\n\n def winforms_FormClosing(self, event, handler):\n if self.interface.app.on_exit:\n self.interface.app.on_exit(self.interface.app)\n\n def set_full_screen(self, is_full_screen):\n self.interface.factory.not_implemented('Window.set_full_screen()')\n\n def on_close(self):\n pass\n\n def close(self):\n self.native.Close()\n\n def winforms_resize(self, sender, args):\n if self.interface.content:\n # Re-layout the content\n 
self.interface.content.refresh()\n\n def info_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK)\n\n def question_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.YesNo)\n return result\n\n def confirm_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OKCancel)\n # this returns 1 (DialogResult.OK enum) for OK and 2 for Cancel\n return True if result == WinForms.DialogResult.OK else False\n\n def error_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK,\n WinForms.MessageBoxIcon.Error)\n\n def stack_trace_dialog(self, title, message, content, retry=False):\n pass\n\n def save_file_dialog(self, title, suggested_filename, file_types):\n dialog = WinForms.SaveFileDialog()\n dialog.Title = title\n if suggested_filename is not None:\n dialog.FileName = suggested_filename\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileName\n else:\n raise ValueError(\"No filename provided in the save file dialog\")\n\n def open_file_dialog(self, title, initial_directory, file_types, multiselect):\n dialog = WinForms.OpenFileDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if multiselect:\n dialog.Multiselect = True\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileName\n else:\n raise ValueError(\"No filename provided in the open file dialog\")\n\n def select_folder_dialog(self, title, initial_directory, multiselect):\n dialog = WinForms.FolderBrowserDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return [dialog.SelectedPath]\n else:\n raise ValueError(\"No folder provided in the select folder dialog\")\n\n def build_filter(self, file_types):\n file_string = \"{0} files (*.{0})|*.{0}\"\n return '|'.join([file_string.format(ext) for ext in file_types]) + \\\n \"|All files (*.*)|*.*\"\n", "path": "src/winforms/toga_winforms/window.py"}], "after_files": [{"content": "from toga import GROUP_BREAK, SECTION_BREAK\n\nfrom .libs import Size, WinForms\n\n\nclass WinFormsViewport:\n def __init__(self, native, frame):\n self.native = native\n self.frame = frame\n self.baseline_dpi = 96\n\n @property\n def width(self):\n # Treat `native=None` as a 0x0 viewport\n if self.native is None:\n return 0\n return self.native.ClientSize.Width\n\n @property\n def height(self):\n if self.native is None:\n return 0\n # Subtract any vertical shift of the frame. 
This is to allow\n # for toolbars, or any other viewport-level decoration.\n return self.native.ClientSize.Height - self.frame.vertical_shift\n\n @property\n def dpi(self):\n if self.native is None:\n return self.baseline_dpi\n return self.native.CreateGraphics().DpiX\n\n\nclass Window:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n self.create()\n\n def create(self):\n self.native = WinForms.Form(self)\n self.native.ClientSize = Size(*self.interface._size)\n self.native.interface = self.interface\n self.native.Resize += self.winforms_resize\n self.toolbar_native = None\n self.toolbar_items = None\n\n def create_toolbar(self):\n self.toolbar_native = WinForms.ToolStrip()\n for cmd in self.interface.toolbar:\n if cmd == GROUP_BREAK:\n item = WinForms.ToolStripSeparator()\n elif cmd == SECTION_BREAK:\n item = WinForms.ToolStripSeparator()\n else:\n if cmd.icon is not None:\n native_icon = cmd.icon._impl.native\n item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap())\n else:\n item = WinForms.ToolStripMenuItem(cmd.label)\n item.Click += cmd._impl.as_handler()\n cmd._impl.native.append(item)\n self.toolbar_native.Items.Add(item)\n\n def set_position(self, position):\n pass\n\n def set_size(self, size):\n self.native.ClientSize = Size(*self.interface._size)\n\n def set_app(self, app):\n if app is None:\n return\n icon_impl = app.interface.icon._impl\n if icon_impl is None:\n return\n self.native.Icon = icon_impl.native\n\n @property\n def vertical_shift(self):\n # vertical shift is the toolbar height or 0\n result = 0\n try:\n result += self.native.interface._impl.toolbar_native.Height\n except AttributeError:\n pass\n try:\n result += self.native.interface._impl.native.MainMenuStrip.Height\n except AttributeError:\n pass\n return result\n\n def set_content(self, widget):\n if self.toolbar_native:\n self.native.Controls.Add(self.toolbar_native)\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self.native.Controls.Add(widget.native)\n\n # Set the widget's viewport to be based on the window's content.\n widget.viewport = WinFormsViewport(native=self.native, frame=self)\n widget.frame = self\n\n # Add all children to the content widget.\n for child in widget.interface.children:\n child._impl.container = widget\n\n def set_title(self, title):\n self.native.Text = title\n\n def show(self):\n # The first render of the content will establish the\n # minimum possible content size; use that to enforce\n # a minimum window size.\n TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight\n # Now that the content is visible, we can do our initial hinting,\n # and use that as the basis for setting the minimum window size.\n self.interface.content._impl.rehint()\n self.interface.content.style.layout(\n self.interface.content,\n WinFormsViewport(native=None, frame=None),\n )\n self.native.MinimumSize = Size(\n int(self.interface.content.layout.width),\n int(self.interface.content.layout.height) + TITLEBAR_HEIGHT\n )\n self.interface.content.refresh()\n\n self.native.Show()\n\n def winforms_FormClosing(self, event, handler):\n if self.interface.app.on_exit:\n self.interface.app.on_exit(self.interface.app)\n\n def set_full_screen(self, is_full_screen):\n self.interface.factory.not_implemented('Window.set_full_screen()')\n\n def on_close(self):\n pass\n\n def close(self):\n self.native.Close()\n\n def winforms_resize(self, sender, args):\n if self.interface.content:\n # Re-layout the content\n 
self.interface.content.refresh()\n\n def info_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK)\n\n def question_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.YesNo)\n return result\n\n def confirm_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OKCancel)\n # this returns 1 (DialogResult.OK enum) for OK and 2 for Cancel\n return True if result == WinForms.DialogResult.OK else False\n\n def error_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK,\n WinForms.MessageBoxIcon.Error)\n\n def stack_trace_dialog(self, title, message, content, retry=False):\n pass\n\n def save_file_dialog(self, title, suggested_filename, file_types):\n dialog = WinForms.SaveFileDialog()\n dialog.Title = title\n if suggested_filename is not None:\n dialog.FileName = suggested_filename\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileName\n else:\n raise ValueError(\"No filename provided in the save file dialog\")\n\n def open_file_dialog(self, title, initial_directory, file_types, multiselect):\n dialog = WinForms.OpenFileDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if multiselect:\n dialog.Multiselect = True\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileNames if multiselect else dialog.FileName\n else:\n raise ValueError(\"No filename provided in the open file dialog\")\n\n def select_folder_dialog(self, title, initial_directory, multiselect):\n dialog = WinForms.FolderBrowserDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return [dialog.SelectedPath]\n else:\n raise ValueError(\"No folder provided in the select folder dialog\")\n\n def build_filter(self, file_types):\n file_string = \"{0} files (*.{0})|*.{0}\"\n return '|'.join([file_string.format(ext) for ext in file_types]) + \\\n \"|All files (*.*)|*.*\"\n", "path": "src/winforms/toga_winforms/window.py"}]}
2,505
120
gh_patches_debug_8679
rasdani/github-patches
git_diff
ydataai__ydata-profiling-728
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ExpectationProfiler fails to profile a datetime column **Describe the bug** I've encountered the following `TypeError` when trying to profile a dataframe that has a datetime column <details><summary>Click to expand <strong><em>Traceback</em></strong></summary> ```python profile.to_expectation_suite(suite_name=f"{schema}.{table}") [03/11/21 14:49:33] INFO INFO - 2021-03-11 14:49:33,088 - utils.py - NumExpr utils.py:157 defaulting to 4 threads. --------------------------------------------------------------------------- TypeError Traceback (most recent call last) ~/ds-vroom-demand/great_expectations/uncommitted/test_profile.py in ----> 15 profile.to_expectation_suite(suite_name=f"{schema}.{table}") ~/.pyenv/versions/vroom/lib/python3.7/site-packages/pandas_profiling/expectations_report.py in to_expectation_suite(self, suite_name, data_context, save_suite, run_validation, build_data_docs, handler) 88 # Dispatch to expectations per semantic variable type 89 for name, variable_summary in summary["variables"].items(): ---> 90 handler.handle(variable_summary["type"], name, variable_summary, batch) 91 92 # We don't actually update the suite object on the batch in place, so need ... ~/.pyenv/versions/vroom/lib/python3.7/site-packages/great_expectations/dataset/pandas_dataset.py in is_between(val) 1223 ): 1224 raise TypeError( -> 1225 "Column values, min_value, and max_value must either be None or of the same type." 1226 ) 1227 ``` </details> ```python TypeError: Column values, min_value, and max_value must either be None or of the same type. ``` From looking at the arguments to `is_between` in `great_expectations.dataset.pandas_dataset.py`, this seems to be due to `min_value` and `max_value` being serialised to their string representations, but the column itself doesn't. I think this can be resolved by modifying the arguments to `datetime_expectations` in `pandas_profiling.model.expectation_algorithms` as follows ```python def datetime_expectations(name, summary, batch, *args): if any(k in summary for k in ["min", "max"]): batch.expect_column_values_to_be_between( name, min_value=summary.get("min"), max_value=summary.get("max"), parse_strings_as_datetimes=True ) return name, summary, batch ``` I've been able to resolve this issue myself by altering that function. I'm happy to submit this as a PR if this is acceptable. 
**To Reproduce** ```python """ Test for issue XXX: https://github.com/pandas-profiling/pandas-profiling/issues/XXX """ import pandas as pd import pandas_profiling def test_issueXXX(): df = pd.DataFrame({'date':pd.date_range('2021-01-01', '2021-03-01', freq='d')}) profile = pandas_profiling.ProfileReport(df=df, explorative=True) profile.to_expectation_suite(suite_name='test', save_suite=False, build_data_docs=False, run_validation=False) ``` **Version information:** * _Python version_: 3.7.6 * _Environment_: Pycharm * _`pip`_: <details><summary>Click to expand <strong><em>Version information</em></strong></summary> <p> ``` alembic==1.4.1 altair==4.1.0 appnope==0.1.2 argon2-cffi==20.1.0 arrow==0.17.0 astor==0.8.1 async-generator==1.10 attrs==20.3.0 azure-core==1.11.0 azure-storage-blob==12.7.1 backcall==0.2.0 base58==2.1.0 binaryornot==0.4.4 bleach==3.2.3 blinker==1.4 boto3==1.17.2 botocore==1.20.2 cachetools==4.2.1 category-encoders==2.2.2 certifi==2020.12.5 cffi==1.14.4 chardet==3.0.4 click==7.1.2 cloudpickle==1.6.0 colorama==0.4.4 commonmark==0.9.1 confuse==1.4.0 cookiecutter==1.7.2 cryptography==3.3.1 cycler==0.10.0 dask==2021.2.0 databricks-cli==0.14.1 decorator==4.4.2 defusedxml==0.6.0 distributed==2021.2.0 docker==4.4.2 docutils==0.16 entrypoints==0.3 Faker==6.0.0 fastapi==0.63.0 featuretools==0.23.1 Flask==1.1.2 fsspec==0.8.5 gitdb==4.0.5 GitPython==3.1.12 great-expectations==0.13.8 gunicorn==20.0.4 HeapDict==1.0.1 htmlmin==0.1.12 hypothesis==6.3.4 idna==2.10 ImageHash==4.2.0 importlib-metadata==2.1.1 iniconfig==1.1.1 ipykernel==5.4.3 ipython==7.19.0 ipython-genutils==0.2.0 ipywidgets==7.6.3 isodate==0.6.0 itsdangerous==1.1.0 jedi==0.18.0 Jinja2==2.11.2 jinja2-time==0.2.0 jinjasql==0.1.8 jmespath==0.10.0 joblib==1.0.0 jsonpatch==1.28 jsonpointer==2.0 jsonschema==3.2.0 jupyter==1.0.0 jupyter-client==6.1.11 jupyter-console==6.2.0 jupyter-core==4.7.0 jupyterlab-pygments==0.1.2 jupyterlab-widgets==1.0.0 kiwisolver==1.3.1 lightgbm==3.1.1 locket==0.2.1 loky==2.9.0 Mako==1.1.4 MarkupSafe==1.1.1 matplotlib==3.3.3 missingno==0.4.2 mistune==0.8.4 mlflow==1.13.1 mock==4.0.3 msgpack==1.0.2 msrest==0.6.21 mypy-extensions==0.4.3 nbclient==0.5.1 nbconvert==6.0.7 nbformat==5.1.2 nest-asyncio==1.5.1 networkx==2.5 nltk==3.5 notebook==6.2.0 numpy==1.19.5 oauthlib==3.1.0 opt-einsum==3.3.0 packaging==20.8 pandas==1.2.1 pandas-profiling==2.11.0 pandera==0.6.2 pandocfilters==1.4.3 parso==0.8.1 partd==1.1.0 patsy==0.5.1 pexpect==4.8.0 phik==0.11.2 pickleshare==0.7.5 Pillow==8.1.0 pluggy==0.13.1 poyo==0.5.0 prometheus-client==0.9.0 prometheus-flask-exporter==0.18.1 prompt-toolkit==3.0.14 protobuf==3.14.0 psutil==5.8.0 psycopg2-binary==2.8.6 ptyprocess==0.7.0 py==1.10.0 pycparser==2.20 pydantic==1.7.3 pydeck==0.5.0 Pygments==2.7.4 pyOpenSSL==20.0.1 pyparsing==2.4.7 PyPika==0.47.4 pyro-api==0.1.2 pyro-ppl==1.5.2 pyrsistent==0.17.3 pytest==6.2.2 python-dateutil==2.8.1 python-dotenv==0.15.0 python-editor==1.0.4 python-slugify==4.0.1 pytz==2020.5 PyWavelets==1.1.1 PyYAML==5.4.1 pyzmq==21.0.2 qtconsole==5.0.2 QtPy==1.9.0 querystring-parser==1.2.4 regex==2020.11.13 requests==2.25.1 requests-oauthlib==1.3.0 rich==9.11.0 rpy2==3.4.2 ruamel.yaml==0.16.12 s3transfer==0.3.4 scikit-learn==0.24.1 scipy==1.6.0 seaborn==0.11.1 Send2Trash==1.5.0 shellingham==1.4.0 six==1.15.0 smart-open==4.2.0 smmap==3.0.5 sortedcontainers==2.3.0 SQLAlchemy==1.3.23 sqlparse==0.4.1 starlette==0.13.6 statsmodels==0.12.2 streamlit==0.77.0 tabulate==0.8.7 tangled-up-in-unicode==0.0.6 tblib==1.7.0 termcolor==1.1.0 terminado==0.9.2 testpath==0.4.4 
text-unidecode==1.3 threadpoolctl==2.1.0 toml==0.10.2 toolz==0.11.1 torch==1.7.1 tornado==6.1 tqdm==4.56.0 traitlets==5.0.5 typer==0.3.2 typer-cli==0.0.11 typing-extensions==3.7.4.3 typing-inspect==0.6.0 tzlocal==2.1 urllib3==1.25.11 validators==0.18.2 visions==0.6.0 wcwidth==0.2.5 webencodings==0.5.1 websocket-client==0.57.0 Werkzeug==1.0.1 widgetsnbextension==3.5.1 wrapt==1.12.1 zict==2.0.0 zipp==3.4.0 ``` </p></summary></details> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pandas_profiling/model/expectation_algorithms.py` Content: ``` 1 def generic_expectations(name, summary, batch, *args): 2 batch.expect_column_to_exist(name) 3 4 if summary["n_missing"] == 0: 5 batch.expect_column_values_to_not_be_null(name) 6 7 if summary["p_unique"] == 1.0: 8 batch.expect_column_values_to_be_unique(name) 9 10 return name, summary, batch 11 12 13 def numeric_expectations(name, summary, batch, *args): 14 from great_expectations.profile.base import ProfilerTypeMapping 15 16 numeric_type_names = ( 17 ProfilerTypeMapping.INT_TYPE_NAMES + ProfilerTypeMapping.FLOAT_TYPE_NAMES 18 ) 19 20 batch.expect_column_values_to_be_in_type_list( 21 name, 22 numeric_type_names, 23 meta={ 24 "notes": { 25 "format": "markdown", 26 "content": [ 27 "The column values should be stored in one of these types." 28 ], 29 } 30 }, 31 ) 32 33 if summary["monotonic_increase"]: 34 batch.expect_column_values_to_be_increasing( 35 name, strictly=summary["monotonic_increase_strict"] 36 ) 37 38 if summary["monotonic_decrease"]: 39 batch.expect_column_values_to_be_decreasing( 40 name, strictly=summary["monotonic_decrease_strict"] 41 ) 42 43 if any(k in summary for k in ["min", "max"]): 44 batch.expect_column_values_to_be_between( 45 name, min_value=summary.get("min"), max_value=summary.get("max") 46 ) 47 48 return name, summary, batch 49 50 51 def categorical_expectations(name, summary, batch, *args): 52 # Use for both categorical and special case (boolean) 53 absolute_threshold = 10 54 relative_threshold = 0.2 55 if ( 56 summary["n_distinct"] < absolute_threshold 57 or summary["p_distinct"] < relative_threshold 58 ): 59 batch.expect_column_values_to_be_in_set( 60 name, set(summary["value_counts_without_nan"].keys()) 61 ) 62 return name, summary, batch 63 64 65 def path_expectations(name, summary, batch, *args): 66 return name, summary, batch 67 68 69 def datetime_expectations(name, summary, batch, *args): 70 if any(k in summary for k in ["min", "max"]): 71 batch.expect_column_values_to_be_between( 72 name, min_value=summary.get("min"), max_value=summary.get("max") 73 ) 74 75 return name, summary, batch 76 77 78 def image_expectations(name, summary, batch, *args): 79 return name, summary, batch 80 81 82 def url_expectations(name, summary, batch, *args): 83 return name, summary, batch 84 85 86 def file_expectations(name, summary, batch, *args): 87 # By definition within our type logic, a file exists (as it's a path that also exists) 88 batch.expect_file_to_exist(name) 89 90 return name, summary, batch 91 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pandas_profiling/model/expectation_algorithms.py b/src/pandas_profiling/model/expectation_algorithms.py --- a/src/pandas_profiling/model/expectation_algorithms.py +++ b/src/pandas_profiling/model/expectation_algorithms.py @@ -69,7 +69,10 @@ def datetime_expectations(name, summary, batch, *args): if any(k in summary for k in ["min", "max"]): batch.expect_column_values_to_be_between( - name, min_value=summary.get("min"), max_value=summary.get("max") + name, + min_value=summary.get("min"), + max_value=summary.get("max"), + parse_strings_as_datetimes=True, ) return name, summary, batch
{"golden_diff": "diff --git a/src/pandas_profiling/model/expectation_algorithms.py b/src/pandas_profiling/model/expectation_algorithms.py\n--- a/src/pandas_profiling/model/expectation_algorithms.py\n+++ b/src/pandas_profiling/model/expectation_algorithms.py\n@@ -69,7 +69,10 @@\n def datetime_expectations(name, summary, batch, *args):\r\n if any(k in summary for k in [\"min\", \"max\"]):\r\n batch.expect_column_values_to_be_between(\r\n- name, min_value=summary.get(\"min\"), max_value=summary.get(\"max\")\r\n+ name,\r\n+ min_value=summary.get(\"min\"),\r\n+ max_value=summary.get(\"max\"),\r\n+ parse_strings_as_datetimes=True,\r\n )\r\n \r\n return name, summary, batch\n", "issue": "ExpectationProfiler fails to profile a datetime column\n**Describe the bug**\r\nI've encountered the following `TypeError` when trying to profile a dataframe that has a datetime column\r\n<details><summary>Click to expand <strong><em>Traceback</em></strong></summary>\r\n\r\n```python\r\n\r\nprofile.to_expectation_suite(suite_name=f\"{schema}.{table}\")\r\n[03/11/21 14:49:33] INFO INFO - 2021-03-11 14:49:33,088 - utils.py - NumExpr utils.py:157\r\n defaulting to 4 threads. \r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n~/ds-vroom-demand/great_expectations/uncommitted/test_profile.py in \r\n----> 15 profile.to_expectation_suite(suite_name=f\"{schema}.{table}\")\r\n\r\n~/.pyenv/versions/vroom/lib/python3.7/site-packages/pandas_profiling/expectations_report.py in to_expectation_suite(self, suite_name, data_context, save_suite, run_validation, build_data_docs, handler)\r\n 88 # Dispatch to expectations per semantic variable type\r\n 89 for name, variable_summary in summary[\"variables\"].items():\r\n---> 90 handler.handle(variable_summary[\"type\"], name, variable_summary, batch)\r\n 91 \r\n 92 # We don't actually update the suite object on the batch in place, so need\r\n\r\n...\r\n~/.pyenv/versions/vroom/lib/python3.7/site-packages/great_expectations/dataset/pandas_dataset.py in is_between(val)\r\n 1223 ):\r\n 1224 raise TypeError(\r\n-> 1225 \"Column values, min_value, and max_value must either be None or of the same type.\"\r\n 1226 )\r\n 1227 \r\n\r\n```\r\n</details>\r\n\r\n```python\r\nTypeError: Column values, min_value, and max_value must either be None or of the same type.\r\n```\r\n\r\nFrom looking at the arguments to `is_between` in `great_expectations.dataset.pandas_dataset.py`, this seems to be due to `min_value` and `max_value` being serialised to their string representations, but the column itself doesn't.\r\n\r\nI think this can be resolved by modifying the arguments to `datetime_expectations` in `pandas_profiling.model.expectation_algorithms` as follows \r\n\r\n```python\r\ndef datetime_expectations(name, summary, batch, *args):\r\n if any(k in summary for k in [\"min\", \"max\"]):\r\n batch.expect_column_values_to_be_between(\r\n name, min_value=summary.get(\"min\"), max_value=summary.get(\"max\"), parse_strings_as_datetimes=True\r\n )\r\n\r\n return name, summary, batch\r\n```\r\nI've been able to resolve this issue myself by altering that function. 
I'm happy to submit this as a PR if this is acceptable.\r\n\r\n**To Reproduce**\r\n```python\r\n\"\"\"\r\nTest for issue XXX:\r\nhttps://github.com/pandas-profiling/pandas-profiling/issues/XXX\r\n\"\"\"\r\nimport pandas as pd\r\nimport pandas_profiling\r\n\r\n\r\ndef test_issueXXX():\r\n df = pd.DataFrame({'date':pd.date_range('2021-01-01', '2021-03-01', freq='d')})\r\n profile = pandas_profiling.ProfileReport(df=df, explorative=True)\r\n profile.to_expectation_suite(suite_name='test', save_suite=False, build_data_docs=False, run_validation=False)\r\n \r\n```\r\n\r\n**Version information:**\r\n\r\n* _Python version_: 3.7.6\r\n* _Environment_: Pycharm\r\n* _`pip`_: \r\n\r\n<details><summary>Click to expand <strong><em>Version information</em></strong></summary>\r\n<p>\r\n\r\n```\r\nalembic==1.4.1\r\naltair==4.1.0\r\nappnope==0.1.2\r\nargon2-cffi==20.1.0\r\narrow==0.17.0\r\nastor==0.8.1\r\nasync-generator==1.10\r\nattrs==20.3.0\r\nazure-core==1.11.0\r\nazure-storage-blob==12.7.1\r\nbackcall==0.2.0\r\nbase58==2.1.0\r\nbinaryornot==0.4.4\r\nbleach==3.2.3\r\nblinker==1.4\r\nboto3==1.17.2\r\nbotocore==1.20.2\r\ncachetools==4.2.1\r\ncategory-encoders==2.2.2\r\ncertifi==2020.12.5\r\ncffi==1.14.4\r\nchardet==3.0.4\r\nclick==7.1.2\r\ncloudpickle==1.6.0\r\ncolorama==0.4.4\r\ncommonmark==0.9.1\r\nconfuse==1.4.0\r\ncookiecutter==1.7.2\r\ncryptography==3.3.1\r\ncycler==0.10.0\r\ndask==2021.2.0\r\ndatabricks-cli==0.14.1\r\ndecorator==4.4.2\r\ndefusedxml==0.6.0\r\ndistributed==2021.2.0\r\ndocker==4.4.2\r\ndocutils==0.16\r\nentrypoints==0.3\r\nFaker==6.0.0\r\nfastapi==0.63.0\r\nfeaturetools==0.23.1\r\nFlask==1.1.2\r\nfsspec==0.8.5\r\ngitdb==4.0.5\r\nGitPython==3.1.12\r\ngreat-expectations==0.13.8\r\ngunicorn==20.0.4\r\nHeapDict==1.0.1\r\nhtmlmin==0.1.12\r\nhypothesis==6.3.4\r\nidna==2.10\r\nImageHash==4.2.0\r\nimportlib-metadata==2.1.1\r\niniconfig==1.1.1\r\nipykernel==5.4.3\r\nipython==7.19.0\r\nipython-genutils==0.2.0\r\nipywidgets==7.6.3\r\nisodate==0.6.0\r\nitsdangerous==1.1.0\r\njedi==0.18.0\r\nJinja2==2.11.2\r\njinja2-time==0.2.0\r\njinjasql==0.1.8\r\njmespath==0.10.0\r\njoblib==1.0.0\r\njsonpatch==1.28\r\njsonpointer==2.0\r\njsonschema==3.2.0\r\njupyter==1.0.0\r\njupyter-client==6.1.11\r\njupyter-console==6.2.0\r\njupyter-core==4.7.0\r\njupyterlab-pygments==0.1.2\r\njupyterlab-widgets==1.0.0\r\nkiwisolver==1.3.1\r\nlightgbm==3.1.1\r\nlocket==0.2.1\r\nloky==2.9.0\r\nMako==1.1.4\r\nMarkupSafe==1.1.1\r\nmatplotlib==3.3.3\r\nmissingno==0.4.2\r\nmistune==0.8.4\r\nmlflow==1.13.1\r\nmock==4.0.3\r\nmsgpack==1.0.2\r\nmsrest==0.6.21\r\nmypy-extensions==0.4.3\r\nnbclient==0.5.1\r\nnbconvert==6.0.7\r\nnbformat==5.1.2\r\nnest-asyncio==1.5.1\r\nnetworkx==2.5\r\nnltk==3.5\r\nnotebook==6.2.0\r\nnumpy==1.19.5\r\noauthlib==3.1.0\r\nopt-einsum==3.3.0\r\npackaging==20.8\r\npandas==1.2.1\r\npandas-profiling==2.11.0\r\npandera==0.6.2\r\npandocfilters==1.4.3\r\nparso==0.8.1\r\npartd==1.1.0\r\npatsy==0.5.1\r\npexpect==4.8.0\r\nphik==0.11.2\r\npickleshare==0.7.5\r\nPillow==8.1.0\r\npluggy==0.13.1\r\npoyo==0.5.0\r\nprometheus-client==0.9.0\r\nprometheus-flask-exporter==0.18.1\r\nprompt-toolkit==3.0.14\r\nprotobuf==3.14.0\r\npsutil==5.8.0\r\npsycopg2-binary==2.8.6\r\nptyprocess==0.7.0\r\npy==1.10.0\r\npycparser==2.20\r\npydantic==1.7.3\r\npydeck==0.5.0\r\nPygments==2.7.4\r\npyOpenSSL==20.0.1\r\npyparsing==2.4.7\r\nPyPika==0.47.4\r\npyro-api==0.1.2\r\npyro-ppl==1.5.2\r\npyrsistent==0.17.3\r\npytest==6.2.2\r\npython-dateutil==2.8.1\r\npython-dotenv==0.15.0\r\npython-editor==1.0.4\r\npython-slugify==4.0.1\r\npytz==2020.5\r\nPyWav
elets==1.1.1\r\nPyYAML==5.4.1\r\npyzmq==21.0.2\r\nqtconsole==5.0.2\r\nQtPy==1.9.0\r\nquerystring-parser==1.2.4\r\nregex==2020.11.13\r\nrequests==2.25.1\r\nrequests-oauthlib==1.3.0\r\nrich==9.11.0\r\nrpy2==3.4.2\r\nruamel.yaml==0.16.12\r\ns3transfer==0.3.4\r\nscikit-learn==0.24.1\r\nscipy==1.6.0\r\nseaborn==0.11.1\r\nSend2Trash==1.5.0\r\nshellingham==1.4.0\r\nsix==1.15.0\r\nsmart-open==4.2.0\r\nsmmap==3.0.5\r\nsortedcontainers==2.3.0\r\nSQLAlchemy==1.3.23\r\nsqlparse==0.4.1\r\nstarlette==0.13.6\r\nstatsmodels==0.12.2\r\nstreamlit==0.77.0\r\ntabulate==0.8.7\r\ntangled-up-in-unicode==0.0.6\r\ntblib==1.7.0\r\ntermcolor==1.1.0\r\nterminado==0.9.2\r\ntestpath==0.4.4\r\ntext-unidecode==1.3\r\nthreadpoolctl==2.1.0\r\ntoml==0.10.2\r\ntoolz==0.11.1\r\ntorch==1.7.1\r\ntornado==6.1\r\ntqdm==4.56.0\r\ntraitlets==5.0.5\r\ntyper==0.3.2\r\ntyper-cli==0.0.11\r\ntyping-extensions==3.7.4.3\r\ntyping-inspect==0.6.0\r\ntzlocal==2.1\r\nurllib3==1.25.11\r\nvalidators==0.18.2\r\nvisions==0.6.0\r\nwcwidth==0.2.5\r\nwebencodings==0.5.1\r\nwebsocket-client==0.57.0\r\nWerkzeug==1.0.1\r\nwidgetsnbextension==3.5.1\r\nwrapt==1.12.1\r\nzict==2.0.0\r\nzipp==3.4.0\r\n```\r\n</p></summary></details>\r\n\r\n\n", "before_files": [{"content": "def generic_expectations(name, summary, batch, *args):\r\n batch.expect_column_to_exist(name)\r\n\r\n if summary[\"n_missing\"] == 0:\r\n batch.expect_column_values_to_not_be_null(name)\r\n\r\n if summary[\"p_unique\"] == 1.0:\r\n batch.expect_column_values_to_be_unique(name)\r\n\r\n return name, summary, batch\r\n\r\n\r\ndef numeric_expectations(name, summary, batch, *args):\r\n from great_expectations.profile.base import ProfilerTypeMapping\r\n\r\n numeric_type_names = (\r\n ProfilerTypeMapping.INT_TYPE_NAMES + ProfilerTypeMapping.FLOAT_TYPE_NAMES\r\n )\r\n\r\n batch.expect_column_values_to_be_in_type_list(\r\n name,\r\n numeric_type_names,\r\n meta={\r\n \"notes\": {\r\n \"format\": \"markdown\",\r\n \"content\": [\r\n \"The column values should be stored in one of these types.\"\r\n ],\r\n }\r\n },\r\n )\r\n\r\n if summary[\"monotonic_increase\"]:\r\n batch.expect_column_values_to_be_increasing(\r\n name, strictly=summary[\"monotonic_increase_strict\"]\r\n )\r\n\r\n if summary[\"monotonic_decrease\"]:\r\n batch.expect_column_values_to_be_decreasing(\r\n name, strictly=summary[\"monotonic_decrease_strict\"]\r\n )\r\n\r\n if any(k in summary for k in [\"min\", \"max\"]):\r\n batch.expect_column_values_to_be_between(\r\n name, min_value=summary.get(\"min\"), max_value=summary.get(\"max\")\r\n )\r\n\r\n return name, summary, batch\r\n\r\n\r\ndef categorical_expectations(name, summary, batch, *args):\r\n # Use for both categorical and special case (boolean)\r\n absolute_threshold = 10\r\n relative_threshold = 0.2\r\n if (\r\n summary[\"n_distinct\"] < absolute_threshold\r\n or summary[\"p_distinct\"] < relative_threshold\r\n ):\r\n batch.expect_column_values_to_be_in_set(\r\n name, set(summary[\"value_counts_without_nan\"].keys())\r\n )\r\n return name, summary, batch\r\n\r\n\r\ndef path_expectations(name, summary, batch, *args):\r\n return name, summary, batch\r\n\r\n\r\ndef datetime_expectations(name, summary, batch, *args):\r\n if any(k in summary for k in [\"min\", \"max\"]):\r\n batch.expect_column_values_to_be_between(\r\n name, min_value=summary.get(\"min\"), max_value=summary.get(\"max\")\r\n )\r\n\r\n return name, summary, batch\r\n\r\n\r\ndef image_expectations(name, summary, batch, *args):\r\n return name, summary, batch\r\n\r\n\r\ndef url_expectations(name, summary, batch, 
*args):\r\n return name, summary, batch\r\n\r\n\r\ndef file_expectations(name, summary, batch, *args):\r\n # By definition within our type logic, a file exists (as it's a path that also exists)\r\n batch.expect_file_to_exist(name)\r\n\r\n return name, summary, batch\r\n", "path": "src/pandas_profiling/model/expectation_algorithms.py"}], "after_files": [{"content": "def generic_expectations(name, summary, batch, *args):\r\n batch.expect_column_to_exist(name)\r\n\r\n if summary[\"n_missing\"] == 0:\r\n batch.expect_column_values_to_not_be_null(name)\r\n\r\n if summary[\"p_unique\"] == 1.0:\r\n batch.expect_column_values_to_be_unique(name)\r\n\r\n return name, summary, batch\r\n\r\n\r\ndef numeric_expectations(name, summary, batch, *args):\r\n from great_expectations.profile.base import ProfilerTypeMapping\r\n\r\n numeric_type_names = (\r\n ProfilerTypeMapping.INT_TYPE_NAMES + ProfilerTypeMapping.FLOAT_TYPE_NAMES\r\n )\r\n\r\n batch.expect_column_values_to_be_in_type_list(\r\n name,\r\n numeric_type_names,\r\n meta={\r\n \"notes\": {\r\n \"format\": \"markdown\",\r\n \"content\": [\r\n \"The column values should be stored in one of these types.\"\r\n ],\r\n }\r\n },\r\n )\r\n\r\n if summary[\"monotonic_increase\"]:\r\n batch.expect_column_values_to_be_increasing(\r\n name, strictly=summary[\"monotonic_increase_strict\"]\r\n )\r\n\r\n if summary[\"monotonic_decrease\"]:\r\n batch.expect_column_values_to_be_decreasing(\r\n name, strictly=summary[\"monotonic_decrease_strict\"]\r\n )\r\n\r\n if any(k in summary for k in [\"min\", \"max\"]):\r\n batch.expect_column_values_to_be_between(\r\n name, min_value=summary.get(\"min\"), max_value=summary.get(\"max\")\r\n )\r\n\r\n return name, summary, batch\r\n\r\n\r\ndef categorical_expectations(name, summary, batch, *args):\r\n # Use for both categorical and special case (boolean)\r\n absolute_threshold = 10\r\n relative_threshold = 0.2\r\n if (\r\n summary[\"n_distinct\"] < absolute_threshold\r\n or summary[\"p_distinct\"] < relative_threshold\r\n ):\r\n batch.expect_column_values_to_be_in_set(\r\n name, set(summary[\"value_counts_without_nan\"].keys())\r\n )\r\n return name, summary, batch\r\n\r\n\r\ndef path_expectations(name, summary, batch, *args):\r\n return name, summary, batch\r\n\r\n\r\ndef datetime_expectations(name, summary, batch, *args):\r\n if any(k in summary for k in [\"min\", \"max\"]):\r\n batch.expect_column_values_to_be_between(\r\n name,\r\n min_value=summary.get(\"min\"),\r\n max_value=summary.get(\"max\"),\r\n parse_strings_as_datetimes=True,\r\n )\r\n\r\n return name, summary, batch\r\n\r\n\r\ndef image_expectations(name, summary, batch, *args):\r\n return name, summary, batch\r\n\r\n\r\ndef url_expectations(name, summary, batch, *args):\r\n return name, summary, batch\r\n\r\n\r\ndef file_expectations(name, summary, batch, *args):\r\n # By definition within our type logic, a file exists (as it's a path that also exists)\r\n batch.expect_file_to_exist(name)\r\n\r\n return name, summary, batch\r\n", "path": "src/pandas_profiling/model/expectation_algorithms.py"}]}
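For readers skimming this record, the sketch below (not part of the dataset) shows the behavioural difference the patched `datetime_expectations` helper introduces: the bounds check is now emitted with `parse_strings_as_datetimes=True`. The `RecordingBatch` class is a hypothetical stand-in for a great_expectations batch, used only so the snippet runs without the real library.

```python
import datetime


class RecordingBatch:
    """Hypothetical stand-in for a great_expectations batch: it only records
    which expectation calls were made, instead of validating real data."""

    def __init__(self):
        self.calls = []

    def expect_column_values_to_be_between(self, name, **kwargs):
        self.calls.append(("expect_column_values_to_be_between", name, kwargs))


def datetime_expectations(name, summary, batch, *args):
    # Mirrors the patched helper from the after_files above: datetime bounds
    # are now passed along with parse_strings_as_datetimes=True.
    if any(k in summary for k in ["min", "max"]):
        batch.expect_column_values_to_be_between(
            name,
            min_value=summary.get("min"),
            max_value=summary.get("max"),
            parse_strings_as_datetimes=True,
        )
    return name, summary, batch


summary = {
    "min": datetime.datetime(2021, 1, 1),
    "max": datetime.datetime(2021, 3, 1),
}
batch = RecordingBatch()
datetime_expectations("date", summary, batch)
print(batch.calls)
```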
3,780
175
gh_patches_debug_23173
rasdani/github-patches
git_diff
scrapy__scrapy-4555
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Deprecate Response.body_as_unicode --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/http/response/text.py` Content: ``` 1 """ 2 This module implements the TextResponse class which adds encoding handling and 3 discovering (through HTTP headers) to base Response class. 4 5 See documentation in docs/topics/request-response.rst 6 """ 7 8 from contextlib import suppress 9 from typing import Generator 10 from urllib.parse import urljoin 11 12 import parsel 13 from w3lib.encoding import (html_body_declared_encoding, html_to_unicode, 14 http_content_type_encoding, resolve_encoding) 15 from w3lib.html import strip_html5_whitespace 16 17 from scrapy.http import Request 18 from scrapy.http.response import Response 19 from scrapy.utils.python import memoizemethod_noargs, to_unicode 20 from scrapy.utils.response import get_base_url 21 22 23 class TextResponse(Response): 24 25 _DEFAULT_ENCODING = 'ascii' 26 27 def __init__(self, *args, **kwargs): 28 self._encoding = kwargs.pop('encoding', None) 29 self._cached_benc = None 30 self._cached_ubody = None 31 self._cached_selector = None 32 super(TextResponse, self).__init__(*args, **kwargs) 33 34 def _set_url(self, url): 35 if isinstance(url, str): 36 self._url = to_unicode(url, self.encoding) 37 else: 38 super(TextResponse, self)._set_url(url) 39 40 def _set_body(self, body): 41 self._body = b'' # used by encoding detection 42 if isinstance(body, str): 43 if self._encoding is None: 44 raise TypeError('Cannot convert unicode body - %s has no encoding' % 45 type(self).__name__) 46 self._body = body.encode(self._encoding) 47 else: 48 super(TextResponse, self)._set_body(body) 49 50 def replace(self, *args, **kwargs): 51 kwargs.setdefault('encoding', self.encoding) 52 return Response.replace(self, *args, **kwargs) 53 54 @property 55 def encoding(self): 56 return self._declared_encoding() or self._body_inferred_encoding() 57 58 def _declared_encoding(self): 59 return self._encoding or self._headers_encoding() \ 60 or self._body_declared_encoding() 61 62 def body_as_unicode(self): 63 """Return body as unicode""" 64 return self.text 65 66 @property 67 def text(self): 68 """ Body as unicode """ 69 # access self.encoding before _cached_ubody to make sure 70 # _body_inferred_encoding is called 71 benc = self.encoding 72 if self._cached_ubody is None: 73 charset = 'charset=%s' % benc 74 self._cached_ubody = html_to_unicode(charset, self.body)[1] 75 return self._cached_ubody 76 77 def urljoin(self, url): 78 """Join this Response's url with a possible relative url to form an 79 absolute interpretation of the latter.""" 80 return urljoin(get_base_url(self), url) 81 82 @memoizemethod_noargs 83 def _headers_encoding(self): 84 content_type = self.headers.get(b'Content-Type', b'') 85 return http_content_type_encoding(to_unicode(content_type)) 86 87 def _body_inferred_encoding(self): 88 if self._cached_benc is None: 89 content_type = to_unicode(self.headers.get(b'Content-Type', b'')) 90 benc, ubody = html_to_unicode(content_type, self.body, 91 auto_detect_fun=self._auto_detect_fun, 92 default_encoding=self._DEFAULT_ENCODING) 93 self._cached_benc = benc 94 self._cached_ubody = ubody 95 return self._cached_benc 96 97 def _auto_detect_fun(self, text): 98 for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'): 99 try: 100 text.decode(enc) 101 except UnicodeError: 102 
continue 103 return resolve_encoding(enc) 104 105 @memoizemethod_noargs 106 def _body_declared_encoding(self): 107 return html_body_declared_encoding(self.body) 108 109 @property 110 def selector(self): 111 from scrapy.selector import Selector 112 if self._cached_selector is None: 113 self._cached_selector = Selector(self) 114 return self._cached_selector 115 116 def xpath(self, query, **kwargs): 117 return self.selector.xpath(query, **kwargs) 118 119 def css(self, query): 120 return self.selector.css(query) 121 122 def follow(self, url, callback=None, method='GET', headers=None, body=None, 123 cookies=None, meta=None, encoding=None, priority=0, 124 dont_filter=False, errback=None, cb_kwargs=None, flags=None): 125 # type: (...) -> Request 126 """ 127 Return a :class:`~.Request` instance to follow a link ``url``. 128 It accepts the same arguments as ``Request.__init__`` method, 129 but ``url`` can be not only an absolute URL, but also 130 131 * a relative URL 132 * a :class:`~scrapy.link.Link` object, e.g. the result of 133 :ref:`topics-link-extractors` 134 * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g. 135 ``response.css('a.my_link')[0]`` 136 * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g. 137 ``response.css('a::attr(href)')[0]`` or 138 ``response.xpath('//img/@src')[0]`` 139 140 See :ref:`response-follow-example` for usage examples. 141 """ 142 if isinstance(url, parsel.Selector): 143 url = _url_from_selector(url) 144 elif isinstance(url, parsel.SelectorList): 145 raise ValueError("SelectorList is not supported") 146 encoding = self.encoding if encoding is None else encoding 147 return super(TextResponse, self).follow( 148 url=url, 149 callback=callback, 150 method=method, 151 headers=headers, 152 body=body, 153 cookies=cookies, 154 meta=meta, 155 encoding=encoding, 156 priority=priority, 157 dont_filter=dont_filter, 158 errback=errback, 159 cb_kwargs=cb_kwargs, 160 flags=flags, 161 ) 162 163 def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None, 164 cookies=None, meta=None, encoding=None, priority=0, 165 dont_filter=False, errback=None, cb_kwargs=None, flags=None, 166 css=None, xpath=None): 167 # type: (...) -> Generator[Request, None, None] 168 """ 169 A generator that produces :class:`~.Request` instances to follow all 170 links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s 171 ``__init__`` method, except that each ``urls`` element does not need to be 172 an absolute URL, it can be any of the following: 173 174 * a relative URL 175 * a :class:`~scrapy.link.Link` object, e.g. the result of 176 :ref:`topics-link-extractors` 177 * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g. 178 ``response.css('a.my_link')[0]`` 179 * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g. 180 ``response.css('a::attr(href)')[0]`` or 181 ``response.xpath('//img/@src')[0]`` 182 183 In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction 184 within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted). 
185 186 Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or 187 using the ``css`` or ``xpath`` parameters, this method will not produce requests for 188 selectors from which links cannot be obtained (for instance, anchor tags without an 189 ``href`` attribute) 190 """ 191 arguments = [x for x in (urls, css, xpath) if x is not None] 192 if len(arguments) != 1: 193 raise ValueError( 194 "Please supply exactly one of the following arguments: urls, css, xpath" 195 ) 196 if not urls: 197 if css: 198 urls = self.css(css) 199 if xpath: 200 urls = self.xpath(xpath) 201 if isinstance(urls, parsel.SelectorList): 202 selectors = urls 203 urls = [] 204 for sel in selectors: 205 with suppress(_InvalidSelector): 206 urls.append(_url_from_selector(sel)) 207 return super(TextResponse, self).follow_all( 208 urls=urls, 209 callback=callback, 210 method=method, 211 headers=headers, 212 body=body, 213 cookies=cookies, 214 meta=meta, 215 encoding=encoding, 216 priority=priority, 217 dont_filter=dont_filter, 218 errback=errback, 219 cb_kwargs=cb_kwargs, 220 flags=flags, 221 ) 222 223 224 class _InvalidSelector(ValueError): 225 """ 226 Raised when a URL cannot be obtained from a Selector 227 """ 228 229 230 def _url_from_selector(sel): 231 # type: (parsel.Selector) -> str 232 if isinstance(sel.root, str): 233 # e.g. ::attr(href) result 234 return strip_html5_whitespace(sel.root) 235 if not hasattr(sel.root, 'tag'): 236 raise _InvalidSelector("Unsupported selector: %s" % sel) 237 if sel.root.tag not in ('a', 'link'): 238 raise _InvalidSelector("Only <a> and <link> elements are supported; got <%s>" % 239 sel.root.tag) 240 href = sel.root.get('href') 241 if href is None: 242 raise _InvalidSelector("<%s> element has no href attribute: %s" % 243 (sel.root.tag, sel)) 244 return strip_html5_whitespace(href) 245 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/http/response/text.py b/scrapy/http/response/text.py --- a/scrapy/http/response/text.py +++ b/scrapy/http/response/text.py @@ -5,6 +5,7 @@ See documentation in docs/topics/request-response.rst """ +import warnings from contextlib import suppress from typing import Generator from urllib.parse import urljoin @@ -14,6 +15,7 @@ http_content_type_encoding, resolve_encoding) from w3lib.html import strip_html5_whitespace +from scrapy.exceptions import ScrapyDeprecationWarning from scrapy.http import Request from scrapy.http.response import Response from scrapy.utils.python import memoizemethod_noargs, to_unicode @@ -61,6 +63,9 @@ def body_as_unicode(self): """Return body as unicode""" + warnings.warn('Response.body_as_unicode() is deprecated, ' + 'please use Response.text instead.', + ScrapyDeprecationWarning) return self.text @property
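Assuming a Scrapy checkout with the patch above applied, a minimal check of the new behaviour might look like the following sketch; the URL and body are made-up placeholder values, and the check relies on `ScrapyDeprecationWarning` being an ordinary `Warning` subclass so `warnings.catch_warnings` can capture it.

```python
import warnings

from scrapy.http import TextResponse

# Hypothetical response object; url and body are placeholder values.
response = TextResponse(url="http://example.com", body=b"hello", encoding="utf-8")

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    text = response.body_as_unicode()  # deprecated spelling

# The deprecated method still returns the same value as Response.text ...
assert text == response.text == "hello"
# ... but, with the patch applied, it should also have emitted a warning.
assert any("deprecated" in str(w.message) for w in caught)
```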
{"golden_diff": "diff --git a/scrapy/http/response/text.py b/scrapy/http/response/text.py\n--- a/scrapy/http/response/text.py\n+++ b/scrapy/http/response/text.py\n@@ -5,6 +5,7 @@\n See documentation in docs/topics/request-response.rst\n \"\"\"\n \n+import warnings\n from contextlib import suppress\n from typing import Generator\n from urllib.parse import urljoin\n@@ -14,6 +15,7 @@\n http_content_type_encoding, resolve_encoding)\n from w3lib.html import strip_html5_whitespace\n \n+from scrapy.exceptions import ScrapyDeprecationWarning\n from scrapy.http import Request\n from scrapy.http.response import Response\n from scrapy.utils.python import memoizemethod_noargs, to_unicode\n@@ -61,6 +63,9 @@\n \n def body_as_unicode(self):\n \"\"\"Return body as unicode\"\"\"\n+ warnings.warn('Response.body_as_unicode() is deprecated, '\n+ 'please use Response.text instead.',\n+ ScrapyDeprecationWarning)\n return self.text\n \n @property\n", "issue": "Deprecate Response.body_as_unicode\n\n", "before_files": [{"content": "\"\"\"\nThis module implements the TextResponse class which adds encoding handling and\ndiscovering (through HTTP headers) to base Response class.\n\nSee documentation in docs/topics/request-response.rst\n\"\"\"\n\nfrom contextlib import suppress\nfrom typing import Generator\nfrom urllib.parse import urljoin\n\nimport parsel\nfrom w3lib.encoding import (html_body_declared_encoding, html_to_unicode,\n http_content_type_encoding, resolve_encoding)\nfrom w3lib.html import strip_html5_whitespace\n\nfrom scrapy.http import Request\nfrom scrapy.http.response import Response\nfrom scrapy.utils.python import memoizemethod_noargs, to_unicode\nfrom scrapy.utils.response import get_base_url\n\n\nclass TextResponse(Response):\n\n _DEFAULT_ENCODING = 'ascii'\n\n def __init__(self, *args, **kwargs):\n self._encoding = kwargs.pop('encoding', None)\n self._cached_benc = None\n self._cached_ubody = None\n self._cached_selector = None\n super(TextResponse, self).__init__(*args, **kwargs)\n\n def _set_url(self, url):\n if isinstance(url, str):\n self._url = to_unicode(url, self.encoding)\n else:\n super(TextResponse, self)._set_url(url)\n\n def _set_body(self, body):\n self._body = b'' # used by encoding detection\n if isinstance(body, str):\n if self._encoding is None:\n raise TypeError('Cannot convert unicode body - %s has no encoding' %\n type(self).__name__)\n self._body = body.encode(self._encoding)\n else:\n super(TextResponse, self)._set_body(body)\n\n def replace(self, *args, **kwargs):\n kwargs.setdefault('encoding', self.encoding)\n return Response.replace(self, *args, **kwargs)\n\n @property\n def encoding(self):\n return self._declared_encoding() or self._body_inferred_encoding()\n\n def _declared_encoding(self):\n return self._encoding or self._headers_encoding() \\\n or self._body_declared_encoding()\n\n def body_as_unicode(self):\n \"\"\"Return body as unicode\"\"\"\n return self.text\n\n @property\n def text(self):\n \"\"\" Body as unicode \"\"\"\n # access self.encoding before _cached_ubody to make sure\n # _body_inferred_encoding is called\n benc = self.encoding\n if self._cached_ubody is None:\n charset = 'charset=%s' % benc\n self._cached_ubody = html_to_unicode(charset, self.body)[1]\n return self._cached_ubody\n\n def urljoin(self, url):\n \"\"\"Join this Response's url with a possible relative url to form an\n absolute interpretation of the latter.\"\"\"\n return urljoin(get_base_url(self), url)\n\n @memoizemethod_noargs\n def _headers_encoding(self):\n content_type = 
self.headers.get(b'Content-Type', b'')\n return http_content_type_encoding(to_unicode(content_type))\n\n def _body_inferred_encoding(self):\n if self._cached_benc is None:\n content_type = to_unicode(self.headers.get(b'Content-Type', b''))\n benc, ubody = html_to_unicode(content_type, self.body,\n auto_detect_fun=self._auto_detect_fun,\n default_encoding=self._DEFAULT_ENCODING)\n self._cached_benc = benc\n self._cached_ubody = ubody\n return self._cached_benc\n\n def _auto_detect_fun(self, text):\n for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):\n try:\n text.decode(enc)\n except UnicodeError:\n continue\n return resolve_encoding(enc)\n\n @memoizemethod_noargs\n def _body_declared_encoding(self):\n return html_body_declared_encoding(self.body)\n\n @property\n def selector(self):\n from scrapy.selector import Selector\n if self._cached_selector is None:\n self._cached_selector = Selector(self)\n return self._cached_selector\n\n def xpath(self, query, **kwargs):\n return self.selector.xpath(query, **kwargs)\n\n def css(self, query):\n return self.selector.css(query)\n\n def follow(self, url, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None):\n # type: (...) -> Request\n \"\"\"\n Return a :class:`~.Request` instance to follow a link ``url``.\n It accepts the same arguments as ``Request.__init__`` method,\n but ``url`` can be not only an absolute URL, but also\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n See :ref:`response-follow-example` for usage examples.\n \"\"\"\n if isinstance(url, parsel.Selector):\n url = _url_from_selector(url)\n elif isinstance(url, parsel.SelectorList):\n raise ValueError(\"SelectorList is not supported\")\n encoding = self.encoding if encoding is None else encoding\n return super(TextResponse, self).follow(\n url=url,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None,\n css=None, xpath=None):\n # type: (...) -> Generator[Request, None, None]\n \"\"\"\n A generator that produces :class:`~.Request` instances to follow all\n links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s\n ``__init__`` method, except that each ``urls`` element does not need to be\n an absolute URL, it can be any of the following:\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. 
the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction\n within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted).\n\n Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or\n using the ``css`` or ``xpath`` parameters, this method will not produce requests for\n selectors from which links cannot be obtained (for instance, anchor tags without an\n ``href`` attribute)\n \"\"\"\n arguments = [x for x in (urls, css, xpath) if x is not None]\n if len(arguments) != 1:\n raise ValueError(\n \"Please supply exactly one of the following arguments: urls, css, xpath\"\n )\n if not urls:\n if css:\n urls = self.css(css)\n if xpath:\n urls = self.xpath(xpath)\n if isinstance(urls, parsel.SelectorList):\n selectors = urls\n urls = []\n for sel in selectors:\n with suppress(_InvalidSelector):\n urls.append(_url_from_selector(sel))\n return super(TextResponse, self).follow_all(\n urls=urls,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n\nclass _InvalidSelector(ValueError):\n \"\"\"\n Raised when a URL cannot be obtained from a Selector\n \"\"\"\n\n\ndef _url_from_selector(sel):\n # type: (parsel.Selector) -> str\n if isinstance(sel.root, str):\n # e.g. ::attr(href) result\n return strip_html5_whitespace(sel.root)\n if not hasattr(sel.root, 'tag'):\n raise _InvalidSelector(\"Unsupported selector: %s\" % sel)\n if sel.root.tag not in ('a', 'link'):\n raise _InvalidSelector(\"Only <a> and <link> elements are supported; got <%s>\" %\n sel.root.tag)\n href = sel.root.get('href')\n if href is None:\n raise _InvalidSelector(\"<%s> element has no href attribute: %s\" %\n (sel.root.tag, sel))\n return strip_html5_whitespace(href)\n", "path": "scrapy/http/response/text.py"}], "after_files": [{"content": "\"\"\"\nThis module implements the TextResponse class which adds encoding handling and\ndiscovering (through HTTP headers) to base Response class.\n\nSee documentation in docs/topics/request-response.rst\n\"\"\"\n\nimport warnings\nfrom contextlib import suppress\nfrom typing import Generator\nfrom urllib.parse import urljoin\n\nimport parsel\nfrom w3lib.encoding import (html_body_declared_encoding, html_to_unicode,\n http_content_type_encoding, resolve_encoding)\nfrom w3lib.html import strip_html5_whitespace\n\nfrom scrapy.exceptions import ScrapyDeprecationWarning\nfrom scrapy.http import Request\nfrom scrapy.http.response import Response\nfrom scrapy.utils.python import memoizemethod_noargs, to_unicode\nfrom scrapy.utils.response import get_base_url\n\n\nclass TextResponse(Response):\n\n _DEFAULT_ENCODING = 'ascii'\n\n def __init__(self, *args, **kwargs):\n self._encoding = kwargs.pop('encoding', None)\n self._cached_benc = None\n self._cached_ubody = None\n self._cached_selector = None\n super(TextResponse, self).__init__(*args, **kwargs)\n\n def _set_url(self, url):\n if isinstance(url, str):\n self._url = to_unicode(url, self.encoding)\n else:\n super(TextResponse, self)._set_url(url)\n\n def 
_set_body(self, body):\n self._body = b'' # used by encoding detection\n if isinstance(body, str):\n if self._encoding is None:\n raise TypeError('Cannot convert unicode body - %s has no encoding' %\n type(self).__name__)\n self._body = body.encode(self._encoding)\n else:\n super(TextResponse, self)._set_body(body)\n\n def replace(self, *args, **kwargs):\n kwargs.setdefault('encoding', self.encoding)\n return Response.replace(self, *args, **kwargs)\n\n @property\n def encoding(self):\n return self._declared_encoding() or self._body_inferred_encoding()\n\n def _declared_encoding(self):\n return self._encoding or self._headers_encoding() \\\n or self._body_declared_encoding()\n\n def body_as_unicode(self):\n \"\"\"Return body as unicode\"\"\"\n warnings.warn('Response.body_as_unicode() is deprecated, '\n 'please use Response.text instead.',\n ScrapyDeprecationWarning)\n return self.text\n\n @property\n def text(self):\n \"\"\" Body as unicode \"\"\"\n # access self.encoding before _cached_ubody to make sure\n # _body_inferred_encoding is called\n benc = self.encoding\n if self._cached_ubody is None:\n charset = 'charset=%s' % benc\n self._cached_ubody = html_to_unicode(charset, self.body)[1]\n return self._cached_ubody\n\n def urljoin(self, url):\n \"\"\"Join this Response's url with a possible relative url to form an\n absolute interpretation of the latter.\"\"\"\n return urljoin(get_base_url(self), url)\n\n @memoizemethod_noargs\n def _headers_encoding(self):\n content_type = self.headers.get(b'Content-Type', b'')\n return http_content_type_encoding(to_unicode(content_type))\n\n def _body_inferred_encoding(self):\n if self._cached_benc is None:\n content_type = to_unicode(self.headers.get(b'Content-Type', b''))\n benc, ubody = html_to_unicode(content_type, self.body,\n auto_detect_fun=self._auto_detect_fun,\n default_encoding=self._DEFAULT_ENCODING)\n self._cached_benc = benc\n self._cached_ubody = ubody\n return self._cached_benc\n\n def _auto_detect_fun(self, text):\n for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):\n try:\n text.decode(enc)\n except UnicodeError:\n continue\n return resolve_encoding(enc)\n\n @memoizemethod_noargs\n def _body_declared_encoding(self):\n return html_body_declared_encoding(self.body)\n\n @property\n def selector(self):\n from scrapy.selector import Selector\n if self._cached_selector is None:\n self._cached_selector = Selector(self)\n return self._cached_selector\n\n def xpath(self, query, **kwargs):\n return self.selector.xpath(query, **kwargs)\n\n def css(self, query):\n return self.selector.css(query)\n\n def follow(self, url, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None):\n # type: (...) -> Request\n \"\"\"\n Return a :class:`~.Request` instance to follow a link ``url``.\n It accepts the same arguments as ``Request.__init__`` method,\n but ``url`` can be not only an absolute URL, but also\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. 
the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n See :ref:`response-follow-example` for usage examples.\n \"\"\"\n if isinstance(url, parsel.Selector):\n url = _url_from_selector(url)\n elif isinstance(url, parsel.SelectorList):\n raise ValueError(\"SelectorList is not supported\")\n encoding = self.encoding if encoding is None else encoding\n return super(TextResponse, self).follow(\n url=url,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None,\n css=None, xpath=None):\n # type: (...) -> Generator[Request, None, None]\n \"\"\"\n A generator that produces :class:`~.Request` instances to follow all\n links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s\n ``__init__`` method, except that each ``urls`` element does not need to be\n an absolute URL, it can be any of the following:\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction\n within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted).\n\n Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or\n using the ``css`` or ``xpath`` parameters, this method will not produce requests for\n selectors from which links cannot be obtained (for instance, anchor tags without an\n ``href`` attribute)\n \"\"\"\n arguments = [x for x in (urls, css, xpath) if x is not None]\n if len(arguments) != 1:\n raise ValueError(\n \"Please supply exactly one of the following arguments: urls, css, xpath\"\n )\n if not urls:\n if css:\n urls = self.css(css)\n if xpath:\n urls = self.xpath(xpath)\n if isinstance(urls, parsel.SelectorList):\n selectors = urls\n urls = []\n for sel in selectors:\n with suppress(_InvalidSelector):\n urls.append(_url_from_selector(sel))\n return super(TextResponse, self).follow_all(\n urls=urls,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n\nclass _InvalidSelector(ValueError):\n \"\"\"\n Raised when a URL cannot be obtained from a Selector\n \"\"\"\n\n\ndef _url_from_selector(sel):\n # type: (parsel.Selector) -> str\n if isinstance(sel.root, str):\n # e.g. 
::attr(href) result\n return strip_html5_whitespace(sel.root)\n if not hasattr(sel.root, 'tag'):\n raise _InvalidSelector(\"Unsupported selector: %s\" % sel)\n if sel.root.tag not in ('a', 'link'):\n raise _InvalidSelector(\"Only <a> and <link> elements are supported; got <%s>\" %\n sel.root.tag)\n href = sel.root.get('href')\n if href is None:\n raise _InvalidSelector(\"<%s> element has no href attribute: %s\" %\n (sel.root.tag, sel))\n return strip_html5_whitespace(href)\n", "path": "scrapy/http/response/text.py"}]}
2,966
220
gh_patches_debug_23289
rasdani/github-patches
git_diff
fossasia__open-event-server-6100
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Speakers are queried by role in after event mail cron job but no such role exists Currently, speakers are being queried under the assumption that there is a role called speaker, no such role exists. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/api/helpers/scheduled_jobs.py` Content: ``` 1 import datetime 2 3 import pytz 4 from dateutil.relativedelta import relativedelta 5 6 from app.api.helpers.db import safe_query, save_to_db 7 from app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \ 8 send_followup_email_for_monthly_fee_payment 9 from app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \ 10 send_notif_after_event 11 from app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name 12 from app.api.helpers.utilities import monthdelta 13 from app.models import db 14 from app.models.event import Event 15 from app.models.event_invoice import EventInvoice 16 from app.models.order import Order 17 from app.models.session import Session 18 from app.models.ticket import Ticket 19 from app.models.ticket_fee import get_fee 20 from app.settings import get_settings 21 22 23 def send_after_event_mail(): 24 from app import current_app as app 25 with app.app_context(): 26 events = Event.query.all() 27 upcoming_events = get_upcoming_events() 28 upcoming_event_links = "<ul>" 29 for upcoming_event in upcoming_events: 30 frontend_url = get_settings()['frontend_url'] 31 upcoming_event_links += "<li><a href='{}/events/{}'>{}</a></li>" \ 32 .format(frontend_url, upcoming_event.id, upcoming_event.name) 33 upcoming_event_links += "</ul>" 34 for event in events: 35 organizers = get_user_event_roles_by_role_name(event.id, 'organizer') 36 speakers = get_user_event_roles_by_role_name(event.id, 'speaker') 37 current_time = datetime.datetime.now(pytz.timezone(event.timezone)) 38 time_difference = current_time - event.ends_at 39 time_difference_minutes = (time_difference.days * 24 * 60) + \ 40 (time_difference.seconds / 60) 41 if current_time > event.ends_at and time_difference_minutes < 1440: 42 for speaker in speakers: 43 send_email_after_event(speaker.user.email, event.name, upcoming_event_links) 44 send_notif_after_event(speaker.user, event.name) 45 for organizer in organizers: 46 send_email_after_event(organizer.user.email, event.name, upcoming_event_links) 47 send_notif_after_event(organizer.user, event.name) 48 49 50 def change_session_state_on_event_completion(): 51 from app import current_app as app 52 with app.app_context(): 53 sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\ 54 .filter(Event.ends_at < datetime.datetime.now()) 55 for session in sessions_to_be_changed: 56 session.state = 'rejected' 57 save_to_db(session, 'Changed {} session state to rejected'.format(session.title)) 58 59 60 def send_event_fee_notification(): 61 from app import current_app as app 62 with app.app_context(): 63 events = Event.query.all() 64 for event in events: 65 latest_invoice = EventInvoice.query.filter_by( 66 event_id=event.id).order_by(EventInvoice.created_at.desc()).first() 67 68 if latest_invoice: 69 orders = Order.query \ 70 .filter_by(event_id=event.id) \ 71 .filter_by(status='completed') \ 72 .filter(Order.completed_at > latest_invoice.created_at).all() 
73 else: 74 orders = Order.query.filter_by( 75 event_id=event.id).filter_by(status='completed').all() 76 77 fee_total = 0 78 for order in orders: 79 for order_ticket in order.tickets: 80 ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id') 81 if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0: 82 fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0) 83 fee_total += fee 84 85 if fee_total > 0: 86 organizer = get_user_event_roles_by_role_name(event.id, 'organizer').first() 87 new_invoice = EventInvoice( 88 amount=fee_total, event_id=event.id, user_id=organizer.user.id) 89 90 if event.discount_code_id and event.discount_code: 91 r = relativedelta(datetime.utcnow(), event.created_at) 92 if r <= event.discount_code.valid_till: 93 new_invoice.amount = fee_total - \ 94 (fee_total * (event.discount_code.value / 100.0)) 95 new_invoice.discount_code_id = event.discount_code_id 96 97 save_to_db(new_invoice) 98 prev_month = monthdelta(new_invoice.created_at, 1).strftime( 99 "%b %Y") # Displayed as Aug 2016 100 app_name = get_settings()['app_name'] 101 frontend_url = get_settings()['frontend_url'] 102 link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier) 103 send_email_for_monthly_fee_payment(new_invoice.user.email, 104 event.name, 105 prev_month, 106 new_invoice.amount, 107 app_name, 108 link) 109 send_notif_monthly_fee_payment(new_invoice.user, 110 event.name, 111 prev_month, 112 new_invoice.amount, 113 app_name, 114 link, 115 new_invoice.event_id) 116 117 118 def send_event_fee_notification_followup(): 119 from app import current_app as app 120 with app.app_context(): 121 incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'completed').all() 122 for incomplete_invoice in incomplete_invoices: 123 if incomplete_invoice.amount > 0: 124 prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime( 125 "%b %Y") # Displayed as Aug 2016 126 app_name = get_settings()['app_name'] 127 frontend_url = get_settings()['frontend_url'] 128 link = '{}/invoices/{}'.format(frontend_url, 129 incomplete_invoice.identifier) 130 send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email, 131 incomplete_invoice.event.name, 132 prev_month, 133 incomplete_invoice.amount, 134 app_name, 135 link) 136 send_followup_notif_monthly_fee_payment(incomplete_invoice.user, 137 incomplete_invoice.event.name, 138 prev_month, 139 incomplete_invoice.amount, 140 app_name, 141 link, 142 incomplete_invoice.event.id) 143 144 145 def expire_pending_tickets(): 146 from app import current_app as app 147 with app.app_context(): 148 db.session.query(Order).filter(Order.status == 'pending', 149 (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\ 150 update({'status': 'expired'}) 151 db.session.commit() 152 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/api/helpers/scheduled_jobs.py b/app/api/helpers/scheduled_jobs.py --- a/app/api/helpers/scheduled_jobs.py +++ b/app/api/helpers/scheduled_jobs.py @@ -14,6 +14,7 @@ from app.models.event import Event from app.models.event_invoice import EventInvoice from app.models.order import Order +from app.models.speaker import Speaker from app.models.session import Session from app.models.ticket import Ticket from app.models.ticket_fee import get_fee @@ -33,7 +34,7 @@ upcoming_event_links += "</ul>" for event in events: organizers = get_user_event_roles_by_role_name(event.id, 'organizer') - speakers = get_user_event_roles_by_role_name(event.id, 'speaker') + speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all() current_time = datetime.datetime.now(pytz.timezone(event.timezone)) time_difference = current_time - event.ends_at time_difference_minutes = (time_difference.days * 24 * 60) + \
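To make the intent of that one-line change concrete, here is a small self-contained SQLAlchemy sketch (SQLAlchemy 1.4+, not the project's real models): speakers are selected straight from a `Speaker` table scoped by `event_id` and not soft-deleted, rather than through a user-role lookup that assumes a "speaker" role. The table and column names here are assumptions for illustration only.

```python
from sqlalchemy import Column, DateTime, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Speaker(Base):
    """Hypothetical minimal model; the real app's Speaker has more fields."""
    __tablename__ = "speakers"
    id = Column(Integer, primary_key=True)
    event_id = Column(Integer, nullable=False)
    email = Column(String)
    deleted_at = Column(DateTime)  # soft-delete marker, NULL while active


engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.add_all([
    Speaker(event_id=1, email="a@example.com"),
    Speaker(event_id=1, email="b@example.com"),
    Speaker(event_id=2, email="c@example.com"),
])
session.commit()

# Equivalent of: Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()
speakers = session.query(Speaker).filter_by(event_id=1, deleted_at=None).all()
print([s.email for s in speakers])  # only event 1's non-deleted speakers
```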
{"golden_diff": "diff --git a/app/api/helpers/scheduled_jobs.py b/app/api/helpers/scheduled_jobs.py\n--- a/app/api/helpers/scheduled_jobs.py\n+++ b/app/api/helpers/scheduled_jobs.py\n@@ -14,6 +14,7 @@\n from app.models.event import Event\n from app.models.event_invoice import EventInvoice\n from app.models.order import Order\n+from app.models.speaker import Speaker\n from app.models.session import Session\n from app.models.ticket import Ticket\n from app.models.ticket_fee import get_fee\n@@ -33,7 +34,7 @@\n upcoming_event_links += \"</ul>\"\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n- speakers = get_user_event_roles_by_role_name(event.id, 'speaker')\n+ speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()\n current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + \\\n", "issue": "Speakers are queried by role in after event mail cron job but no such role exists\nCurrently, speakers are being queried under the assumption that there is a role called speaker, no such role exists.\n", "before_files": [{"content": "import datetime\n\nimport pytz\nfrom dateutil.relativedelta import relativedelta\n\nfrom app.api.helpers.db import safe_query, save_to_db\nfrom app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \\\n send_followup_email_for_monthly_fee_payment\nfrom app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \\\n send_notif_after_event\nfrom app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name\nfrom app.api.helpers.utilities import monthdelta\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.session import Session\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_fee import get_fee\nfrom app.settings import get_settings\n\n\ndef send_after_event_mail():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n upcoming_events = get_upcoming_events()\n upcoming_event_links = \"<ul>\"\n for upcoming_event in upcoming_events:\n frontend_url = get_settings()['frontend_url']\n upcoming_event_links += \"<li><a href='{}/events/{}'>{}</a></li>\" \\\n .format(frontend_url, upcoming_event.id, upcoming_event.name)\n upcoming_event_links += \"</ul>\"\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n speakers = get_user_event_roles_by_role_name(event.id, 'speaker')\n current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + \\\n (time_difference.seconds / 60)\n if current_time > event.ends_at and time_difference_minutes < 1440:\n for speaker in speakers:\n send_email_after_event(speaker.user.email, event.name, upcoming_event_links)\n send_notif_after_event(speaker.user, event.name)\n for organizer in organizers:\n send_email_after_event(organizer.user.email, event.name, upcoming_event_links)\n send_notif_after_event(organizer.user, event.name)\n\n\ndef change_session_state_on_event_completion():\n from app import current_app as app\n with app.app_context():\n sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\\\n 
.filter(Event.ends_at < datetime.datetime.now())\n for session in sessions_to_be_changed:\n session.state = 'rejected'\n save_to_db(session, 'Changed {} session state to rejected'.format(session.title))\n\n\ndef send_event_fee_notification():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n for event in events:\n latest_invoice = EventInvoice.query.filter_by(\n event_id=event.id).order_by(EventInvoice.created_at.desc()).first()\n\n if latest_invoice:\n orders = Order.query \\\n .filter_by(event_id=event.id) \\\n .filter_by(status='completed') \\\n .filter(Order.completed_at > latest_invoice.created_at).all()\n else:\n orders = Order.query.filter_by(\n event_id=event.id).filter_by(status='completed').all()\n\n fee_total = 0\n for order in orders:\n for order_ticket in order.tickets:\n ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)\n fee_total += fee\n\n if fee_total > 0:\n organizer = get_user_event_roles_by_role_name(event.id, 'organizer').first()\n new_invoice = EventInvoice(\n amount=fee_total, event_id=event.id, user_id=organizer.user.id)\n\n if event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.utcnow(), event.created_at)\n if r <= event.discount_code.valid_till:\n new_invoice.amount = fee_total - \\\n (fee_total * (event.discount_code.value / 100.0))\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)\n send_email_for_monthly_fee_payment(new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link)\n send_notif_monthly_fee_payment(new_invoice.user,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link,\n new_invoice.event_id)\n\n\ndef send_event_fee_notification_followup():\n from app import current_app as app\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'completed').all()\n for incomplete_invoice in incomplete_invoices:\n if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url,\n incomplete_invoice.identifier)\n send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link)\n send_followup_notif_monthly_fee_payment(incomplete_invoice.user,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link,\n incomplete_invoice.event.id)\n\n\ndef expire_pending_tickets():\n from app import current_app as app\n with app.app_context():\n db.session.query(Order).filter(Order.status == 'pending',\n (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\\\n update({'status': 'expired'})\n db.session.commit()\n", "path": "app/api/helpers/scheduled_jobs.py"}], "after_files": [{"content": "import datetime\n\nimport pytz\nfrom dateutil.relativedelta import relativedelta\n\nfrom app.api.helpers.db 
import safe_query, save_to_db\nfrom app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \\\n send_followup_email_for_monthly_fee_payment\nfrom app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \\\n send_notif_after_event\nfrom app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name\nfrom app.api.helpers.utilities import monthdelta\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.speaker import Speaker\nfrom app.models.session import Session\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_fee import get_fee\nfrom app.settings import get_settings\n\n\ndef send_after_event_mail():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n upcoming_events = get_upcoming_events()\n upcoming_event_links = \"<ul>\"\n for upcoming_event in upcoming_events:\n frontend_url = get_settings()['frontend_url']\n upcoming_event_links += \"<li><a href='{}/events/{}'>{}</a></li>\" \\\n .format(frontend_url, upcoming_event.id, upcoming_event.name)\n upcoming_event_links += \"</ul>\"\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()\n current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + \\\n (time_difference.seconds / 60)\n if current_time > event.ends_at and time_difference_minutes < 1440:\n for speaker in speakers:\n send_email_after_event(speaker.user.email, event.name, upcoming_event_links)\n send_notif_after_event(speaker.user, event.name)\n for organizer in organizers:\n send_email_after_event(organizer.user.email, event.name, upcoming_event_links)\n send_notif_after_event(organizer.user, event.name)\n\n\ndef change_session_state_on_event_completion():\n from app import current_app as app\n with app.app_context():\n sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\\\n .filter(Event.ends_at < datetime.datetime.now())\n for session in sessions_to_be_changed:\n session.state = 'rejected'\n save_to_db(session, 'Changed {} session state to rejected'.format(session.title))\n\n\ndef send_event_fee_notification():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n for event in events:\n latest_invoice = EventInvoice.query.filter_by(\n event_id=event.id).order_by(EventInvoice.created_at.desc()).first()\n\n if latest_invoice:\n orders = Order.query \\\n .filter_by(event_id=event.id) \\\n .filter_by(status='completed') \\\n .filter(Order.completed_at > latest_invoice.created_at).all()\n else:\n orders = Order.query.filter_by(\n event_id=event.id).filter_by(status='completed').all()\n\n fee_total = 0\n for order in orders:\n for order_ticket in order.tickets:\n ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)\n fee_total += fee\n\n if fee_total > 0:\n organizer = get_user_event_roles_by_role_name(event.id, 'organizer').first()\n new_invoice = EventInvoice(\n amount=fee_total, event_id=event.id, user_id=organizer.user.id)\n\n if 
event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.utcnow(), event.created_at)\n if r <= event.discount_code.valid_till:\n new_invoice.amount = fee_total - \\\n (fee_total * (event.discount_code.value / 100.0))\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)\n send_email_for_monthly_fee_payment(new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link)\n send_notif_monthly_fee_payment(new_invoice.user,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link,\n new_invoice.event_id)\n\n\ndef send_event_fee_notification_followup():\n from app import current_app as app\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'completed').all()\n for incomplete_invoice in incomplete_invoices:\n if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url,\n incomplete_invoice.identifier)\n send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link)\n send_followup_notif_monthly_fee_payment(incomplete_invoice.user,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link,\n incomplete_invoice.event.id)\n\n\ndef expire_pending_tickets():\n from app import current_app as app\n with app.app_context():\n db.session.query(Order).filter(Order.status == 'pending',\n (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\\\n update({'status': 'expired'})\n db.session.commit()\n", "path": "app/api/helpers/scheduled_jobs.py"}]}
2,007
231
gh_patches_debug_13464
rasdani/github-patches
git_diff
ckan__ckan-3285
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- LanguageError: IOError: [Errno 2] No translation file found for domain: 'ckan' Hi, We are noticing some 500 errors are being generated when /e or /n are being passed instead of /en Passing other letters such as /a /b /c will trigger a 404 ### CKAN Version if known (or site URL) Ckan 2.3.X - 2.5.2+ ### Please describe the expected behaviour http://demo.ckan.org/en = 200 http://demo.ckan.org/e = 404 http://demo.ckan.org/n = 404 ### Please describe the actual behaviour http://demo.ckan.org/en = 200 http://demo.ckan.org/e = 500 http://demo.ckan.org/n = 500 ### What steps can be taken to reproduce the issue? pass /e or /n in any ckan instance url and you can trigger a 500 internal server error. http://demo.ckan.org/en/dataset = 200 http://demo.ckan.org/e/dataset = 500 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ckan/lib/i18n.py` Content: ``` 1 # encoding: utf-8 2 3 import os 4 5 from babel import Locale 6 from babel.core import (LOCALE_ALIASES, 7 get_locale_identifier, 8 UnknownLocaleError) 9 from babel.support import Translations 10 from paste.deploy.converters import aslist 11 from pylons import i18n 12 import pylons 13 14 15 from ckan.common import config 16 import ckan.i18n 17 from ckan.plugins import PluginImplementations 18 from ckan.plugins.interfaces import ITranslation 19 20 # Default Portuguese language to Brazilian territory, since 21 # we don't have a Portuguese territory translation currently. 22 LOCALE_ALIASES['pt'] = 'pt_BR' 23 24 25 def get_locales_from_config(): 26 ''' despite the name of this function it gets the locales defined by 27 the config AND also the locals available subject to the config. ''' 28 locales_offered = config.get('ckan.locales_offered', '').split() 29 filtered_out = config.get('ckan.locales_filtered_out', '').split() 30 locale_default = config.get('ckan.locale_default', 'en') 31 locale_order = config.get('ckan.locale_order', '').split() 32 known_locales = get_locales() 33 all_locales = (set(known_locales) | 34 set(locales_offered) | 35 set(locale_order) | 36 set(locale_default)) 37 all_locales -= set(filtered_out) 38 return all_locales 39 40 41 def _get_locales(): 42 # FIXME this wants cleaning up and merging with get_locales_from_config() 43 assert not config.get('lang'), \ 44 ('"lang" config option not supported - please use ckan.locale_default ' 45 'instead.') 46 locales_offered = config.get('ckan.locales_offered', '').split() 47 filtered_out = config.get('ckan.locales_filtered_out', '').split() 48 locale_default = config.get('ckan.locale_default', 'en') 49 locale_order = config.get('ckan.locale_order', '').split() 50 51 locales = ['en'] 52 if config.get('ckan.i18n_directory'): 53 i18n_path = os.path.join(config.get('ckan.i18n_directory'), 'i18n') 54 else: 55 i18n_path = os.path.dirname(ckan.i18n.__file__) 56 57 # For every file in the ckan i18n directory see if babel can understand 58 # the locale. If yes, add it to the available locales 59 for locale in os.listdir(i18n_path): 60 try: 61 Locale.parse(locale) 62 locales.append(locale) 63 except (ValueError, UnknownLocaleError): 64 # Babel does not know how to make a locale out of this. 65 # This is fine since we are passing all files in the 66 # ckan.i18n_directory here which e.g. 
includes the __init__.py 67 pass 68 69 assert locale_default in locales, \ 70 'default language "%s" not available' % locale_default 71 72 locale_list = [] 73 for locale in locales: 74 # no duplicates 75 if locale in locale_list: 76 continue 77 # if offered locales then check locale is offered 78 if locales_offered and locale not in locales_offered: 79 continue 80 # remove if filtered out 81 if locale in filtered_out: 82 continue 83 # ignore the default as it will be added first 84 if locale == locale_default: 85 continue 86 locale_list.append(locale) 87 # order the list if specified 88 ordered_list = [locale_default] 89 for locale in locale_order: 90 if locale in locale_list: 91 ordered_list.append(locale) 92 # added so remove from our list 93 locale_list.remove(locale) 94 # add any remaining locales not ordered 95 ordered_list += locale_list 96 97 return ordered_list 98 99 available_locales = None 100 locales = None 101 locales_dict = None 102 _non_translated_locals = None 103 104 105 def get_locales(): 106 ''' Get list of available locales 107 e.g. [ 'en', 'de', ... ] 108 ''' 109 global locales 110 if not locales: 111 locales = _get_locales() 112 return locales 113 114 115 def non_translated_locals(): 116 ''' These are the locales that are available but for which there are 117 no translations. returns a list like ['en', 'de', ...] ''' 118 global _non_translated_locals 119 if not _non_translated_locals: 120 locales = config.get('ckan.locale_order', '').split() 121 _non_translated_locals = [x for x in locales if x not in get_locales()] 122 return _non_translated_locals 123 124 125 def get_locales_dict(): 126 ''' Get a dict of the available locales 127 e.g. { 'en' : Locale('en'), 'de' : Locale('de'), ... } ''' 128 global locales_dict 129 if not locales_dict: 130 locales = _get_locales() 131 locales_dict = {} 132 for locale in locales: 133 locales_dict[str(locale)] = Locale.parse(locale) 134 return locales_dict 135 136 137 def get_available_locales(): 138 ''' Get a list of the available locales 139 e.g. [ Locale('en'), Locale('de'), ... ] ''' 140 global available_locales 141 if not available_locales: 142 available_locales = [] 143 for locale in get_locales(): 144 # Add the short names for the locales. This equals the filename 145 # of the ckan translation files as opposed to the long name 146 # that includes the script which is generated by babel 147 # so e.g. `zn_CH` instead of `zn_Hans_CH` this is needed 148 # to properly construct urls with url_for 149 parsed_locale = Locale.parse(locale) 150 parsed_locale.short_name = locale 151 152 # Add the full identifier (eg `pt_BR`) to the locale classes, 153 # as it does not offer a way of accessing it directly 154 parsed_locale.identifier = \ 155 get_identifier_from_locale_class(parsed_locale) 156 available_locales.append(parsed_locale) 157 return available_locales 158 159 160 def get_identifier_from_locale_class(locale): 161 return get_locale_identifier( 162 (locale.language, 163 locale.territory, 164 locale.script, 165 locale.variant)) 166 167 168 def _set_lang(lang): 169 ''' Allows a custom i18n directory to be specified. 170 Creates a fake config file to pass to pylons.i18n.set_lang, which 171 sets the Pylons root path to desired i18n_directory. 
172 This is needed as Pylons will only look for an i18n directory in 173 the application root.''' 174 if config.get('ckan.i18n_directory'): 175 fake_config = {'pylons.paths': {'root': config['ckan.i18n_directory']}, 176 'pylons.package': config['pylons.package']} 177 i18n.set_lang(lang, config=fake_config, class_=Translations) 178 else: 179 i18n.set_lang(lang, class_=Translations) 180 181 182 def handle_request(request, tmpl_context): 183 ''' Set the language for the request ''' 184 lang = request.environ.get('CKAN_LANG') or \ 185 config.get('ckan.locale_default', 'en') 186 if lang != 'en': 187 set_lang(lang) 188 189 for plugin in PluginImplementations(ITranslation): 190 if lang in plugin.i18n_locales(): 191 _add_extra_translations(plugin.i18n_directory(), lang, 192 plugin.i18n_domain()) 193 194 extra_directory = config.get('ckan.i18n.extra_directory') 195 extra_domain = config.get('ckan.i18n.extra_gettext_domain') 196 extra_locales = aslist(config.get('ckan.i18n.extra_locales')) 197 if extra_directory and extra_domain and extra_locales: 198 if lang in extra_locales: 199 _add_extra_translations(extra_directory, lang, extra_domain) 200 201 tmpl_context.language = lang 202 return lang 203 204 205 def _add_extra_translations(dirname, locales, domain): 206 translator = Translations.load(dirname=dirname, locales=locales, 207 domain=domain) 208 try: 209 pylons.translator.merge(translator) 210 except AttributeError: 211 # this occurs when an extension has 'en' translations that 212 # replace the default strings. As set_lang has not been run, 213 # pylons.translation is the NullTranslation, so we have to 214 # replace the StackedObjectProxy ourselves manually. 215 environ = pylons.request.environ 216 environ['pylons.pylons'].translator = translator 217 if 'paste.registry' in environ: 218 environ['paste.registry'].replace(pylons.translator, 219 translator) 220 221 222 def get_lang(): 223 ''' Returns the current language. Based on babel.i18n.get_lang but 224 works when set_lang has not been run (i.e. still in English). ''' 225 langs = i18n.get_lang() 226 if langs: 227 return langs[0] 228 else: 229 return 'en' 230 231 232 def set_lang(language_code): 233 ''' Wrapper to pylons call ''' 234 if language_code in non_translated_locals(): 235 language_code = config.get('ckan.locale_default', 'en') 236 if language_code != 'en': 237 _set_lang(language_code) 238 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ckan/lib/i18n.py b/ckan/lib/i18n.py --- a/ckan/lib/i18n.py +++ b/ckan/lib/i18n.py @@ -27,8 +27,9 @@ the config AND also the locals available subject to the config. ''' locales_offered = config.get('ckan.locales_offered', '').split() filtered_out = config.get('ckan.locales_filtered_out', '').split() - locale_default = config.get('ckan.locale_default', 'en') + locale_default = [config.get('ckan.locale_default', 'en')] locale_order = config.get('ckan.locale_order', '').split() + known_locales = get_locales() all_locales = (set(known_locales) | set(locales_offered) |
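The one-line change above works because Python's `set()` constructor iterates whatever it is given: `set("en")` yields the two single characters `{'e', 'n'}`, so the default locale leaked the pseudo-locales `e` and `n` into the union of offered locales, and requests for `/e` or `/n` were routed to a locale with no translation file (the `IOError` in the issue title) instead of falling through to a 404. Wrapping the default in a list keeps it as a single element. A minimal, self-contained sketch of the difference (variable names are illustrative only, not from the patch):

```python
# Sketch of why set(str) differs from set([str]) for the default locale.
locale_default = "en"
offered = {"de", "fr"}

broken_union = offered | set(locale_default)    # iterates the string "en"
fixed_union = offered | set([locale_default])   # treats "en" as one element

print(sorted(broken_union))  # ['de', 'e', 'fr', 'n'] -> '/e' and '/n' look valid
print(sorted(fixed_union))   # ['de', 'en', 'fr']     -> only real locales remain
```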
{"golden_diff": "diff --git a/ckan/lib/i18n.py b/ckan/lib/i18n.py\n--- a/ckan/lib/i18n.py\n+++ b/ckan/lib/i18n.py\n@@ -27,8 +27,9 @@\n the config AND also the locals available subject to the config. '''\n locales_offered = config.get('ckan.locales_offered', '').split()\n filtered_out = config.get('ckan.locales_filtered_out', '').split()\n- locale_default = config.get('ckan.locale_default', 'en')\n+ locale_default = [config.get('ckan.locale_default', 'en')]\n locale_order = config.get('ckan.locale_order', '').split()\n+\n known_locales = get_locales()\n all_locales = (set(known_locales) |\n set(locales_offered) |\n", "issue": "LanguageError: IOError: [Errno 2] No translation file found for domain: 'ckan'\nHi, \n\nWe are noticing some 500 errors are being generated when /e or /n are being passed instead of /en\n\nPassing other letters such as /a /b /c will trigger a 404\n### CKAN Version if known (or site URL)\n\nCkan 2.3.X - 2.5.2+\n### Please describe the expected behaviour\n\nhttp://demo.ckan.org/en = 200\nhttp://demo.ckan.org/e = 404\nhttp://demo.ckan.org/n = 404\n### Please describe the actual behaviour\n\nhttp://demo.ckan.org/en = 200\nhttp://demo.ckan.org/e = 500\nhttp://demo.ckan.org/n = 500\n### What steps can be taken to reproduce the issue?\n\npass /e or /n in any ckan instance url and you can trigger a 500 internal server error.\n\nhttp://demo.ckan.org/en/dataset = 200\n\nhttp://demo.ckan.org/e/dataset = 500\n\n", "before_files": [{"content": "# encoding: utf-8\n\nimport os\n\nfrom babel import Locale\nfrom babel.core import (LOCALE_ALIASES,\n get_locale_identifier,\n UnknownLocaleError)\nfrom babel.support import Translations\nfrom paste.deploy.converters import aslist\nfrom pylons import i18n\nimport pylons\n\n\nfrom ckan.common import config\nimport ckan.i18n\nfrom ckan.plugins import PluginImplementations\nfrom ckan.plugins.interfaces import ITranslation\n\n# Default Portuguese language to Brazilian territory, since\n# we don't have a Portuguese territory translation currently.\nLOCALE_ALIASES['pt'] = 'pt_BR'\n\n\ndef get_locales_from_config():\n ''' despite the name of this function it gets the locales defined by\n the config AND also the locals available subject to the config. '''\n locales_offered = config.get('ckan.locales_offered', '').split()\n filtered_out = config.get('ckan.locales_filtered_out', '').split()\n locale_default = config.get('ckan.locale_default', 'en')\n locale_order = config.get('ckan.locale_order', '').split()\n known_locales = get_locales()\n all_locales = (set(known_locales) |\n set(locales_offered) |\n set(locale_order) |\n set(locale_default))\n all_locales -= set(filtered_out)\n return all_locales\n\n\ndef _get_locales():\n # FIXME this wants cleaning up and merging with get_locales_from_config()\n assert not config.get('lang'), \\\n ('\"lang\" config option not supported - please use ckan.locale_default '\n 'instead.')\n locales_offered = config.get('ckan.locales_offered', '').split()\n filtered_out = config.get('ckan.locales_filtered_out', '').split()\n locale_default = config.get('ckan.locale_default', 'en')\n locale_order = config.get('ckan.locale_order', '').split()\n\n locales = ['en']\n if config.get('ckan.i18n_directory'):\n i18n_path = os.path.join(config.get('ckan.i18n_directory'), 'i18n')\n else:\n i18n_path = os.path.dirname(ckan.i18n.__file__)\n\n # For every file in the ckan i18n directory see if babel can understand\n # the locale. 
If yes, add it to the available locales\n for locale in os.listdir(i18n_path):\n try:\n Locale.parse(locale)\n locales.append(locale)\n except (ValueError, UnknownLocaleError):\n # Babel does not know how to make a locale out of this.\n # This is fine since we are passing all files in the\n # ckan.i18n_directory here which e.g. includes the __init__.py\n pass\n\n assert locale_default in locales, \\\n 'default language \"%s\" not available' % locale_default\n\n locale_list = []\n for locale in locales:\n # no duplicates\n if locale in locale_list:\n continue\n # if offered locales then check locale is offered\n if locales_offered and locale not in locales_offered:\n continue\n # remove if filtered out\n if locale in filtered_out:\n continue\n # ignore the default as it will be added first\n if locale == locale_default:\n continue\n locale_list.append(locale)\n # order the list if specified\n ordered_list = [locale_default]\n for locale in locale_order:\n if locale in locale_list:\n ordered_list.append(locale)\n # added so remove from our list\n locale_list.remove(locale)\n # add any remaining locales not ordered\n ordered_list += locale_list\n\n return ordered_list\n\navailable_locales = None\nlocales = None\nlocales_dict = None\n_non_translated_locals = None\n\n\ndef get_locales():\n ''' Get list of available locales\n e.g. [ 'en', 'de', ... ]\n '''\n global locales\n if not locales:\n locales = _get_locales()\n return locales\n\n\ndef non_translated_locals():\n ''' These are the locales that are available but for which there are\n no translations. returns a list like ['en', 'de', ...] '''\n global _non_translated_locals\n if not _non_translated_locals:\n locales = config.get('ckan.locale_order', '').split()\n _non_translated_locals = [x for x in locales if x not in get_locales()]\n return _non_translated_locals\n\n\ndef get_locales_dict():\n ''' Get a dict of the available locales\n e.g. { 'en' : Locale('en'), 'de' : Locale('de'), ... } '''\n global locales_dict\n if not locales_dict:\n locales = _get_locales()\n locales_dict = {}\n for locale in locales:\n locales_dict[str(locale)] = Locale.parse(locale)\n return locales_dict\n\n\ndef get_available_locales():\n ''' Get a list of the available locales\n e.g. [ Locale('en'), Locale('de'), ... ] '''\n global available_locales\n if not available_locales:\n available_locales = []\n for locale in get_locales():\n # Add the short names for the locales. This equals the filename\n # of the ckan translation files as opposed to the long name\n # that includes the script which is generated by babel\n # so e.g. 
`zn_CH` instead of `zn_Hans_CH` this is needed\n # to properly construct urls with url_for\n parsed_locale = Locale.parse(locale)\n parsed_locale.short_name = locale\n\n # Add the full identifier (eg `pt_BR`) to the locale classes,\n # as it does not offer a way of accessing it directly\n parsed_locale.identifier = \\\n get_identifier_from_locale_class(parsed_locale)\n available_locales.append(parsed_locale)\n return available_locales\n\n\ndef get_identifier_from_locale_class(locale):\n return get_locale_identifier(\n (locale.language,\n locale.territory,\n locale.script,\n locale.variant))\n\n\ndef _set_lang(lang):\n ''' Allows a custom i18n directory to be specified.\n Creates a fake config file to pass to pylons.i18n.set_lang, which\n sets the Pylons root path to desired i18n_directory.\n This is needed as Pylons will only look for an i18n directory in\n the application root.'''\n if config.get('ckan.i18n_directory'):\n fake_config = {'pylons.paths': {'root': config['ckan.i18n_directory']},\n 'pylons.package': config['pylons.package']}\n i18n.set_lang(lang, config=fake_config, class_=Translations)\n else:\n i18n.set_lang(lang, class_=Translations)\n\n\ndef handle_request(request, tmpl_context):\n ''' Set the language for the request '''\n lang = request.environ.get('CKAN_LANG') or \\\n config.get('ckan.locale_default', 'en')\n if lang != 'en':\n set_lang(lang)\n\n for plugin in PluginImplementations(ITranslation):\n if lang in plugin.i18n_locales():\n _add_extra_translations(plugin.i18n_directory(), lang,\n plugin.i18n_domain())\n\n extra_directory = config.get('ckan.i18n.extra_directory')\n extra_domain = config.get('ckan.i18n.extra_gettext_domain')\n extra_locales = aslist(config.get('ckan.i18n.extra_locales'))\n if extra_directory and extra_domain and extra_locales:\n if lang in extra_locales:\n _add_extra_translations(extra_directory, lang, extra_domain)\n\n tmpl_context.language = lang\n return lang\n\n\ndef _add_extra_translations(dirname, locales, domain):\n translator = Translations.load(dirname=dirname, locales=locales,\n domain=domain)\n try:\n pylons.translator.merge(translator)\n except AttributeError:\n # this occurs when an extension has 'en' translations that\n # replace the default strings. As set_lang has not been run,\n # pylons.translation is the NullTranslation, so we have to\n # replace the StackedObjectProxy ourselves manually.\n environ = pylons.request.environ\n environ['pylons.pylons'].translator = translator\n if 'paste.registry' in environ:\n environ['paste.registry'].replace(pylons.translator,\n translator)\n\n\ndef get_lang():\n ''' Returns the current language. Based on babel.i18n.get_lang but\n works when set_lang has not been run (i.e. still in English). 
'''\n langs = i18n.get_lang()\n if langs:\n return langs[0]\n else:\n return 'en'\n\n\ndef set_lang(language_code):\n ''' Wrapper to pylons call '''\n if language_code in non_translated_locals():\n language_code = config.get('ckan.locale_default', 'en')\n if language_code != 'en':\n _set_lang(language_code)\n", "path": "ckan/lib/i18n.py"}], "after_files": [{"content": "# encoding: utf-8\n\nimport os\n\nfrom babel import Locale\nfrom babel.core import (LOCALE_ALIASES,\n get_locale_identifier,\n UnknownLocaleError)\nfrom babel.support import Translations\nfrom paste.deploy.converters import aslist\nfrom pylons import i18n\nimport pylons\n\n\nfrom ckan.common import config\nimport ckan.i18n\nfrom ckan.plugins import PluginImplementations\nfrom ckan.plugins.interfaces import ITranslation\n\n# Default Portuguese language to Brazilian territory, since\n# we don't have a Portuguese territory translation currently.\nLOCALE_ALIASES['pt'] = 'pt_BR'\n\n\ndef get_locales_from_config():\n ''' despite the name of this function it gets the locales defined by\n the config AND also the locals available subject to the config. '''\n locales_offered = config.get('ckan.locales_offered', '').split()\n filtered_out = config.get('ckan.locales_filtered_out', '').split()\n locale_default = [config.get('ckan.locale_default', 'en')]\n locale_order = config.get('ckan.locale_order', '').split()\n\n known_locales = get_locales()\n all_locales = (set(known_locales) |\n set(locales_offered) |\n set(locale_order) |\n set(locale_default))\n all_locales -= set(filtered_out)\n return all_locales\n\n\ndef _get_locales():\n # FIXME this wants cleaning up and merging with get_locales_from_config()\n assert not config.get('lang'), \\\n ('\"lang\" config option not supported - please use ckan.locale_default '\n 'instead.')\n locales_offered = config.get('ckan.locales_offered', '').split()\n filtered_out = config.get('ckan.locales_filtered_out', '').split()\n locale_default = config.get('ckan.locale_default', 'en')\n locale_order = config.get('ckan.locale_order', '').split()\n\n locales = ['en']\n if config.get('ckan.i18n_directory'):\n i18n_path = os.path.join(config.get('ckan.i18n_directory'), 'i18n')\n else:\n i18n_path = os.path.dirname(ckan.i18n.__file__)\n\n # For every file in the ckan i18n directory see if babel can understand\n # the locale. If yes, add it to the available locales\n for locale in os.listdir(i18n_path):\n try:\n Locale.parse(locale)\n locales.append(locale)\n except (ValueError, UnknownLocaleError):\n # Babel does not know how to make a locale out of this.\n # This is fine since we are passing all files in the\n # ckan.i18n_directory here which e.g. 
includes the __init__.py\n pass\n\n assert locale_default in locales, \\\n 'default language \"%s\" not available' % locale_default\n\n locale_list = []\n for locale in locales:\n # no duplicates\n if locale in locale_list:\n continue\n # if offered locales then check locale is offered\n if locales_offered and locale not in locales_offered:\n continue\n # remove if filtered out\n if locale in filtered_out:\n continue\n # ignore the default as it will be added first\n if locale == locale_default:\n continue\n locale_list.append(locale)\n # order the list if specified\n ordered_list = [locale_default]\n for locale in locale_order:\n if locale in locale_list:\n ordered_list.append(locale)\n # added so remove from our list\n locale_list.remove(locale)\n # add any remaining locales not ordered\n ordered_list += locale_list\n\n return ordered_list\n\navailable_locales = None\nlocales = None\nlocales_dict = None\n_non_translated_locals = None\n\n\ndef get_locales():\n ''' Get list of available locales\n e.g. [ 'en', 'de', ... ]\n '''\n global locales\n if not locales:\n locales = _get_locales()\n return locales\n\n\ndef non_translated_locals():\n ''' These are the locales that are available but for which there are\n no translations. returns a list like ['en', 'de', ...] '''\n global _non_translated_locals\n if not _non_translated_locals:\n locales = config.get('ckan.locale_order', '').split()\n _non_translated_locals = [x for x in locales if x not in get_locales()]\n return _non_translated_locals\n\n\ndef get_locales_dict():\n ''' Get a dict of the available locales\n e.g. { 'en' : Locale('en'), 'de' : Locale('de'), ... } '''\n global locales_dict\n if not locales_dict:\n locales = _get_locales()\n locales_dict = {}\n for locale in locales:\n locales_dict[str(locale)] = Locale.parse(locale)\n return locales_dict\n\n\ndef get_available_locales():\n ''' Get a list of the available locales\n e.g. [ Locale('en'), Locale('de'), ... ] '''\n global available_locales\n if not available_locales:\n available_locales = []\n for locale in get_locales():\n # Add the short names for the locales. This equals the filename\n # of the ckan translation files as opposed to the long name\n # that includes the script which is generated by babel\n # so e.g. 
`zn_CH` instead of `zn_Hans_CH` this is needed\n # to properly construct urls with url_for\n parsed_locale = Locale.parse(locale)\n parsed_locale.short_name = locale\n\n # Add the full identifier (eg `pt_BR`) to the locale classes,\n # as it does not offer a way of accessing it directly\n parsed_locale.identifier = \\\n get_identifier_from_locale_class(parsed_locale)\n available_locales.append(parsed_locale)\n return available_locales\n\n\ndef get_identifier_from_locale_class(locale):\n return get_locale_identifier(\n (locale.language,\n locale.territory,\n locale.script,\n locale.variant))\n\n\ndef _set_lang(lang):\n ''' Allows a custom i18n directory to be specified.\n Creates a fake config file to pass to pylons.i18n.set_lang, which\n sets the Pylons root path to desired i18n_directory.\n This is needed as Pylons will only look for an i18n directory in\n the application root.'''\n if config.get('ckan.i18n_directory'):\n fake_config = {'pylons.paths': {'root': config['ckan.i18n_directory']},\n 'pylons.package': config['pylons.package']}\n i18n.set_lang(lang, config=fake_config, class_=Translations)\n else:\n i18n.set_lang(lang, class_=Translations)\n\n\ndef handle_request(request, tmpl_context):\n ''' Set the language for the request '''\n lang = request.environ.get('CKAN_LANG') or \\\n config.get('ckan.locale_default', 'en')\n if lang != 'en':\n set_lang(lang)\n\n for plugin in PluginImplementations(ITranslation):\n if lang in plugin.i18n_locales():\n _add_extra_translations(plugin.i18n_directory(), lang,\n plugin.i18n_domain())\n\n extra_directory = config.get('ckan.i18n.extra_directory')\n extra_domain = config.get('ckan.i18n.extra_gettext_domain')\n extra_locales = aslist(config.get('ckan.i18n.extra_locales'))\n if extra_directory and extra_domain and extra_locales:\n if lang in extra_locales:\n _add_extra_translations(extra_directory, lang, extra_domain)\n\n tmpl_context.language = lang\n return lang\n\n\ndef _add_extra_translations(dirname, locales, domain):\n translator = Translations.load(dirname=dirname, locales=locales,\n domain=domain)\n try:\n pylons.translator.merge(translator)\n except AttributeError:\n # this occurs when an extension has 'en' translations that\n # replace the default strings. As set_lang has not been run,\n # pylons.translation is the NullTranslation, so we have to\n # replace the StackedObjectProxy ourselves manually.\n environ = pylons.request.environ\n environ['pylons.pylons'].translator = translator\n if 'paste.registry' in environ:\n environ['paste.registry'].replace(pylons.translator,\n translator)\n\n\ndef get_lang():\n ''' Returns the current language. Based on babel.i18n.get_lang but\n works when set_lang has not been run (i.e. still in English). '''\n langs = i18n.get_lang()\n if langs:\n return langs[0]\n else:\n return 'en'\n\n\ndef set_lang(language_code):\n ''' Wrapper to pylons call '''\n if language_code in non_translated_locals():\n language_code = config.get('ckan.locale_default', 'en')\n if language_code != 'en':\n _set_lang(language_code)\n", "path": "ckan/lib/i18n.py"}]}
3,100
188
gh_patches_debug_30257
rasdani/github-patches
git_diff
SeldonIO__MLServer-317
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- gRPC: Received message larger than max (11059277 vs. 4194304) We are using mlserver (gRPC) for serving custom PyTorch ML Model and are sending images of size 640x640x3 . But because of the size we are getting the below error: > status = StatusCode.RESOURCE_EXHAUSTED > details = "Received message larger than max (11059277 vs. 4194304)" > debug_error_string = "{"created":"@1630074151.799330000","description":"Error received from peer ipv4:0.0.0.0:28081","file":"src/core/lib/surface/call.cc","file_line":1070,"grpc_message":"Received message larger than max (11059277 vs. 4194304)","grpc_status":8}" Setting the following on client side did not work: ``` maxMsgLength = 20 * 1024 * 1024 ch_options = [('grpc.max_message_length', maxMsgLength), ('grpc.max_send_message_length', maxMsgLength), ('grpc.max_receive_message_length', maxMsgLength)] channel = grpc.insecure_channel(f'{host}:{port}', options=ch_options) ``` However setting it on server side worked: ``` def _create_server(self): self._inference_servicer = InferenceServicer(self._data_plane) self._model_repository_servicer = ModelRepositoryServicer( self._model_repository_handlers ) channel_opt = [('grpc.max_send_message_length', 20 * 1024 * 1024), ('grpc.max_receive_message_length', 20 * 1024 * 1024)] self._server = aio.server( ThreadPoolExecutor(max_workers=self._settings.grpc_workers), options = channel_opt ) add_GRPCInferenceServiceServicer_to_server( self._inference_servicer, self._server ) ``` But this is not a proper solution, since our mlserver runs in a docker and does a pip install. This should be part of the server config in MLServer. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `mlserver/grpc/server.py` Content: ``` 1 from grpc import aio 2 from concurrent.futures import ThreadPoolExecutor 3 4 from ..handlers import DataPlane, ModelRepositoryHandlers 5 from ..settings import Settings 6 7 from .servicers import InferenceServicer, ModelRepositoryServicer 8 from .dataplane_pb2_grpc import add_GRPCInferenceServiceServicer_to_server 9 from .model_repository_pb2_grpc import add_ModelRepositoryServiceServicer_to_server 10 11 12 class GRPCServer: 13 def __init__( 14 self, 15 settings: Settings, 16 data_plane: DataPlane, 17 model_repository_handlers: ModelRepositoryHandlers, 18 ): 19 self._settings = settings 20 self._data_plane = data_plane 21 self._model_repository_handlers = model_repository_handlers 22 23 def _create_server(self): 24 self._inference_servicer = InferenceServicer(self._data_plane) 25 self._model_repository_servicer = ModelRepositoryServicer( 26 self._model_repository_handlers 27 ) 28 self._server = aio.server( 29 ThreadPoolExecutor(max_workers=self._settings.grpc_workers) 30 ) 31 32 add_GRPCInferenceServiceServicer_to_server( 33 self._inference_servicer, self._server 34 ) 35 add_ModelRepositoryServiceServicer_to_server( 36 self._model_repository_servicer, self._server 37 ) 38 39 self._server.add_insecure_port( 40 f"{self._settings.host}:{self._settings.grpc_port}" 41 ) 42 43 return self._server 44 45 async def start(self): 46 self._create_server() 47 48 await self._server.start() 49 await self._server.wait_for_termination() 50 51 async def stop(self): 52 # TODO: Read from config 53 await self._server.stop(grace=5) 54 ``` Path: `mlserver/settings.py` Content: ``` 1 from typing import List, Optional 2 from pydantic import BaseSettings, PyObject 3 4 from .version import __version__ 5 from .types import MetadataTensor 6 7 ENV_PREFIX_SETTINGS = "MLSERVER_" 8 ENV_PREFIX_MODEL_SETTINGS = "MLSERVER_MODEL_" 9 10 11 class Settings(BaseSettings): 12 class Config: 13 env_prefix = ENV_PREFIX_SETTINGS 14 15 debug: bool = True 16 17 # Model repository folder 18 model_repository_root: str = "." 19 load_models_at_startup: bool = True 20 21 # Server metadata 22 server_name: str = "mlserver" 23 server_version: str = __version__ 24 extensions: List[str] = [] 25 26 # Server settings 27 host: str = "0.0.0.0" 28 http_port: int = 8080 29 grpc_port: int = 8081 30 grpc_workers: int = 10 31 32 33 class ModelParameters(BaseSettings): 34 """ 35 Parameters that apply only to a particular instance of a model. 36 This can include things like model weights. 37 The main difference with respect to ModelSettings is that parameters can 38 change on each instance (e.g. each version) of the model. 39 """ 40 41 class Config: 42 env_prefix = ENV_PREFIX_MODEL_SETTINGS 43 44 uri: Optional[str] = None 45 version: Optional[str] = None 46 format: Optional[str] = None 47 extra: Optional[dict] = {} 48 49 50 class ModelSettings(BaseSettings): 51 class Config: 52 env_prefix = ENV_PREFIX_MODEL_SETTINGS 53 54 name: str = "" 55 56 # Model metadata 57 platform: str = "" 58 versions: Optional[List[str]] = [] 59 inputs: Optional[List[MetadataTensor]] = [] 60 outputs: Optional[List[MetadataTensor]] = [] 61 62 # Parallel settings 63 parallel_workers: Optional[int] = 4 64 65 # Custom model class implementation 66 implementation: PyObject = "mlserver.model.MLModel" # type: ignore 67 68 # Model parameters are meant to be set directly by the MLServer runtime. 69 # However, it's also possible to override them manually. 
70 parameters: Optional[ModelParameters] = None 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mlserver/grpc/server.py b/mlserver/grpc/server.py --- a/mlserver/grpc/server.py +++ b/mlserver/grpc/server.py @@ -1,5 +1,6 @@ from grpc import aio from concurrent.futures import ThreadPoolExecutor +from typing import Any, List, Tuple from ..handlers import DataPlane, ModelRepositoryHandlers from ..settings import Settings @@ -25,8 +26,10 @@ self._model_repository_servicer = ModelRepositoryServicer( self._model_repository_handlers ) + self._server = aio.server( - ThreadPoolExecutor(max_workers=self._settings.grpc_workers) + ThreadPoolExecutor(max_workers=self._settings.grpc_workers), + options=self._get_options(), ) add_GRPCInferenceServiceServicer_to_server( @@ -42,6 +45,19 @@ return self._server + def _get_options(self) -> List[Tuple[str, Any]]: + options = [] + + max_message_length = self._settings.grpc_max_message_length + if max_message_length is not None: + options += [ + ("grpc.max_message_length", max_message_length), + ("grpc.max_send_message_length", max_message_length), + ("grpc.max_receive_message_length", max_message_length), + ] + + return options + async def start(self): self._create_server() diff --git a/mlserver/settings.py b/mlserver/settings.py --- a/mlserver/settings.py +++ b/mlserver/settings.py @@ -28,6 +28,7 @@ http_port: int = 8080 grpc_port: int = 8081 grpc_workers: int = 10 + grpc_max_message_length: Optional[int] = None class ModelParameters(BaseSettings):
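The patch above threads a new optional `grpc_max_message_length` setting from `Settings` into the gRPC server's channel options, so the 4 MiB default can be raised through configuration rather than by editing `server.py` inside the container. A small usage sketch follows; the keyword-argument construction and the `MLSERVER_GRPC_MAX_MESSAGE_LENGTH` environment-variable name are inferred from the pydantic `BaseSettings` base class and the `MLSERVER_` prefix shown in `settings.py`, so treat them as assumptions rather than documented API:

```python
# Illustrative sketch of configuring the new limit (assumed API, see note above).
from mlserver.settings import Settings

max_len = 20 * 1024 * 1024  # 20 MiB, matching the work-around quoted in the issue
settings = Settings(grpc_max_message_length=max_len)

# GRPCServer._get_options() would then emit the options that previously had to
# be hard-coded into aio.server(...):
print(
    [
        ("grpc.max_message_length", settings.grpc_max_message_length),
        ("grpc.max_send_message_length", settings.grpc_max_message_length),
        ("grpc.max_receive_message_length", settings.grpc_max_message_length),
    ]
)

# Because Settings declares env_prefix "MLSERVER_", the same value should also
# be settable from the environment, e.g. MLSERVER_GRPC_MAX_MESSAGE_LENGTH=20971520.
```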
{"golden_diff": "diff --git a/mlserver/grpc/server.py b/mlserver/grpc/server.py\n--- a/mlserver/grpc/server.py\n+++ b/mlserver/grpc/server.py\n@@ -1,5 +1,6 @@\n from grpc import aio\n from concurrent.futures import ThreadPoolExecutor\n+from typing import Any, List, Tuple\n \n from ..handlers import DataPlane, ModelRepositoryHandlers\n from ..settings import Settings\n@@ -25,8 +26,10 @@\n self._model_repository_servicer = ModelRepositoryServicer(\n self._model_repository_handlers\n )\n+\n self._server = aio.server(\n- ThreadPoolExecutor(max_workers=self._settings.grpc_workers)\n+ ThreadPoolExecutor(max_workers=self._settings.grpc_workers),\n+ options=self._get_options(),\n )\n \n add_GRPCInferenceServiceServicer_to_server(\n@@ -42,6 +45,19 @@\n \n return self._server\n \n+ def _get_options(self) -> List[Tuple[str, Any]]:\n+ options = []\n+\n+ max_message_length = self._settings.grpc_max_message_length\n+ if max_message_length is not None:\n+ options += [\n+ (\"grpc.max_message_length\", max_message_length),\n+ (\"grpc.max_send_message_length\", max_message_length),\n+ (\"grpc.max_receive_message_length\", max_message_length),\n+ ]\n+\n+ return options\n+\n async def start(self):\n self._create_server()\n \ndiff --git a/mlserver/settings.py b/mlserver/settings.py\n--- a/mlserver/settings.py\n+++ b/mlserver/settings.py\n@@ -28,6 +28,7 @@\n http_port: int = 8080\n grpc_port: int = 8081\n grpc_workers: int = 10\n+ grpc_max_message_length: Optional[int] = None\n \n \n class ModelParameters(BaseSettings):\n", "issue": "gRPC: Received message larger than max (11059277 vs. 4194304)\nWe are using mlserver (gRPC) for serving custom PyTorch ML Model and are sending images of size 640x640x3 . But because of the size we are getting the below error:\r\n\r\n> status = StatusCode.RESOURCE_EXHAUSTED\r\n> \tdetails = \"Received message larger than max (11059277 vs. 4194304)\"\r\n> \tdebug_error_string = \"{\"created\":\"@1630074151.799330000\",\"description\":\"Error received from peer ipv4:0.0.0.0:28081\",\"file\":\"src/core/lib/surface/call.cc\",\"file_line\":1070,\"grpc_message\":\"Received message larger than max (11059277 vs. 
4194304)\",\"grpc_status\":8}\"\r\n\r\nSetting the following on client side did not work:\r\n```\r\nmaxMsgLength = 20 * 1024 * 1024\r\nch_options = [('grpc.max_message_length', maxMsgLength),\r\n ('grpc.max_send_message_length', maxMsgLength),\r\n ('grpc.max_receive_message_length', maxMsgLength)]\r\nchannel = grpc.insecure_channel(f'{host}:{port}', options=ch_options)\r\n\r\n```\r\nHowever setting it on server side worked:\r\n```\r\ndef _create_server(self):\r\n self._inference_servicer = InferenceServicer(self._data_plane)\r\n self._model_repository_servicer = ModelRepositoryServicer(\r\n self._model_repository_handlers\r\n )\r\n channel_opt = [('grpc.max_send_message_length', 20 * 1024 * 1024), ('grpc.max_receive_message_length', 20 * 1024 * 1024)]\r\n self._server = aio.server(\r\n ThreadPoolExecutor(max_workers=self._settings.grpc_workers), options = channel_opt\r\n )\r\n\r\n add_GRPCInferenceServiceServicer_to_server(\r\n self._inference_servicer, self._server\r\n )\r\n\r\n```\r\nBut this is not a proper solution, since our mlserver runs in a docker and does a pip install.\r\nThis should be part of the server config in MLServer.\n", "before_files": [{"content": "from grpc import aio\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom ..handlers import DataPlane, ModelRepositoryHandlers\nfrom ..settings import Settings\n\nfrom .servicers import InferenceServicer, ModelRepositoryServicer\nfrom .dataplane_pb2_grpc import add_GRPCInferenceServiceServicer_to_server\nfrom .model_repository_pb2_grpc import add_ModelRepositoryServiceServicer_to_server\n\n\nclass GRPCServer:\n def __init__(\n self,\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n ):\n self._settings = settings\n self._data_plane = data_plane\n self._model_repository_handlers = model_repository_handlers\n\n def _create_server(self):\n self._inference_servicer = InferenceServicer(self._data_plane)\n self._model_repository_servicer = ModelRepositoryServicer(\n self._model_repository_handlers\n )\n self._server = aio.server(\n ThreadPoolExecutor(max_workers=self._settings.grpc_workers)\n )\n\n add_GRPCInferenceServiceServicer_to_server(\n self._inference_servicer, self._server\n )\n add_ModelRepositoryServiceServicer_to_server(\n self._model_repository_servicer, self._server\n )\n\n self._server.add_insecure_port(\n f\"{self._settings.host}:{self._settings.grpc_port}\"\n )\n\n return self._server\n\n async def start(self):\n self._create_server()\n\n await self._server.start()\n await self._server.wait_for_termination()\n\n async def stop(self):\n # TODO: Read from config\n await self._server.stop(grace=5)\n", "path": "mlserver/grpc/server.py"}, {"content": "from typing import List, Optional\nfrom pydantic import BaseSettings, PyObject\n\nfrom .version import __version__\nfrom .types import MetadataTensor\n\nENV_PREFIX_SETTINGS = \"MLSERVER_\"\nENV_PREFIX_MODEL_SETTINGS = \"MLSERVER_MODEL_\"\n\n\nclass Settings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_SETTINGS\n\n debug: bool = True\n\n # Model repository folder\n model_repository_root: str = \".\"\n load_models_at_startup: bool = True\n\n # Server metadata\n server_name: str = \"mlserver\"\n server_version: str = __version__\n extensions: List[str] = []\n\n # Server settings\n host: str = \"0.0.0.0\"\n http_port: int = 8080\n grpc_port: int = 8081\n grpc_workers: int = 10\n\n\nclass ModelParameters(BaseSettings):\n \"\"\"\n Parameters that apply only to a particular instance of a model.\n This can 
include things like model weights.\n The main difference with respect to ModelSettings is that parameters can\n change on each instance (e.g. each version) of the model.\n \"\"\"\n\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n uri: Optional[str] = None\n version: Optional[str] = None\n format: Optional[str] = None\n extra: Optional[dict] = {}\n\n\nclass ModelSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n name: str = \"\"\n\n # Model metadata\n platform: str = \"\"\n versions: Optional[List[str]] = []\n inputs: Optional[List[MetadataTensor]] = []\n outputs: Optional[List[MetadataTensor]] = []\n\n # Parallel settings\n parallel_workers: Optional[int] = 4\n\n # Custom model class implementation\n implementation: PyObject = \"mlserver.model.MLModel\" # type: ignore\n\n # Model parameters are meant to be set directly by the MLServer runtime.\n # However, it's also possible to override them manually.\n parameters: Optional[ModelParameters] = None\n", "path": "mlserver/settings.py"}], "after_files": [{"content": "from grpc import aio\nfrom concurrent.futures import ThreadPoolExecutor\nfrom typing import Any, List, Tuple\n\nfrom ..handlers import DataPlane, ModelRepositoryHandlers\nfrom ..settings import Settings\n\nfrom .servicers import InferenceServicer, ModelRepositoryServicer\nfrom .dataplane_pb2_grpc import add_GRPCInferenceServiceServicer_to_server\nfrom .model_repository_pb2_grpc import add_ModelRepositoryServiceServicer_to_server\n\n\nclass GRPCServer:\n def __init__(\n self,\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n ):\n self._settings = settings\n self._data_plane = data_plane\n self._model_repository_handlers = model_repository_handlers\n\n def _create_server(self):\n self._inference_servicer = InferenceServicer(self._data_plane)\n self._model_repository_servicer = ModelRepositoryServicer(\n self._model_repository_handlers\n )\n\n self._server = aio.server(\n ThreadPoolExecutor(max_workers=self._settings.grpc_workers),\n options=self._get_options(),\n )\n\n add_GRPCInferenceServiceServicer_to_server(\n self._inference_servicer, self._server\n )\n add_ModelRepositoryServiceServicer_to_server(\n self._model_repository_servicer, self._server\n )\n\n self._server.add_insecure_port(\n f\"{self._settings.host}:{self._settings.grpc_port}\"\n )\n\n return self._server\n\n def _get_options(self) -> List[Tuple[str, Any]]:\n options = []\n\n max_message_length = self._settings.grpc_max_message_length\n if max_message_length is not None:\n options += [\n (\"grpc.max_message_length\", max_message_length),\n (\"grpc.max_send_message_length\", max_message_length),\n (\"grpc.max_receive_message_length\", max_message_length),\n ]\n\n return options\n\n async def start(self):\n self._create_server()\n\n await self._server.start()\n await self._server.wait_for_termination()\n\n async def stop(self):\n # TODO: Read from config\n await self._server.stop(grace=5)\n", "path": "mlserver/grpc/server.py"}, {"content": "from typing import List, Optional\nfrom pydantic import BaseSettings, PyObject\n\nfrom .version import __version__\nfrom .types import MetadataTensor\n\nENV_PREFIX_SETTINGS = \"MLSERVER_\"\nENV_PREFIX_MODEL_SETTINGS = \"MLSERVER_MODEL_\"\n\n\nclass Settings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_SETTINGS\n\n debug: bool = True\n\n # Model repository folder\n model_repository_root: str = \".\"\n load_models_at_startup: bool = True\n\n # Server metadata\n server_name: 
str = \"mlserver\"\n server_version: str = __version__\n extensions: List[str] = []\n\n # Server settings\n host: str = \"0.0.0.0\"\n http_port: int = 8080\n grpc_port: int = 8081\n grpc_workers: int = 10\n grpc_max_message_length: Optional[int] = None\n\n\nclass ModelParameters(BaseSettings):\n \"\"\"\n Parameters that apply only to a particular instance of a model.\n This can include things like model weights.\n The main difference with respect to ModelSettings is that parameters can\n change on each instance (e.g. each version) of the model.\n \"\"\"\n\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n uri: Optional[str] = None\n version: Optional[str] = None\n format: Optional[str] = None\n extra: Optional[dict] = {}\n\n\nclass ModelSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n name: str = \"\"\n\n # Model metadata\n platform: str = \"\"\n versions: Optional[List[str]] = []\n inputs: Optional[List[MetadataTensor]] = []\n outputs: Optional[List[MetadataTensor]] = []\n\n # Parallel settings\n parallel_workers: Optional[int] = 4\n\n # Custom model class implementation\n implementation: PyObject = \"mlserver.model.MLModel\" # type: ignore\n\n # Model parameters are meant to be set directly by the MLServer runtime.\n # However, it's also possible to override them manually.\n parameters: Optional[ModelParameters] = None\n", "path": "mlserver/settings.py"}]}
1,834
399
gh_patches_debug_9670
rasdani/github-patches
git_diff
paperless-ngx__paperless-ngx-1500
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Internal server error @ fresh install ### Description Get "Internal server error" with no description after a fresh install. Have I missed a step, or is this a bug? ### Steps to reproduce ... ### Webserver logs ```bash Nothing shows up using 'docker-compose logs -f' webserver_1 | [2022-08-30 23:21:18 +0000] [44] [CRITICAL] WORKER TIMEOUT (pid:47) webserver_1 | [2022-08-30 23:21:18 +0000] [44] [WARNING] Worker with pid 47 was terminated due to signal 6 ``` ### Paperless-ngx version 1.8.0 ### Host OS CentOS 7.9 -fully patched with latest patches ### Installation method Docker - official image ### Browser Firefox ### Configuration changes none ### Other ... --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gunicorn.conf.py` Content: ``` 1 import os 2 3 bind = f'{os.getenv("PAPERLESS_BIND_ADDR", "[::]")}:{os.getenv("PAPERLESS_PORT", 8000)}' 4 workers = int(os.getenv("PAPERLESS_WEBSERVER_WORKERS", 1)) 5 worker_class = "paperless.workers.ConfigurableWorker" 6 timeout = 120 7 8 9 def pre_fork(server, worker): 10 pass 11 12 13 def pre_exec(server): 14 server.log.info("Forked child, re-executing.") 15 16 17 def when_ready(server): 18 server.log.info("Server is ready. Spawning workers") 19 20 21 def worker_int(worker): 22 worker.log.info("worker received INT or QUIT signal") 23 24 ## get traceback info 25 import threading, sys, traceback 26 27 id2name = {th.ident: th.name for th in threading.enumerate()} 28 code = [] 29 for threadId, stack in sys._current_frames().items(): 30 code.append("\n# Thread: %s(%d)" % (id2name.get(threadId, ""), threadId)) 31 for filename, lineno, name, line in traceback.extract_stack(stack): 32 code.append('File: "%s", line %d, in %s' % (filename, lineno, name)) 33 if line: 34 code.append(" %s" % (line.strip())) 35 worker.log.debug("\n".join(code)) 36 37 38 def worker_abort(worker): 39 worker.log.info("worker received SIGABRT signal") 40 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gunicorn.conf.py b/gunicorn.conf.py --- a/gunicorn.conf.py +++ b/gunicorn.conf.py @@ -1,9 +1,17 @@ import os +# See https://docs.gunicorn.org/en/stable/settings.html for +# explanations of settings + bind = f'{os.getenv("PAPERLESS_BIND_ADDR", "[::]")}:{os.getenv("PAPERLESS_PORT", 8000)}' + workers = int(os.getenv("PAPERLESS_WEBSERVER_WORKERS", 1)) worker_class = "paperless.workers.ConfigurableWorker" timeout = 120 +preload_app = True + +# https://docs.gunicorn.org/en/stable/faq.html#blocking-os-fchmod +worker_tmp_dir = "/dev/shm" def pre_fork(server, worker):
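Both added settings target the `WORKER TIMEOUT` seen in the logs: `worker_tmp_dir = "/dev/shm"` moves gunicorn's per-worker heartbeat file onto a tmpfs so a slow or blocking filesystem (the `os.fchmod` FAQ entry referenced in the new comment) cannot stall the heartbeat past the 120 s timeout, and `preload_app = True` imports the application once in the master process before forking, so slow start-up is not charged against each worker. A small, Linux-only sketch for checking the assumption the fix relies on, namely that `/dev/shm` exists and is mounted as tmpfs inside the container (paths and expected output are illustrative):

```python
# Verify that /dev/shm is present and mounted as tmpfs (Linux-only sketch).
from pathlib import Path

heartbeat_dir = Path("/dev/shm")
print(heartbeat_dir.is_dir())  # expected: True inside the official image

with open("/proc/mounts") as mounts:
    shm_entries = [line.strip() for line in mounts if " /dev/shm " in line]
print(shm_entries)  # expected: a single entry whose filesystem type is tmpfs
```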
{"golden_diff": "diff --git a/gunicorn.conf.py b/gunicorn.conf.py\n--- a/gunicorn.conf.py\n+++ b/gunicorn.conf.py\n@@ -1,9 +1,17 @@\n import os\n \n+# See https://docs.gunicorn.org/en/stable/settings.html for\n+# explanations of settings\n+\n bind = f'{os.getenv(\"PAPERLESS_BIND_ADDR\", \"[::]\")}:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\n+\n workers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\n worker_class = \"paperless.workers.ConfigurableWorker\"\n timeout = 120\n+preload_app = True\n+\n+# https://docs.gunicorn.org/en/stable/faq.html#blocking-os-fchmod\n+worker_tmp_dir = \"/dev/shm\"\n \n \n def pre_fork(server, worker):\n", "issue": "[BUG] Internal server error @ fresh install\n### Description\n\nGet \"Internal server error\" with no description after a fresh install. Have I missed a step, or is this a bug?\n\n### Steps to reproduce\n\n...\n\n### Webserver logs\n\n```bash\nNothing shows up using 'docker-compose logs -f'\r\n\r\nwebserver_1 | [2022-08-30 23:21:18 +0000] [44] [CRITICAL] WORKER TIMEOUT (pid:47)\r\nwebserver_1 | [2022-08-30 23:21:18 +0000] [44] [WARNING] Worker with pid 47 was terminated due to signal 6\n```\n\n\n### Paperless-ngx version\n\n1.8.0\n\n### Host OS\n\nCentOS 7.9 -fully patched with latest patches\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nFirefox\n\n### Configuration changes\n\nnone\n\n### Other\n\n...\n", "before_files": [{"content": "import os\n\nbind = f'{os.getenv(\"PAPERLESS_BIND_ADDR\", \"[::]\")}:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\nworkers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\nworker_class = \"paperless.workers.ConfigurableWorker\"\ntimeout = 120\n\n\ndef pre_fork(server, worker):\n pass\n\n\ndef pre_exec(server):\n server.log.info(\"Forked child, re-executing.\")\n\n\ndef when_ready(server):\n server.log.info(\"Server is ready. Spawning workers\")\n\n\ndef worker_int(worker):\n worker.log.info(\"worker received INT or QUIT signal\")\n\n ## get traceback info\n import threading, sys, traceback\n\n id2name = {th.ident: th.name for th in threading.enumerate()}\n code = []\n for threadId, stack in sys._current_frames().items():\n code.append(\"\\n# Thread: %s(%d)\" % (id2name.get(threadId, \"\"), threadId))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n code.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n code.append(\" %s\" % (line.strip()))\n worker.log.debug(\"\\n\".join(code))\n\n\ndef worker_abort(worker):\n worker.log.info(\"worker received SIGABRT signal\")\n", "path": "gunicorn.conf.py"}], "after_files": [{"content": "import os\n\n# See https://docs.gunicorn.org/en/stable/settings.html for\n# explanations of settings\n\nbind = f'{os.getenv(\"PAPERLESS_BIND_ADDR\", \"[::]\")}:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\n\nworkers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\nworker_class = \"paperless.workers.ConfigurableWorker\"\ntimeout = 120\npreload_app = True\n\n# https://docs.gunicorn.org/en/stable/faq.html#blocking-os-fchmod\nworker_tmp_dir = \"/dev/shm\"\n\n\ndef pre_fork(server, worker):\n pass\n\n\ndef pre_exec(server):\n server.log.info(\"Forked child, re-executing.\")\n\n\ndef when_ready(server):\n server.log.info(\"Server is ready. 
Spawning workers\")\n\n\ndef worker_int(worker):\n worker.log.info(\"worker received INT or QUIT signal\")\n\n ## get traceback info\n import threading, sys, traceback\n\n id2name = {th.ident: th.name for th in threading.enumerate()}\n code = []\n for threadId, stack in sys._current_frames().items():\n code.append(\"\\n# Thread: %s(%d)\" % (id2name.get(threadId, \"\"), threadId))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n code.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n code.append(\" %s\" % (line.strip()))\n worker.log.debug(\"\\n\".join(code))\n\n\ndef worker_abort(worker):\n worker.log.info(\"worker received SIGABRT signal\")\n", "path": "gunicorn.conf.py"}]}
864
178
gh_patches_debug_18269
rasdani/github-patches
git_diff
cobbler__cobbler-3662
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cobbler rejects InfiniBand MAC addresses ### Describe the bug InfiniBand uses 128-bit MAC addresses like 80:00:02:08:fe:80:00:00:00:00:00:00:f4:52:14:03:00:96:79:31 ### Steps to reproduce 1. Try to configure an infiniband network interface with MAC address ### Expected behavior No error. ### Cobbler version <!--- Paste output from `cobbler version` --> ````paste below 3.2.2 ```` ### Operating system EL9 ### Cobbler log <!--- Paste (partial) output from `/var/log/cobbler/cobbler.log` --> ````paste below 2024-03-12T14:12:29 - INFO | Exception occurred: <class 'cobbler.cexceptions.CX'> 2024-03-12T14:12:29 - INFO | Exception value: 'Invalid mac address format (80:00:02:08:fe:80:00:00:00:00:00:00:f4:52:14:03:00:96:79:31)' 2024-03-12T14:12:29 - INFO | Exception Info: File "/usr/lib/python3.9/site-packages/cobbler/remote.py", line 3453, in _dispatch return method_handle(*params) File "/usr/lib/python3.9/site-packages/cobbler/remote.py", line 1643, in modify_system return self.modify_item("system", object_id, attribute, arg, token) File "/usr/lib/python3.9/site-packages/cobbler/remote.py", line 1606, in modify_item method(arg) File "/usr/lib/python3.9/site-packages/cobbler/items/system.py", line 753, in modify_interface self.set_mac_address(value, interface) File "/usr/lib/python3.9/site-packages/cobbler/items/system.py", line 384, in set_mac_address address = validate.mac_address(address) File "/usr/lib/python3.9/site-packages/cobbler/validate.py", line 111, in mac_address raise CX("Invalid mac address format (%s)" % mac) ```` ### Additional information There is a request for support in netaddr, but it hasn't gone anywhere: https://github.com/netaddr/netaddr/issues/88 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cobbler/validate.py` Content: ``` 1 """ 2 Copyright 2014-2015. Jorgen Maas <[email protected]> 3 4 This program is free software; you can redistribute it and/or modify 5 it under the terms of the GNU General Public License as published by 6 the Free Software Foundation; either version 2 of the License, or 7 (at your option) any later version. 8 9 This program is distributed in the hope that it will be useful, 10 but WITHOUT ANY WARRANTY; without even the implied warranty of 11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 GNU General Public License for more details. 
13 14 You should have received a copy of the GNU General Public License 15 along with this program; if not, write to the Free Software 16 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 17 02110-1301 USA 18 """ 19 from typing import Union 20 from uuid import UUID 21 22 import netaddr 23 import re 24 import shlex 25 26 from cobbler.cexceptions import CX 27 28 RE_OBJECT_NAME = re.compile(r'[a-zA-Z0-9_\-.:]*$') 29 RE_HOSTNAME = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$') 30 RE_SCRIPT_NAME = re.compile(r"[a-zA-Z0-9_\-.]+") 31 32 REPO_BREEDS = ["rsync", "rhn", "yum", "apt", "wget"] 33 34 VIRT_TYPES = ["<<inherit>>", "xenpv", "xenfv", "qemu", "kvm", "vmware", "openvz"] 35 VIRT_DISK_DRIVERS = ["<<inherit>>", "raw", "qcow2", "qed", "vdi", "vmdk"] 36 37 # blacklist invalid values to the repo statement in autoinsts 38 AUTOINSTALL_REPO_BLACKLIST = ['enabled', 'gpgcheck', 'gpgkey'] 39 40 41 def object_name(name: str, parent: str) -> str: 42 """ 43 Validate the object name. 44 45 :param name: object name 46 :param parent: Parent object name 47 :returns: name or CX 48 """ 49 if not isinstance(name, str) or not isinstance(parent, str): 50 raise CX("Invalid input, name and parent must be strings") 51 else: 52 name = name.strip() 53 parent = parent.strip() 54 55 if name != "" and parent != "" and name == parent: 56 raise CX("Self parentage is not allowed") 57 58 if not RE_OBJECT_NAME.match(name): 59 raise CX("Invalid characters in name: '%s'" % name) 60 61 return name 62 63 64 def hostname(dnsname: str) -> str: 65 """ 66 Validate the DNS name. 67 68 :param dnsname: Hostname or FQDN 69 :returns: dnsname 70 :raises CX: If the Hostname/FQDN is not a string or in an invalid format. 71 """ 72 if not isinstance(dnsname, str): 73 raise CX("Invalid input, dnsname must be a string") 74 else: 75 dnsname = dnsname.strip() 76 77 if dnsname == "": 78 # hostname is not required 79 return dnsname 80 81 if not RE_HOSTNAME.match(dnsname): 82 raise CX("Invalid hostname format (%s)" % dnsname) 83 84 return dnsname 85 86 87 def mac_address(mac: str, for_item=True) -> str: 88 """ 89 Validate as an Eternet MAC address. 90 91 :param mac: MAC address 92 :param for_item: If the check should be performed for an item or not. 93 :returns: str mac or CX 94 """ 95 if not isinstance(mac, str): 96 raise CX("Invalid input, mac must be a string") 97 else: 98 mac = mac.lower().strip() 99 100 if for_item is True: 101 # this value has special meaning for items 102 if mac == "random": 103 return mac 104 105 # copying system collection will set mac to "" 106 # netaddr will fail to validate this mac and throw an exception 107 if mac == "": 108 return mac 109 110 if not netaddr.valid_mac(mac): 111 raise CX("Invalid mac address format (%s)" % mac) 112 113 return mac 114 115 116 def ipv4_address(addr: str) -> str: 117 """ 118 Validate an IPv4 address. 119 120 :param addr: IPv4 address 121 :returns: str addr or CX 122 """ 123 if not isinstance(addr, str): 124 raise CX("Invalid input, addr must be a string") 125 else: 126 addr = addr.strip() 127 128 if addr == "": 129 return addr 130 131 if not netaddr.valid_ipv4(addr): 132 raise CX("Invalid IPv4 address format (%s)" % addr) 133 134 if netaddr.IPAddress(addr).is_netmask(): 135 raise CX("Invalid IPv4 host address (%s)" % addr) 136 137 return addr 138 139 140 def ipv4_netmask(addr: str) -> str: 141 """ 142 Validate an IPv4 netmask. 
143 144 :param addr: IPv4 netmask 145 :returns: str addr or CX 146 """ 147 if not isinstance(addr, str): 148 raise CX("Invalid input, addr must be a string") 149 else: 150 addr = addr.strip() 151 152 if addr == "": 153 return addr 154 155 if not netaddr.valid_ipv4(addr): 156 raise CX("Invalid IPv4 address format (%s)" % addr) 157 158 if not netaddr.IPAddress(addr).is_netmask(): 159 raise CX("Invalid IPv4 netmask (%s)" % addr) 160 161 return addr 162 163 164 def ipv6_address(addr: str) -> str: 165 """ 166 Validate an IPv6 address. 167 168 :param addr: IPv6 address 169 :returns: The IPv6 address. 170 """ 171 if not isinstance(addr, str): 172 raise CX("Invalid input, addr must be a string") 173 else: 174 addr = addr.strip() 175 176 if addr == "": 177 return addr 178 179 if not netaddr.valid_ipv6(addr): 180 raise CX("Invalid IPv6 address format (%s)" % addr) 181 182 return addr 183 184 185 def name_servers(nameservers: Union[str, list], for_item: bool = True) -> Union[str, list]: 186 """ 187 Validate nameservers IP addresses, works for IPv4 and IPv6 188 189 :param nameservers: string or list of nameserver addresses 190 :param for_item: enable/disable special handling for Item objects 191 :return: The list of valid nameservers. 192 """ 193 if isinstance(nameservers, str): 194 nameservers = nameservers.strip() 195 if for_item is True: 196 # special handling for Items 197 if nameservers in ["<<inherit>>", ""]: 198 return nameservers 199 200 # convert string to a list; do the real validation 201 # in the isinstance(list) code block below 202 nameservers = shlex.split(nameservers) 203 204 if isinstance(nameservers, list): 205 for ns in nameservers: 206 ip_version = netaddr.IPAddress(ns).version 207 if ip_version == 4: 208 ipv4_address(ns) 209 elif ip_version == 6: 210 ipv6_address(ns) 211 else: 212 raise CX("Invalid IP address format") 213 else: 214 raise CX("Invalid input type %s, expected str or list" % type(nameservers)) 215 216 return nameservers 217 218 219 def name_servers_search(search: Union[str, list], for_item: bool = True) -> Union[str, list]: 220 """ 221 Validate nameservers search domains. 222 223 :param search: One or more search domains to validate. 224 :param for_item: (enable/disable special handling for Item objects) 225 :return: The list of valid nameservers. 226 """ 227 if isinstance(search, str): 228 search = search.strip() 229 if for_item is True: 230 # special handling for Items 231 if search in ["<<inherit>>", ""]: 232 return search 233 234 # convert string to a list; do the real validation 235 # in the isinstance(list) code block below 236 search = shlex.split(search) 237 238 if isinstance(search, list): 239 for sl in search: 240 hostname(sl) 241 else: 242 raise CX("Invalid input type %s, expected str or list" % type(search)) 243 244 return search 245 246 247 def validate_autoinstall_script_name(name: str) -> bool: 248 """ 249 This validates if the name given for the script is valid in the context of the API call made. It will be handed to 250 tftpgen.py#generate_script in the end. 251 252 :param name: The name of the script. Will end up being a filename. May have an extension but should never be a path. 253 :return: If this is a valid script name or not. 254 """ 255 if not isinstance(name, str): 256 return False 257 if re.fullmatch(RE_SCRIPT_NAME, name): 258 return True 259 return False 260 261 262 def validate_uuid(possible_uuid: str) -> bool: 263 """ 264 Validate if the handed string is a valid UUIDv4. 265 266 :param possible_uuid: The str with the UUID. 
267 :return: True in case it is one, False otherwise. 268 """ 269 if not isinstance(possible_uuid, str): 270 return False 271 # Taken from: https://stackoverflow.com/a/33245493/4730773 272 try: 273 uuid_obj = UUID(possible_uuid, version=4) 274 except ValueError: 275 return False 276 return str(uuid_obj) == possible_uuid 277 278 279 def validate_obj_type(object_type: str) -> bool: 280 """ 281 282 :param object_type: 283 :return: 284 """ 285 if not isinstance(object_type, str): 286 return False 287 return object_type in ["distro", "profile", "system", "repo", "image", "mgmtclass", "package", "file", "menu"] 288 289 290 def validate_obj_name(object_name: str) -> bool: 291 """ 292 293 :param object_name: 294 :return: 295 """ 296 if not isinstance(object_name, str): 297 return False 298 return bool(re.fullmatch(RE_OBJECT_NAME, object_name)) 299 300 301 def validate_obj_id(object_id: str) -> bool: 302 """ 303 304 :param object_id: 305 :return: True in case it is one, False otherwise. 306 """ 307 if not isinstance(object_id, str): 308 return False 309 if object_id.startswith("___NEW___"): 310 object_id = object_id[9:] 311 (otype, oname) = object_id.split("::", 1) 312 return validate_obj_type(otype) and validate_obj_name(oname) 313 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cobbler/validate.py b/cobbler/validate.py --- a/cobbler/validate.py +++ b/cobbler/validate.py @@ -28,6 +28,9 @@ RE_OBJECT_NAME = re.compile(r'[a-zA-Z0-9_\-.:]*$') RE_HOSTNAME = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$') RE_SCRIPT_NAME = re.compile(r"[a-zA-Z0-9_\-.]+") +RE_INFINIBAND_MAC = re.compile( + "^" + ":".join(["([0-9A-F]{1,2})"] * 20) + "$", re.IGNORECASE +) REPO_BREEDS = ["rsync", "rhn", "yum", "apt", "wget"] @@ -107,7 +110,7 @@ if mac == "": return mac - if not netaddr.valid_mac(mac): + if not netaddr.valid_mac(mac) and RE_INFINIBAND_MAC.match(mac) is None: raise CX("Invalid mac address format (%s)" % mac) return mac
{"golden_diff": "diff --git a/cobbler/validate.py b/cobbler/validate.py\n--- a/cobbler/validate.py\n+++ b/cobbler/validate.py\n@@ -28,6 +28,9 @@\n RE_OBJECT_NAME = re.compile(r'[a-zA-Z0-9_\\-.:]*$')\n RE_HOSTNAME = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])(\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9]))*$')\n RE_SCRIPT_NAME = re.compile(r\"[a-zA-Z0-9_\\-.]+\")\n+RE_INFINIBAND_MAC = re.compile(\n+ \"^\" + \":\".join([\"([0-9A-F]{1,2})\"] * 20) + \"$\", re.IGNORECASE\n+)\n \n REPO_BREEDS = [\"rsync\", \"rhn\", \"yum\", \"apt\", \"wget\"]\n \n@@ -107,7 +110,7 @@\n if mac == \"\":\n return mac\n \n- if not netaddr.valid_mac(mac):\n+ if not netaddr.valid_mac(mac) and RE_INFINIBAND_MAC.match(mac) is None:\n raise CX(\"Invalid mac address format (%s)\" % mac)\n \n return mac\n", "issue": "Cobbler rejects InfiniBand MAC addresses\n### Describe the bug\r\n\r\nInfiniBand uses 128-bit MAC addresses like 80:00:02:08:fe:80:00:00:00:00:00:00:f4:52:14:03:00:96:79:31\r\n\r\n### Steps to reproduce\r\n\r\n1. Try to configure an infiniband network interface with MAC address\r\n\r\n### Expected behavior\r\n\r\nNo error.\r\n\r\n### Cobbler version\r\n\r\n<!--- Paste output from `cobbler version` -->\r\n````paste below\r\n3.2.2\r\n````\r\n\r\n### Operating system\r\n\r\nEL9\r\n\r\n### Cobbler log\r\n\r\n<!--- Paste (partial) output from `/var/log/cobbler/cobbler.log` -->\r\n````paste below\r\n2024-03-12T14:12:29 - INFO | Exception occurred: <class 'cobbler.cexceptions.CX'>\r\n2024-03-12T14:12:29 - INFO | Exception value: 'Invalid mac address format (80:00:02:08:fe:80:00:00:00:00:00:00:f4:52:14:03:00:96:79:31)'\r\n2024-03-12T14:12:29 - INFO | Exception Info:\r\n File \"/usr/lib/python3.9/site-packages/cobbler/remote.py\", line 3453, in _dispatch\r\n return method_handle(*params)\r\n\r\n File \"/usr/lib/python3.9/site-packages/cobbler/remote.py\", line 1643, in modify_system\r\n return self.modify_item(\"system\", object_id, attribute, arg, token)\r\n\r\n File \"/usr/lib/python3.9/site-packages/cobbler/remote.py\", line 1606, in modify_item\r\n method(arg)\r\n\r\n File \"/usr/lib/python3.9/site-packages/cobbler/items/system.py\", line 753, in modify_interface\r\n self.set_mac_address(value, interface)\r\n\r\n File \"/usr/lib/python3.9/site-packages/cobbler/items/system.py\", line 384, in set_mac_address\r\n address = validate.mac_address(address)\r\n\r\n File \"/usr/lib/python3.9/site-packages/cobbler/validate.py\", line 111, in mac_address\r\n raise CX(\"Invalid mac address format (%s)\" % mac)\r\n````\r\n\r\n\r\n### Additional information\r\nThere is a request for support in netaddr, but it hasn't gone anywhere: https://github.com/netaddr/netaddr/issues/88\n", "before_files": [{"content": "\"\"\"\nCopyright 2014-2015. Jorgen Maas <[email protected]>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\nfrom typing import Union\nfrom uuid import UUID\n\nimport netaddr\nimport re\nimport shlex\n\nfrom cobbler.cexceptions import CX\n\nRE_OBJECT_NAME = re.compile(r'[a-zA-Z0-9_\\-.:]*$')\nRE_HOSTNAME = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])(\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9]))*$')\nRE_SCRIPT_NAME = re.compile(r\"[a-zA-Z0-9_\\-.]+\")\n\nREPO_BREEDS = [\"rsync\", \"rhn\", \"yum\", \"apt\", \"wget\"]\n\nVIRT_TYPES = [\"<<inherit>>\", \"xenpv\", \"xenfv\", \"qemu\", \"kvm\", \"vmware\", \"openvz\"]\nVIRT_DISK_DRIVERS = [\"<<inherit>>\", \"raw\", \"qcow2\", \"qed\", \"vdi\", \"vmdk\"]\n\n# blacklist invalid values to the repo statement in autoinsts\nAUTOINSTALL_REPO_BLACKLIST = ['enabled', 'gpgcheck', 'gpgkey']\n\n\ndef object_name(name: str, parent: str) -> str:\n \"\"\"\n Validate the object name.\n\n :param name: object name\n :param parent: Parent object name\n :returns: name or CX\n \"\"\"\n if not isinstance(name, str) or not isinstance(parent, str):\n raise CX(\"Invalid input, name and parent must be strings\")\n else:\n name = name.strip()\n parent = parent.strip()\n\n if name != \"\" and parent != \"\" and name == parent:\n raise CX(\"Self parentage is not allowed\")\n\n if not RE_OBJECT_NAME.match(name):\n raise CX(\"Invalid characters in name: '%s'\" % name)\n\n return name\n\n\ndef hostname(dnsname: str) -> str:\n \"\"\"\n Validate the DNS name.\n\n :param dnsname: Hostname or FQDN\n :returns: dnsname\n :raises CX: If the Hostname/FQDN is not a string or in an invalid format.\n \"\"\"\n if not isinstance(dnsname, str):\n raise CX(\"Invalid input, dnsname must be a string\")\n else:\n dnsname = dnsname.strip()\n\n if dnsname == \"\":\n # hostname is not required\n return dnsname\n\n if not RE_HOSTNAME.match(dnsname):\n raise CX(\"Invalid hostname format (%s)\" % dnsname)\n\n return dnsname\n\n\ndef mac_address(mac: str, for_item=True) -> str:\n \"\"\"\n Validate as an Eternet MAC address.\n\n :param mac: MAC address\n :param for_item: If the check should be performed for an item or not.\n :returns: str mac or CX\n \"\"\"\n if not isinstance(mac, str):\n raise CX(\"Invalid input, mac must be a string\")\n else:\n mac = mac.lower().strip()\n\n if for_item is True:\n # this value has special meaning for items\n if mac == \"random\":\n return mac\n\n # copying system collection will set mac to \"\"\n # netaddr will fail to validate this mac and throw an exception\n if mac == \"\":\n return mac\n\n if not netaddr.valid_mac(mac):\n raise CX(\"Invalid mac address format (%s)\" % mac)\n\n return mac\n\n\ndef ipv4_address(addr: str) -> str:\n \"\"\"\n Validate an IPv4 address.\n\n :param addr: IPv4 address\n :returns: str addr or CX\n \"\"\"\n if not isinstance(addr, str):\n raise CX(\"Invalid input, addr must be a string\")\n else:\n addr = addr.strip()\n\n if addr == \"\":\n return addr\n\n if not netaddr.valid_ipv4(addr):\n raise CX(\"Invalid IPv4 address format (%s)\" % addr)\n\n if netaddr.IPAddress(addr).is_netmask():\n raise CX(\"Invalid IPv4 host address (%s)\" % addr)\n\n return addr\n\n\ndef ipv4_netmask(addr: str) -> str:\n \"\"\"\n Validate an IPv4 netmask.\n\n :param addr: IPv4 netmask\n :returns: str addr or CX\n \"\"\"\n if not isinstance(addr, str):\n raise 
CX(\"Invalid input, addr must be a string\")\n else:\n addr = addr.strip()\n\n if addr == \"\":\n return addr\n\n if not netaddr.valid_ipv4(addr):\n raise CX(\"Invalid IPv4 address format (%s)\" % addr)\n\n if not netaddr.IPAddress(addr).is_netmask():\n raise CX(\"Invalid IPv4 netmask (%s)\" % addr)\n\n return addr\n\n\ndef ipv6_address(addr: str) -> str:\n \"\"\"\n Validate an IPv6 address.\n\n :param addr: IPv6 address\n :returns: The IPv6 address.\n \"\"\"\n if not isinstance(addr, str):\n raise CX(\"Invalid input, addr must be a string\")\n else:\n addr = addr.strip()\n\n if addr == \"\":\n return addr\n\n if not netaddr.valid_ipv6(addr):\n raise CX(\"Invalid IPv6 address format (%s)\" % addr)\n\n return addr\n\n\ndef name_servers(nameservers: Union[str, list], for_item: bool = True) -> Union[str, list]:\n \"\"\"\n Validate nameservers IP addresses, works for IPv4 and IPv6\n\n :param nameservers: string or list of nameserver addresses\n :param for_item: enable/disable special handling for Item objects\n :return: The list of valid nameservers.\n \"\"\"\n if isinstance(nameservers, str):\n nameservers = nameservers.strip()\n if for_item is True:\n # special handling for Items\n if nameservers in [\"<<inherit>>\", \"\"]:\n return nameservers\n\n # convert string to a list; do the real validation\n # in the isinstance(list) code block below\n nameservers = shlex.split(nameservers)\n\n if isinstance(nameservers, list):\n for ns in nameservers:\n ip_version = netaddr.IPAddress(ns).version\n if ip_version == 4:\n ipv4_address(ns)\n elif ip_version == 6:\n ipv6_address(ns)\n else:\n raise CX(\"Invalid IP address format\")\n else:\n raise CX(\"Invalid input type %s, expected str or list\" % type(nameservers))\n\n return nameservers\n\n\ndef name_servers_search(search: Union[str, list], for_item: bool = True) -> Union[str, list]:\n \"\"\"\n Validate nameservers search domains.\n\n :param search: One or more search domains to validate.\n :param for_item: (enable/disable special handling for Item objects)\n :return: The list of valid nameservers.\n \"\"\"\n if isinstance(search, str):\n search = search.strip()\n if for_item is True:\n # special handling for Items\n if search in [\"<<inherit>>\", \"\"]:\n return search\n\n # convert string to a list; do the real validation\n # in the isinstance(list) code block below\n search = shlex.split(search)\n\n if isinstance(search, list):\n for sl in search:\n hostname(sl)\n else:\n raise CX(\"Invalid input type %s, expected str or list\" % type(search))\n\n return search\n\n\ndef validate_autoinstall_script_name(name: str) -> bool:\n \"\"\"\n This validates if the name given for the script is valid in the context of the API call made. It will be handed to\n tftpgen.py#generate_script in the end.\n\n :param name: The name of the script. Will end up being a filename. 
May have an extension but should never be a path.\n :return: If this is a valid script name or not.\n \"\"\"\n if not isinstance(name, str):\n return False\n if re.fullmatch(RE_SCRIPT_NAME, name):\n return True\n return False\n\n\ndef validate_uuid(possible_uuid: str) -> bool:\n \"\"\"\n Validate if the handed string is a valid UUIDv4.\n\n :param possible_uuid: The str with the UUID.\n :return: True in case it is one, False otherwise.\n \"\"\"\n if not isinstance(possible_uuid, str):\n return False\n # Taken from: https://stackoverflow.com/a/33245493/4730773\n try:\n uuid_obj = UUID(possible_uuid, version=4)\n except ValueError:\n return False\n return str(uuid_obj) == possible_uuid\n\n\ndef validate_obj_type(object_type: str) -> bool:\n \"\"\"\n\n :param object_type:\n :return:\n \"\"\"\n if not isinstance(object_type, str):\n return False\n return object_type in [\"distro\", \"profile\", \"system\", \"repo\", \"image\", \"mgmtclass\", \"package\", \"file\", \"menu\"]\n\n\ndef validate_obj_name(object_name: str) -> bool:\n \"\"\"\n\n :param object_name:\n :return:\n \"\"\"\n if not isinstance(object_name, str):\n return False\n return bool(re.fullmatch(RE_OBJECT_NAME, object_name))\n\n\ndef validate_obj_id(object_id: str) -> bool:\n \"\"\"\n\n :param object_id:\n :return: True in case it is one, False otherwise.\n \"\"\"\n if not isinstance(object_id, str):\n return False\n if object_id.startswith(\"___NEW___\"):\n object_id = object_id[9:]\n (otype, oname) = object_id.split(\"::\", 1)\n return validate_obj_type(otype) and validate_obj_name(oname)\n", "path": "cobbler/validate.py"}], "after_files": [{"content": "\"\"\"\nCopyright 2014-2015. Jorgen Maas <[email protected]>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\nfrom typing import Union\nfrom uuid import UUID\n\nimport netaddr\nimport re\nimport shlex\n\nfrom cobbler.cexceptions import CX\n\nRE_OBJECT_NAME = re.compile(r'[a-zA-Z0-9_\\-.:]*$')\nRE_HOSTNAME = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])(\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9]))*$')\nRE_SCRIPT_NAME = re.compile(r\"[a-zA-Z0-9_\\-.]+\")\nRE_INFINIBAND_MAC = re.compile(\n \"^\" + \":\".join([\"([0-9A-F]{1,2})\"] * 20) + \"$\", re.IGNORECASE\n)\n\nREPO_BREEDS = [\"rsync\", \"rhn\", \"yum\", \"apt\", \"wget\"]\n\nVIRT_TYPES = [\"<<inherit>>\", \"xenpv\", \"xenfv\", \"qemu\", \"kvm\", \"vmware\", \"openvz\"]\nVIRT_DISK_DRIVERS = [\"<<inherit>>\", \"raw\", \"qcow2\", \"qed\", \"vdi\", \"vmdk\"]\n\n# blacklist invalid values to the repo statement in autoinsts\nAUTOINSTALL_REPO_BLACKLIST = ['enabled', 'gpgcheck', 'gpgkey']\n\n\ndef object_name(name: str, parent: str) -> str:\n \"\"\"\n Validate the object name.\n\n :param name: object name\n :param parent: Parent object name\n :returns: name or CX\n \"\"\"\n if not isinstance(name, str) or not isinstance(parent, str):\n raise CX(\"Invalid input, name and parent must be strings\")\n else:\n name = name.strip()\n parent = parent.strip()\n\n if name != \"\" and parent != \"\" and name == parent:\n raise CX(\"Self parentage is not allowed\")\n\n if not RE_OBJECT_NAME.match(name):\n raise CX(\"Invalid characters in name: '%s'\" % name)\n\n return name\n\n\ndef hostname(dnsname: str) -> str:\n \"\"\"\n Validate the DNS name.\n\n :param dnsname: Hostname or FQDN\n :returns: dnsname\n :raises CX: If the Hostname/FQDN is not a string or in an invalid format.\n \"\"\"\n if not isinstance(dnsname, str):\n raise CX(\"Invalid input, dnsname must be a string\")\n else:\n dnsname = dnsname.strip()\n\n if dnsname == \"\":\n # hostname is not required\n return dnsname\n\n if not RE_HOSTNAME.match(dnsname):\n raise CX(\"Invalid hostname format (%s)\" % dnsname)\n\n return dnsname\n\n\ndef mac_address(mac: str, for_item=True) -> str:\n \"\"\"\n Validate as an Eternet MAC address.\n\n :param mac: MAC address\n :param for_item: If the check should be performed for an item or not.\n :returns: str mac or CX\n \"\"\"\n if not isinstance(mac, str):\n raise CX(\"Invalid input, mac must be a string\")\n else:\n mac = mac.lower().strip()\n\n if for_item is True:\n # this value has special meaning for items\n if mac == \"random\":\n return mac\n\n # copying system collection will set mac to \"\"\n # netaddr will fail to validate this mac and throw an exception\n if mac == \"\":\n return mac\n\n if not netaddr.valid_mac(mac) and RE_INFINIBAND_MAC.match(mac) is None:\n raise CX(\"Invalid mac address format (%s)\" % mac)\n\n return mac\n\n\ndef ipv4_address(addr: str) -> str:\n \"\"\"\n Validate an IPv4 address.\n\n :param addr: IPv4 address\n :returns: str addr or CX\n \"\"\"\n if not isinstance(addr, str):\n raise CX(\"Invalid input, addr must be a string\")\n else:\n addr = addr.strip()\n\n if addr == \"\":\n return addr\n\n if not netaddr.valid_ipv4(addr):\n raise CX(\"Invalid IPv4 address format (%s)\" % addr)\n\n if netaddr.IPAddress(addr).is_netmask():\n raise CX(\"Invalid IPv4 host address (%s)\" % addr)\n\n return addr\n\n\ndef ipv4_netmask(addr: str) -> 
str:\n \"\"\"\n Validate an IPv4 netmask.\n\n :param addr: IPv4 netmask\n :returns: str addr or CX\n \"\"\"\n if not isinstance(addr, str):\n raise CX(\"Invalid input, addr must be a string\")\n else:\n addr = addr.strip()\n\n if addr == \"\":\n return addr\n\n if not netaddr.valid_ipv4(addr):\n raise CX(\"Invalid IPv4 address format (%s)\" % addr)\n\n if not netaddr.IPAddress(addr).is_netmask():\n raise CX(\"Invalid IPv4 netmask (%s)\" % addr)\n\n return addr\n\n\ndef ipv6_address(addr: str) -> str:\n \"\"\"\n Validate an IPv6 address.\n\n :param addr: IPv6 address\n :returns: The IPv6 address.\n \"\"\"\n if not isinstance(addr, str):\n raise CX(\"Invalid input, addr must be a string\")\n else:\n addr = addr.strip()\n\n if addr == \"\":\n return addr\n\n if not netaddr.valid_ipv6(addr):\n raise CX(\"Invalid IPv6 address format (%s)\" % addr)\n\n return addr\n\n\ndef name_servers(nameservers: Union[str, list], for_item: bool = True) -> Union[str, list]:\n \"\"\"\n Validate nameservers IP addresses, works for IPv4 and IPv6\n\n :param nameservers: string or list of nameserver addresses\n :param for_item: enable/disable special handling for Item objects\n :return: The list of valid nameservers.\n \"\"\"\n if isinstance(nameservers, str):\n nameservers = nameservers.strip()\n if for_item is True:\n # special handling for Items\n if nameservers in [\"<<inherit>>\", \"\"]:\n return nameservers\n\n # convert string to a list; do the real validation\n # in the isinstance(list) code block below\n nameservers = shlex.split(nameservers)\n\n if isinstance(nameservers, list):\n for ns in nameservers:\n ip_version = netaddr.IPAddress(ns).version\n if ip_version == 4:\n ipv4_address(ns)\n elif ip_version == 6:\n ipv6_address(ns)\n else:\n raise CX(\"Invalid IP address format\")\n else:\n raise CX(\"Invalid input type %s, expected str or list\" % type(nameservers))\n\n return nameservers\n\n\ndef name_servers_search(search: Union[str, list], for_item: bool = True) -> Union[str, list]:\n \"\"\"\n Validate nameservers search domains.\n\n :param search: One or more search domains to validate.\n :param for_item: (enable/disable special handling for Item objects)\n :return: The list of valid nameservers.\n \"\"\"\n if isinstance(search, str):\n search = search.strip()\n if for_item is True:\n # special handling for Items\n if search in [\"<<inherit>>\", \"\"]:\n return search\n\n # convert string to a list; do the real validation\n # in the isinstance(list) code block below\n search = shlex.split(search)\n\n if isinstance(search, list):\n for sl in search:\n hostname(sl)\n else:\n raise CX(\"Invalid input type %s, expected str or list\" % type(search))\n\n return search\n\n\ndef validate_autoinstall_script_name(name: str) -> bool:\n \"\"\"\n This validates if the name given for the script is valid in the context of the API call made. It will be handed to\n tftpgen.py#generate_script in the end.\n\n :param name: The name of the script. Will end up being a filename. 
May have an extension but should never be a path.\n :return: If this is a valid script name or not.\n \"\"\"\n if not isinstance(name, str):\n return False\n if re.fullmatch(RE_SCRIPT_NAME, name):\n return True\n return False\n\n\ndef validate_uuid(possible_uuid: str) -> bool:\n \"\"\"\n Validate if the handed string is a valid UUIDv4.\n\n :param possible_uuid: The str with the UUID.\n :return: True in case it is one, False otherwise.\n \"\"\"\n if not isinstance(possible_uuid, str):\n return False\n # Taken from: https://stackoverflow.com/a/33245493/4730773\n try:\n uuid_obj = UUID(possible_uuid, version=4)\n except ValueError:\n return False\n return str(uuid_obj) == possible_uuid\n\n\ndef validate_obj_type(object_type: str) -> bool:\n \"\"\"\n\n :param object_type:\n :return:\n \"\"\"\n if not isinstance(object_type, str):\n return False\n return object_type in [\"distro\", \"profile\", \"system\", \"repo\", \"image\", \"mgmtclass\", \"package\", \"file\", \"menu\"]\n\n\ndef validate_obj_name(object_name: str) -> bool:\n \"\"\"\n\n :param object_name:\n :return:\n \"\"\"\n if not isinstance(object_name, str):\n return False\n return bool(re.fullmatch(RE_OBJECT_NAME, object_name))\n\n\ndef validate_obj_id(object_id: str) -> bool:\n \"\"\"\n\n :param object_id:\n :return: True in case it is one, False otherwise.\n \"\"\"\n if not isinstance(object_id, str):\n return False\n if object_id.startswith(\"___NEW___\"):\n object_id = object_id[9:]\n (otype, oname) = object_id.split(\"::\", 1)\n return validate_obj_type(otype) and validate_obj_name(oname)\n", "path": "cobbler/validate.py"}]}
4016
314
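The core of the Cobbler patch in the record above is a second acceptance path for 20-octet InfiniBand hardware addresses, which `netaddr.valid_mac` alone rejects. Below is a minimal standalone sketch of that check, not Cobbler's actual module: it re-declares the `RE_INFINIBAND_MAC` pattern from the golden diff, assumes the third-party `netaddr` package is installed, and uses the sample address quoted in the issue.

```python
import re

import netaddr  # third-party package already used by cobbler/validate.py

# Pattern copied from the golden diff: twenty colon-separated hex octets.
RE_INFINIBAND_MAC = re.compile(
    "^" + ":".join(["([0-9A-F]{1,2})"] * 20) + "$", re.IGNORECASE
)

def mac_is_valid(mac: str) -> bool:
    """Patched acceptance rule: ordinary Ethernet MAC or InfiniBand MAC."""
    return netaddr.valid_mac(mac) or RE_INFINIBAND_MAC.match(mac) is not None

# 20-octet InfiniBand address from the issue, a 48-bit MAC, and garbage input.
print(mac_is_valid("80:00:02:08:fe:80:00:00:00:00:00:00:f4:52:14:03:00:96:79:31"))  # True
print(mac_is_valid("aa:bb:cc:dd:ee:ff"))  # True
print(mac_is_valid("not-a-mac"))          # False
```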
gh_patches_debug_39779
rasdani/github-patches
git_diff
sunpy__sunpy-4477
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Normalise images before handing them to skimage.transform.warp From @ayshih at https://github.com/sunpy/sunpy/pull/4452#issuecomment-686504683: > #2041 removed the normalization code and cast everything to float64. However, I believe the removal of the normalization code should not have been done. Scikit-image's documentation is quite clear that it always assumes that floats are in the range of [0, 1]. While warp() does appear to work fine when that assumption is violated, we are currently relying on undefined behavior. > I suggest that the normalization code be re-inserted. I believe the work here is to normalise floating point input to `skimage.transform.warp` to the range [0, 1] in `sunpy.image.transform.affine_transform`, and then un-normalise it after the transform. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sunpy/image/transform.py` Content: ``` 1 """ 2 Functions for geometrical image transformation and warping. 3 """ 4 import numbers 5 import warnings 6 7 import numpy as np 8 import scipy.ndimage.interpolation 9 10 from sunpy.util.exceptions import SunpyUserWarning 11 12 __all__ = ['affine_transform'] 13 14 15 def affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None, 16 recenter=False, missing=0.0, use_scipy=False): 17 """ 18 Rotates, shifts and scales an image. 19 20 Will use `skimage.transform.warp` unless scikit-image can't be imported 21 then it will use`scipy.ndimage.affine_transform`. 22 23 Parameters 24 ---------- 25 image : `numpy.ndarray` 26 2D image to be rotated. 27 rmatrix : `numpy.ndarray` that is 2x2 28 Linear transformation rotation matrix. 29 order : `int` 0-5, optional 30 Interpolation order to be used, defaults to 3. When using scikit-image this parameter 31 is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation). 32 When using scipy it is passed into 33 `scipy.ndimage.affine_transform` where it controls the order of the spline. 34 scale : `float` 35 A scale factor for the image with the default being no scaling. 36 image_center : tuple, optional 37 The point in the image to rotate around (axis of rotation). 38 Defaults to the center of the array. 39 recenter : `bool` or array-like, optional 40 Move the axis of rotation to the center of the array or recenter coords. 41 Defaults to `True` i.e., recenter to the center of the array. 42 missing : `float`, optional 43 The value to replace any missing data after the transformation. 44 use_scipy : `bool`, optional 45 Force use of `scipy.ndimage.affine_transform`. 46 Will set all "NaNs" in image to zero before doing the transform. 47 Defaults to `False`, unless scikit-image can't be imported. 48 49 Returns 50 ------- 51 `numpy.ndarray`: 52 New rotated, scaled and translated image. 53 54 Notes 55 ----- 56 This algorithm uses an affine transformation as opposed to a polynomial 57 geometrical transformation, which by default is `skimage.transform.warp`. 58 One can specify using `scipy.ndimage.affine_transform` as 59 an alternative affine transformation. The two transformations use different 60 algorithms and thus do not give identical output. 61 62 When using for `skimage.transform.warp` with order >= 4 or using 63 `scipy.ndimage.affine_transform` at all, "NaN" values will 64 replaced with zero prior to rotation. No attempt is made to retain the NaN 65 values. 
66 67 Input arrays with integer data are cast to float 64 and can be re-cast using 68 `numpy.ndarray.astype` if desired. 69 70 Although this function is analogous to the IDL's ``rot`` function, it does not 71 use the same algorithm as the IDL ``rot`` function. 72 IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__ 73 method to calculate the inverse mapping of original to target pixel 74 coordinates. This is a polynomial geometrical transformation. 75 Then optionally it uses a bicubic convolution interpolation 76 algorithm to map the original to target pixel values. 77 """ 78 rmatrix = rmatrix / scale 79 array_center = (np.array(image.shape)[::-1] - 1) / 2.0 80 81 # Make sure the image center is an array and is where it's supposed to be 82 if image_center is not None: 83 image_center = np.asanyarray(image_center) 84 else: 85 image_center = array_center 86 87 # Determine center of rotation based on use (or not) of the recenter keyword 88 if recenter: 89 rot_center = array_center 90 else: 91 rot_center = image_center 92 93 displacement = np.dot(rmatrix, rot_center) 94 shift = image_center - displacement 95 if not use_scipy: 96 try: 97 import skimage.transform 98 except ImportError: 99 warnings.warn("scikit-image could not be imported. Image rotation will use scipy", 100 ImportWarning) 101 use_scipy = True 102 if use_scipy: 103 if np.any(np.isnan(image)): 104 warnings.warn("Setting NaNs to 0 for SciPy rotation.", SunpyUserWarning) 105 # Transform the image using the scipy affine transform 106 rotated_image = scipy.ndimage.interpolation.affine_transform( 107 np.nan_to_num(image).T, rmatrix, offset=shift, order=order, 108 mode='constant', cval=missing).T 109 else: 110 # Make the rotation matrix 3x3 to include translation of the image 111 skmatrix = np.zeros((3, 3)) 112 skmatrix[:2, :2] = rmatrix 113 skmatrix[2, 2] = 1.0 114 skmatrix[:2, 2] = shift 115 tform = skimage.transform.AffineTransform(skmatrix) 116 117 if issubclass(image.dtype.type, numbers.Integral): 118 warnings.warn("Integer input data has been cast to float64, " 119 "which is required for the skikit-image transform.", 120 SunpyUserWarning) 121 adjusted_image = image.astype(np.float64) 122 else: 123 adjusted_image = image.copy() 124 if np.any(np.isnan(adjusted_image)) and order >= 4: 125 warnings.warn("Setting NaNs to 0 for higher-order scikit-image rotation.", 126 SunpyUserWarning) 127 adjusted_image = np.nan_to_num(adjusted_image) 128 129 rotated_image = skimage.transform.warp(adjusted_image, tform, order=order, 130 mode='constant', cval=missing, 131 preserve_range=True) 132 133 return rotated_image 134 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sunpy/image/transform.py b/sunpy/image/transform.py --- a/sunpy/image/transform.py +++ b/sunpy/image/transform.py @@ -59,14 +59,16 @@ an alternative affine transformation. The two transformations use different algorithms and thus do not give identical output. - When using for `skimage.transform.warp` with order >= 4 or using - `scipy.ndimage.affine_transform` at all, "NaN" values will - replaced with zero prior to rotation. No attempt is made to retain the NaN - values. + When using `skimage.transform.warp` with order >= 4 or using + `scipy.ndimage.affine_transform` at all, "NaN" values will be replaced with + zero prior to rotation. No attempt is made to retain the "NaN" values. Input arrays with integer data are cast to float 64 and can be re-cast using `numpy.ndarray.astype` if desired. + In the case of `skimage.transform.warp`, the image is normalized to [0, 1] + before passing it to the function. It is later rescaled back to the original range. + Although this function is analogous to the IDL's ``rot`` function, it does not use the same algorithm as the IDL ``rot`` function. IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__ @@ -115,8 +117,7 @@ tform = skimage.transform.AffineTransform(skmatrix) if issubclass(image.dtype.type, numbers.Integral): - warnings.warn("Integer input data has been cast to float64, " - "which is required for the skikit-image transform.", + warnings.warn("Integer input data has been cast to float64.", SunpyUserWarning) adjusted_image = image.astype(np.float64) else: @@ -126,8 +127,28 @@ SunpyUserWarning) adjusted_image = np.nan_to_num(adjusted_image) + # Scale image to range [0, 1] if it is valid (not made up entirely of NaNs) + is_nan_image = np.all(np.isnan(adjusted_image)) + if is_nan_image: + adjusted_missing = missing + else: + im_min = np.nanmin(adjusted_image) + adjusted_image -= im_min + im_max = np.nanmax(adjusted_image) + if im_max > 0: + adjusted_image /= im_max + adjusted_missing = (missing - im_min) / im_max + else: + # The input array is all one value (aside from NaNs), so no scaling is needed + adjusted_missing = missing - im_min + rotated_image = skimage.transform.warp(adjusted_image, tform, order=order, - mode='constant', cval=missing, - preserve_range=True) + mode='constant', cval=adjusted_missing) + + # Convert the image back to its original range if it is valid + if not is_nan_image: + if im_max > 0: + rotated_image *= im_max + rotated_image += im_min return rotated_image
{"golden_diff": "diff --git a/sunpy/image/transform.py b/sunpy/image/transform.py\n--- a/sunpy/image/transform.py\n+++ b/sunpy/image/transform.py\n@@ -59,14 +59,16 @@\n an alternative affine transformation. The two transformations use different\n algorithms and thus do not give identical output.\n \n- When using for `skimage.transform.warp` with order >= 4 or using\n- `scipy.ndimage.affine_transform` at all, \"NaN\" values will\n- replaced with zero prior to rotation. No attempt is made to retain the NaN\n- values.\n+ When using `skimage.transform.warp` with order >= 4 or using\n+ `scipy.ndimage.affine_transform` at all, \"NaN\" values will be replaced with\n+ zero prior to rotation. No attempt is made to retain the \"NaN\" values.\n \n Input arrays with integer data are cast to float 64 and can be re-cast using\n `numpy.ndarray.astype` if desired.\n \n+ In the case of `skimage.transform.warp`, the image is normalized to [0, 1]\n+ before passing it to the function. It is later rescaled back to the original range.\n+\n Although this function is analogous to the IDL's ``rot`` function, it does not\n use the same algorithm as the IDL ``rot`` function.\n IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__\n@@ -115,8 +117,7 @@\n tform = skimage.transform.AffineTransform(skmatrix)\n \n if issubclass(image.dtype.type, numbers.Integral):\n- warnings.warn(\"Integer input data has been cast to float64, \"\n- \"which is required for the skikit-image transform.\",\n+ warnings.warn(\"Integer input data has been cast to float64.\",\n SunpyUserWarning)\n adjusted_image = image.astype(np.float64)\n else:\n@@ -126,8 +127,28 @@\n SunpyUserWarning)\n adjusted_image = np.nan_to_num(adjusted_image)\n \n+ # Scale image to range [0, 1] if it is valid (not made up entirely of NaNs)\n+ is_nan_image = np.all(np.isnan(adjusted_image))\n+ if is_nan_image:\n+ adjusted_missing = missing\n+ else:\n+ im_min = np.nanmin(adjusted_image)\n+ adjusted_image -= im_min\n+ im_max = np.nanmax(adjusted_image)\n+ if im_max > 0:\n+ adjusted_image /= im_max\n+ adjusted_missing = (missing - im_min) / im_max\n+ else:\n+ # The input array is all one value (aside from NaNs), so no scaling is needed\n+ adjusted_missing = missing - im_min\n+\n rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,\n- mode='constant', cval=missing,\n- preserve_range=True)\n+ mode='constant', cval=adjusted_missing)\n+\n+ # Convert the image back to its original range if it is valid\n+ if not is_nan_image:\n+ if im_max > 0:\n+ rotated_image *= im_max\n+ rotated_image += im_min\n \n return rotated_image\n", "issue": "Normalise images before handing them to skimage.transform.warp\nFrom @ayshih at https://github.com/sunpy/sunpy/pull/4452#issuecomment-686504683:\r\n\r\n> #2041 removed the normalization code and cast everything to float64. However, I believe the removal of the normalization code should not have been done. Scikit-image's documentation is quite clear that it always assumes that floats are in the range of [0, 1]. 
While warp() does appear to work fine when that assumption is violated, we are currently relying on undefined behavior.\r\n\r\n> I suggest that the normalization code be re-inserted.\r\n\r\nI believe the work here is to normalise floating point input to `skimage.transform.warp` to the range [0, 1] in `sunpy.image.transform.affine_transform`, and then un-normalise it after the transform.\n", "before_files": [{"content": "\"\"\"\nFunctions for geometrical image transformation and warping.\n\"\"\"\nimport numbers\nimport warnings\n\nimport numpy as np\nimport scipy.ndimage.interpolation\n\nfrom sunpy.util.exceptions import SunpyUserWarning\n\n__all__ = ['affine_transform']\n\n\ndef affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None,\n recenter=False, missing=0.0, use_scipy=False):\n \"\"\"\n Rotates, shifts and scales an image.\n\n Will use `skimage.transform.warp` unless scikit-image can't be imported\n then it will use`scipy.ndimage.affine_transform`.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n 2D image to be rotated.\n rmatrix : `numpy.ndarray` that is 2x2\n Linear transformation rotation matrix.\n order : `int` 0-5, optional\n Interpolation order to be used, defaults to 3. When using scikit-image this parameter\n is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation).\n When using scipy it is passed into\n `scipy.ndimage.affine_transform` where it controls the order of the spline.\n scale : `float`\n A scale factor for the image with the default being no scaling.\n image_center : tuple, optional\n The point in the image to rotate around (axis of rotation).\n Defaults to the center of the array.\n recenter : `bool` or array-like, optional\n Move the axis of rotation to the center of the array or recenter coords.\n Defaults to `True` i.e., recenter to the center of the array.\n missing : `float`, optional\n The value to replace any missing data after the transformation.\n use_scipy : `bool`, optional\n Force use of `scipy.ndimage.affine_transform`.\n Will set all \"NaNs\" in image to zero before doing the transform.\n Defaults to `False`, unless scikit-image can't be imported.\n\n Returns\n -------\n `numpy.ndarray`:\n New rotated, scaled and translated image.\n\n Notes\n -----\n This algorithm uses an affine transformation as opposed to a polynomial\n geometrical transformation, which by default is `skimage.transform.warp`.\n One can specify using `scipy.ndimage.affine_transform` as\n an alternative affine transformation. The two transformations use different\n algorithms and thus do not give identical output.\n\n When using for `skimage.transform.warp` with order >= 4 or using\n `scipy.ndimage.affine_transform` at all, \"NaN\" values will\n replaced with zero prior to rotation. No attempt is made to retain the NaN\n values.\n\n Input arrays with integer data are cast to float 64 and can be re-cast using\n `numpy.ndarray.astype` if desired.\n\n Although this function is analogous to the IDL's ``rot`` function, it does not\n use the same algorithm as the IDL ``rot`` function.\n IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__\n method to calculate the inverse mapping of original to target pixel\n coordinates. 
This is a polynomial geometrical transformation.\n Then optionally it uses a bicubic convolution interpolation\n algorithm to map the original to target pixel values.\n \"\"\"\n rmatrix = rmatrix / scale\n array_center = (np.array(image.shape)[::-1] - 1) / 2.0\n\n # Make sure the image center is an array and is where it's supposed to be\n if image_center is not None:\n image_center = np.asanyarray(image_center)\n else:\n image_center = array_center\n\n # Determine center of rotation based on use (or not) of the recenter keyword\n if recenter:\n rot_center = array_center\n else:\n rot_center = image_center\n\n displacement = np.dot(rmatrix, rot_center)\n shift = image_center - displacement\n if not use_scipy:\n try:\n import skimage.transform\n except ImportError:\n warnings.warn(\"scikit-image could not be imported. Image rotation will use scipy\",\n ImportWarning)\n use_scipy = True\n if use_scipy:\n if np.any(np.isnan(image)):\n warnings.warn(\"Setting NaNs to 0 for SciPy rotation.\", SunpyUserWarning)\n # Transform the image using the scipy affine transform\n rotated_image = scipy.ndimage.interpolation.affine_transform(\n np.nan_to_num(image).T, rmatrix, offset=shift, order=order,\n mode='constant', cval=missing).T\n else:\n # Make the rotation matrix 3x3 to include translation of the image\n skmatrix = np.zeros((3, 3))\n skmatrix[:2, :2] = rmatrix\n skmatrix[2, 2] = 1.0\n skmatrix[:2, 2] = shift\n tform = skimage.transform.AffineTransform(skmatrix)\n\n if issubclass(image.dtype.type, numbers.Integral):\n warnings.warn(\"Integer input data has been cast to float64, \"\n \"which is required for the skikit-image transform.\",\n SunpyUserWarning)\n adjusted_image = image.astype(np.float64)\n else:\n adjusted_image = image.copy()\n if np.any(np.isnan(adjusted_image)) and order >= 4:\n warnings.warn(\"Setting NaNs to 0 for higher-order scikit-image rotation.\",\n SunpyUserWarning)\n adjusted_image = np.nan_to_num(adjusted_image)\n\n rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,\n mode='constant', cval=missing,\n preserve_range=True)\n\n return rotated_image\n", "path": "sunpy/image/transform.py"}], "after_files": [{"content": "\"\"\"\nFunctions for geometrical image transformation and warping.\n\"\"\"\nimport numbers\nimport warnings\n\nimport numpy as np\nimport scipy.ndimage.interpolation\n\nfrom sunpy.util.exceptions import SunpyUserWarning\n\n__all__ = ['affine_transform']\n\n\ndef affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None,\n recenter=False, missing=0.0, use_scipy=False):\n \"\"\"\n Rotates, shifts and scales an image.\n\n Will use `skimage.transform.warp` unless scikit-image can't be imported\n then it will use`scipy.ndimage.affine_transform`.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n 2D image to be rotated.\n rmatrix : `numpy.ndarray` that is 2x2\n Linear transformation rotation matrix.\n order : `int` 0-5, optional\n Interpolation order to be used, defaults to 3. 
When using scikit-image this parameter\n is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation).\n When using scipy it is passed into\n `scipy.ndimage.affine_transform` where it controls the order of the spline.\n scale : `float`\n A scale factor for the image with the default being no scaling.\n image_center : tuple, optional\n The point in the image to rotate around (axis of rotation).\n Defaults to the center of the array.\n recenter : `bool` or array-like, optional\n Move the axis of rotation to the center of the array or recenter coords.\n Defaults to `True` i.e., recenter to the center of the array.\n missing : `float`, optional\n The value to replace any missing data after the transformation.\n use_scipy : `bool`, optional\n Force use of `scipy.ndimage.affine_transform`.\n Will set all \"NaNs\" in image to zero before doing the transform.\n Defaults to `False`, unless scikit-image can't be imported.\n\n Returns\n -------\n `numpy.ndarray`:\n New rotated, scaled and translated image.\n\n Notes\n -----\n This algorithm uses an affine transformation as opposed to a polynomial\n geometrical transformation, which by default is `skimage.transform.warp`.\n One can specify using `scipy.ndimage.affine_transform` as\n an alternative affine transformation. The two transformations use different\n algorithms and thus do not give identical output.\n\n When using `skimage.transform.warp` with order >= 4 or using\n `scipy.ndimage.affine_transform` at all, \"NaN\" values will be replaced with\n zero prior to rotation. No attempt is made to retain the \"NaN\" values.\n\n Input arrays with integer data are cast to float 64 and can be re-cast using\n `numpy.ndarray.astype` if desired.\n\n In the case of `skimage.transform.warp`, the image is normalized to [0, 1]\n before passing it to the function. It is later rescaled back to the original range.\n\n Although this function is analogous to the IDL's ``rot`` function, it does not\n use the same algorithm as the IDL ``rot`` function.\n IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__\n method to calculate the inverse mapping of original to target pixel\n coordinates. This is a polynomial geometrical transformation.\n Then optionally it uses a bicubic convolution interpolation\n algorithm to map the original to target pixel values.\n \"\"\"\n rmatrix = rmatrix / scale\n array_center = (np.array(image.shape)[::-1] - 1) / 2.0\n\n # Make sure the image center is an array and is where it's supposed to be\n if image_center is not None:\n image_center = np.asanyarray(image_center)\n else:\n image_center = array_center\n\n # Determine center of rotation based on use (or not) of the recenter keyword\n if recenter:\n rot_center = array_center\n else:\n rot_center = image_center\n\n displacement = np.dot(rmatrix, rot_center)\n shift = image_center - displacement\n if not use_scipy:\n try:\n import skimage.transform\n except ImportError:\n warnings.warn(\"scikit-image could not be imported. 
Image rotation will use scipy\",\n ImportWarning)\n use_scipy = True\n if use_scipy:\n if np.any(np.isnan(image)):\n warnings.warn(\"Setting NaNs to 0 for SciPy rotation.\", SunpyUserWarning)\n # Transform the image using the scipy affine transform\n rotated_image = scipy.ndimage.interpolation.affine_transform(\n np.nan_to_num(image).T, rmatrix, offset=shift, order=order,\n mode='constant', cval=missing).T\n else:\n # Make the rotation matrix 3x3 to include translation of the image\n skmatrix = np.zeros((3, 3))\n skmatrix[:2, :2] = rmatrix\n skmatrix[2, 2] = 1.0\n skmatrix[:2, 2] = shift\n tform = skimage.transform.AffineTransform(skmatrix)\n\n if issubclass(image.dtype.type, numbers.Integral):\n warnings.warn(\"Integer input data has been cast to float64.\",\n SunpyUserWarning)\n adjusted_image = image.astype(np.float64)\n else:\n adjusted_image = image.copy()\n if np.any(np.isnan(adjusted_image)) and order >= 4:\n warnings.warn(\"Setting NaNs to 0 for higher-order scikit-image rotation.\",\n SunpyUserWarning)\n adjusted_image = np.nan_to_num(adjusted_image)\n\n # Scale image to range [0, 1] if it is valid (not made up entirely of NaNs)\n is_nan_image = np.all(np.isnan(adjusted_image))\n if is_nan_image:\n adjusted_missing = missing\n else:\n im_min = np.nanmin(adjusted_image)\n adjusted_image -= im_min\n im_max = np.nanmax(adjusted_image)\n if im_max > 0:\n adjusted_image /= im_max\n adjusted_missing = (missing - im_min) / im_max\n else:\n # The input array is all one value (aside from NaNs), so no scaling is needed\n adjusted_missing = missing - im_min\n\n rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,\n mode='constant', cval=adjusted_missing)\n\n # Convert the image back to its original range if it is valid\n if not is_nan_image:\n if im_max > 0:\n rotated_image *= im_max\n rotated_image += im_min\n\n return rotated_image\n", "path": "sunpy/image/transform.py"}]}
2022
749
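The sunpy patch in the record above is mostly bookkeeping: shift and scale the image into [0, 1], hand it to the warp, then invert the scaling, applying the same offset and scale to the `missing` fill value. The sketch below reproduces only that arithmetic with plain NumPy; the real `skimage.transform.warp` call is replaced by an identity stand-in, since the point here is the normalisation round trip rather than the warp itself.

```python
import numpy as np

def rescaled_warp(image, warp_fn, missing=0.0):
    """Scale image to [0, 1], apply warp_fn, then restore the original range."""
    if np.all(np.isnan(image)):
        # Nothing to scale; warp with the untouched fill value.
        return warp_fn(image, cval=missing)

    im_min = np.nanmin(image)
    shifted = image - im_min
    im_max = np.nanmax(shifted)
    if im_max > 0:
        scaled = shifted / im_max
        adjusted_missing = (missing - im_min) / im_max
    else:
        # Constant image (aside from NaNs): shifting alone is enough.
        scaled = shifted
        adjusted_missing = missing - im_min

    warped = warp_fn(scaled, cval=adjusted_missing)

    # Undo the normalisation so callers see the original data range.
    if im_max > 0:
        warped = warped * im_max
    return warped + im_min

# Identity "warp" used only to exercise the scaling round trip.
identity = lambda a, cval=0.0: a.copy()
img = np.array([[10.0, 20.0], [30.0, 40.0]])
print(np.allclose(rescaled_warp(img, identity), img))  # True
```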
gh_patches_debug_3610
rasdani/github-patches
git_diff
google__clusterfuzz-1961
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- js_minimizer is not callable The following run function of JSMinimizer class is not callable as `js_minimizer` is an object and not a function, thus it should not be callable. ``` js_tokenizer = AntlrTokenizer(JavaScriptLexer) js_minimizer = JSMinimizer( utils.test, max_threads=thread_count, tokenizer=js_tokenizer.tokenize, token_combiner=js_tokenizer.combine, file_extension=file_extension) result = line_minimizer.minimize(data) result = js_minimizer(result) result = js_minimizer(result) result = line_minimizer.minimize(result) ``` Location in codebase [here](https://github.com/google/clusterfuzz/blob/master/src/python/bot/minimizer/js_minimizer.py#L187). Also, does js_minimizer should be called two times? I guess `js_minimizer(result)` should be replaced with `js_minimizer.minimize(data)`, but I am not really sure. Can someone please confirm if it is a bug or not? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/python/bot/minimizer/js_minimizer.py` Content: ``` 1 # Copyright 2019 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """Minimizer used for additional reduction on javascript test cases.""" 15 from __future__ import absolute_import 16 17 from builtins import range 18 19 from . import delta_minimizer 20 from . import errors 21 from . import minimizer 22 from . import utils 23 24 from bot.tokenizer.antlr_tokenizer import AntlrTokenizer 25 from bot.tokenizer.grammars.JavaScriptLexer import JavaScriptLexer 26 27 28 def step_back_while(cur_index, condition): 29 """Helper function. Decreases index from cur while condition is satisfied.""" 30 while cur_index >= 0 and condition(cur_index): 31 cur_index -= 1 32 return cur_index 33 34 35 class JSMinimizer(minimizer.Minimizer): 36 """Intended as a second-pass minimizer to remove unneeded tokens from JS.""" 37 38 def _execute(self, data): 39 testcase = minimizer.Testcase(data, self) 40 if not self.validate_tokenizer(data, testcase): 41 raise errors.TokenizationFailureError('JS Minimizer') 42 43 brace_stack = [] 44 paren_stack = [] 45 46 for index, token in enumerate(testcase.tokens): 47 if token == '{': 48 brace_stack.append(index) 49 50 elif token == '}' and brace_stack: 51 52 # Two hypotheses for tokens grouped by curly braces: 53 # 1) Remove from start of line to open brace and the closing brace. 54 # e.g.: if (statement_that_evaluates_to_true) { crash() } -> crash() 55 open_brace_index = brace_stack.pop() 56 57 # Find the first non-empty token prior to the starting brackets. 58 token_before_bracket = step_back_while( 59 open_brace_index - 1, (lambda x: not testcase.tokens[x].strip())) 60 61 # If that token is a close paren, we need to grab everything else too. 62 # Do this to grab the whole paren so we don't create a syntax error by 63 # removing only part of a paren. 
64 if testcase.tokens[token_before_bracket] == ')': 65 # Find everything in the paren. 66 token_before_bracket = step_back_while( 67 token_before_bracket, (lambda x: testcase.tokens[x] != '(')) 68 69 # Get the token before the paren. 70 token_before_bracket -= 1 71 token_before_bracket = step_back_while( 72 token_before_bracket, (lambda x: not testcase.tokens[x].strip())) 73 74 # Walk back to the start of that line as well to get if/else and funcs. 75 # Do this after paren to manage situations where there are newlines in 76 # the parens. 77 token_before_bracket = step_back_while( 78 token_before_bracket, (lambda x: testcase.tokens[x] != '\n')) 79 80 token_before_bracket += 1 81 82 hypothesis = list(range(token_before_bracket, 83 open_brace_index + 1)) + [index] 84 85 testcase.prepare_test(hypothesis) 86 87 # 2) Remove previous tokens and from the closing brace to the next one. 88 # e.g.: try { crash() } catch(e) {} -> crash(). 89 future_index = len(testcase.tokens) 90 open_count = 0 91 for future_index in range(index + 1, len(testcase.tokens)): 92 if testcase.tokens[future_index] == '{': 93 open_count += 1 94 if testcase.tokens[future_index] == '}': 95 open_count -= 1 96 # Make sure to grab entire outer brace if there are inner braces. 97 if not open_count: 98 break 99 if future_index != len(testcase.tokens): 100 lookahead_hypothesis = hypothesis + list( 101 range(index + 1, future_index + 1)) 102 103 testcase.prepare_test(lookahead_hypothesis) 104 105 elif token == '(': 106 paren_stack.append(index) 107 108 elif token == ')' and paren_stack: 109 # Three hypotheses for tokens grouped by parentheses: 110 # 1) Remove the parentheses and the previous token. 111 # e.g.: assertTrue(crash()); -> crash() 112 previous_end = paren_stack.pop() 113 if previous_end > 0: 114 hypothesis = [previous_end - 1, previous_end, index] 115 testcase.prepare_test(hypothesis) 116 117 # 2) Remove everything between the parentheses. 118 # e.g. crash(junk, more_junk) -> crash() 119 if index - previous_end > 1: 120 hypothesis = list(range(previous_end + 1, index)) 121 testcase.prepare_test(hypothesis) 122 123 # 3) Like 1, but to start of line instead of previous token. 124 # e.g.: leftover_junk = (function() { 125 # }); 126 127 # Find the beginning of the line 128 token_before_paren = previous_end 129 token_before_paren = step_back_while( 130 previous_end, (lambda x: testcase.tokens[x] != '\n')) 131 token_before_paren += 1 132 133 hypothesis = list(range(token_before_paren, previous_end + 1)) + [index] 134 testcase.prepare_test(hypothesis) 135 136 # 4) Like 3, but also from the closing brace to the next one. 137 # e.g.: (function(global) { })(this); 138 future_index = len(testcase.tokens) 139 for future_index in range(index + 1, len(testcase.tokens)): 140 if testcase.tokens[future_index] == ')': 141 break 142 if future_index != len(testcase.tokens): 143 lookahead_hypothesis = list( 144 range(token_before_paren, future_index + 1)) 145 testcase.prepare_test(lookahead_hypothesis) 146 147 elif token == ',': 148 # Two hypotheses for commas: 149 # 1) Remove comma and left-hand-side. 150 # e.g.: f(whatever, crash()) -> f(crash()) 151 if index > 0: 152 hypothesis = [index - 1, index] 153 testcase.prepare_test(hypothesis) 154 155 # 2) Remove comma and right-hand-side. 156 # e.g.: f(crash(), whatever) -> f(crash()) 157 158 # Find the next non whitespace token after the comma. 
159 hypothesis = [index] 160 for right_token_index in range(index + 1, len(testcase.tokens)): 161 hypothesis.append(right_token_index) 162 if testcase.tokens[right_token_index].strip(): 163 testcase.prepare_test(hypothesis) 164 break 165 166 testcase.process() 167 return testcase 168 169 @staticmethod 170 def run(data, 171 thread_count=minimizer.DEFAULT_THREAD_COUNT, 172 file_extension='.js'): 173 """Attempt to minimize a javascript test case.""" 174 line_minimizer = delta_minimizer.DeltaMinimizer( 175 utils.test, max_threads=thread_count, file_extension=file_extension) 176 177 js_tokenizer = AntlrTokenizer(JavaScriptLexer) 178 179 js_minimizer = JSMinimizer( 180 utils.test, 181 max_threads=thread_count, 182 tokenizer=js_tokenizer.tokenize, 183 token_combiner=js_tokenizer.combine, 184 file_extension=file_extension) 185 186 result = line_minimizer.minimize(data) 187 result = js_minimizer(result) 188 result = js_minimizer(result) 189 result = line_minimizer.minimize(result) 190 191 return result 192 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/python/bot/minimizer/js_minimizer.py b/src/python/bot/minimizer/js_minimizer.py --- a/src/python/bot/minimizer/js_minimizer.py +++ b/src/python/bot/minimizer/js_minimizer.py @@ -184,8 +184,8 @@ file_extension=file_extension) result = line_minimizer.minimize(data) - result = js_minimizer(result) - result = js_minimizer(result) + result = js_minimizer.minimize(result) + result = js_minimizer.minimize(result) result = line_minimizer.minimize(result) return result
{"golden_diff": "diff --git a/src/python/bot/minimizer/js_minimizer.py b/src/python/bot/minimizer/js_minimizer.py\n--- a/src/python/bot/minimizer/js_minimizer.py\n+++ b/src/python/bot/minimizer/js_minimizer.py\n@@ -184,8 +184,8 @@\n file_extension=file_extension)\n \n result = line_minimizer.minimize(data)\n- result = js_minimizer(result)\n- result = js_minimizer(result)\n+ result = js_minimizer.minimize(result)\n+ result = js_minimizer.minimize(result)\n result = line_minimizer.minimize(result)\n \n return result\n", "issue": "js_minimizer is not callable\nThe following run function of JSMinimizer class is not callable as `js_minimizer` is an object and not a function, thus it should not be callable.\r\n\r\n```\r\n js_tokenizer = AntlrTokenizer(JavaScriptLexer)\r\n\r\n js_minimizer = JSMinimizer(\r\n utils.test,\r\n max_threads=thread_count,\r\n tokenizer=js_tokenizer.tokenize,\r\n token_combiner=js_tokenizer.combine,\r\n file_extension=file_extension)\r\n\r\n result = line_minimizer.minimize(data)\r\n result = js_minimizer(result)\r\n result = js_minimizer(result)\r\n result = line_minimizer.minimize(result)\r\n```\r\n\r\nLocation in codebase [here](https://github.com/google/clusterfuzz/blob/master/src/python/bot/minimizer/js_minimizer.py#L187).\r\n\r\nAlso, does js_minimizer should be called two times?\r\nI guess `js_minimizer(result)` should be replaced with `js_minimizer.minimize(data)`, but I am not really sure. Can someone please confirm if it is a bug or not?\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Minimizer used for additional reduction on javascript test cases.\"\"\"\nfrom __future__ import absolute_import\n\nfrom builtins import range\n\nfrom . import delta_minimizer\nfrom . import errors\nfrom . import minimizer\nfrom . import utils\n\nfrom bot.tokenizer.antlr_tokenizer import AntlrTokenizer\nfrom bot.tokenizer.grammars.JavaScriptLexer import JavaScriptLexer\n\n\ndef step_back_while(cur_index, condition):\n \"\"\"Helper function. 
Decreases index from cur while condition is satisfied.\"\"\"\n while cur_index >= 0 and condition(cur_index):\n cur_index -= 1\n return cur_index\n\n\nclass JSMinimizer(minimizer.Minimizer):\n \"\"\"Intended as a second-pass minimizer to remove unneeded tokens from JS.\"\"\"\n\n def _execute(self, data):\n testcase = minimizer.Testcase(data, self)\n if not self.validate_tokenizer(data, testcase):\n raise errors.TokenizationFailureError('JS Minimizer')\n\n brace_stack = []\n paren_stack = []\n\n for index, token in enumerate(testcase.tokens):\n if token == '{':\n brace_stack.append(index)\n\n elif token == '}' and brace_stack:\n\n # Two hypotheses for tokens grouped by curly braces:\n # 1) Remove from start of line to open brace and the closing brace.\n # e.g.: if (statement_that_evaluates_to_true) { crash() } -> crash()\n open_brace_index = brace_stack.pop()\n\n # Find the first non-empty token prior to the starting brackets.\n token_before_bracket = step_back_while(\n open_brace_index - 1, (lambda x: not testcase.tokens[x].strip()))\n\n # If that token is a close paren, we need to grab everything else too.\n # Do this to grab the whole paren so we don't create a syntax error by\n # removing only part of a paren.\n if testcase.tokens[token_before_bracket] == ')':\n # Find everything in the paren.\n token_before_bracket = step_back_while(\n token_before_bracket, (lambda x: testcase.tokens[x] != '('))\n\n # Get the token before the paren.\n token_before_bracket -= 1\n token_before_bracket = step_back_while(\n token_before_bracket, (lambda x: not testcase.tokens[x].strip()))\n\n # Walk back to the start of that line as well to get if/else and funcs.\n # Do this after paren to manage situations where there are newlines in\n # the parens.\n token_before_bracket = step_back_while(\n token_before_bracket, (lambda x: testcase.tokens[x] != '\\n'))\n\n token_before_bracket += 1\n\n hypothesis = list(range(token_before_bracket,\n open_brace_index + 1)) + [index]\n\n testcase.prepare_test(hypothesis)\n\n # 2) Remove previous tokens and from the closing brace to the next one.\n # e.g.: try { crash() } catch(e) {} -> crash().\n future_index = len(testcase.tokens)\n open_count = 0\n for future_index in range(index + 1, len(testcase.tokens)):\n if testcase.tokens[future_index] == '{':\n open_count += 1\n if testcase.tokens[future_index] == '}':\n open_count -= 1\n # Make sure to grab entire outer brace if there are inner braces.\n if not open_count:\n break\n if future_index != len(testcase.tokens):\n lookahead_hypothesis = hypothesis + list(\n range(index + 1, future_index + 1))\n\n testcase.prepare_test(lookahead_hypothesis)\n\n elif token == '(':\n paren_stack.append(index)\n\n elif token == ')' and paren_stack:\n # Three hypotheses for tokens grouped by parentheses:\n # 1) Remove the parentheses and the previous token.\n # e.g.: assertTrue(crash()); -> crash()\n previous_end = paren_stack.pop()\n if previous_end > 0:\n hypothesis = [previous_end - 1, previous_end, index]\n testcase.prepare_test(hypothesis)\n\n # 2) Remove everything between the parentheses.\n # e.g. 
crash(junk, more_junk) -> crash()\n if index - previous_end > 1:\n hypothesis = list(range(previous_end + 1, index))\n testcase.prepare_test(hypothesis)\n\n # 3) Like 1, but to start of line instead of previous token.\n # e.g.: leftover_junk = (function() {\n # });\n\n # Find the beginning of the line\n token_before_paren = previous_end\n token_before_paren = step_back_while(\n previous_end, (lambda x: testcase.tokens[x] != '\\n'))\n token_before_paren += 1\n\n hypothesis = list(range(token_before_paren, previous_end + 1)) + [index]\n testcase.prepare_test(hypothesis)\n\n # 4) Like 3, but also from the closing brace to the next one.\n # e.g.: (function(global) { })(this);\n future_index = len(testcase.tokens)\n for future_index in range(index + 1, len(testcase.tokens)):\n if testcase.tokens[future_index] == ')':\n break\n if future_index != len(testcase.tokens):\n lookahead_hypothesis = list(\n range(token_before_paren, future_index + 1))\n testcase.prepare_test(lookahead_hypothesis)\n\n elif token == ',':\n # Two hypotheses for commas:\n # 1) Remove comma and left-hand-side.\n # e.g.: f(whatever, crash()) -> f(crash())\n if index > 0:\n hypothesis = [index - 1, index]\n testcase.prepare_test(hypothesis)\n\n # 2) Remove comma and right-hand-side.\n # e.g.: f(crash(), whatever) -> f(crash())\n\n # Find the next non whitespace token after the comma.\n hypothesis = [index]\n for right_token_index in range(index + 1, len(testcase.tokens)):\n hypothesis.append(right_token_index)\n if testcase.tokens[right_token_index].strip():\n testcase.prepare_test(hypothesis)\n break\n\n testcase.process()\n return testcase\n\n @staticmethod\n def run(data,\n thread_count=minimizer.DEFAULT_THREAD_COUNT,\n file_extension='.js'):\n \"\"\"Attempt to minimize a javascript test case.\"\"\"\n line_minimizer = delta_minimizer.DeltaMinimizer(\n utils.test, max_threads=thread_count, file_extension=file_extension)\n\n js_tokenizer = AntlrTokenizer(JavaScriptLexer)\n\n js_minimizer = JSMinimizer(\n utils.test,\n max_threads=thread_count,\n tokenizer=js_tokenizer.tokenize,\n token_combiner=js_tokenizer.combine,\n file_extension=file_extension)\n\n result = line_minimizer.minimize(data)\n result = js_minimizer(result)\n result = js_minimizer(result)\n result = line_minimizer.minimize(result)\n\n return result\n", "path": "src/python/bot/minimizer/js_minimizer.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Minimizer used for additional reduction on javascript test cases.\"\"\"\nfrom __future__ import absolute_import\n\nfrom builtins import range\n\nfrom . import delta_minimizer\nfrom . import errors\nfrom . import minimizer\nfrom . import utils\n\nfrom bot.tokenizer.antlr_tokenizer import AntlrTokenizer\nfrom bot.tokenizer.grammars.JavaScriptLexer import JavaScriptLexer\n\n\ndef step_back_while(cur_index, condition):\n \"\"\"Helper function. 
Decreases index from cur while condition is satisfied.\"\"\"\n while cur_index >= 0 and condition(cur_index):\n cur_index -= 1\n return cur_index\n\n\nclass JSMinimizer(minimizer.Minimizer):\n \"\"\"Intended as a second-pass minimizer to remove unneeded tokens from JS.\"\"\"\n\n def _execute(self, data):\n testcase = minimizer.Testcase(data, self)\n if not self.validate_tokenizer(data, testcase):\n raise errors.TokenizationFailureError('JS Minimizer')\n\n brace_stack = []\n paren_stack = []\n\n for index, token in enumerate(testcase.tokens):\n if token == '{':\n brace_stack.append(index)\n\n elif token == '}' and brace_stack:\n\n # Two hypotheses for tokens grouped by curly braces:\n # 1) Remove from start of line to open brace and the closing brace.\n # e.g.: if (statement_that_evaluates_to_true) { crash() } -> crash()\n open_brace_index = brace_stack.pop()\n\n # Find the first non-empty token prior to the starting brackets.\n token_before_bracket = step_back_while(\n open_brace_index - 1, (lambda x: not testcase.tokens[x].strip()))\n\n # If that token is a close paren, we need to grab everything else too.\n # Do this to grab the whole paren so we don't create a syntax error by\n # removing only part of a paren.\n if testcase.tokens[token_before_bracket] == ')':\n # Find everything in the paren.\n token_before_bracket = step_back_while(\n token_before_bracket, (lambda x: testcase.tokens[x] != '('))\n\n # Get the token before the paren.\n token_before_bracket -= 1\n token_before_bracket = step_back_while(\n token_before_bracket, (lambda x: not testcase.tokens[x].strip()))\n\n # Walk back to the start of that line as well to get if/else and funcs.\n # Do this after paren to manage situations where there are newlines in\n # the parens.\n token_before_bracket = step_back_while(\n token_before_bracket, (lambda x: testcase.tokens[x] != '\\n'))\n\n token_before_bracket += 1\n\n hypothesis = list(range(token_before_bracket,\n open_brace_index + 1)) + [index]\n\n testcase.prepare_test(hypothesis)\n\n # 2) Remove previous tokens and from the closing brace to the next one.\n # e.g.: try { crash() } catch(e) {} -> crash().\n future_index = len(testcase.tokens)\n open_count = 0\n for future_index in range(index + 1, len(testcase.tokens)):\n if testcase.tokens[future_index] == '{':\n open_count += 1\n if testcase.tokens[future_index] == '}':\n open_count -= 1\n # Make sure to grab entire outer brace if there are inner braces.\n if not open_count:\n break\n if future_index != len(testcase.tokens):\n lookahead_hypothesis = hypothesis + list(\n range(index + 1, future_index + 1))\n\n testcase.prepare_test(lookahead_hypothesis)\n\n elif token == '(':\n paren_stack.append(index)\n\n elif token == ')' and paren_stack:\n # Three hypotheses for tokens grouped by parentheses:\n # 1) Remove the parentheses and the previous token.\n # e.g.: assertTrue(crash()); -> crash()\n previous_end = paren_stack.pop()\n if previous_end > 0:\n hypothesis = [previous_end - 1, previous_end, index]\n testcase.prepare_test(hypothesis)\n\n # 2) Remove everything between the parentheses.\n # e.g. 
crash(junk, more_junk) -> crash()\n if index - previous_end > 1:\n hypothesis = list(range(previous_end + 1, index))\n testcase.prepare_test(hypothesis)\n\n # 3) Like 1, but to start of line instead of previous token.\n # e.g.: leftover_junk = (function() {\n # });\n\n # Find the beginning of the line\n token_before_paren = previous_end\n token_before_paren = step_back_while(\n previous_end, (lambda x: testcase.tokens[x] != '\\n'))\n token_before_paren += 1\n\n hypothesis = list(range(token_before_paren, previous_end + 1)) + [index]\n testcase.prepare_test(hypothesis)\n\n # 4) Like 3, but also from the closing brace to the next one.\n # e.g.: (function(global) { })(this);\n future_index = len(testcase.tokens)\n for future_index in range(index + 1, len(testcase.tokens)):\n if testcase.tokens[future_index] == ')':\n break\n if future_index != len(testcase.tokens):\n lookahead_hypothesis = list(\n range(token_before_paren, future_index + 1))\n testcase.prepare_test(lookahead_hypothesis)\n\n elif token == ',':\n # Two hypotheses for commas:\n # 1) Remove comma and left-hand-side.\n # e.g.: f(whatever, crash()) -> f(crash())\n if index > 0:\n hypothesis = [index - 1, index]\n testcase.prepare_test(hypothesis)\n\n # 2) Remove comma and right-hand-side.\n # e.g.: f(crash(), whatever) -> f(crash())\n\n # Find the next non whitespace token after the comma.\n hypothesis = [index]\n for right_token_index in range(index + 1, len(testcase.tokens)):\n hypothesis.append(right_token_index)\n if testcase.tokens[right_token_index].strip():\n testcase.prepare_test(hypothesis)\n break\n\n testcase.process()\n return testcase\n\n @staticmethod\n def run(data,\n thread_count=minimizer.DEFAULT_THREAD_COUNT,\n file_extension='.js'):\n \"\"\"Attempt to minimize a javascript test case.\"\"\"\n line_minimizer = delta_minimizer.DeltaMinimizer(\n utils.test, max_threads=thread_count, file_extension=file_extension)\n\n js_tokenizer = AntlrTokenizer(JavaScriptLexer)\n\n js_minimizer = JSMinimizer(\n utils.test,\n max_threads=thread_count,\n tokenizer=js_tokenizer.tokenize,\n token_combiner=js_tokenizer.combine,\n file_extension=file_extension)\n\n result = line_minimizer.minimize(data)\n result = js_minimizer.minimize(result)\n result = js_minimizer.minimize(result)\n result = line_minimizer.minimize(result)\n\n return result\n", "path": "src/python/bot/minimizer/js_minimizer.py"}]}
2,597
138
gh_patches_debug_7914
rasdani/github-patches
git_diff
mkdocs__mkdocs-955
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Eliminate duplicate warnings A few recent updates have added warnings for deprecated behavior (#921 & #947). However, the same warnings are issued once for every single page. The more pages in a project, the more warnings are issued. Each warning only needs to be printed to the console once per build. [This](http://stackoverflow.com/a/31953563/866026) may be a potential solution. Will explore when I have some time unless someone beats me to it. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mkdocs/commands/build.py` Content: ``` 1 # coding: utf-8 2 3 from __future__ import unicode_literals 4 from datetime import datetime 5 from calendar import timegm 6 import io 7 import logging 8 import os 9 10 from jinja2.exceptions import TemplateNotFound 11 import jinja2 12 import json 13 14 from mkdocs import nav, search, utils 15 from mkdocs.utils import filters 16 from mkdocs.relative_path_ext import RelativePathExtension 17 import mkdocs 18 19 log = logging.getLogger(__name__) 20 21 22 def convert_markdown(markdown_source, config, site_navigation=None): 23 """ 24 Convert the Markdown source file to HTML as per the config and 25 site_navigation. Return a tuple of the HTML as a string, the parsed table 26 of contents, and a dictionary of any metadata that was specified in the 27 Markdown file. 28 """ 29 30 extensions = [ 31 RelativePathExtension(site_navigation, config['strict']) 32 ] + config['markdown_extensions'] 33 34 return utils.convert_markdown( 35 markdown_source=markdown_source, 36 extensions=extensions, 37 extension_configs=config['mdx_configs'] 38 ) 39 40 41 def get_global_context(nav, config): 42 """ 43 Given the SiteNavigation and config, generate the context which is relevant 44 to app pages. 45 """ 46 47 site_name = config['site_name'] 48 49 if config['site_favicon']: 50 site_favicon = nav.url_context.make_relative('/' + config['site_favicon']) 51 else: 52 site_favicon = None 53 54 page_description = config['site_description'] 55 56 extra_javascript = utils.create_media_urls(nav, config['extra_javascript']) 57 58 extra_css = utils.create_media_urls(nav, config['extra_css']) 59 60 # Support SOURCE_DATE_EPOCH environment variable for "reproducible" builds. 61 # See https://reproducible-builds.org/specs/source-date-epoch/ 62 timestamp = int(os.environ.get('SOURCE_DATE_EPOCH', timegm(datetime.utcnow().utctimetuple()))) 63 64 return { 65 'site_name': site_name, 66 'site_author': config['site_author'], 67 'favicon': site_favicon, 68 'page_description': page_description, 69 70 # Note that there's intentionally repetition here. Rather than simply 71 # provide the config dictionary we instead pass everything explicitly. 72 # 73 # This helps ensure that we can throughly document the context that 74 # gets passed to themes. 
75 'repo_url': config['repo_url'], 76 'repo_name': config['repo_name'], 77 'nav': nav, 78 'base_url': nav.url_context.make_relative('/'), 79 'homepage_url': nav.homepage.url, 80 'site_url': config['site_url'], 81 82 'extra_css': extra_css, 83 'extra_javascript': extra_javascript, 84 85 'include_nav': config['include_nav'], 86 'include_next_prev': config['include_next_prev'], 87 88 'copyright': config['copyright'], 89 'google_analytics': config['google_analytics'], 90 91 'mkdocs_version': mkdocs.__version__, 92 'build_date_utc': datetime.utcfromtimestamp(timestamp), 93 94 'config': config 95 } 96 97 98 def get_page_context(page, content, toc, meta, config): 99 """ 100 Generate the page context by extending the global context and adding page 101 specific variables. 102 """ 103 if config['site_url']: 104 page.set_canonical_url(config['site_url']) 105 106 page.content = content 107 page.toc = toc 108 page.meta = meta 109 110 # TODO: remove the rest in version 1.0 as they are deprecated 111 112 if page.is_homepage or page.title is None: 113 page_title = None 114 else: 115 page_title = page.title 116 117 if page.is_homepage: 118 page_description = config['site_description'] 119 else: 120 page_description = None 121 122 return { 123 'page': page, 124 # TODO: remove the rest in version 1.0 as they are deprecated 125 'page_title': page_title, 126 'page_description': page_description, 127 128 'content': content, 129 'toc': toc, 130 'meta': meta, 131 132 'canonical_url': page.canonical_url, 133 134 'current_page': page, 135 'previous_page': page.previous_page, 136 'next_page': page.next_page 137 } 138 139 140 def build_template(template_name, env, config, site_navigation=None): 141 142 log.debug("Building template: %s", template_name) 143 144 try: 145 template = env.get_template(template_name) 146 except TemplateNotFound: 147 return False 148 149 context = {'page': None} 150 if site_navigation is not None: 151 context.update(get_global_context(site_navigation, config)) 152 153 output_content = template.render(context) 154 output_path = os.path.join(config['site_dir'], template_name) 155 utils.write_file(output_content.encode('utf-8'), output_path) 156 return True 157 158 159 def _build_page(page, config, site_navigation, env, dump_json): 160 161 # Read the input file 162 input_path = os.path.join(config['docs_dir'], page.input_path) 163 164 try: 165 input_content = io.open(input_path, 'r', encoding='utf-8').read() 166 except IOError: 167 log.error('file not found: %s', input_path) 168 raise 169 170 # Process the markdown text 171 html_content, table_of_contents, meta = convert_markdown( 172 markdown_source=input_content, 173 config=config, 174 site_navigation=site_navigation 175 ) 176 177 context = get_global_context(site_navigation, config) 178 context.update(get_page_context( 179 page, html_content, table_of_contents, meta, config 180 )) 181 182 # Allow 'template:' override in md source files. 183 if 'template' in meta: 184 template = env.get_template(meta['template'][0]) 185 else: 186 try: 187 template = env.get_template('main.html') 188 except jinja2.TemplateNotFound: 189 # TODO: Remove this in version 1.0 190 template = env.get_template('base.html') 191 log.warn( 192 "Your theme does not appear to contain a 'main.html' template. " 193 "The 'base.html' template was used instead, which is deprecated. " 194 "Update your theme so that the primary entry point is 'main.html'." 195 ) 196 197 # Render the template. 198 output_content = template.render(context) 199 200 # Write the output file. 
201 output_path = os.path.join(config['site_dir'], page.output_path) 202 if dump_json: 203 json_context = { 204 'content': context['content'], 205 'title': context['current_page'].title, 206 'url': context['current_page'].abs_url, 207 'language': 'en', 208 } 209 json_output = json.dumps(json_context, indent=4).encode('utf-8') 210 utils.write_file(json_output, output_path.replace('.html', '.json')) 211 else: 212 utils.write_file(output_content.encode('utf-8'), output_path) 213 214 return html_content, table_of_contents, meta 215 216 217 def build_extra_templates(extra_templates, config, site_navigation=None): 218 219 log.debug("Building extra_templates page") 220 221 for extra_template in extra_templates: 222 223 input_path = os.path.join(config['docs_dir'], extra_template) 224 225 with io.open(input_path, 'r', encoding='utf-8') as template_file: 226 template = jinja2.Template(template_file.read()) 227 228 context = {'page': None} 229 if site_navigation is not None: 230 context.update(get_global_context(site_navigation, config)) 231 232 output_content = template.render(context) 233 output_path = os.path.join(config['site_dir'], extra_template) 234 utils.write_file(output_content.encode('utf-8'), output_path) 235 236 237 def build_pages(config, dump_json=False): 238 """ 239 Builds all the pages and writes them into the build directory. 240 """ 241 site_navigation = nav.SiteNavigation(config['pages'], config['use_directory_urls']) 242 loader = jinja2.FileSystemLoader(config['theme_dir'] + [config['mkdocs_templates'], ]) 243 env = jinja2.Environment(loader=loader) 244 245 # TODO: remove DeprecationContext in v1.0 when all deprecated vars have been removed 246 from jinja2.runtime import Context 247 deprecated_vars = [ 248 'page_title', 249 'content', 250 'toc', 251 'meta', 252 'current_page', 253 'canonical_url', 254 'previous_page', 255 'next_page' 256 ] 257 258 class DeprecationContext(Context): 259 def resolve(self, key): 260 """ Log a warning when acessing any deprecated variable name. """ 261 if key in deprecated_vars: 262 replacement = "page" if key == 'current_page' else "page.{0}".format(key) 263 log.warn( 264 "Template variable warning: '{0}' is being deprecated and will not be " 265 "available in a future version. Use '{1}' instead.".format(key, replacement) 266 ) 267 return super(DeprecationContext, self).resolve(key) 268 269 env.context_class = DeprecationContext 270 # TODO: end remove DeprecationContext 271 272 env.filters['tojson'] = filters.tojson 273 search_index = search.SearchIndex() 274 275 build_template('404.html', env, config, site_navigation) 276 277 if not build_template('search.html', env, config, site_navigation): 278 log.debug("Search is enabled but the theme doesn't contain a " 279 "search.html file. 
Assuming the theme implements search " 280 "within a modal.") 281 282 build_template('sitemap.xml', env, config, site_navigation) 283 284 build_extra_templates(config['extra_templates'], config, site_navigation) 285 286 for page in site_navigation.walk_pages(): 287 288 try: 289 log.debug("Building page %s", page.input_path) 290 build_result = _build_page(page, config, site_navigation, env, 291 dump_json) 292 html_content, table_of_contents, _ = build_result 293 search_index.add_entry_from_context( 294 page, html_content, table_of_contents) 295 except Exception: 296 log.error("Error building page %s", page.input_path) 297 raise 298 299 search_index = search_index.generate_search_index() 300 json_output_path = os.path.join(config['site_dir'], 'mkdocs', 'search_index.json') 301 utils.write_file(search_index.encode('utf-8'), json_output_path) 302 303 304 def build(config, live_server=False, dump_json=False, clean_site_dir=False): 305 """ 306 Perform a full site build. 307 """ 308 if clean_site_dir: 309 log.info("Cleaning site directory") 310 utils.clean_directory(config['site_dir']) 311 if not live_server: 312 log.info("Building documentation to directory: %s", config['site_dir']) 313 if not clean_site_dir and site_directory_contains_stale_files(config['site_dir']): 314 log.info("The directory contains stale files. Use --clean to remove them.") 315 316 if dump_json: 317 build_pages(config, dump_json=True) 318 return 319 320 # Reversed as we want to take the media files from the builtin theme 321 # and then from the custom theme_dir so that the custom versions take 322 # precedence. 323 for theme_dir in reversed(config['theme_dir']): 324 log.debug("Copying static assets from theme: %s", theme_dir) 325 utils.copy_media_files( 326 theme_dir, config['site_dir'], exclude=['*.py', '*.pyc', '*.html'] 327 ) 328 329 log.debug("Copying static assets from the docs dir.") 330 utils.copy_media_files(config['docs_dir'], config['site_dir']) 331 332 log.debug("Building markdown pages.") 333 build_pages(config) 334 335 336 def site_directory_contains_stale_files(site_directory): 337 """ 338 Check if the site directory contains stale files from a previous build. 339 Right now the check returns true if the directory is not empty. 340 A more sophisticated approach should be found to trigger only if there are 341 files that won't be overwritten anyway. 342 """ 343 if os.path.exists(site_directory): 344 if os.listdir(site_directory): 345 return True 346 return False 347 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mkdocs/commands/build.py b/mkdocs/commands/build.py --- a/mkdocs/commands/build.py +++ b/mkdocs/commands/build.py @@ -16,7 +16,20 @@ from mkdocs.relative_path_ext import RelativePathExtension import mkdocs + +class DuplicateFilter(object): + ''' Avoid logging duplicate messages. ''' + def __init__(self): + self.msgs = set() + + def filter(self, record): + rv = record.msg not in self.msgs + self.msgs.add(record.msg) + return rv + + log = logging.getLogger(__name__) +log.addFilter(DuplicateFilter()) def convert_markdown(markdown_source, config, site_navigation=None):
{"golden_diff": "diff --git a/mkdocs/commands/build.py b/mkdocs/commands/build.py\n--- a/mkdocs/commands/build.py\n+++ b/mkdocs/commands/build.py\n@@ -16,7 +16,20 @@\n from mkdocs.relative_path_ext import RelativePathExtension\n import mkdocs\n \n+\n+class DuplicateFilter(object):\n+ ''' Avoid logging duplicate messages. '''\n+ def __init__(self):\n+ self.msgs = set()\n+\n+ def filter(self, record):\n+ rv = record.msg not in self.msgs\n+ self.msgs.add(record.msg)\n+ return rv\n+\n+\n log = logging.getLogger(__name__)\n+log.addFilter(DuplicateFilter())\n \n \n def convert_markdown(markdown_source, config, site_navigation=None):\n", "issue": "Eliminate duplicate warnings\nA few recent updates have added warnings for deprecated behavior (#921 & #947). However, the same warnings are issued once for every single page. The more pages in a project, the more warnings are issued. Each warning only needs to be printed to the console once per build. \n\n[This](http://stackoverflow.com/a/31953563/866026) may be a potential solution. Will explore when I have some time unless someone beats me to it.\n\n", "before_files": [{"content": "# coding: utf-8\n\nfrom __future__ import unicode_literals\nfrom datetime import datetime\nfrom calendar import timegm\nimport io\nimport logging\nimport os\n\nfrom jinja2.exceptions import TemplateNotFound\nimport jinja2\nimport json\n\nfrom mkdocs import nav, search, utils\nfrom mkdocs.utils import filters\nfrom mkdocs.relative_path_ext import RelativePathExtension\nimport mkdocs\n\nlog = logging.getLogger(__name__)\n\n\ndef convert_markdown(markdown_source, config, site_navigation=None):\n \"\"\"\n Convert the Markdown source file to HTML as per the config and\n site_navigation. Return a tuple of the HTML as a string, the parsed table\n of contents, and a dictionary of any metadata that was specified in the\n Markdown file.\n \"\"\"\n\n extensions = [\n RelativePathExtension(site_navigation, config['strict'])\n ] + config['markdown_extensions']\n\n return utils.convert_markdown(\n markdown_source=markdown_source,\n extensions=extensions,\n extension_configs=config['mdx_configs']\n )\n\n\ndef get_global_context(nav, config):\n \"\"\"\n Given the SiteNavigation and config, generate the context which is relevant\n to app pages.\n \"\"\"\n\n site_name = config['site_name']\n\n if config['site_favicon']:\n site_favicon = nav.url_context.make_relative('/' + config['site_favicon'])\n else:\n site_favicon = None\n\n page_description = config['site_description']\n\n extra_javascript = utils.create_media_urls(nav, config['extra_javascript'])\n\n extra_css = utils.create_media_urls(nav, config['extra_css'])\n\n # Support SOURCE_DATE_EPOCH environment variable for \"reproducible\" builds.\n # See https://reproducible-builds.org/specs/source-date-epoch/\n timestamp = int(os.environ.get('SOURCE_DATE_EPOCH', timegm(datetime.utcnow().utctimetuple())))\n\n return {\n 'site_name': site_name,\n 'site_author': config['site_author'],\n 'favicon': site_favicon,\n 'page_description': page_description,\n\n # Note that there's intentionally repetition here. 
Rather than simply\n # provide the config dictionary we instead pass everything explicitly.\n #\n # This helps ensure that we can throughly document the context that\n # gets passed to themes.\n 'repo_url': config['repo_url'],\n 'repo_name': config['repo_name'],\n 'nav': nav,\n 'base_url': nav.url_context.make_relative('/'),\n 'homepage_url': nav.homepage.url,\n 'site_url': config['site_url'],\n\n 'extra_css': extra_css,\n 'extra_javascript': extra_javascript,\n\n 'include_nav': config['include_nav'],\n 'include_next_prev': config['include_next_prev'],\n\n 'copyright': config['copyright'],\n 'google_analytics': config['google_analytics'],\n\n 'mkdocs_version': mkdocs.__version__,\n 'build_date_utc': datetime.utcfromtimestamp(timestamp),\n\n 'config': config\n }\n\n\ndef get_page_context(page, content, toc, meta, config):\n \"\"\"\n Generate the page context by extending the global context and adding page\n specific variables.\n \"\"\"\n if config['site_url']:\n page.set_canonical_url(config['site_url'])\n\n page.content = content\n page.toc = toc\n page.meta = meta\n\n # TODO: remove the rest in version 1.0 as they are deprecated\n\n if page.is_homepage or page.title is None:\n page_title = None\n else:\n page_title = page.title\n\n if page.is_homepage:\n page_description = config['site_description']\n else:\n page_description = None\n\n return {\n 'page': page,\n # TODO: remove the rest in version 1.0 as they are deprecated\n 'page_title': page_title,\n 'page_description': page_description,\n\n 'content': content,\n 'toc': toc,\n 'meta': meta,\n\n 'canonical_url': page.canonical_url,\n\n 'current_page': page,\n 'previous_page': page.previous_page,\n 'next_page': page.next_page\n }\n\n\ndef build_template(template_name, env, config, site_navigation=None):\n\n log.debug(\"Building template: %s\", template_name)\n\n try:\n template = env.get_template(template_name)\n except TemplateNotFound:\n return False\n\n context = {'page': None}\n if site_navigation is not None:\n context.update(get_global_context(site_navigation, config))\n\n output_content = template.render(context)\n output_path = os.path.join(config['site_dir'], template_name)\n utils.write_file(output_content.encode('utf-8'), output_path)\n return True\n\n\ndef _build_page(page, config, site_navigation, env, dump_json):\n\n # Read the input file\n input_path = os.path.join(config['docs_dir'], page.input_path)\n\n try:\n input_content = io.open(input_path, 'r', encoding='utf-8').read()\n except IOError:\n log.error('file not found: %s', input_path)\n raise\n\n # Process the markdown text\n html_content, table_of_contents, meta = convert_markdown(\n markdown_source=input_content,\n config=config,\n site_navigation=site_navigation\n )\n\n context = get_global_context(site_navigation, config)\n context.update(get_page_context(\n page, html_content, table_of_contents, meta, config\n ))\n\n # Allow 'template:' override in md source files.\n if 'template' in meta:\n template = env.get_template(meta['template'][0])\n else:\n try:\n template = env.get_template('main.html')\n except jinja2.TemplateNotFound:\n # TODO: Remove this in version 1.0\n template = env.get_template('base.html')\n log.warn(\n \"Your theme does not appear to contain a 'main.html' template. \"\n \"The 'base.html' template was used instead, which is deprecated. 
\"\n \"Update your theme so that the primary entry point is 'main.html'.\"\n )\n\n # Render the template.\n output_content = template.render(context)\n\n # Write the output file.\n output_path = os.path.join(config['site_dir'], page.output_path)\n if dump_json:\n json_context = {\n 'content': context['content'],\n 'title': context['current_page'].title,\n 'url': context['current_page'].abs_url,\n 'language': 'en',\n }\n json_output = json.dumps(json_context, indent=4).encode('utf-8')\n utils.write_file(json_output, output_path.replace('.html', '.json'))\n else:\n utils.write_file(output_content.encode('utf-8'), output_path)\n\n return html_content, table_of_contents, meta\n\n\ndef build_extra_templates(extra_templates, config, site_navigation=None):\n\n log.debug(\"Building extra_templates page\")\n\n for extra_template in extra_templates:\n\n input_path = os.path.join(config['docs_dir'], extra_template)\n\n with io.open(input_path, 'r', encoding='utf-8') as template_file:\n template = jinja2.Template(template_file.read())\n\n context = {'page': None}\n if site_navigation is not None:\n context.update(get_global_context(site_navigation, config))\n\n output_content = template.render(context)\n output_path = os.path.join(config['site_dir'], extra_template)\n utils.write_file(output_content.encode('utf-8'), output_path)\n\n\ndef build_pages(config, dump_json=False):\n \"\"\"\n Builds all the pages and writes them into the build directory.\n \"\"\"\n site_navigation = nav.SiteNavigation(config['pages'], config['use_directory_urls'])\n loader = jinja2.FileSystemLoader(config['theme_dir'] + [config['mkdocs_templates'], ])\n env = jinja2.Environment(loader=loader)\n\n # TODO: remove DeprecationContext in v1.0 when all deprecated vars have been removed\n from jinja2.runtime import Context\n deprecated_vars = [\n 'page_title',\n 'content',\n 'toc',\n 'meta',\n 'current_page',\n 'canonical_url',\n 'previous_page',\n 'next_page'\n ]\n\n class DeprecationContext(Context):\n def resolve(self, key):\n \"\"\" Log a warning when acessing any deprecated variable name. \"\"\"\n if key in deprecated_vars:\n replacement = \"page\" if key == 'current_page' else \"page.{0}\".format(key)\n log.warn(\n \"Template variable warning: '{0}' is being deprecated and will not be \"\n \"available in a future version. Use '{1}' instead.\".format(key, replacement)\n )\n return super(DeprecationContext, self).resolve(key)\n\n env.context_class = DeprecationContext\n # TODO: end remove DeprecationContext\n\n env.filters['tojson'] = filters.tojson\n search_index = search.SearchIndex()\n\n build_template('404.html', env, config, site_navigation)\n\n if not build_template('search.html', env, config, site_navigation):\n log.debug(\"Search is enabled but the theme doesn't contain a \"\n \"search.html file. 
Assuming the theme implements search \"\n \"within a modal.\")\n\n build_template('sitemap.xml', env, config, site_navigation)\n\n build_extra_templates(config['extra_templates'], config, site_navigation)\n\n for page in site_navigation.walk_pages():\n\n try:\n log.debug(\"Building page %s\", page.input_path)\n build_result = _build_page(page, config, site_navigation, env,\n dump_json)\n html_content, table_of_contents, _ = build_result\n search_index.add_entry_from_context(\n page, html_content, table_of_contents)\n except Exception:\n log.error(\"Error building page %s\", page.input_path)\n raise\n\n search_index = search_index.generate_search_index()\n json_output_path = os.path.join(config['site_dir'], 'mkdocs', 'search_index.json')\n utils.write_file(search_index.encode('utf-8'), json_output_path)\n\n\ndef build(config, live_server=False, dump_json=False, clean_site_dir=False):\n \"\"\"\n Perform a full site build.\n \"\"\"\n if clean_site_dir:\n log.info(\"Cleaning site directory\")\n utils.clean_directory(config['site_dir'])\n if not live_server:\n log.info(\"Building documentation to directory: %s\", config['site_dir'])\n if not clean_site_dir and site_directory_contains_stale_files(config['site_dir']):\n log.info(\"The directory contains stale files. Use --clean to remove them.\")\n\n if dump_json:\n build_pages(config, dump_json=True)\n return\n\n # Reversed as we want to take the media files from the builtin theme\n # and then from the custom theme_dir so that the custom versions take\n # precedence.\n for theme_dir in reversed(config['theme_dir']):\n log.debug(\"Copying static assets from theme: %s\", theme_dir)\n utils.copy_media_files(\n theme_dir, config['site_dir'], exclude=['*.py', '*.pyc', '*.html']\n )\n\n log.debug(\"Copying static assets from the docs dir.\")\n utils.copy_media_files(config['docs_dir'], config['site_dir'])\n\n log.debug(\"Building markdown pages.\")\n build_pages(config)\n\n\ndef site_directory_contains_stale_files(site_directory):\n \"\"\"\n Check if the site directory contains stale files from a previous build.\n Right now the check returns true if the directory is not empty.\n A more sophisticated approach should be found to trigger only if there are\n files that won't be overwritten anyway.\n \"\"\"\n if os.path.exists(site_directory):\n if os.listdir(site_directory):\n return True\n return False\n", "path": "mkdocs/commands/build.py"}], "after_files": [{"content": "# coding: utf-8\n\nfrom __future__ import unicode_literals\nfrom datetime import datetime\nfrom calendar import timegm\nimport io\nimport logging\nimport os\n\nfrom jinja2.exceptions import TemplateNotFound\nimport jinja2\nimport json\n\nfrom mkdocs import nav, search, utils\nfrom mkdocs.utils import filters\nfrom mkdocs.relative_path_ext import RelativePathExtension\nimport mkdocs\n\n\nclass DuplicateFilter(object):\n ''' Avoid logging duplicate messages. '''\n def __init__(self):\n self.msgs = set()\n\n def filter(self, record):\n rv = record.msg not in self.msgs\n self.msgs.add(record.msg)\n return rv\n\n\nlog = logging.getLogger(__name__)\nlog.addFilter(DuplicateFilter())\n\n\ndef convert_markdown(markdown_source, config, site_navigation=None):\n \"\"\"\n Convert the Markdown source file to HTML as per the config and\n site_navigation. 
Return a tuple of the HTML as a string, the parsed table\n of contents, and a dictionary of any metadata that was specified in the\n Markdown file.\n \"\"\"\n\n extensions = [\n RelativePathExtension(site_navigation, config['strict'])\n ] + config['markdown_extensions']\n\n return utils.convert_markdown(\n markdown_source=markdown_source,\n extensions=extensions,\n extension_configs=config['mdx_configs']\n )\n\n\ndef get_global_context(nav, config):\n \"\"\"\n Given the SiteNavigation and config, generate the context which is relevant\n to app pages.\n \"\"\"\n\n site_name = config['site_name']\n\n if config['site_favicon']:\n site_favicon = nav.url_context.make_relative('/' + config['site_favicon'])\n else:\n site_favicon = None\n\n page_description = config['site_description']\n\n extra_javascript = utils.create_media_urls(nav, config['extra_javascript'])\n\n extra_css = utils.create_media_urls(nav, config['extra_css'])\n\n # Support SOURCE_DATE_EPOCH environment variable for \"reproducible\" builds.\n # See https://reproducible-builds.org/specs/source-date-epoch/\n timestamp = int(os.environ.get('SOURCE_DATE_EPOCH', timegm(datetime.utcnow().utctimetuple())))\n\n return {\n 'site_name': site_name,\n 'site_author': config['site_author'],\n 'favicon': site_favicon,\n 'page_description': page_description,\n\n # Note that there's intentionally repetition here. Rather than simply\n # provide the config dictionary we instead pass everything explicitly.\n #\n # This helps ensure that we can throughly document the context that\n # gets passed to themes.\n 'repo_url': config['repo_url'],\n 'repo_name': config['repo_name'],\n 'nav': nav,\n 'base_url': nav.url_context.make_relative('/'),\n 'homepage_url': nav.homepage.url,\n 'site_url': config['site_url'],\n\n 'extra_css': extra_css,\n 'extra_javascript': extra_javascript,\n\n 'include_nav': config['include_nav'],\n 'include_next_prev': config['include_next_prev'],\n\n 'copyright': config['copyright'],\n 'google_analytics': config['google_analytics'],\n\n 'mkdocs_version': mkdocs.__version__,\n 'build_date_utc': datetime.utcfromtimestamp(timestamp),\n\n 'config': config\n }\n\n\ndef get_page_context(page, content, toc, meta, config):\n \"\"\"\n Generate the page context by extending the global context and adding page\n specific variables.\n \"\"\"\n if config['site_url']:\n page.set_canonical_url(config['site_url'])\n\n page.content = content\n page.toc = toc\n page.meta = meta\n\n # TODO: remove the rest in version 1.0 as they are deprecated\n\n if page.is_homepage or page.title is None:\n page_title = None\n else:\n page_title = page.title\n\n if page.is_homepage:\n page_description = config['site_description']\n else:\n page_description = None\n\n return {\n 'page': page,\n # TODO: remove the rest in version 1.0 as they are deprecated\n 'page_title': page_title,\n 'page_description': page_description,\n\n 'content': content,\n 'toc': toc,\n 'meta': meta,\n\n 'canonical_url': page.canonical_url,\n\n 'current_page': page,\n 'previous_page': page.previous_page,\n 'next_page': page.next_page\n }\n\n\ndef build_template(template_name, env, config, site_navigation=None):\n\n log.debug(\"Building template: %s\", template_name)\n\n try:\n template = env.get_template(template_name)\n except TemplateNotFound:\n return False\n\n context = {'page': None}\n if site_navigation is not None:\n context.update(get_global_context(site_navigation, config))\n\n output_content = template.render(context)\n output_path = os.path.join(config['site_dir'], 
template_name)\n utils.write_file(output_content.encode('utf-8'), output_path)\n return True\n\n\ndef _build_page(page, config, site_navigation, env, dump_json):\n\n # Read the input file\n input_path = os.path.join(config['docs_dir'], page.input_path)\n\n try:\n input_content = io.open(input_path, 'r', encoding='utf-8').read()\n except IOError:\n log.error('file not found: %s', input_path)\n raise\n\n # Process the markdown text\n html_content, table_of_contents, meta = convert_markdown(\n markdown_source=input_content,\n config=config,\n site_navigation=site_navigation\n )\n\n context = get_global_context(site_navigation, config)\n context.update(get_page_context(\n page, html_content, table_of_contents, meta, config\n ))\n\n # Allow 'template:' override in md source files.\n if 'template' in meta:\n template = env.get_template(meta['template'][0])\n else:\n try:\n template = env.get_template('main.html')\n except jinja2.TemplateNotFound:\n # TODO: Remove this in version 1.0\n template = env.get_template('base.html')\n log.warn(\n \"Your theme does not appear to contain a 'main.html' template. \"\n \"The 'base.html' template was used instead, which is deprecated. \"\n \"Update your theme so that the primary entry point is 'main.html'.\"\n )\n\n # Render the template.\n output_content = template.render(context)\n\n # Write the output file.\n output_path = os.path.join(config['site_dir'], page.output_path)\n if dump_json:\n json_context = {\n 'content': context['content'],\n 'title': context['current_page'].title,\n 'url': context['current_page'].abs_url,\n 'language': 'en',\n }\n json_output = json.dumps(json_context, indent=4).encode('utf-8')\n utils.write_file(json_output, output_path.replace('.html', '.json'))\n else:\n utils.write_file(output_content.encode('utf-8'), output_path)\n\n return html_content, table_of_contents, meta\n\n\ndef build_extra_templates(extra_templates, config, site_navigation=None):\n\n log.debug(\"Building extra_templates page\")\n\n for extra_template in extra_templates:\n\n input_path = os.path.join(config['docs_dir'], extra_template)\n\n with io.open(input_path, 'r', encoding='utf-8') as template_file:\n template = jinja2.Template(template_file.read())\n\n context = {'page': None}\n if site_navigation is not None:\n context.update(get_global_context(site_navigation, config))\n\n output_content = template.render(context)\n output_path = os.path.join(config['site_dir'], extra_template)\n utils.write_file(output_content.encode('utf-8'), output_path)\n\n\ndef build_pages(config, dump_json=False):\n \"\"\"\n Builds all the pages and writes them into the build directory.\n \"\"\"\n site_navigation = nav.SiteNavigation(config['pages'], config['use_directory_urls'])\n loader = jinja2.FileSystemLoader(config['theme_dir'] + [config['mkdocs_templates'], ])\n env = jinja2.Environment(loader=loader)\n\n # TODO: remove DeprecationContext in v1.0 when all deprecated vars have been removed\n from jinja2.runtime import Context\n deprecated_vars = [\n 'page_title',\n 'content',\n 'toc',\n 'meta',\n 'current_page',\n 'canonical_url',\n 'previous_page',\n 'next_page'\n ]\n\n class DeprecationContext(Context):\n def resolve(self, key):\n \"\"\" Log a warning when acessing any deprecated variable name. \"\"\"\n if key in deprecated_vars:\n replacement = \"page\" if key == 'current_page' else \"page.{0}\".format(key)\n log.warn(\n \"Template variable warning: '{0}' is being deprecated and will not be \"\n \"available in a future version. 
Use '{1}' instead.\".format(key, replacement)\n )\n return super(DeprecationContext, self).resolve(key)\n\n env.context_class = DeprecationContext\n # TODO: end remove DeprecationContext\n\n env.filters['tojson'] = filters.tojson\n search_index = search.SearchIndex()\n\n build_template('404.html', env, config, site_navigation)\n\n if not build_template('search.html', env, config, site_navigation):\n log.debug(\"Search is enabled but the theme doesn't contain a \"\n \"search.html file. Assuming the theme implements search \"\n \"within a modal.\")\n\n build_template('sitemap.xml', env, config, site_navigation)\n\n build_extra_templates(config['extra_templates'], config, site_navigation)\n\n for page in site_navigation.walk_pages():\n\n try:\n log.debug(\"Building page %s\", page.input_path)\n build_result = _build_page(page, config, site_navigation, env,\n dump_json)\n html_content, table_of_contents, _ = build_result\n search_index.add_entry_from_context(\n page, html_content, table_of_contents)\n except Exception:\n log.error(\"Error building page %s\", page.input_path)\n raise\n\n search_index = search_index.generate_search_index()\n json_output_path = os.path.join(config['site_dir'], 'mkdocs', 'search_index.json')\n utils.write_file(search_index.encode('utf-8'), json_output_path)\n\n\ndef build(config, live_server=False, dump_json=False, clean_site_dir=False):\n \"\"\"\n Perform a full site build.\n \"\"\"\n if clean_site_dir:\n log.info(\"Cleaning site directory\")\n utils.clean_directory(config['site_dir'])\n if not live_server:\n log.info(\"Building documentation to directory: %s\", config['site_dir'])\n if not clean_site_dir and site_directory_contains_stale_files(config['site_dir']):\n log.info(\"The directory contains stale files. Use --clean to remove them.\")\n\n if dump_json:\n build_pages(config, dump_json=True)\n return\n\n # Reversed as we want to take the media files from the builtin theme\n # and then from the custom theme_dir so that the custom versions take\n # precedence.\n for theme_dir in reversed(config['theme_dir']):\n log.debug(\"Copying static assets from theme: %s\", theme_dir)\n utils.copy_media_files(\n theme_dir, config['site_dir'], exclude=['*.py', '*.pyc', '*.html']\n )\n\n log.debug(\"Copying static assets from the docs dir.\")\n utils.copy_media_files(config['docs_dir'], config['site_dir'])\n\n log.debug(\"Building markdown pages.\")\n build_pages(config)\n\n\ndef site_directory_contains_stale_files(site_directory):\n \"\"\"\n Check if the site directory contains stale files from a previous build.\n Right now the check returns true if the directory is not empty.\n A more sophisticated approach should be found to trigger only if there are\n files that won't be overwritten anyway.\n \"\"\"\n if os.path.exists(site_directory):\n if os.listdir(site_directory):\n return True\n return False\n", "path": "mkdocs/commands/build.py"}]}
3,845
167
gh_patches_debug_31683
rasdani/github-patches
git_diff
ivy-llc__ivy-19089
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- bernoulli --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/backends/paddle/experimental/random.py` Content: ``` 1 # global 2 from typing import Optional, Union, Sequence 3 import paddle 4 5 from ivy import with_unsupported_device_and_dtypes 6 from ivy.functional.backends.paddle import backend_version 7 from ivy.utils.exceptions import IvyNotImplementedException 8 9 # local 10 import ivy 11 from paddle.device import core 12 13 # dirichlet 14 15 16 @with_unsupported_device_and_dtypes( 17 { 18 "2.5.1 and below": { 19 "cpu": ( 20 "int8", 21 "int16", 22 "uint8", 23 "float16", 24 "complex64", 25 "complex128", 26 "bool", 27 ) 28 } 29 }, 30 backend_version, 31 ) 32 def dirichlet( 33 alpha: Union[paddle.Tensor, float, Sequence[float]], 34 /, 35 *, 36 size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None, 37 out: Optional[paddle.Tensor] = None, 38 seed: Optional[int] = None, 39 dtype: Optional[paddle.dtype] = None, 40 ) -> paddle.Tensor: 41 size = size if size is not None else len(alpha) 42 dtype = dtype if dtype is not None else paddle.float64 43 if seed is not None: 44 paddle.seed(seed) 45 res = paddle.to_tensor( 46 paddle.distribution.Dirichlet(concentration=alpha).sample(shape=size), 47 dtype=dtype, 48 ) 49 return res 50 51 52 def beta( 53 alpha: Union[float, paddle.Tensor], 54 beta: Union[float, paddle.Tensor], 55 /, 56 *, 57 shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None, 58 dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None, 59 device: core.Place = None, 60 seed: Optional[int] = None, 61 out: Optional[paddle.Tensor] = None, 62 ) -> paddle.Tensor: 63 raise IvyNotImplementedException() 64 65 66 def gamma( 67 alpha: Union[float, paddle.Tensor], 68 beta: Union[float, paddle.Tensor], 69 /, 70 *, 71 shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None, 72 dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None, 73 device: core.Place = None, 74 seed: Optional[int] = None, 75 out: Optional[paddle.Tensor] = None, 76 ) -> paddle.Tensor: 77 raise IvyNotImplementedException() 78 79 80 def poisson( 81 lam: Union[float, paddle.Tensor], 82 *, 83 shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None, 84 device: core.Place, 85 dtype: paddle.dtype, 86 seed: Optional[int] = None, 87 fill_value: Optional[Union[float, int]] = 0, 88 out: Optional[paddle.Tensor] = None, 89 ): 90 raise IvyNotImplementedException() 91 92 93 def bernoulli( 94 probs: Union[float, paddle.Tensor], 95 *, 96 logits: Union[float, paddle.Tensor] = None, 97 shape: Optional[Union[ivy.NativeArray, Sequence[int]]] = None, 98 device: core.Place, 99 dtype: paddle.dtype, 100 seed: Optional[int] = None, 101 out: Optional[paddle.Tensor] = None, 102 ) -> paddle.Tensor: 103 raise IvyNotImplementedException() 104 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/backends/paddle/experimental/random.py b/ivy/functional/backends/paddle/experimental/random.py --- a/ivy/functional/backends/paddle/experimental/random.py +++ b/ivy/functional/backends/paddle/experimental/random.py @@ -1,7 +1,7 @@ # global from typing import Optional, Union, Sequence import paddle - +from ivy.functional.backends.paddle.device import to_device from ivy import with_unsupported_device_and_dtypes from ivy.functional.backends.paddle import backend_version from ivy.utils.exceptions import IvyNotImplementedException @@ -9,6 +9,7 @@ # local import ivy from paddle.device import core +from ivy import with_supported_device_and_dtypes # dirichlet @@ -90,6 +91,23 @@ raise IvyNotImplementedException() +# bernoulli +@with_supported_device_and_dtypes( + { + "2.5.0 and above": { + "cpu": ("float32", "float64"), + "gpu": ("bfloat16", "float16", "float32", "float64"), + }, + "2.4.2 and below": { + "cpu": ( + "float32", + "float64", + ), + "gpu": ("float16", "float32", "float64"), + }, + }, + backend_version, +) def bernoulli( probs: Union[float, paddle.Tensor], *, @@ -100,4 +118,14 @@ seed: Optional[int] = None, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: - raise IvyNotImplementedException() + if seed is not None: + paddle.seed(seed) + if probs is not None: + probs = probs + elif logits is not None: + probs = ivy.softmax(logits) + probs = paddle.cast(probs, dtype) + probs = paddle.unsqueeze(probs, 0) if len(probs.shape) == 0 else probs + probs = paddle.maximum(probs, paddle.full_like(probs, 1e-6)) + sample = paddle.bernoulli(probs) + return to_device(sample, device)
{"golden_diff": "diff --git a/ivy/functional/backends/paddle/experimental/random.py b/ivy/functional/backends/paddle/experimental/random.py\n--- a/ivy/functional/backends/paddle/experimental/random.py\n+++ b/ivy/functional/backends/paddle/experimental/random.py\n@@ -1,7 +1,7 @@\n # global\n from typing import Optional, Union, Sequence\n import paddle\n-\n+from ivy.functional.backends.paddle.device import to_device\n from ivy import with_unsupported_device_and_dtypes\n from ivy.functional.backends.paddle import backend_version\n from ivy.utils.exceptions import IvyNotImplementedException\n@@ -9,6 +9,7 @@\n # local\n import ivy\n from paddle.device import core\n+from ivy import with_supported_device_and_dtypes\n \n # dirichlet\n \n@@ -90,6 +91,23 @@\n raise IvyNotImplementedException()\n \n \n+# bernoulli\n+@with_supported_device_and_dtypes(\n+ {\n+ \"2.5.0 and above\": {\n+ \"cpu\": (\"float32\", \"float64\"),\n+ \"gpu\": (\"bfloat16\", \"float16\", \"float32\", \"float64\"),\n+ },\n+ \"2.4.2 and below\": {\n+ \"cpu\": (\n+ \"float32\",\n+ \"float64\",\n+ ),\n+ \"gpu\": (\"float16\", \"float32\", \"float64\"),\n+ },\n+ },\n+ backend_version,\n+)\n def bernoulli(\n probs: Union[float, paddle.Tensor],\n *,\n@@ -100,4 +118,14 @@\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n ) -> paddle.Tensor:\n- raise IvyNotImplementedException()\n+ if seed is not None:\n+ paddle.seed(seed)\n+ if probs is not None:\n+ probs = probs\n+ elif logits is not None:\n+ probs = ivy.softmax(logits)\n+ probs = paddle.cast(probs, dtype)\n+ probs = paddle.unsqueeze(probs, 0) if len(probs.shape) == 0 else probs\n+ probs = paddle.maximum(probs, paddle.full_like(probs, 1e-6))\n+ sample = paddle.bernoulli(probs)\n+ return to_device(sample, device)\n", "issue": "bernoulli\n\n", "before_files": [{"content": "# global\nfrom typing import Optional, Union, Sequence\nimport paddle\n\nfrom ivy import with_unsupported_device_and_dtypes\nfrom ivy.functional.backends.paddle import backend_version\nfrom ivy.utils.exceptions import IvyNotImplementedException\n\n# local\nimport ivy\nfrom paddle.device import core\n\n# dirichlet\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.5.1 and below\": {\n \"cpu\": (\n \"int8\",\n \"int16\",\n \"uint8\",\n \"float16\",\n \"complex64\",\n \"complex128\",\n \"bool\",\n )\n }\n },\n backend_version,\n)\ndef dirichlet(\n alpha: Union[paddle.Tensor, float, Sequence[float]],\n /,\n *,\n size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n out: Optional[paddle.Tensor] = None,\n seed: Optional[int] = None,\n dtype: Optional[paddle.dtype] = None,\n) -> paddle.Tensor:\n size = size if size is not None else len(alpha)\n dtype = dtype if dtype is not None else paddle.float64\n if seed is not None:\n paddle.seed(seed)\n res = paddle.to_tensor(\n paddle.distribution.Dirichlet(concentration=alpha).sample(shape=size),\n dtype=dtype,\n )\n return res\n\n\ndef beta(\n alpha: Union[float, paddle.Tensor],\n beta: Union[float, paddle.Tensor],\n /,\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,\n device: core.Place = None,\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\ndef gamma(\n alpha: Union[float, paddle.Tensor],\n beta: Union[float, paddle.Tensor],\n /,\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,\n device: core.Place = None,\n 
seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\ndef poisson(\n lam: Union[float, paddle.Tensor],\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n device: core.Place,\n dtype: paddle.dtype,\n seed: Optional[int] = None,\n fill_value: Optional[Union[float, int]] = 0,\n out: Optional[paddle.Tensor] = None,\n):\n raise IvyNotImplementedException()\n\n\ndef bernoulli(\n probs: Union[float, paddle.Tensor],\n *,\n logits: Union[float, paddle.Tensor] = None,\n shape: Optional[Union[ivy.NativeArray, Sequence[int]]] = None,\n device: core.Place,\n dtype: paddle.dtype,\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n", "path": "ivy/functional/backends/paddle/experimental/random.py"}], "after_files": [{"content": "# global\nfrom typing import Optional, Union, Sequence\nimport paddle\nfrom ivy.functional.backends.paddle.device import to_device\nfrom ivy import with_unsupported_device_and_dtypes\nfrom ivy.functional.backends.paddle import backend_version\nfrom ivy.utils.exceptions import IvyNotImplementedException\n\n# local\nimport ivy\nfrom paddle.device import core\nfrom ivy import with_supported_device_and_dtypes\n\n# dirichlet\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.5.1 and below\": {\n \"cpu\": (\n \"int8\",\n \"int16\",\n \"uint8\",\n \"float16\",\n \"complex64\",\n \"complex128\",\n \"bool\",\n )\n }\n },\n backend_version,\n)\ndef dirichlet(\n alpha: Union[paddle.Tensor, float, Sequence[float]],\n /,\n *,\n size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n out: Optional[paddle.Tensor] = None,\n seed: Optional[int] = None,\n dtype: Optional[paddle.dtype] = None,\n) -> paddle.Tensor:\n size = size if size is not None else len(alpha)\n dtype = dtype if dtype is not None else paddle.float64\n if seed is not None:\n paddle.seed(seed)\n res = paddle.to_tensor(\n paddle.distribution.Dirichlet(concentration=alpha).sample(shape=size),\n dtype=dtype,\n )\n return res\n\n\ndef beta(\n alpha: Union[float, paddle.Tensor],\n beta: Union[float, paddle.Tensor],\n /,\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,\n device: core.Place = None,\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\ndef gamma(\n alpha: Union[float, paddle.Tensor],\n beta: Union[float, paddle.Tensor],\n /,\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,\n device: core.Place = None,\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\ndef poisson(\n lam: Union[float, paddle.Tensor],\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n device: core.Place,\n dtype: paddle.dtype,\n seed: Optional[int] = None,\n fill_value: Optional[Union[float, int]] = 0,\n out: Optional[paddle.Tensor] = None,\n):\n raise IvyNotImplementedException()\n\n\n# bernoulli\n@with_supported_device_and_dtypes(\n {\n \"2.5.0 and above\": {\n \"cpu\": (\"float32\", \"float64\"),\n \"gpu\": (\"bfloat16\", \"float16\", \"float32\", \"float64\"),\n },\n \"2.4.2 and below\": {\n \"cpu\": (\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\"float16\", \"float32\", \"float64\"),\n },\n },\n backend_version,\n)\ndef bernoulli(\n probs: 
Union[float, paddle.Tensor],\n *,\n logits: Union[float, paddle.Tensor] = None,\n shape: Optional[Union[ivy.NativeArray, Sequence[int]]] = None,\n device: core.Place,\n dtype: paddle.dtype,\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n if seed is not None:\n paddle.seed(seed)\n if probs is not None:\n probs = probs\n elif logits is not None:\n probs = ivy.softmax(logits)\n probs = paddle.cast(probs, dtype)\n probs = paddle.unsqueeze(probs, 0) if len(probs.shape) == 0 else probs\n probs = paddle.maximum(probs, paddle.full_like(probs, 1e-6))\n sample = paddle.bernoulli(probs)\n return to_device(sample, device)\n", "path": "ivy/functional/backends/paddle/experimental/random.py"}]}
1,163
521
gh_patches_debug_27388
rasdani/github-patches
git_diff
python-poetry__poetry-2787
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- RecursionError when installing mkdocs-material <!-- Checked checkbox should look like this: [x] --> - [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version. - [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate. - [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option). - **OS version and name**: Windows 10 Pro 1909 18363.719 - **Poetry version**: 1.0.5 - **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/ndevln/f27b5b64251241d40e1b960ec96b8b68 ## Issue Hi, when running: `poetry install -E docs_material` I get the following error: ``` [RecursionError] maximum recursion depth exceeded ``` There are quite a few similar issues (like #2329), but it seems that this problem is package specific?!? Thank you for poetry and your effort :) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `poetry/utils/extras.py` Content: ``` 1 from typing import Iterator 2 from typing import List 3 from typing import Mapping 4 from typing import Sequence 5 6 from poetry.core.packages import Package 7 from poetry.utils.helpers import canonicalize_name 8 9 10 def get_extra_package_names( 11 packages, # type: Sequence[Package] 12 extras, # type: Mapping[str, List[str]] 13 extra_names, # type: Sequence[str] 14 ): # type: (...) -> Iterator[str] 15 """ 16 Returns all package names required by the given extras. 17 18 :param packages: A collection of packages, such as from Repository.packages 19 :param extras: A mapping of `extras` names to lists of package names, as defined 20 in the `extras` section of `poetry.lock`. 21 :param extra_names: A list of strings specifying names of extra groups to resolve. 22 """ 23 if not extra_names: 24 return [] 25 26 # lookup for packages by name, faster than looping over packages repeatedly 27 packages_by_name = {package.name: package for package in packages} 28 29 # get and flatten names of packages we've opted into as extras 30 extra_package_names = [ 31 canonicalize_name(extra_package_name) 32 for extra_name in extra_names 33 for extra_package_name in extras.get(extra_name, ()) 34 ] 35 36 def _extra_packages(package_names): 37 """Recursively find dependencies for packages names""" 38 # for each extra pacakge name 39 for package_name in package_names: 40 # Find the actual Package object. A missing key indicates an implicit 41 # dependency (like setuptools), which should be ignored 42 package = packages_by_name.get(canonicalize_name(package_name)) 43 if package: 44 yield package.name 45 # Recurse for dependencies 46 for dependency_package_name in _extra_packages( 47 dependency.name for dependency in package.requires 48 ): 49 yield dependency_package_name 50 51 return _extra_packages(extra_package_names) 52 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/poetry/utils/extras.py b/poetry/utils/extras.py --- a/poetry/utils/extras.py +++ b/poetry/utils/extras.py @@ -33,6 +33,9 @@ for extra_package_name in extras.get(extra_name, ()) ] + # keep record of packages seen during recursion in order to avoid recursion error + seen_package_names = set() + def _extra_packages(package_names): """Recursively find dependencies for packages names""" # for each extra pacakge name @@ -41,11 +44,16 @@ # dependency (like setuptools), which should be ignored package = packages_by_name.get(canonicalize_name(package_name)) if package: - yield package.name + if package.name not in seen_package_names: + seen_package_names.add(package.name) + yield package.name # Recurse for dependencies for dependency_package_name in _extra_packages( - dependency.name for dependency in package.requires + dependency.name + for dependency in package.requires + if dependency.name not in seen_package_names ): + seen_package_names.add(dependency_package_name) yield dependency_package_name return _extra_packages(extra_package_names)
{"golden_diff": "diff --git a/poetry/utils/extras.py b/poetry/utils/extras.py\n--- a/poetry/utils/extras.py\n+++ b/poetry/utils/extras.py\n@@ -33,6 +33,9 @@\n for extra_package_name in extras.get(extra_name, ())\n ]\n \n+ # keep record of packages seen during recursion in order to avoid recursion error\n+ seen_package_names = set()\n+\n def _extra_packages(package_names):\n \"\"\"Recursively find dependencies for packages names\"\"\"\n # for each extra pacakge name\n@@ -41,11 +44,16 @@\n # dependency (like setuptools), which should be ignored\n package = packages_by_name.get(canonicalize_name(package_name))\n if package:\n- yield package.name\n+ if package.name not in seen_package_names:\n+ seen_package_names.add(package.name)\n+ yield package.name\n # Recurse for dependencies\n for dependency_package_name in _extra_packages(\n- dependency.name for dependency in package.requires\n+ dependency.name\n+ for dependency in package.requires\n+ if dependency.name not in seen_package_names\n ):\n+ seen_package_names.add(dependency_package_name)\n yield dependency_package_name\n \n return _extra_packages(extra_package_names)\n", "issue": "RecursionError when installing mkdocs-material\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n- **OS version and name**: Windows 10 Pro 1909 18363.719\r\n- **Poetry version**: 1.0.5\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/ndevln/f27b5b64251241d40e1b960ec96b8b68\r\n\r\n## Issue\r\nHi,\r\n\r\nwhen running: `poetry install -E docs_material` I get the following error:\r\n\r\n```\r\n[RecursionError]\r\nmaximum recursion depth exceeded\r\n```\r\n\r\nThere are quite a few similar issues (like #2329), but it seems that this problem is package specific?!?\r\n\r\nThank you for poetry and your effort :)\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Iterator\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Sequence\n\nfrom poetry.core.packages import Package\nfrom poetry.utils.helpers import canonicalize_name\n\n\ndef get_extra_package_names(\n packages, # type: Sequence[Package]\n extras, # type: Mapping[str, List[str]]\n extra_names, # type: Sequence[str]\n): # type: (...) 
-> Iterator[str]\n \"\"\"\n Returns all package names required by the given extras.\n\n :param packages: A collection of packages, such as from Repository.packages\n :param extras: A mapping of `extras` names to lists of package names, as defined\n in the `extras` section of `poetry.lock`.\n :param extra_names: A list of strings specifying names of extra groups to resolve.\n \"\"\"\n if not extra_names:\n return []\n\n # lookup for packages by name, faster than looping over packages repeatedly\n packages_by_name = {package.name: package for package in packages}\n\n # get and flatten names of packages we've opted into as extras\n extra_package_names = [\n canonicalize_name(extra_package_name)\n for extra_name in extra_names\n for extra_package_name in extras.get(extra_name, ())\n ]\n\n def _extra_packages(package_names):\n \"\"\"Recursively find dependencies for packages names\"\"\"\n # for each extra pacakge name\n for package_name in package_names:\n # Find the actual Package object. A missing key indicates an implicit\n # dependency (like setuptools), which should be ignored\n package = packages_by_name.get(canonicalize_name(package_name))\n if package:\n yield package.name\n # Recurse for dependencies\n for dependency_package_name in _extra_packages(\n dependency.name for dependency in package.requires\n ):\n yield dependency_package_name\n\n return _extra_packages(extra_package_names)\n", "path": "poetry/utils/extras.py"}], "after_files": [{"content": "from typing import Iterator\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Sequence\n\nfrom poetry.core.packages import Package\nfrom poetry.utils.helpers import canonicalize_name\n\n\ndef get_extra_package_names(\n packages, # type: Sequence[Package]\n extras, # type: Mapping[str, List[str]]\n extra_names, # type: Sequence[str]\n): # type: (...) -> Iterator[str]\n \"\"\"\n Returns all package names required by the given extras.\n\n :param packages: A collection of packages, such as from Repository.packages\n :param extras: A mapping of `extras` names to lists of package names, as defined\n in the `extras` section of `poetry.lock`.\n :param extra_names: A list of strings specifying names of extra groups to resolve.\n \"\"\"\n if not extra_names:\n return []\n\n # lookup for packages by name, faster than looping over packages repeatedly\n packages_by_name = {package.name: package for package in packages}\n\n # get and flatten names of packages we've opted into as extras\n extra_package_names = [\n canonicalize_name(extra_package_name)\n for extra_name in extra_names\n for extra_package_name in extras.get(extra_name, ())\n ]\n\n # keep record of packages seen during recursion in order to avoid recursion error\n seen_package_names = set()\n\n def _extra_packages(package_names):\n \"\"\"Recursively find dependencies for packages names\"\"\"\n # for each extra pacakge name\n for package_name in package_names:\n # Find the actual Package object. 
A missing key indicates an implicit\n # dependency (like setuptools), which should be ignored\n package = packages_by_name.get(canonicalize_name(package_name))\n if package:\n if package.name not in seen_package_names:\n seen_package_names.add(package.name)\n yield package.name\n # Recurse for dependencies\n for dependency_package_name in _extra_packages(\n dependency.name\n for dependency in package.requires\n if dependency.name not in seen_package_names\n ):\n seen_package_names.add(dependency_package_name)\n yield dependency_package_name\n\n return _extra_packages(extra_package_names)\n", "path": "poetry/utils/extras.py"}]}
1,049
276
gh_patches_debug_33896
rasdani/github-patches
git_diff
voxel51__fiftyone-3436
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Group sample in modal missing attributes In `0.21.6`, a group sample in the modal will be missing attribute data unless the fields are added via `add_dynamic_sample_fields()` or `add_dynamic_frame_fields()`. This is due a subtle detail in the `relay` cache, which is straightforward to resolve. ```py import fiftyone as fo dataset = foz.load_zoo_dataset("quickstart-groups") for sample in dataset: for detection in sample.ground_truth.detections: detection["my_attr"] = "value" sample.save() # tooltip will not contain `my_attr` on detections session = fo.launch_app(dataset) # will force `my_attr` to appear dataset.add_dynamic_sample_fields() ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `fiftyone/server/routes/media.py` Content: ``` 1 """ 2 FiftyOne Server /media route 3 4 | Copyright 2017-2023, Voxel51, Inc. 5 | `voxel51.com <https://voxel51.com/>`_ 6 | 7 """ 8 import typing as t 9 10 import aiofiles 11 from aiofiles.threadpool.binary import AsyncBufferedReader 12 from aiofiles.os import stat as aio_stat 13 from starlette.endpoints import HTTPEndpoint 14 from starlette.requests import Request 15 from starlette.responses import ( 16 FileResponse, 17 Response, 18 StreamingResponse, 19 guess_type, 20 ) 21 22 23 async def ranged( 24 file: AsyncBufferedReader, 25 start: int = 0, 26 end: int = None, 27 block_size: int = 8192, 28 ) -> t.AsyncGenerator: 29 consumed = 0 30 31 await file.seek(start) 32 33 while True: 34 data_length = ( 35 min(block_size, end - start - consumed) if end else block_size 36 ) 37 38 if data_length <= 0: 39 break 40 41 data = await file.read(data_length) 42 43 if not data: 44 break 45 46 consumed += data_length 47 48 yield data 49 50 if hasattr(file, "close"): 51 await file.close() 52 53 54 class Media(HTTPEndpoint): 55 async def get( 56 self, request: Request 57 ) -> t.Union[FileResponse, StreamingResponse]: 58 path = request.query_params["filepath"] 59 60 response: t.Union[FileResponse, StreamingResponse] 61 if request.headers.get("range"): 62 response = await self.ranged_file_response(path, request) 63 else: 64 response = FileResponse( 65 path, 66 ) 67 response.headers["Accept-Ranges"] = "bytes" 68 69 return response 70 71 async def ranged_file_response( 72 self, path: str, request: Request 73 ) -> StreamingResponse: 74 file = await aiofiles.open(path, "rb") 75 file_size = (await aio_stat(path)).st_size 76 content_range = request.headers.get("range") 77 content_length = file_size 78 status_code = 200 79 headers = {} 80 81 if content_range is not None: 82 content_range = content_range.strip().lower() 83 84 content_ranges = content_range.split("=")[-1] 85 86 range_start, range_end, *_ = map( 87 str.strip, (content_ranges + "-").split("-") 88 ) 89 90 start, end = ( 91 int(range_start) if range_start else 0, 92 int(range_end) if range_end else file_size - 1, 93 ) 94 range_start = max(0, start) 95 range_end = min(file_size - 1, int(end)) 96 97 content_length = (end - start) + 1 98 99 file_response = ranged(file, start=start, end=end + 1) 100 101 status_code = 206 102 103 headers["Content-Range"] = f"bytes {start}-{end}/{file_size}" 104 105 response = StreamingResponse( 106 file_response, 107 media_type=guess_type(path)[0], 108 status_code=status_code, 109 ) 110 111 response.headers.update( 112 { 113 "Accept-Ranges": "bytes", 114 "Content-Length": 
str(content_length), 115 **headers, 116 } 117 ) 118 119 return response 120 121 async def head(self, request: Request) -> Response: 122 path = request.query_params["filepath"] 123 response = Response() 124 size = (await aio_stat(path)).st_size 125 response.headers.update( 126 { 127 "Accept-Ranges": "bytes", 128 "Content-Type": guess_type(path)[0], 129 "Content-Length": size, 130 } 131 ) 132 return response 133 134 async def options(self, request: Request) -> Response: 135 response = Response() 136 response.headers["Accept-Ranges"] = "bytes" 137 response.headers["Allow"] = "OPTIONS, GET, HEAD" 138 return response 139 ``` Path: `fiftyone/server/samples.py` Content: ``` 1 """ 2 FiftyOne Server samples pagination 3 4 | Copyright 2017-2023, Voxel51, Inc. 5 | `voxel51.com <https://voxel51.com/>`_ 6 | 7 """ 8 import asyncio 9 import strawberry as gql 10 import typing as t 11 12 13 from fiftyone.core.collections import SampleCollection 14 import fiftyone.core.media as fom 15 import fiftyone.core.odm as foo 16 from fiftyone.core.utils import run_sync_task 17 18 from fiftyone.server.filters import SampleFilter 19 import fiftyone.server.metadata as fosm 20 from fiftyone.server.paginator import Connection, Edge, PageInfo 21 from fiftyone.server.scalars import BSON, JSON, BSONArray 22 from fiftyone.server.utils import from_dict 23 import fiftyone.server.view as fosv 24 25 26 @gql.type 27 class MediaURL: 28 field: str 29 url: t.Optional[str] 30 31 32 @gql.interface 33 class Sample: 34 id: gql.ID 35 sample: JSON 36 urls: t.List[MediaURL] 37 aspect_ratio: float 38 39 40 @gql.type 41 class ImageSample(Sample): 42 pass 43 44 45 @gql.type 46 class PointCloudSample(Sample): 47 pass 48 49 50 @gql.type 51 class VideoSample(Sample): 52 frame_number: int 53 frame_rate: float 54 55 56 SampleItem = gql.union( 57 "SampleItem", types=(ImageSample, PointCloudSample, VideoSample) 58 ) 59 60 MEDIA_TYPES = { 61 fom.IMAGE: ImageSample, 62 fom.POINT_CLOUD: PointCloudSample, 63 fom.VIDEO: VideoSample, 64 } 65 66 67 async def paginate_samples( 68 dataset: str, 69 stages: BSONArray, 70 filters: JSON, 71 first: int, 72 after: t.Optional[str] = None, 73 extended_stages: t.Optional[BSON] = None, 74 sample_filter: t.Optional[SampleFilter] = None, 75 pagination_data: t.Optional[bool] = False, 76 ) -> Connection[t.Union[ImageSample, VideoSample], str]: 77 run = lambda reload: fosv.get_view( 78 dataset, 79 stages=stages, 80 filters=filters, 81 pagination_data=pagination_data, 82 extended_stages=extended_stages, 83 sample_filter=sample_filter, 84 reload=reload, 85 ) 86 try: 87 view = await run_sync_task(run, False) 88 except: 89 view = await run_sync_task(run, True) 90 91 # check frame field schema explicitly, media type is not reliable for groups 92 has_frames = view.get_frame_field_schema() is not None 93 94 # TODO: Remove this once we have a better way to handle large videos. This 95 # is a temporary fix to reduce the $lookup overhead for sample frames on 96 # full datasets. 
97 full_lookup = has_frames and (filters or stages) 98 support = [1, 1] if not full_lookup else None 99 if after is None: 100 after = "-1" 101 102 if int(after) > -1: 103 view = view.skip(int(after) + 1) 104 105 pipeline = view._pipeline( 106 attach_frames=has_frames, 107 detach_frames=False, 108 manual_group_select=sample_filter 109 and sample_filter.group 110 and (sample_filter.group.id and not sample_filter.group.slices), 111 support=support, 112 ) 113 114 # Only return the first frame of each video sample for the grid thumbnail 115 if has_frames: 116 pipeline.append({"$addFields": {"frames": {"$slice": ["$frames", 1]}}}) 117 118 samples = await foo.aggregate( 119 foo.get_async_db_conn()[view._dataset._sample_collection_name], 120 pipeline, 121 ).to_list(first + 1) 122 123 more = False 124 if len(samples) > first: 125 samples = samples[:first] 126 more = True 127 128 metadata_cache = {} 129 url_cache = {} 130 nodes = await asyncio.gather( 131 *[ 132 _create_sample_item(view, sample, metadata_cache, url_cache) 133 for sample in samples 134 ] 135 ) 136 137 edges = [] 138 for idx, node in enumerate(nodes): 139 edges.append( 140 Edge( 141 node=node, 142 cursor=str(idx + int(after) + 1), 143 ) 144 ) 145 146 return Connection( 147 page_info=PageInfo( 148 has_previous_page=False, 149 has_next_page=more, 150 start_cursor=edges[0].cursor if edges else None, 151 end_cursor=edges[-1].cursor if len(edges) > 1 else None, 152 ), 153 edges=edges, 154 ) 155 156 157 async def _create_sample_item( 158 dataset: SampleCollection, 159 sample: t.Dict, 160 metadata_cache: t.Dict[str, t.Dict], 161 url_cache: t.Dict[str, str], 162 ) -> SampleItem: 163 media_type = fom.get_media_type(sample["filepath"]) 164 165 if media_type == fom.IMAGE: 166 cls = ImageSample 167 elif media_type == fom.VIDEO: 168 cls = VideoSample 169 elif media_type == fom.POINT_CLOUD: 170 cls = PointCloudSample 171 else: 172 raise ValueError(f"unknown media type '{media_type}'") 173 174 metadata = await fosm.get_metadata( 175 dataset, sample, media_type, metadata_cache, url_cache 176 ) 177 178 if cls == VideoSample: 179 metadata = dict(**metadata, frame_number=sample.get("frame_number", 1)) 180 181 return from_dict(cls, {"id": sample["_id"], "sample": sample, **metadata}) 182 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/fiftyone/server/routes/media.py b/fiftyone/server/routes/media.py --- a/fiftyone/server/routes/media.py +++ b/fiftyone/server/routes/media.py @@ -5,8 +5,10 @@ | `voxel51.com <https://voxel51.com/>`_ | """ +import os import typing as t +import anyio import aiofiles from aiofiles.threadpool.binary import AsyncBufferedReader from aiofiles.os import stat as aio_stat @@ -58,6 +60,12 @@ path = request.query_params["filepath"] response: t.Union[FileResponse, StreamingResponse] + + try: + await anyio.to_thread.run_sync(os.stat, path) + except FileNotFoundError: + return Response(content="Not found", status_code=404) + if request.headers.get("range"): response = await self.ranged_file_response(path, request) else: diff --git a/fiftyone/server/samples.py b/fiftyone/server/samples.py --- a/fiftyone/server/samples.py +++ b/fiftyone/server/samples.py @@ -129,7 +129,9 @@ url_cache = {} nodes = await asyncio.gather( *[ - _create_sample_item(view, sample, metadata_cache, url_cache) + _create_sample_item( + view, sample, metadata_cache, url_cache, pagination_data + ) for sample in samples ] ) @@ -159,6 +161,7 @@ sample: t.Dict, metadata_cache: t.Dict[str, t.Dict], url_cache: t.Dict[str, str], + pagination_data: bool, ) -> SampleItem: media_type = fom.get_media_type(sample["filepath"]) @@ -178,4 +181,9 @@ if cls == VideoSample: metadata = dict(**metadata, frame_number=sample.get("frame_number", 1)) - return from_dict(cls, {"id": sample["_id"], "sample": sample, **metadata}) + _id = sample["_id"] + + if not pagination_data: + _id = f"{_id}-modal" + + return from_dict(cls, {"id": _id, "sample": sample, **metadata})
{"golden_diff": "diff --git a/fiftyone/server/routes/media.py b/fiftyone/server/routes/media.py\n--- a/fiftyone/server/routes/media.py\n+++ b/fiftyone/server/routes/media.py\n@@ -5,8 +5,10 @@\n | `voxel51.com <https://voxel51.com/>`_\n |\n \"\"\"\n+import os\n import typing as t\n \n+import anyio\n import aiofiles\n from aiofiles.threadpool.binary import AsyncBufferedReader\n from aiofiles.os import stat as aio_stat\n@@ -58,6 +60,12 @@\n path = request.query_params[\"filepath\"]\n \n response: t.Union[FileResponse, StreamingResponse]\n+\n+ try:\n+ await anyio.to_thread.run_sync(os.stat, path)\n+ except FileNotFoundError:\n+ return Response(content=\"Not found\", status_code=404)\n+\n if request.headers.get(\"range\"):\n response = await self.ranged_file_response(path, request)\n else:\ndiff --git a/fiftyone/server/samples.py b/fiftyone/server/samples.py\n--- a/fiftyone/server/samples.py\n+++ b/fiftyone/server/samples.py\n@@ -129,7 +129,9 @@\n url_cache = {}\n nodes = await asyncio.gather(\n *[\n- _create_sample_item(view, sample, metadata_cache, url_cache)\n+ _create_sample_item(\n+ view, sample, metadata_cache, url_cache, pagination_data\n+ )\n for sample in samples\n ]\n )\n@@ -159,6 +161,7 @@\n sample: t.Dict,\n metadata_cache: t.Dict[str, t.Dict],\n url_cache: t.Dict[str, str],\n+ pagination_data: bool,\n ) -> SampleItem:\n media_type = fom.get_media_type(sample[\"filepath\"])\n \n@@ -178,4 +181,9 @@\n if cls == VideoSample:\n metadata = dict(**metadata, frame_number=sample.get(\"frame_number\", 1))\n \n- return from_dict(cls, {\"id\": sample[\"_id\"], \"sample\": sample, **metadata})\n+ _id = sample[\"_id\"]\n+\n+ if not pagination_data:\n+ _id = f\"{_id}-modal\"\n+\n+ return from_dict(cls, {\"id\": _id, \"sample\": sample, **metadata})\n", "issue": "[BUG] Group sample in modal missing attributes\nIn `0.21.6`, a group sample in the modal will be missing attribute data unless the fields are added via `add_dynamic_sample_fields()` or `add_dynamic_frame_fields()`. 
This is due a subtle detail in the `relay` cache, which is straightforward to resolve.\r\n\r\n```py\r\nimport fiftyone as fo\r\n\r\ndataset = foz.load_zoo_dataset(\"quickstart-groups\")\r\n\r\nfor sample in dataset:\r\n for detection in sample.ground_truth.detections:\r\n detection[\"my_attr\"] = \"value\"\r\n sample.save()\r\n\r\n# tooltip will not contain `my_attr` on detections\r\nsession = fo.launch_app(dataset)\r\n\r\n\r\n# will force `my_attr` to appear\r\ndataset.add_dynamic_sample_fields()\r\n```\n", "before_files": [{"content": "\"\"\"\nFiftyOne Server /media route\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport typing as t\n\nimport aiofiles\nfrom aiofiles.threadpool.binary import AsyncBufferedReader\nfrom aiofiles.os import stat as aio_stat\nfrom starlette.endpoints import HTTPEndpoint\nfrom starlette.requests import Request\nfrom starlette.responses import (\n FileResponse,\n Response,\n StreamingResponse,\n guess_type,\n)\n\n\nasync def ranged(\n file: AsyncBufferedReader,\n start: int = 0,\n end: int = None,\n block_size: int = 8192,\n) -> t.AsyncGenerator:\n consumed = 0\n\n await file.seek(start)\n\n while True:\n data_length = (\n min(block_size, end - start - consumed) if end else block_size\n )\n\n if data_length <= 0:\n break\n\n data = await file.read(data_length)\n\n if not data:\n break\n\n consumed += data_length\n\n yield data\n\n if hasattr(file, \"close\"):\n await file.close()\n\n\nclass Media(HTTPEndpoint):\n async def get(\n self, request: Request\n ) -> t.Union[FileResponse, StreamingResponse]:\n path = request.query_params[\"filepath\"]\n\n response: t.Union[FileResponse, StreamingResponse]\n if request.headers.get(\"range\"):\n response = await self.ranged_file_response(path, request)\n else:\n response = FileResponse(\n path,\n )\n response.headers[\"Accept-Ranges\"] = \"bytes\"\n\n return response\n\n async def ranged_file_response(\n self, path: str, request: Request\n ) -> StreamingResponse:\n file = await aiofiles.open(path, \"rb\")\n file_size = (await aio_stat(path)).st_size\n content_range = request.headers.get(\"range\")\n content_length = file_size\n status_code = 200\n headers = {}\n\n if content_range is not None:\n content_range = content_range.strip().lower()\n\n content_ranges = content_range.split(\"=\")[-1]\n\n range_start, range_end, *_ = map(\n str.strip, (content_ranges + \"-\").split(\"-\")\n )\n\n start, end = (\n int(range_start) if range_start else 0,\n int(range_end) if range_end else file_size - 1,\n )\n range_start = max(0, start)\n range_end = min(file_size - 1, int(end))\n\n content_length = (end - start) + 1\n\n file_response = ranged(file, start=start, end=end + 1)\n\n status_code = 206\n\n headers[\"Content-Range\"] = f\"bytes {start}-{end}/{file_size}\"\n\n response = StreamingResponse(\n file_response,\n media_type=guess_type(path)[0],\n status_code=status_code,\n )\n\n response.headers.update(\n {\n \"Accept-Ranges\": \"bytes\",\n \"Content-Length\": str(content_length),\n **headers,\n }\n )\n\n return response\n\n async def head(self, request: Request) -> Response:\n path = request.query_params[\"filepath\"]\n response = Response()\n size = (await aio_stat(path)).st_size\n response.headers.update(\n {\n \"Accept-Ranges\": \"bytes\",\n \"Content-Type\": guess_type(path)[0],\n \"Content-Length\": size,\n }\n )\n return response\n\n async def options(self, request: Request) -> Response:\n response = Response()\n response.headers[\"Accept-Ranges\"] = \"bytes\"\n 
response.headers[\"Allow\"] = \"OPTIONS, GET, HEAD\"\n return response\n", "path": "fiftyone/server/routes/media.py"}, {"content": "\"\"\"\nFiftyOne Server samples pagination\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport asyncio\nimport strawberry as gql\nimport typing as t\n\n\nfrom fiftyone.core.collections import SampleCollection\nimport fiftyone.core.media as fom\nimport fiftyone.core.odm as foo\nfrom fiftyone.core.utils import run_sync_task\n\nfrom fiftyone.server.filters import SampleFilter\nimport fiftyone.server.metadata as fosm\nfrom fiftyone.server.paginator import Connection, Edge, PageInfo\nfrom fiftyone.server.scalars import BSON, JSON, BSONArray\nfrom fiftyone.server.utils import from_dict\nimport fiftyone.server.view as fosv\n\n\[email protected]\nclass MediaURL:\n field: str\n url: t.Optional[str]\n\n\[email protected]\nclass Sample:\n id: gql.ID\n sample: JSON\n urls: t.List[MediaURL]\n aspect_ratio: float\n\n\[email protected]\nclass ImageSample(Sample):\n pass\n\n\[email protected]\nclass PointCloudSample(Sample):\n pass\n\n\[email protected]\nclass VideoSample(Sample):\n frame_number: int\n frame_rate: float\n\n\nSampleItem = gql.union(\n \"SampleItem\", types=(ImageSample, PointCloudSample, VideoSample)\n)\n\nMEDIA_TYPES = {\n fom.IMAGE: ImageSample,\n fom.POINT_CLOUD: PointCloudSample,\n fom.VIDEO: VideoSample,\n}\n\n\nasync def paginate_samples(\n dataset: str,\n stages: BSONArray,\n filters: JSON,\n first: int,\n after: t.Optional[str] = None,\n extended_stages: t.Optional[BSON] = None,\n sample_filter: t.Optional[SampleFilter] = None,\n pagination_data: t.Optional[bool] = False,\n) -> Connection[t.Union[ImageSample, VideoSample], str]:\n run = lambda reload: fosv.get_view(\n dataset,\n stages=stages,\n filters=filters,\n pagination_data=pagination_data,\n extended_stages=extended_stages,\n sample_filter=sample_filter,\n reload=reload,\n )\n try:\n view = await run_sync_task(run, False)\n except:\n view = await run_sync_task(run, True)\n\n # check frame field schema explicitly, media type is not reliable for groups\n has_frames = view.get_frame_field_schema() is not None\n\n # TODO: Remove this once we have a better way to handle large videos. 
This\n # is a temporary fix to reduce the $lookup overhead for sample frames on\n # full datasets.\n full_lookup = has_frames and (filters or stages)\n support = [1, 1] if not full_lookup else None\n if after is None:\n after = \"-1\"\n\n if int(after) > -1:\n view = view.skip(int(after) + 1)\n\n pipeline = view._pipeline(\n attach_frames=has_frames,\n detach_frames=False,\n manual_group_select=sample_filter\n and sample_filter.group\n and (sample_filter.group.id and not sample_filter.group.slices),\n support=support,\n )\n\n # Only return the first frame of each video sample for the grid thumbnail\n if has_frames:\n pipeline.append({\"$addFields\": {\"frames\": {\"$slice\": [\"$frames\", 1]}}})\n\n samples = await foo.aggregate(\n foo.get_async_db_conn()[view._dataset._sample_collection_name],\n pipeline,\n ).to_list(first + 1)\n\n more = False\n if len(samples) > first:\n samples = samples[:first]\n more = True\n\n metadata_cache = {}\n url_cache = {}\n nodes = await asyncio.gather(\n *[\n _create_sample_item(view, sample, metadata_cache, url_cache)\n for sample in samples\n ]\n )\n\n edges = []\n for idx, node in enumerate(nodes):\n edges.append(\n Edge(\n node=node,\n cursor=str(idx + int(after) + 1),\n )\n )\n\n return Connection(\n page_info=PageInfo(\n has_previous_page=False,\n has_next_page=more,\n start_cursor=edges[0].cursor if edges else None,\n end_cursor=edges[-1].cursor if len(edges) > 1 else None,\n ),\n edges=edges,\n )\n\n\nasync def _create_sample_item(\n dataset: SampleCollection,\n sample: t.Dict,\n metadata_cache: t.Dict[str, t.Dict],\n url_cache: t.Dict[str, str],\n) -> SampleItem:\n media_type = fom.get_media_type(sample[\"filepath\"])\n\n if media_type == fom.IMAGE:\n cls = ImageSample\n elif media_type == fom.VIDEO:\n cls = VideoSample\n elif media_type == fom.POINT_CLOUD:\n cls = PointCloudSample\n else:\n raise ValueError(f\"unknown media type '{media_type}'\")\n\n metadata = await fosm.get_metadata(\n dataset, sample, media_type, metadata_cache, url_cache\n )\n\n if cls == VideoSample:\n metadata = dict(**metadata, frame_number=sample.get(\"frame_number\", 1))\n\n return from_dict(cls, {\"id\": sample[\"_id\"], \"sample\": sample, **metadata})\n", "path": "fiftyone/server/samples.py"}], "after_files": [{"content": "\"\"\"\nFiftyOne Server /media route\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport os\nimport typing as t\n\nimport anyio\nimport aiofiles\nfrom aiofiles.threadpool.binary import AsyncBufferedReader\nfrom aiofiles.os import stat as aio_stat\nfrom starlette.endpoints import HTTPEndpoint\nfrom starlette.requests import Request\nfrom starlette.responses import (\n FileResponse,\n Response,\n StreamingResponse,\n guess_type,\n)\n\n\nasync def ranged(\n file: AsyncBufferedReader,\n start: int = 0,\n end: int = None,\n block_size: int = 8192,\n) -> t.AsyncGenerator:\n consumed = 0\n\n await file.seek(start)\n\n while True:\n data_length = (\n min(block_size, end - start - consumed) if end else block_size\n )\n\n if data_length <= 0:\n break\n\n data = await file.read(data_length)\n\n if not data:\n break\n\n consumed += data_length\n\n yield data\n\n if hasattr(file, \"close\"):\n await file.close()\n\n\nclass Media(HTTPEndpoint):\n async def get(\n self, request: Request\n ) -> t.Union[FileResponse, StreamingResponse]:\n path = request.query_params[\"filepath\"]\n\n response: t.Union[FileResponse, StreamingResponse]\n\n try:\n await anyio.to_thread.run_sync(os.stat, path)\n except FileNotFoundError:\n 
return Response(content=\"Not found\", status_code=404)\n\n if request.headers.get(\"range\"):\n response = await self.ranged_file_response(path, request)\n else:\n response = FileResponse(\n path,\n )\n response.headers[\"Accept-Ranges\"] = \"bytes\"\n\n return response\n\n async def ranged_file_response(\n self, path: str, request: Request\n ) -> StreamingResponse:\n file = await aiofiles.open(path, \"rb\")\n file_size = (await aio_stat(path)).st_size\n content_range = request.headers.get(\"range\")\n content_length = file_size\n status_code = 200\n headers = {}\n\n if content_range is not None:\n content_range = content_range.strip().lower()\n\n content_ranges = content_range.split(\"=\")[-1]\n\n range_start, range_end, *_ = map(\n str.strip, (content_ranges + \"-\").split(\"-\")\n )\n\n start, end = (\n int(range_start) if range_start else 0,\n int(range_end) if range_end else file_size - 1,\n )\n range_start = max(0, start)\n range_end = min(file_size - 1, int(end))\n\n content_length = (end - start) + 1\n\n file_response = ranged(file, start=start, end=end + 1)\n\n status_code = 206\n\n headers[\"Content-Range\"] = f\"bytes {start}-{end}/{file_size}\"\n\n response = StreamingResponse(\n file_response,\n media_type=guess_type(path)[0],\n status_code=status_code,\n )\n\n response.headers.update(\n {\n \"Accept-Ranges\": \"bytes\",\n \"Content-Length\": str(content_length),\n **headers,\n }\n )\n\n return response\n\n async def head(self, request: Request) -> Response:\n path = request.query_params[\"filepath\"]\n response = Response()\n size = (await aio_stat(path)).st_size\n response.headers.update(\n {\n \"Accept-Ranges\": \"bytes\",\n \"Content-Type\": guess_type(path)[0],\n \"Content-Length\": size,\n }\n )\n return response\n\n async def options(self, request: Request) -> Response:\n response = Response()\n response.headers[\"Accept-Ranges\"] = \"bytes\"\n response.headers[\"Allow\"] = \"OPTIONS, GET, HEAD\"\n return response\n", "path": "fiftyone/server/routes/media.py"}, {"content": "\"\"\"\nFiftyOne Server samples pagination\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport asyncio\nimport strawberry as gql\nimport typing as t\n\n\nfrom fiftyone.core.collections import SampleCollection\nimport fiftyone.core.media as fom\nimport fiftyone.core.odm as foo\nfrom fiftyone.core.utils import run_sync_task\n\nfrom fiftyone.server.filters import SampleFilter\nimport fiftyone.server.metadata as fosm\nfrom fiftyone.server.paginator import Connection, Edge, PageInfo\nfrom fiftyone.server.scalars import BSON, JSON, BSONArray\nfrom fiftyone.server.utils import from_dict\nimport fiftyone.server.view as fosv\n\n\[email protected]\nclass MediaURL:\n field: str\n url: t.Optional[str]\n\n\[email protected]\nclass Sample:\n id: gql.ID\n sample: JSON\n urls: t.List[MediaURL]\n aspect_ratio: float\n\n\[email protected]\nclass ImageSample(Sample):\n pass\n\n\[email protected]\nclass PointCloudSample(Sample):\n pass\n\n\[email protected]\nclass VideoSample(Sample):\n frame_number: int\n frame_rate: float\n\n\nSampleItem = gql.union(\n \"SampleItem\", types=(ImageSample, PointCloudSample, VideoSample)\n)\n\nMEDIA_TYPES = {\n fom.IMAGE: ImageSample,\n fom.POINT_CLOUD: PointCloudSample,\n fom.VIDEO: VideoSample,\n}\n\n\nasync def paginate_samples(\n dataset: str,\n stages: BSONArray,\n filters: JSON,\n first: int,\n after: t.Optional[str] = None,\n extended_stages: t.Optional[BSON] = None,\n sample_filter: t.Optional[SampleFilter] = None,\n 
pagination_data: t.Optional[bool] = False,\n) -> Connection[t.Union[ImageSample, VideoSample], str]:\n run = lambda reload: fosv.get_view(\n dataset,\n stages=stages,\n filters=filters,\n pagination_data=pagination_data,\n extended_stages=extended_stages,\n sample_filter=sample_filter,\n reload=reload,\n )\n try:\n view = await run_sync_task(run, False)\n except:\n view = await run_sync_task(run, True)\n\n # check frame field schema explicitly, media type is not reliable for groups\n has_frames = view.get_frame_field_schema() is not None\n\n # TODO: Remove this once we have a better way to handle large videos. This\n # is a temporary fix to reduce the $lookup overhead for sample frames on\n # full datasets.\n full_lookup = has_frames and (filters or stages)\n support = [1, 1] if not full_lookup else None\n if after is None:\n after = \"-1\"\n\n if int(after) > -1:\n view = view.skip(int(after) + 1)\n\n pipeline = view._pipeline(\n attach_frames=has_frames,\n detach_frames=False,\n manual_group_select=sample_filter\n and sample_filter.group\n and (sample_filter.group.id and not sample_filter.group.slices),\n support=support,\n )\n\n # Only return the first frame of each video sample for the grid thumbnail\n if has_frames:\n pipeline.append({\"$addFields\": {\"frames\": {\"$slice\": [\"$frames\", 1]}}})\n\n samples = await foo.aggregate(\n foo.get_async_db_conn()[view._dataset._sample_collection_name],\n pipeline,\n ).to_list(first + 1)\n\n more = False\n if len(samples) > first:\n samples = samples[:first]\n more = True\n\n metadata_cache = {}\n url_cache = {}\n nodes = await asyncio.gather(\n *[\n _create_sample_item(\n view, sample, metadata_cache, url_cache, pagination_data\n )\n for sample in samples\n ]\n )\n\n edges = []\n for idx, node in enumerate(nodes):\n edges.append(\n Edge(\n node=node,\n cursor=str(idx + int(after) + 1),\n )\n )\n\n return Connection(\n page_info=PageInfo(\n has_previous_page=False,\n has_next_page=more,\n start_cursor=edges[0].cursor if edges else None,\n end_cursor=edges[-1].cursor if len(edges) > 1 else None,\n ),\n edges=edges,\n )\n\n\nasync def _create_sample_item(\n dataset: SampleCollection,\n sample: t.Dict,\n metadata_cache: t.Dict[str, t.Dict],\n url_cache: t.Dict[str, str],\n pagination_data: bool,\n) -> SampleItem:\n media_type = fom.get_media_type(sample[\"filepath\"])\n\n if media_type == fom.IMAGE:\n cls = ImageSample\n elif media_type == fom.VIDEO:\n cls = VideoSample\n elif media_type == fom.POINT_CLOUD:\n cls = PointCloudSample\n else:\n raise ValueError(f\"unknown media type '{media_type}'\")\n\n metadata = await fosm.get_metadata(\n dataset, sample, media_type, metadata_cache, url_cache\n )\n\n if cls == VideoSample:\n metadata = dict(**metadata, frame_number=sample.get(\"frame_number\", 1))\n\n _id = sample[\"_id\"]\n\n if not pagination_data:\n _id = f\"{_id}-modal\"\n\n return from_dict(cls, {\"id\": _id, \"sample\": sample, **metadata})\n", "path": "fiftyone/server/samples.py"}]}
3,186
513
gh_patches_debug_4951
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-4919
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- mitmproxy depends on `pkg_resources` at runtime but does not specify `install_requires=setuptools` #### Problem Description A clear and concise description of what the bug is. https://github.com/mitmproxy/mitmproxy/blob/0ca458fd6475ee48728147f3b529467a75e912a4/mitmproxy/contrib/kaitaistruct/exif.py#L7 the `pkg_resources` module is provided by `setuptools` installation into a minimal environment (for example, bazel) will break without also needing to specify `setuptools` mitmproxy should depend on `setuptools` #### Steps to reproduce the behavior: 1. simulate a minimal environment: ``` virtualenv venv venv/bin/pip install mitmproxy venv/bin/pip uninstall setuptools venv/bin/pip install mitmproxy # make sure we actually have its deps even after uninstalling ``` 2. run `mitmproxy --help` ```console $ venv/bin/mitmproxy --help Traceback (most recent call last): File "venv/bin/mitmproxy", line 5, in <module> from mitmproxy.tools.main import mitmproxy File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/tools/main.py", line 8, in <module> from mitmproxy import exceptions, master File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/master.py", line 10, in <module> from mitmproxy import eventsequence File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/eventsequence.py", line 8, in <module> from mitmproxy.proxy import layers File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/__init__.py", line 1, in <module> from . import modes File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/modes.py", line 9, in <module> from mitmproxy.proxy.layers import tls File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/tls.py", line 8, in <module> from mitmproxy.net import tls as net_tls File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/net/tls.py", line 18, in <module> from mitmproxy.contrib.kaitaistruct import tls_client_hello File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/contrib/kaitaistruct/tls_client_hello.py", line 7, in <module> from pkg_resources import parse_version ModuleNotFoundError: No module named 'pkg_resources' ``` #### System Information Paste the output of "mitmproxy --version" here. ```console $ venv/bin/mitmproxy --version Traceback (most recent call last): File "venv/bin/mitmproxy", line 5, in <module> from mitmproxy.tools.main import mitmproxy File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/tools/main.py", line 8, in <module> from mitmproxy import exceptions, master File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/master.py", line 10, in <module> from mitmproxy import eventsequence File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/eventsequence.py", line 8, in <module> from mitmproxy.proxy import layers File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/__init__.py", line 1, in <module> from . 
import modes File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/modes.py", line 9, in <module> from mitmproxy.proxy.layers import tls File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/tls.py", line 8, in <module> from mitmproxy.net import tls as net_tls File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/net/tls.py", line 18, in <module> from mitmproxy.contrib.kaitaistruct import tls_client_hello File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/contrib/kaitaistruct/tls_client_hello.py", line 7, in <module> from pkg_resources import parse_version ModuleNotFoundError: No module named 'pkg_resources' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import os 2 import re 3 from codecs import open 4 5 from setuptools import find_packages, setup 6 7 # Based on https://github.com/pypa/sampleproject/blob/main/setup.py 8 # and https://python-packaging-user-guide.readthedocs.org/ 9 10 here = os.path.abspath(os.path.dirname(__file__)) 11 12 with open(os.path.join(here, 'README.md'), encoding='utf-8') as f: 13 long_description = f.read() 14 long_description_content_type = "text/markdown" 15 16 with open(os.path.join(here, "mitmproxy", "version.py")) as f: 17 match = re.search(r'VERSION = "(.+?)"', f.read()) 18 assert match 19 VERSION = match.group(1) 20 21 setup( 22 name="mitmproxy", 23 version=VERSION, 24 description="An interactive, SSL/TLS-capable intercepting proxy for HTTP/1, HTTP/2, and WebSockets.", 25 long_description=long_description, 26 long_description_content_type=long_description_content_type, 27 url="http://mitmproxy.org", 28 author="Aldo Cortesi", 29 author_email="[email protected]", 30 license="MIT", 31 classifiers=[ 32 "License :: OSI Approved :: MIT License", 33 "Development Status :: 5 - Production/Stable", 34 "Environment :: Console :: Curses", 35 "Operating System :: MacOS", 36 "Operating System :: POSIX", 37 "Operating System :: Microsoft :: Windows", 38 "Programming Language :: Python :: 3 :: Only", 39 "Programming Language :: Python :: 3.8", 40 "Programming Language :: Python :: 3.9", 41 "Programming Language :: Python :: 3.10", 42 "Programming Language :: Python :: Implementation :: CPython", 43 "Topic :: Security", 44 "Topic :: Internet :: WWW/HTTP", 45 "Topic :: Internet :: Proxy Servers", 46 "Topic :: System :: Networking :: Monitoring", 47 "Topic :: Software Development :: Testing", 48 "Typing :: Typed", 49 ], 50 project_urls={ 51 'Documentation': 'https://docs.mitmproxy.org/stable/', 52 'Source': 'https://github.com/mitmproxy/mitmproxy/', 53 'Tracker': 'https://github.com/mitmproxy/mitmproxy/issues', 54 }, 55 packages=find_packages(include=[ 56 "mitmproxy", "mitmproxy.*", 57 ]), 58 include_package_data=True, 59 entry_points={ 60 'console_scripts': [ 61 "mitmproxy = mitmproxy.tools.main:mitmproxy", 62 "mitmdump = mitmproxy.tools.main:mitmdump", 63 "mitmweb = mitmproxy.tools.main:mitmweb", 64 ] 65 }, 66 python_requires='>=3.8', 67 # https://packaging.python.org/en/latest/requirements/#install-requires 68 # It is not considered best practice to use install_requires to pin dependencies to specific versions. 69 install_requires=[ 70 "asgiref>=3.2.10,<3.5", 71 "blinker>=1.4, <1.5", 72 "Brotli>=1.0,<1.1", 73 "certifi>=2019.9.11", # no semver here - this should always be on the last release! 
74 "click>=7.0,<8.1", 75 "cryptography>=3.3,<3.5", 76 "flask>=1.1.1,<2.1", 77 "h11>=0.11,<0.13", 78 "h2>=4.1,<5", 79 "hyperframe>=6.0,<7", 80 "kaitaistruct>=0.7,<0.10", 81 "ldap3>=2.8,<2.10", 82 "msgpack>=1.0.0, <1.1.0", 83 "passlib>=1.6.5, <1.8", 84 "protobuf>=3.14,<3.19", 85 "pyOpenSSL>=21.0,<21.1", 86 "pyparsing>=2.4.2,<2.5", 87 "pyperclip>=1.6.0,<1.9", 88 "ruamel.yaml>=0.16,<0.17.17", 89 "sortedcontainers>=2.3,<2.5", 90 "tornado>=6.1,<7", 91 "urwid>=2.1.1,<2.2", 92 "wsproto>=1.0,<1.1", 93 "publicsuffix2>=2.20190812,<3", 94 "zstandard>=0.11,<0.16", 95 ], 96 extras_require={ 97 ':sys_platform == "win32"': [ 98 "pydivert>=2.0.3,<2.2", 99 ], 100 'dev': [ 101 "hypothesis>=5.8,<7", 102 "parver>=0.1,<2.0", 103 "pdoc>=4.0.0", 104 "pyinstaller==4.5.1", 105 "pytest-asyncio>=0.10.0,<0.16,!=0.14", 106 "pytest-cov>=2.7.1,<3", 107 "pytest-timeout>=1.3.3,<2", 108 "pytest-xdist>=2.1.0,<3", 109 "pytest>=6.1.0,<7", 110 "requests>=2.9.1,<3", 111 "tox>=3.5,<4", 112 "wheel>=0.36.2,<0.38", 113 "coverage==5.5", # workaround issue with import errors introduced in 5.6b1/6.0 114 ], 115 } 116 ) 117 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -86,6 +86,8 @@ "pyparsing>=2.4.2,<2.5", "pyperclip>=1.6.0,<1.9", "ruamel.yaml>=0.16,<0.17.17", + # Kaitai parsers depend on setuptools, remove once https://github.com/kaitai-io/kaitai_struct_python_runtime/issues/62 is fixed + "setuptools", "sortedcontainers>=2.3,<2.5", "tornado>=6.1,<7", "urwid>=2.1.1,<2.2",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -86,6 +86,8 @@\n \"pyparsing>=2.4.2,<2.5\",\n \"pyperclip>=1.6.0,<1.9\",\n \"ruamel.yaml>=0.16,<0.17.17\",\n+ # Kaitai parsers depend on setuptools, remove once https://github.com/kaitai-io/kaitai_struct_python_runtime/issues/62 is fixed\n+ \"setuptools\",\n \"sortedcontainers>=2.3,<2.5\",\n \"tornado>=6.1,<7\",\n \"urwid>=2.1.1,<2.2\",\n", "issue": "mitmproxy depends on `pkg_resources` at runtime but does not specify `install_requires=setuptools`\n#### Problem Description\r\nA clear and concise description of what the bug is.\r\n\r\nhttps://github.com/mitmproxy/mitmproxy/blob/0ca458fd6475ee48728147f3b529467a75e912a4/mitmproxy/contrib/kaitaistruct/exif.py#L7\r\n\r\nthe `pkg_resources` module is provided by `setuptools`\r\n\r\ninstallation into a minimal environment (for example, bazel) will break without also needing to specify `setuptools`\r\n\r\nmitmproxy should depend on `setuptools`\r\n\r\n\r\n#### Steps to reproduce the behavior:\r\n\r\n1. simulate a minimal environment:\r\n\r\n```\r\nvirtualenv venv\r\nvenv/bin/pip install mitmproxy\r\nvenv/bin/pip uninstall setuptools\r\nvenv/bin/pip install mitmproxy # make sure we actually have its deps even after uninstalling\r\n```\r\n\r\n2. run `mitmproxy --help`\r\n\r\n```console\r\n$ venv/bin/mitmproxy --help\r\nTraceback (most recent call last):\r\n File \"venv/bin/mitmproxy\", line 5, in <module>\r\n from mitmproxy.tools.main import mitmproxy\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/tools/main.py\", line 8, in <module>\r\n from mitmproxy import exceptions, master\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/master.py\", line 10, in <module>\r\n from mitmproxy import eventsequence\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/eventsequence.py\", line 8, in <module>\r\n from mitmproxy.proxy import layers\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/__init__.py\", line 1, in <module>\r\n from . import modes\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/modes.py\", line 9, in <module>\r\n from mitmproxy.proxy.layers import tls\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/tls.py\", line 8, in <module>\r\n from mitmproxy.net import tls as net_tls\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/net/tls.py\", line 18, in <module>\r\n from mitmproxy.contrib.kaitaistruct import tls_client_hello\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/contrib/kaitaistruct/tls_client_hello.py\", line 7, in <module>\r\n from pkg_resources import parse_version\r\nModuleNotFoundError: No module named 'pkg_resources'\r\n```\r\n\r\n#### System Information\r\nPaste the output of \"mitmproxy --version\" here.\r\n\r\n```console\r\n$ venv/bin/mitmproxy --version\r\nTraceback (most recent call last):\r\n File \"venv/bin/mitmproxy\", line 5, in <module>\r\n from mitmproxy.tools.main import mitmproxy\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/tools/main.py\", line 8, in <module>\r\n from mitmproxy import exceptions, master\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/master.py\", line 10, in <module>\r\n from mitmproxy import eventsequence\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/eventsequence.py\", line 8, in <module>\r\n from mitmproxy.proxy import layers\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/__init__.py\", line 1, in <module>\r\n from . 
import modes\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/modes.py\", line 9, in <module>\r\n from mitmproxy.proxy.layers import tls\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/tls.py\", line 8, in <module>\r\n from mitmproxy.net import tls as net_tls\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/net/tls.py\", line 18, in <module>\r\n from mitmproxy.contrib.kaitaistruct import tls_client_hello\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/contrib/kaitaistruct/tls_client_hello.py\", line 7, in <module>\r\n from pkg_resources import parse_version\r\nModuleNotFoundError: No module named 'pkg_resources'\r\n```\n", "before_files": [{"content": "import os\nimport re\nfrom codecs import open\n\nfrom setuptools import find_packages, setup\n\n# Based on https://github.com/pypa/sampleproject/blob/main/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nlong_description_content_type = \"text/markdown\"\n\nwith open(os.path.join(here, \"mitmproxy\", \"version.py\")) as f:\n match = re.search(r'VERSION = \"(.+?)\"', f.read())\n assert match\n VERSION = match.group(1)\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL/TLS-capable intercepting proxy for HTTP/1, HTTP/2, and WebSockets.\",\n long_description=long_description,\n long_description_content_type=long_description_content_type,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: System :: Networking :: Monitoring\",\n \"Topic :: Software Development :: Testing\",\n \"Typing :: Typed\",\n ],\n project_urls={\n 'Documentation': 'https://docs.mitmproxy.org/stable/',\n 'Source': 'https://github.com/mitmproxy/mitmproxy/',\n 'Tracker': 'https://github.com/mitmproxy/mitmproxy/issues',\n },\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n ]\n },\n python_requires='>=3.8',\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"asgiref>=3.2.10,<3.5\",\n \"blinker>=1.4, <1.5\",\n \"Brotli>=1.0,<1.1\",\n \"certifi>=2019.9.11\", # no semver here - this should always be on the last release!\n \"click>=7.0,<8.1\",\n \"cryptography>=3.3,<3.5\",\n \"flask>=1.1.1,<2.1\",\n \"h11>=0.11,<0.13\",\n \"h2>=4.1,<5\",\n \"hyperframe>=6.0,<7\",\n \"kaitaistruct>=0.7,<0.10\",\n \"ldap3>=2.8,<2.10\",\n 
\"msgpack>=1.0.0, <1.1.0\",\n \"passlib>=1.6.5, <1.8\",\n \"protobuf>=3.14,<3.19\",\n \"pyOpenSSL>=21.0,<21.1\",\n \"pyparsing>=2.4.2,<2.5\",\n \"pyperclip>=1.6.0,<1.9\",\n \"ruamel.yaml>=0.16,<0.17.17\",\n \"sortedcontainers>=2.3,<2.5\",\n \"tornado>=6.1,<7\",\n \"urwid>=2.1.1,<2.2\",\n \"wsproto>=1.0,<1.1\",\n \"publicsuffix2>=2.20190812,<3\",\n \"zstandard>=0.11,<0.16\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n \"hypothesis>=5.8,<7\",\n \"parver>=0.1,<2.0\",\n \"pdoc>=4.0.0\",\n \"pyinstaller==4.5.1\",\n \"pytest-asyncio>=0.10.0,<0.16,!=0.14\",\n \"pytest-cov>=2.7.1,<3\",\n \"pytest-timeout>=1.3.3,<2\",\n \"pytest-xdist>=2.1.0,<3\",\n \"pytest>=6.1.0,<7\",\n \"requests>=2.9.1,<3\",\n \"tox>=3.5,<4\",\n \"wheel>=0.36.2,<0.38\",\n \"coverage==5.5\", # workaround issue with import errors introduced in 5.6b1/6.0\n ],\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport re\nfrom codecs import open\n\nfrom setuptools import find_packages, setup\n\n# Based on https://github.com/pypa/sampleproject/blob/main/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nlong_description_content_type = \"text/markdown\"\n\nwith open(os.path.join(here, \"mitmproxy\", \"version.py\")) as f:\n match = re.search(r'VERSION = \"(.+?)\"', f.read())\n assert match\n VERSION = match.group(1)\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL/TLS-capable intercepting proxy for HTTP/1, HTTP/2, and WebSockets.\",\n long_description=long_description,\n long_description_content_type=long_description_content_type,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: System :: Networking :: Monitoring\",\n \"Topic :: Software Development :: Testing\",\n \"Typing :: Typed\",\n ],\n project_urls={\n 'Documentation': 'https://docs.mitmproxy.org/stable/',\n 'Source': 'https://github.com/mitmproxy/mitmproxy/',\n 'Tracker': 'https://github.com/mitmproxy/mitmproxy/issues',\n },\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n ]\n },\n python_requires='>=3.8',\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"asgiref>=3.2.10,<3.5\",\n \"blinker>=1.4, <1.5\",\n \"Brotli>=1.0,<1.1\",\n \"certifi>=2019.9.11\", # no semver here - this should always be on the last 
release!\n \"click>=7.0,<8.1\",\n \"cryptography>=3.3,<3.5\",\n \"flask>=1.1.1,<2.1\",\n \"h11>=0.11,<0.13\",\n \"h2>=4.1,<5\",\n \"hyperframe>=6.0,<7\",\n \"kaitaistruct>=0.7,<0.10\",\n \"ldap3>=2.8,<2.10\",\n \"msgpack>=1.0.0, <1.1.0\",\n \"passlib>=1.6.5, <1.8\",\n \"protobuf>=3.14,<3.19\",\n \"pyOpenSSL>=21.0,<21.1\",\n \"pyparsing>=2.4.2,<2.5\",\n \"pyperclip>=1.6.0,<1.9\",\n \"ruamel.yaml>=0.16,<0.17.17\",\n # Kaitai parsers depend on setuptools, remove once https://github.com/kaitai-io/kaitai_struct_python_runtime/issues/62 is fixed\n \"setuptools\",\n \"sortedcontainers>=2.3,<2.5\",\n \"tornado>=6.1,<7\",\n \"urwid>=2.1.1,<2.2\",\n \"wsproto>=1.0,<1.1\",\n \"publicsuffix2>=2.20190812,<3\",\n \"zstandard>=0.11,<0.16\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n \"hypothesis>=5.8,<7\",\n \"parver>=0.1,<2.0\",\n \"pdoc>=4.0.0\",\n \"pyinstaller==4.5.1\",\n \"pytest-asyncio>=0.10.0,<0.16,!=0.14\",\n \"pytest-cov>=2.7.1,<3\",\n \"pytest-timeout>=1.3.3,<2\",\n \"pytest-xdist>=2.1.0,<3\",\n \"pytest>=6.1.0,<7\",\n \"requests>=2.9.1,<3\",\n \"tox>=3.5,<4\",\n \"wheel>=0.36.2,<0.38\",\n \"coverage==5.5\", # workaround issue with import errors introduced in 5.6b1/6.0\n ],\n }\n)\n", "path": "setup.py"}]}
num_tokens: 2,793
num_tokens_diff: 162
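The traceback in the mitmproxy record above bottoms out in a module-level `from pkg_resources import parse_version` inside the kaitai-generated parser, which is why the golden diff simply adds `setuptools` to `install_requires`. A minimal sketch of that failure mode follows; the explicit fallback message is an assumption for illustration, the real module does a bare import:

```python
# pkg_resources only exists when setuptools is installed in the environment,
# so a venv created without setuptools hits ModuleNotFoundError at import time.
try:
    from pkg_resources import parse_version  # shipped with setuptools
except ModuleNotFoundError as exc:
    raise ModuleNotFoundError(
        "pkg_resources is missing; install setuptools "
        "(the golden diff above pins it in install_requires)"
    ) from exc

# parse_version gives PEP 440 ordering, e.g. 0.9 < 0.10:
assert parse_version("0.9") < parse_version("0.10")
```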
problem_id: gh_patches_debug_230
source: rasdani/github-patches
task_type: git_diff
in_source_id: jupyterhub__jupyterhub-2545
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Releasing 1.0 With #2435 rounding out the final thing I think we need for the next release, I think it's time to put together the 1.0 release. This should consist of: - [x] assembling changelog #2440 - [x] making sure new features are well documented - [x] publishing beta release - [x] test beta (perhaps by adding it to the z2jh chart) - [ ] release 1.0 final --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `jupyterhub/_version.py` Content: ``` 1 """JupyterHub version info""" 2 # Copyright (c) Jupyter Development Team. 3 # Distributed under the terms of the Modified BSD License. 4 5 version_info = ( 6 1, 7 0, 8 0, 9 "b2", # release (b1, rc1, or "" for final or dev) 10 # "dev", # dev or nothing 11 ) 12 13 # pep 440 version: no dot before beta/rc, but before .dev 14 # 0.1.0rc1 15 # 0.1.0a1 16 # 0.1.0b1.dev 17 # 0.1.0.dev 18 19 __version__ = ".".join(map(str, version_info[:3])) + ".".join(version_info[3:]) 20 21 22 def _check_version(hub_version, singleuser_version, log): 23 """Compare Hub and single-user server versions""" 24 if not hub_version: 25 log.warning( 26 "Hub has no version header, which means it is likely < 0.8. Expected %s", 27 __version__, 28 ) 29 return 30 31 if not singleuser_version: 32 log.warning( 33 "Single-user server has no version header, which means it is likely < 0.8. Expected %s", 34 __version__, 35 ) 36 return 37 38 # compare minor X.Y versions 39 if hub_version != singleuser_version: 40 from distutils.version import LooseVersion as V 41 42 hub_major_minor = V(hub_version).version[:2] 43 singleuser_major_minor = V(singleuser_version).version[:2] 44 extra = "" 45 if singleuser_major_minor == hub_major_minor: 46 # patch-level mismatch or lower, log difference at debug-level 47 # because this should be fine 48 log_method = log.debug 49 else: 50 # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc. 51 log_method = log.warning 52 extra = " This could cause failure to authenticate and result in redirect loops!" 53 log_method( 54 "jupyterhub version %s != jupyterhub-singleuser version %s." + extra, 55 hub_version, 56 singleuser_version, 57 ) 58 else: 59 log.debug( 60 "jupyterhub and jupyterhub-singleuser both on version %s" % hub_version 61 ) 62 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/jupyterhub/_version.py b/jupyterhub/_version.py --- a/jupyterhub/_version.py +++ b/jupyterhub/_version.py @@ -6,7 +6,7 @@ 1, 0, 0, - "b2", # release (b1, rc1, or "" for final or dev) + # "b2", # release (b1, rc1, or "" for final or dev) # "dev", # dev or nothing )
{"golden_diff": "diff --git a/jupyterhub/_version.py b/jupyterhub/_version.py\n--- a/jupyterhub/_version.py\n+++ b/jupyterhub/_version.py\n@@ -6,7 +6,7 @@\n 1,\n 0,\n 0,\n- \"b2\", # release (b1, rc1, or \"\" for final or dev)\n+ # \"b2\", # release (b1, rc1, or \"\" for final or dev)\n # \"dev\", # dev or nothing\n )\n", "issue": "Releasing 1.0\nWith #2435 rounding out the final thing I think we need for the next release, I think it's time to put together the 1.0 release.\r\n\r\nThis should consist of:\r\n\r\n- [x] assembling changelog #2440\r\n- [x] making sure new features are well documented\r\n- [x] publishing beta release\r\n- [x] test beta (perhaps by adding it to the z2jh chart)\r\n- [ ] release 1.0 final\n", "before_files": [{"content": "\"\"\"JupyterHub version info\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nversion_info = (\n 1,\n 0,\n 0,\n \"b2\", # release (b1, rc1, or \"\" for final or dev)\n # \"dev\", # dev or nothing\n)\n\n# pep 440 version: no dot before beta/rc, but before .dev\n# 0.1.0rc1\n# 0.1.0a1\n# 0.1.0b1.dev\n# 0.1.0.dev\n\n__version__ = \".\".join(map(str, version_info[:3])) + \".\".join(version_info[3:])\n\n\ndef _check_version(hub_version, singleuser_version, log):\n \"\"\"Compare Hub and single-user server versions\"\"\"\n if not hub_version:\n log.warning(\n \"Hub has no version header, which means it is likely < 0.8. Expected %s\",\n __version__,\n )\n return\n\n if not singleuser_version:\n log.warning(\n \"Single-user server has no version header, which means it is likely < 0.8. Expected %s\",\n __version__,\n )\n return\n\n # compare minor X.Y versions\n if hub_version != singleuser_version:\n from distutils.version import LooseVersion as V\n\n hub_major_minor = V(hub_version).version[:2]\n singleuser_major_minor = V(singleuser_version).version[:2]\n extra = \"\"\n if singleuser_major_minor == hub_major_minor:\n # patch-level mismatch or lower, log difference at debug-level\n # because this should be fine\n log_method = log.debug\n else:\n # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.\n log_method = log.warning\n extra = \" This could cause failure to authenticate and result in redirect loops!\"\n log_method(\n \"jupyterhub version %s != jupyterhub-singleuser version %s.\" + extra,\n hub_version,\n singleuser_version,\n )\n else:\n log.debug(\n \"jupyterhub and jupyterhub-singleuser both on version %s\" % hub_version\n )\n", "path": "jupyterhub/_version.py"}], "after_files": [{"content": "\"\"\"JupyterHub version info\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nversion_info = (\n 1,\n 0,\n 0,\n # \"b2\", # release (b1, rc1, or \"\" for final or dev)\n # \"dev\", # dev or nothing\n)\n\n# pep 440 version: no dot before beta/rc, but before .dev\n# 0.1.0rc1\n# 0.1.0a1\n# 0.1.0b1.dev\n# 0.1.0.dev\n\n__version__ = \".\".join(map(str, version_info[:3])) + \".\".join(version_info[3:])\n\n\ndef _check_version(hub_version, singleuser_version, log):\n \"\"\"Compare Hub and single-user server versions\"\"\"\n if not hub_version:\n log.warning(\n \"Hub has no version header, which means it is likely < 0.8. Expected %s\",\n __version__,\n )\n return\n\n if not singleuser_version:\n log.warning(\n \"Single-user server has no version header, which means it is likely < 0.8. 
Expected %s\",\n __version__,\n )\n return\n\n # compare minor X.Y versions\n if hub_version != singleuser_version:\n from distutils.version import LooseVersion as V\n\n hub_major_minor = V(hub_version).version[:2]\n singleuser_major_minor = V(singleuser_version).version[:2]\n extra = \"\"\n if singleuser_major_minor == hub_major_minor:\n # patch-level mismatch or lower, log difference at debug-level\n # because this should be fine\n log_method = log.debug\n else:\n # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.\n log_method = log.warning\n extra = \" This could cause failure to authenticate and result in redirect loops!\"\n log_method(\n \"jupyterhub version %s != jupyterhub-singleuser version %s.\" + extra,\n hub_version,\n singleuser_version,\n )\n else:\n log.debug(\n \"jupyterhub and jupyterhub-singleuser both on version %s\" % hub_version\n )\n", "path": "jupyterhub/_version.py"}]}
num_tokens: 980
num_tokens_diff: 117
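The JupyterHub fix above only comments out the "b2" pre-release component; the version string itself is produced by the join expression already present in `_version.py`. Isolated as a sketch:

```python
# Mirrors: __version__ = ".".join(map(str, version_info[:3])) + ".".join(version_info[3:])
def render(version_info):
    return ".".join(map(str, version_info[:3])) + ".".join(version_info[3:])

print(render((1, 0, 0, "b2")))  # 1.0.0b2 -> beta tag still present
print(render((1, 0, 0)))        # 1.0.0   -> tag commented out, final release
```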
problem_id: gh_patches_debug_5303
source: rasdani/github-patches
task_type: git_diff
in_source_id: googleapis__python-bigquery-498
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Closing a connection tries to close already closed cursors When closing a connection the library will call `.close()` on every cursor created for that connection, including closed ones. While the code works, it produces a lot of logs of error level, polluting the logs. #### Environment details - OS type and version: Mac OS 10.15.7 - Python version: Python 3.8.2 - pip version: pip 20.3.3 - `google-cloud-bigquery` version: 2.7.0 #### Steps to reproduce 1. Close a cursor 2. Close the connection 3. Error log shows `Exception closing connection <google.cloud.bigquery.dbapi.connection.Connection object at 0x...>` #### Code example ```python from contextlib import closing # using pybigquery with closing(engine.raw_connection()) as conn: with closing(conn.cursor()) as cursor: cursor.execute(sql) ``` #### Stack trace ``` Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/sqlalchemy/pool/base.py", line 270, in _close_connection self._dialect.do_close(connection) File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 549, in do_close dbapi_connection.close() File "/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/_helpers.py", line 258, in with_closed_check return method(self, *args, **kwargs) File "/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/connection.py", line 79, in close cursor_.close() File "/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/_helpers.py", line 257, in with_closed_check raise exc_class(exc_msg) google.cloud.bigquery.dbapi.exceptions.ProgrammingError: Operating on a closed cursor. ``` #### Suggested fix ```python # google/cloud/bigquery/dbapi/connection.py class Connection(object): ... def close(self): ... for cursor_ in self._cursors_created: if not cursor_._closed: cursor_.close() ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `google/cloud/bigquery/dbapi/connection.py` Content: ``` 1 # Copyright 2017 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Connection for the Google BigQuery DB-API.""" 16 17 import weakref 18 19 from google.cloud import bigquery 20 from google.cloud.bigquery.dbapi import cursor 21 from google.cloud.bigquery.dbapi import _helpers 22 23 24 @_helpers.raise_on_closed("Operating on a closed connection.") 25 class Connection(object): 26 """DB-API Connection to Google BigQuery. 27 28 Args: 29 client (Optional[google.cloud.bigquery.Client]): 30 A REST API client used to connect to BigQuery. If not passed, a 31 client is created using default options inferred from the environment. 32 bqstorage_client(\ 33 Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \ 34 ): 35 A client that uses the faster BigQuery Storage API to fetch rows from 36 BigQuery. 
If not passed, it is created using the same credentials 37 as ``client`` (provided that BigQuery Storage dependencies are installed). 38 39 If both clients are available, ``bqstorage_client`` is used for 40 fetching query results. 41 """ 42 43 def __init__(self, client=None, bqstorage_client=None): 44 if client is None: 45 client = bigquery.Client() 46 self._owns_client = True 47 else: 48 self._owns_client = False 49 50 if bqstorage_client is None: 51 # A warning is already raised by the factory if instantiation fails. 52 bqstorage_client = client._create_bqstorage_client() 53 self._owns_bqstorage_client = bqstorage_client is not None 54 else: 55 self._owns_bqstorage_client = False 56 57 self._client = client 58 self._bqstorage_client = bqstorage_client 59 60 self._closed = False 61 self._cursors_created = weakref.WeakSet() 62 63 def close(self): 64 """Close the connection and any cursors created from it. 65 66 Any BigQuery clients explicitly passed to the constructor are *not* 67 closed, only those created by the connection instance itself. 68 """ 69 self._closed = True 70 71 if self._owns_client: 72 self._client.close() 73 74 if self._owns_bqstorage_client: 75 # There is no close() on the BQ Storage client itself. 76 self._bqstorage_client._transport.grpc_channel.close() 77 78 for cursor_ in self._cursors_created: 79 cursor_.close() 80 81 def commit(self): 82 """No-op, but for consistency raise an error if connection is closed.""" 83 84 def cursor(self): 85 """Return a new cursor object. 86 87 Returns: 88 google.cloud.bigquery.dbapi.Cursor: A DB-API cursor that uses this connection. 89 """ 90 new_cursor = cursor.Cursor(self) 91 self._cursors_created.add(new_cursor) 92 return new_cursor 93 94 95 def connect(client=None, bqstorage_client=None): 96 """Construct a DB-API connection to Google BigQuery. 97 98 Args: 99 client (Optional[google.cloud.bigquery.Client]): 100 A REST API client used to connect to BigQuery. If not passed, a 101 client is created using default options inferred from the environment. 102 bqstorage_client(\ 103 Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \ 104 ): 105 A client that uses the faster BigQuery Storage API to fetch rows from 106 BigQuery. If not passed, it is created using the same credentials 107 as ``client`` (provided that BigQuery Storage dependencies are installed). 108 109 If both clients are available, ``bqstorage_client`` is used for 110 fetching query results. 111 112 Returns: 113 google.cloud.bigquery.dbapi.Connection: A new DB-API connection to BigQuery. 114 """ 115 return Connection(client, bqstorage_client) 116 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/google/cloud/bigquery/dbapi/connection.py b/google/cloud/bigquery/dbapi/connection.py --- a/google/cloud/bigquery/dbapi/connection.py +++ b/google/cloud/bigquery/dbapi/connection.py @@ -76,7 +76,8 @@ self._bqstorage_client._transport.grpc_channel.close() for cursor_ in self._cursors_created: - cursor_.close() + if not cursor_._closed: + cursor_.close() def commit(self): """No-op, but for consistency raise an error if connection is closed."""
{"golden_diff": "diff --git a/google/cloud/bigquery/dbapi/connection.py b/google/cloud/bigquery/dbapi/connection.py\n--- a/google/cloud/bigquery/dbapi/connection.py\n+++ b/google/cloud/bigquery/dbapi/connection.py\n@@ -76,7 +76,8 @@\n self._bqstorage_client._transport.grpc_channel.close()\n \n for cursor_ in self._cursors_created:\n- cursor_.close()\n+ if not cursor_._closed:\n+ cursor_.close()\n \n def commit(self):\n \"\"\"No-op, but for consistency raise an error if connection is closed.\"\"\"\n", "issue": "Closing a connection tries to close already closed cursors\nWhen closing a connection the library will call `.close()` on every cursor created for that connection, including closed ones. While the code works, it produces a lot of logs of error level, polluting the logs.\r\n\r\n#### Environment details\r\n\r\n - OS type and version: Mac OS 10.15.7\r\n - Python version: Python 3.8.2\r\n - pip version: pip 20.3.3\r\n - `google-cloud-bigquery` version: 2.7.0\r\n\r\n#### Steps to reproduce\r\n\r\n 1. Close a cursor\r\n 2. Close the connection\r\n 3. Error log shows `Exception closing connection <google.cloud.bigquery.dbapi.connection.Connection object at 0x...>`\r\n\r\n#### Code example\r\n\r\n```python\r\nfrom contextlib import closing\r\n\r\n# using pybigquery\r\nwith closing(engine.raw_connection()) as conn:\r\n with closing(conn.cursor()) as cursor:\r\n cursor.execute(sql)\r\n```\r\n\r\n#### Stack trace\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/sqlalchemy/pool/base.py\", line 270, in _close_connection\r\n self._dialect.do_close(connection)\r\n File \"/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/default.py\", line 549, in do_close\r\n dbapi_connection.close()\r\n File \"/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/_helpers.py\", line 258, in with_closed_check\r\n return method(self, *args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/connection.py\", line 79, in close\r\n cursor_.close()\r\n File \"/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/_helpers.py\", line 257, in with_closed_check\r\n raise exc_class(exc_msg)\r\ngoogle.cloud.bigquery.dbapi.exceptions.ProgrammingError: Operating on a closed cursor.\r\n```\r\n\r\n#### Suggested fix\r\n\r\n```python\r\n# google/cloud/bigquery/dbapi/connection.py\r\nclass Connection(object):\r\n ...\r\n def close(self):\r\n ...\r\n for cursor_ in self._cursors_created:\r\n if not cursor_._closed:\r\n cursor_.close()\r\n```\n", "before_files": [{"content": "# Copyright 2017 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Connection for the Google BigQuery DB-API.\"\"\"\n\nimport weakref\n\nfrom google.cloud import bigquery\nfrom google.cloud.bigquery.dbapi import cursor\nfrom google.cloud.bigquery.dbapi import _helpers\n\n\n@_helpers.raise_on_closed(\"Operating on a closed connection.\")\nclass Connection(object):\n \"\"\"DB-API Connection to Google BigQuery.\n\n Args:\n client 
(Optional[google.cloud.bigquery.Client]):\n A REST API client used to connect to BigQuery. If not passed, a\n client is created using default options inferred from the environment.\n bqstorage_client(\\\n Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \\\n ):\n A client that uses the faster BigQuery Storage API to fetch rows from\n BigQuery. If not passed, it is created using the same credentials\n as ``client`` (provided that BigQuery Storage dependencies are installed).\n\n If both clients are available, ``bqstorage_client`` is used for\n fetching query results.\n \"\"\"\n\n def __init__(self, client=None, bqstorage_client=None):\n if client is None:\n client = bigquery.Client()\n self._owns_client = True\n else:\n self._owns_client = False\n\n if bqstorage_client is None:\n # A warning is already raised by the factory if instantiation fails.\n bqstorage_client = client._create_bqstorage_client()\n self._owns_bqstorage_client = bqstorage_client is not None\n else:\n self._owns_bqstorage_client = False\n\n self._client = client\n self._bqstorage_client = bqstorage_client\n\n self._closed = False\n self._cursors_created = weakref.WeakSet()\n\n def close(self):\n \"\"\"Close the connection and any cursors created from it.\n\n Any BigQuery clients explicitly passed to the constructor are *not*\n closed, only those created by the connection instance itself.\n \"\"\"\n self._closed = True\n\n if self._owns_client:\n self._client.close()\n\n if self._owns_bqstorage_client:\n # There is no close() on the BQ Storage client itself.\n self._bqstorage_client._transport.grpc_channel.close()\n\n for cursor_ in self._cursors_created:\n cursor_.close()\n\n def commit(self):\n \"\"\"No-op, but for consistency raise an error if connection is closed.\"\"\"\n\n def cursor(self):\n \"\"\"Return a new cursor object.\n\n Returns:\n google.cloud.bigquery.dbapi.Cursor: A DB-API cursor that uses this connection.\n \"\"\"\n new_cursor = cursor.Cursor(self)\n self._cursors_created.add(new_cursor)\n return new_cursor\n\n\ndef connect(client=None, bqstorage_client=None):\n \"\"\"Construct a DB-API connection to Google BigQuery.\n\n Args:\n client (Optional[google.cloud.bigquery.Client]):\n A REST API client used to connect to BigQuery. If not passed, a\n client is created using default options inferred from the environment.\n bqstorage_client(\\\n Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \\\n ):\n A client that uses the faster BigQuery Storage API to fetch rows from\n BigQuery. 
If not passed, it is created using the same credentials\n as ``client`` (provided that BigQuery Storage dependencies are installed).\n\n If both clients are available, ``bqstorage_client`` is used for\n fetching query results.\n\n Returns:\n google.cloud.bigquery.dbapi.Connection: A new DB-API connection to BigQuery.\n \"\"\"\n return Connection(client, bqstorage_client)\n", "path": "google/cloud/bigquery/dbapi/connection.py"}], "after_files": [{"content": "# Copyright 2017 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Connection for the Google BigQuery DB-API.\"\"\"\n\nimport weakref\n\nfrom google.cloud import bigquery\nfrom google.cloud.bigquery.dbapi import cursor\nfrom google.cloud.bigquery.dbapi import _helpers\n\n\n@_helpers.raise_on_closed(\"Operating on a closed connection.\")\nclass Connection(object):\n \"\"\"DB-API Connection to Google BigQuery.\n\n Args:\n client (Optional[google.cloud.bigquery.Client]):\n A REST API client used to connect to BigQuery. If not passed, a\n client is created using default options inferred from the environment.\n bqstorage_client(\\\n Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \\\n ):\n A client that uses the faster BigQuery Storage API to fetch rows from\n BigQuery. 
If not passed, it is created using the same credentials\n as ``client`` (provided that BigQuery Storage dependencies are installed).\n\n If both clients are available, ``bqstorage_client`` is used for\n fetching query results.\n \"\"\"\n\n def __init__(self, client=None, bqstorage_client=None):\n if client is None:\n client = bigquery.Client()\n self._owns_client = True\n else:\n self._owns_client = False\n\n if bqstorage_client is None:\n # A warning is already raised by the factory if instantiation fails.\n bqstorage_client = client._create_bqstorage_client()\n self._owns_bqstorage_client = bqstorage_client is not None\n else:\n self._owns_bqstorage_client = False\n\n self._client = client\n self._bqstorage_client = bqstorage_client\n\n self._closed = False\n self._cursors_created = weakref.WeakSet()\n\n def close(self):\n \"\"\"Close the connection and any cursors created from it.\n\n Any BigQuery clients explicitly passed to the constructor are *not*\n closed, only those created by the connection instance itself.\n \"\"\"\n self._closed = True\n\n if self._owns_client:\n self._client.close()\n\n if self._owns_bqstorage_client:\n # There is no close() on the BQ Storage client itself.\n self._bqstorage_client._transport.grpc_channel.close()\n\n for cursor_ in self._cursors_created:\n if not cursor_._closed:\n cursor_.close()\n\n def commit(self):\n \"\"\"No-op, but for consistency raise an error if connection is closed.\"\"\"\n\n def cursor(self):\n \"\"\"Return a new cursor object.\n\n Returns:\n google.cloud.bigquery.dbapi.Cursor: A DB-API cursor that uses this connection.\n \"\"\"\n new_cursor = cursor.Cursor(self)\n self._cursors_created.add(new_cursor)\n return new_cursor\n\n\ndef connect(client=None, bqstorage_client=None):\n \"\"\"Construct a DB-API connection to Google BigQuery.\n\n Args:\n client (Optional[google.cloud.bigquery.Client]):\n A REST API client used to connect to BigQuery. If not passed, a\n client is created using default options inferred from the environment.\n bqstorage_client(\\\n Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \\\n ):\n A client that uses the faster BigQuery Storage API to fetch rows from\n BigQuery. If not passed, it is created using the same credentials\n as ``client`` (provided that BigQuery Storage dependencies are installed).\n\n If both clients are available, ``bqstorage_client`` is used for\n fetching query results.\n\n Returns:\n google.cloud.bigquery.dbapi.Connection: A new DB-API connection to BigQuery.\n \"\"\"\n return Connection(client, bqstorage_client)\n", "path": "google/cloud/bigquery/dbapi/connection.py"}]}
num_tokens: 1,904
num_tokens_diff: 124
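The one-line guard in the BigQuery fix above (`if not cursor_._closed`) is what stops `Connection.close()` from re-closing cursors the caller already closed via `contextlib.closing`. A standalone sketch of the pattern, using a stand-in class rather than the real DB-API cursor:

```python
import weakref


class Cursor:  # stand-in for google.cloud.bigquery.dbapi.Cursor
    def __init__(self):
        self._closed = False

    def close(self):
        if self._closed:
            raise RuntimeError("Operating on a closed cursor.")
        self._closed = True


cursors_created = weakref.WeakSet()
cur = Cursor()
cursors_created.add(cur)
cur.close()                      # closed first by the caller

for cursor_ in cursors_created:  # what Connection.close() iterates over
    if not cursor_._closed:      # the guard added by the golden diff
        cursor_.close()
print("connection closed without touching already-closed cursors")
```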
problem_id: gh_patches_debug_24087
source: rasdani/github-patches
task_type: git_diff
in_source_id: web2py__web2py-1960
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Can't initiate simplejsonrpc service object When attempting to instatiate a simplejsonrpc service using Python3, the following happens: ``` >>> from gluon.contrib.simplejsonrpc import ServerProxy >>> URL = "http://localhost:8000/app/webservice/call/jsonrpc" >>> service = ServerProxy(URL, verbose=False) Traceback (most recent call last): File "<console>", line 1, in <module> File "/opt/web2py/gluon/contrib/simplejsonrpc.py", line 92, in __init__ type, uri = urllib.splittype(uri) AttributeError: module 'urllib' has no attribute 'splittype' ``` As far as I could see, the main problem is, among other little issues, that the function splittype has been moved to urllib.request, therefore it is not found. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gluon/contrib/simplejsonrpc.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This program is free software; you can redistribute it and/or modify 3 # it under the terms of the GNU Lesser General Public License as published by the 4 # Free Software Foundation; either version 3, or (at your option) any later 5 # version. 6 # 7 # This program is distributed in the hope that it will be useful, but 8 # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY 9 # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 10 # for more details. 11 12 "Pythonic simple JSON RPC Client implementation" 13 from __future__ import print_function 14 15 __author__ = "Mariano Reingart ([email protected])" 16 __copyright__ = "Copyright (C) 2011 Mariano Reingart" 17 __license__ = "LGPL 3.0" 18 __version__ = "0.05" 19 20 import sys 21 PY2 = sys.version_info[0] == 2 22 23 import urllib 24 if PY2: 25 from xmlrpclib import Transport, SafeTransport 26 from cStringIO import StringIO 27 else: 28 from xmlrpc.client import Transport, SafeTransport 29 from io import StringIO 30 import random 31 import json 32 33 34 class JSONRPCError(RuntimeError): 35 "Error object for remote procedure call fail" 36 def __init__(self, code, message, data=''): 37 value = "%s: %s\n%s" % (code, message, '\n'.join(data)) 38 RuntimeError.__init__(self, value) 39 self.code = code 40 self.message = message 41 self.data = data 42 43 44 class JSONDummyParser: 45 "json wrapper for xmlrpclib parser interfase" 46 def __init__(self): 47 self.buf = StringIO() 48 49 def feed(self, data): 50 self.buf.write(data) 51 52 def close(self): 53 return self.buf.getvalue() 54 55 56 class JSONTransportMixin: 57 "json wrapper for xmlrpclib transport interfase" 58 59 def send_content(self, connection, request_body): 60 connection.putheader("Content-Type", "application/json") 61 connection.putheader("Content-Length", str(len(request_body))) 62 connection.endheaders() 63 if request_body: 64 connection.send(request_body) 65 # todo: add gzip compression 66 67 def getparser(self): 68 # get parser and unmarshaller 69 parser = JSONDummyParser() 70 return parser, parser 71 72 73 class JSONTransport(JSONTransportMixin, Transport): 74 pass 75 76 77 class JSONSafeTransport(JSONTransportMixin, SafeTransport): 78 pass 79 80 81 class ServerProxy(object): 82 "JSON RPC Simple Client Service Proxy" 83 84 def __init__(self, uri, transport=None, encoding=None, verbose=0,version=None): 85 self.location = uri # server location (url) 86 self.trace = verbose # show debug messages 87 self.exceptions = True # 
raise errors? (JSONRPCError) 88 self.timeout = None 89 self.json_request = self.json_response = '' 90 self.version = version # '2.0' for jsonrpc2 91 92 type, uri = urllib.splittype(uri) 93 if type not in ("http", "https"): 94 raise IOError("unsupported JSON-RPC protocol") 95 self.__host, self.__handler = urllib.splithost(uri) 96 97 if transport is None: 98 if type == "https": 99 transport = JSONSafeTransport() 100 else: 101 transport = JSONTransport() 102 self.__transport = transport 103 self.__encoding = encoding 104 self.__verbose = verbose 105 106 def __getattr__(self, attr): 107 "pseudo method that can be called" 108 return lambda *args, **vars: self.call(attr, *args, **vars) 109 110 def call(self, method, *args, **vars): 111 "JSON RPC communication (method invocation)" 112 113 # build data sent to the service 114 request_id = random.randint(0, sys.maxsize) 115 data = {'id': request_id, 'method': method, 'params': args or vars, } 116 if self.version: 117 data['jsonrpc'] = self.version #mandatory key/value for jsonrpc2 validation else err -32600 118 request = json.dumps(data) 119 120 # make HTTP request (retry if connection is lost) 121 response = self.__transport.request( 122 self.__host, 123 self.__handler, 124 request, 125 verbose=self.__verbose 126 ) 127 128 # store plain request and response for further debugging 129 self.json_request = request 130 self.json_response = response 131 132 # parse json data coming from service 133 # {'version': '1.1', 'id': id, 'result': result, 'error': None} 134 response = json.loads(response) 135 136 self.error = response.get('error', {}) 137 if self.error and self.exceptions: 138 raise JSONRPCError(self.error.get('code', 0), 139 self.error.get('message', ''), 140 self.error.get('data', None)) 141 if response['id'] != request_id: 142 raise JSONRPCError(0, "JSON Request ID != Response ID") 143 144 return response.get('result') 145 146 147 ServiceProxy = ServerProxy 148 149 150 if __name__ == "__main__": 151 # basic tests: 152 location = "http://www.web2py.com.ar/webservices/sample/call/jsonrpc" 153 client = ServerProxy(location, verbose='--verbose' in sys.argv,) 154 print(client.add(1, 2)) 155 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gluon/contrib/simplejsonrpc.py b/gluon/contrib/simplejsonrpc.py --- a/gluon/contrib/simplejsonrpc.py +++ b/gluon/contrib/simplejsonrpc.py @@ -20,11 +20,12 @@ import sys PY2 = sys.version_info[0] == 2 -import urllib if PY2: + import urllib from xmlrpclib import Transport, SafeTransport from cStringIO import StringIO else: + import urllib.request as urllib from xmlrpc.client import Transport, SafeTransport from io import StringIO import random @@ -47,7 +48,7 @@ self.buf = StringIO() def feed(self, data): - self.buf.write(data) + self.buf.write(data.decode('utf-8')) def close(self): return self.buf.getvalue() @@ -61,7 +62,7 @@ connection.putheader("Content-Length", str(len(request_body))) connection.endheaders() if request_body: - connection.send(request_body) + connection.send(str.encode(request_body)) # todo: add gzip compression def getparser(self):
{"golden_diff": "diff --git a/gluon/contrib/simplejsonrpc.py b/gluon/contrib/simplejsonrpc.py\n--- a/gluon/contrib/simplejsonrpc.py\n+++ b/gluon/contrib/simplejsonrpc.py\n@@ -20,11 +20,12 @@\n import sys\n PY2 = sys.version_info[0] == 2\n \n-import urllib\n if PY2:\n+ import urllib\n from xmlrpclib import Transport, SafeTransport\n from cStringIO import StringIO\n else:\n+ import urllib.request as urllib\n from xmlrpc.client import Transport, SafeTransport\n from io import StringIO\n import random\n@@ -47,7 +48,7 @@\n self.buf = StringIO()\n \n def feed(self, data):\n- self.buf.write(data)\n+ self.buf.write(data.decode('utf-8'))\n \n def close(self):\n return self.buf.getvalue()\n@@ -61,7 +62,7 @@\n connection.putheader(\"Content-Length\", str(len(request_body)))\n connection.endheaders()\n if request_body:\n- connection.send(request_body)\n+ connection.send(str.encode(request_body))\n # todo: add gzip compression\n \n def getparser(self):\n", "issue": "Can't initiate simplejsonrpc service object\nWhen attempting to instatiate a simplejsonrpc service using Python3, the following happens:\r\n```\r\n>>> from gluon.contrib.simplejsonrpc import ServerProxy\r\n>>> URL = \"http://localhost:8000/app/webservice/call/jsonrpc\"\r\n>>> service = ServerProxy(URL, verbose=False)\r\nTraceback (most recent call last):\r\n File \"<console>\", line 1, in <module>\r\n File \"/opt/web2py/gluon/contrib/simplejsonrpc.py\", line 92, in __init__\r\n type, uri = urllib.splittype(uri)\r\nAttributeError: module 'urllib' has no attribute 'splittype'\r\n```\r\nAs far as I could see, the main problem is, among other little issues, that the function splittype has been moved to urllib.request, therefore it is not found.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation; either version 3, or (at your option) any later\n# version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY\n# or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License\n# for more details.\n\n\"Pythonic simple JSON RPC Client implementation\"\nfrom __future__ import print_function\n\n__author__ = \"Mariano Reingart ([email protected])\"\n__copyright__ = \"Copyright (C) 2011 Mariano Reingart\"\n__license__ = \"LGPL 3.0\"\n__version__ = \"0.05\"\n\nimport sys\nPY2 = sys.version_info[0] == 2\n\nimport urllib\nif PY2:\n from xmlrpclib import Transport, SafeTransport\n from cStringIO import StringIO\nelse:\n from xmlrpc.client import Transport, SafeTransport\n from io import StringIO\nimport random\nimport json\n\n\nclass JSONRPCError(RuntimeError):\n \"Error object for remote procedure call fail\"\n def __init__(self, code, message, data=''):\n value = \"%s: %s\\n%s\" % (code, message, '\\n'.join(data))\n RuntimeError.__init__(self, value)\n self.code = code\n self.message = message\n self.data = data\n\n\nclass JSONDummyParser:\n \"json wrapper for xmlrpclib parser interfase\"\n def __init__(self):\n self.buf = StringIO()\n\n def feed(self, data):\n self.buf.write(data)\n\n def close(self):\n return self.buf.getvalue()\n\n\nclass JSONTransportMixin:\n \"json wrapper for xmlrpclib transport interfase\"\n\n def send_content(self, connection, request_body):\n connection.putheader(\"Content-Type\", \"application/json\")\n connection.putheader(\"Content-Length\", str(len(request_body)))\n connection.endheaders()\n if request_body:\n connection.send(request_body)\n # todo: add gzip compression\n\n def getparser(self):\n # get parser and unmarshaller\n parser = JSONDummyParser()\n return parser, parser\n\n\nclass JSONTransport(JSONTransportMixin, Transport):\n pass\n\n\nclass JSONSafeTransport(JSONTransportMixin, SafeTransport):\n pass\n\n\nclass ServerProxy(object):\n \"JSON RPC Simple Client Service Proxy\"\n\n def __init__(self, uri, transport=None, encoding=None, verbose=0,version=None):\n self.location = uri # server location (url)\n self.trace = verbose # show debug messages\n self.exceptions = True # raise errors? 
(JSONRPCError)\n self.timeout = None\n self.json_request = self.json_response = ''\n self.version = version # '2.0' for jsonrpc2\n\n type, uri = urllib.splittype(uri)\n if type not in (\"http\", \"https\"):\n raise IOError(\"unsupported JSON-RPC protocol\")\n self.__host, self.__handler = urllib.splithost(uri)\n\n if transport is None:\n if type == \"https\":\n transport = JSONSafeTransport()\n else:\n transport = JSONTransport()\n self.__transport = transport\n self.__encoding = encoding\n self.__verbose = verbose\n\n def __getattr__(self, attr):\n \"pseudo method that can be called\"\n return lambda *args, **vars: self.call(attr, *args, **vars)\n\n def call(self, method, *args, **vars):\n \"JSON RPC communication (method invocation)\"\n\n # build data sent to the service\n request_id = random.randint(0, sys.maxsize)\n data = {'id': request_id, 'method': method, 'params': args or vars, }\n if self.version:\n data['jsonrpc'] = self.version #mandatory key/value for jsonrpc2 validation else err -32600\n request = json.dumps(data)\n\n # make HTTP request (retry if connection is lost)\n response = self.__transport.request(\n self.__host,\n self.__handler,\n request,\n verbose=self.__verbose\n )\n\n # store plain request and response for further debugging\n self.json_request = request\n self.json_response = response\n\n # parse json data coming from service\n # {'version': '1.1', 'id': id, 'result': result, 'error': None}\n response = json.loads(response)\n\n self.error = response.get('error', {})\n if self.error and self.exceptions:\n raise JSONRPCError(self.error.get('code', 0),\n self.error.get('message', ''),\n self.error.get('data', None))\n if response['id'] != request_id:\n raise JSONRPCError(0, \"JSON Request ID != Response ID\")\n\n return response.get('result')\n\n\nServiceProxy = ServerProxy\n\n\nif __name__ == \"__main__\":\n # basic tests:\n location = \"http://www.web2py.com.ar/webservices/sample/call/jsonrpc\"\n client = ServerProxy(location, verbose='--verbose' in sys.argv,)\n print(client.add(1, 2))\n", "path": "gluon/contrib/simplejsonrpc.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation; either version 3, or (at your option) any later\n# version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY\n# or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License\n# for more details.\n\n\"Pythonic simple JSON RPC Client implementation\"\nfrom __future__ import print_function\n\n__author__ = \"Mariano Reingart ([email protected])\"\n__copyright__ = \"Copyright (C) 2011 Mariano Reingart\"\n__license__ = \"LGPL 3.0\"\n__version__ = \"0.05\"\n\nimport sys\nPY2 = sys.version_info[0] == 2\n\nif PY2:\n import urllib\n from xmlrpclib import Transport, SafeTransport\n from cStringIO import StringIO\nelse:\n import urllib.request as urllib\n from xmlrpc.client import Transport, SafeTransport\n from io import StringIO\nimport random\nimport json\n\n\nclass JSONRPCError(RuntimeError):\n \"Error object for remote procedure call fail\"\n def __init__(self, code, message, data=''):\n value = \"%s: %s\\n%s\" % (code, message, '\\n'.join(data))\n RuntimeError.__init__(self, value)\n self.code = code\n self.message = message\n self.data = data\n\n\nclass JSONDummyParser:\n \"json wrapper for xmlrpclib parser interfase\"\n def __init__(self):\n self.buf = StringIO()\n\n def feed(self, data):\n self.buf.write(data.decode('utf-8'))\n\n def close(self):\n return self.buf.getvalue()\n\n\nclass JSONTransportMixin:\n \"json wrapper for xmlrpclib transport interfase\"\n\n def send_content(self, connection, request_body):\n connection.putheader(\"Content-Type\", \"application/json\")\n connection.putheader(\"Content-Length\", str(len(request_body)))\n connection.endheaders()\n if request_body:\n connection.send(str.encode(request_body))\n # todo: add gzip compression\n\n def getparser(self):\n # get parser and unmarshaller\n parser = JSONDummyParser()\n return parser, parser\n\n\nclass JSONTransport(JSONTransportMixin, Transport):\n pass\n\n\nclass JSONSafeTransport(JSONTransportMixin, SafeTransport):\n pass\n\n\nclass ServerProxy(object):\n \"JSON RPC Simple Client Service Proxy\"\n\n def __init__(self, uri, transport=None, encoding=None, verbose=0,version=None):\n self.location = uri # server location (url)\n self.trace = verbose # show debug messages\n self.exceptions = True # raise errors? 
(JSONRPCError)\n self.timeout = None\n self.json_request = self.json_response = ''\n self.version = version # '2.0' for jsonrpc2\n\n type, uri = urllib.splittype(uri)\n if type not in (\"http\", \"https\"):\n raise IOError(\"unsupported JSON-RPC protocol\")\n self.__host, self.__handler = urllib.splithost(uri)\n\n if transport is None:\n if type == \"https\":\n transport = JSONSafeTransport()\n else:\n transport = JSONTransport()\n self.__transport = transport\n self.__encoding = encoding\n self.__verbose = verbose\n\n def __getattr__(self, attr):\n \"pseudo method that can be called\"\n return lambda *args, **vars: self.call(attr, *args, **vars)\n\n def call(self, method, *args, **vars):\n \"JSON RPC communication (method invocation)\"\n\n # build data sent to the service\n request_id = random.randint(0, sys.maxsize)\n data = {'id': request_id, 'method': method, 'params': args or vars, }\n if self.version:\n data['jsonrpc'] = self.version #mandatory key/value for jsonrpc2 validation else err -32600\n request = json.dumps(data)\n\n # make HTTP request (retry if connection is lost)\n response = self.__transport.request(\n self.__host,\n self.__handler,\n request,\n verbose=self.__verbose\n )\n\n # store plain request and response for further debugging\n self.json_request = request\n self.json_response = response\n\n # parse json data coming from service\n # {'version': '1.1', 'id': id, 'result': result, 'error': None}\n response = json.loads(response)\n\n self.error = response.get('error', {})\n if self.error and self.exceptions:\n raise JSONRPCError(self.error.get('code', 0),\n self.error.get('message', ''),\n self.error.get('data', None))\n if response['id'] != request_id:\n raise JSONRPCError(0, \"JSON Request ID != Response ID\")\n\n return response.get('result')\n\n\nServiceProxy = ServerProxy\n\n\nif __name__ == \"__main__\":\n # basic tests:\n location = \"http://www.web2py.com.ar/webservices/sample/call/jsonrpc\"\n client = ServerProxy(location, verbose='--verbose' in sys.argv,)\n print(client.add(1, 2))\n", "path": "gluon/contrib/simplejsonrpc.py"}]}
num_tokens: 1,963
num_tokens_diff: 262
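Besides moving the import to `urllib.request`, the web2py fix above also has to respect the Python 3 str/bytes split: the transport must send bytes, while the parser buffer collects text. A small sketch of those two hunks; the sample JSON payloads are made up for illustration:

```python
from io import StringIO

# feed(): bytes arrive from the HTTP response and must be decoded before buffering
buf = StringIO()
raw = b'{"id": 1, "result": 3, "error": null}'
buf.write(raw.decode("utf-8"))

# send_content(): the request body is built as str and must be encoded before send()
request_body = '{"id": 1, "method": "add", "params": [1, 2]}'
wire_payload = str.encode(request_body)

print(type(wire_payload).__name__, "|", buf.getvalue())  # bytes | {"id": 1, ...}
```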
problem_id: gh_patches_debug_9643
source: rasdani/github-patches
task_type: git_diff
in_source_id: aio-libs__aiohttp-3819
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TooManyRedirects is not properly exposed publicly ## Description https://github.com/aio-libs/aiohttp/commit/544716cf93b3856e485c8e993de60d9911c002cb (#2943) added `TooManyRedirects` and added it as `aiohttp.TooManyRedirects` [in the documentation](https://docs.aiohttp.org/en/latest/client_reference.html#aiohttp.TooManyRedirects). However, it's not properly exposed publicly in `__init__.py` and can only be referenced as `aiohttp.client_exceptions.TooManyRedirects`. ## Expected Behaviour `aiohttp.TooManyRedirects` works, by reference to `aiohttp.client_exceptions.TooManyRedirects` ## Actual Behaviour `AttributeError: module 'aiohttp' has no attribute 'TooManyRedirects` ## Reproduction Steps `import aiohttp` and attempt to use `aiohttp.TooManyRedirects` ## Environment aiohttp 3.5.4 Also reproducible on latest master (https://github.com/aio-libs/aiohttp/commit/81cc715227f6b0f3b36ee6d8bcc95a63a37fbdab) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `aiohttp/__init__.py` Content: ``` 1 __version__ = '4.0.0a0' 2 3 from typing import Tuple # noqa 4 5 from . import hdrs 6 from .client import ( 7 BaseConnector, 8 ClientConnectionError, 9 ClientConnectorCertificateError, 10 ClientConnectorError, 11 ClientConnectorSSLError, 12 ClientError, 13 ClientHttpProxyError, 14 ClientOSError, 15 ClientPayloadError, 16 ClientProxyConnectionError, 17 ClientRequest, 18 ClientResponse, 19 ClientResponseError, 20 ClientSession, 21 ClientSSLError, 22 ClientTimeout, 23 ClientWebSocketResponse, 24 ContentTypeError, 25 Fingerprint, 26 InvalidURL, 27 RequestInfo, 28 ServerConnectionError, 29 ServerDisconnectedError, 30 ServerFingerprintMismatch, 31 ServerTimeoutError, 32 TCPConnector, 33 UnixConnector, 34 WSServerHandshakeError, 35 request, 36 ) 37 from .cookiejar import CookieJar, DummyCookieJar 38 from .formdata import FormData 39 from .helpers import BasicAuth, ChainMapProxy 40 from .http import ( 41 HttpVersion, 42 HttpVersion10, 43 HttpVersion11, 44 WebSocketError, 45 WSCloseCode, 46 WSMessage, 47 WSMsgType, 48 ) 49 from .multipart import ( 50 BadContentDispositionHeader, 51 BadContentDispositionParam, 52 BodyPartReader, 53 MultipartReader, 54 MultipartWriter, 55 content_disposition_filename, 56 parse_content_disposition, 57 ) 58 from .payload import ( 59 PAYLOAD_REGISTRY, 60 AsyncIterablePayload, 61 BufferedReaderPayload, 62 BytesIOPayload, 63 BytesPayload, 64 IOBasePayload, 65 JsonPayload, 66 Payload, 67 StringIOPayload, 68 StringPayload, 69 TextIOPayload, 70 get_payload, 71 payload_type, 72 ) 73 from .resolver import AsyncResolver, DefaultResolver, ThreadedResolver 74 from .signals import Signal 75 from .streams import ( 76 EMPTY_PAYLOAD, 77 DataQueue, 78 EofStream, 79 FlowControlDataQueue, 80 StreamReader, 81 ) 82 from .tracing import ( 83 TraceConfig, 84 TraceConnectionCreateEndParams, 85 TraceConnectionCreateStartParams, 86 TraceConnectionQueuedEndParams, 87 TraceConnectionQueuedStartParams, 88 TraceConnectionReuseconnParams, 89 TraceDnsCacheHitParams, 90 TraceDnsCacheMissParams, 91 TraceDnsResolveHostEndParams, 92 TraceDnsResolveHostStartParams, 93 TraceRequestChunkSentParams, 94 TraceRequestEndParams, 95 TraceRequestExceptionParams, 96 TraceRequestRedirectParams, 97 TraceRequestStartParams, 98 TraceResponseChunkReceivedParams, 99 ) 100 101 __all__ = ( 102 'hdrs', 103 # client 104 'BaseConnector', 105 
'ClientConnectionError', 106 'ClientConnectorCertificateError', 107 'ClientConnectorError', 108 'ClientConnectorSSLError', 109 'ClientError', 110 'ClientHttpProxyError', 111 'ClientOSError', 112 'ClientPayloadError', 113 'ClientProxyConnectionError', 114 'ClientResponse', 115 'ClientRequest', 116 'ClientResponseError', 117 'ClientSSLError', 118 'ClientSession', 119 'ClientTimeout', 120 'ClientWebSocketResponse', 121 'ContentTypeError', 122 'Fingerprint', 123 'InvalidURL', 124 'RequestInfo', 125 'ServerConnectionError', 126 'ServerDisconnectedError', 127 'ServerFingerprintMismatch', 128 'ServerTimeoutError', 129 'TCPConnector', 130 'UnixConnector', 131 'WSServerHandshakeError', 132 'request', 133 # cookiejar 134 'CookieJar', 135 'DummyCookieJar', 136 # formdata 137 'FormData', 138 # helpers 139 'BasicAuth', 140 'ChainMapProxy', 141 # http 142 'HttpVersion', 143 'HttpVersion10', 144 'HttpVersion11', 145 'WSMsgType', 146 'WSCloseCode', 147 'WSMessage', 148 'WebSocketError', 149 # multipart 150 'BadContentDispositionHeader', 151 'BadContentDispositionParam', 152 'BodyPartReader', 153 'MultipartReader', 154 'MultipartWriter', 155 'content_disposition_filename', 156 'parse_content_disposition', 157 # payload 158 'AsyncIterablePayload', 159 'BufferedReaderPayload', 160 'BytesIOPayload', 161 'BytesPayload', 162 'IOBasePayload', 163 'JsonPayload', 164 'PAYLOAD_REGISTRY', 165 'Payload', 166 'StringIOPayload', 167 'StringPayload', 168 'TextIOPayload', 169 'get_payload', 170 'payload_type', 171 # resolver 172 'AsyncResolver', 173 'DefaultResolver', 174 'ThreadedResolver', 175 # signals 176 'Signal', 177 'DataQueue', 178 'EMPTY_PAYLOAD', 179 'EofStream', 180 'FlowControlDataQueue', 181 'StreamReader', 182 # tracing 183 'TraceConfig', 184 'TraceConnectionCreateEndParams', 185 'TraceConnectionCreateStartParams', 186 'TraceConnectionQueuedEndParams', 187 'TraceConnectionQueuedStartParams', 188 'TraceConnectionReuseconnParams', 189 'TraceDnsCacheHitParams', 190 'TraceDnsCacheMissParams', 191 'TraceDnsResolveHostEndParams', 192 'TraceDnsResolveHostStartParams', 193 'TraceRequestChunkSentParams', 194 'TraceRequestEndParams', 195 'TraceRequestExceptionParams', 196 'TraceRequestRedirectParams', 197 'TraceRequestStartParams', 198 'TraceResponseChunkReceivedParams', 199 ) # type: Tuple[str, ...] 200 201 try: 202 from .worker import GunicornWebWorker, GunicornUVLoopWebWorker # noqa 203 __all__ += ('GunicornWebWorker', 'GunicornUVLoopWebWorker') 204 except ImportError: # pragma: no cover 205 pass 206 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/aiohttp/__init__.py b/aiohttp/__init__.py --- a/aiohttp/__init__.py +++ b/aiohttp/__init__.py @@ -30,6 +30,7 @@ ServerFingerprintMismatch, ServerTimeoutError, TCPConnector, + TooManyRedirects, UnixConnector, WSServerHandshakeError, request, @@ -127,6 +128,7 @@ 'ServerFingerprintMismatch', 'ServerTimeoutError', 'TCPConnector', + 'TooManyRedirects', 'UnixConnector', 'WSServerHandshakeError', 'request',
{"golden_diff": "diff --git a/aiohttp/__init__.py b/aiohttp/__init__.py\n--- a/aiohttp/__init__.py\n+++ b/aiohttp/__init__.py\n@@ -30,6 +30,7 @@\n ServerFingerprintMismatch,\n ServerTimeoutError,\n TCPConnector,\n+ TooManyRedirects,\n UnixConnector,\n WSServerHandshakeError,\n request,\n@@ -127,6 +128,7 @@\n 'ServerFingerprintMismatch',\n 'ServerTimeoutError',\n 'TCPConnector',\n+ 'TooManyRedirects',\n 'UnixConnector',\n 'WSServerHandshakeError',\n 'request',\n", "issue": "TooManyRedirects is not properly exposed publicly\n## Description\r\n\r\nhttps://github.com/aio-libs/aiohttp/commit/544716cf93b3856e485c8e993de60d9911c002cb (#2943) added `TooManyRedirects` and added it as `aiohttp.TooManyRedirects` [in the documentation](https://docs.aiohttp.org/en/latest/client_reference.html#aiohttp.TooManyRedirects).\r\nHowever, it's not properly exposed publicly in `__init__.py` and can only be referenced as `aiohttp.client_exceptions.TooManyRedirects`.\r\n\r\n## Expected Behaviour\r\n\r\n`aiohttp.TooManyRedirects` works, by reference to `aiohttp.client_exceptions.TooManyRedirects`\r\n\r\n## Actual Behaviour\r\n\r\n`AttributeError: module 'aiohttp' has no attribute 'TooManyRedirects`\r\n\r\n## Reproduction Steps\r\n\r\n`import aiohttp` and attempt to use `aiohttp.TooManyRedirects`\r\n\r\n## Environment\r\n\r\naiohttp 3.5.4\r\nAlso reproducible on latest master (https://github.com/aio-libs/aiohttp/commit/81cc715227f6b0f3b36ee6d8bcc95a63a37fbdab)\n", "before_files": [{"content": "__version__ = '4.0.0a0'\n\nfrom typing import Tuple # noqa\n\nfrom . import hdrs\nfrom .client import (\n BaseConnector,\n ClientConnectionError,\n ClientConnectorCertificateError,\n ClientConnectorError,\n ClientConnectorSSLError,\n ClientError,\n ClientHttpProxyError,\n ClientOSError,\n ClientPayloadError,\n ClientProxyConnectionError,\n ClientRequest,\n ClientResponse,\n ClientResponseError,\n ClientSession,\n ClientSSLError,\n ClientTimeout,\n ClientWebSocketResponse,\n ContentTypeError,\n Fingerprint,\n InvalidURL,\n RequestInfo,\n ServerConnectionError,\n ServerDisconnectedError,\n ServerFingerprintMismatch,\n ServerTimeoutError,\n TCPConnector,\n UnixConnector,\n WSServerHandshakeError,\n request,\n)\nfrom .cookiejar import CookieJar, DummyCookieJar\nfrom .formdata import FormData\nfrom .helpers import BasicAuth, ChainMapProxy\nfrom .http import (\n HttpVersion,\n HttpVersion10,\n HttpVersion11,\n WebSocketError,\n WSCloseCode,\n WSMessage,\n WSMsgType,\n)\nfrom .multipart import (\n BadContentDispositionHeader,\n BadContentDispositionParam,\n BodyPartReader,\n MultipartReader,\n MultipartWriter,\n content_disposition_filename,\n parse_content_disposition,\n)\nfrom .payload import (\n PAYLOAD_REGISTRY,\n AsyncIterablePayload,\n BufferedReaderPayload,\n BytesIOPayload,\n BytesPayload,\n IOBasePayload,\n JsonPayload,\n Payload,\n StringIOPayload,\n StringPayload,\n TextIOPayload,\n get_payload,\n payload_type,\n)\nfrom .resolver import AsyncResolver, DefaultResolver, ThreadedResolver\nfrom .signals import Signal\nfrom .streams import (\n EMPTY_PAYLOAD,\n DataQueue,\n EofStream,\n FlowControlDataQueue,\n StreamReader,\n)\nfrom .tracing import (\n TraceConfig,\n TraceConnectionCreateEndParams,\n TraceConnectionCreateStartParams,\n TraceConnectionQueuedEndParams,\n TraceConnectionQueuedStartParams,\n TraceConnectionReuseconnParams,\n TraceDnsCacheHitParams,\n TraceDnsCacheMissParams,\n TraceDnsResolveHostEndParams,\n TraceDnsResolveHostStartParams,\n TraceRequestChunkSentParams,\n TraceRequestEndParams,\n 
TraceRequestExceptionParams,\n TraceRequestRedirectParams,\n TraceRequestStartParams,\n TraceResponseChunkReceivedParams,\n)\n\n__all__ = (\n 'hdrs',\n # client\n 'BaseConnector',\n 'ClientConnectionError',\n 'ClientConnectorCertificateError',\n 'ClientConnectorError',\n 'ClientConnectorSSLError',\n 'ClientError',\n 'ClientHttpProxyError',\n 'ClientOSError',\n 'ClientPayloadError',\n 'ClientProxyConnectionError',\n 'ClientResponse',\n 'ClientRequest',\n 'ClientResponseError',\n 'ClientSSLError',\n 'ClientSession',\n 'ClientTimeout',\n 'ClientWebSocketResponse',\n 'ContentTypeError',\n 'Fingerprint',\n 'InvalidURL',\n 'RequestInfo',\n 'ServerConnectionError',\n 'ServerDisconnectedError',\n 'ServerFingerprintMismatch',\n 'ServerTimeoutError',\n 'TCPConnector',\n 'UnixConnector',\n 'WSServerHandshakeError',\n 'request',\n # cookiejar\n 'CookieJar',\n 'DummyCookieJar',\n # formdata\n 'FormData',\n # helpers\n 'BasicAuth',\n 'ChainMapProxy',\n # http\n 'HttpVersion',\n 'HttpVersion10',\n 'HttpVersion11',\n 'WSMsgType',\n 'WSCloseCode',\n 'WSMessage',\n 'WebSocketError',\n # multipart\n 'BadContentDispositionHeader',\n 'BadContentDispositionParam',\n 'BodyPartReader',\n 'MultipartReader',\n 'MultipartWriter',\n 'content_disposition_filename',\n 'parse_content_disposition',\n # payload\n 'AsyncIterablePayload',\n 'BufferedReaderPayload',\n 'BytesIOPayload',\n 'BytesPayload',\n 'IOBasePayload',\n 'JsonPayload',\n 'PAYLOAD_REGISTRY',\n 'Payload',\n 'StringIOPayload',\n 'StringPayload',\n 'TextIOPayload',\n 'get_payload',\n 'payload_type',\n # resolver\n 'AsyncResolver',\n 'DefaultResolver',\n 'ThreadedResolver',\n # signals\n 'Signal',\n 'DataQueue',\n 'EMPTY_PAYLOAD',\n 'EofStream',\n 'FlowControlDataQueue',\n 'StreamReader',\n # tracing\n 'TraceConfig',\n 'TraceConnectionCreateEndParams',\n 'TraceConnectionCreateStartParams',\n 'TraceConnectionQueuedEndParams',\n 'TraceConnectionQueuedStartParams',\n 'TraceConnectionReuseconnParams',\n 'TraceDnsCacheHitParams',\n 'TraceDnsCacheMissParams',\n 'TraceDnsResolveHostEndParams',\n 'TraceDnsResolveHostStartParams',\n 'TraceRequestChunkSentParams',\n 'TraceRequestEndParams',\n 'TraceRequestExceptionParams',\n 'TraceRequestRedirectParams',\n 'TraceRequestStartParams',\n 'TraceResponseChunkReceivedParams',\n) # type: Tuple[str, ...]\n\ntry:\n from .worker import GunicornWebWorker, GunicornUVLoopWebWorker # noqa\n __all__ += ('GunicornWebWorker', 'GunicornUVLoopWebWorker')\nexcept ImportError: # pragma: no cover\n pass\n", "path": "aiohttp/__init__.py"}], "after_files": [{"content": "__version__ = '4.0.0a0'\n\nfrom typing import Tuple # noqa\n\nfrom . 
import hdrs\nfrom .client import (\n BaseConnector,\n ClientConnectionError,\n ClientConnectorCertificateError,\n ClientConnectorError,\n ClientConnectorSSLError,\n ClientError,\n ClientHttpProxyError,\n ClientOSError,\n ClientPayloadError,\n ClientProxyConnectionError,\n ClientRequest,\n ClientResponse,\n ClientResponseError,\n ClientSession,\n ClientSSLError,\n ClientTimeout,\n ClientWebSocketResponse,\n ContentTypeError,\n Fingerprint,\n InvalidURL,\n RequestInfo,\n ServerConnectionError,\n ServerDisconnectedError,\n ServerFingerprintMismatch,\n ServerTimeoutError,\n TCPConnector,\n TooManyRedirects,\n UnixConnector,\n WSServerHandshakeError,\n request,\n)\nfrom .cookiejar import CookieJar, DummyCookieJar\nfrom .formdata import FormData\nfrom .helpers import BasicAuth, ChainMapProxy\nfrom .http import (\n HttpVersion,\n HttpVersion10,\n HttpVersion11,\n WebSocketError,\n WSCloseCode,\n WSMessage,\n WSMsgType,\n)\nfrom .multipart import (\n BadContentDispositionHeader,\n BadContentDispositionParam,\n BodyPartReader,\n MultipartReader,\n MultipartWriter,\n content_disposition_filename,\n parse_content_disposition,\n)\nfrom .payload import (\n PAYLOAD_REGISTRY,\n AsyncIterablePayload,\n BufferedReaderPayload,\n BytesIOPayload,\n BytesPayload,\n IOBasePayload,\n JsonPayload,\n Payload,\n StringIOPayload,\n StringPayload,\n TextIOPayload,\n get_payload,\n payload_type,\n)\nfrom .resolver import AsyncResolver, DefaultResolver, ThreadedResolver\nfrom .signals import Signal\nfrom .streams import (\n EMPTY_PAYLOAD,\n DataQueue,\n EofStream,\n FlowControlDataQueue,\n StreamReader,\n)\nfrom .tracing import (\n TraceConfig,\n TraceConnectionCreateEndParams,\n TraceConnectionCreateStartParams,\n TraceConnectionQueuedEndParams,\n TraceConnectionQueuedStartParams,\n TraceConnectionReuseconnParams,\n TraceDnsCacheHitParams,\n TraceDnsCacheMissParams,\n TraceDnsResolveHostEndParams,\n TraceDnsResolveHostStartParams,\n TraceRequestChunkSentParams,\n TraceRequestEndParams,\n TraceRequestExceptionParams,\n TraceRequestRedirectParams,\n TraceRequestStartParams,\n TraceResponseChunkReceivedParams,\n)\n\n__all__ = (\n 'hdrs',\n # client\n 'BaseConnector',\n 'ClientConnectionError',\n 'ClientConnectorCertificateError',\n 'ClientConnectorError',\n 'ClientConnectorSSLError',\n 'ClientError',\n 'ClientHttpProxyError',\n 'ClientOSError',\n 'ClientPayloadError',\n 'ClientProxyConnectionError',\n 'ClientResponse',\n 'ClientRequest',\n 'ClientResponseError',\n 'ClientSSLError',\n 'ClientSession',\n 'ClientTimeout',\n 'ClientWebSocketResponse',\n 'ContentTypeError',\n 'Fingerprint',\n 'InvalidURL',\n 'RequestInfo',\n 'ServerConnectionError',\n 'ServerDisconnectedError',\n 'ServerFingerprintMismatch',\n 'ServerTimeoutError',\n 'TCPConnector',\n 'TooManyRedirects',\n 'UnixConnector',\n 'WSServerHandshakeError',\n 'request',\n # cookiejar\n 'CookieJar',\n 'DummyCookieJar',\n # formdata\n 'FormData',\n # helpers\n 'BasicAuth',\n 'ChainMapProxy',\n # http\n 'HttpVersion',\n 'HttpVersion10',\n 'HttpVersion11',\n 'WSMsgType',\n 'WSCloseCode',\n 'WSMessage',\n 'WebSocketError',\n # multipart\n 'BadContentDispositionHeader',\n 'BadContentDispositionParam',\n 'BodyPartReader',\n 'MultipartReader',\n 'MultipartWriter',\n 'content_disposition_filename',\n 'parse_content_disposition',\n # payload\n 'AsyncIterablePayload',\n 'BufferedReaderPayload',\n 'BytesIOPayload',\n 'BytesPayload',\n 'IOBasePayload',\n 'JsonPayload',\n 'PAYLOAD_REGISTRY',\n 'Payload',\n 'StringIOPayload',\n 'StringPayload',\n 'TextIOPayload',\n 
'get_payload',\n 'payload_type',\n # resolver\n 'AsyncResolver',\n 'DefaultResolver',\n 'ThreadedResolver',\n # signals\n 'Signal',\n 'DataQueue',\n 'EMPTY_PAYLOAD',\n 'EofStream',\n 'FlowControlDataQueue',\n 'StreamReader',\n # tracing\n 'TraceConfig',\n 'TraceConnectionCreateEndParams',\n 'TraceConnectionCreateStartParams',\n 'TraceConnectionQueuedEndParams',\n 'TraceConnectionQueuedStartParams',\n 'TraceConnectionReuseconnParams',\n 'TraceDnsCacheHitParams',\n 'TraceDnsCacheMissParams',\n 'TraceDnsResolveHostEndParams',\n 'TraceDnsResolveHostStartParams',\n 'TraceRequestChunkSentParams',\n 'TraceRequestEndParams',\n 'TraceRequestExceptionParams',\n 'TraceRequestRedirectParams',\n 'TraceRequestStartParams',\n 'TraceResponseChunkReceivedParams',\n) # type: Tuple[str, ...]\n\ntry:\n from .worker import GunicornWebWorker, GunicornUVLoopWebWorker # noqa\n __all__ += ('GunicornWebWorker', 'GunicornUVLoopWebWorker')\nexcept ImportError: # pragma: no cover\n pass\n", "path": "aiohttp/__init__.py"}]}
2,245
147
gh_patches_debug_13016
rasdani/github-patches
git_diff
PrefectHQ__prefect-3847
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AzureResult read function not working ## Description <!-- A clear description of the bug --> Using `AzureResult` to read a file, an exception is raised: > TypeError: a bytes-like object is required, not 'StorageStreamDownloader' Looking at the `read` function in `src/prefect/engine/results/azure_result.py` I see the following: ```python content_string = client.download_blob() try: new.value = new.serializer.deserialize(content_string) ``` `client.download_blob()` returns a `StorageStreamDownloader`, while `new.serailizer.deserialize(content_string)` expects a byte array. ## Expected Behavior I would expect that the file would be read without raising an exception. ## Reproduction ```python from prefect.engine.results import AzureResult con_string = "" azure_result = AzureResult(container="data", connection_string=con_string) w = azure_result.write({"Test": "123"}) r = azure_result.read(w.location) ``` ## Environment ```json { "config_overrides": { "context": { "secrets": false } }, "env_vars": [], "system_information": { "platform": "Windows-10-10.0.18362-SP0", "prefect_backend": "server", "prefect_version": "0.13.19", "python_version": "3.9.0" } } ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/prefect/engine/results/azure_result.py` Content: ``` 1 import os 2 from typing import TYPE_CHECKING, Any 3 4 from prefect.client import Secret 5 from prefect.engine.result import Result 6 7 if TYPE_CHECKING: 8 import azure.storage.blob 9 10 11 class AzureResult(Result): 12 """ 13 Result for writing to and reading from an Azure Blob storage. 14 15 Note that your flow's runtime environment must be able to authenticate with 16 Azure; there are currently two supported options: provide a connection string 17 either at initialization or at runtime through an environment variable, or 18 set your Azure connection string as a Prefect Secret. Using an environment 19 variable is the recommended approach. 20 21 Args: 22 - container (str): the name of the container to write to / read from 23 - connection_string (str, optional): an Azure connection string for communicating with 24 Blob storage. If not provided the value set in the environment as 25 `AZURE_STORAGE_CONNECTION_STRING` will be used 26 - connection_string_secret (str, optional): the name of a Prefect Secret 27 which stores your Azure connection tring 28 - **kwargs (Any, optional): any additional `Result` initialization options 29 """ 30 31 def __init__( 32 self, 33 container: str, 34 connection_string: str = None, 35 connection_string_secret: str = None, 36 **kwargs: Any 37 ) -> None: 38 self.container = container 39 self.connection_string = connection_string or os.getenv( 40 "AZURE_STORAGE_CONNECTION_STRING" 41 ) 42 self.connection_string_secret = connection_string_secret 43 super().__init__(**kwargs) 44 45 def initialize_service(self) -> None: 46 """ 47 Initialize a Blob service. 
48 """ 49 import azure.storage.blob 50 51 connection_string = self.connection_string 52 if not connection_string and self.connection_string_secret: 53 connection_string = Secret(self.connection_string_secret).get() 54 55 self._service = azure.storage.blob.BlobServiceClient.from_connection_string( 56 conn_str=connection_string 57 ) 58 59 @property 60 def service(self) -> "azure.storage.blob.BlobServiceClient": 61 if not hasattr(self, "_service"): 62 self.initialize_service() 63 return self._service 64 65 @service.setter 66 def service(self, val: Any) -> None: 67 self._service = val 68 69 def __getstate__(self) -> dict: 70 state = self.__dict__.copy() 71 if "_service" in state: 72 del state["_service"] 73 return state 74 75 def __setstate__(self, state: dict) -> None: 76 self.__dict__.update(state) 77 78 def write(self, value_: Any, **kwargs: Any) -> Result: 79 """ 80 Writes the result value to a blob storage in Azure. 81 82 Args: 83 - value_ (Any): the value to write; will then be stored as the `value` attribute 84 of the returned `Result` instance 85 - **kwargs (optional): if provided, will be used to format the location template 86 to determine the location to write to 87 88 Returns: 89 - Result: a new Result instance with the appropriately formatted location 90 """ 91 new = self.format(**kwargs) 92 new.value = value_ 93 94 self.logger.debug("Starting to upload result to {}...".format(new.location)) 95 96 # prepare data 97 binary_data = new.serializer.serialize(new.value) 98 99 # initialize client and upload 100 client = self.service.get_blob_client( 101 container=self.container, blob=new.location 102 ) 103 client.upload_blob(binary_data) 104 105 self.logger.debug("Finished uploading result to {}.".format(new.location)) 106 107 return new 108 109 def read(self, location: str) -> Result: 110 """ 111 Reads a result from an Azure Blob container and returns a corresponding `Result` instance. 112 113 Args: 114 - location (str): the Azure blob location to read from 115 116 Returns: 117 - Result: the read result 118 """ 119 new = self.copy() 120 new.location = location 121 122 try: 123 self.logger.debug("Starting to download result from {}...".format(location)) 124 125 # initialize client and download 126 client = self.service.get_blob_client( 127 container=self.container, blob=location 128 ) 129 content_string = client.download_blob() 130 131 try: 132 new.value = new.serializer.deserialize(content_string) 133 except EOFError: 134 new.value = None 135 self.logger.debug("Finished downloading result from {}.".format(location)) 136 except Exception as exc: 137 self.logger.exception( 138 "Unexpected error while reading from result handler: {}".format( 139 repr(exc) 140 ) 141 ) 142 raise exc 143 return new 144 145 def exists(self, location: str, **kwargs: Any) -> bool: 146 """ 147 Checks whether the target result exists. 148 149 Does not validate whether the result is `valid`, only that it is present. 150 151 Args: 152 - location (str): Location of the result in the specific result target. 153 Will check whether the provided location exists 154 - **kwargs (Any): string format arguments for `location` 155 156 Returns: 157 - bool: whether or not the target result exists. 
158 """ 159 from azure.core.exceptions import ResourceNotFoundError 160 161 # initialize client and download 162 client = self.service.get_blob_client( 163 container=self.container, blob=location.format(**kwargs) 164 ) 165 166 # Catch exception because Azure python bindings do not yet have an exists method 167 # https://github.com/Azure/azure-sdk-for-python/issues/9507 168 try: 169 client.get_blob_properties() 170 return True 171 except ResourceNotFoundError: 172 return False 173 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/prefect/engine/results/azure_result.py b/src/prefect/engine/results/azure_result.py --- a/src/prefect/engine/results/azure_result.py +++ b/src/prefect/engine/results/azure_result.py @@ -126,10 +126,10 @@ client = self.service.get_blob_client( container=self.container, blob=location ) - content_string = client.download_blob() + content_bytes = client.download_blob().content_as_bytes() try: - new.value = new.serializer.deserialize(content_string) + new.value = new.serializer.deserialize(content_bytes) except EOFError: new.value = None self.logger.debug("Finished downloading result from {}.".format(location))
{"golden_diff": "diff --git a/src/prefect/engine/results/azure_result.py b/src/prefect/engine/results/azure_result.py\n--- a/src/prefect/engine/results/azure_result.py\n+++ b/src/prefect/engine/results/azure_result.py\n@@ -126,10 +126,10 @@\n client = self.service.get_blob_client(\n container=self.container, blob=location\n )\n- content_string = client.download_blob()\n+ content_bytes = client.download_blob().content_as_bytes()\n \n try:\n- new.value = new.serializer.deserialize(content_string)\n+ new.value = new.serializer.deserialize(content_bytes)\n except EOFError:\n new.value = None\n self.logger.debug(\"Finished downloading result from {}.\".format(location))\n", "issue": "AzureResult read function not working\n## Description\r\n<!-- A clear description of the bug -->\r\nUsing `AzureResult` to read a file, an exception is raised:\r\n\r\n> TypeError: a bytes-like object is required, not 'StorageStreamDownloader'\r\n\r\nLooking at the `read` function in `src/prefect/engine/results/azure_result.py` I see the following:\r\n\r\n```python\r\ncontent_string = client.download_blob()\r\n\r\ntry:\r\n new.value = new.serializer.deserialize(content_string)\r\n```\r\n\r\n`client.download_blob()` returns a `StorageStreamDownloader`, while `new.serailizer.deserialize(content_string)` expects a byte array.\r\n\r\n## Expected Behavior\r\nI would expect that the file would be read without raising an exception.\r\n\r\n## Reproduction\r\n```python\r\nfrom prefect.engine.results import AzureResult\r\n\r\ncon_string = \"\"\r\nazure_result = AzureResult(container=\"data\", connection_string=con_string)\r\n\r\nw = azure_result.write({\"Test\": \"123\"})\r\nr = azure_result.read(w.location)\r\n```\r\n\r\n\r\n## Environment\r\n```json\r\n{\r\n \"config_overrides\": {\r\n \"context\": {\r\n \"secrets\": false\r\n }\r\n },\r\n \"env_vars\": [],\r\n \"system_information\": {\r\n \"platform\": \"Windows-10-10.0.18362-SP0\",\r\n \"prefect_backend\": \"server\",\r\n \"prefect_version\": \"0.13.19\",\r\n \"python_version\": \"3.9.0\"\r\n }\r\n}\r\n```\n", "before_files": [{"content": "import os\nfrom typing import TYPE_CHECKING, Any\n\nfrom prefect.client import Secret\nfrom prefect.engine.result import Result\n\nif TYPE_CHECKING:\n import azure.storage.blob\n\n\nclass AzureResult(Result):\n \"\"\"\n Result for writing to and reading from an Azure Blob storage.\n\n Note that your flow's runtime environment must be able to authenticate with\n Azure; there are currently two supported options: provide a connection string\n either at initialization or at runtime through an environment variable, or\n set your Azure connection string as a Prefect Secret. Using an environment\n variable is the recommended approach.\n\n Args:\n - container (str): the name of the container to write to / read from\n - connection_string (str, optional): an Azure connection string for communicating with\n Blob storage. 
If not provided the value set in the environment as\n `AZURE_STORAGE_CONNECTION_STRING` will be used\n - connection_string_secret (str, optional): the name of a Prefect Secret\n which stores your Azure connection tring\n - **kwargs (Any, optional): any additional `Result` initialization options\n \"\"\"\n\n def __init__(\n self,\n container: str,\n connection_string: str = None,\n connection_string_secret: str = None,\n **kwargs: Any\n ) -> None:\n self.container = container\n self.connection_string = connection_string or os.getenv(\n \"AZURE_STORAGE_CONNECTION_STRING\"\n )\n self.connection_string_secret = connection_string_secret\n super().__init__(**kwargs)\n\n def initialize_service(self) -> None:\n \"\"\"\n Initialize a Blob service.\n \"\"\"\n import azure.storage.blob\n\n connection_string = self.connection_string\n if not connection_string and self.connection_string_secret:\n connection_string = Secret(self.connection_string_secret).get()\n\n self._service = azure.storage.blob.BlobServiceClient.from_connection_string(\n conn_str=connection_string\n )\n\n @property\n def service(self) -> \"azure.storage.blob.BlobServiceClient\":\n if not hasattr(self, \"_service\"):\n self.initialize_service()\n return self._service\n\n @service.setter\n def service(self, val: Any) -> None:\n self._service = val\n\n def __getstate__(self) -> dict:\n state = self.__dict__.copy()\n if \"_service\" in state:\n del state[\"_service\"]\n return state\n\n def __setstate__(self, state: dict) -> None:\n self.__dict__.update(state)\n\n def write(self, value_: Any, **kwargs: Any) -> Result:\n \"\"\"\n Writes the result value to a blob storage in Azure.\n\n Args:\n - value_ (Any): the value to write; will then be stored as the `value` attribute\n of the returned `Result` instance\n - **kwargs (optional): if provided, will be used to format the location template\n to determine the location to write to\n\n Returns:\n - Result: a new Result instance with the appropriately formatted location\n \"\"\"\n new = self.format(**kwargs)\n new.value = value_\n\n self.logger.debug(\"Starting to upload result to {}...\".format(new.location))\n\n # prepare data\n binary_data = new.serializer.serialize(new.value)\n\n # initialize client and upload\n client = self.service.get_blob_client(\n container=self.container, blob=new.location\n )\n client.upload_blob(binary_data)\n\n self.logger.debug(\"Finished uploading result to {}.\".format(new.location))\n\n return new\n\n def read(self, location: str) -> Result:\n \"\"\"\n Reads a result from an Azure Blob container and returns a corresponding `Result` instance.\n\n Args:\n - location (str): the Azure blob location to read from\n\n Returns:\n - Result: the read result\n \"\"\"\n new = self.copy()\n new.location = location\n\n try:\n self.logger.debug(\"Starting to download result from {}...\".format(location))\n\n # initialize client and download\n client = self.service.get_blob_client(\n container=self.container, blob=location\n )\n content_string = client.download_blob()\n\n try:\n new.value = new.serializer.deserialize(content_string)\n except EOFError:\n new.value = None\n self.logger.debug(\"Finished downloading result from {}.\".format(location))\n except Exception as exc:\n self.logger.exception(\n \"Unexpected error while reading from result handler: {}\".format(\n repr(exc)\n )\n )\n raise exc\n return new\n\n def exists(self, location: str, **kwargs: Any) -> bool:\n \"\"\"\n Checks whether the target result exists.\n\n Does not validate whether the result is `valid`, 
only that it is present.\n\n Args:\n - location (str): Location of the result in the specific result target.\n Will check whether the provided location exists\n - **kwargs (Any): string format arguments for `location`\n\n Returns:\n - bool: whether or not the target result exists.\n \"\"\"\n from azure.core.exceptions import ResourceNotFoundError\n\n # initialize client and download\n client = self.service.get_blob_client(\n container=self.container, blob=location.format(**kwargs)\n )\n\n # Catch exception because Azure python bindings do not yet have an exists method\n # https://github.com/Azure/azure-sdk-for-python/issues/9507\n try:\n client.get_blob_properties()\n return True\n except ResourceNotFoundError:\n return False\n", "path": "src/prefect/engine/results/azure_result.py"}], "after_files": [{"content": "import os\nfrom typing import TYPE_CHECKING, Any\n\nfrom prefect.client import Secret\nfrom prefect.engine.result import Result\n\nif TYPE_CHECKING:\n import azure.storage.blob\n\n\nclass AzureResult(Result):\n \"\"\"\n Result for writing to and reading from an Azure Blob storage.\n\n Note that your flow's runtime environment must be able to authenticate with\n Azure; there are currently two supported options: provide a connection string\n either at initialization or at runtime through an environment variable, or\n set your Azure connection string as a Prefect Secret. Using an environment\n variable is the recommended approach.\n\n Args:\n - container (str): the name of the container to write to / read from\n - connection_string (str, optional): an Azure connection string for communicating with\n Blob storage. If not provided the value set in the environment as\n `AZURE_STORAGE_CONNECTION_STRING` will be used\n - connection_string_secret (str, optional): the name of a Prefect Secret\n which stores your Azure connection tring\n - **kwargs (Any, optional): any additional `Result` initialization options\n \"\"\"\n\n def __init__(\n self,\n container: str,\n connection_string: str = None,\n connection_string_secret: str = None,\n **kwargs: Any\n ) -> None:\n self.container = container\n self.connection_string = connection_string or os.getenv(\n \"AZURE_STORAGE_CONNECTION_STRING\"\n )\n self.connection_string_secret = connection_string_secret\n super().__init__(**kwargs)\n\n def initialize_service(self) -> None:\n \"\"\"\n Initialize a Blob service.\n \"\"\"\n import azure.storage.blob\n\n connection_string = self.connection_string\n if not connection_string and self.connection_string_secret:\n connection_string = Secret(self.connection_string_secret).get()\n\n self._service = azure.storage.blob.BlobServiceClient.from_connection_string(\n conn_str=connection_string\n )\n\n @property\n def service(self) -> \"azure.storage.blob.BlobServiceClient\":\n if not hasattr(self, \"_service\"):\n self.initialize_service()\n return self._service\n\n @service.setter\n def service(self, val: Any) -> None:\n self._service = val\n\n def __getstate__(self) -> dict:\n state = self.__dict__.copy()\n if \"_service\" in state:\n del state[\"_service\"]\n return state\n\n def __setstate__(self, state: dict) -> None:\n self.__dict__.update(state)\n\n def write(self, value_: Any, **kwargs: Any) -> Result:\n \"\"\"\n Writes the result value to a blob storage in Azure.\n\n Args:\n - value_ (Any): the value to write; will then be stored as the `value` attribute\n of the returned `Result` instance\n - **kwargs (optional): if provided, will be used to format the location template\n to determine the location to 
write to\n\n Returns:\n - Result: a new Result instance with the appropriately formatted location\n \"\"\"\n new = self.format(**kwargs)\n new.value = value_\n\n self.logger.debug(\"Starting to upload result to {}...\".format(new.location))\n\n # prepare data\n binary_data = new.serializer.serialize(new.value)\n\n # initialize client and upload\n client = self.service.get_blob_client(\n container=self.container, blob=new.location\n )\n client.upload_blob(binary_data)\n\n self.logger.debug(\"Finished uploading result to {}.\".format(new.location))\n\n return new\n\n def read(self, location: str) -> Result:\n \"\"\"\n Reads a result from an Azure Blob container and returns a corresponding `Result` instance.\n\n Args:\n - location (str): the Azure blob location to read from\n\n Returns:\n - Result: the read result\n \"\"\"\n new = self.copy()\n new.location = location\n\n try:\n self.logger.debug(\"Starting to download result from {}...\".format(location))\n\n # initialize client and download\n client = self.service.get_blob_client(\n container=self.container, blob=location\n )\n content_bytes = client.download_blob().content_as_bytes()\n\n try:\n new.value = new.serializer.deserialize(content_bytes)\n except EOFError:\n new.value = None\n self.logger.debug(\"Finished downloading result from {}.\".format(location))\n except Exception as exc:\n self.logger.exception(\n \"Unexpected error while reading from result handler: {}\".format(\n repr(exc)\n )\n )\n raise exc\n return new\n\n def exists(self, location: str, **kwargs: Any) -> bool:\n \"\"\"\n Checks whether the target result exists.\n\n Does not validate whether the result is `valid`, only that it is present.\n\n Args:\n - location (str): Location of the result in the specific result target.\n Will check whether the provided location exists\n - **kwargs (Any): string format arguments for `location`\n\n Returns:\n - bool: whether or not the target result exists.\n \"\"\"\n from azure.core.exceptions import ResourceNotFoundError\n\n # initialize client and download\n client = self.service.get_blob_client(\n container=self.container, blob=location.format(**kwargs)\n )\n\n # Catch exception because Azure python bindings do not yet have an exists method\n # https://github.com/Azure/azure-sdk-for-python/issues/9507\n try:\n client.get_blob_properties()\n return True\n except ResourceNotFoundError:\n return False\n", "path": "src/prefect/engine/results/azure_result.py"}]}
2,161
161
gh_patches_debug_57772
rasdani/github-patches
git_diff
celery__kombu-400
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Django Kombu app conflicts with Django app with Django 1.7 ``` python Traceback (most recent call last): File "manage.py", line 13, in <module> execute_from_command_line(sys.argv) File "/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py", line 385, in execute_from_command_line utility.execute() File "/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute django.setup() File "/.../.env/lib/python2.7/site-packages/django/__init__.py", line 21, in setup apps.populate(settings.INSTALLED_APPS) File "/.../.env/lib/python2.7/site-packages/django/apps/registry.py", line 89, in populate "duplicates: %s" % app_config.label) django.core.exceptions.ImproperlyConfigured: Application labels aren't unique, duplicates: django ``` More info [here](https://docs.djangoproject.com/en/1.7/releases/1.7/#app-loading-refactor) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kombu/transport/django/__init__.py` Content: ``` 1 """Kombu transport using the Django database as a message store.""" 2 from __future__ import absolute_import 3 4 from django.conf import settings 5 from django.core import exceptions as errors 6 7 from kombu.five import Empty 8 from kombu.transport import virtual 9 from kombu.utils.encoding import bytes_to_str 10 from kombu.utils.json import loads, dumps 11 12 13 from .models import Queue 14 15 try: 16 from django.apps import AppConfig 17 except ImportError: # pragma: no cover 18 pass 19 else: 20 class KombuAppConfig(AppConfig): 21 name = __name__ 22 label = name.replace('.', '_') 23 verbose_name = 'Message queue' 24 default_app_config = 'kombu.transport.django.KombuAppConfig' 25 26 VERSION = (1, 0, 0) 27 __version__ = '.'.join(map(str, VERSION)) 28 29 POLLING_INTERVAL = getattr(settings, 'KOMBU_POLLING_INTERVAL', 30 getattr(settings, 'DJKOMBU_POLLING_INTERVAL', 5.0)) 31 32 33 class Channel(virtual.Channel): 34 35 def _new_queue(self, queue, **kwargs): 36 Queue.objects.get_or_create(name=queue) 37 38 def _put(self, queue, message, **kwargs): 39 Queue.objects.publish(queue, dumps(message)) 40 41 def basic_consume(self, queue, *args, **kwargs): 42 qinfo = self.state.bindings[queue] 43 exchange = qinfo[0] 44 if self.typeof(exchange).type == 'fanout': 45 return 46 super(Channel, self).basic_consume(queue, *args, **kwargs) 47 48 def _get(self, queue): 49 m = Queue.objects.fetch(queue) 50 if m: 51 return loads(bytes_to_str(m)) 52 raise Empty() 53 54 def _size(self, queue): 55 return Queue.objects.size(queue) 56 57 def _purge(self, queue): 58 return Queue.objects.purge(queue) 59 60 def refresh_connection(self): 61 from django import db 62 db.close_connection() 63 64 65 class Transport(virtual.Transport): 66 Channel = Channel 67 68 default_port = 0 69 polling_interval = POLLING_INTERVAL 70 channel_errors = ( 71 virtual.Transport.channel_errors + ( 72 errors.ObjectDoesNotExist, errors.MultipleObjectsReturned) 73 ) 74 driver_type = 'sql' 75 driver_name = 'django' 76 77 def driver_version(self): 78 import django 79 return '.'.join(map(str, django.VERSION)) 80 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kombu/transport/django/__init__.py b/kombu/transport/django/__init__.py --- a/kombu/transport/django/__init__.py +++ b/kombu/transport/django/__init__.py @@ -18,7 +18,7 @@ pass else: class KombuAppConfig(AppConfig): - name = __name__ + name = 'kombu.transport.django' label = name.replace('.', '_') verbose_name = 'Message queue' default_app_config = 'kombu.transport.django.KombuAppConfig'
{"golden_diff": "diff --git a/kombu/transport/django/__init__.py b/kombu/transport/django/__init__.py\n--- a/kombu/transport/django/__init__.py\n+++ b/kombu/transport/django/__init__.py\n@@ -18,7 +18,7 @@\n pass\n else:\n class KombuAppConfig(AppConfig):\n- name = __name__\n+ name = 'kombu.transport.django'\n label = name.replace('.', '_')\n verbose_name = 'Message queue'\n default_app_config = 'kombu.transport.django.KombuAppConfig'\n", "issue": "Django Kombu app conflicts with Django app with Django 1.7\n``` python\nTraceback (most recent call last):\n File \"manage.py\", line 13, in <module>\n execute_from_command_line(sys.argv)\n File \"/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py\", line 385, in execute_from_command_line\n utility.execute()\n File \"/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py\", line 354, in execute\n django.setup()\n File \"/.../.env/lib/python2.7/site-packages/django/__init__.py\", line 21, in setup\n apps.populate(settings.INSTALLED_APPS)\n File \"/.../.env/lib/python2.7/site-packages/django/apps/registry.py\", line 89, in populate\n \"duplicates: %s\" % app_config.label)\ndjango.core.exceptions.ImproperlyConfigured: Application labels aren't unique, duplicates: django\n```\n\nMore info [here](https://docs.djangoproject.com/en/1.7/releases/1.7/#app-loading-refactor)\n\n", "before_files": [{"content": "\"\"\"Kombu transport using the Django database as a message store.\"\"\"\nfrom __future__ import absolute_import\n\nfrom django.conf import settings\nfrom django.core import exceptions as errors\n\nfrom kombu.five import Empty\nfrom kombu.transport import virtual\nfrom kombu.utils.encoding import bytes_to_str\nfrom kombu.utils.json import loads, dumps\n\n\nfrom .models import Queue\n\ntry:\n from django.apps import AppConfig\nexcept ImportError: # pragma: no cover\n pass\nelse:\n class KombuAppConfig(AppConfig):\n name = __name__\n label = name.replace('.', '_')\n verbose_name = 'Message queue'\n default_app_config = 'kombu.transport.django.KombuAppConfig'\n\nVERSION = (1, 0, 0)\n__version__ = '.'.join(map(str, VERSION))\n\nPOLLING_INTERVAL = getattr(settings, 'KOMBU_POLLING_INTERVAL',\n getattr(settings, 'DJKOMBU_POLLING_INTERVAL', 5.0))\n\n\nclass Channel(virtual.Channel):\n\n def _new_queue(self, queue, **kwargs):\n Queue.objects.get_or_create(name=queue)\n\n def _put(self, queue, message, **kwargs):\n Queue.objects.publish(queue, dumps(message))\n\n def basic_consume(self, queue, *args, **kwargs):\n qinfo = self.state.bindings[queue]\n exchange = qinfo[0]\n if self.typeof(exchange).type == 'fanout':\n return\n super(Channel, self).basic_consume(queue, *args, **kwargs)\n\n def _get(self, queue):\n m = Queue.objects.fetch(queue)\n if m:\n return loads(bytes_to_str(m))\n raise Empty()\n\n def _size(self, queue):\n return Queue.objects.size(queue)\n\n def _purge(self, queue):\n return Queue.objects.purge(queue)\n\n def refresh_connection(self):\n from django import db\n db.close_connection()\n\n\nclass Transport(virtual.Transport):\n Channel = Channel\n\n default_port = 0\n polling_interval = POLLING_INTERVAL\n channel_errors = (\n virtual.Transport.channel_errors + (\n errors.ObjectDoesNotExist, errors.MultipleObjectsReturned)\n )\n driver_type = 'sql'\n driver_name = 'django'\n\n def driver_version(self):\n import django\n return '.'.join(map(str, django.VERSION))\n", "path": "kombu/transport/django/__init__.py"}], "after_files": [{"content": "\"\"\"Kombu transport using the Django database as a message 
store.\"\"\"\nfrom __future__ import absolute_import\n\nfrom django.conf import settings\nfrom django.core import exceptions as errors\n\nfrom kombu.five import Empty\nfrom kombu.transport import virtual\nfrom kombu.utils.encoding import bytes_to_str\nfrom kombu.utils.json import loads, dumps\n\n\nfrom .models import Queue\n\ntry:\n from django.apps import AppConfig\nexcept ImportError: # pragma: no cover\n pass\nelse:\n class KombuAppConfig(AppConfig):\n name = 'kombu.transport.django'\n label = name.replace('.', '_')\n verbose_name = 'Message queue'\n default_app_config = 'kombu.transport.django.KombuAppConfig'\n\nVERSION = (1, 0, 0)\n__version__ = '.'.join(map(str, VERSION))\n\nPOLLING_INTERVAL = getattr(settings, 'KOMBU_POLLING_INTERVAL',\n getattr(settings, 'DJKOMBU_POLLING_INTERVAL', 5.0))\n\n\nclass Channel(virtual.Channel):\n\n def _new_queue(self, queue, **kwargs):\n Queue.objects.get_or_create(name=queue)\n\n def _put(self, queue, message, **kwargs):\n Queue.objects.publish(queue, dumps(message))\n\n def basic_consume(self, queue, *args, **kwargs):\n qinfo = self.state.bindings[queue]\n exchange = qinfo[0]\n if self.typeof(exchange).type == 'fanout':\n return\n super(Channel, self).basic_consume(queue, *args, **kwargs)\n\n def _get(self, queue):\n m = Queue.objects.fetch(queue)\n if m:\n return loads(bytes_to_str(m))\n raise Empty()\n\n def _size(self, queue):\n return Queue.objects.size(queue)\n\n def _purge(self, queue):\n return Queue.objects.purge(queue)\n\n def refresh_connection(self):\n from django import db\n db.close_connection()\n\n\nclass Transport(virtual.Transport):\n Channel = Channel\n\n default_port = 0\n polling_interval = POLLING_INTERVAL\n channel_errors = (\n virtual.Transport.channel_errors + (\n errors.ObjectDoesNotExist, errors.MultipleObjectsReturned)\n )\n driver_type = 'sql'\n driver_name = 'django'\n\n def driver_version(self):\n import django\n return '.'.join(map(str, django.VERSION))\n", "path": "kombu/transport/django/__init__.py"}]}
1,180
136
gh_patches_debug_32907
rasdani/github-patches
git_diff
scikit-hep__pyhf-873
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Move metadata from setup.py to setup.cfg # Description @henryiii has documented on the [Scikit-HEP packaging information page](https://scikit-hep.org/developer/packaging#setup-configuration-medium-priority) how one can move from having PyPI metadata be stored in `setup.py` to `setup.cfg`. We've known about this for sometime but haven't taken the time to do it yet, but it is probably worth doing. @henryiii Can you comment on how to deal with `project_urls`? https://github.com/scikit-hep/pyhf/blob/3e1f157119dbcb4d8db8ffd8c98e16a2d12d0239/setup.py#L82-L86 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup, find_packages 2 from pathlib import Path 3 4 this_directory = Path(__file__).parent.resolve() 5 with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst: 6 long_description = readme_rst.read() 7 8 extras_require = { 9 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'], 10 'torch': ['torch~=1.2'], 11 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'], 12 'xmlio': ['uproot'], 13 'minuit': ['iminuit'], 14 } 15 extras_require['backends'] = sorted( 16 set( 17 extras_require['tensorflow'] 18 + extras_require['torch'] 19 + extras_require['jax'] 20 + extras_require['minuit'] 21 ) 22 ) 23 extras_require['contrib'] = sorted(set(['matplotlib'])) 24 25 extras_require['test'] = sorted( 26 set( 27 extras_require['backends'] 28 + extras_require['xmlio'] 29 + extras_require['contrib'] 30 + [ 31 'pyflakes', 32 'pytest~=3.5', 33 'pytest-cov>=2.5.1', 34 'pytest-mock', 35 'pytest-benchmark[histogram]', 36 'pytest-console-scripts', 37 'pytest-mpl', 38 'pydocstyle', 39 'coverage>=4.0', # coveralls 40 'papermill~=2.0', 41 'nteract-scrapbook~=0.2', 42 'check-manifest', 43 'jupyter', 44 'uproot~=3.3', 45 'graphviz', 46 'jsonpatch', 47 'black', 48 ] 49 ) 50 ) 51 extras_require['docs'] = sorted( 52 set( 53 [ 54 'sphinx', 55 'sphinxcontrib-bibtex', 56 'sphinx-click', 57 'sphinx_rtd_theme', 58 'nbsphinx', 59 'ipywidgets', 60 'sphinx-issues', 61 'sphinx-copybutton>0.2.9', 62 ] 63 ) 64 ) 65 extras_require['develop'] = sorted( 66 set( 67 extras_require['docs'] 68 + extras_require['test'] 69 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine'] 70 ) 71 ) 72 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 73 74 75 setup( 76 name='pyhf', 77 version='0.4.1', 78 description='(partial) pure python histfactory implementation', 79 long_description=long_description, 80 long_description_content_type='text/x-rst', 81 url='https://github.com/scikit-hep/pyhf', 82 project_urls={ 83 "Documentation": "https://scikit-hep.org/pyhf/", 84 "Source": "https://github.com/scikit-hep/pyhf", 85 "Tracker": "https://github.com/scikit-hep/pyhf/issues", 86 }, 87 author='Lukas Heinrich, Matthew Feickert, Giordon Stark', 88 author_email='[email protected], [email protected], [email protected]', 89 license='Apache', 90 keywords='physics fitting numpy scipy tensorflow pytorch jax', 91 classifiers=[ 92 "Development Status :: 4 - Beta", 93 "License :: OSI Approved :: Apache Software License", 94 "Intended Audience :: Science/Research", 95 "Topic :: Scientific/Engineering", 96 "Topic :: Scientific/Engineering :: Physics", 97 "Programming Language :: Python :: 3", 98 "Programming Language :: Python :: 3.6", 99 "Programming 
Language :: Python :: 3.7", 100 "Programming Language :: Python :: 3.8", 101 ], 102 package_dir={'': 'src'}, 103 packages=find_packages(where='src'), 104 include_package_data=True, 105 python_requires=">=3.6", 106 install_requires=[ 107 'scipy', # requires numpy, which is required by pyhf and tensorflow 108 'click>=6.0', # for console scripts, 109 'tqdm', # for readxml 110 'jsonschema>=3.2.0', # for utils 111 'jsonpatch', 112 'pyyaml', # for parsing CLI equal-delimited options 113 ], 114 extras_require=extras_require, 115 entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']}, 116 dependency_links=[], 117 use_scm_version=lambda: {'local_scheme': lambda version: ''}, 118 ) 119 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,9 +1,4 @@ -from setuptools import setup, find_packages -from pathlib import Path - -this_directory = Path(__file__).parent.resolve() -with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst: - long_description = readme_rst.read() +from setuptools import setup extras_require = { 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'], @@ -73,46 +68,6 @@ setup( - name='pyhf', - version='0.4.1', - description='(partial) pure python histfactory implementation', - long_description=long_description, - long_description_content_type='text/x-rst', - url='https://github.com/scikit-hep/pyhf', - project_urls={ - "Documentation": "https://scikit-hep.org/pyhf/", - "Source": "https://github.com/scikit-hep/pyhf", - "Tracker": "https://github.com/scikit-hep/pyhf/issues", - }, - author='Lukas Heinrich, Matthew Feickert, Giordon Stark', - author_email='[email protected], [email protected], [email protected]', - license='Apache', - keywords='physics fitting numpy scipy tensorflow pytorch jax', - classifiers=[ - "Development Status :: 4 - Beta", - "License :: OSI Approved :: Apache Software License", - "Intended Audience :: Science/Research", - "Topic :: Scientific/Engineering", - "Topic :: Scientific/Engineering :: Physics", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - ], - package_dir={'': 'src'}, - packages=find_packages(where='src'), - include_package_data=True, - python_requires=">=3.6", - install_requires=[ - 'scipy', # requires numpy, which is required by pyhf and tensorflow - 'click>=6.0', # for console scripts, - 'tqdm', # for readxml - 'jsonschema>=3.2.0', # for utils - 'jsonpatch', - 'pyyaml', # for parsing CLI equal-delimited options - ], extras_require=extras_require, - entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']}, - dependency_links=[], use_scm_version=lambda: {'local_scheme': lambda version: ''}, )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,9 +1,4 @@\n-from setuptools import setup, find_packages\n-from pathlib import Path\n-\n-this_directory = Path(__file__).parent.resolve()\n-with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n- long_description = readme_rst.read()\n+from setuptools import setup\n \n extras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n@@ -73,46 +68,6 @@\n \n \n setup(\n- name='pyhf',\n- version='0.4.1',\n- description='(partial) pure python histfactory implementation',\n- long_description=long_description,\n- long_description_content_type='text/x-rst',\n- url='https://github.com/scikit-hep/pyhf',\n- project_urls={\n- \"Documentation\": \"https://scikit-hep.org/pyhf/\",\n- \"Source\": \"https://github.com/scikit-hep/pyhf\",\n- \"Tracker\": \"https://github.com/scikit-hep/pyhf/issues\",\n- },\n- author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n- author_email='[email protected], [email protected], [email protected]',\n- license='Apache',\n- keywords='physics fitting numpy scipy tensorflow pytorch jax',\n- classifiers=[\n- \"Development Status :: 4 - Beta\",\n- \"License :: OSI Approved :: Apache Software License\",\n- \"Intended Audience :: Science/Research\",\n- \"Topic :: Scientific/Engineering\",\n- \"Topic :: Scientific/Engineering :: Physics\",\n- \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.6\",\n- \"Programming Language :: Python :: 3.7\",\n- \"Programming Language :: Python :: 3.8\",\n- ],\n- package_dir={'': 'src'},\n- packages=find_packages(where='src'),\n- include_package_data=True,\n- python_requires=\">=3.6\",\n- install_requires=[\n- 'scipy', # requires numpy, which is required by pyhf and tensorflow\n- 'click>=6.0', # for console scripts,\n- 'tqdm', # for readxml\n- 'jsonschema>=3.2.0', # for utils\n- 'jsonpatch',\n- 'pyyaml', # for parsing CLI equal-delimited options\n- ],\n extras_require=extras_require,\n- entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n- dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n )\n", "issue": "Move metadata from setup.py to setup.cfg\n# Description\r\n\r\n@henryiii has documented on the [Scikit-HEP packaging information page](https://scikit-hep.org/developer/packaging#setup-configuration-medium-priority) how one can move from having PyPI metadata be stored in `setup.py` to `setup.cfg`. 
We've known about this for sometime but haven't taken the time to do it yet, but it is probably worth doing.\r\n\r\n@henryiii Can you comment on how to deal with `project_urls`?\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/3e1f157119dbcb4d8db8ffd8c98e16a2d12d0239/setup.py#L82-L86\r\n\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n project_urls={\n \"Documentation\": \"https://scikit-hep.org/pyhf/\",\n \"Source\": \"https://github.com/scikit-hep/pyhf\",\n \"Tracker\": \"https://github.com/scikit-hep/pyhf/issues\",\n },\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch jax',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Physics\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n 
use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
1,640
617
gh_patches_debug_63275
rasdani/github-patches
git_diff
Mailu__Mailu-2929
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- fetchmail does not fetch mails in 2.0 <!-- Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests. For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net). To be able to help you best, we need some more information. Before you open your issue - Check if no issue or pull-request for this already exists. - Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page) - You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur. - The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title. Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check. --> ## Environment & Version ### Environment - [X] docker compose - [ ] kubernetes - [ ] docker swarm ### Version - Version: 2.0, Container-"Id": "sha256:dd0a30f2ecb74491c49c8ab1b1d8d710eb55cc1e81d411acea9171ebd40ff314", <!-- To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7). $> docker ps -a | grep mailu 140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoi…" 2 weeks ago Up 2 days (healthy) 80/tcp $> grep MAILU_VERSION docker-compose.yml mailu.env --> ## Description <!-- Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language. --> After updating from 1.9 to 2.0 fetchmails does not fetch mails from remote POP3s-Servers. The container-logs contain the following information with log-level WARNING. Log-level DEBUG does not print more useful information ``` mailu-20-fetchmail-1 | Option --folder is not supported with POP3 mailu-20-fetchmail-1 | Option --folder is not supported with POP3 mailu-20-fetchmail-1 | Option --folder is not supported with POP3 mailu-20-fetchmail-1 | Sleeping for 1000 seconds mailu-20-fetchmail-1 | fetchmail: Query status=5 (SYNTAX) mailu-20-fetchmail-1 | mailu-20-fetchmail-1 | mailu-20-fetchmail-1 | fetchmail: Query status=5 (SYNTAX) ``` I copied the mailu.db and fired up the new stack based on the newly created configs and docker-compose.yml-files. ## Replication Steps <!-- Steps for replicating your issue --> migrated DB from 1.9 copied to the new folder and starting the stack. ## Observed behaviour <!-- Explain or paste the result you received. --> Any chance to get more detailed information from within the container? ## Expected behaviour <!-- Explain what results you expected - be as specific as possible. Just saying "it doesn’t work as expected" is not useful. It's also helpful to describe what you actually experienced. --> Fetching the remote mails and make them available in mailus mail-storage ## Logs <!-- Often it is very useful to include log fragments of the involved component. You can get the logs via `docker logs <container name> --tail 1000`. For example for the admin container: `docker logs mailu_admin_1 --tail 1000` or using docker compose `docker compose -f /mailu/docker-compose.yml logs --tail 1000 admin` If you can find the relevant section, please share only the parts that seem relevant. 
If you have any logs, please enclose them in code tags, like so: ``` Your logs here! ``` --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `optional/fetchmail/fetchmail.py` Content: ``` 1 #!/usr/bin/env python3 2 3 import time 4 import os 5 from pathlib import Path 6 from pwd import getpwnam 7 import tempfile 8 import shlex 9 import subprocess 10 import requests 11 from socrate import system 12 import sys 13 import traceback 14 15 16 FETCHMAIL = """ 17 fetchmail -N \ 18 --idfile /data/fetchids --uidl \ 19 --pidfile /dev/shm/fetchmail.pid \ 20 --sslcertck --sslcertpath /etc/ssl/certs \ 21 -f {} 22 """ 23 24 25 RC_LINE = """ 26 poll "{host}" proto {protocol} port {port} 27 user "{username}" password "{password}" 28 is "{user_email}" 29 smtphost "{smtphost}" 30 {folders} 31 {options} 32 {lmtp} 33 """ 34 35 36 def escape_rc_string(arg): 37 return "".join("\\x%2x" % ord(char) for char in arg) 38 39 40 def fetchmail(fetchmailrc): 41 with tempfile.NamedTemporaryFile() as handler: 42 handler.write(fetchmailrc.encode("utf8")) 43 handler.flush() 44 command = FETCHMAIL.format(shlex.quote(handler.name)) 45 output = subprocess.check_output(command, shell=True) 46 return output 47 48 49 def run(debug): 50 try: 51 fetches = requests.get(f"http://{os.environ['ADMIN_ADDRESS']}:8080/internal/fetch").json() 52 for fetch in fetches: 53 fetchmailrc = "" 54 options = "options antispam 501, 504, 550, 553, 554" 55 options += " ssl" if fetch["tls"] else "" 56 options += " keep" if fetch["keep"] else " fetchall" 57 folders = "folders %s" % ((','.join('"' + item + '"' for item in fetch['folders'])) if fetch['folders'] else '"INBOX"') 58 fetchmailrc += RC_LINE.format( 59 user_email=escape_rc_string(fetch["user_email"]), 60 protocol=fetch["protocol"], 61 host=escape_rc_string(fetch["host"]), 62 port=fetch["port"], 63 smtphost=f'{os.environ["FRONT_ADDRESS"]}' if fetch['scan'] else f'{os.environ["FRONT_ADDRESS"]}/2525', 64 username=escape_rc_string(fetch["username"]), 65 password=escape_rc_string(fetch["password"]), 66 options=options, 67 folders=folders, 68 lmtp='' if fetch['scan'] else 'lmtp', 69 ) 70 if debug: 71 print(fetchmailrc) 72 try: 73 print(fetchmail(fetchmailrc)) 74 error_message = "" 75 except subprocess.CalledProcessError as error: 76 error_message = error.output.decode("utf8") 77 # No mail is not an error 78 if not error_message.startswith("fetchmail: No mail"): 79 print(error_message) 80 user_info = "for %s at %s" % (fetch["user_email"], fetch["host"]) 81 # Number of messages seen is not a error as well 82 if ("messages" in error_message and 83 "(seen " in error_message and 84 user_info in error_message): 85 print(error_message) 86 finally: 87 requests.post("http://{}:8080/internal/fetch/{}".format(os.environ['ADMIN_ADDRESS'],fetch['id']), 88 json=error_message.split('\n')[0] 89 ) 90 except Exception: 91 traceback.print_exc() 92 93 94 if __name__ == "__main__": 95 id_fetchmail = getpwnam('fetchmail') 96 Path('/data/fetchids').touch() 97 os.chown("/data/fetchids", id_fetchmail.pw_uid, id_fetchmail.pw_gid) 98 os.chown("/data/", id_fetchmail.pw_uid, id_fetchmail.pw_gid) 99 os.chmod("/data/fetchids", 0o700) 100 system.drop_privs_to('fetchmail') 101 config = system.set_env() 102 while True: 103 delay = int(os.environ.get('FETCHMAIL_DELAY', 60)) 104 print("Sleeping for {} seconds".format(delay)) 105 time.sleep(delay) 106 107 if not config.get('FETCHMAIL_ENABLED', True): 108 print("Fetchmail disabled, 
skipping...") 109 continue 110 111 run(config.get('DEBUG', False)) 112 sys.stdout.flush() 113 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py --- a/optional/fetchmail/fetchmail.py +++ b/optional/fetchmail/fetchmail.py @@ -64,7 +64,7 @@ username=escape_rc_string(fetch["username"]), password=escape_rc_string(fetch["password"]), options=options, - folders=folders, + folders='' if fetch['protocol'] == 'pop3' else folders, lmtp='' if fetch['scan'] else 'lmtp', ) if debug:
{"golden_diff": "diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py\n--- a/optional/fetchmail/fetchmail.py\n+++ b/optional/fetchmail/fetchmail.py\n@@ -64,7 +64,7 @@\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options,\n- folders=folders,\n+ folders='' if fetch['protocol'] == 'pop3' else folders,\n lmtp='' if fetch['scan'] else 'lmtp',\n )\n if debug:\n", "issue": "fetchmail does not fetch mails in 2.0\n<!--\r\n\r\nThank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.\r\nFor **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).\r\n\r\nTo be able to help you best, we need some more information.\r\n\r\nBefore you open your issue\r\n- Check if no issue or pull-request for this already exists.\r\n- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- You understand `Mailu` is made by volunteers in their **free time** \u2014 be concise, civil and accept that delays can occur.\r\n- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\nPlease put your text outside of the comment blocks to be visible. You can use the button \"Preview\" above to check.\r\n\r\n-->\r\n\r\n## Environment & Version\r\n\r\n### Environment\r\n\r\n- [X] docker compose\r\n- [ ] kubernetes\r\n- [ ] docker swarm\r\n\r\n### Version\r\n\r\n- Version: 2.0, Container-\"Id\": \"sha256:dd0a30f2ecb74491c49c8ab1b1d8d710eb55cc1e81d411acea9171ebd40ff314\",\r\n\r\n<!--\r\nTo find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).\r\n\r\n$> docker ps -a | grep mailu\r\n140b09d4b09c mailu/roundcube:1.7 \"docker-php-entrypoi\u2026\" 2 weeks ago Up 2 days (healthy) 80/tcp\r\n$> grep MAILU_VERSION docker-compose.yml mailu.env\r\n-->\r\n\r\n## Description\r\n<!--\r\nFurther explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.\r\n-->\r\nAfter updating from 1.9 to 2.0 fetchmails does not fetch mails from remote POP3s-Servers. The container-logs contain the following information with log-level WARNING. Log-level DEBUG does not print more useful information\r\n\r\n```\r\nmailu-20-fetchmail-1 | Option --folder is not supported with POP3\r\nmailu-20-fetchmail-1 | Option --folder is not supported with POP3\r\nmailu-20-fetchmail-1 | Option --folder is not supported with POP3\r\nmailu-20-fetchmail-1 | Sleeping for 1000 seconds\r\nmailu-20-fetchmail-1 | fetchmail: Query status=5 (SYNTAX)\r\nmailu-20-fetchmail-1 |\r\nmailu-20-fetchmail-1 |\r\nmailu-20-fetchmail-1 | fetchmail: Query status=5 (SYNTAX)\r\n```\r\nI copied the mailu.db and fired up the new stack based on the newly created configs and docker-compose.yml-files.\r\n\r\n## Replication Steps\r\n<!--\r\nSteps for replicating your issue\r\n-->\r\nmigrated DB from 1.9 copied to the new folder and starting the stack.\r\n\r\n## Observed behaviour\r\n<!--\r\nExplain or paste the result you received.\r\n-->\r\nAny chance to get more detailed information from within the container?\r\n\r\n## Expected behaviour\r\n<!--\r\nExplain what results you expected - be as specific as possible.\r\nJust saying \"it doesn\u2019t work as expected\" is not useful. 
It's also helpful to describe what you actually experienced.\r\n-->\r\nFetching the remote mails and make them available in mailus mail-storage\r\n\r\n## Logs\r\n<!--\r\nOften it is very useful to include log fragments of the involved component.\r\nYou can get the logs via `docker logs <container name> --tail 1000`.\r\nFor example for the admin container: `docker logs mailu_admin_1 --tail 1000`\r\nor using docker compose `docker compose -f /mailu/docker-compose.yml logs --tail 1000 admin`\r\n\r\nIf you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:\r\n\r\n```\r\nYour logs here!\r\n```\r\n-->\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport time\nimport os\nfrom pathlib import Path\nfrom pwd import getpwnam\nimport tempfile\nimport shlex\nimport subprocess\nimport requests\nfrom socrate import system\nimport sys\nimport traceback\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --idfile /data/fetchids --uidl \\\n --pidfile /dev/shm/fetchmail.pid \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {folders}\n {options}\n {lmtp}\n\"\"\"\n\n\ndef escape_rc_string(arg):\n return \"\".join(\"\\\\x%2x\" % ord(char) for char in arg)\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n try:\n fetches = requests.get(f\"http://{os.environ['ADMIN_ADDRESS']}:8080/internal/fetch\").json()\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n folders = \"folders %s\" % ((','.join('\"' + item + '\"' for item in fetch['folders'])) if fetch['folders'] else '\"INBOX\"')\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=f'{os.environ[\"FRONT_ADDRESS\"]}' if fetch['scan'] else f'{os.environ[\"FRONT_ADDRESS\"]}/2525',\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options,\n folders=folders,\n lmtp='' if fetch['scan'] else 'lmtp',\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://{}:8080/internal/fetch/{}\".format(os.environ['ADMIN_ADDRESS'],fetch['id']),\n json=error_message.split('\\n')[0]\n )\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == \"__main__\":\n id_fetchmail = getpwnam('fetchmail')\n Path('/data/fetchids').touch()\n os.chown(\"/data/fetchids\", 
id_fetchmail.pw_uid, id_fetchmail.pw_gid)\n os.chown(\"/data/\", id_fetchmail.pw_uid, id_fetchmail.pw_gid)\n os.chmod(\"/data/fetchids\", 0o700)\n system.drop_privs_to('fetchmail')\n config = system.set_env()\n while True:\n delay = int(os.environ.get('FETCHMAIL_DELAY', 60))\n print(\"Sleeping for {} seconds\".format(delay))\n time.sleep(delay)\n\n if not config.get('FETCHMAIL_ENABLED', True):\n print(\"Fetchmail disabled, skipping...\")\n continue\n\n run(config.get('DEBUG', False))\n sys.stdout.flush()\n", "path": "optional/fetchmail/fetchmail.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport time\nimport os\nfrom pathlib import Path\nfrom pwd import getpwnam\nimport tempfile\nimport shlex\nimport subprocess\nimport requests\nfrom socrate import system\nimport sys\nimport traceback\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --idfile /data/fetchids --uidl \\\n --pidfile /dev/shm/fetchmail.pid \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {folders}\n {options}\n {lmtp}\n\"\"\"\n\n\ndef escape_rc_string(arg):\n return \"\".join(\"\\\\x%2x\" % ord(char) for char in arg)\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n try:\n fetches = requests.get(f\"http://{os.environ['ADMIN_ADDRESS']}:8080/internal/fetch\").json()\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n folders = \"folders %s\" % ((','.join('\"' + item + '\"' for item in fetch['folders'])) if fetch['folders'] else '\"INBOX\"')\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=f'{os.environ[\"FRONT_ADDRESS\"]}' if fetch['scan'] else f'{os.environ[\"FRONT_ADDRESS\"]}/2525',\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options,\n folders='' if fetch['protocol'] == 'pop3' else folders,\n lmtp='' if fetch['scan'] else 'lmtp',\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://{}:8080/internal/fetch/{}\".format(os.environ['ADMIN_ADDRESS'],fetch['id']),\n json=error_message.split('\\n')[0]\n )\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == \"__main__\":\n id_fetchmail = getpwnam('fetchmail')\n Path('/data/fetchids').touch()\n os.chown(\"/data/fetchids\", id_fetchmail.pw_uid, id_fetchmail.pw_gid)\n os.chown(\"/data/\", id_fetchmail.pw_uid, id_fetchmail.pw_gid)\n 
os.chmod(\"/data/fetchids\", 0o700)\n system.drop_privs_to('fetchmail')\n config = system.set_env()\n while True:\n delay = int(os.environ.get('FETCHMAIL_DELAY', 60))\n print(\"Sleeping for {} seconds\".format(delay))\n time.sleep(delay)\n\n if not config.get('FETCHMAIL_ENABLED', True):\n print(\"Fetchmail disabled, skipping...\")\n continue\n\n run(config.get('DEBUG', False))\n sys.stdout.flush()\n", "path": "optional/fetchmail/fetchmail.py"}]}
2,364
127
gh_patches_debug_27479
rasdani/github-patches
git_diff
elastic__apm-agent-python-1308
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- aioredis instrumentation: AttributeError: 'str' object has no attribute 'decode' **Describe the bug** In some cases `aioredis` passes `str` commands to its `aioredis.Redis.execute()` method rather than `bytes` commands, like e.g. when executing `aioredis.Redis.ping()`, which is unacceptable by `elastic-apm`. **Current workaround** Disable `aioredis` instrumenting (set environment variable `SKIP_INSTRUMENT_AIOREDIS="true"`) prior `elasticapm.instrument()` is called. **To Reproduce** ```python3 import asyncio import aioredis import elasticapm REDIS_URL = "redis://localhost:6379" async def main(): elasticapm.instrument() client = await aioredis.create_redis_pool(REDIS_URL) elastic_apm_client = elasticapm.Client() elastic_apm_client.begin_transaction("redis-ping") try: assert await client.ping() == b"PONG" finally: client.close() await client.wait_closed() elastic_apm_client.end_transaction("redis-ping") if __name__ == "__main__": asyncio.run(main()) ``` **Expected result** No exception is raised. **Actual result** ```python3 Traceback (most recent call last): File "<...>/test.py", line 24, in <module> asyncio.run(main()) File "/usr/local/lib/python3.8/asyncio/runners.py", line 44, in run return loop.run_until_complete(main) File "/usr/local/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete return future.result() File "<...>/test.py", line 17, in main assert await client.ping() == b"PONG" File "<...>/venv/lib/python3.8/site-packages/aioredis/commands/__init__.py", line 114, in ping return self.execute('PING', *args, encoding=encoding) File "<...>/venv/lib/python3.8/site-packages/aioredis/commands/__init__.py", line 51, in execute return self._pool_or_conn.execute(command, *args, **kwargs) File "<...>/venv/lib/python3.8/site-packages/elasticapm/instrumentation/packages/base.py", line 210, in call_if_sampling return self.call(module, method, wrapped, instance, args, kwargs) File "<...>/venv/lib/python3.8/site-packages/elasticapm/instrumentation/packages/asyncio/aioredis.py", line 46, in call wrapped_name = args[0].decode() AttributeError: 'str' object has no attribute 'decode' ``` **Environment** - OS: Ubuntu 18.04.5 LTS - Python version: 3.8.9 - Framework and version: `aioredis==1.3.1` - APM Server version: -- - Agent version: `elasticapm==6.3.3` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `elasticapm/instrumentation/packages/asyncio/aioredis.py` Content: ``` 1 # BSD 3-Clause License 2 # 3 # Copyright (c) 2019, Elasticsearch BV 4 # All rights reserved. 5 # 6 # Redistribution and use in source and binary forms, with or without 7 # modification, are permitted provided that the following conditions are met: 8 # 9 # * Redistributions of source code must retain the above copyright notice, this 10 # list of conditions and the following disclaimer. 11 # 12 # * Redistributions in binary form must reproduce the above copyright notice, 13 # this list of conditions and the following disclaimer in the documentation 14 # and/or other materials provided with the distribution. 15 # 16 # * Neither the name of the copyright holder nor the names of its 17 # contributors may be used to endorse or promote products derived from 18 # this software without specific prior written permission. 
19 # 20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 31 from __future__ import absolute_import 32 33 from elasticapm.contrib.asyncio.traces import async_capture_span 34 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule 35 from elasticapm.traces import execution_context 36 37 38 class RedisConnectionPoolInstrumentation(AbstractInstrumentedModule): 39 name = "aioredis" 40 41 instrument_list = [("aioredis.pool", "ConnectionsPool.execute"), 42 ("aioredis.pool", "ConnectionsPool.execute_pubsub")] 43 44 def call(self, module, method, wrapped, instance, args, kwargs): 45 if len(args) > 0: 46 wrapped_name = args[0].decode() 47 else: 48 wrapped_name = self.get_wrapped_name(wrapped, instance, method) 49 50 with async_capture_span( 51 wrapped_name, span_type="db", span_subtype="redis", span_action="query", leaf=True 52 ) as span: 53 span.context["destination"] = _get_destination_info(instance) 54 55 return wrapped(*args, **kwargs) 56 57 58 class RedisPipelineInstrumentation(AbstractInstrumentedModule): 59 name = "aioredis" 60 61 instrument_list = [("aioredis.commands.transaction", "Pipeline.execute")] 62 63 def call(self, module, method, wrapped, instance, args, kwargs): 64 wrapped_name = self.get_wrapped_name(wrapped, instance, method) 65 66 with async_capture_span( 67 wrapped_name, span_type="db", span_subtype="redis", span_action="query", leaf=True 68 ) as span: 69 span.context["destination"] = _get_destination_info(instance) 70 71 return wrapped(*args, **kwargs) 72 73 74 class RedisConnectionInstrumentation(AbstractInstrumentedModule): 75 name = "aioredis" 76 77 instrument_list = (("aioredis.connection", "RedisConnection.execute"), 78 ("aioredis.pool", "ConnectionsPool.execute_pubsub")) 79 80 def call(self, module, method, wrapped, instance, args, kwargs): 81 span = execution_context.get_span() 82 if span and span.subtype == "aioredis": 83 span.context["destination"] = _get_destination_info(instance) 84 return wrapped(*args, **kwargs) 85 86 87 def _get_destination_info(connection): 88 destination_info = {"service": {"name": "aioredis", "resource": "redis", "type": "db"}} 89 90 if hasattr(connection, "_pool_or_conn"): 91 destination_info["port"] = connection._pool_or_conn.address[1] 92 destination_info["address"] = connection._pool_or_conn.address[0] 93 else: 94 destination_info["port"] = connection.address[1] 95 destination_info["address"] = connection.address[0] 96 97 return destination_info 98 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/elasticapm/instrumentation/packages/asyncio/aioredis.py b/elasticapm/instrumentation/packages/asyncio/aioredis.py --- a/elasticapm/instrumentation/packages/asyncio/aioredis.py +++ b/elasticapm/instrumentation/packages/asyncio/aioredis.py @@ -38,12 +38,16 @@ class RedisConnectionPoolInstrumentation(AbstractInstrumentedModule): name = "aioredis" - instrument_list = [("aioredis.pool", "ConnectionsPool.execute"), - ("aioredis.pool", "ConnectionsPool.execute_pubsub")] + instrument_list = [ + ("aioredis.pool", "ConnectionsPool.execute"), + ("aioredis.pool", "ConnectionsPool.execute_pubsub"), + ] def call(self, module, method, wrapped, instance, args, kwargs): if len(args) > 0: - wrapped_name = args[0].decode() + wrapped_name = args[0] + if isinstance(wrapped_name, bytes): + wrapped_name = wrapped_name.decode() else: wrapped_name = self.get_wrapped_name(wrapped, instance, method) @@ -74,8 +78,10 @@ class RedisConnectionInstrumentation(AbstractInstrumentedModule): name = "aioredis" - instrument_list = (("aioredis.connection", "RedisConnection.execute"), - ("aioredis.pool", "ConnectionsPool.execute_pubsub")) + instrument_list = ( + ("aioredis.connection", "RedisConnection.execute"), + ("aioredis.pool", "ConnectionsPool.execute_pubsub"), + ) def call(self, module, method, wrapped, instance, args, kwargs): span = execution_context.get_span()
{"golden_diff": "diff --git a/elasticapm/instrumentation/packages/asyncio/aioredis.py b/elasticapm/instrumentation/packages/asyncio/aioredis.py\n--- a/elasticapm/instrumentation/packages/asyncio/aioredis.py\n+++ b/elasticapm/instrumentation/packages/asyncio/aioredis.py\n@@ -38,12 +38,16 @@\n class RedisConnectionPoolInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n \n- instrument_list = [(\"aioredis.pool\", \"ConnectionsPool.execute\"),\n- (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\")]\n+ instrument_list = [\n+ (\"aioredis.pool\", \"ConnectionsPool.execute\"),\n+ (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"),\n+ ]\n \n def call(self, module, method, wrapped, instance, args, kwargs):\n if len(args) > 0:\n- wrapped_name = args[0].decode()\n+ wrapped_name = args[0]\n+ if isinstance(wrapped_name, bytes):\n+ wrapped_name = wrapped_name.decode()\n else:\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n \n@@ -74,8 +78,10 @@\n class RedisConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n \n- instrument_list = ((\"aioredis.connection\", \"RedisConnection.execute\"),\n- (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"))\n+ instrument_list = (\n+ (\"aioredis.connection\", \"RedisConnection.execute\"),\n+ (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"),\n+ )\n \n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n", "issue": "aioredis instrumentation: AttributeError: 'str' object has no attribute 'decode'\n**Describe the bug**\r\nIn some cases `aioredis` passes `str` commands to its `aioredis.Redis.execute()` method rather than `bytes` commands, like e.g. when executing `aioredis.Redis.ping()`, which is unacceptable by `elastic-apm`. 
\r\n\r\n**Current workaround**\r\nDisable `aioredis` instrumenting (set environment variable `SKIP_INSTRUMENT_AIOREDIS=\"true\"`) prior `elasticapm.instrument()` is called.\r\n\r\n**To Reproduce**\r\n```python3\r\nimport asyncio\r\n\r\nimport aioredis\r\nimport elasticapm\r\n\r\nREDIS_URL = \"redis://localhost:6379\"\r\n\r\n\r\nasync def main():\r\n elasticapm.instrument()\r\n\r\n client = await aioredis.create_redis_pool(REDIS_URL)\r\n elastic_apm_client = elasticapm.Client()\r\n\r\n elastic_apm_client.begin_transaction(\"redis-ping\")\r\n try:\r\n assert await client.ping() == b\"PONG\"\r\n finally:\r\n client.close()\r\n await client.wait_closed()\r\n elastic_apm_client.end_transaction(\"redis-ping\")\r\n\r\nif __name__ == \"__main__\":\r\n asyncio.run(main())\r\n```\r\n\r\n**Expected result**\r\nNo exception is raised.\r\n\r\n**Actual result**\r\n```python3\r\nTraceback (most recent call last):\r\n File \"<...>/test.py\", line 24, in <module>\r\n asyncio.run(main())\r\n File \"/usr/local/lib/python3.8/asyncio/runners.py\", line 44, in run\r\n return loop.run_until_complete(main)\r\n File \"/usr/local/lib/python3.8/asyncio/base_events.py\", line 616, in run_until_complete\r\n return future.result()\r\n File \"<...>/test.py\", line 17, in main\r\n assert await client.ping() == b\"PONG\"\r\n File \"<...>/venv/lib/python3.8/site-packages/aioredis/commands/__init__.py\", line 114, in ping\r\n return self.execute('PING', *args, encoding=encoding)\r\n File \"<...>/venv/lib/python3.8/site-packages/aioredis/commands/__init__.py\", line 51, in execute\r\n return self._pool_or_conn.execute(command, *args, **kwargs)\r\n File \"<...>/venv/lib/python3.8/site-packages/elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"<...>/venv/lib/python3.8/site-packages/elasticapm/instrumentation/packages/asyncio/aioredis.py\", line 46, in call\r\n wrapped_name = args[0].decode()\r\nAttributeError: 'str' object has no attribute 'decode'\r\n```\r\n\r\n**Environment**\r\n- OS: Ubuntu 18.04.5 LTS\r\n- Python version: 3.8.9\r\n- Framework and version: `aioredis==1.3.1`\r\n- APM Server version: --\r\n- Agent version: `elasticapm==6.3.3`\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nfrom elasticapm.contrib.asyncio.traces import async_capture_span\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import execution_context\n\n\nclass RedisConnectionPoolInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = [(\"aioredis.pool\", \"ConnectionsPool.execute\"),\n (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if len(args) > 0:\n wrapped_name = args[0].decode()\n else:\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n\n with async_capture_span(\n wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True\n ) as span:\n span.context[\"destination\"] = _get_destination_info(instance)\n\n return wrapped(*args, **kwargs)\n\n\nclass RedisPipelineInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = [(\"aioredis.commands.transaction\", \"Pipeline.execute\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n\n with async_capture_span(\n wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True\n ) as span:\n span.context[\"destination\"] = _get_destination_info(instance)\n\n return wrapped(*args, **kwargs)\n\n\nclass RedisConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = ((\"aioredis.connection\", \"RedisConnection.execute\"),\n (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"))\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if span and span.subtype == \"aioredis\":\n span.context[\"destination\"] = _get_destination_info(instance)\n return wrapped(*args, **kwargs)\n\n\ndef _get_destination_info(connection):\n destination_info = {\"service\": {\"name\": \"aioredis\", \"resource\": \"redis\", \"type\": \"db\"}}\n\n if hasattr(connection, \"_pool_or_conn\"):\n destination_info[\"port\"] = connection._pool_or_conn.address[1]\n destination_info[\"address\"] = connection._pool_or_conn.address[0]\n else:\n destination_info[\"port\"] = connection.address[1]\n destination_info[\"address\"] = connection.address[0]\n\n return destination_info\n", "path": "elasticapm/instrumentation/packages/asyncio/aioredis.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer 
in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nfrom elasticapm.contrib.asyncio.traces import async_capture_span\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import execution_context\n\n\nclass RedisConnectionPoolInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = [\n (\"aioredis.pool\", \"ConnectionsPool.execute\"),\n (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if len(args) > 0:\n wrapped_name = args[0]\n if isinstance(wrapped_name, bytes):\n wrapped_name = wrapped_name.decode()\n else:\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n\n with async_capture_span(\n wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True\n ) as span:\n span.context[\"destination\"] = _get_destination_info(instance)\n\n return wrapped(*args, **kwargs)\n\n\nclass RedisPipelineInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = [(\"aioredis.commands.transaction\", \"Pipeline.execute\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n\n with async_capture_span(\n wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True\n ) as span:\n span.context[\"destination\"] = _get_destination_info(instance)\n\n return wrapped(*args, **kwargs)\n\n\nclass RedisConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = (\n (\"aioredis.connection\", \"RedisConnection.execute\"),\n (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"),\n )\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if span and span.subtype == \"aioredis\":\n span.context[\"destination\"] = _get_destination_info(instance)\n return wrapped(*args, **kwargs)\n\n\ndef _get_destination_info(connection):\n destination_info = {\"service\": {\"name\": \"aioredis\", \"resource\": \"redis\", \"type\": \"db\"}}\n\n if hasattr(connection, \"_pool_or_conn\"):\n destination_info[\"port\"] = connection._pool_or_conn.address[1]\n destination_info[\"address\"] = connection._pool_or_conn.address[0]\n else:\n destination_info[\"port\"] = connection.address[1]\n destination_info[\"address\"] = connection.address[0]\n\n return 
destination_info\n", "path": "elasticapm/instrumentation/packages/asyncio/aioredis.py"}]}
2,030
390
gh_patches_debug_25118
rasdani/github-patches
git_diff
bookwyrm-social__bookwyrm-375
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- User profile failing to load **Describe the bug** Attempting to access https://bookwyrm.social/user/jonathan.brodsky causes a ``` Server Error Something went wrong! Sorry about that. ``` **To Reproduce** Go to https://bookwyrm.social/user/jonathan.brodsky It appears to apply to all urls within user/jonathan.brodsky - see https://bookwyrm.social/user/jonathan.brodsky/generatednote/1994 **Expected behavior** A user profile page loads. **Desktop (please complete the following information):** - OS: windows - Browser Chrome - Version 87.0.4280.66 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bookwyrm/urls.py` Content: ``` 1 ''' url routing for the app and api ''' 2 from django.conf.urls.static import static 3 from django.contrib import admin 4 from django.urls import path, re_path 5 6 from bookwyrm import incoming, outgoing, views, settings, wellknown 7 from bookwyrm import view_actions as actions 8 9 username_regex = r'(?P<username>[\w\-_]+@[\w\-\_\.]+)' 10 localname_regex = r'(?P<username>[\w\-_]+)' 11 user_path = r'^user/%s' % username_regex 12 local_user_path = r'^user/%s' % localname_regex 13 14 status_types = [ 15 'status', 16 'review', 17 'comment', 18 'quotation', 19 'boost', 20 'generatednote' 21 ] 22 status_path = r'%s/(%s)/(?P<status_id>\d+)' % \ 23 (local_user_path, '|'.join(status_types)) 24 25 book_path = r'^book/(?P<book_id>\d+)' 26 27 handler404 = 'bookwyrm.views.not_found_page' 28 handler500 = 'bookwyrm.views.server_error_page' 29 urlpatterns = [ 30 path('admin/', admin.site.urls), 31 32 # federation endpoints 33 re_path(r'^inbox/?$', incoming.shared_inbox), 34 re_path(r'%s/inbox/?$' % local_user_path, incoming.inbox), 35 re_path(r'%s/outbox/?$' % local_user_path, outgoing.outbox), 36 37 # .well-known endpoints 38 re_path(r'^.well-known/webfinger/?$', wellknown.webfinger), 39 re_path(r'^.well-known/nodeinfo/?$', wellknown.nodeinfo_pointer), 40 re_path(r'^nodeinfo/2\.0/?$', wellknown.nodeinfo), 41 re_path(r'^api/v1/instance/?$', wellknown.instance_info), 42 re_path(r'^api/v1/instance/peers/?$', wellknown.peers), 43 # TODO: re_path(r'^.well-known/host-meta/?$', incoming.host_meta), 44 # TODO: robots.txt 45 46 # ui views 47 re_path(r'^login/?$', views.login_page), 48 re_path(r'^about/?$', views.about_page), 49 re_path(r'^password-reset/?$', views.password_reset_request), 50 re_path(r'^password-reset/(?P<code>[A-Za-z0-9]+)/?$', views.password_reset), 51 re_path(r'^invite/?$', views.manage_invites), 52 re_path(r'^invite/(?P<code>[A-Za-z0-9]+)/?$', views.invite_page), 53 54 path('', views.home), 55 re_path(r'^(?P<tab>home|local|federated)/?$', views.home_tab), 56 re_path(r'^notifications/?', views.notifications_page), 57 re_path(r'^import/?$', views.import_page), 58 re_path(r'^import-status/(\d+)/?$', views.import_status), 59 re_path(r'^user-edit/?$', views.edit_profile_page), 60 61 # should return a ui view or activitypub json blob as requested 62 # users 63 re_path(r'%s/?$' % user_path, views.user_page), 64 re_path(r'%s/?$' % local_user_path, views.user_page), 65 re_path(r'%s\.json$' % local_user_path, views.user_page), 66 re_path(r'%s/shelves/?$' % local_user_path, views.user_shelves_page), 67 re_path(r'%s/followers(.json)?/?$' % local_user_path, views.followers_page), 68 re_path(r'%s/following(.json)?/?$' % local_user_path, views.following_page), 69 70 # statuses 71 
re_path(r'%s(.json)?/?$' % status_path, views.status_page), 72 re_path(r'%s/activity/?$' % status_path, views.status_page), 73 re_path(r'%s/replies(.json)?/?$' % status_path, views.replies_page), 74 75 # books 76 re_path(r'%s(.json)?/?$' % book_path, views.book_page), 77 re_path(r'%s/edit/?$' % book_path, views.edit_book_page), 78 re_path(r'%s/editions(.json)?/?$' % book_path, views.editions_page), 79 80 re_path(r'^author/(?P<author_id>[\w\-]+)(.json)?/?$', views.author_page), 81 re_path(r'^tag/(?P<tag_id>.+)\.json/?$', views.tag_page), 82 re_path(r'^tag/(?P<tag_id>.+)/?$', views.tag_page), 83 re_path(r'^%s/shelf/(?P<shelf_identifier>[\w-]+)(.json)?/?$' % \ 84 user_path, views.shelf_page), 85 re_path(r'^%s/shelf/(?P<shelf_identifier>[\w-]+)(.json)?/?$' % \ 86 local_user_path, views.shelf_page), 87 88 re_path(r'^search/?$', views.search), 89 90 # internal action endpoints 91 re_path(r'^logout/?$', actions.user_logout), 92 re_path(r'^user-login/?$', actions.user_login), 93 re_path(r'^user-register/?$', actions.register), 94 re_path(r'^reset-password-request/?$', actions.password_reset_request), 95 re_path(r'^reset-password/?$', actions.password_reset), 96 re_path(r'^change-password/?$', actions.password_change), 97 98 re_path(r'^edit-profile/?$', actions.edit_profile), 99 100 re_path(r'^import-data/?', actions.import_data), 101 re_path(r'^retry-import/?', actions.retry_import), 102 re_path(r'^resolve-book/?', actions.resolve_book), 103 re_path(r'^edit-book/(?P<book_id>\d+)/?', actions.edit_book), 104 re_path(r'^upload-cover/(?P<book_id>\d+)/?', actions.upload_cover), 105 re_path(r'^add-description/(?P<book_id>\d+)/?', actions.add_description), 106 107 re_path(r'^edit-readthrough/?', actions.edit_readthrough), 108 re_path(r'^delete-readthrough/?', actions.delete_readthrough), 109 110 re_path(r'^rate/?$', actions.rate), 111 re_path(r'^review/?$', actions.review), 112 re_path(r'^quote/?$', actions.quotate), 113 re_path(r'^comment/?$', actions.comment), 114 re_path(r'^tag/?$', actions.tag), 115 re_path(r'^untag/?$', actions.untag), 116 re_path(r'^reply/?$', actions.reply), 117 118 re_path(r'^favorite/(?P<status_id>\d+)/?$', actions.favorite), 119 re_path(r'^unfavorite/(?P<status_id>\d+)/?$', actions.unfavorite), 120 re_path(r'^boost/(?P<status_id>\d+)/?$', actions.boost), 121 re_path(r'^unboost/(?P<status_id>\d+)/?$', actions.unboost), 122 123 re_path(r'^delete-status/(?P<status_id>\d+)/?$', actions.delete_status), 124 125 re_path(r'^create-shelf/?$', actions.create_shelf), 126 re_path(r'^edit-shelf/(?P<shelf_id>\d+)?$', actions.edit_shelf), 127 re_path(r'^delete-shelf/(?P<shelf_id>\d+)?$', actions.delete_shelf), 128 re_path(r'^shelve/?$', actions.shelve), 129 re_path(r'^unshelve/?$', actions.unshelve), 130 re_path(r'^start-reading/(?P<book_id>\d+)/?$', actions.start_reading), 131 re_path(r'^finish-reading/(?P<book_id>\d+)/?$', actions.finish_reading), 132 133 re_path(r'^follow/?$', actions.follow), 134 re_path(r'^unfollow/?$', actions.unfollow), 135 re_path(r'^accept-follow-request/?$', actions.accept_follow_request), 136 re_path(r'^delete-follow-request/?$', actions.delete_follow_request), 137 138 re_path(r'^clear-notifications/?$', actions.clear_notifications), 139 140 re_path(r'^create-invite/?$', actions.create_invite), 141 142 ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) 143 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bookwyrm/urls.py b/bookwyrm/urls.py --- a/bookwyrm/urls.py +++ b/bookwyrm/urls.py @@ -6,8 +6,8 @@ from bookwyrm import incoming, outgoing, views, settings, wellknown from bookwyrm import view_actions as actions -username_regex = r'(?P<username>[\w\-_]+@[\w\-\_\.]+)' -localname_regex = r'(?P<username>[\w\-_]+)' +username_regex = r'(?P<username>[\w\-_\.]+@[\w\-\_\.]+)' +localname_regex = r'(?P<username>[\w\-_\.]+)' user_path = r'^user/%s' % username_regex local_user_path = r'^user/%s' % localname_regex @@ -61,8 +61,8 @@ # should return a ui view or activitypub json blob as requested # users re_path(r'%s/?$' % user_path, views.user_page), - re_path(r'%s/?$' % local_user_path, views.user_page), re_path(r'%s\.json$' % local_user_path, views.user_page), + re_path(r'%s/?$' % local_user_path, views.user_page), re_path(r'%s/shelves/?$' % local_user_path, views.user_shelves_page), re_path(r'%s/followers(.json)?/?$' % local_user_path, views.followers_page), re_path(r'%s/following(.json)?/?$' % local_user_path, views.following_page),
{"golden_diff": "diff --git a/bookwyrm/urls.py b/bookwyrm/urls.py\n--- a/bookwyrm/urls.py\n+++ b/bookwyrm/urls.py\n@@ -6,8 +6,8 @@\n from bookwyrm import incoming, outgoing, views, settings, wellknown\n from bookwyrm import view_actions as actions\n \n-username_regex = r'(?P<username>[\\w\\-_]+@[\\w\\-\\_\\.]+)'\n-localname_regex = r'(?P<username>[\\w\\-_]+)'\n+username_regex = r'(?P<username>[\\w\\-_\\.]+@[\\w\\-\\_\\.]+)'\n+localname_regex = r'(?P<username>[\\w\\-_\\.]+)'\n user_path = r'^user/%s' % username_regex\n local_user_path = r'^user/%s' % localname_regex\n \n@@ -61,8 +61,8 @@\n # should return a ui view or activitypub json blob as requested\n # users\n re_path(r'%s/?$' % user_path, views.user_page),\n- re_path(r'%s/?$' % local_user_path, views.user_page),\n re_path(r'%s\\.json$' % local_user_path, views.user_page),\n+ re_path(r'%s/?$' % local_user_path, views.user_page),\n re_path(r'%s/shelves/?$' % local_user_path, views.user_shelves_page),\n re_path(r'%s/followers(.json)?/?$' % local_user_path, views.followers_page),\n re_path(r'%s/following(.json)?/?$' % local_user_path, views.following_page),\n", "issue": "User profile failing to load\n**Describe the bug**\r\nAttempting to access https://bookwyrm.social/user/jonathan.brodsky causes a\r\n```\r\nServer Error\r\nSomething went wrong! Sorry about that.\r\n```\r\n\r\n**To Reproduce**\r\nGo to https://bookwyrm.social/user/jonathan.brodsky\r\nIt appears to apply to all urls within user/jonathan.brodsky - see https://bookwyrm.social/user/jonathan.brodsky/generatednote/1994\r\n\r\n**Expected behavior**\r\nA user profile page loads.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: windows\r\n - Browser Chrome\r\n - Version 87.0.4280.66\n", "before_files": [{"content": "''' url routing for the app and api '''\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, re_path\n\nfrom bookwyrm import incoming, outgoing, views, settings, wellknown\nfrom bookwyrm import view_actions as actions\n\nusername_regex = r'(?P<username>[\\w\\-_]+@[\\w\\-\\_\\.]+)'\nlocalname_regex = r'(?P<username>[\\w\\-_]+)'\nuser_path = r'^user/%s' % username_regex\nlocal_user_path = r'^user/%s' % localname_regex\n\nstatus_types = [\n 'status',\n 'review',\n 'comment',\n 'quotation',\n 'boost',\n 'generatednote'\n]\nstatus_path = r'%s/(%s)/(?P<status_id>\\d+)' % \\\n (local_user_path, '|'.join(status_types))\n\nbook_path = r'^book/(?P<book_id>\\d+)'\n\nhandler404 = 'bookwyrm.views.not_found_page'\nhandler500 = 'bookwyrm.views.server_error_page'\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n # federation endpoints\n re_path(r'^inbox/?$', incoming.shared_inbox),\n re_path(r'%s/inbox/?$' % local_user_path, incoming.inbox),\n re_path(r'%s/outbox/?$' % local_user_path, outgoing.outbox),\n\n # .well-known endpoints\n re_path(r'^.well-known/webfinger/?$', wellknown.webfinger),\n re_path(r'^.well-known/nodeinfo/?$', wellknown.nodeinfo_pointer),\n re_path(r'^nodeinfo/2\\.0/?$', wellknown.nodeinfo),\n re_path(r'^api/v1/instance/?$', wellknown.instance_info),\n re_path(r'^api/v1/instance/peers/?$', wellknown.peers),\n # TODO: re_path(r'^.well-known/host-meta/?$', incoming.host_meta),\n # TODO: robots.txt\n\n # ui views\n re_path(r'^login/?$', views.login_page),\n re_path(r'^about/?$', views.about_page),\n re_path(r'^password-reset/?$', views.password_reset_request),\n re_path(r'^password-reset/(?P<code>[A-Za-z0-9]+)/?$', views.password_reset),\n re_path(r'^invite/?$', 
views.manage_invites),\n re_path(r'^invite/(?P<code>[A-Za-z0-9]+)/?$', views.invite_page),\n\n path('', views.home),\n re_path(r'^(?P<tab>home|local|federated)/?$', views.home_tab),\n re_path(r'^notifications/?', views.notifications_page),\n re_path(r'^import/?$', views.import_page),\n re_path(r'^import-status/(\\d+)/?$', views.import_status),\n re_path(r'^user-edit/?$', views.edit_profile_page),\n\n # should return a ui view or activitypub json blob as requested\n # users\n re_path(r'%s/?$' % user_path, views.user_page),\n re_path(r'%s/?$' % local_user_path, views.user_page),\n re_path(r'%s\\.json$' % local_user_path, views.user_page),\n re_path(r'%s/shelves/?$' % local_user_path, views.user_shelves_page),\n re_path(r'%s/followers(.json)?/?$' % local_user_path, views.followers_page),\n re_path(r'%s/following(.json)?/?$' % local_user_path, views.following_page),\n\n # statuses\n re_path(r'%s(.json)?/?$' % status_path, views.status_page),\n re_path(r'%s/activity/?$' % status_path, views.status_page),\n re_path(r'%s/replies(.json)?/?$' % status_path, views.replies_page),\n\n # books\n re_path(r'%s(.json)?/?$' % book_path, views.book_page),\n re_path(r'%s/edit/?$' % book_path, views.edit_book_page),\n re_path(r'%s/editions(.json)?/?$' % book_path, views.editions_page),\n\n re_path(r'^author/(?P<author_id>[\\w\\-]+)(.json)?/?$', views.author_page),\n re_path(r'^tag/(?P<tag_id>.+)\\.json/?$', views.tag_page),\n re_path(r'^tag/(?P<tag_id>.+)/?$', views.tag_page),\n re_path(r'^%s/shelf/(?P<shelf_identifier>[\\w-]+)(.json)?/?$' % \\\n user_path, views.shelf_page),\n re_path(r'^%s/shelf/(?P<shelf_identifier>[\\w-]+)(.json)?/?$' % \\\n local_user_path, views.shelf_page),\n\n re_path(r'^search/?$', views.search),\n\n # internal action endpoints\n re_path(r'^logout/?$', actions.user_logout),\n re_path(r'^user-login/?$', actions.user_login),\n re_path(r'^user-register/?$', actions.register),\n re_path(r'^reset-password-request/?$', actions.password_reset_request),\n re_path(r'^reset-password/?$', actions.password_reset),\n re_path(r'^change-password/?$', actions.password_change),\n\n re_path(r'^edit-profile/?$', actions.edit_profile),\n\n re_path(r'^import-data/?', actions.import_data),\n re_path(r'^retry-import/?', actions.retry_import),\n re_path(r'^resolve-book/?', actions.resolve_book),\n re_path(r'^edit-book/(?P<book_id>\\d+)/?', actions.edit_book),\n re_path(r'^upload-cover/(?P<book_id>\\d+)/?', actions.upload_cover),\n re_path(r'^add-description/(?P<book_id>\\d+)/?', actions.add_description),\n\n re_path(r'^edit-readthrough/?', actions.edit_readthrough),\n re_path(r'^delete-readthrough/?', actions.delete_readthrough),\n\n re_path(r'^rate/?$', actions.rate),\n re_path(r'^review/?$', actions.review),\n re_path(r'^quote/?$', actions.quotate),\n re_path(r'^comment/?$', actions.comment),\n re_path(r'^tag/?$', actions.tag),\n re_path(r'^untag/?$', actions.untag),\n re_path(r'^reply/?$', actions.reply),\n\n re_path(r'^favorite/(?P<status_id>\\d+)/?$', actions.favorite),\n re_path(r'^unfavorite/(?P<status_id>\\d+)/?$', actions.unfavorite),\n re_path(r'^boost/(?P<status_id>\\d+)/?$', actions.boost),\n re_path(r'^unboost/(?P<status_id>\\d+)/?$', actions.unboost),\n\n re_path(r'^delete-status/(?P<status_id>\\d+)/?$', actions.delete_status),\n\n re_path(r'^create-shelf/?$', actions.create_shelf),\n re_path(r'^edit-shelf/(?P<shelf_id>\\d+)?$', actions.edit_shelf),\n re_path(r'^delete-shelf/(?P<shelf_id>\\d+)?$', actions.delete_shelf),\n re_path(r'^shelve/?$', actions.shelve),\n re_path(r'^unshelve/?$', 
actions.unshelve),\n re_path(r'^start-reading/(?P<book_id>\\d+)/?$', actions.start_reading),\n re_path(r'^finish-reading/(?P<book_id>\\d+)/?$', actions.finish_reading),\n\n re_path(r'^follow/?$', actions.follow),\n re_path(r'^unfollow/?$', actions.unfollow),\n re_path(r'^accept-follow-request/?$', actions.accept_follow_request),\n re_path(r'^delete-follow-request/?$', actions.delete_follow_request),\n\n re_path(r'^clear-notifications/?$', actions.clear_notifications),\n\n re_path(r'^create-invite/?$', actions.create_invite),\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "bookwyrm/urls.py"}], "after_files": [{"content": "''' url routing for the app and api '''\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, re_path\n\nfrom bookwyrm import incoming, outgoing, views, settings, wellknown\nfrom bookwyrm import view_actions as actions\n\nusername_regex = r'(?P<username>[\\w\\-_\\.]+@[\\w\\-\\_\\.]+)'\nlocalname_regex = r'(?P<username>[\\w\\-_\\.]+)'\nuser_path = r'^user/%s' % username_regex\nlocal_user_path = r'^user/%s' % localname_regex\n\nstatus_types = [\n 'status',\n 'review',\n 'comment',\n 'quotation',\n 'boost',\n 'generatednote'\n]\nstatus_path = r'%s/(%s)/(?P<status_id>\\d+)' % \\\n (local_user_path, '|'.join(status_types))\n\nbook_path = r'^book/(?P<book_id>\\d+)'\n\nhandler404 = 'bookwyrm.views.not_found_page'\nhandler500 = 'bookwyrm.views.server_error_page'\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n # federation endpoints\n re_path(r'^inbox/?$', incoming.shared_inbox),\n re_path(r'%s/inbox/?$' % local_user_path, incoming.inbox),\n re_path(r'%s/outbox/?$' % local_user_path, outgoing.outbox),\n\n # .well-known endpoints\n re_path(r'^.well-known/webfinger/?$', wellknown.webfinger),\n re_path(r'^.well-known/nodeinfo/?$', wellknown.nodeinfo_pointer),\n re_path(r'^nodeinfo/2\\.0/?$', wellknown.nodeinfo),\n re_path(r'^api/v1/instance/?$', wellknown.instance_info),\n re_path(r'^api/v1/instance/peers/?$', wellknown.peers),\n # TODO: re_path(r'^.well-known/host-meta/?$', incoming.host_meta),\n # TODO: robots.txt\n\n # ui views\n re_path(r'^login/?$', views.login_page),\n re_path(r'^about/?$', views.about_page),\n re_path(r'^password-reset/?$', views.password_reset_request),\n re_path(r'^password-reset/(?P<code>[A-Za-z0-9]+)/?$', views.password_reset),\n re_path(r'^invite/?$', views.manage_invites),\n re_path(r'^invite/(?P<code>[A-Za-z0-9]+)/?$', views.invite_page),\n\n path('', views.home),\n re_path(r'^(?P<tab>home|local|federated)/?$', views.home_tab),\n re_path(r'^notifications/?', views.notifications_page),\n re_path(r'^import/?$', views.import_page),\n re_path(r'^import-status/(\\d+)/?$', views.import_status),\n re_path(r'^user-edit/?$', views.edit_profile_page),\n\n # should return a ui view or activitypub json blob as requested\n # users\n re_path(r'%s/?$' % user_path, views.user_page),\n re_path(r'%s\\.json$' % local_user_path, views.user_page),\n re_path(r'%s/?$' % local_user_path, views.user_page),\n re_path(r'%s/shelves/?$' % local_user_path, views.user_shelves_page),\n re_path(r'%s/followers(.json)?/?$' % local_user_path, views.followers_page),\n re_path(r'%s/following(.json)?/?$' % local_user_path, views.following_page),\n\n # statuses\n re_path(r'%s(.json)?/?$' % status_path, views.status_page),\n re_path(r'%s/activity/?$' % status_path, views.status_page),\n re_path(r'%s/replies(.json)?/?$' % status_path, views.replies_page),\n\n # books\n re_path(r'%s(.json)?/?$' % 
book_path, views.book_page),\n re_path(r'%s/edit/?$' % book_path, views.edit_book_page),\n re_path(r'%s/editions(.json)?/?$' % book_path, views.editions_page),\n\n re_path(r'^author/(?P<author_id>[\\w\\-]+)(.json)?/?$', views.author_page),\n re_path(r'^tag/(?P<tag_id>.+)\\.json/?$', views.tag_page),\n re_path(r'^tag/(?P<tag_id>.+)/?$', views.tag_page),\n re_path(r'^%s/shelf/(?P<shelf_identifier>[\\w-]+)(.json)?/?$' % \\\n user_path, views.shelf_page),\n re_path(r'^%s/shelf/(?P<shelf_identifier>[\\w-]+)(.json)?/?$' % \\\n local_user_path, views.shelf_page),\n\n re_path(r'^search/?$', views.search),\n\n # internal action endpoints\n re_path(r'^logout/?$', actions.user_logout),\n re_path(r'^user-login/?$', actions.user_login),\n re_path(r'^user-register/?$', actions.register),\n re_path(r'^reset-password-request/?$', actions.password_reset_request),\n re_path(r'^reset-password/?$', actions.password_reset),\n re_path(r'^change-password/?$', actions.password_change),\n\n re_path(r'^edit-profile/?$', actions.edit_profile),\n\n re_path(r'^import-data/?', actions.import_data),\n re_path(r'^retry-import/?', actions.retry_import),\n re_path(r'^resolve-book/?', actions.resolve_book),\n re_path(r'^edit-book/(?P<book_id>\\d+)/?', actions.edit_book),\n re_path(r'^upload-cover/(?P<book_id>\\d+)/?', actions.upload_cover),\n re_path(r'^add-description/(?P<book_id>\\d+)/?', actions.add_description),\n\n re_path(r'^edit-readthrough/?', actions.edit_readthrough),\n re_path(r'^delete-readthrough/?', actions.delete_readthrough),\n\n re_path(r'^rate/?$', actions.rate),\n re_path(r'^review/?$', actions.review),\n re_path(r'^quote/?$', actions.quotate),\n re_path(r'^comment/?$', actions.comment),\n re_path(r'^tag/?$', actions.tag),\n re_path(r'^untag/?$', actions.untag),\n re_path(r'^reply/?$', actions.reply),\n\n re_path(r'^favorite/(?P<status_id>\\d+)/?$', actions.favorite),\n re_path(r'^unfavorite/(?P<status_id>\\d+)/?$', actions.unfavorite),\n re_path(r'^boost/(?P<status_id>\\d+)/?$', actions.boost),\n re_path(r'^unboost/(?P<status_id>\\d+)/?$', actions.unboost),\n\n re_path(r'^delete-status/(?P<status_id>\\d+)/?$', actions.delete_status),\n\n re_path(r'^create-shelf/?$', actions.create_shelf),\n re_path(r'^edit-shelf/(?P<shelf_id>\\d+)?$', actions.edit_shelf),\n re_path(r'^delete-shelf/(?P<shelf_id>\\d+)?$', actions.delete_shelf),\n re_path(r'^shelve/?$', actions.shelve),\n re_path(r'^unshelve/?$', actions.unshelve),\n re_path(r'^start-reading/(?P<book_id>\\d+)/?$', actions.start_reading),\n re_path(r'^finish-reading/(?P<book_id>\\d+)/?$', actions.finish_reading),\n\n re_path(r'^follow/?$', actions.follow),\n re_path(r'^unfollow/?$', actions.unfollow),\n re_path(r'^accept-follow-request/?$', actions.accept_follow_request),\n re_path(r'^delete-follow-request/?$', actions.delete_follow_request),\n\n re_path(r'^clear-notifications/?$', actions.clear_notifications),\n\n re_path(r'^create-invite/?$', actions.create_invite),\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "bookwyrm/urls.py"}]}
2,439
364
gh_patches_debug_31884
rasdani/github-patches
git_diff
apluslms__a-plus-560
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- A+ front page The A+ front page should display the name of the course instance in addition to the course name and code; Multiple instances of the same course are hard to separate in the A+ front page because the only different parts are the open/close dates and the image. Course instances have names, so that should be displayed in the front page too. Jaakko has some ideas; contact him! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `course/views.py` Content: ``` 1 import datetime 2 3 import icalendar 4 from django.conf import settings 5 from django.contrib import messages 6 from django.core.exceptions import PermissionDenied 7 from django.http import Http404 8 from django.http.response import HttpResponse 9 from django.shortcuts import get_object_or_404, redirect 10 from django.utils import html 11 from django.utils import timezone 12 from django.utils.translation import ugettext_lazy as _ 13 14 from authorization.permissions import ACCESS 15 from exercise.cache.hierarchy import NoSuchContent 16 from exercise.models import LearningObject 17 from lib.helpers import settings_text 18 from lib.viewbase import BaseTemplateView, BaseRedirectMixin, BaseFormView, BaseView 19 from userprofile.viewbase import UserProfileView 20 from .forms import GroupsForm, GroupSelectForm 21 from .models import CourseInstance, Enrollment 22 from .permissions import EnrollInfoVisiblePermission 23 from .renders import group_info_context 24 from .viewbase import CourseModuleBaseView, CourseInstanceMixin, EnrollableViewMixin 25 26 27 class HomeView(UserProfileView): 28 access_mode = ACCESS.ANONYMOUS 29 template_name = "course/index.html" 30 31 def get_common_objects(self): 32 super().get_common_objects() 33 self.welcome_text = settings_text('WELCOME_TEXT') 34 self.internal_user_label = settings_text('INTERNAL_USER_LABEL') 35 self.external_user_label = settings_text('EXTERNAL_USER_LABEL') 36 self.instances = [] 37 prio2 = [] 38 treshold = timezone.now() - datetime.timedelta(days=10) 39 for instance in CourseInstance.objects.get_visible(self.request.user)\ 40 .filter(ending_time__gte=timezone.now()): 41 if instance.starting_time > treshold: 42 self.instances += [instance] 43 else: 44 prio2 += [instance] 45 self.instances += prio2 46 self.note("welcome_text", "internal_user_label", "external_user_label", "instances") 47 48 49 class ArchiveView(UserProfileView): 50 access_mode = ACCESS.ANONYMOUS 51 template_name = "course/archive.html" 52 53 def get_common_objects(self): 54 super().get_common_objects() 55 self.instances = CourseInstance.objects.get_visible(self.request.user) 56 self.note("instances") 57 58 class InstanceView(EnrollableViewMixin, BaseTemplateView): 59 access_mode = ACCESS.STUDENT 60 # ACCESS.STUDENT requires users to log in, but the access mode is dropped 61 # in public courses. CourseVisiblePermission has more restrictions as well. 62 template_name = "course/course.html" 63 64 def handle_no_permission(self): 65 if self.request.user.is_authenticated \ 66 and self.instance.view_content_to == CourseInstance.VIEW_ACCESS.ENROLLED: 67 # The course instance is visible to only enrolled students, so 68 # redirect the user to the enroll page instead of showing 69 # a 403 Forbidden error. 
70 return redirect(self.instance.get_url('enroll')) 71 return super().handle_no_permission() 72 73 def get(self, request, *args, **kwargs): 74 # external LTI Tool Providers may return the user to the course instance view 75 # with a message given in GET query parameters 76 lti_error_msg = request.GET.get('lti_errormsg') 77 lti_msg = request.GET.get('lti_msg') 78 # message HTML is not escaped in the templates so escape it here 79 if lti_error_msg: 80 messages.error(request, html.escape(lti_error_msg)) 81 elif lti_msg: 82 messages.info(request, html.escape(lti_msg)) 83 84 return super().get(request, *args, **kwargs) 85 86 87 class Enroll(EnrollableViewMixin, BaseRedirectMixin, BaseTemplateView): 88 permission_classes = [EnrollInfoVisiblePermission] 89 course_permission_classes = [] 90 template_name = "course/enroll.html" 91 92 def post(self, request, *args, **kwargs): 93 94 if self.is_student or not self.enrollable: 95 messages.error(self.request, _("You cannot enroll, or have already enrolled, in this course.")) 96 raise PermissionDenied() 97 98 if not self.instance.is_enrollment_open(): 99 messages.error(self.request, _("The enrollment is not open.")) 100 raise PermissionDenied() 101 102 # Support enrollment questionnaires. 103 exercise = LearningObject.objects.find_enrollment_exercise( 104 self.instance, self.profile) 105 if exercise: 106 return self.redirect(exercise.get_absolute_url()) 107 108 self.instance.enroll_student(self.request.user) 109 return self.redirect(self.instance.get_absolute_url()) 110 111 112 class ModuleView(CourseModuleBaseView): 113 template_name = "course/module.html" 114 115 def get_common_objects(self): 116 super().get_common_objects() 117 self.now = timezone.now() 118 try: 119 self.children = self.content.flat_module(self.module) 120 cur, tree, prev, nex = self.content.find(self.module) 121 self.previous = prev 122 self.current = cur 123 self.next = nex 124 except NoSuchContent: 125 raise Http404 126 self.note('now', 'children', 'previous', 'current', 'next') 127 128 129 class CalendarExport(CourseInstanceMixin, BaseView): 130 131 def get(self, request, *args, **kwargs): 132 cal = icalendar.Calendar() 133 cal.add('prodid', '-// {} calendar //'.format(settings.BRAND_NAME)) 134 cal.add('version', '2.0') 135 for module in self.instance.course_modules.all(): 136 event = icalendar.Event() 137 event.add('summary', module.name) 138 event.add('dtstart', 139 module.closing_time - datetime.timedelta(hours=1)) 140 event.add('dtend', module.closing_time) 141 event.add('dtstamp', module.closing_time) 142 event['uid'] = "module/" + str(module.id) + "/A+" 143 cal.add_component(event) 144 145 return HttpResponse(cal.to_ical(), 146 content_type="text/calendar; charset=utf-8") 147 148 149 class GroupsView(CourseInstanceMixin, BaseFormView): 150 access_mode = ACCESS.ENROLLED 151 template_name = "course/groups.html" 152 form_class = GroupsForm 153 154 def get_common_objects(self): 155 super().get_common_objects() 156 self.enrollment = self.instance.get_enrollment_for(self.request.user) 157 self.groups = list(self.profile.groups.filter(course_instance=self.instance)) 158 self.note('enrollment','groups') 159 160 def get_form_kwargs(self): 161 kwargs = super().get_form_kwargs() 162 kwargs["profile"] = self.profile 163 kwargs["instance"] = self.instance 164 kwargs["content"] = self.content 165 return kwargs 166 167 def get_success_url(self): 168 return self.instance.get_url('groups') 169 170 def form_valid(self, form): 171 form.save() 172 messages.success(self.request, _("A new student 
group was created.")) 173 return super().form_valid(form) 174 175 176 class GroupSelect(CourseInstanceMixin, BaseFormView): 177 access_mode = ACCESS.ENROLLED 178 form_class = GroupSelectForm 179 template_name = "course/_group_info.html" 180 181 def get_form_kwargs(self): 182 kwargs = super().get_form_kwargs() 183 kwargs["profile"] = self.profile 184 kwargs["instance"] = self.instance 185 return kwargs 186 187 def get_success_url(self): 188 return self.instance.get_absolute_url() 189 190 def get(self, request, *args, **kwargs): 191 return self.http_method_not_allowed(request, *args, **kwargs) 192 193 def form_invalid(self, form): 194 return HttpResponse('Invalid group selection') 195 196 def form_valid(self, form): 197 enrollment = form.save() 198 if self.request.is_ajax(): 199 return self.render_to_response(self.get_context_data( 200 **group_info_context(enrollment.selected_group, self.profile))) 201 return super().form_valid(form) 202 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/course/views.py b/course/views.py --- a/course/views.py +++ b/course/views.py @@ -33,17 +33,42 @@ self.welcome_text = settings_text('WELCOME_TEXT') self.internal_user_label = settings_text('INTERNAL_USER_LABEL') self.external_user_label = settings_text('EXTERNAL_USER_LABEL') - self.instances = [] - prio2 = [] - treshold = timezone.now() - datetime.timedelta(days=10) - for instance in CourseInstance.objects.get_visible(self.request.user)\ - .filter(ending_time__gte=timezone.now()): - if instance.starting_time > treshold: - self.instances += [instance] - else: - prio2 += [instance] - self.instances += prio2 - self.note("welcome_text", "internal_user_label", "external_user_label", "instances") + my_instances = [] + all_instances = [] + end_threshold = timezone.now() - datetime.timedelta(days=30) + user = self.request.user + is_logged_in = False + + if user and user.is_authenticated: + is_logged_in = True + for instance in (CourseInstance.objects + .filter(course__teachers=user.userprofile, + ending_time__gte=end_threshold) + .all()): + my_instances.append(instance) + + for instance in user.userprofile.assisting_courses.all().filter(ending_time__gte=end_threshold): + if instance not in my_instances: + my_instances.append(instance) + + for instance in user.userprofile.enrolled.all().filter(ending_time__gte=end_threshold): + if instance not in my_instances: + my_instances.append(instance) + + all_instances = CourseInstance.objects.get_visible(user).filter(ending_time__gte=end_threshold) + all_instances = [c for c in all_instances if c not in my_instances] + + self.all_instances = all_instances + self.my_instances = my_instances + self.is_logged_in = is_logged_in + + self.note("welcome_text", + "internal_user_label", + "external_user_label", + "my_instances", + "all_instances", + "is_logged_in", + ) class ArchiveView(UserProfileView):
{"golden_diff": "diff --git a/course/views.py b/course/views.py\n--- a/course/views.py\n+++ b/course/views.py\n@@ -33,17 +33,42 @@\n self.welcome_text = settings_text('WELCOME_TEXT')\n self.internal_user_label = settings_text('INTERNAL_USER_LABEL')\n self.external_user_label = settings_text('EXTERNAL_USER_LABEL')\n- self.instances = []\n- prio2 = []\n- treshold = timezone.now() - datetime.timedelta(days=10)\n- for instance in CourseInstance.objects.get_visible(self.request.user)\\\n- .filter(ending_time__gte=timezone.now()):\n- if instance.starting_time > treshold:\n- self.instances += [instance]\n- else:\n- prio2 += [instance]\n- self.instances += prio2\n- self.note(\"welcome_text\", \"internal_user_label\", \"external_user_label\", \"instances\")\n+ my_instances = []\n+ all_instances = []\n+ end_threshold = timezone.now() - datetime.timedelta(days=30)\n+ user = self.request.user\n+ is_logged_in = False\n+\n+ if user and user.is_authenticated:\n+ is_logged_in = True\n+ for instance in (CourseInstance.objects\n+ .filter(course__teachers=user.userprofile,\n+ ending_time__gte=end_threshold)\n+ .all()):\n+ my_instances.append(instance)\n+\n+ for instance in user.userprofile.assisting_courses.all().filter(ending_time__gte=end_threshold):\n+ if instance not in my_instances:\n+ my_instances.append(instance)\n+ \n+ for instance in user.userprofile.enrolled.all().filter(ending_time__gte=end_threshold):\n+ if instance not in my_instances:\n+ my_instances.append(instance)\n+ \n+ all_instances = CourseInstance.objects.get_visible(user).filter(ending_time__gte=end_threshold)\n+ all_instances = [c for c in all_instances if c not in my_instances]\n+ \n+ self.all_instances = all_instances\n+ self.my_instances = my_instances\n+ self.is_logged_in = is_logged_in\n+\n+ self.note(\"welcome_text\", \n+ \"internal_user_label\", \n+ \"external_user_label\",\n+ \"my_instances\",\n+ \"all_instances\",\n+ \"is_logged_in\",\n+ )\n \n \n class ArchiveView(UserProfileView):\n", "issue": "A+ front page\nThe A+ front page should display the name of the course instance in addition to the course name and code; Multiple instances of the same course are hard to separate in the A+ front page because the only different parts are the open/close dates and the image. Course instances have names, so that should be displayed in the front page too. 
Jaakko has some ideas; contact him!\n", "before_files": [{"content": "import datetime\n\nimport icalendar\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils import html\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom authorization.permissions import ACCESS\nfrom exercise.cache.hierarchy import NoSuchContent\nfrom exercise.models import LearningObject\nfrom lib.helpers import settings_text\nfrom lib.viewbase import BaseTemplateView, BaseRedirectMixin, BaseFormView, BaseView\nfrom userprofile.viewbase import UserProfileView\nfrom .forms import GroupsForm, GroupSelectForm\nfrom .models import CourseInstance, Enrollment\nfrom .permissions import EnrollInfoVisiblePermission\nfrom .renders import group_info_context\nfrom .viewbase import CourseModuleBaseView, CourseInstanceMixin, EnrollableViewMixin\n\n\nclass HomeView(UserProfileView):\n access_mode = ACCESS.ANONYMOUS\n template_name = \"course/index.html\"\n\n def get_common_objects(self):\n super().get_common_objects()\n self.welcome_text = settings_text('WELCOME_TEXT')\n self.internal_user_label = settings_text('INTERNAL_USER_LABEL')\n self.external_user_label = settings_text('EXTERNAL_USER_LABEL')\n self.instances = []\n prio2 = []\n treshold = timezone.now() - datetime.timedelta(days=10)\n for instance in CourseInstance.objects.get_visible(self.request.user)\\\n .filter(ending_time__gte=timezone.now()):\n if instance.starting_time > treshold:\n self.instances += [instance]\n else:\n prio2 += [instance]\n self.instances += prio2\n self.note(\"welcome_text\", \"internal_user_label\", \"external_user_label\", \"instances\")\n\n\nclass ArchiveView(UserProfileView):\n access_mode = ACCESS.ANONYMOUS\n template_name = \"course/archive.html\"\n\n def get_common_objects(self):\n super().get_common_objects()\n self.instances = CourseInstance.objects.get_visible(self.request.user)\n self.note(\"instances\")\n\nclass InstanceView(EnrollableViewMixin, BaseTemplateView):\n access_mode = ACCESS.STUDENT\n # ACCESS.STUDENT requires users to log in, but the access mode is dropped\n # in public courses. 
CourseVisiblePermission has more restrictions as well.\n template_name = \"course/course.html\"\n\n def handle_no_permission(self):\n if self.request.user.is_authenticated \\\n and self.instance.view_content_to == CourseInstance.VIEW_ACCESS.ENROLLED:\n # The course instance is visible to only enrolled students, so\n # redirect the user to the enroll page instead of showing\n # a 403 Forbidden error.\n return redirect(self.instance.get_url('enroll'))\n return super().handle_no_permission()\n\n def get(self, request, *args, **kwargs):\n # external LTI Tool Providers may return the user to the course instance view\n # with a message given in GET query parameters\n lti_error_msg = request.GET.get('lti_errormsg')\n lti_msg = request.GET.get('lti_msg')\n # message HTML is not escaped in the templates so escape it here\n if lti_error_msg:\n messages.error(request, html.escape(lti_error_msg))\n elif lti_msg:\n messages.info(request, html.escape(lti_msg))\n\n return super().get(request, *args, **kwargs)\n\n\nclass Enroll(EnrollableViewMixin, BaseRedirectMixin, BaseTemplateView):\n permission_classes = [EnrollInfoVisiblePermission]\n course_permission_classes = []\n template_name = \"course/enroll.html\"\n\n def post(self, request, *args, **kwargs):\n\n if self.is_student or not self.enrollable:\n messages.error(self.request, _(\"You cannot enroll, or have already enrolled, in this course.\"))\n raise PermissionDenied()\n\n if not self.instance.is_enrollment_open():\n messages.error(self.request, _(\"The enrollment is not open.\"))\n raise PermissionDenied()\n\n # Support enrollment questionnaires.\n exercise = LearningObject.objects.find_enrollment_exercise(\n self.instance, self.profile)\n if exercise:\n return self.redirect(exercise.get_absolute_url())\n\n self.instance.enroll_student(self.request.user)\n return self.redirect(self.instance.get_absolute_url())\n\n\nclass ModuleView(CourseModuleBaseView):\n template_name = \"course/module.html\"\n\n def get_common_objects(self):\n super().get_common_objects()\n self.now = timezone.now()\n try:\n self.children = self.content.flat_module(self.module)\n cur, tree, prev, nex = self.content.find(self.module)\n self.previous = prev\n self.current = cur\n self.next = nex\n except NoSuchContent:\n raise Http404\n self.note('now', 'children', 'previous', 'current', 'next')\n\n\nclass CalendarExport(CourseInstanceMixin, BaseView):\n\n def get(self, request, *args, **kwargs):\n cal = icalendar.Calendar()\n cal.add('prodid', '-// {} calendar //'.format(settings.BRAND_NAME))\n cal.add('version', '2.0')\n for module in self.instance.course_modules.all():\n event = icalendar.Event()\n event.add('summary', module.name)\n event.add('dtstart',\n module.closing_time - datetime.timedelta(hours=1))\n event.add('dtend', module.closing_time)\n event.add('dtstamp', module.closing_time)\n event['uid'] = \"module/\" + str(module.id) + \"/A+\"\n cal.add_component(event)\n\n return HttpResponse(cal.to_ical(),\n content_type=\"text/calendar; charset=utf-8\")\n\n\nclass GroupsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.ENROLLED\n template_name = \"course/groups.html\"\n form_class = GroupsForm\n\n def get_common_objects(self):\n super().get_common_objects()\n self.enrollment = self.instance.get_enrollment_for(self.request.user)\n self.groups = list(self.profile.groups.filter(course_instance=self.instance))\n self.note('enrollment','groups')\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"profile\"] = self.profile\n 
kwargs[\"instance\"] = self.instance\n kwargs[\"content\"] = self.content\n return kwargs\n\n def get_success_url(self):\n return self.instance.get_url('groups')\n\n def form_valid(self, form):\n form.save()\n messages.success(self.request, _(\"A new student group was created.\"))\n return super().form_valid(form)\n\n\nclass GroupSelect(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.ENROLLED\n form_class = GroupSelectForm\n template_name = \"course/_group_info.html\"\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"profile\"] = self.profile\n kwargs[\"instance\"] = self.instance\n return kwargs\n\n def get_success_url(self):\n return self.instance.get_absolute_url()\n\n def get(self, request, *args, **kwargs):\n return self.http_method_not_allowed(request, *args, **kwargs)\n\n def form_invalid(self, form):\n return HttpResponse('Invalid group selection')\n\n def form_valid(self, form):\n enrollment = form.save()\n if self.request.is_ajax():\n return self.render_to_response(self.get_context_data(\n **group_info_context(enrollment.selected_group, self.profile)))\n return super().form_valid(form)\n", "path": "course/views.py"}], "after_files": [{"content": "import datetime\n\nimport icalendar\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils import html\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom authorization.permissions import ACCESS\nfrom exercise.cache.hierarchy import NoSuchContent\nfrom exercise.models import LearningObject\nfrom lib.helpers import settings_text\nfrom lib.viewbase import BaseTemplateView, BaseRedirectMixin, BaseFormView, BaseView\nfrom userprofile.viewbase import UserProfileView\nfrom .forms import GroupsForm, GroupSelectForm\nfrom .models import CourseInstance, Enrollment\nfrom .permissions import EnrollInfoVisiblePermission\nfrom .renders import group_info_context\nfrom .viewbase import CourseModuleBaseView, CourseInstanceMixin, EnrollableViewMixin\n\n\nclass HomeView(UserProfileView):\n access_mode = ACCESS.ANONYMOUS\n template_name = \"course/index.html\"\n\n def get_common_objects(self):\n super().get_common_objects()\n self.welcome_text = settings_text('WELCOME_TEXT')\n self.internal_user_label = settings_text('INTERNAL_USER_LABEL')\n self.external_user_label = settings_text('EXTERNAL_USER_LABEL')\n my_instances = []\n all_instances = []\n end_threshold = timezone.now() - datetime.timedelta(days=30)\n user = self.request.user\n is_logged_in = False\n\n if user and user.is_authenticated:\n is_logged_in = True\n for instance in (CourseInstance.objects\n .filter(course__teachers=user.userprofile,\n ending_time__gte=end_threshold)\n .all()):\n my_instances.append(instance)\n\n for instance in user.userprofile.assisting_courses.all().filter(ending_time__gte=end_threshold):\n if instance not in my_instances:\n my_instances.append(instance)\n \n for instance in user.userprofile.enrolled.all().filter(ending_time__gte=end_threshold):\n if instance not in my_instances:\n my_instances.append(instance)\n \n all_instances = CourseInstance.objects.get_visible(user).filter(ending_time__gte=end_threshold)\n all_instances = [c for c in all_instances if c not in my_instances]\n \n self.all_instances = all_instances\n self.my_instances = my_instances\n 
self.is_logged_in = is_logged_in\n\n self.note(\"welcome_text\", \n \"internal_user_label\", \n \"external_user_label\",\n \"my_instances\",\n \"all_instances\",\n \"is_logged_in\",\n )\n\n\nclass ArchiveView(UserProfileView):\n access_mode = ACCESS.ANONYMOUS\n template_name = \"course/archive.html\"\n\n def get_common_objects(self):\n super().get_common_objects()\n self.instances = CourseInstance.objects.get_visible(self.request.user)\n self.note(\"instances\")\n\nclass InstanceView(EnrollableViewMixin, BaseTemplateView):\n access_mode = ACCESS.STUDENT\n # ACCESS.STUDENT requires users to log in, but the access mode is dropped\n # in public courses. CourseVisiblePermission has more restrictions as well.\n template_name = \"course/course.html\"\n\n def handle_no_permission(self):\n if self.request.user.is_authenticated \\\n and self.instance.view_content_to == CourseInstance.VIEW_ACCESS.ENROLLED:\n # The course instance is visible to only enrolled students, so\n # redirect the user to the enroll page instead of showing\n # a 403 Forbidden error.\n return redirect(self.instance.get_url('enroll'))\n return super().handle_no_permission()\n\n def get(self, request, *args, **kwargs):\n # external LTI Tool Providers may return the user to the course instance view\n # with a message given in GET query parameters\n lti_error_msg = request.GET.get('lti_errormsg')\n lti_msg = request.GET.get('lti_msg')\n # message HTML is not escaped in the templates so escape it here\n if lti_error_msg:\n messages.error(request, html.escape(lti_error_msg))\n elif lti_msg:\n messages.info(request, html.escape(lti_msg))\n\n return super().get(request, *args, **kwargs)\n\n\nclass Enroll(EnrollableViewMixin, BaseRedirectMixin, BaseTemplateView):\n permission_classes = [EnrollInfoVisiblePermission]\n course_permission_classes = []\n template_name = \"course/enroll.html\"\n\n def post(self, request, *args, **kwargs):\n\n if self.is_student or not self.enrollable:\n messages.error(self.request, _(\"You cannot enroll, or have already enrolled, in this course.\"))\n raise PermissionDenied()\n\n if not self.instance.is_enrollment_open():\n messages.error(self.request, _(\"The enrollment is not open.\"))\n raise PermissionDenied()\n\n # Support enrollment questionnaires.\n exercise = LearningObject.objects.find_enrollment_exercise(\n self.instance, self.profile)\n if exercise:\n return self.redirect(exercise.get_absolute_url())\n\n self.instance.enroll_student(self.request.user)\n return self.redirect(self.instance.get_absolute_url())\n\n\nclass ModuleView(CourseModuleBaseView):\n template_name = \"course/module.html\"\n\n def get_common_objects(self):\n super().get_common_objects()\n self.now = timezone.now()\n try:\n self.children = self.content.flat_module(self.module)\n cur, tree, prev, nex = self.content.find(self.module)\n self.previous = prev\n self.current = cur\n self.next = nex\n except NoSuchContent:\n raise Http404\n self.note('now', 'children', 'previous', 'current', 'next')\n\n\nclass CalendarExport(CourseInstanceMixin, BaseView):\n\n def get(self, request, *args, **kwargs):\n cal = icalendar.Calendar()\n cal.add('prodid', '-// {} calendar //'.format(settings.BRAND_NAME))\n cal.add('version', '2.0')\n for module in self.instance.course_modules.all():\n event = icalendar.Event()\n event.add('summary', module.name)\n event.add('dtstart',\n module.closing_time - datetime.timedelta(hours=1))\n event.add('dtend', module.closing_time)\n event.add('dtstamp', module.closing_time)\n event['uid'] = \"module/\" + 
str(module.id) + \"/A+\"\n cal.add_component(event)\n\n return HttpResponse(cal.to_ical(),\n content_type=\"text/calendar; charset=utf-8\")\n\n\nclass GroupsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.ENROLLED\n template_name = \"course/groups.html\"\n form_class = GroupsForm\n\n def get_common_objects(self):\n super().get_common_objects()\n self.enrollment = self.instance.get_enrollment_for(self.request.user)\n self.groups = list(self.profile.groups.filter(course_instance=self.instance))\n self.note('enrollment','groups')\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"profile\"] = self.profile\n kwargs[\"instance\"] = self.instance\n kwargs[\"content\"] = self.content\n return kwargs\n\n def get_success_url(self):\n return self.instance.get_url('groups')\n\n def form_valid(self, form):\n form.save()\n messages.success(self.request, _(\"A new student group was created.\"))\n return super().form_valid(form)\n\n\nclass GroupSelect(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.ENROLLED\n form_class = GroupSelectForm\n template_name = \"course/_group_info.html\"\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"profile\"] = self.profile\n kwargs[\"instance\"] = self.instance\n return kwargs\n\n def get_success_url(self):\n return self.instance.get_absolute_url()\n\n def get(self, request, *args, **kwargs):\n return self.http_method_not_allowed(request, *args, **kwargs)\n\n def form_invalid(self, form):\n return HttpResponse('Invalid group selection')\n\n def form_valid(self, form):\n enrollment = form.save()\n if self.request.is_ajax():\n return self.render_to_response(self.get_context_data(\n **group_info_context(enrollment.selected_group, self.profile)))\n return super().form_valid(form)\n", "path": "course/views.py"}]}
2,453
503
gh_patches_debug_18035
rasdani/github-patches
git_diff
gratipay__gratipay.com-2628
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Failed ACH transfers aren't reflected in user balance When payday initiates an ACH transfer for a user's balance, that user's balance is immediately reduced by the amount of the transfer. The problem come on the rare occasions where the ACH fails. The only notification we get (that I know about) is an email from Balanced about ACH failures. We need to restore funds to the user's account when the transfer did not, in fact, occur. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gittip/security/csrf.py` Content: ``` 1 """Cross Site Request Forgery middleware, borrowed from Django. 2 3 See also: 4 5 https://github.com/django/django/blob/master/django/middleware/csrf.py 6 https://docs.djangoproject.com/en/dev/ref/contrib/csrf/ 7 https://github.com/gittip/www.gittip.com/issues/88 8 9 """ 10 11 from datetime import timedelta 12 import re 13 import urlparse 14 from aspen import log_dammit 15 16 17 #from django.utils.cache import patch_vary_headers 18 cc_delim_re = re.compile(r'\s*,\s*') 19 def patch_vary_headers(response, newheaders): 20 """ 21 Adds (or updates) the "Vary" header in the given HttpResponse object. 22 newheaders is a list of header names that should be in "Vary". Existing 23 headers in "Vary" aren't removed. 24 """ 25 # Note that we need to keep the original order intact, because cache 26 # implementations may rely on the order of the Vary contents in, say, 27 # computing an MD5 hash. 28 if 'Vary' in response.headers: 29 vary_headers = cc_delim_re.split(response.headers['Vary']) 30 else: 31 vary_headers = [] 32 # Use .lower() here so we treat headers as case-insensitive. 33 existing_headers = set([header.lower() for header in vary_headers]) 34 additional_headers = [newheader for newheader in newheaders 35 if newheader.lower() not in existing_headers] 36 response.headers['Vary'] = ', '.join(vary_headers + additional_headers) 37 38 39 #from django.utils.http import same_origin 40 def same_origin(url1, url2): 41 """ 42 Checks if two URLs are 'same-origin' 43 """ 44 p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2) 45 return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port) 46 47 48 from aspen import Response 49 from crypto import constant_time_compare, get_random_string 50 51 REASON_NO_REFERER = "Referer checking failed - no Referer." 52 REASON_BAD_REFERER = "Referer checking failed - %s does not match %s." 53 REASON_NO_CSRF_COOKIE = "CSRF cookie not set." 54 REASON_BAD_TOKEN = "CSRF token missing or incorrect." 55 56 TOKEN_LENGTH = 32 57 CSRF_TIMEOUT = timedelta(days=7) 58 59 60 def _get_new_csrf_key(): 61 return get_random_string(TOKEN_LENGTH) 62 63 64 def _sanitize_token(token): 65 # Allow only alphanum, and ensure we return a 'str' for the sake 66 # of the post processing middleware. 67 if len(token) > TOKEN_LENGTH: 68 return _get_new_csrf_key() 69 token = re.sub('[^a-zA-Z0-9]+', '', str(token.decode('ascii', 'ignore'))) 70 if token == "": 71 # In case the cookie has been truncated to nothing at some point. 72 return _get_new_csrf_key() 73 return token 74 75 def _is_secure(request): 76 import gittip 77 return gittip.canonical_scheme == 'https' 78 79 def _get_host(request): 80 """Returns the HTTP host using the request headers. 
81 """ 82 return request.headers.get('X-Forwarded-Host', request.headers['Host']) 83 84 85 86 def inbound(request): 87 """Given a Request object, reject it if it's a forgery. 88 """ 89 if request.line.uri.startswith('/assets/'): return 90 91 try: 92 csrf_token = request.headers.cookie.get('csrf_token') 93 csrf_token = '' if csrf_token is None else csrf_token.value 94 csrf_token = _sanitize_token(csrf_token) 95 except KeyError: 96 csrf_token = _get_new_csrf_key() 97 98 request.context['csrf_token'] = csrf_token 99 100 # Assume that anything not defined as 'safe' by RC2616 needs protection 101 if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'): 102 103 if _is_secure(request): 104 # Suppose user visits http://example.com/ 105 # An active network attacker (man-in-the-middle, MITM) sends a 106 # POST form that targets https://example.com/detonate-bomb/ and 107 # submits it via JavaScript. 108 # 109 # The attacker will need to provide a CSRF cookie and token, but 110 # that's no problem for a MITM and the session-independent 111 # nonce we're using. So the MITM can circumvent the CSRF 112 # protection. This is true for any HTTP connection, but anyone 113 # using HTTPS expects better! For this reason, for 114 # https://example.com/ we need additional protection that treats 115 # http://example.com/ as completely untrusted. Under HTTPS, 116 # Barth et al. found that the Referer header is missing for 117 # same-domain requests in only about 0.2% of cases or less, so 118 # we can use strict Referer checking. 119 referer = request.headers.get('Referer') 120 if referer is None: 121 raise Response(403, REASON_NO_REFERER) 122 123 good_referer = 'https://%s/' % _get_host(request) 124 if not same_origin(referer, good_referer): 125 reason = REASON_BAD_REFERER % (referer, good_referer) 126 log_dammit(reason) 127 raise Response(403, reason) 128 129 if csrf_token is None: 130 raise Response(403, REASON_NO_CSRF_COOKIE) 131 132 # Check non-cookie token for match. 133 request_csrf_token = "" 134 if request.line.method == "POST": 135 request_csrf_token = request.body.get('csrf_token', '') 136 137 if request_csrf_token == "": 138 # Fall back to X-CSRF-TOKEN, to make things easier for AJAX, 139 # and possible for PUT/DELETE. 140 request_csrf_token = request.headers.get('X-CSRF-TOKEN', '') 141 142 if not constant_time_compare(request_csrf_token, csrf_token): 143 raise Response(403, REASON_BAD_TOKEN) 144 145 146 def outbound(request, response): 147 """Store the latest CSRF token as a cookie. 148 """ 149 csrf_token = request.context.get('csrf_token') 150 if csrf_token: 151 response.set_cookie('csrf_token', csrf_token, expires=CSRF_TIMEOUT, httponly=False) 152 153 # Content varies with the CSRF cookie, so set the Vary header. 154 patch_vary_headers(response, ('Cookie',)) 155 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gittip/security/csrf.py b/gittip/security/csrf.py --- a/gittip/security/csrf.py +++ b/gittip/security/csrf.py @@ -87,15 +87,14 @@ """Given a Request object, reject it if it's a forgery. """ if request.line.uri.startswith('/assets/'): return + if request.line.uri.startswith('/callbacks/'): return try: - csrf_token = request.headers.cookie.get('csrf_token') - csrf_token = '' if csrf_token is None else csrf_token.value - csrf_token = _sanitize_token(csrf_token) + csrf_token = _sanitize_token(request.headers.cookie['csrf_token'].value) except KeyError: - csrf_token = _get_new_csrf_key() + csrf_token = None - request.context['csrf_token'] = csrf_token + request.context['csrf_token'] = csrf_token or _get_new_csrf_key() # Assume that anything not defined as 'safe' by RC2616 needs protection if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
{"golden_diff": "diff --git a/gittip/security/csrf.py b/gittip/security/csrf.py\n--- a/gittip/security/csrf.py\n+++ b/gittip/security/csrf.py\n@@ -87,15 +87,14 @@\n \"\"\"Given a Request object, reject it if it's a forgery.\n \"\"\"\n if request.line.uri.startswith('/assets/'): return\n+ if request.line.uri.startswith('/callbacks/'): return\n \n try:\n- csrf_token = request.headers.cookie.get('csrf_token')\n- csrf_token = '' if csrf_token is None else csrf_token.value\n- csrf_token = _sanitize_token(csrf_token)\n+ csrf_token = _sanitize_token(request.headers.cookie['csrf_token'].value)\n except KeyError:\n- csrf_token = _get_new_csrf_key()\n+ csrf_token = None\n \n- request.context['csrf_token'] = csrf_token\n+ request.context['csrf_token'] = csrf_token or _get_new_csrf_key()\n \n # Assume that anything not defined as 'safe' by RC2616 needs protection\n if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n", "issue": "Failed ACH transfers aren't reflected in user balance\nWhen payday initiates an ACH transfer for a user's balance, that user's balance is immediately reduced by the amount of the transfer. The problem come on the rare occasions where the ACH fails. The only notification we get (that I know about) is an email from Balanced about ACH failures. We need to restore funds to the user's account when the transfer did not, in fact, occur.\n\n", "before_files": [{"content": "\"\"\"Cross Site Request Forgery middleware, borrowed from Django.\n\nSee also:\n\n https://github.com/django/django/blob/master/django/middleware/csrf.py\n https://docs.djangoproject.com/en/dev/ref/contrib/csrf/\n https://github.com/gittip/www.gittip.com/issues/88\n\n\"\"\"\n\nfrom datetime import timedelta\nimport re\nimport urlparse\nfrom aspen import log_dammit\n\n\n#from django.utils.cache import patch_vary_headers\ncc_delim_re = re.compile(r'\\s*,\\s*')\ndef patch_vary_headers(response, newheaders):\n \"\"\"\n Adds (or updates) the \"Vary\" header in the given HttpResponse object.\n newheaders is a list of header names that should be in \"Vary\". 
Existing\n headers in \"Vary\" aren't removed.\n \"\"\"\n # Note that we need to keep the original order intact, because cache\n # implementations may rely on the order of the Vary contents in, say,\n # computing an MD5 hash.\n if 'Vary' in response.headers:\n vary_headers = cc_delim_re.split(response.headers['Vary'])\n else:\n vary_headers = []\n # Use .lower() here so we treat headers as case-insensitive.\n existing_headers = set([header.lower() for header in vary_headers])\n additional_headers = [newheader for newheader in newheaders\n if newheader.lower() not in existing_headers]\n response.headers['Vary'] = ', '.join(vary_headers + additional_headers)\n\n\n#from django.utils.http import same_origin\ndef same_origin(url1, url2):\n \"\"\"\n Checks if two URLs are 'same-origin'\n \"\"\"\n p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)\n return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)\n\n\nfrom aspen import Response\nfrom crypto import constant_time_compare, get_random_string\n\nREASON_NO_REFERER = \"Referer checking failed - no Referer.\"\nREASON_BAD_REFERER = \"Referer checking failed - %s does not match %s.\"\nREASON_NO_CSRF_COOKIE = \"CSRF cookie not set.\"\nREASON_BAD_TOKEN = \"CSRF token missing or incorrect.\"\n\nTOKEN_LENGTH = 32\nCSRF_TIMEOUT = timedelta(days=7)\n\n\ndef _get_new_csrf_key():\n return get_random_string(TOKEN_LENGTH)\n\n\ndef _sanitize_token(token):\n # Allow only alphanum, and ensure we return a 'str' for the sake\n # of the post processing middleware.\n if len(token) > TOKEN_LENGTH:\n return _get_new_csrf_key()\n token = re.sub('[^a-zA-Z0-9]+', '', str(token.decode('ascii', 'ignore')))\n if token == \"\":\n # In case the cookie has been truncated to nothing at some point.\n return _get_new_csrf_key()\n return token\n\ndef _is_secure(request):\n import gittip\n return gittip.canonical_scheme == 'https'\n\ndef _get_host(request):\n \"\"\"Returns the HTTP host using the request headers.\n \"\"\"\n return request.headers.get('X-Forwarded-Host', request.headers['Host'])\n\n\n\ndef inbound(request):\n \"\"\"Given a Request object, reject it if it's a forgery.\n \"\"\"\n if request.line.uri.startswith('/assets/'): return\n\n try:\n csrf_token = request.headers.cookie.get('csrf_token')\n csrf_token = '' if csrf_token is None else csrf_token.value\n csrf_token = _sanitize_token(csrf_token)\n except KeyError:\n csrf_token = _get_new_csrf_key()\n\n request.context['csrf_token'] = csrf_token\n\n # Assume that anything not defined as 'safe' by RC2616 needs protection\n if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n\n if _is_secure(request):\n # Suppose user visits http://example.com/\n # An active network attacker (man-in-the-middle, MITM) sends a\n # POST form that targets https://example.com/detonate-bomb/ and\n # submits it via JavaScript.\n #\n # The attacker will need to provide a CSRF cookie and token, but\n # that's no problem for a MITM and the session-independent\n # nonce we're using. So the MITM can circumvent the CSRF\n # protection. This is true for any HTTP connection, but anyone\n # using HTTPS expects better! For this reason, for\n # https://example.com/ we need additional protection that treats\n # http://example.com/ as completely untrusted. Under HTTPS,\n # Barth et al. 
found that the Referer header is missing for\n # same-domain requests in only about 0.2% of cases or less, so\n # we can use strict Referer checking.\n referer = request.headers.get('Referer')\n if referer is None:\n raise Response(403, REASON_NO_REFERER)\n\n good_referer = 'https://%s/' % _get_host(request)\n if not same_origin(referer, good_referer):\n reason = REASON_BAD_REFERER % (referer, good_referer)\n log_dammit(reason)\n raise Response(403, reason)\n\n if csrf_token is None:\n raise Response(403, REASON_NO_CSRF_COOKIE)\n\n # Check non-cookie token for match.\n request_csrf_token = \"\"\n if request.line.method == \"POST\":\n request_csrf_token = request.body.get('csrf_token', '')\n\n if request_csrf_token == \"\":\n # Fall back to X-CSRF-TOKEN, to make things easier for AJAX,\n # and possible for PUT/DELETE.\n request_csrf_token = request.headers.get('X-CSRF-TOKEN', '')\n\n if not constant_time_compare(request_csrf_token, csrf_token):\n raise Response(403, REASON_BAD_TOKEN)\n\n\ndef outbound(request, response):\n \"\"\"Store the latest CSRF token as a cookie.\n \"\"\"\n csrf_token = request.context.get('csrf_token')\n if csrf_token:\n response.set_cookie('csrf_token', csrf_token, expires=CSRF_TIMEOUT, httponly=False)\n\n # Content varies with the CSRF cookie, so set the Vary header.\n patch_vary_headers(response, ('Cookie',))\n", "path": "gittip/security/csrf.py"}], "after_files": [{"content": "\"\"\"Cross Site Request Forgery middleware, borrowed from Django.\n\nSee also:\n\n https://github.com/django/django/blob/master/django/middleware/csrf.py\n https://docs.djangoproject.com/en/dev/ref/contrib/csrf/\n https://github.com/gittip/www.gittip.com/issues/88\n\n\"\"\"\n\nfrom datetime import timedelta\nimport re\nimport urlparse\nfrom aspen import log_dammit\n\n\n#from django.utils.cache import patch_vary_headers\ncc_delim_re = re.compile(r'\\s*,\\s*')\ndef patch_vary_headers(response, newheaders):\n \"\"\"\n Adds (or updates) the \"Vary\" header in the given HttpResponse object.\n newheaders is a list of header names that should be in \"Vary\". 
Existing\n headers in \"Vary\" aren't removed.\n \"\"\"\n # Note that we need to keep the original order intact, because cache\n # implementations may rely on the order of the Vary contents in, say,\n # computing an MD5 hash.\n if 'Vary' in response.headers:\n vary_headers = cc_delim_re.split(response.headers['Vary'])\n else:\n vary_headers = []\n # Use .lower() here so we treat headers as case-insensitive.\n existing_headers = set([header.lower() for header in vary_headers])\n additional_headers = [newheader for newheader in newheaders\n if newheader.lower() not in existing_headers]\n response.headers['Vary'] = ', '.join(vary_headers + additional_headers)\n\n\n#from django.utils.http import same_origin\ndef same_origin(url1, url2):\n \"\"\"\n Checks if two URLs are 'same-origin'\n \"\"\"\n p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)\n return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)\n\n\nfrom aspen import Response\nfrom crypto import constant_time_compare, get_random_string\n\nREASON_NO_REFERER = \"Referer checking failed - no Referer.\"\nREASON_BAD_REFERER = \"Referer checking failed - %s does not match %s.\"\nREASON_NO_CSRF_COOKIE = \"CSRF cookie not set.\"\nREASON_BAD_TOKEN = \"CSRF token missing or incorrect.\"\n\nTOKEN_LENGTH = 32\nCSRF_TIMEOUT = timedelta(days=7)\n\n\ndef _get_new_csrf_key():\n return get_random_string(TOKEN_LENGTH)\n\n\ndef _sanitize_token(token):\n # Allow only alphanum, and ensure we return a 'str' for the sake\n # of the post processing middleware.\n if len(token) > TOKEN_LENGTH:\n return _get_new_csrf_key()\n token = re.sub('[^a-zA-Z0-9]+', '', str(token.decode('ascii', 'ignore')))\n if token == \"\":\n # In case the cookie has been truncated to nothing at some point.\n return _get_new_csrf_key()\n return token\n\ndef _is_secure(request):\n import gittip\n return gittip.canonical_scheme == 'https'\n\ndef _get_host(request):\n \"\"\"Returns the HTTP host using the request headers.\n \"\"\"\n return request.headers.get('X-Forwarded-Host', request.headers['Host'])\n\n\n\ndef inbound(request):\n \"\"\"Given a Request object, reject it if it's a forgery.\n \"\"\"\n if request.line.uri.startswith('/assets/'): return\n if request.line.uri.startswith('/callbacks/'): return\n\n try:\n csrf_token = _sanitize_token(request.headers.cookie['csrf_token'].value)\n except KeyError:\n csrf_token = None\n\n request.context['csrf_token'] = csrf_token or _get_new_csrf_key()\n\n # Assume that anything not defined as 'safe' by RC2616 needs protection\n if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n\n if _is_secure(request):\n # Suppose user visits http://example.com/\n # An active network attacker (man-in-the-middle, MITM) sends a\n # POST form that targets https://example.com/detonate-bomb/ and\n # submits it via JavaScript.\n #\n # The attacker will need to provide a CSRF cookie and token, but\n # that's no problem for a MITM and the session-independent\n # nonce we're using. So the MITM can circumvent the CSRF\n # protection. This is true for any HTTP connection, but anyone\n # using HTTPS expects better! For this reason, for\n # https://example.com/ we need additional protection that treats\n # http://example.com/ as completely untrusted. Under HTTPS,\n # Barth et al. 
found that the Referer header is missing for\n # same-domain requests in only about 0.2% of cases or less, so\n # we can use strict Referer checking.\n referer = request.headers.get('Referer')\n if referer is None:\n raise Response(403, REASON_NO_REFERER)\n\n good_referer = 'https://%s/' % _get_host(request)\n if not same_origin(referer, good_referer):\n reason = REASON_BAD_REFERER % (referer, good_referer)\n log_dammit(reason)\n raise Response(403, reason)\n\n if csrf_token is None:\n raise Response(403, REASON_NO_CSRF_COOKIE)\n\n # Check non-cookie token for match.\n request_csrf_token = \"\"\n if request.line.method == \"POST\":\n request_csrf_token = request.body.get('csrf_token', '')\n\n if request_csrf_token == \"\":\n # Fall back to X-CSRF-TOKEN, to make things easier for AJAX,\n # and possible for PUT/DELETE.\n request_csrf_token = request.headers.get('X-CSRF-TOKEN', '')\n\n if not constant_time_compare(request_csrf_token, csrf_token):\n raise Response(403, REASON_BAD_TOKEN)\n\n\ndef outbound(request, response):\n \"\"\"Store the latest CSRF token as a cookie.\n \"\"\"\n csrf_token = request.context.get('csrf_token')\n if csrf_token:\n response.set_cookie('csrf_token', csrf_token, expires=CSRF_TIMEOUT, httponly=False)\n\n # Content varies with the CSRF cookie, so set the Vary header.\n patch_vary_headers(response, ('Cookie',))\n", "path": "gittip/security/csrf.py"}]}
2,098
252
gh_patches_debug_14676
rasdani/github-patches
git_diff
pymedusa__Medusa-6751
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [APP SUBMITTED]: AttributeError: 'NoneType' object has no attribute 'find_all' ### INFO **Python Version**: `3.7.3rc1 (default, Mar 13 2019, 11:01:15) [GCC 8.2.0]` **Operating System**: `Linux-4.9.35-v7+-armv7l-with-debian-buster-sid` **Locale**: `UTF-8` **Branch**: [develop](../tree/develop) **Database**: `44.14` **Commit**: pymedusa/Medusa@429bbf9f1c90de20036a86e3512ed895a0fe5f2c **Link to Log**: https://gist.github.com/63aae653b5bade13d837f97ec5089a0f ### ERROR <pre> 2019-05-26 11:01:29 ERROR SEARCHQUEUE-DAILY-SEARCH :: [BTDB] :: [429bbf9] DailySearchQueueItem Exception, error: 'NoneType' object has no attribute 'find_all' Traceback (most recent call last): File "/home/pi/Medusa/<a href="../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/search/queue.py#L263">medusa/search/queue.py</a>", line 263, in run found_results = search_for_needed_episodes(self.scheduler_start_time, force=self.force) File "/home/pi/Medusa/<a href="../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/search/core.py#L524">medusa/search/core.py</a>", line 524, in search_for_needed_episodes cur_provider.cache.update_cache(scheduler_start_time) File "/home/pi/Medusa/<a href="../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/tv/cache.py#L207">medusa/tv/cache.py</a>", line 207, in update_cache data = self._get_rss_data() File "/home/pi/Medusa/<a href="../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/tv/cache.py#L190">medusa/tv/cache.py</a>", line 190, in _get_rss_data return {'entries': self.provider.search(self.search_params)} File "/home/pi/Medusa/<a href="../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/providers/torrent/html/btdb.py#L78">medusa/providers/torrent/html/btdb.py</a>", line 78, in search results += self.parse(response.text, mode) File "/home/pi/Medusa/<a href="../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/providers/torrent/html/btdb.py#L99">medusa/providers/torrent/html/btdb.py</a>", line 99, in parse torrent_rows = table_body.find_all('li', class_='recent-item') AttributeError: 'NoneType' object has no attribute 'find_all' </pre> --- _STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `medusa/providers/torrent/html/btdb.py` Content: ``` 1 # coding=utf-8 2 3 """Provider code for BTDB.""" 4 5 from __future__ import unicode_literals 6 7 import logging 8 9 from medusa import tv 10 from medusa.bs4_parser import BS4Parser 11 from medusa.helper.common import convert_size 12 from medusa.logger.adapters.style import BraceAdapter 13 from medusa.providers.torrent.torrent_provider import TorrentProvider 14 15 from requests.compat import urljoin 16 17 log = BraceAdapter(logging.getLogger(__name__)) 18 log.logger.addHandler(logging.NullHandler()) 19 20 21 class BTDBProvider(TorrentProvider): 22 """BTDB Torrent provider.""" 23 24 def __init__(self): 25 """Initialize the class.""" 26 super(BTDBProvider, self).__init__('BTDB') 27 28 # Credentials 29 self.public = True 30 31 # URLs 32 self.url = 'https://btdb.eu' 33 self.urls = { 34 'daily': urljoin(self.url, 'recent'), 35 } 36 37 # Miscellaneous Options 38 39 # Cache 40 self.cache = tv.Cache(self, min_time=20) 41 42 def search(self, search_strings, age=0, ep_obj=None, **kwargs): 43 """ 44 Search a provider and parse the results. 
45 46 :param search_strings: A dict with mode (key) and the search value (value) 47 :param age: Not used 48 :param ep_obj: Not used 49 :returns: A list of search results (structure) 50 """ 51 results = [] 52 53 # Search Params 54 search_params = { 55 'category': 'show', 56 } 57 58 for mode in search_strings: 59 log.debug('Search mode: {0}', mode) 60 61 for search_string in search_strings[mode]: 62 search_url = self.urls['daily'] 63 64 if mode != 'RSS': 65 search_url = self.url 66 67 search_params['search'] = search_string 68 search_params['sort'] = 'popular' 69 70 log.debug('Search string: {search}', 71 {'search': search_string}) 72 73 response = self.session.get(search_url, params=search_params) 74 if not response or not response.text: 75 log.debug('No data returned from provider') 76 continue 77 78 results += self.parse(response.text, mode) 79 80 return results 81 82 def parse(self, data, mode): 83 """ 84 Parse search results for items. 85 86 :param data: The raw response from a search 87 :param mode: The current mode used to search, e.g. RSS 88 89 :return: A list of items found 90 """ 91 items = [] 92 93 with BS4Parser(data, 'html5lib') as html: 94 if mode != 'RSS': 95 table_body = html.find('div', class_='search-ret') 96 torrent_rows = table_body.find_all('li', class_='search-ret-item') 97 else: 98 table_body = html.find('div', class_='recent') 99 torrent_rows = table_body.find_all('li', class_='recent-item') 100 101 # Continue only if at least one release is found 102 if not table_body: 103 log.debug('Data returned from provider does not contain any torrents') 104 return items 105 106 for row in torrent_rows: 107 try: 108 109 title = row.h2.find('a').get('title') 110 download_url = row.div.find('a').get('href') 111 if not all([title, download_url]): 112 continue 113 114 download_url += self._custom_trackers 115 116 spans = row.find('div').find_all('span') 117 118 seeders = int(spans[3].get_text().replace(',', '')) 119 leechers = int(spans[4].get_text().replace(',', '')) 120 121 torrent_size = spans[0].get_text() 122 size = convert_size(torrent_size, default=-1) 123 124 pubdate_raw = spans[2].get_text() 125 pubdate = self.parse_pubdate(pubdate_raw) 126 127 item = { 128 'title': title, 129 'link': download_url, 130 'size': size, 131 'seeders': seeders, 132 'leechers': leechers, 133 'pubdate': pubdate, 134 } 135 136 if mode != 'RSS': 137 log.debug('Found result: {0} with {1} seeders and {2} leechers', 138 title, seeders, leechers) 139 140 items.append(item) 141 except (AttributeError, TypeError, KeyError, ValueError, IndexError): 142 log.exception('Failed parsing provider.') 143 144 return items 145 146 147 provider = BTDBProvider() 148 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/medusa/providers/torrent/html/btdb.py b/medusa/providers/torrent/html/btdb.py --- a/medusa/providers/torrent/html/btdb.py +++ b/medusa/providers/torrent/html/btdb.py @@ -91,12 +91,11 @@ items = [] with BS4Parser(data, 'html5lib') as html: - if mode != 'RSS': - table_body = html.find('div', class_='search-ret') - torrent_rows = table_body.find_all('li', class_='search-ret-item') - else: - table_body = html.find('div', class_='recent') - torrent_rows = table_body.find_all('li', class_='recent-item') + cls_name = 'search-ret' if mode != 'RSS' else 'recent' + table_body = html.find('div', class_=cls_name) + torrent_rows = table_body.find_all( + 'li', class_='{0}-item'.format(cls_name) + ) if table_body else [] # Continue only if at least one release is found if not table_body:
{"golden_diff": "diff --git a/medusa/providers/torrent/html/btdb.py b/medusa/providers/torrent/html/btdb.py\n--- a/medusa/providers/torrent/html/btdb.py\n+++ b/medusa/providers/torrent/html/btdb.py\n@@ -91,12 +91,11 @@\n items = []\n \n with BS4Parser(data, 'html5lib') as html:\n- if mode != 'RSS':\n- table_body = html.find('div', class_='search-ret')\n- torrent_rows = table_body.find_all('li', class_='search-ret-item')\n- else:\n- table_body = html.find('div', class_='recent')\n- torrent_rows = table_body.find_all('li', class_='recent-item')\n+ cls_name = 'search-ret' if mode != 'RSS' else 'recent'\n+ table_body = html.find('div', class_=cls_name)\n+ torrent_rows = table_body.find_all(\n+ 'li', class_='{0}-item'.format(cls_name)\n+ ) if table_body else []\n \n # Continue only if at least one release is found\n if not table_body:\n", "issue": "[APP SUBMITTED]: AttributeError: 'NoneType' object has no attribute 'find_all'\n\n### INFO\n**Python Version**: `3.7.3rc1 (default, Mar 13 2019, 11:01:15) [GCC 8.2.0]`\n**Operating System**: `Linux-4.9.35-v7+-armv7l-with-debian-buster-sid`\n**Locale**: `UTF-8`\n**Branch**: [develop](../tree/develop)\n**Database**: `44.14`\n**Commit**: pymedusa/Medusa@429bbf9f1c90de20036a86e3512ed895a0fe5f2c\n**Link to Log**: https://gist.github.com/63aae653b5bade13d837f97ec5089a0f\n### ERROR\n<pre>\n2019-05-26 11:01:29 ERROR SEARCHQUEUE-DAILY-SEARCH :: [BTDB] :: [429bbf9] DailySearchQueueItem Exception, error: 'NoneType' object has no attribute 'find_all'\nTraceback (most recent call last):\n File \"/home/pi/Medusa/<a href=\"../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/search/queue.py#L263\">medusa/search/queue.py</a>\", line 263, in run\n found_results = search_for_needed_episodes(self.scheduler_start_time, force=self.force)\n File \"/home/pi/Medusa/<a href=\"../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/search/core.py#L524\">medusa/search/core.py</a>\", line 524, in search_for_needed_episodes\n cur_provider.cache.update_cache(scheduler_start_time)\n File \"/home/pi/Medusa/<a href=\"../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/tv/cache.py#L207\">medusa/tv/cache.py</a>\", line 207, in update_cache\n data = self._get_rss_data()\n File \"/home/pi/Medusa/<a href=\"../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/tv/cache.py#L190\">medusa/tv/cache.py</a>\", line 190, in _get_rss_data\n return {'entries': self.provider.search(self.search_params)}\n File \"/home/pi/Medusa/<a href=\"../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/providers/torrent/html/btdb.py#L78\">medusa/providers/torrent/html/btdb.py</a>\", line 78, in search\n results += self.parse(response.text, mode)\n File \"/home/pi/Medusa/<a href=\"../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/providers/torrent/html/btdb.py#L99\">medusa/providers/torrent/html/btdb.py</a>\", line 99, in parse\n torrent_rows = table_body.find_all('li', class_='recent-item')\nAttributeError: 'NoneType' object has no attribute 'find_all'\n</pre>\n---\n_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators\n\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for BTDB.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = 
BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass BTDBProvider(TorrentProvider):\n \"\"\"BTDB Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(BTDBProvider, self).__init__('BTDB')\n\n # Credentials\n self.public = True\n\n # URLs\n self.url = 'https://btdb.eu'\n self.urls = {\n 'daily': urljoin(self.url, 'recent'),\n }\n\n # Miscellaneous Options\n\n # Cache\n self.cache = tv.Cache(self, min_time=20)\n\n def search(self, search_strings, age=0, ep_obj=None, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n\n # Search Params\n search_params = {\n 'category': 'show',\n }\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n search_url = self.urls['daily']\n\n if mode != 'RSS':\n search_url = self.url\n\n search_params['search'] = search_string\n search_params['sort'] = 'popular'\n\n log.debug('Search string: {search}',\n {'search': search_string})\n\n response = self.session.get(search_url, params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n if mode != 'RSS':\n table_body = html.find('div', class_='search-ret')\n torrent_rows = table_body.find_all('li', class_='search-ret-item')\n else:\n table_body = html.find('div', class_='recent')\n torrent_rows = table_body.find_all('li', class_='recent-item')\n\n # Continue only if at least one release is found\n if not table_body:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n for row in torrent_rows:\n try:\n\n title = row.h2.find('a').get('title')\n download_url = row.div.find('a').get('href')\n if not all([title, download_url]):\n continue\n\n download_url += self._custom_trackers\n\n spans = row.find('div').find_all('span')\n\n seeders = int(spans[3].get_text().replace(',', ''))\n leechers = int(spans[4].get_text().replace(',', ''))\n\n torrent_size = spans[0].get_text()\n size = convert_size(torrent_size, default=-1)\n\n pubdate_raw = spans[2].get_text()\n pubdate = self.parse_pubdate(pubdate_raw)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n\nprovider = BTDBProvider()\n", "path": "medusa/providers/torrent/html/btdb.py"}], "after_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for BTDB.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom 
medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass BTDBProvider(TorrentProvider):\n \"\"\"BTDB Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(BTDBProvider, self).__init__('BTDB')\n\n # Credentials\n self.public = True\n\n # URLs\n self.url = 'https://btdb.eu'\n self.urls = {\n 'daily': urljoin(self.url, 'recent'),\n }\n\n # Miscellaneous Options\n\n # Cache\n self.cache = tv.Cache(self, min_time=20)\n\n def search(self, search_strings, age=0, ep_obj=None, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n\n # Search Params\n search_params = {\n 'category': 'show',\n }\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n search_url = self.urls['daily']\n\n if mode != 'RSS':\n search_url = self.url\n\n search_params['search'] = search_string\n search_params['sort'] = 'popular'\n\n log.debug('Search string: {search}',\n {'search': search_string})\n\n response = self.session.get(search_url, params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n cls_name = 'search-ret' if mode != 'RSS' else 'recent'\n table_body = html.find('div', class_=cls_name)\n torrent_rows = table_body.find_all(\n 'li', class_='{0}-item'.format(cls_name)\n ) if table_body else []\n\n # Continue only if at least one release is found\n if not table_body:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n for row in torrent_rows:\n try:\n\n title = row.h2.find('a').get('title')\n download_url = row.div.find('a').get('href')\n if not all([title, download_url]):\n continue\n\n download_url += self._custom_trackers\n\n spans = row.find('div').find_all('span')\n\n seeders = int(spans[3].get_text().replace(',', ''))\n leechers = int(spans[4].get_text().replace(',', ''))\n\n torrent_size = spans[0].get_text()\n size = convert_size(torrent_size, default=-1)\n\n pubdate_raw = spans[2].get_text()\n pubdate = self.parse_pubdate(pubdate_raw)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n\nprovider = BTDBProvider()\n", "path": "medusa/providers/torrent/html/btdb.py"}]}
2,464
253
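The medusa/BTDB record above fixes an `AttributeError` by guarding against `html.find(...)` returning `None` before `find_all` is called. A minimal standalone sketch of that defensive-parsing pattern follows; the `cls_name` selection mirrors the golden diff, while the function name and the sample HTML are invented for illustration only.

```python
from bs4 import BeautifulSoup


def parse_rows(data, mode):
    # Same container selection as the patched provider.
    cls_name = 'search-ret' if mode != 'RSS' else 'recent'
    html = BeautifulSoup(data, 'html.parser')
    table_body = html.find('div', class_=cls_name)
    # find() returns None when the div is absent, so only a real Tag
    # ever gets find_all() called on it.
    rows = table_body.find_all(
        'li', class_='{0}-item'.format(cls_name)
    ) if table_body else []
    return rows


# A page without the expected markup now yields [] instead of raising.
print(parse_rows('<html><body></body></html>', 'RSS'))
```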
gh_patches_debug_31047
rasdani/github-patches
git_diff
conan-io__conan-center-index-11189
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [package] libpciaccess/0.16: TypeError: __init__() got an unexpected keyword argument 'build_script_folder' with conan 1.49.0 ### Package and Environment Details * Package Name/Version: **libpciaccess/0.16** * Operating System+version: **Fedora 34 x86_64** * Compiler+version: **gcc 11.3.1** * Conan version: **conan 1.49.0** * Python version: **Python 3.9.13** ### Conan profile ``` Configuration for profile default: [settings] os=Linux os_build=Linux arch=x86_64 arch_build=x86_64 compiler=gcc compiler.version=11 compiler.libcxx=libstdc++11 build_type=Release [options] [conf] [build_requires] [env] ``` ### Steps to reproduce Simply run `conan install libpciaccess/0.16@ --build` using conan 1.49.0. Expected result: libpciaccess/0.16 is successfully built and made available in conan cache. Actual result: Build fails with the following error: ``` libpciaccess/0.16: libpciaccess/0.16: ERROR: Package 'dfbe50feef7f3c6223a476cd5aeadb687084a646' build failed libpciaccess/0.16: WARN: Build folder /home/ts/.conan/data/libpciaccess/0.16/_/_/build/dfbe50feef7f3c6223a476cd5aeadb687084a646 ERROR: libpciaccess/0.16: Error in build() method, line 66 autotools = Autotools(self, build_script_folder=self._source_subfolder) TypeError: __init__() got an unexpected keyword argument 'build_script_folder' ``` The problem appears to be that the changes merged in #11021 which reverts the fix introduced for #10909 in #10910 hasn't been updated in the conan center. Possibly because it reverts to a previous version? ### Logs _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `recipes/libpciaccess/all/conanfile.py` Content: ``` 1 import os 2 3 from conan.tools.gnu import Autotools, AutotoolsToolchain 4 from conans import ConanFile, tools 5 from conans.errors import ConanInvalidConfiguration 6 7 required_conan_version = ">=1.33.0" 8 9 10 class LibPciAccessConan(ConanFile): 11 name = "libpciaccess" 12 description = "Generic PCI access library" 13 topics = ("pci", "xorg") 14 url = "https://github.com/conan-io/conan-center-index" 15 homepage = "https://gitlab.freedesktop.org/xorg/lib/libpciaccess" 16 license = "MIT", "X11" 17 18 settings = "os", "arch", "compiler", "build_type" 19 options = {"shared": [True, False], "fPIC": [True, False]} 20 default_options = {"shared": False, "fPIC": True} 21 22 _source_subfolder = "source_subfolder" 23 _build_subfolder = "build_subfolder" 24 25 def validate(self): 26 def is_supported(settings): 27 if settings.os in ("Linux", "FreeBSD", "SunOS"): 28 return True 29 return settings.os == "Windows" and settings.get_safe("os.subsystem") == "cygwin" 30 if not is_supported(self.settings): 31 raise ConanInvalidConfiguration("Unsupported architecture.") 32 33 def config_options(self): 34 if self.settings.os == "Windows": 35 del self.options.fPIC 36 37 def configure(self): 38 if self.options.shared: 39 del self.options.fPIC 40 del self.settings.compiler.libcxx 41 del self.settings.compiler.cppstd 42 43 def build_requirements(self): 44 self.build_requires("pkgconf/1.7.4") 45 self.build_requires("xorg-macros/1.19.3") 46 self.build_requires("libtool/2.4.6") 47 48 def generate(self): 49 tc = AutotoolsToolchain(self) 50 tc.default_configure_install_args = True 51 tc.generate() 52 53 def source(self): 54 tools.get(**self.conan_data["sources"][self.version], 55 strip_root=True, 
destination=self._source_subfolder) 56 57 def build(self): 58 # autoreconf 59 self.run("{} -fiv".format(tools.get_env("AUTORECONF") or "autoreconf"), 60 win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder) 61 62 autotools = Autotools(self) 63 autotools.configure(build_script_folder=self._source_subfolder) 64 autotools.make() 65 66 def package(self): 67 self.copy(pattern="COPYING", dst="licenses", 68 src=self._source_subfolder) 69 70 autotools = Autotools(self) 71 autotools.install() 72 73 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig")) 74 tools.remove_files_by_mask(os.path.join( 75 self.package_folder, "lib"), "*.la") 76 77 def package_info(self): 78 self.cpp_info.libs = tools.collect_libs(self) 79 self.cpp_info.set_property("pkg_config_name", "pciaccess") 80 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/recipes/libpciaccess/all/conanfile.py b/recipes/libpciaccess/all/conanfile.py --- a/recipes/libpciaccess/all/conanfile.py +++ b/recipes/libpciaccess/all/conanfile.py @@ -1,10 +1,10 @@ import os - from conan.tools.gnu import Autotools, AutotoolsToolchain from conans import ConanFile, tools from conans.errors import ConanInvalidConfiguration -required_conan_version = ">=1.33.0" + +required_conan_version = ">=1.44.0" class LibPciAccessConan(ConanFile): @@ -19,8 +19,9 @@ options = {"shared": [True, False], "fPIC": [True, False]} default_options = {"shared": False, "fPIC": True} - _source_subfolder = "source_subfolder" - _build_subfolder = "build_subfolder" + @property + def _source_subfolder(self): + return "source_subfolder" def validate(self): def is_supported(settings): @@ -55,7 +56,6 @@ strip_root=True, destination=self._source_subfolder) def build(self): - # autoreconf self.run("{} -fiv".format(tools.get_env("AUTORECONF") or "autoreconf"), win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder) @@ -75,5 +75,5 @@ self.package_folder, "lib"), "*.la") def package_info(self): - self.cpp_info.libs = tools.collect_libs(self) + self.cpp_info.libs = ["pciaccess"] self.cpp_info.set_property("pkg_config_name", "pciaccess")
{"golden_diff": "diff --git a/recipes/libpciaccess/all/conanfile.py b/recipes/libpciaccess/all/conanfile.py\n--- a/recipes/libpciaccess/all/conanfile.py\n+++ b/recipes/libpciaccess/all/conanfile.py\n@@ -1,10 +1,10 @@\n import os\n-\n from conan.tools.gnu import Autotools, AutotoolsToolchain\n from conans import ConanFile, tools\n from conans.errors import ConanInvalidConfiguration\n \n-required_conan_version = \">=1.33.0\"\n+\n+required_conan_version = \">=1.44.0\"\n \n \n class LibPciAccessConan(ConanFile):\n@@ -19,8 +19,9 @@\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n \n- _source_subfolder = \"source_subfolder\"\n- _build_subfolder = \"build_subfolder\"\n+ @property\n+ def _source_subfolder(self):\n+ return \"source_subfolder\"\n \n def validate(self):\n def is_supported(settings):\n@@ -55,7 +56,6 @@\n strip_root=True, destination=self._source_subfolder)\n \n def build(self):\n- # autoreconf\n self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\") or \"autoreconf\"),\n win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)\n \n@@ -75,5 +75,5 @@\n self.package_folder, \"lib\"), \"*.la\")\n \n def package_info(self):\n- self.cpp_info.libs = tools.collect_libs(self)\n+ self.cpp_info.libs = [\"pciaccess\"]\n self.cpp_info.set_property(\"pkg_config_name\", \"pciaccess\")\n", "issue": "[package] libpciaccess/0.16: TypeError: __init__() got an unexpected keyword argument 'build_script_folder' with conan 1.49.0\n### Package and Environment Details\r\n\r\n* Package Name/Version: **libpciaccess/0.16**\r\n* Operating System+version: **Fedora 34 x86_64**\r\n* Compiler+version: **gcc 11.3.1**\r\n* Conan version: **conan 1.49.0**\r\n* Python version: **Python 3.9.13**\r\n\r\n\r\n### Conan profile\r\n\r\n```\r\nConfiguration for profile default:\r\n\r\n[settings]\r\nos=Linux\r\nos_build=Linux\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=gcc\r\ncompiler.version=11\r\ncompiler.libcxx=libstdc++11\r\nbuild_type=Release\r\n[options]\r\n[conf]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\nSimply run `conan install libpciaccess/0.16@ --build` using conan 1.49.0.\r\n\r\nExpected result:\r\nlibpciaccess/0.16 is successfully built and made available in conan cache.\r\n\r\nActual result:\r\nBuild fails with the following error:\r\n\r\n```\r\nlibpciaccess/0.16: \r\nlibpciaccess/0.16: ERROR: Package 'dfbe50feef7f3c6223a476cd5aeadb687084a646' build failed\r\nlibpciaccess/0.16: WARN: Build folder /home/ts/.conan/data/libpciaccess/0.16/_/_/build/dfbe50feef7f3c6223a476cd5aeadb687084a646\r\nERROR: libpciaccess/0.16: Error in build() method, line 66\r\n\tautotools = Autotools(self, build_script_folder=self._source_subfolder)\r\n\tTypeError: __init__() got an unexpected keyword argument 'build_script_folder'\r\n\r\n```\r\nThe problem appears to be that the changes merged in #11021 which reverts the fix introduced for #10909 in #10910 hasn't been updated in the conan center. 
Possibly because it reverts to a previous version?\r\n\r\n### Logs\r\n\r\n_No response_\n", "before_files": [{"content": "import os\n\nfrom conan.tools.gnu import Autotools, AutotoolsToolchain\nfrom conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass LibPciAccessConan(ConanFile):\n name = \"libpciaccess\"\n description = \"Generic PCI access library\"\n topics = (\"pci\", \"xorg\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://gitlab.freedesktop.org/xorg/lib/libpciaccess\"\n license = \"MIT\", \"X11\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def validate(self):\n def is_supported(settings):\n if settings.os in (\"Linux\", \"FreeBSD\", \"SunOS\"):\n return True\n return settings.os == \"Windows\" and settings.get_safe(\"os.subsystem\") == \"cygwin\"\n if not is_supported(self.settings):\n raise ConanInvalidConfiguration(\"Unsupported architecture.\")\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def build_requirements(self):\n self.build_requires(\"pkgconf/1.7.4\")\n self.build_requires(\"xorg-macros/1.19.3\")\n self.build_requires(\"libtool/2.4.6\")\n\n def generate(self):\n tc = AutotoolsToolchain(self)\n tc.default_configure_install_args = True\n tc.generate()\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n strip_root=True, destination=self._source_subfolder)\n\n def build(self):\n # autoreconf\n self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\") or \"autoreconf\"),\n win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)\n\n autotools = Autotools(self)\n autotools.configure(build_script_folder=self._source_subfolder)\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\",\n src=self._source_subfolder)\n\n autotools = Autotools(self)\n autotools.install()\n\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n tools.remove_files_by_mask(os.path.join(\n self.package_folder, \"lib\"), \"*.la\")\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n self.cpp_info.set_property(\"pkg_config_name\", \"pciaccess\")\n", "path": "recipes/libpciaccess/all/conanfile.py"}], "after_files": [{"content": "import os\nfrom conan.tools.gnu import Autotools, AutotoolsToolchain\nfrom conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nrequired_conan_version = \">=1.44.0\"\n\n\nclass LibPciAccessConan(ConanFile):\n name = \"libpciaccess\"\n description = \"Generic PCI access library\"\n topics = (\"pci\", \"xorg\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://gitlab.freedesktop.org/xorg/lib/libpciaccess\"\n license = \"MIT\", \"X11\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def validate(self):\n def is_supported(settings):\n if 
settings.os in (\"Linux\", \"FreeBSD\", \"SunOS\"):\n return True\n return settings.os == \"Windows\" and settings.get_safe(\"os.subsystem\") == \"cygwin\"\n if not is_supported(self.settings):\n raise ConanInvalidConfiguration(\"Unsupported architecture.\")\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def build_requirements(self):\n self.build_requires(\"pkgconf/1.7.4\")\n self.build_requires(\"xorg-macros/1.19.3\")\n self.build_requires(\"libtool/2.4.6\")\n\n def generate(self):\n tc = AutotoolsToolchain(self)\n tc.default_configure_install_args = True\n tc.generate()\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n strip_root=True, destination=self._source_subfolder)\n\n def build(self):\n self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\") or \"autoreconf\"),\n win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)\n\n autotools = Autotools(self)\n autotools.configure(build_script_folder=self._source_subfolder)\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\",\n src=self._source_subfolder)\n\n autotools = Autotools(self)\n autotools.install()\n\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n tools.remove_files_by_mask(os.path.join(\n self.package_folder, \"lib\"), \"*.la\")\n\n def package_info(self):\n self.cpp_info.libs = [\"pciaccess\"]\n self.cpp_info.set_property(\"pkg_config_name\", \"pciaccess\")\n", "path": "recipes/libpciaccess/all/conanfile.py"}]}
1,601
404
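The conan-center record above traces the `TypeError` to passing `build_script_folder` into the `Autotools` constructor; with the `conan.tools.gnu` API pinned by the patched recipe (Conan >= 1.44), that argument belongs on `configure()` instead. A hedged sketch of the build step, taken from the record's corrected recipe — the class name, package name, and version here are placeholders, not part of the original recipe.

```python
from conans import ConanFile
from conan.tools.gnu import Autotools


class ExampleAutotoolsRecipe(ConanFile):
    """Hypothetical recipe fragment; only the Autotools calls mirror the fix."""
    name = "example"
    version = "0.1"
    settings = "os", "arch", "compiler", "build_type"

    @property
    def _source_subfolder(self):
        return "source_subfolder"

    def build(self):
        # Regenerate the configure script inside the unpacked sources.
        self.run("autoreconf -fiv", cwd=self._source_subfolder,
                 run_environment=True)
        # Conan >= 1.44: the constructor takes only the conanfile; the script
        # location goes to configure(), not to Autotools() itself.
        autotools = Autotools(self)
        autotools.configure(build_script_folder=self._source_subfolder)
        autotools.make()
```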
gh_patches_debug_18621
rasdani/github-patches
git_diff
open-mmlab__mmdetection3d-474
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bugs in configs in configs/benchmark/ folder Thanks for your error report and we appreciate it a lot. **Checklist** 1. I have searched related issues but cannot get the expected help. 2. The bug has not been fixed in the latest version. **Describe the bug** A clear and concise description of what the bug is. **Reproduction** 1. What command or script did you run? ``` bash ./tools/dist_train.sh configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py 8 --no-validate ``` 2. Did you make any modifications on the code or config? Did you understand what you have modified? 3. What dataset did you use? Kitti **Environment** 1. Please run `python mmdet3d/utils/collect_env.py` to collect necessary environment infomation and paste it here. ![image](https://user-images.githubusercontent.com/49447593/115668492-bfd96d80-a379-11eb-8ba5-d58646d24191.png) 2. You may add addition that may be helpful for locating the problem, such as - How you installed PyTorch [e.g., pip, conda, source] - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.) **Error traceback** If applicable, paste the error trackback here. ``` File "projects/mmcv/mmcv/utils/registry.py", line 54, in build_from_cfg self.pipeline = Compose(pipeline) File "anaconda3/envs/mmdet3d/lib/python3.7/site-packages/mmdet/datasets/pipelines/compose.py", line 22, in __init__ raise type(e)(f'{obj_cls.__name__}: {e}') KeyError: "KittiDataset: 'GlobalRotScale is not in the pipeline registry'" ``` **Bug fix** If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated! The arguments' name and some type of settings are not updated in benchmark folder. Inconsistency interface between config and function Dear developers: I found there exists an inconsistent interface of "ObjectNoise" between "mmdet3d/datasets/pipelines/transforms_3d.py" and "configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py". Especially, I found that the options "loc_noise_std" and "rot_uniform_noise" mentioned in the config file do not appear in the corresponding function. I think maybe it is a new feature in progress. I appreciate developers who maintain this repo and contribute to this issue. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py` Content: ``` 1 # model settings 2 voxel_size = [0.16, 0.16, 4] 3 point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1] 4 model = dict( 5 type='VoxelNet', 6 voxel_layer=dict( 7 max_num_points=64, 8 point_cloud_range=point_cloud_range, 9 voxel_size=voxel_size, 10 max_voxels=(12000, 20000)), 11 voxel_encoder=dict( 12 type='PillarFeatureNet', 13 in_channels=4, 14 feat_channels=[64], 15 with_distance=False, 16 voxel_size=voxel_size, 17 point_cloud_range=point_cloud_range), 18 middle_encoder=dict( 19 type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]), 20 backbone=dict( 21 type='SECOND', 22 in_channels=64, 23 layer_nums=[3, 5, 5], 24 layer_strides=[2, 2, 2], 25 out_channels=[64, 128, 256]), 26 neck=dict( 27 type='SECONDFPN', 28 in_channels=[64, 128, 256], 29 upsample_strides=[1, 2, 4], 30 out_channels=[128, 128, 128]), 31 bbox_head=dict( 32 type='Anchor3DHead', 33 num_classes=1, 34 in_channels=384, 35 feat_channels=384, 36 use_direction_classifier=True, 37 anchor_generator=dict( 38 type='Anchor3DRangeGenerator', 39 ranges=[[0, -39.68, -1.78, 69.12, 39.68, -1.78]], 40 sizes=[[1.6, 3.9, 1.56]], 41 rotations=[0, 1.57], 42 reshape_out=True), 43 diff_rad_by_sin=True, 44 bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), 45 loss_cls=dict( 46 type='FocalLoss', 47 use_sigmoid=True, 48 gamma=2.0, 49 alpha=0.25, 50 loss_weight=1.0), 51 loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), 52 loss_dir=dict( 53 type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), 54 # model training and testing settings 55 train_cfg=dict( 56 assigner=dict( 57 type='MaxIoUAssigner', 58 iou_calculator=dict(type='BboxOverlapsNearest3D'), 59 pos_iou_thr=0.6, 60 neg_iou_thr=0.45, 61 min_pos_iou=0.45, 62 ignore_iof_thr=-1), 63 allowed_border=0, 64 pos_weight=-1, 65 debug=False), 66 test_cfg=dict( 67 use_rotate_nms=True, 68 nms_across_levels=False, 69 nms_thr=0.01, 70 score_thr=0.1, 71 min_bbox_size=0, 72 nms_pre=100, 73 max_num=50)) 74 75 # dataset settings 76 dataset_type = 'KittiDataset' 77 data_root = 'data/kitti/' 78 class_names = ['Car'] 79 input_modality = dict(use_lidar=True, use_camera=False) 80 db_sampler = dict( 81 data_root=data_root, 82 info_path=data_root + 'kitti_dbinfos_train.pkl', 83 rate=1.0, 84 prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)), 85 sample_groups=dict(Car=15), 86 classes=class_names) 87 88 train_pipeline = [ 89 dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), 90 dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), 91 dict(type='ObjectSample', db_sampler=db_sampler), 92 dict( 93 type='ObjectNoise', 94 num_try=100, 95 loc_noise_std=[0.25, 0.25, 0.25], 96 global_rot_range=[0.0, 0.0], 97 rot_uniform_noise=[-0.15707963267, 0.15707963267]), 98 dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), 99 dict( 100 type='GlobalRotScale', 101 rot_uniform_noise=[-0.78539816, 0.78539816], 102 scaling_uniform_noise=[0.95, 1.05]), 103 dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), 104 dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), 105 dict(type='PointShuffle'), 106 dict(type='DefaultFormatBundle3D', class_names=class_names), 107 dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) 108 ] 109 test_pipeline = [ 110 dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), 111 dict(type='PointsRangeFilter', 
point_cloud_range=point_cloud_range), 112 dict( 113 type='DefaultFormatBundle3D', 114 class_names=class_names, 115 with_label=False), 116 dict(type='Collect3D', keys=['points']) 117 ] 118 # construct a pipeline for data and gt loading in show function 119 # please keep its loading function consistent with test_pipeline (e.g. client) 120 eval_pipeline = [ 121 dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), 122 dict( 123 type='DefaultFormatBundle3D', 124 class_names=class_names, 125 with_label=False), 126 dict(type='Collect3D', keys=['points']) 127 ] 128 129 data = dict( 130 samples_per_gpu=3, 131 workers_per_gpu=3, 132 train=dict( 133 type='RepeatDataset', 134 times=2, 135 dataset=dict( 136 type=dataset_type, 137 data_root=data_root, 138 ann_file=data_root + 'kitti_infos_train.pkl', 139 split='training', 140 pts_prefix='velodyne_reduced', 141 pipeline=train_pipeline, 142 modality=input_modality, 143 classes=class_names, 144 test_mode=False)), 145 val=dict( 146 type=dataset_type, 147 data_root=data_root, 148 ann_file=data_root + 'kitti_infos_val.pkl', 149 split='training', 150 pts_prefix='velodyne_reduced', 151 pipeline=test_pipeline, 152 modality=input_modality, 153 classes=class_names, 154 test_mode=True), 155 test=dict( 156 type=dataset_type, 157 data_root=data_root, 158 ann_file=data_root + 'kitti_infos_val.pkl', 159 split='training', 160 pts_prefix='velodyne_reduced', 161 pipeline=test_pipeline, 162 modality=input_modality, 163 classes=class_names, 164 test_mode=True)) 165 # optimizer 166 lr = 0.001 # max learning rate 167 optimizer = dict( 168 type='AdamW', 169 lr=lr, 170 betas=(0.95, 0.99), # the momentum is change during training 171 weight_decay=0.01) 172 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) 173 # learning policy 174 lr_config = dict( 175 policy='cyclic', 176 target_ratio=(10, 1e-4), 177 cyclic_times=1, 178 step_ratio_up=0.4) 179 momentum_config = dict( 180 policy='cyclic', 181 target_ratio=(0.85 / 0.95, 1), 182 cyclic_times=1, 183 step_ratio_up=0.4) 184 checkpoint_config = dict(interval=1) 185 evaluation = dict(interval=1, pipeline=eval_pipeline) 186 # yapf:disable 187 log_config = dict( 188 interval=50, 189 hooks=[ 190 dict(type='TextLoggerHook'), 191 dict(type='TensorboardLoggerHook') 192 ]) 193 # yapf:enable 194 # runtime settings 195 runner = dict(type='EpochBasedRunner', max_epochs=50) 196 dist_params = dict(backend='nccl') 197 log_level = 'INFO' 198 work_dir = './work_dirs/pp_secfpn_100e' 199 load_from = None 200 resume_from = None 201 workflow = [('train', 50)] 202 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py b/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py --- a/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py +++ b/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py @@ -92,14 +92,14 @@ dict( type='ObjectNoise', num_try=100, - loc_noise_std=[0.25, 0.25, 0.25], + translation_std=[0.25, 0.25, 0.25], global_rot_range=[0.0, 0.0], - rot_uniform_noise=[-0.15707963267, 0.15707963267]), + rot_range=[-0.15707963267, 0.15707963267]), dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), dict( - type='GlobalRotScale', - rot_uniform_noise=[-0.78539816, 0.78539816], - scaling_uniform_noise=[0.95, 1.05]), + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), dict(type='PointShuffle'),
{"golden_diff": "diff --git a/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py b/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py\n--- a/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py\n+++ b/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py\n@@ -92,14 +92,14 @@\n dict(\n type='ObjectNoise',\n num_try=100,\n- loc_noise_std=[0.25, 0.25, 0.25],\n+ translation_std=[0.25, 0.25, 0.25],\n global_rot_range=[0.0, 0.0],\n- rot_uniform_noise=[-0.15707963267, 0.15707963267]),\n+ rot_range=[-0.15707963267, 0.15707963267]),\n dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),\n dict(\n- type='GlobalRotScale',\n- rot_uniform_noise=[-0.78539816, 0.78539816],\n- scaling_uniform_noise=[0.95, 1.05]),\n+ type='GlobalRotScaleTrans',\n+ rot_range=[-0.78539816, 0.78539816],\n+ scale_ratio_range=[0.95, 1.05]),\n dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),\n dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),\n dict(type='PointShuffle'),\n", "issue": "Bugs in configs in configs/benchmark/ folder\nThanks for your error report and we appreciate it a lot.\r\n\r\n**Checklist**\r\n1. I have searched related issues but cannot get the expected help.\r\n2. The bug has not been fixed in the latest version.\r\n\r\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\n**Reproduction**\r\n1. What command or script did you run?\r\n\r\n```\r\nbash ./tools/dist_train.sh configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py 8 --no-validate\r\n```\r\n\r\n2. Did you make any modifications on the code or config? Did you understand what you have modified?\r\n3. What dataset did you use?\r\nKitti\r\n\r\n**Environment**\r\n\r\n1. Please run `python mmdet3d/utils/collect_env.py` to collect necessary environment infomation and paste it here.\r\n\r\n![image](https://user-images.githubusercontent.com/49447593/115668492-bfd96d80-a379-11eb-8ba5-d58646d24191.png)\r\n\r\n2. You may add addition that may be helpful for locating the problem, such as\r\n - How you installed PyTorch [e.g., pip, conda, source]\r\n - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)\r\n\r\n**Error traceback**\r\nIf applicable, paste the error trackback here.\r\n\r\n```\r\nFile \"projects/mmcv/mmcv/utils/registry.py\", line 54, in build_from_cfg\r\n self.pipeline = Compose(pipeline)\r\nFile \"anaconda3/envs/mmdet3d/lib/python3.7/site-packages/mmdet/datasets/pipelines/compose.py\", line 22, in __init__ \r\n raise type(e)(f'{obj_cls.__name__}: {e}')\r\nKeyError: \"KittiDataset: 'GlobalRotScale is not in the pipeline registry'\"\r\n```\r\n\r\n**Bug fix**\r\nIf you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!\r\n\r\nThe arguments' name and some type of settings are not updated in benchmark folder.\r\n\nInconsistency interface between config and function\nDear developers:\r\n\r\nI found there exists an inconsistent interface of \"ObjectNoise\" between \"mmdet3d/datasets/pipelines/transforms_3d.py\" and \"configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py\". \r\n\r\nEspecially, I found that the options \"loc_noise_std\" and \"rot_uniform_noise\" mentioned in the config file do not appear in the corresponding function.\r\n\r\nI think maybe it is a new feature in progress. 
I appreciate developers who maintain this repo and contribute to this issue.\n", "before_files": [{"content": "# model settings\nvoxel_size = [0.16, 0.16, 4]\npoint_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1]\nmodel = dict(\n type='VoxelNet',\n voxel_layer=dict(\n max_num_points=64,\n point_cloud_range=point_cloud_range,\n voxel_size=voxel_size,\n max_voxels=(12000, 20000)),\n voxel_encoder=dict(\n type='PillarFeatureNet',\n in_channels=4,\n feat_channels=[64],\n with_distance=False,\n voxel_size=voxel_size,\n point_cloud_range=point_cloud_range),\n middle_encoder=dict(\n type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]),\n backbone=dict(\n type='SECOND',\n in_channels=64,\n layer_nums=[3, 5, 5],\n layer_strides=[2, 2, 2],\n out_channels=[64, 128, 256]),\n neck=dict(\n type='SECONDFPN',\n in_channels=[64, 128, 256],\n upsample_strides=[1, 2, 4],\n out_channels=[128, 128, 128]),\n bbox_head=dict(\n type='Anchor3DHead',\n num_classes=1,\n in_channels=384,\n feat_channels=384,\n use_direction_classifier=True,\n anchor_generator=dict(\n type='Anchor3DRangeGenerator',\n ranges=[[0, -39.68, -1.78, 69.12, 39.68, -1.78]],\n sizes=[[1.6, 3.9, 1.56]],\n rotations=[0, 1.57],\n reshape_out=True),\n diff_rad_by_sin=True,\n bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),\n loss_cls=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0),\n loss_dir=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)),\n # model training and testing settings\n train_cfg=dict(\n assigner=dict(\n type='MaxIoUAssigner',\n iou_calculator=dict(type='BboxOverlapsNearest3D'),\n pos_iou_thr=0.6,\n neg_iou_thr=0.45,\n min_pos_iou=0.45,\n ignore_iof_thr=-1),\n allowed_border=0,\n pos_weight=-1,\n debug=False),\n test_cfg=dict(\n use_rotate_nms=True,\n nms_across_levels=False,\n nms_thr=0.01,\n score_thr=0.1,\n min_bbox_size=0,\n nms_pre=100,\n max_num=50))\n\n# dataset settings\ndataset_type = 'KittiDataset'\ndata_root = 'data/kitti/'\nclass_names = ['Car']\ninput_modality = dict(use_lidar=True, use_camera=False)\ndb_sampler = dict(\n data_root=data_root,\n info_path=data_root + 'kitti_dbinfos_train.pkl',\n rate=1.0,\n prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),\n sample_groups=dict(Car=15),\n classes=class_names)\n\ntrain_pipeline = [\n dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),\n dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),\n dict(type='ObjectSample', db_sampler=db_sampler),\n dict(\n type='ObjectNoise',\n num_try=100,\n loc_noise_std=[0.25, 0.25, 0.25],\n global_rot_range=[0.0, 0.0],\n rot_uniform_noise=[-0.15707963267, 0.15707963267]),\n dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),\n dict(\n type='GlobalRotScale',\n rot_uniform_noise=[-0.78539816, 0.78539816],\n scaling_uniform_noise=[0.95, 1.05]),\n dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),\n dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),\n dict(type='PointShuffle'),\n dict(type='DefaultFormatBundle3D', class_names=class_names),\n dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])\n]\ntest_pipeline = [\n dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),\n dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),\n dict(\n type='DefaultFormatBundle3D',\n class_names=class_names,\n with_label=False),\n dict(type='Collect3D', 
keys=['points'])\n]\n# construct a pipeline for data and gt loading in show function\n# please keep its loading function consistent with test_pipeline (e.g. client)\neval_pipeline = [\n dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),\n dict(\n type='DefaultFormatBundle3D',\n class_names=class_names,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n]\n\ndata = dict(\n samples_per_gpu=3,\n workers_per_gpu=3,\n train=dict(\n type='RepeatDataset',\n times=2,\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n ann_file=data_root + 'kitti_infos_train.pkl',\n split='training',\n pts_prefix='velodyne_reduced',\n pipeline=train_pipeline,\n modality=input_modality,\n classes=class_names,\n test_mode=False)),\n val=dict(\n type=dataset_type,\n data_root=data_root,\n ann_file=data_root + 'kitti_infos_val.pkl',\n split='training',\n pts_prefix='velodyne_reduced',\n pipeline=test_pipeline,\n modality=input_modality,\n classes=class_names,\n test_mode=True),\n test=dict(\n type=dataset_type,\n data_root=data_root,\n ann_file=data_root + 'kitti_infos_val.pkl',\n split='training',\n pts_prefix='velodyne_reduced',\n pipeline=test_pipeline,\n modality=input_modality,\n classes=class_names,\n test_mode=True))\n# optimizer\nlr = 0.001 # max learning rate\noptimizer = dict(\n type='AdamW',\n lr=lr,\n betas=(0.95, 0.99), # the momentum is change during training\n weight_decay=0.01)\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\n# learning policy\nlr_config = dict(\n policy='cyclic',\n target_ratio=(10, 1e-4),\n cyclic_times=1,\n step_ratio_up=0.4)\nmomentum_config = dict(\n policy='cyclic',\n target_ratio=(0.85 / 0.95, 1),\n cyclic_times=1,\n step_ratio_up=0.4)\ncheckpoint_config = dict(interval=1)\nevaluation = dict(interval=1, pipeline=eval_pipeline)\n# yapf:disable\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook'),\n dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\n# runtime settings\nrunner = dict(type='EpochBasedRunner', max_epochs=50)\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = './work_dirs/pp_secfpn_100e'\nload_from = None\nresume_from = None\nworkflow = [('train', 50)]\n", "path": "configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py"}], "after_files": [{"content": "# model settings\nvoxel_size = [0.16, 0.16, 4]\npoint_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1]\nmodel = dict(\n type='VoxelNet',\n voxel_layer=dict(\n max_num_points=64,\n point_cloud_range=point_cloud_range,\n voxel_size=voxel_size,\n max_voxels=(12000, 20000)),\n voxel_encoder=dict(\n type='PillarFeatureNet',\n in_channels=4,\n feat_channels=[64],\n with_distance=False,\n voxel_size=voxel_size,\n point_cloud_range=point_cloud_range),\n middle_encoder=dict(\n type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]),\n backbone=dict(\n type='SECOND',\n in_channels=64,\n layer_nums=[3, 5, 5],\n layer_strides=[2, 2, 2],\n out_channels=[64, 128, 256]),\n neck=dict(\n type='SECONDFPN',\n in_channels=[64, 128, 256],\n upsample_strides=[1, 2, 4],\n out_channels=[128, 128, 128]),\n bbox_head=dict(\n type='Anchor3DHead',\n num_classes=1,\n in_channels=384,\n feat_channels=384,\n use_direction_classifier=True,\n anchor_generator=dict(\n type='Anchor3DRangeGenerator',\n ranges=[[0, -39.68, -1.78, 69.12, 39.68, -1.78]],\n sizes=[[1.6, 3.9, 1.56]],\n rotations=[0, 1.57],\n reshape_out=True),\n diff_rad_by_sin=True,\n bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),\n loss_cls=dict(\n 
type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0),\n loss_dir=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)),\n # model training and testing settings\n train_cfg=dict(\n assigner=dict(\n type='MaxIoUAssigner',\n iou_calculator=dict(type='BboxOverlapsNearest3D'),\n pos_iou_thr=0.6,\n neg_iou_thr=0.45,\n min_pos_iou=0.45,\n ignore_iof_thr=-1),\n allowed_border=0,\n pos_weight=-1,\n debug=False),\n test_cfg=dict(\n use_rotate_nms=True,\n nms_across_levels=False,\n nms_thr=0.01,\n score_thr=0.1,\n min_bbox_size=0,\n nms_pre=100,\n max_num=50))\n\n# dataset settings\ndataset_type = 'KittiDataset'\ndata_root = 'data/kitti/'\nclass_names = ['Car']\ninput_modality = dict(use_lidar=True, use_camera=False)\ndb_sampler = dict(\n data_root=data_root,\n info_path=data_root + 'kitti_dbinfos_train.pkl',\n rate=1.0,\n prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),\n sample_groups=dict(Car=15),\n classes=class_names)\n\ntrain_pipeline = [\n dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),\n dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),\n dict(type='ObjectSample', db_sampler=db_sampler),\n dict(\n type='ObjectNoise',\n num_try=100,\n translation_std=[0.25, 0.25, 0.25],\n global_rot_range=[0.0, 0.0],\n rot_range=[-0.15707963267, 0.15707963267]),\n dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[-0.78539816, 0.78539816],\n scale_ratio_range=[0.95, 1.05]),\n dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),\n dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),\n dict(type='PointShuffle'),\n dict(type='DefaultFormatBundle3D', class_names=class_names),\n dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])\n]\ntest_pipeline = [\n dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),\n dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),\n dict(\n type='DefaultFormatBundle3D',\n class_names=class_names,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n]\n# construct a pipeline for data and gt loading in show function\n# please keep its loading function consistent with test_pipeline (e.g. 
client)\neval_pipeline = [\n dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),\n dict(\n type='DefaultFormatBundle3D',\n class_names=class_names,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n]\n\ndata = dict(\n samples_per_gpu=3,\n workers_per_gpu=3,\n train=dict(\n type='RepeatDataset',\n times=2,\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n ann_file=data_root + 'kitti_infos_train.pkl',\n split='training',\n pts_prefix='velodyne_reduced',\n pipeline=train_pipeline,\n modality=input_modality,\n classes=class_names,\n test_mode=False)),\n val=dict(\n type=dataset_type,\n data_root=data_root,\n ann_file=data_root + 'kitti_infos_val.pkl',\n split='training',\n pts_prefix='velodyne_reduced',\n pipeline=test_pipeline,\n modality=input_modality,\n classes=class_names,\n test_mode=True),\n test=dict(\n type=dataset_type,\n data_root=data_root,\n ann_file=data_root + 'kitti_infos_val.pkl',\n split='training',\n pts_prefix='velodyne_reduced',\n pipeline=test_pipeline,\n modality=input_modality,\n classes=class_names,\n test_mode=True))\n# optimizer\nlr = 0.001 # max learning rate\noptimizer = dict(\n type='AdamW',\n lr=lr,\n betas=(0.95, 0.99), # the momentum is change during training\n weight_decay=0.01)\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\n# learning policy\nlr_config = dict(\n policy='cyclic',\n target_ratio=(10, 1e-4),\n cyclic_times=1,\n step_ratio_up=0.4)\nmomentum_config = dict(\n policy='cyclic',\n target_ratio=(0.85 / 0.95, 1),\n cyclic_times=1,\n step_ratio_up=0.4)\ncheckpoint_config = dict(interval=1)\nevaluation = dict(interval=1, pipeline=eval_pipeline)\n# yapf:disable\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook'),\n dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\n# runtime settings\nrunner = dict(type='EpochBasedRunner', max_epochs=50)\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = './work_dirs/pp_secfpn_100e'\nload_from = None\nresume_from = None\nworkflow = [('train', 50)]\n", "path": "configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py"}]}
3,286
469
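The mmdetection3d record above comes down to stale transform names and keyword arguments in the benchmark config. A minimal excerpt of the corrected augmentation entries, with the values exactly as they appear in that record's golden diff (they are the benchmark's settings, not recommendations):

```python
# Corrected data-augmentation entries for the KITTI car benchmark config.
train_aug = [
    dict(
        type='ObjectNoise',
        num_try=100,
        translation_std=[0.25, 0.25, 0.25],          # was loc_noise_std
        global_rot_range=[0.0, 0.0],
        rot_range=[-0.15707963267, 0.15707963267]),  # was rot_uniform_noise
    dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
    dict(
        type='GlobalRotScaleTrans',                  # was GlobalRotScale
        rot_range=[-0.78539816, 0.78539816],
        scale_ratio_range=[0.95, 1.05]),             # was scaling_uniform_noise
]
```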
gh_patches_debug_14824
rasdani/github-patches
git_diff
microsoft__playwright-python-190
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `ignoreDefaultArgs` launch parameter is broken I'm trying to disable default launch arguments however when this parameter is set to True one of two things happen: ``` browser: Browser = chromium.launch(ignoreDefaultArgs=True) ``` will break with: ``` File "/home/user/.local/lib/python3.8/site-packages/playwright/browser_type.py", line 66, in launch normalize_launch_params(params) File "/home/user/.local/lib/python3.8/site-packages/playwright/browser_type.py", line 140, in normalize_launch_params params["env"] = {name: str(value) for [name, value] in params["env"].items()} KeyError: 'env' ``` I've made a hotfix patch here https://github.com/Granitosaurus/playwright-python/commit/2dc0e107719097e1105e36149a7fe8238c210e38 now the browser launches fine but timesout on `launch()` call: ``` Traceback (most recent call last): File "/home/user/.local/lib/python3.8/site-packages/playwright/browser_type.py", line 68, in launch return from_channel(await self._channel.send("launch", params)) File "/home/user/.local/lib/python3.8/site-packages/playwright/connection.py", line 39, in send result = await callback.future File "/usr/lib/python3.8/asyncio/futures.py", line 260, in __await__ yield self # This tells Task to wait for completion. File "/usr/lib/python3.8/asyncio/tasks.py", line 349, in __wakeup future.result() File "/usr/lib/python3.8/asyncio/futures.py", line 178, in result raise self._exception playwright.helper.TimeoutError: Timeout 30000ms exceeded. =========================== logs =========================== <launching> /home/user/.cache/ms-playwright/chromium-799411/chrome-linux/chrome <launched> pid=301672 [err] [301699:301699:0914/061714.839670:ERROR:sandbox_linux.cc(374)] InitializeSandbox() called with multiple threads in process gpu-process. ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `playwright/browser_type.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 from pathlib import Path 16 from typing import Dict, List, Union 17 18 from playwright.browser import Browser 19 from playwright.browser_context import BrowserContext 20 from playwright.connection import ChannelOwner, from_channel 21 from playwright.helper import ( 22 ColorScheme, 23 Credentials, 24 Env, 25 Geolocation, 26 IntSize, 27 ProxyServer, 28 locals_to_params, 29 not_installed_error, 30 ) 31 from playwright.network import serialize_headers 32 33 34 class BrowserType(ChannelOwner): 35 def __init__( 36 self, parent: ChannelOwner, type: str, guid: str, initializer: Dict 37 ) -> None: 38 super().__init__(parent, type, guid, initializer) 39 40 @property 41 def name(self) -> str: 42 return self._initializer["name"] 43 44 @property 45 def executablePath(self) -> str: 46 return self._initializer["executablePath"] 47 48 async def launch( 49 self, 50 executablePath: Union[str, Path] = None, 51 args: List[str] = None, 52 ignoreDefaultArgs: Union[bool, List[str]] = None, 53 handleSIGINT: bool = None, 54 handleSIGTERM: bool = None, 55 handleSIGHUP: bool = None, 56 timeout: int = None, 57 env: Env = None, 58 headless: bool = None, 59 devtools: bool = None, 60 proxy: ProxyServer = None, 61 downloadsPath: Union[str, Path] = None, 62 slowMo: int = None, 63 chromiumSandbox: bool = None, 64 ) -> Browser: 65 params = locals_to_params(locals()) 66 normalize_launch_params(params) 67 try: 68 return from_channel(await self._channel.send("launch", params)) 69 except Exception as e: 70 if f"{self.name}-" in str(e): 71 raise not_installed_error(f'"{self.name}" browser was not found.') 72 raise e 73 74 async def launchPersistentContext( 75 self, 76 userDataDir: Union[str, Path], 77 executablePath: Union[str, Path] = None, 78 args: List[str] = None, 79 ignoreDefaultArgs: Union[bool, List[str]] = None, 80 handleSIGINT: bool = None, 81 handleSIGTERM: bool = None, 82 handleSIGHUP: bool = None, 83 timeout: int = None, 84 env: Env = None, 85 headless: bool = None, 86 devtools: bool = None, 87 proxy: ProxyServer = None, 88 downloadsPath: Union[str, Path] = None, 89 slowMo: int = None, 90 viewport: IntSize = None, 91 ignoreHTTPSErrors: bool = None, 92 javaScriptEnabled: bool = None, 93 bypassCSP: bool = None, 94 userAgent: str = None, 95 locale: str = None, 96 timezoneId: str = None, 97 geolocation: Geolocation = None, 98 permissions: List[str] = None, 99 extraHTTPHeaders: Dict[str, str] = None, 100 offline: bool = None, 101 httpCredentials: Credentials = None, 102 deviceScaleFactor: int = None, 103 isMobile: bool = None, 104 hasTouch: bool = None, 105 colorScheme: ColorScheme = None, 106 acceptDownloads: bool = None, 107 chromiumSandbox: bool = None, 108 ) -> BrowserContext: 109 userDataDir = str(Path(userDataDir)) 110 params = locals_to_params(locals()) 111 if extraHTTPHeaders: 112 params["extraHTTPHeaders"] = serialize_headers(extraHTTPHeaders) 113 normalize_launch_params(params) 114 try: 115 return from_channel( 116 await self._channel.send("launchPersistentContext", params) 117 ) 118 except Exception as e: 119 if f"{self.name}-" in str(e): 120 raise not_installed_error(f'"{self.name}" browser was not found.') 121 raise e 122 123 async def connect( 124 self, wsEndpoint: str, slowMo: int = None, timeout: int = None 125 ) -> Browser: 126 return from_channel( 127 await self._channel.send("connect", locals_to_params(locals())) 128 ) 129 130 131 def normalize_launch_params(params: Dict) -> None: 132 if "env" in params: 133 params["env"] = {name: str(value) for [name, value] in params["env"].items()} 134 if 
"ignoreDefaultArgs" in params: 135 if isinstance(params["ignoreDefaultArgs"], bool): 136 params["ignoreAllDefaultArgs"] = True 137 del params["ignoreDefaultArgs"] 138 params["env"] = {name: str(value) for [name, value] in params["env"].items()} 139 if "executablePath" in params: 140 params["executablePath"] = str(Path(params["executablePath"])) 141 if "downloadsPath" in params: 142 params["downloadsPath"] = str(Path(params["downloadsPath"])) 143 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/playwright/browser_type.py b/playwright/browser_type.py --- a/playwright/browser_type.py +++ b/playwright/browser_type.py @@ -132,10 +132,9 @@ if "env" in params: params["env"] = {name: str(value) for [name, value] in params["env"].items()} if "ignoreDefaultArgs" in params: - if isinstance(params["ignoreDefaultArgs"], bool): + if params["ignoreDefaultArgs"] is True: params["ignoreAllDefaultArgs"] = True del params["ignoreDefaultArgs"] - params["env"] = {name: str(value) for [name, value] in params["env"].items()} if "executablePath" in params: params["executablePath"] = str(Path(params["executablePath"])) if "downloadsPath" in params:
{"golden_diff": "diff --git a/playwright/browser_type.py b/playwright/browser_type.py\n--- a/playwright/browser_type.py\n+++ b/playwright/browser_type.py\n@@ -132,10 +132,9 @@\n if \"env\" in params:\n params[\"env\"] = {name: str(value) for [name, value] in params[\"env\"].items()}\n if \"ignoreDefaultArgs\" in params:\n- if isinstance(params[\"ignoreDefaultArgs\"], bool):\n+ if params[\"ignoreDefaultArgs\"] is True:\n params[\"ignoreAllDefaultArgs\"] = True\n del params[\"ignoreDefaultArgs\"]\n- params[\"env\"] = {name: str(value) for [name, value] in params[\"env\"].items()}\n if \"executablePath\" in params:\n params[\"executablePath\"] = str(Path(params[\"executablePath\"]))\n if \"downloadsPath\" in params:\n", "issue": "`ignoreDefaultArgs` launch parameter is broken\nI'm trying to disable default launch arguments however when this parameter is set to True one of two things happen:\r\n\r\n```\r\nbrowser: Browser = chromium.launch(ignoreDefaultArgs=True)\r\n```\r\nwill break with:\r\n```\r\n File \"/home/user/.local/lib/python3.8/site-packages/playwright/browser_type.py\", line 66, in launch\r\n normalize_launch_params(params)\r\n File \"/home/user/.local/lib/python3.8/site-packages/playwright/browser_type.py\", line 140, in normalize_launch_params\r\n params[\"env\"] = {name: str(value) for [name, value] in params[\"env\"].items()}\r\nKeyError: 'env'\r\n``` \r\n\r\nI've made a hotfix patch here https://github.com/Granitosaurus/playwright-python/commit/2dc0e107719097e1105e36149a7fe8238c210e38 now the browser launches fine but timesout on `launch()` call:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/user/.local/lib/python3.8/site-packages/playwright/browser_type.py\", line 68, in launch\r\n return from_channel(await self._channel.send(\"launch\", params))\r\n File \"/home/user/.local/lib/python3.8/site-packages/playwright/connection.py\", line 39, in send\r\n result = await callback.future\r\n File \"/usr/lib/python3.8/asyncio/futures.py\", line 260, in __await__\r\n yield self # This tells Task to wait for completion.\r\n File \"/usr/lib/python3.8/asyncio/tasks.py\", line 349, in __wakeup\r\n future.result()\r\n File \"/usr/lib/python3.8/asyncio/futures.py\", line 178, in result\r\n raise self._exception\r\nplaywright.helper.TimeoutError: Timeout 30000ms exceeded.\r\n=========================== logs ===========================\r\n<launching> /home/user/.cache/ms-playwright/chromium-799411/chrome-linux/chrome \r\n<launched> pid=301672\r\n[err] [301699:301699:0914/061714.839670:ERROR:sandbox_linux.cc(374)] InitializeSandbox() called with multiple threads in process gpu-process.\r\n```\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pathlib import Path\nfrom typing import Dict, List, Union\n\nfrom playwright.browser import Browser\nfrom playwright.browser_context import BrowserContext\nfrom playwright.connection import ChannelOwner, from_channel\nfrom playwright.helper import (\n 
ColorScheme,\n Credentials,\n Env,\n Geolocation,\n IntSize,\n ProxyServer,\n locals_to_params,\n not_installed_error,\n)\nfrom playwright.network import serialize_headers\n\n\nclass BrowserType(ChannelOwner):\n def __init__(\n self, parent: ChannelOwner, type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n\n @property\n def name(self) -> str:\n return self._initializer[\"name\"]\n\n @property\n def executablePath(self) -> str:\n return self._initializer[\"executablePath\"]\n\n async def launch(\n self,\n executablePath: Union[str, Path] = None,\n args: List[str] = None,\n ignoreDefaultArgs: Union[bool, List[str]] = None,\n handleSIGINT: bool = None,\n handleSIGTERM: bool = None,\n handleSIGHUP: bool = None,\n timeout: int = None,\n env: Env = None,\n headless: bool = None,\n devtools: bool = None,\n proxy: ProxyServer = None,\n downloadsPath: Union[str, Path] = None,\n slowMo: int = None,\n chromiumSandbox: bool = None,\n ) -> Browser:\n params = locals_to_params(locals())\n normalize_launch_params(params)\n try:\n return from_channel(await self._channel.send(\"launch\", params))\n except Exception as e:\n if f\"{self.name}-\" in str(e):\n raise not_installed_error(f'\"{self.name}\" browser was not found.')\n raise e\n\n async def launchPersistentContext(\n self,\n userDataDir: Union[str, Path],\n executablePath: Union[str, Path] = None,\n args: List[str] = None,\n ignoreDefaultArgs: Union[bool, List[str]] = None,\n handleSIGINT: bool = None,\n handleSIGTERM: bool = None,\n handleSIGHUP: bool = None,\n timeout: int = None,\n env: Env = None,\n headless: bool = None,\n devtools: bool = None,\n proxy: ProxyServer = None,\n downloadsPath: Union[str, Path] = None,\n slowMo: int = None,\n viewport: IntSize = None,\n ignoreHTTPSErrors: bool = None,\n javaScriptEnabled: bool = None,\n bypassCSP: bool = None,\n userAgent: str = None,\n locale: str = None,\n timezoneId: str = None,\n geolocation: Geolocation = None,\n permissions: List[str] = None,\n extraHTTPHeaders: Dict[str, str] = None,\n offline: bool = None,\n httpCredentials: Credentials = None,\n deviceScaleFactor: int = None,\n isMobile: bool = None,\n hasTouch: bool = None,\n colorScheme: ColorScheme = None,\n acceptDownloads: bool = None,\n chromiumSandbox: bool = None,\n ) -> BrowserContext:\n userDataDir = str(Path(userDataDir))\n params = locals_to_params(locals())\n if extraHTTPHeaders:\n params[\"extraHTTPHeaders\"] = serialize_headers(extraHTTPHeaders)\n normalize_launch_params(params)\n try:\n return from_channel(\n await self._channel.send(\"launchPersistentContext\", params)\n )\n except Exception as e:\n if f\"{self.name}-\" in str(e):\n raise not_installed_error(f'\"{self.name}\" browser was not found.')\n raise e\n\n async def connect(\n self, wsEndpoint: str, slowMo: int = None, timeout: int = None\n ) -> Browser:\n return from_channel(\n await self._channel.send(\"connect\", locals_to_params(locals()))\n )\n\n\ndef normalize_launch_params(params: Dict) -> None:\n if \"env\" in params:\n params[\"env\"] = {name: str(value) for [name, value] in params[\"env\"].items()}\n if \"ignoreDefaultArgs\" in params:\n if isinstance(params[\"ignoreDefaultArgs\"], bool):\n params[\"ignoreAllDefaultArgs\"] = True\n del params[\"ignoreDefaultArgs\"]\n params[\"env\"] = {name: str(value) for [name, value] in params[\"env\"].items()}\n if \"executablePath\" in params:\n params[\"executablePath\"] = str(Path(params[\"executablePath\"]))\n if \"downloadsPath\" in params:\n 
params[\"downloadsPath\"] = str(Path(params[\"downloadsPath\"]))\n", "path": "playwright/browser_type.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pathlib import Path\nfrom typing import Dict, List, Union\n\nfrom playwright.browser import Browser\nfrom playwright.browser_context import BrowserContext\nfrom playwright.connection import ChannelOwner, from_channel\nfrom playwright.helper import (\n ColorScheme,\n Credentials,\n Env,\n Geolocation,\n IntSize,\n ProxyServer,\n locals_to_params,\n not_installed_error,\n)\nfrom playwright.network import serialize_headers\n\n\nclass BrowserType(ChannelOwner):\n def __init__(\n self, parent: ChannelOwner, type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n\n @property\n def name(self) -> str:\n return self._initializer[\"name\"]\n\n @property\n def executablePath(self) -> str:\n return self._initializer[\"executablePath\"]\n\n async def launch(\n self,\n executablePath: Union[str, Path] = None,\n args: List[str] = None,\n ignoreDefaultArgs: Union[bool, List[str]] = None,\n handleSIGINT: bool = None,\n handleSIGTERM: bool = None,\n handleSIGHUP: bool = None,\n timeout: int = None,\n env: Env = None,\n headless: bool = None,\n devtools: bool = None,\n proxy: ProxyServer = None,\n downloadsPath: Union[str, Path] = None,\n slowMo: int = None,\n chromiumSandbox: bool = None,\n ) -> Browser:\n params = locals_to_params(locals())\n normalize_launch_params(params)\n try:\n return from_channel(await self._channel.send(\"launch\", params))\n except Exception as e:\n if f\"{self.name}-\" in str(e):\n raise not_installed_error(f'\"{self.name}\" browser was not found.')\n raise e\n\n async def launchPersistentContext(\n self,\n userDataDir: Union[str, Path],\n executablePath: Union[str, Path] = None,\n args: List[str] = None,\n ignoreDefaultArgs: Union[bool, List[str]] = None,\n handleSIGINT: bool = None,\n handleSIGTERM: bool = None,\n handleSIGHUP: bool = None,\n timeout: int = None,\n env: Env = None,\n headless: bool = None,\n devtools: bool = None,\n proxy: ProxyServer = None,\n downloadsPath: Union[str, Path] = None,\n slowMo: int = None,\n viewport: IntSize = None,\n ignoreHTTPSErrors: bool = None,\n javaScriptEnabled: bool = None,\n bypassCSP: bool = None,\n userAgent: str = None,\n locale: str = None,\n timezoneId: str = None,\n geolocation: Geolocation = None,\n permissions: List[str] = None,\n extraHTTPHeaders: Dict[str, str] = None,\n offline: bool = None,\n httpCredentials: Credentials = None,\n deviceScaleFactor: int = None,\n isMobile: bool = None,\n hasTouch: bool = None,\n colorScheme: ColorScheme = None,\n acceptDownloads: bool = None,\n chromiumSandbox: bool = None,\n ) -> BrowserContext:\n userDataDir = str(Path(userDataDir))\n params = locals_to_params(locals())\n if extraHTTPHeaders:\n params[\"extraHTTPHeaders\"] = serialize_headers(extraHTTPHeaders)\n normalize_launch_params(params)\n try:\n return 
from_channel(\n await self._channel.send(\"launchPersistentContext\", params)\n )\n except Exception as e:\n if f\"{self.name}-\" in str(e):\n raise not_installed_error(f'\"{self.name}\" browser was not found.')\n raise e\n\n async def connect(\n self, wsEndpoint: str, slowMo: int = None, timeout: int = None\n ) -> Browser:\n return from_channel(\n await self._channel.send(\"connect\", locals_to_params(locals()))\n )\n\n\ndef normalize_launch_params(params: Dict) -> None:\n if \"env\" in params:\n params[\"env\"] = {name: str(value) for [name, value] in params[\"env\"].items()}\n if \"ignoreDefaultArgs\" in params:\n if params[\"ignoreDefaultArgs\"] is True:\n params[\"ignoreAllDefaultArgs\"] = True\n del params[\"ignoreDefaultArgs\"]\n if \"executablePath\" in params:\n params[\"executablePath\"] = str(Path(params[\"executablePath\"]))\n if \"downloadsPath\" in params:\n params[\"downloadsPath\"] = str(Path(params[\"downloadsPath\"]))\n", "path": "playwright/browser_type.py"}]}
2,259
191
gh_patches_debug_2041
rasdani/github-patches
git_diff
Pyomo__pyomo-2633
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fixed Vars unpickle as stale ## Summary I'm not sure if this is a bug, but it seems unexpected? Anyway, if you pickle a model that has a fixed variable (not stale), when you unpickle it, it comes back as stale. ### Steps to reproduce the issue ``` from pyomo.environ import * import pickle m = ConcreteModel() m.x = Var(domain=Binary) m.x.fix(1) unpickle = pickle.loads(pickle.dumps(m)) m.x.pprint() unpickle.x.pprint() ``` ``` x : Size=1, Index=None Key : Lower : Value : Upper : Fixed : Stale : Domain None : 0 : 1 : 1 : True : False : Binary x : Size=1, Index=None Key : Lower : Value : Upper : Fixed : Stale : Domain None : 0 : 1 : 1 : True : True : Binary ``` ### Error Message It seems like these models should be identical, even up to stale-ness, right? ### Information on your system Pyomo version: main Python version: 3.8 Operating system: linux How Pyomo was installed (PyPI, conda, source): source Solver (if applicable): --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyomo/core/staleflag.py` Content: ``` 1 # ___________________________________________________________________________ 2 # 3 # Pyomo: Python Optimization Modeling Objects 4 # Copyright (c) 2008-2022 5 # National Technology and Engineering Solutions of Sandia, LLC 6 # Under the terms of Contract DE-NA0003525 with National Technology and 7 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain 8 # rights in this software. 9 # This software is distributed under the 3-clause BSD License. 10 # ___________________________________________________________________________ 11 12 class _StaleFlagManager(object): 13 def __init__(self): 14 self._current = 0 15 self.mark_all_as_stale() 16 17 def stale_mapper(self, encode, value): 18 if encode: 19 return self.is_stale(value) 20 else: 21 if value: 22 return 0 23 else: 24 self.get_flag(0) 25 26 def _get_flag(self, current_flag): 27 """Return the current global stale flag value""" 28 return self._current 29 30 def _get_flag_delayed(self, current_flag): 31 """Implement the "delayed" advancement of the global stale flag value 32 33 This will continue to return the current value of the state flag 34 until the first non-stale variable is updated (that it, it is 35 passed the current stale flag when called). This allows for 36 updating stale variable values without incrementing the global 37 stale flag, but will mark everything as stale as soon as a 38 non-stale variable value is changed. 39 40 """ 41 if current_flag == self._current: 42 self._current += 1 43 setattr(self, 'get_flag', getattr(self, '_get_flag')) 44 return self._current 45 46 def is_stale(self, val): 47 """Return ``True`` if the passed value indicated a stale variable""" 48 return val != self._current 49 50 def mark_all_as_stale(self, delayed=False): 51 """Advance the global stale flag, marking all variables as stale 52 53 This is generally called immediately before and after a batch 54 variable update (i.e. loading values from a solver result or 55 stored solution). Before the batch update 56 :meth:`mark_all_as_stale` is called with ``delayed=False``, 57 which immediately marks all variables as stale. After the batch 58 update, :meth:`mark_all_as_stale` is typically called with 59 ``delayed=True``. 
This allows additional stale variables to be 60 updated without advancing the global flag, but as soon as any 61 non-stale variable has its value changed, then the flag is 62 advanced and all other variables become stale. 63 64 """ 65 if delayed: 66 setattr(self, 'get_flag', getattr(self, '_get_flag_delayed')) 67 else: 68 setattr(self, 'get_flag', getattr(self, '_get_flag')) 69 self._current += 1 70 71 StaleFlagManager = _StaleFlagManager() 72 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pyomo/core/staleflag.py b/pyomo/core/staleflag.py --- a/pyomo/core/staleflag.py +++ b/pyomo/core/staleflag.py @@ -21,7 +21,7 @@ if value: return 0 else: - self.get_flag(0) + return self.get_flag(0) def _get_flag(self, current_flag): """Return the current global stale flag value"""
{"golden_diff": "diff --git a/pyomo/core/staleflag.py b/pyomo/core/staleflag.py\n--- a/pyomo/core/staleflag.py\n+++ b/pyomo/core/staleflag.py\n@@ -21,7 +21,7 @@\n if value:\n return 0\n else:\n- self.get_flag(0)\n+ return self.get_flag(0)\n \n def _get_flag(self, current_flag):\n \"\"\"Return the current global stale flag value\"\"\"\n", "issue": "Fixed Vars unpickle as stale\n## Summary\r\n\r\nI'm not sure if this is a bug, but it seems unexpected? Anyway, if you pickle a model that has a fixed variable (not stale), when you unpickle it, it comes back as stale.\r\n\r\n### Steps to reproduce the issue\r\n\r\n```\r\nfrom pyomo.environ import *\r\nimport pickle\r\n\r\nm = ConcreteModel()\r\nm.x = Var(domain=Binary)\r\nm.x.fix(1)\r\n\r\nunpickle = pickle.loads(pickle.dumps(m))\r\n\r\nm.x.pprint()\r\nunpickle.x.pprint()\r\n```\r\n\r\n```\r\nx : Size=1, Index=None\r\n Key : Lower : Value : Upper : Fixed : Stale : Domain\r\n None : 0 : 1 : 1 : True : False : Binary\r\nx : Size=1, Index=None\r\n Key : Lower : Value : Upper : Fixed : Stale : Domain\r\n None : 0 : 1 : 1 : True : True : Binary\r\n```\r\n\r\n### Error Message\r\n\r\nIt seems like these models should be identical, even up to stale-ness, right?\r\n\r\n### Information on your system\r\n\r\nPyomo version: main\r\nPython version: 3.8\r\nOperating system: linux\r\nHow Pyomo was installed (PyPI, conda, source): source \r\nSolver (if applicable):\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nclass _StaleFlagManager(object):\n def __init__(self):\n self._current = 0\n self.mark_all_as_stale()\n\n def stale_mapper(self, encode, value):\n if encode:\n return self.is_stale(value)\n else:\n if value:\n return 0\n else:\n self.get_flag(0)\n\n def _get_flag(self, current_flag):\n \"\"\"Return the current global stale flag value\"\"\"\n return self._current\n\n def _get_flag_delayed(self, current_flag):\n \"\"\"Implement the \"delayed\" advancement of the global stale flag value\n\n This will continue to return the current value of the state flag\n until the first non-stale variable is updated (that it, it is\n passed the current stale flag when called). This allows for\n updating stale variable values without incrementing the global\n stale flag, but will mark everything as stale as soon as a\n non-stale variable value is changed.\n\n \"\"\"\n if current_flag == self._current:\n self._current += 1\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n return self._current\n\n def is_stale(self, val):\n \"\"\"Return ``True`` if the passed value indicated a stale variable\"\"\"\n return val != self._current\n\n def mark_all_as_stale(self, delayed=False):\n \"\"\"Advance the global stale flag, marking all variables as stale\n\n This is generally called immediately before and after a batch\n variable update (i.e. loading values from a solver result or\n stored solution). Before the batch update\n :meth:`mark_all_as_stale` is called with ``delayed=False``,\n which immediately marks all variables as stale. 
After the batch\n update, :meth:`mark_all_as_stale` is typically called with\n ``delayed=True``. This allows additional stale variables to be\n updated without advancing the global flag, but as soon as any\n non-stale variable has its value changed, then the flag is\n advanced and all other variables become stale.\n\n \"\"\"\n if delayed:\n setattr(self, 'get_flag', getattr(self, '_get_flag_delayed'))\n else:\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n self._current += 1\n\nStaleFlagManager = _StaleFlagManager()\n", "path": "pyomo/core/staleflag.py"}], "after_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nclass _StaleFlagManager(object):\n def __init__(self):\n self._current = 0\n self.mark_all_as_stale()\n\n def stale_mapper(self, encode, value):\n if encode:\n return self.is_stale(value)\n else:\n if value:\n return 0\n else:\n return self.get_flag(0)\n\n def _get_flag(self, current_flag):\n \"\"\"Return the current global stale flag value\"\"\"\n return self._current\n\n def _get_flag_delayed(self, current_flag):\n \"\"\"Implement the \"delayed\" advancement of the global stale flag value\n\n This will continue to return the current value of the state flag\n until the first non-stale variable is updated (that it, it is\n passed the current stale flag when called). This allows for\n updating stale variable values without incrementing the global\n stale flag, but will mark everything as stale as soon as a\n non-stale variable value is changed.\n\n \"\"\"\n if current_flag == self._current:\n self._current += 1\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n return self._current\n\n def is_stale(self, val):\n \"\"\"Return ``True`` if the passed value indicated a stale variable\"\"\"\n return val != self._current\n\n def mark_all_as_stale(self, delayed=False):\n \"\"\"Advance the global stale flag, marking all variables as stale\n\n This is generally called immediately before and after a batch\n variable update (i.e. loading values from a solver result or\n stored solution). Before the batch update\n :meth:`mark_all_as_stale` is called with ``delayed=False``,\n which immediately marks all variables as stale. After the batch\n update, :meth:`mark_all_as_stale` is typically called with\n ``delayed=True``. This allows additional stale variables to be\n updated without advancing the global flag, but as soon as any\n non-stale variable has its value changed, then the flag is\n advanced and all other variables become stale.\n\n \"\"\"\n if delayed:\n setattr(self, 'get_flag', getattr(self, '_get_flag_delayed'))\n else:\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n self._current += 1\n\nStaleFlagManager = _StaleFlagManager()\n", "path": "pyomo/core/staleflag.py"}]}
1,314
102
gh_patches_debug_40929
rasdani/github-patches
git_diff
getsentry__sentry-python-449
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Problem with Popen arguments passed as map Greetings! I am having an issue when using the package `sorl.thumbnail` on a Django project. That package produces thumbnails from images using system installed programs (called engines.) When `sorl.thumbnail` makes a `Popen` call here: https://github.com/jazzband/sorl-thumbnail/blob/22ccd9781462a820f963f57018ad3dcef85053ed/sorl/thumbnail/engines/convert_engine.py#L57 The arguments that are received in your patched `Popen` are not being interpreted correctly. This is the suspect of the problem: https://github.com/getsentry/sentry-python/blob/2c0a2ea96882adf69742bd7e79e588564ff5b85c/sentry_sdk/integrations/stdlib.py#L143 Arguments are handled in such a way that the original `popen` crashes when trying to access `args[0]`, as the `args` parameter is interpreted (or actually becomes) an empty list. In my opinion, the problem arises because in `sorl.thumbnail` the arguments to `Popen` are passed to a `map` function first, so that a `map` object is what gets passed as arguments. I am not sure what's the effect of a `map` object being received in your `*a` of the patched `Popen`. Here's the relevant part of the traceback I am getting: ``` File "/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sorl/thumbnail/base.py", line 131, in get_thumbnail thumbnail) File "/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sorl/thumbnail/base.py", line 164, in _create_thumbnail default.engine.write(image, options, thumbnail) File "/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sorl/thumbnail/engines/convert_engine.py", line 57, in write p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) File "/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sentry_sdk/integrations/stdlib.py", line 165, in sentry_patched_popen_init return old_popen_init(self, *a, **kw) File "/usr/lib/python3.6/subprocess.py", line 729, in __init__ restore_signals, start_new_session) File "/usr/lib/python3.6/subprocess.py", line 1248, in _execute_child executable = args[0] IndexError: list index out of range ``` If I remove the Sentry integration altogether from the Django project, `sorl.thumbnail` is able to effectively call `Popen` with the "mapped" arguments, and everything works. Thanks in advance! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `sentry_sdk/integrations/stdlib.py` Content: ``` 1 import os 2 import subprocess 3 import sys 4 import platform 5 6 from sentry_sdk.hub import Hub 7 from sentry_sdk.integrations import Integration 8 from sentry_sdk.scope import add_global_event_processor 9 from sentry_sdk.tracing import EnvironHeaders, record_http_request 10 11 try: 12 from httplib import HTTPConnection # type: ignore 13 except ImportError: 14 from http.client import HTTPConnection 15 16 _RUNTIME_CONTEXT = { 17 "name": platform.python_implementation(), 18 "version": "%s.%s.%s" % (sys.version_info[:3]), 19 "build": sys.version, 20 } 21 22 23 class StdlibIntegration(Integration): 24 identifier = "stdlib" 25 26 @staticmethod 27 def setup_once(): 28 # type: () -> None 29 _install_httplib() 30 _install_subprocess() 31 32 @add_global_event_processor 33 def add_python_runtime_context(event, hint): 34 if Hub.current.get_integration(StdlibIntegration) is not None: 35 contexts = event.setdefault("contexts", {}) 36 if isinstance(contexts, dict) and "runtime" not in contexts: 37 contexts["runtime"] = _RUNTIME_CONTEXT 38 39 return event 40 41 42 def _install_httplib(): 43 # type: () -> None 44 real_putrequest = HTTPConnection.putrequest 45 real_getresponse = HTTPConnection.getresponse 46 47 def putrequest(self, method, url, *args, **kwargs): 48 hub = Hub.current 49 if hub.get_integration(StdlibIntegration) is None: 50 return real_putrequest(self, method, url, *args, **kwargs) 51 52 host = self.host 53 port = self.port 54 default_port = self.default_port 55 56 real_url = url 57 if not real_url.startswith(("http://", "https://")): 58 real_url = "%s://%s%s%s" % ( 59 default_port == 443 and "https" or "http", 60 host, 61 port != default_port and ":%s" % port or "", 62 url, 63 ) 64 65 recorder = record_http_request(hub, real_url, method) 66 data_dict = recorder.__enter__() 67 68 try: 69 rv = real_putrequest(self, method, url, *args, **kwargs) 70 71 for key, value in hub.iter_trace_propagation_headers(): 72 self.putheader(key, value) 73 except Exception: 74 recorder.__exit__(*sys.exc_info()) 75 raise 76 77 self._sentrysdk_recorder = recorder 78 self._sentrysdk_data_dict = data_dict 79 80 return rv 81 82 def getresponse(self, *args, **kwargs): 83 recorder = getattr(self, "_sentrysdk_recorder", None) 84 85 if recorder is None: 86 return real_getresponse(self, *args, **kwargs) 87 88 data_dict = getattr(self, "_sentrysdk_data_dict", None) 89 90 try: 91 rv = real_getresponse(self, *args, **kwargs) 92 93 if data_dict is not None: 94 data_dict["httplib_response"] = rv 95 data_dict["status_code"] = rv.status 96 data_dict["reason"] = rv.reason 97 except TypeError: 98 # python-requests provokes a typeerror to discover py3 vs py2 differences 99 # 100 # > TypeError("getresponse() got an unexpected keyword argument 'buffering'") 101 raise 102 except Exception: 103 recorder.__exit__(*sys.exc_info()) 104 raise 105 else: 106 recorder.__exit__(None, None, None) 107 108 return rv 109 110 HTTPConnection.putrequest = putrequest 111 HTTPConnection.getresponse = getresponse 112 113 114 def _init_argument(args, kwargs, name, position, setdefault_callback=None): 115 """ 116 given (*args, **kwargs) of a function call, retrieve (and optionally set a 117 default for) an argument by either name or position. 118 119 This is useful for wrapping functions with complex type signatures and 120 extracting a few arguments without needing to redefine that function's 121 entire type signature. 
122 """ 123 124 if name in kwargs: 125 rv = kwargs[name] 126 if rv is None and setdefault_callback is not None: 127 rv = kwargs[name] = setdefault_callback() 128 elif position < len(args): 129 rv = args[position] 130 if rv is None and setdefault_callback is not None: 131 rv = args[position] = setdefault_callback() 132 else: 133 rv = setdefault_callback and setdefault_callback() 134 if rv is not None: 135 kwargs[name] = rv 136 137 return rv 138 139 140 def _install_subprocess(): 141 old_popen_init = subprocess.Popen.__init__ 142 143 def sentry_patched_popen_init(self, *a, **kw): 144 hub = Hub.current 145 if hub.get_integration(StdlibIntegration) is None: 146 return old_popen_init(self, *a, **kw) 147 148 # do not setdefault! args is required by Popen, doing setdefault would 149 # make invalid calls valid 150 args = _init_argument(a, kw, "args", 0) or [] 151 cwd = _init_argument(a, kw, "cwd", 10) 152 153 env = None 154 155 for k, v in hub.iter_trace_propagation_headers(): 156 if env is None: 157 env = _init_argument(a, kw, "env", 11, lambda: dict(os.environ)) 158 env["SUBPROCESS_" + k.upper().replace("-", "_")] = v 159 160 with hub.span(op="subprocess", description=" ".join(map(str, args))) as span: 161 span.set_tag("subprocess.cwd", cwd) 162 163 return old_popen_init(self, *a, **kw) 164 165 subprocess.Popen.__init__ = sentry_patched_popen_init # type: ignore 166 167 168 def get_subprocess_traceparent_headers(): 169 return EnvironHeaders(os.environ, prefix="SUBPROCESS_") 170 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sentry_sdk/integrations/stdlib.py b/sentry_sdk/integrations/stdlib.py --- a/sentry_sdk/integrations/stdlib.py +++ b/sentry_sdk/integrations/stdlib.py @@ -7,12 +7,15 @@ from sentry_sdk.integrations import Integration from sentry_sdk.scope import add_global_event_processor from sentry_sdk.tracing import EnvironHeaders, record_http_request +from sentry_sdk.utils import capture_internal_exceptions, safe_repr + try: from httplib import HTTPConnection # type: ignore except ImportError: from http.client import HTTPConnection + _RUNTIME_CONTEXT = { "name": platform.python_implementation(), "version": "%s.%s.%s" % (sys.version_info[:3]), @@ -123,14 +126,18 @@ if name in kwargs: rv = kwargs[name] - if rv is None and setdefault_callback is not None: - rv = kwargs[name] = setdefault_callback() + if setdefault_callback is not None: + rv = setdefault_callback(rv) + if rv is not None: + kwargs[name] = rv elif position < len(args): rv = args[position] - if rv is None and setdefault_callback is not None: - rv = args[position] = setdefault_callback() + if setdefault_callback is not None: + rv = setdefault_callback(rv) + if rv is not None: + args[position] = rv else: - rv = setdefault_callback and setdefault_callback() + rv = setdefault_callback and setdefault_callback(None) if rv is not None: kwargs[name] = rv @@ -145,20 +152,37 @@ if hub.get_integration(StdlibIntegration) is None: return old_popen_init(self, *a, **kw) - # do not setdefault! args is required by Popen, doing setdefault would - # make invalid calls valid + # Convert from tuple to list to be able to set values. + a = list(a) + args = _init_argument(a, kw, "args", 0) or [] - cwd = _init_argument(a, kw, "cwd", 10) + cwd = _init_argument(a, kw, "cwd", 9) + + # if args is not a list or tuple (and e.g. some iterator instead), + # let's not use it at all. There are too many things that can go wrong + # when trying to collect an iterator into a list and setting that list + # into `a` again. + # + # Also invocations where `args` is not a sequence are not actually + # legal. They just happen to work under CPython. + description = None + + if isinstance(args, (list, tuple)) and len(args) < 100: + with capture_internal_exceptions(): + description = " ".join(map(str, args)) + + if description is None: + description = safe_repr(args) env = None for k, v in hub.iter_trace_propagation_headers(): if env is None: - env = _init_argument(a, kw, "env", 11, lambda: dict(os.environ)) + env = _init_argument(a, kw, "env", 10, lambda x: dict(x or os.environ)) env["SUBPROCESS_" + k.upper().replace("-", "_")] = v - with hub.span(op="subprocess", description=" ".join(map(str, args))) as span: - span.set_tag("subprocess.cwd", cwd) + with hub.span(op="subprocess", description=description) as span: + span.set_data("subprocess.cwd", cwd) return old_popen_init(self, *a, **kw)
{"golden_diff": "diff --git a/sentry_sdk/integrations/stdlib.py b/sentry_sdk/integrations/stdlib.py\n--- a/sentry_sdk/integrations/stdlib.py\n+++ b/sentry_sdk/integrations/stdlib.py\n@@ -7,12 +7,15 @@\n from sentry_sdk.integrations import Integration\n from sentry_sdk.scope import add_global_event_processor\n from sentry_sdk.tracing import EnvironHeaders, record_http_request\n+from sentry_sdk.utils import capture_internal_exceptions, safe_repr\n+\n \n try:\n from httplib import HTTPConnection # type: ignore\n except ImportError:\n from http.client import HTTPConnection\n \n+\n _RUNTIME_CONTEXT = {\n \"name\": platform.python_implementation(),\n \"version\": \"%s.%s.%s\" % (sys.version_info[:3]),\n@@ -123,14 +126,18 @@\n \n if name in kwargs:\n rv = kwargs[name]\n- if rv is None and setdefault_callback is not None:\n- rv = kwargs[name] = setdefault_callback()\n+ if setdefault_callback is not None:\n+ rv = setdefault_callback(rv)\n+ if rv is not None:\n+ kwargs[name] = rv\n elif position < len(args):\n rv = args[position]\n- if rv is None and setdefault_callback is not None:\n- rv = args[position] = setdefault_callback()\n+ if setdefault_callback is not None:\n+ rv = setdefault_callback(rv)\n+ if rv is not None:\n+ args[position] = rv\n else:\n- rv = setdefault_callback and setdefault_callback()\n+ rv = setdefault_callback and setdefault_callback(None)\n if rv is not None:\n kwargs[name] = rv\n \n@@ -145,20 +152,37 @@\n if hub.get_integration(StdlibIntegration) is None:\n return old_popen_init(self, *a, **kw)\n \n- # do not setdefault! args is required by Popen, doing setdefault would\n- # make invalid calls valid\n+ # Convert from tuple to list to be able to set values.\n+ a = list(a)\n+\n args = _init_argument(a, kw, \"args\", 0) or []\n- cwd = _init_argument(a, kw, \"cwd\", 10)\n+ cwd = _init_argument(a, kw, \"cwd\", 9)\n+\n+ # if args is not a list or tuple (and e.g. some iterator instead),\n+ # let's not use it at all. There are too many things that can go wrong\n+ # when trying to collect an iterator into a list and setting that list\n+ # into `a` again.\n+ #\n+ # Also invocations where `args` is not a sequence are not actually\n+ # legal. They just happen to work under CPython.\n+ description = None\n+\n+ if isinstance(args, (list, tuple)) and len(args) < 100:\n+ with capture_internal_exceptions():\n+ description = \" \".join(map(str, args))\n+\n+ if description is None:\n+ description = safe_repr(args)\n \n env = None\n \n for k, v in hub.iter_trace_propagation_headers():\n if env is None:\n- env = _init_argument(a, kw, \"env\", 11, lambda: dict(os.environ))\n+ env = _init_argument(a, kw, \"env\", 10, lambda x: dict(x or os.environ))\n env[\"SUBPROCESS_\" + k.upper().replace(\"-\", \"_\")] = v\n \n- with hub.span(op=\"subprocess\", description=\" \".join(map(str, args))) as span:\n- span.set_tag(\"subprocess.cwd\", cwd)\n+ with hub.span(op=\"subprocess\", description=description) as span:\n+ span.set_data(\"subprocess.cwd\", cwd)\n \n return old_popen_init(self, *a, **kw)\n", "issue": "Problem with Popen arguments passed as map\nGreetings!\r\n\r\nI am having an issue when using the package `sorl.thumbnail` on a Django project. 
That package produces thumbnails from images using system installed programs (called engines.)\r\n\r\nWhen `sorl.thumbnail` makes a `Popen` call here:\r\n\r\nhttps://github.com/jazzband/sorl-thumbnail/blob/22ccd9781462a820f963f57018ad3dcef85053ed/sorl/thumbnail/engines/convert_engine.py#L57\r\n\r\nThe arguments that are received in your patched `Popen` are not being interpreted correctly. This is the suspect of the problem:\r\n\r\nhttps://github.com/getsentry/sentry-python/blob/2c0a2ea96882adf69742bd7e79e588564ff5b85c/sentry_sdk/integrations/stdlib.py#L143\r\n\r\nArguments are handled in such a way that the original `popen` crashes when trying to access `args[0]`, as the `args` parameter is interpreted (or actually becomes) an empty list.\r\n\r\nIn my opinion, the problem arises because in `sorl.thumbnail` the arguments to `Popen` are passed to a `map` function first, so that a `map` object is what gets passed as arguments. I am not sure what's the effect of a `map` object being received in your `*a` of the patched `Popen`.\r\n\r\nHere's the relevant part of the traceback I am getting:\r\n\r\n```\r\nFile \"/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sorl/thumbnail/base.py\", line 131, in get_thumbnail\r\n thumbnail)\r\n File \"/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sorl/thumbnail/base.py\", line 164, in _create_thumbnail\r\n default.engine.write(image, options, thumbnail)\r\n File \"/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sorl/thumbnail/engines/convert_engine.py\", line 57, in write\r\n p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n File \"/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sentry_sdk/integrations/stdlib.py\", line 165, in sentry_patched_popen_init\r\n return old_popen_init(self, *a, **kw)\r\n File \"/usr/lib/python3.6/subprocess.py\", line 729, in __init__\r\n restore_signals, start_new_session)\r\n File \"/usr/lib/python3.6/subprocess.py\", line 1248, in _execute_child\r\n executable = args[0]\r\nIndexError: list index out of range\r\n```\r\n\r\nIf I remove the Sentry integration altogether from the Django project, `sorl.thumbnail` is able to effectively call `Popen` with the \"mapped\" arguments, and everything works.\r\n\r\nThanks in advance!\n", "before_files": [{"content": "import os\nimport subprocess\nimport sys\nimport platform\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.scope import add_global_event_processor\nfrom sentry_sdk.tracing import EnvironHeaders, record_http_request\n\ntry:\n from httplib import HTTPConnection # type: ignore\nexcept ImportError:\n from http.client import HTTPConnection\n\n_RUNTIME_CONTEXT = {\n \"name\": platform.python_implementation(),\n \"version\": \"%s.%s.%s\" % (sys.version_info[:3]),\n \"build\": sys.version,\n}\n\n\nclass StdlibIntegration(Integration):\n identifier = \"stdlib\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n _install_httplib()\n _install_subprocess()\n\n @add_global_event_processor\n def add_python_runtime_context(event, hint):\n if Hub.current.get_integration(StdlibIntegration) is not None:\n contexts = event.setdefault(\"contexts\", {})\n if isinstance(contexts, dict) and \"runtime\" not in contexts:\n contexts[\"runtime\"] = _RUNTIME_CONTEXT\n\n return event\n\n\ndef _install_httplib():\n # type: () -> None\n real_putrequest = HTTPConnection.putrequest\n real_getresponse = HTTPConnection.getresponse\n\n def putrequest(self, method, url, *args, 
**kwargs):\n hub = Hub.current\n if hub.get_integration(StdlibIntegration) is None:\n return real_putrequest(self, method, url, *args, **kwargs)\n\n host = self.host\n port = self.port\n default_port = self.default_port\n\n real_url = url\n if not real_url.startswith((\"http://\", \"https://\")):\n real_url = \"%s://%s%s%s\" % (\n default_port == 443 and \"https\" or \"http\",\n host,\n port != default_port and \":%s\" % port or \"\",\n url,\n )\n\n recorder = record_http_request(hub, real_url, method)\n data_dict = recorder.__enter__()\n\n try:\n rv = real_putrequest(self, method, url, *args, **kwargs)\n\n for key, value in hub.iter_trace_propagation_headers():\n self.putheader(key, value)\n except Exception:\n recorder.__exit__(*sys.exc_info())\n raise\n\n self._sentrysdk_recorder = recorder\n self._sentrysdk_data_dict = data_dict\n\n return rv\n\n def getresponse(self, *args, **kwargs):\n recorder = getattr(self, \"_sentrysdk_recorder\", None)\n\n if recorder is None:\n return real_getresponse(self, *args, **kwargs)\n\n data_dict = getattr(self, \"_sentrysdk_data_dict\", None)\n\n try:\n rv = real_getresponse(self, *args, **kwargs)\n\n if data_dict is not None:\n data_dict[\"httplib_response\"] = rv\n data_dict[\"status_code\"] = rv.status\n data_dict[\"reason\"] = rv.reason\n except TypeError:\n # python-requests provokes a typeerror to discover py3 vs py2 differences\n #\n # > TypeError(\"getresponse() got an unexpected keyword argument 'buffering'\")\n raise\n except Exception:\n recorder.__exit__(*sys.exc_info())\n raise\n else:\n recorder.__exit__(None, None, None)\n\n return rv\n\n HTTPConnection.putrequest = putrequest\n HTTPConnection.getresponse = getresponse\n\n\ndef _init_argument(args, kwargs, name, position, setdefault_callback=None):\n \"\"\"\n given (*args, **kwargs) of a function call, retrieve (and optionally set a\n default for) an argument by either name or position.\n\n This is useful for wrapping functions with complex type signatures and\n extracting a few arguments without needing to redefine that function's\n entire type signature.\n \"\"\"\n\n if name in kwargs:\n rv = kwargs[name]\n if rv is None and setdefault_callback is not None:\n rv = kwargs[name] = setdefault_callback()\n elif position < len(args):\n rv = args[position]\n if rv is None and setdefault_callback is not None:\n rv = args[position] = setdefault_callback()\n else:\n rv = setdefault_callback and setdefault_callback()\n if rv is not None:\n kwargs[name] = rv\n\n return rv\n\n\ndef _install_subprocess():\n old_popen_init = subprocess.Popen.__init__\n\n def sentry_patched_popen_init(self, *a, **kw):\n hub = Hub.current\n if hub.get_integration(StdlibIntegration) is None:\n return old_popen_init(self, *a, **kw)\n\n # do not setdefault! 
args is required by Popen, doing setdefault would\n # make invalid calls valid\n args = _init_argument(a, kw, \"args\", 0) or []\n cwd = _init_argument(a, kw, \"cwd\", 10)\n\n env = None\n\n for k, v in hub.iter_trace_propagation_headers():\n if env is None:\n env = _init_argument(a, kw, \"env\", 11, lambda: dict(os.environ))\n env[\"SUBPROCESS_\" + k.upper().replace(\"-\", \"_\")] = v\n\n with hub.span(op=\"subprocess\", description=\" \".join(map(str, args))) as span:\n span.set_tag(\"subprocess.cwd\", cwd)\n\n return old_popen_init(self, *a, **kw)\n\n subprocess.Popen.__init__ = sentry_patched_popen_init # type: ignore\n\n\ndef get_subprocess_traceparent_headers():\n return EnvironHeaders(os.environ, prefix=\"SUBPROCESS_\")\n", "path": "sentry_sdk/integrations/stdlib.py"}], "after_files": [{"content": "import os\nimport subprocess\nimport sys\nimport platform\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.scope import add_global_event_processor\nfrom sentry_sdk.tracing import EnvironHeaders, record_http_request\nfrom sentry_sdk.utils import capture_internal_exceptions, safe_repr\n\n\ntry:\n from httplib import HTTPConnection # type: ignore\nexcept ImportError:\n from http.client import HTTPConnection\n\n\n_RUNTIME_CONTEXT = {\n \"name\": platform.python_implementation(),\n \"version\": \"%s.%s.%s\" % (sys.version_info[:3]),\n \"build\": sys.version,\n}\n\n\nclass StdlibIntegration(Integration):\n identifier = \"stdlib\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n _install_httplib()\n _install_subprocess()\n\n @add_global_event_processor\n def add_python_runtime_context(event, hint):\n if Hub.current.get_integration(StdlibIntegration) is not None:\n contexts = event.setdefault(\"contexts\", {})\n if isinstance(contexts, dict) and \"runtime\" not in contexts:\n contexts[\"runtime\"] = _RUNTIME_CONTEXT\n\n return event\n\n\ndef _install_httplib():\n # type: () -> None\n real_putrequest = HTTPConnection.putrequest\n real_getresponse = HTTPConnection.getresponse\n\n def putrequest(self, method, url, *args, **kwargs):\n hub = Hub.current\n if hub.get_integration(StdlibIntegration) is None:\n return real_putrequest(self, method, url, *args, **kwargs)\n\n host = self.host\n port = self.port\n default_port = self.default_port\n\n real_url = url\n if not real_url.startswith((\"http://\", \"https://\")):\n real_url = \"%s://%s%s%s\" % (\n default_port == 443 and \"https\" or \"http\",\n host,\n port != default_port and \":%s\" % port or \"\",\n url,\n )\n\n recorder = record_http_request(hub, real_url, method)\n data_dict = recorder.__enter__()\n\n try:\n rv = real_putrequest(self, method, url, *args, **kwargs)\n\n for key, value in hub.iter_trace_propagation_headers():\n self.putheader(key, value)\n except Exception:\n recorder.__exit__(*sys.exc_info())\n raise\n\n self._sentrysdk_recorder = recorder\n self._sentrysdk_data_dict = data_dict\n\n return rv\n\n def getresponse(self, *args, **kwargs):\n recorder = getattr(self, \"_sentrysdk_recorder\", None)\n\n if recorder is None:\n return real_getresponse(self, *args, **kwargs)\n\n data_dict = getattr(self, \"_sentrysdk_data_dict\", None)\n\n try:\n rv = real_getresponse(self, *args, **kwargs)\n\n if data_dict is not None:\n data_dict[\"httplib_response\"] = rv\n data_dict[\"status_code\"] = rv.status\n data_dict[\"reason\"] = rv.reason\n except TypeError:\n # python-requests provokes a typeerror to discover py3 vs py2 differences\n #\n # > TypeError(\"getresponse() got an 
unexpected keyword argument 'buffering'\")\n raise\n except Exception:\n recorder.__exit__(*sys.exc_info())\n raise\n else:\n recorder.__exit__(None, None, None)\n\n return rv\n\n HTTPConnection.putrequest = putrequest\n HTTPConnection.getresponse = getresponse\n\n\ndef _init_argument(args, kwargs, name, position, setdefault_callback=None):\n \"\"\"\n given (*args, **kwargs) of a function call, retrieve (and optionally set a\n default for) an argument by either name or position.\n\n This is useful for wrapping functions with complex type signatures and\n extracting a few arguments without needing to redefine that function's\n entire type signature.\n \"\"\"\n\n if name in kwargs:\n rv = kwargs[name]\n if setdefault_callback is not None:\n rv = setdefault_callback(rv)\n if rv is not None:\n kwargs[name] = rv\n elif position < len(args):\n rv = args[position]\n if setdefault_callback is not None:\n rv = setdefault_callback(rv)\n if rv is not None:\n args[position] = rv\n else:\n rv = setdefault_callback and setdefault_callback(None)\n if rv is not None:\n kwargs[name] = rv\n\n return rv\n\n\ndef _install_subprocess():\n old_popen_init = subprocess.Popen.__init__\n\n def sentry_patched_popen_init(self, *a, **kw):\n hub = Hub.current\n if hub.get_integration(StdlibIntegration) is None:\n return old_popen_init(self, *a, **kw)\n\n # Convert from tuple to list to be able to set values.\n a = list(a)\n\n args = _init_argument(a, kw, \"args\", 0) or []\n cwd = _init_argument(a, kw, \"cwd\", 9)\n\n # if args is not a list or tuple (and e.g. some iterator instead),\n # let's not use it at all. There are too many things that can go wrong\n # when trying to collect an iterator into a list and setting that list\n # into `a` again.\n #\n # Also invocations where `args` is not a sequence are not actually\n # legal. They just happen to work under CPython.\n description = None\n\n if isinstance(args, (list, tuple)) and len(args) < 100:\n with capture_internal_exceptions():\n description = \" \".join(map(str, args))\n\n if description is None:\n description = safe_repr(args)\n\n env = None\n\n for k, v in hub.iter_trace_propagation_headers():\n if env is None:\n env = _init_argument(a, kw, \"env\", 10, lambda x: dict(x or os.environ))\n env[\"SUBPROCESS_\" + k.upper().replace(\"-\", \"_\")] = v\n\n with hub.span(op=\"subprocess\", description=description) as span:\n span.set_data(\"subprocess.cwd\", cwd)\n\n return old_popen_init(self, *a, **kw)\n\n subprocess.Popen.__init__ = sentry_patched_popen_init # type: ignore\n\n\ndef get_subprocess_traceparent_headers():\n return EnvironHeaders(os.environ, prefix=\"SUBPROCESS_\")\n", "path": "sentry_sdk/integrations/stdlib.py"}]}
2,586
864
gh_patches_debug_54451
rasdani/github-patches
git_diff
PennyLaneAI__pennylane-2766
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] `adjoint(adjoint(op))` returns identity ### Expected behavior When queuing `adjoint(adjoint(op))` in a tape, op should be added ### Actual behavior op and adjoint(op) are queued ### Additional information Here is a simple example, where the original circuit contains an adjoint and is taken to a new circuit that executes the original and its adjoint, Since `adjoint(adjoint())` becomes identity, the original circuit is restored (instead of the identity, as expected: ```python with qml.tape.QuantumTape() as circuit: qml.adjoint(qml.RX(0.5, wires=0)) qml.expval(qml.PauliZ(0)) base_ops = circuit.expand().copy(copy_operations=True).operations new_list_of_ops = [op for op in base_ops] new_list_of_ops += [adjoint(op) for op in base_ops[::-1]] with QuantumTape() as new_circuit: for op in new_list_of_ops: apply(op) for meas in circuit.measurements: apply(meas) dev = qml.device("default.qubit", wires=range(2)) >>> print(qml.execute([circuit], dev, gradient_fn=None), qml.execute([new_circuit], dev, gradient_fn=None)) [array([0.87758256])] [array([0.87758256])] ``` But the latter should yield 1 since it is supposed to be the identity. ### Source code _No response_ ### Tracebacks _No response_ ### System information ```shell 0.24.0.dev0 ``` ### Existing GitHub issues - [X] I have searched existing GitHub issues to make sure the issue does not already exist. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pennylane/ops/op_math/adjoint_class.py` Content: ``` 1 # Copyright 2018-2022 Xanadu Quantum Technologies Inc. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """ 15 This submodule defines the symbolic operation that indicates the adjoint of an operator. 16 """ 17 from pennylane.operation import Operator, Operation, AdjointUndefinedError, Observable 18 from pennylane.queuing import QueuingContext 19 from pennylane.math import transpose, conj 20 21 22 # pylint: disable=no-member 23 class AdjointOperation(Operation): 24 """This mixin class is dynamically added to an ``Adjoint`` instance if the provided base class is an ``Operation``. 25 26 .. warning:: 27 This mixin class should never be initialized independent of ``Adjoint``. 28 29 Overriding the dunder method ``__new__`` in ``Adjoint`` allows us to customize the creation of an instance and dynamically 30 add in parent classes. 31 32 .. note:: Once the ``Operation`` class does not contain any unique logic any more, this mixin class can be removed. 33 """ 34 35 # This inverse behavior only needs to temporarily patch behavior until in-place inversion is removed. 36 37 @property 38 def _inverse(self): 39 return self.base._inverse # pylint: disable=protected-access 40 41 @_inverse.setter 42 def _inverse(self, boolean): 43 self.base._inverse = boolean # pylint: disable=protected-access 44 # refresh name as base_name got updated. 
45 self._name = f"Adjoint({self.base.name})" 46 47 def inv(self): 48 self.base.inv() 49 # refresh name as base_name got updated. 50 self._name = f"Adjoint({self.base.name})" 51 return self 52 53 @property 54 def base_name(self): 55 return self._name 56 57 @property 58 def name(self): 59 return self._name 60 61 # pylint: disable=missing-function-docstring 62 @property 63 def basis(self): 64 return self.base.basis 65 66 @property 67 def control_wires(self): 68 return self.base.control_wires 69 70 def single_qubit_rot_angles(self): 71 omega, theta, phi = self.base.single_qubit_rot_angles() 72 return [-phi, -theta, -omega] 73 74 @property 75 def grad_method(self): 76 return self.base.grad_method 77 78 # pylint: disable=missing-function-docstring 79 @property 80 def grad_recipe(self): 81 return self.base.grad_recipe 82 83 def get_parameter_shift(self, idx): 84 return self.base.get_parameter_shift(idx) 85 86 @property 87 def parameter_frequencies(self): 88 return self.base.parameter_frequencies 89 90 def generator(self): 91 return -1.0 * self.base.generator() 92 93 94 # pylint: disable=too-many-public-methods 95 class Adjoint(Operator): 96 """ 97 The Adjoint of an operator. 98 99 Args: 100 base (~.operation.Operator): The operator that is adjointed. 101 102 .. seealso:: :func:`~.adjoint`, :meth:`~.operation.Operator.adjoint` 103 104 This is a *developer*-facing class, and the :func:`~.adjoint` transform should be used to construct instances 105 of this class. 106 107 **Example** 108 109 >>> op = Adjoint(qml.S(0)) 110 >>> op.name 111 'Adjoint(S)' 112 >>> qml.matrix(op) 113 array([[1.-0.j, 0.-0.j], 114 [0.-0.j, 0.-1.j]]) 115 >>> qml.generator(Adjoint(qml.RX(1.0, wires=0))) 116 (PauliX(wires=[0]), 0.5) 117 >>> Adjoint(qml.RX(1.234, wires=0)).data 118 [1.234] 119 120 .. details:: 121 :title: Developer Details 122 123 This class mixes in parent classes based on the inheritance tree of the provided ``Operator``. For example, when 124 provided an ``Operation``, the instance will inherit from ``Operation`` and the ``AdjointOperation`` mixin. 125 126 >>> op = Adjoint(qml.RX(1.234, wires=0)) 127 >>> isinstance(op, qml.operation.Operation) 128 True 129 >>> isinstance(op, AdjointOperation) 130 True 131 >>> op.grad_method 132 'A' 133 134 If the base class is an ``Observable`` instead, the ``Adjoint`` will be an ``Observable`` as well. 135 136 >>> op = Adjoint(1.0 * qml.PauliX(0)) 137 >>> isinstance(op, qml.operation.Observable) 138 True 139 >>> isinstance(op, qml.operation.Operation) 140 False 141 >>> Adjoint(qml.PauliX(0)) @ qml.PauliY(1) 142 Adjoint(PauliX)(wires=[0]) @ PauliY(wires=[1]) 143 144 """ 145 146 _operation_type = None # type if base inherits from operation and not observable 147 _operation_observable_type = None # type if base inherits from both operation and observable 148 _observable_type = None # type if base inherits from observable and not operation 149 150 # pylint: disable=unused-argument 151 def __new__(cls, base=None, do_queue=True, id=None): 152 """Mixes in parents based on inheritance structure of base. 153 154 Though all the types will be named "Adjoint", their *identity* and location in memory will be different 155 based on ``base``'s inheritance. 
We cache the different types in private class variables so that: 156 157 >>> Adjoint(op).__class__ is Adjoint(op).__class__ 158 True 159 >>> type(Adjoint(op)) == type(Adjoint(op)) 160 True 161 >>> Adjoint(qml.RX(1.2, wires=0)).__class__ is Adjoint._operation_type 162 True 163 >>> Adjoint(qml.PauliX(0)).__class__ is Adjoint._operation_observable_type 164 True 165 166 """ 167 168 if isinstance(base, Operation): 169 if isinstance(base, Observable): 170 if cls._operation_observable_type is None: 171 class_bases = (AdjointOperation, Adjoint, Observable, Operation) 172 cls._operation_observable_type = type( 173 "Adjoint", class_bases, dict(cls.__dict__) 174 ) 175 return object.__new__(cls._operation_observable_type) 176 177 # not an observable 178 if cls._operation_type is None: 179 class_bases = (AdjointOperation, Adjoint, Operation) 180 cls._operation_type = type("Adjoint", class_bases, dict(cls.__dict__)) 181 return object.__new__(cls._operation_type) 182 183 if isinstance(base, Observable): 184 if cls._observable_type is None: 185 class_bases = (Adjoint, Observable) 186 cls._observable_type = type("Adjoint", class_bases, dict(cls.__dict__)) 187 return object.__new__(cls._observable_type) 188 189 return object.__new__(Adjoint) 190 191 # pylint: disable=attribute-defined-outside-init 192 def __copy__(self): 193 # this method needs to be overwritten becuase the base must be copied too. 194 copied_op = object.__new__(type(self)) 195 # copied_op must maintain inheritance structure of self 196 # For example, it must keep AdjointOperation if self has it 197 # this way preserves inheritance structure 198 199 copied_base = self.base.__copy__() 200 copied_op._hyperparameters = {"base": copied_base} 201 for attr, value in vars(self).items(): 202 if attr not in {"data", "base", "_hyperparameters"}: 203 setattr(copied_op, attr, value) 204 205 return copied_op 206 207 # pylint: disable=super-init-not-called 208 def __init__(self, base=None, do_queue=True, id=None): 209 self.hyperparameters["base"] = base 210 self._id = id 211 self.queue_idx = None 212 213 self._name = f"Adjoint({self.base.name})" 214 215 if do_queue: 216 self.queue() 217 218 @property 219 def base(self): 220 """The operator that is adjointed.""" 221 return self.hyperparameters["base"] 222 223 @property 224 def data(self): 225 """Trainable parameters that the operator depends on.""" 226 return self.base.data 227 228 @data.setter 229 def data(self, new_data): 230 """Allows us to set base operation parameters.""" 231 self.base.data = new_data 232 233 @property 234 def parameters(self): 235 return self.base.parameters 236 237 @property 238 def num_params(self): 239 return self.base.num_params 240 241 @property 242 def wires(self): 243 return self.base.wires 244 245 # pylint: disable=protected-access 246 @property 247 def _wires(self): 248 return self.base._wires 249 250 # pylint: disable=protected-access 251 @_wires.setter 252 def _wires(self, new_wires): 253 # we should have a better way of updating wires than accessing a private attribute. 
254 self.base._wires = new_wires 255 256 @property 257 def num_wires(self): 258 return self.base.num_wires 259 260 @property 261 def batch_size(self): 262 return self.base.batch_size 263 264 @property 265 def ndim_params(self): 266 return self.base.ndim_params 267 268 @property 269 def is_hermitian(self): 270 return self.base.is_hermitian 271 272 def queue(self, context=QueuingContext): 273 context.safe_update_info(self.base, owner=self) 274 context.append(self, owns=self.base) 275 276 return self 277 278 def label(self, decimals=None, base_label=None, cache=None): 279 return self.base.label(decimals, base_label, cache=cache) + "†" 280 281 # pylint: disable=arguments-differ 282 @staticmethod 283 def compute_matrix(*params, base=None): 284 base_matrix = base.compute_matrix(*params, **base.hyperparameters) 285 return transpose(conj(base_matrix)) 286 287 def decomposition(self): 288 try: 289 return [self.base.adjoint()] 290 except AdjointUndefinedError: 291 base_decomp = self.base.decomposition() 292 return [Adjoint(op) for op in reversed(base_decomp)] 293 294 # pylint: disable=arguments-differ 295 @staticmethod 296 def compute_sparse_matrix(*params, base=None): 297 base_matrix = base.compute_sparse_matrix(*params, **base.hyperparameters) 298 return transpose(conj(base_matrix)).tocsr() 299 300 def eigvals(self): 301 # Cannot define ``compute_eigvals`` because Hermitian only defines ``eigvals`` 302 return conj(self.base.eigvals()) 303 304 def diagonalizing_gates(self): 305 return self.base.diagonalizing_gates() 306 307 # pylint: disable=arguments-renamed, invalid-overridden-method 308 @property 309 def has_matrix(self): 310 return self.base.has_matrix 311 312 def adjoint(self): 313 return self.base 314 315 @property 316 def _queue_category(self): 317 """Used for sorting objects into their respective lists in `QuantumTape` objects. 318 319 This property is a temporary solution that should not exist long-term and should not be 320 used outside of ``QuantumTape._process_queue``. 321 322 Returns ``_queue_cateogory`` for base operator. 323 """ 324 return self.base._queue_category # pylint: disable=protected-access 325 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pennylane/ops/op_math/adjoint_class.py b/pennylane/ops/op_math/adjoint_class.py --- a/pennylane/ops/op_math/adjoint_class.py +++ b/pennylane/ops/op_math/adjoint_class.py @@ -310,7 +310,7 @@ return self.base.has_matrix def adjoint(self): - return self.base + return self.base.queue() @property def _queue_category(self):
{"golden_diff": "diff --git a/pennylane/ops/op_math/adjoint_class.py b/pennylane/ops/op_math/adjoint_class.py\n--- a/pennylane/ops/op_math/adjoint_class.py\n+++ b/pennylane/ops/op_math/adjoint_class.py\n@@ -310,7 +310,7 @@\n return self.base.has_matrix\n \n def adjoint(self):\n- return self.base\n+ return self.base.queue()\n \n @property\n def _queue_category(self):\n", "issue": "[BUG] `adjoint(adjoint(op))` returns identity\n### Expected behavior\n\nWhen queuing `adjoint(adjoint(op))` in a tape, op should be added\n\n### Actual behavior\n\nop and adjoint(op) are queued\n\n### Additional information\n\nHere is a simple example, where the original circuit contains an adjoint and is taken to a new circuit that executes the original and its adjoint, Since `adjoint(adjoint())` becomes identity, the original circuit is restored (instead of the identity, as expected:\r\n\r\n```python\r\nwith qml.tape.QuantumTape() as circuit:\r\n qml.adjoint(qml.RX(0.5, wires=0))\r\n qml.expval(qml.PauliZ(0))\r\n\r\nbase_ops = circuit.expand().copy(copy_operations=True).operations\r\n\r\nnew_list_of_ops = [op for op in base_ops]\r\nnew_list_of_ops += [adjoint(op) for op in base_ops[::-1]]\r\n\r\nwith QuantumTape() as new_circuit:\r\n for op in new_list_of_ops:\r\n apply(op)\r\n\r\n for meas in circuit.measurements:\r\n apply(meas)\r\n\r\ndev = qml.device(\"default.qubit\", wires=range(2))\r\n>>> print(qml.execute([circuit], dev, gradient_fn=None), qml.execute([new_circuit], dev, gradient_fn=None))\r\n[array([0.87758256])] [array([0.87758256])]\r\n```\r\nBut the latter should yield 1 since it is supposed to be the identity.\n\n### Source code\n\n_No response_\n\n### Tracebacks\n\n_No response_\n\n### System information\n\n```shell\n0.24.0.dev0\n```\n\n\n### Existing GitHub issues\n\n- [X] I have searched existing GitHub issues to make sure the issue does not already exist.\n", "before_files": [{"content": "# Copyright 2018-2022 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis submodule defines the symbolic operation that indicates the adjoint of an operator.\n\"\"\"\nfrom pennylane.operation import Operator, Operation, AdjointUndefinedError, Observable\nfrom pennylane.queuing import QueuingContext\nfrom pennylane.math import transpose, conj\n\n\n# pylint: disable=no-member\nclass AdjointOperation(Operation):\n \"\"\"This mixin class is dynamically added to an ``Adjoint`` instance if the provided base class is an ``Operation``.\n\n .. warning::\n This mixin class should never be initialized independent of ``Adjoint``.\n\n Overriding the dunder method ``__new__`` in ``Adjoint`` allows us to customize the creation of an instance and dynamically\n add in parent classes.\n\n .. 
note:: Once the ``Operation`` class does not contain any unique logic any more, this mixin class can be removed.\n \"\"\"\n\n # This inverse behavior only needs to temporarily patch behavior until in-place inversion is removed.\n\n @property\n def _inverse(self):\n return self.base._inverse # pylint: disable=protected-access\n\n @_inverse.setter\n def _inverse(self, boolean):\n self.base._inverse = boolean # pylint: disable=protected-access\n # refresh name as base_name got updated.\n self._name = f\"Adjoint({self.base.name})\"\n\n def inv(self):\n self.base.inv()\n # refresh name as base_name got updated.\n self._name = f\"Adjoint({self.base.name})\"\n return self\n\n @property\n def base_name(self):\n return self._name\n\n @property\n def name(self):\n return self._name\n\n # pylint: disable=missing-function-docstring\n @property\n def basis(self):\n return self.base.basis\n\n @property\n def control_wires(self):\n return self.base.control_wires\n\n def single_qubit_rot_angles(self):\n omega, theta, phi = self.base.single_qubit_rot_angles()\n return [-phi, -theta, -omega]\n\n @property\n def grad_method(self):\n return self.base.grad_method\n\n # pylint: disable=missing-function-docstring\n @property\n def grad_recipe(self):\n return self.base.grad_recipe\n\n def get_parameter_shift(self, idx):\n return self.base.get_parameter_shift(idx)\n\n @property\n def parameter_frequencies(self):\n return self.base.parameter_frequencies\n\n def generator(self):\n return -1.0 * self.base.generator()\n\n\n# pylint: disable=too-many-public-methods\nclass Adjoint(Operator):\n \"\"\"\n The Adjoint of an operator.\n\n Args:\n base (~.operation.Operator): The operator that is adjointed.\n\n .. seealso:: :func:`~.adjoint`, :meth:`~.operation.Operator.adjoint`\n\n This is a *developer*-facing class, and the :func:`~.adjoint` transform should be used to construct instances\n of this class.\n\n **Example**\n\n >>> op = Adjoint(qml.S(0))\n >>> op.name\n 'Adjoint(S)'\n >>> qml.matrix(op)\n array([[1.-0.j, 0.-0.j],\n [0.-0.j, 0.-1.j]])\n >>> qml.generator(Adjoint(qml.RX(1.0, wires=0)))\n (PauliX(wires=[0]), 0.5)\n >>> Adjoint(qml.RX(1.234, wires=0)).data\n [1.234]\n\n .. details::\n :title: Developer Details\n\n This class mixes in parent classes based on the inheritance tree of the provided ``Operator``. For example, when\n provided an ``Operation``, the instance will inherit from ``Operation`` and the ``AdjointOperation`` mixin.\n\n >>> op = Adjoint(qml.RX(1.234, wires=0))\n >>> isinstance(op, qml.operation.Operation)\n True\n >>> isinstance(op, AdjointOperation)\n True\n >>> op.grad_method\n 'A'\n\n If the base class is an ``Observable`` instead, the ``Adjoint`` will be an ``Observable`` as well.\n\n >>> op = Adjoint(1.0 * qml.PauliX(0))\n >>> isinstance(op, qml.operation.Observable)\n True\n >>> isinstance(op, qml.operation.Operation)\n False\n >>> Adjoint(qml.PauliX(0)) @ qml.PauliY(1)\n Adjoint(PauliX)(wires=[0]) @ PauliY(wires=[1])\n\n \"\"\"\n\n _operation_type = None # type if base inherits from operation and not observable\n _operation_observable_type = None # type if base inherits from both operation and observable\n _observable_type = None # type if base inherits from observable and not operation\n\n # pylint: disable=unused-argument\n def __new__(cls, base=None, do_queue=True, id=None):\n \"\"\"Mixes in parents based on inheritance structure of base.\n\n Though all the types will be named \"Adjoint\", their *identity* and location in memory will be different\n based on ``base``'s inheritance. 
We cache the different types in private class variables so that:\n\n >>> Adjoint(op).__class__ is Adjoint(op).__class__\n True\n >>> type(Adjoint(op)) == type(Adjoint(op))\n True\n >>> Adjoint(qml.RX(1.2, wires=0)).__class__ is Adjoint._operation_type\n True\n >>> Adjoint(qml.PauliX(0)).__class__ is Adjoint._operation_observable_type\n True\n\n \"\"\"\n\n if isinstance(base, Operation):\n if isinstance(base, Observable):\n if cls._operation_observable_type is None:\n class_bases = (AdjointOperation, Adjoint, Observable, Operation)\n cls._operation_observable_type = type(\n \"Adjoint\", class_bases, dict(cls.__dict__)\n )\n return object.__new__(cls._operation_observable_type)\n\n # not an observable\n if cls._operation_type is None:\n class_bases = (AdjointOperation, Adjoint, Operation)\n cls._operation_type = type(\"Adjoint\", class_bases, dict(cls.__dict__))\n return object.__new__(cls._operation_type)\n\n if isinstance(base, Observable):\n if cls._observable_type is None:\n class_bases = (Adjoint, Observable)\n cls._observable_type = type(\"Adjoint\", class_bases, dict(cls.__dict__))\n return object.__new__(cls._observable_type)\n\n return object.__new__(Adjoint)\n\n # pylint: disable=attribute-defined-outside-init\n def __copy__(self):\n # this method needs to be overwritten becuase the base must be copied too.\n copied_op = object.__new__(type(self))\n # copied_op must maintain inheritance structure of self\n # For example, it must keep AdjointOperation if self has it\n # this way preserves inheritance structure\n\n copied_base = self.base.__copy__()\n copied_op._hyperparameters = {\"base\": copied_base}\n for attr, value in vars(self).items():\n if attr not in {\"data\", \"base\", \"_hyperparameters\"}:\n setattr(copied_op, attr, value)\n\n return copied_op\n\n # pylint: disable=super-init-not-called\n def __init__(self, base=None, do_queue=True, id=None):\n self.hyperparameters[\"base\"] = base\n self._id = id\n self.queue_idx = None\n\n self._name = f\"Adjoint({self.base.name})\"\n\n if do_queue:\n self.queue()\n\n @property\n def base(self):\n \"\"\"The operator that is adjointed.\"\"\"\n return self.hyperparameters[\"base\"]\n\n @property\n def data(self):\n \"\"\"Trainable parameters that the operator depends on.\"\"\"\n return self.base.data\n\n @data.setter\n def data(self, new_data):\n \"\"\"Allows us to set base operation parameters.\"\"\"\n self.base.data = new_data\n\n @property\n def parameters(self):\n return self.base.parameters\n\n @property\n def num_params(self):\n return self.base.num_params\n\n @property\n def wires(self):\n return self.base.wires\n\n # pylint: disable=protected-access\n @property\n def _wires(self):\n return self.base._wires\n\n # pylint: disable=protected-access\n @_wires.setter\n def _wires(self, new_wires):\n # we should have a better way of updating wires than accessing a private attribute.\n self.base._wires = new_wires\n\n @property\n def num_wires(self):\n return self.base.num_wires\n\n @property\n def batch_size(self):\n return self.base.batch_size\n\n @property\n def ndim_params(self):\n return self.base.ndim_params\n\n @property\n def is_hermitian(self):\n return self.base.is_hermitian\n\n def queue(self, context=QueuingContext):\n context.safe_update_info(self.base, owner=self)\n context.append(self, owns=self.base)\n\n return self\n\n def label(self, decimals=None, base_label=None, cache=None):\n return self.base.label(decimals, base_label, cache=cache) + \"\u2020\"\n\n # pylint: disable=arguments-differ\n @staticmethod\n def 
compute_matrix(*params, base=None):\n base_matrix = base.compute_matrix(*params, **base.hyperparameters)\n return transpose(conj(base_matrix))\n\n def decomposition(self):\n try:\n return [self.base.adjoint()]\n except AdjointUndefinedError:\n base_decomp = self.base.decomposition()\n return [Adjoint(op) for op in reversed(base_decomp)]\n\n # pylint: disable=arguments-differ\n @staticmethod\n def compute_sparse_matrix(*params, base=None):\n base_matrix = base.compute_sparse_matrix(*params, **base.hyperparameters)\n return transpose(conj(base_matrix)).tocsr()\n\n def eigvals(self):\n # Cannot define ``compute_eigvals`` because Hermitian only defines ``eigvals``\n return conj(self.base.eigvals())\n\n def diagonalizing_gates(self):\n return self.base.diagonalizing_gates()\n\n # pylint: disable=arguments-renamed, invalid-overridden-method\n @property\n def has_matrix(self):\n return self.base.has_matrix\n\n def adjoint(self):\n return self.base\n\n @property\n def _queue_category(self):\n \"\"\"Used for sorting objects into their respective lists in `QuantumTape` objects.\n\n This property is a temporary solution that should not exist long-term and should not be\n used outside of ``QuantumTape._process_queue``.\n\n Returns ``_queue_cateogory`` for base operator.\n \"\"\"\n return self.base._queue_category # pylint: disable=protected-access\n", "path": "pennylane/ops/op_math/adjoint_class.py"}], "after_files": [{"content": "# Copyright 2018-2022 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis submodule defines the symbolic operation that indicates the adjoint of an operator.\n\"\"\"\nfrom pennylane.operation import Operator, Operation, AdjointUndefinedError, Observable\nfrom pennylane.queuing import QueuingContext\nfrom pennylane.math import transpose, conj\n\n\n# pylint: disable=no-member\nclass AdjointOperation(Operation):\n \"\"\"This mixin class is dynamically added to an ``Adjoint`` instance if the provided base class is an ``Operation``.\n\n .. warning::\n This mixin class should never be initialized independent of ``Adjoint``.\n\n Overriding the dunder method ``__new__`` in ``Adjoint`` allows us to customize the creation of an instance and dynamically\n add in parent classes.\n\n .. 
note:: Once the ``Operation`` class does not contain any unique logic any more, this mixin class can be removed.\n \"\"\"\n\n # This inverse behavior only needs to temporarily patch behavior until in-place inversion is removed.\n\n @property\n def _inverse(self):\n return self.base._inverse # pylint: disable=protected-access\n\n @_inverse.setter\n def _inverse(self, boolean):\n self.base._inverse = boolean # pylint: disable=protected-access\n # refresh name as base_name got updated.\n self._name = f\"Adjoint({self.base.name})\"\n\n def inv(self):\n self.base.inv()\n # refresh name as base_name got updated.\n self._name = f\"Adjoint({self.base.name})\"\n return self\n\n @property\n def base_name(self):\n return self._name\n\n @property\n def name(self):\n return self._name\n\n # pylint: disable=missing-function-docstring\n @property\n def basis(self):\n return self.base.basis\n\n @property\n def control_wires(self):\n return self.base.control_wires\n\n def single_qubit_rot_angles(self):\n omega, theta, phi = self.base.single_qubit_rot_angles()\n return [-phi, -theta, -omega]\n\n @property\n def grad_method(self):\n return self.base.grad_method\n\n # pylint: disable=missing-function-docstring\n @property\n def grad_recipe(self):\n return self.base.grad_recipe\n\n def get_parameter_shift(self, idx):\n return self.base.get_parameter_shift(idx)\n\n @property\n def parameter_frequencies(self):\n return self.base.parameter_frequencies\n\n def generator(self):\n return -1.0 * self.base.generator()\n\n\n# pylint: disable=too-many-public-methods\nclass Adjoint(Operator):\n \"\"\"\n The Adjoint of an operator.\n\n Args:\n base (~.operation.Operator): The operator that is adjointed.\n\n .. seealso:: :func:`~.adjoint`, :meth:`~.operation.Operator.adjoint`\n\n This is a *developer*-facing class, and the :func:`~.adjoint` transform should be used to construct instances\n of this class.\n\n **Example**\n\n >>> op = Adjoint(qml.S(0))\n >>> op.name\n 'Adjoint(S)'\n >>> qml.matrix(op)\n array([[1.-0.j, 0.-0.j],\n [0.-0.j, 0.-1.j]])\n >>> qml.generator(Adjoint(qml.RX(1.0, wires=0)))\n (PauliX(wires=[0]), 0.5)\n >>> Adjoint(qml.RX(1.234, wires=0)).data\n [1.234]\n\n .. details::\n :title: Developer Details\n\n This class mixes in parent classes based on the inheritance tree of the provided ``Operator``. For example, when\n provided an ``Operation``, the instance will inherit from ``Operation`` and the ``AdjointOperation`` mixin.\n\n >>> op = Adjoint(qml.RX(1.234, wires=0))\n >>> isinstance(op, qml.operation.Operation)\n True\n >>> isinstance(op, AdjointOperation)\n True\n >>> op.grad_method\n 'A'\n\n If the base class is an ``Observable`` instead, the ``Adjoint`` will be an ``Observable`` as well.\n\n >>> op = Adjoint(1.0 * qml.PauliX(0))\n >>> isinstance(op, qml.operation.Observable)\n True\n >>> isinstance(op, qml.operation.Operation)\n False\n >>> Adjoint(qml.PauliX(0)) @ qml.PauliY(1)\n Adjoint(PauliX)(wires=[0]) @ PauliY(wires=[1])\n\n \"\"\"\n\n _operation_type = None # type if base inherits from operation and not observable\n _operation_observable_type = None # type if base inherits from both operation and observable\n _observable_type = None # type if base inherits from observable and not operation\n\n # pylint: disable=unused-argument\n def __new__(cls, base=None, do_queue=True, id=None):\n \"\"\"Mixes in parents based on inheritance structure of base.\n\n Though all the types will be named \"Adjoint\", their *identity* and location in memory will be different\n based on ``base``'s inheritance. 
We cache the different types in private class variables so that:\n\n >>> Adjoint(op).__class__ is Adjoint(op).__class__\n True\n >>> type(Adjoint(op)) == type(Adjoint(op))\n True\n >>> Adjoint(qml.RX(1.2, wires=0)).__class__ is Adjoint._operation_type\n True\n >>> Adjoint(qml.PauliX(0)).__class__ is Adjoint._operation_observable_type\n True\n\n \"\"\"\n\n if isinstance(base, Operation):\n if isinstance(base, Observable):\n if cls._operation_observable_type is None:\n class_bases = (AdjointOperation, Adjoint, Observable, Operation)\n cls._operation_observable_type = type(\n \"Adjoint\", class_bases, dict(cls.__dict__)\n )\n return object.__new__(cls._operation_observable_type)\n\n # not an observable\n if cls._operation_type is None:\n class_bases = (AdjointOperation, Adjoint, Operation)\n cls._operation_type = type(\"Adjoint\", class_bases, dict(cls.__dict__))\n return object.__new__(cls._operation_type)\n\n if isinstance(base, Observable):\n if cls._observable_type is None:\n class_bases = (Adjoint, Observable)\n cls._observable_type = type(\"Adjoint\", class_bases, dict(cls.__dict__))\n return object.__new__(cls._observable_type)\n\n return object.__new__(Adjoint)\n\n # pylint: disable=attribute-defined-outside-init\n def __copy__(self):\n # this method needs to be overwritten becuase the base must be copied too.\n copied_op = object.__new__(type(self))\n # copied_op must maintain inheritance structure of self\n # For example, it must keep AdjointOperation if self has it\n # this way preserves inheritance structure\n\n copied_base = self.base.__copy__()\n copied_op._hyperparameters = {\"base\": copied_base}\n for attr, value in vars(self).items():\n if attr not in {\"data\", \"base\", \"_hyperparameters\"}:\n setattr(copied_op, attr, value)\n\n return copied_op\n\n # pylint: disable=super-init-not-called\n def __init__(self, base=None, do_queue=True, id=None):\n self.hyperparameters[\"base\"] = base\n self._id = id\n self.queue_idx = None\n\n self._name = f\"Adjoint({self.base.name})\"\n\n if do_queue:\n self.queue()\n\n @property\n def base(self):\n \"\"\"The operator that is adjointed.\"\"\"\n return self.hyperparameters[\"base\"]\n\n @property\n def data(self):\n \"\"\"Trainable parameters that the operator depends on.\"\"\"\n return self.base.data\n\n @data.setter\n def data(self, new_data):\n \"\"\"Allows us to set base operation parameters.\"\"\"\n self.base.data = new_data\n\n @property\n def parameters(self):\n return self.base.parameters\n\n @property\n def num_params(self):\n return self.base.num_params\n\n @property\n def wires(self):\n return self.base.wires\n\n # pylint: disable=protected-access\n @property\n def _wires(self):\n return self.base._wires\n\n # pylint: disable=protected-access\n @_wires.setter\n def _wires(self, new_wires):\n # we should have a better way of updating wires than accessing a private attribute.\n self.base._wires = new_wires\n\n @property\n def num_wires(self):\n return self.base.num_wires\n\n @property\n def batch_size(self):\n return self.base.batch_size\n\n @property\n def ndim_params(self):\n return self.base.ndim_params\n\n @property\n def is_hermitian(self):\n return self.base.is_hermitian\n\n def queue(self, context=QueuingContext):\n context.safe_update_info(self.base, owner=self)\n context.append(self, owns=self.base)\n\n return self\n\n def label(self, decimals=None, base_label=None, cache=None):\n return self.base.label(decimals, base_label, cache=cache) + \"\u2020\"\n\n # pylint: disable=arguments-differ\n @staticmethod\n def 
compute_matrix(*params, base=None):\n base_matrix = base.compute_matrix(*params, **base.hyperparameters)\n return transpose(conj(base_matrix))\n\n def decomposition(self):\n try:\n return [self.base.adjoint()]\n except AdjointUndefinedError:\n base_decomp = self.base.decomposition()\n return [Adjoint(op) for op in reversed(base_decomp)]\n\n # pylint: disable=arguments-differ\n @staticmethod\n def compute_sparse_matrix(*params, base=None):\n base_matrix = base.compute_sparse_matrix(*params, **base.hyperparameters)\n return transpose(conj(base_matrix)).tocsr()\n\n def eigvals(self):\n # Cannot define ``compute_eigvals`` because Hermitian only defines ``eigvals``\n return conj(self.base.eigvals())\n\n def diagonalizing_gates(self):\n return self.base.diagonalizing_gates()\n\n # pylint: disable=arguments-renamed, invalid-overridden-method\n @property\n def has_matrix(self):\n return self.base.has_matrix\n\n def adjoint(self):\n return self.base.queue()\n\n @property\n def _queue_category(self):\n \"\"\"Used for sorting objects into their respective lists in `QuantumTape` objects.\n\n This property is a temporary solution that should not exist long-term and should not be\n used outside of ``QuantumTape._process_queue``.\n\n Returns ``_queue_cateogory`` for base operator.\n \"\"\"\n return self.base._queue_category # pylint: disable=protected-access\n", "path": "pennylane/ops/op_math/adjoint_class.py"}]}
4,079
116
gh_patches_debug_33845
rasdani/github-patches
git_diff
great-expectations__great_expectations-6677
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use cleaner solution for non-truncating division in python 2 Prefer `from __future__ import division` to `1.*x/y` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `great_expectations/rule_based_profiler/helpers/cardinality_checker.py` Content: ``` 1 import abc 2 import enum 3 from dataclasses import dataclass 4 from typing import Optional, Tuple, Union 5 6 from great_expectations.core.util import convert_to_json_serializable 7 from great_expectations.exceptions import ProfilerConfigurationError 8 from great_expectations.types import SerializableDictDot 9 10 11 @dataclass(frozen=True) 12 class CardinalityLimit(abc.ABC, SerializableDictDot): 13 name: str 14 15 16 @dataclass(frozen=True) 17 class RelativeCardinalityLimit(CardinalityLimit): 18 max_proportion_unique: float 19 metric_name_defining_limit: str = "column.unique_proportion" 20 21 def to_json_dict(self) -> dict: 22 return convert_to_json_serializable( 23 { 24 "name": self.name, 25 "max_proportion_unique": self.max_proportion_unique, 26 "metric_name_defining_limit": self.metric_name_defining_limit, 27 } 28 ) 29 30 31 @dataclass(frozen=True) 32 class AbsoluteCardinalityLimit(CardinalityLimit): 33 max_unique_values: int 34 metric_name_defining_limit: str = "column.distinct_values.count" 35 36 def to_json_dict(self) -> dict: 37 return convert_to_json_serializable( 38 { 39 "name": self.name, 40 "max_proportion_unique": self.max_unique_values, 41 "metric_name_defining_limit": self.metric_name_defining_limit, 42 } 43 ) 44 45 46 class CardinalityLimitMode(enum.Enum): 47 """Preset limits based on unique values (cardinality) 48 49 Defines relative (ratio) and absolute number of records (table rows) that 50 correspond to each cardinality category. 51 52 Used to determine appropriate Expectation configurations based on data. 
53 """ 54 55 ZERO = AbsoluteCardinalityLimit("ZERO", 0) 56 ONE = AbsoluteCardinalityLimit("ONE", 1) 57 TWO = AbsoluteCardinalityLimit("TWO", 2) 58 VERY_FEW = AbsoluteCardinalityLimit("VERY_FEW", 10) 59 FEW = AbsoluteCardinalityLimit("FEW", 100) 60 SOME = AbsoluteCardinalityLimit("SOME", 1000) 61 MANY = AbsoluteCardinalityLimit("MANY", 10000) 62 VERY_MANY = AbsoluteCardinalityLimit("VERY_MANY", 100000) 63 UNIQUE = RelativeCardinalityLimit("UNIQUE", 1.0) 64 ABS_10 = AbsoluteCardinalityLimit("ABS_10", 10) 65 ABS_100 = AbsoluteCardinalityLimit("ABS_100", 100) 66 ABS_1000 = AbsoluteCardinalityLimit("ABS_1000", 1000) 67 ABS_10_000 = AbsoluteCardinalityLimit("ABS_10_000", int(1e4)) 68 ABS_100_000 = AbsoluteCardinalityLimit("ABS_100_000", int(1e5)) 69 ABS_1_000_000 = AbsoluteCardinalityLimit("ABS_1_000_000", int(1e6)) 70 ABS_10_000_000 = AbsoluteCardinalityLimit("ABS_10_000_000", int(1e7)) 71 ABS_100_000_000 = AbsoluteCardinalityLimit("ABS_100_000_000", int(1e8)) 72 ABS_1_000_000_000 = AbsoluteCardinalityLimit("ABS_1_000_000_000", int(1e9)) 73 REL_0 = RelativeCardinalityLimit("REL_0", 0.0) 74 REL_001 = RelativeCardinalityLimit("REL_001", 1e-5) 75 REL_01 = RelativeCardinalityLimit("REL_01", 1e-4) 76 REL_0_1 = RelativeCardinalityLimit("REL_0_1", 1e-3) 77 REL_1 = RelativeCardinalityLimit("REL_1", 1e-2) 78 REL_10 = RelativeCardinalityLimit("REL_10", 0.10) 79 REL_25 = RelativeCardinalityLimit("REL_25", 0.25) 80 REL_50 = RelativeCardinalityLimit("REL_50", 0.50) 81 REL_75 = RelativeCardinalityLimit("REL_75", 0.75) 82 REL_100 = RelativeCardinalityLimit("REL_100", 1.0) 83 ONE_PCT = RelativeCardinalityLimit("ONE_PCT", 0.01) 84 TEN_PCT = RelativeCardinalityLimit("TEN_PCT", 0.10) 85 86 87 class CardinalityChecker: 88 """Handles cardinality checking given cardinality limit mode and measured value. 89 90 This class also validates cardinality limit settings and converts from 91 various types of settings. You can choose one of the attributes listed 92 below to create an instance. 93 94 Attributes: 95 cardinality_limit_mode: CardinalityLimitMode or string name of the mode 96 defining the maximum allowable cardinality. 97 max_unique_values: number of max unique rows for a custom 98 cardinality limit. 99 max_proportion_unique: proportion of unique values for a 100 custom cardinality limit. 101 """ 102 103 SUPPORTED_CARDINALITY_LIMIT_MODE_CLASSES: Tuple[ 104 Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit] 105 ] = ( 106 AbsoluteCardinalityLimit, 107 RelativeCardinalityLimit, 108 ) 109 SUPPORTED_LIMIT_MODE_CLASS_NAMES: Tuple[str] = ( 110 mode.__name__ for mode in SUPPORTED_CARDINALITY_LIMIT_MODE_CLASSES 111 ) 112 SUPPORTED_CARDINALITY_LIMIT_MODE_STRINGS: Tuple[str] = ( 113 mode.name for mode in CardinalityLimitMode 114 ) 115 116 def __init__( 117 self, 118 cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None, 119 max_unique_values: Optional[int] = None, 120 max_proportion_unique: Optional[float] = None, 121 ) -> None: 122 self._cardinality_limit_mode = self._convert_to_cardinality_limit_mode( 123 cardinality_limit_mode=cardinality_limit_mode, 124 max_unique_values=max_unique_values, 125 max_proportion_unique=max_proportion_unique, 126 ) 127 128 @property 129 def cardinality_limit_mode( 130 self, 131 ) -> Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit]: 132 return self._cardinality_limit_mode 133 134 def cardinality_within_limit(self, metric_value: float) -> bool: 135 """Determine if the cardinality is within configured limit. 
136 137 The metric_value supplied should be either a proportion of unique values 138 or number of unique values based on the configured cardinality limit. 139 140 Args: 141 metric_value: int if number of unique values, float if proportion 142 of unique values. 143 144 Returns: 145 Boolean of whether the cardinality is within the configured limit 146 """ 147 self._validate_metric_value(metric_value=metric_value) 148 if isinstance(self._cardinality_limit_mode, AbsoluteCardinalityLimit): 149 return metric_value <= self._cardinality_limit_mode.max_unique_values 150 elif isinstance(self._cardinality_limit_mode, RelativeCardinalityLimit): 151 return metric_value <= self._cardinality_limit_mode.max_proportion_unique 152 153 @staticmethod 154 def _validate_metric_value(metric_value: float) -> None: 155 if not isinstance(metric_value, (int, float)): 156 raise ProfilerConfigurationError( 157 f"Value of measured cardinality must be of type int or float, you provided {type(metric_value)}" 158 ) 159 160 if metric_value < 0.00: 161 raise ProfilerConfigurationError( 162 f"Value of cardinality (number of rows or percent unique) should be greater than 0.00, your value is {metric_value}" 163 ) 164 165 @staticmethod 166 def _convert_to_cardinality_limit_mode( 167 cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None, 168 max_unique_values: Optional[int] = None, 169 max_proportion_unique: Optional[float] = None, 170 ) -> Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit]: 171 validate_input_parameters( 172 cardinality_limit_mode=cardinality_limit_mode, 173 max_unique_values=max_unique_values, 174 max_proportion_unique=max_proportion_unique, 175 ) 176 177 if cardinality_limit_mode is not None: 178 if isinstance(cardinality_limit_mode, str): 179 try: 180 return CardinalityLimitMode[cardinality_limit_mode.upper()].value 181 except KeyError: 182 raise ProfilerConfigurationError( 183 f"Please specify a supported cardinality mode. Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}" 184 ) 185 elif isinstance(cardinality_limit_mode, dict): 186 validate_input_parameters( 187 cardinality_limit_mode=cardinality_limit_mode.get("name"), 188 max_unique_values=cardinality_limit_mode.get("max_unique_values"), 189 max_proportion_unique=cardinality_limit_mode.get( 190 "max_proportion_unique" 191 ), 192 required_num_supplied_params=2, 193 ) 194 try: 195 return AbsoluteCardinalityLimit( 196 name=cardinality_limit_mode["name"], 197 max_unique_values=cardinality_limit_mode["max_unique_values"], 198 metric_name_defining_limit=cardinality_limit_mode[ 199 "metric_name_defining_limit" 200 ], 201 ) 202 except (KeyError, ValueError): 203 try: 204 return RelativeCardinalityLimit( 205 name=cardinality_limit_mode["name"], 206 max_proportion_unique=cardinality_limit_mode[ 207 "max_proportion_unique" 208 ], 209 metric_name_defining_limit=cardinality_limit_mode[ 210 "metric_name_defining_limit" 211 ], 212 ) 213 except (KeyError, ValueError): 214 raise ProfilerConfigurationError( 215 f"Please specify a supported cardinality mode. 
Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}" 216 ) 217 else: 218 return cardinality_limit_mode.value 219 220 if max_unique_values is not None: 221 return AbsoluteCardinalityLimit( 222 name=f"CUSTOM_ABS_{max_unique_values}", 223 max_unique_values=max_unique_values, 224 ) 225 226 if max_proportion_unique is not None: 227 return RelativeCardinalityLimit( 228 name=f"CUSTOM_REL_{max_proportion_unique}", 229 max_proportion_unique=max_proportion_unique, 230 ) 231 232 233 def validate_input_parameters( 234 cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None, 235 max_unique_values: Optional[int] = None, 236 max_proportion_unique: Optional[int] = None, 237 required_num_supplied_params: int = 1, 238 ) -> None: 239 num_supplied_params: int = sum( 240 [ 241 0 if param is None else 1 242 for param in ( 243 cardinality_limit_mode, 244 max_unique_values, 245 max_proportion_unique, 246 ) 247 ] 248 ) 249 if num_supplied_params != required_num_supplied_params: 250 raise ProfilerConfigurationError( 251 f"Please pass ONE of the following parameters: cardinality_limit_mode, max_unique_values, max_proportion_unique, you passed {num_supplied_params} parameters." 252 ) 253 254 if cardinality_limit_mode is not None: 255 if not isinstance(cardinality_limit_mode, (str, CardinalityLimitMode, dict)): 256 raise ProfilerConfigurationError( 257 f"Please specify a supported cardinality limit type, supported classes are {','.join(CardinalityChecker.SUPPORTED_LIMIT_MODE_CLASS_NAMES)} and supported strings are {','.join(CardinalityChecker.SUPPORTED_CARDINALITY_LIMIT_MODE_STRINGS)}" 258 ) 259 260 if required_num_supplied_params == 2: 261 try: 262 return CardinalityLimitMode[cardinality_limit_mode.upper()].value 263 except KeyError: 264 raise ProfilerConfigurationError( 265 f"Please specify a supported cardinality mode. Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}" 266 ) 267 268 if max_unique_values is not None: 269 if not isinstance(max_unique_values, int): 270 raise ProfilerConfigurationError( 271 f"Please specify an int, you specified a {type(max_unique_values)}" 272 ) 273 274 if max_proportion_unique is not None: 275 if not isinstance(max_proportion_unique, (float, int)): 276 raise ProfilerConfigurationError( 277 f"Please specify a float or int, you specified a {type(max_proportion_unique)}" 278 ) 279 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/great_expectations/rule_based_profiler/helpers/cardinality_checker.py b/great_expectations/rule_based_profiler/helpers/cardinality_checker.py --- a/great_expectations/rule_based_profiler/helpers/cardinality_checker.py +++ b/great_expectations/rule_based_profiler/helpers/cardinality_checker.py @@ -1,7 +1,7 @@ import abc import enum from dataclasses import dataclass -from typing import Optional, Tuple, Union +from typing import Optional, Tuple, Union, cast from great_expectations.core.util import convert_to_json_serializable from great_expectations.exceptions import ProfilerConfigurationError @@ -147,9 +147,14 @@ self._validate_metric_value(metric_value=metric_value) if isinstance(self._cardinality_limit_mode, AbsoluteCardinalityLimit): return metric_value <= self._cardinality_limit_mode.max_unique_values - elif isinstance(self._cardinality_limit_mode, RelativeCardinalityLimit): + + if isinstance(self._cardinality_limit_mode, RelativeCardinalityLimit): return metric_value <= self._cardinality_limit_mode.max_proportion_unique + raise ValueError( + f'Unknown "cardinality_limit_mode" mode "{self._cardinality_limit_mode}" encountered.' + ) + @staticmethod def _validate_metric_value(metric_value: float) -> None: if not isinstance(metric_value, (int, float)): @@ -215,7 +220,7 @@ f"Please specify a supported cardinality mode. Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}" ) else: - return cardinality_limit_mode.value + return cast(CardinalityLimitMode, cardinality_limit_mode).value if max_unique_values is not None: return AbsoluteCardinalityLimit(
{"golden_diff": "diff --git a/great_expectations/rule_based_profiler/helpers/cardinality_checker.py b/great_expectations/rule_based_profiler/helpers/cardinality_checker.py\n--- a/great_expectations/rule_based_profiler/helpers/cardinality_checker.py\n+++ b/great_expectations/rule_based_profiler/helpers/cardinality_checker.py\n@@ -1,7 +1,7 @@\n import abc\n import enum\n from dataclasses import dataclass\n-from typing import Optional, Tuple, Union\n+from typing import Optional, Tuple, Union, cast\n \n from great_expectations.core.util import convert_to_json_serializable\n from great_expectations.exceptions import ProfilerConfigurationError\n@@ -147,9 +147,14 @@\n self._validate_metric_value(metric_value=metric_value)\n if isinstance(self._cardinality_limit_mode, AbsoluteCardinalityLimit):\n return metric_value <= self._cardinality_limit_mode.max_unique_values\n- elif isinstance(self._cardinality_limit_mode, RelativeCardinalityLimit):\n+\n+ if isinstance(self._cardinality_limit_mode, RelativeCardinalityLimit):\n return metric_value <= self._cardinality_limit_mode.max_proportion_unique\n \n+ raise ValueError(\n+ f'Unknown \"cardinality_limit_mode\" mode \"{self._cardinality_limit_mode}\" encountered.'\n+ )\n+\n @staticmethod\n def _validate_metric_value(metric_value: float) -> None:\n if not isinstance(metric_value, (int, float)):\n@@ -215,7 +220,7 @@\n f\"Please specify a supported cardinality mode. Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}\"\n )\n else:\n- return cardinality_limit_mode.value\n+ return cast(CardinalityLimitMode, cardinality_limit_mode).value\n \n if max_unique_values is not None:\n return AbsoluteCardinalityLimit(\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import abc\nimport enum\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple, Union\n\nfrom great_expectations.core.util import convert_to_json_serializable\nfrom great_expectations.exceptions import ProfilerConfigurationError\nfrom great_expectations.types import SerializableDictDot\n\n\n@dataclass(frozen=True)\nclass CardinalityLimit(abc.ABC, SerializableDictDot):\n name: str\n\n\n@dataclass(frozen=True)\nclass RelativeCardinalityLimit(CardinalityLimit):\n max_proportion_unique: float\n metric_name_defining_limit: str = \"column.unique_proportion\"\n\n def to_json_dict(self) -> dict:\n return convert_to_json_serializable(\n {\n \"name\": self.name,\n \"max_proportion_unique\": self.max_proportion_unique,\n \"metric_name_defining_limit\": self.metric_name_defining_limit,\n }\n )\n\n\n@dataclass(frozen=True)\nclass AbsoluteCardinalityLimit(CardinalityLimit):\n max_unique_values: int\n metric_name_defining_limit: str = \"column.distinct_values.count\"\n\n def to_json_dict(self) -> dict:\n return convert_to_json_serializable(\n {\n \"name\": self.name,\n \"max_proportion_unique\": self.max_unique_values,\n \"metric_name_defining_limit\": self.metric_name_defining_limit,\n }\n )\n\n\nclass CardinalityLimitMode(enum.Enum):\n \"\"\"Preset limits based on unique values (cardinality)\n\n Defines relative (ratio) and absolute number of records (table rows) that\n correspond to each cardinality category.\n\n Used to determine appropriate Expectation configurations based on data.\n \"\"\"\n\n ZERO = AbsoluteCardinalityLimit(\"ZERO\", 0)\n ONE = AbsoluteCardinalityLimit(\"ONE\", 1)\n TWO = AbsoluteCardinalityLimit(\"TWO\", 2)\n VERY_FEW = 
AbsoluteCardinalityLimit(\"VERY_FEW\", 10)\n FEW = AbsoluteCardinalityLimit(\"FEW\", 100)\n SOME = AbsoluteCardinalityLimit(\"SOME\", 1000)\n MANY = AbsoluteCardinalityLimit(\"MANY\", 10000)\n VERY_MANY = AbsoluteCardinalityLimit(\"VERY_MANY\", 100000)\n UNIQUE = RelativeCardinalityLimit(\"UNIQUE\", 1.0)\n ABS_10 = AbsoluteCardinalityLimit(\"ABS_10\", 10)\n ABS_100 = AbsoluteCardinalityLimit(\"ABS_100\", 100)\n ABS_1000 = AbsoluteCardinalityLimit(\"ABS_1000\", 1000)\n ABS_10_000 = AbsoluteCardinalityLimit(\"ABS_10_000\", int(1e4))\n ABS_100_000 = AbsoluteCardinalityLimit(\"ABS_100_000\", int(1e5))\n ABS_1_000_000 = AbsoluteCardinalityLimit(\"ABS_1_000_000\", int(1e6))\n ABS_10_000_000 = AbsoluteCardinalityLimit(\"ABS_10_000_000\", int(1e7))\n ABS_100_000_000 = AbsoluteCardinalityLimit(\"ABS_100_000_000\", int(1e8))\n ABS_1_000_000_000 = AbsoluteCardinalityLimit(\"ABS_1_000_000_000\", int(1e9))\n REL_0 = RelativeCardinalityLimit(\"REL_0\", 0.0)\n REL_001 = RelativeCardinalityLimit(\"REL_001\", 1e-5)\n REL_01 = RelativeCardinalityLimit(\"REL_01\", 1e-4)\n REL_0_1 = RelativeCardinalityLimit(\"REL_0_1\", 1e-3)\n REL_1 = RelativeCardinalityLimit(\"REL_1\", 1e-2)\n REL_10 = RelativeCardinalityLimit(\"REL_10\", 0.10)\n REL_25 = RelativeCardinalityLimit(\"REL_25\", 0.25)\n REL_50 = RelativeCardinalityLimit(\"REL_50\", 0.50)\n REL_75 = RelativeCardinalityLimit(\"REL_75\", 0.75)\n REL_100 = RelativeCardinalityLimit(\"REL_100\", 1.0)\n ONE_PCT = RelativeCardinalityLimit(\"ONE_PCT\", 0.01)\n TEN_PCT = RelativeCardinalityLimit(\"TEN_PCT\", 0.10)\n\n\nclass CardinalityChecker:\n \"\"\"Handles cardinality checking given cardinality limit mode and measured value.\n\n This class also validates cardinality limit settings and converts from\n various types of settings. 
You can choose one of the attributes listed\n below to create an instance.\n\n Attributes:\n cardinality_limit_mode: CardinalityLimitMode or string name of the mode\n defining the maximum allowable cardinality.\n max_unique_values: number of max unique rows for a custom\n cardinality limit.\n max_proportion_unique: proportion of unique values for a\n custom cardinality limit.\n \"\"\"\n\n SUPPORTED_CARDINALITY_LIMIT_MODE_CLASSES: Tuple[\n Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit]\n ] = (\n AbsoluteCardinalityLimit,\n RelativeCardinalityLimit,\n )\n SUPPORTED_LIMIT_MODE_CLASS_NAMES: Tuple[str] = (\n mode.__name__ for mode in SUPPORTED_CARDINALITY_LIMIT_MODE_CLASSES\n )\n SUPPORTED_CARDINALITY_LIMIT_MODE_STRINGS: Tuple[str] = (\n mode.name for mode in CardinalityLimitMode\n )\n\n def __init__(\n self,\n cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None,\n max_unique_values: Optional[int] = None,\n max_proportion_unique: Optional[float] = None,\n ) -> None:\n self._cardinality_limit_mode = self._convert_to_cardinality_limit_mode(\n cardinality_limit_mode=cardinality_limit_mode,\n max_unique_values=max_unique_values,\n max_proportion_unique=max_proportion_unique,\n )\n\n @property\n def cardinality_limit_mode(\n self,\n ) -> Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit]:\n return self._cardinality_limit_mode\n\n def cardinality_within_limit(self, metric_value: float) -> bool:\n \"\"\"Determine if the cardinality is within configured limit.\n\n The metric_value supplied should be either a proportion of unique values\n or number of unique values based on the configured cardinality limit.\n\n Args:\n metric_value: int if number of unique values, float if proportion\n of unique values.\n\n Returns:\n Boolean of whether the cardinality is within the configured limit\n \"\"\"\n self._validate_metric_value(metric_value=metric_value)\n if isinstance(self._cardinality_limit_mode, AbsoluteCardinalityLimit):\n return metric_value <= self._cardinality_limit_mode.max_unique_values\n elif isinstance(self._cardinality_limit_mode, RelativeCardinalityLimit):\n return metric_value <= self._cardinality_limit_mode.max_proportion_unique\n\n @staticmethod\n def _validate_metric_value(metric_value: float) -> None:\n if not isinstance(metric_value, (int, float)):\n raise ProfilerConfigurationError(\n f\"Value of measured cardinality must be of type int or float, you provided {type(metric_value)}\"\n )\n\n if metric_value < 0.00:\n raise ProfilerConfigurationError(\n f\"Value of cardinality (number of rows or percent unique) should be greater than 0.00, your value is {metric_value}\"\n )\n\n @staticmethod\n def _convert_to_cardinality_limit_mode(\n cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None,\n max_unique_values: Optional[int] = None,\n max_proportion_unique: Optional[float] = None,\n ) -> Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit]:\n validate_input_parameters(\n cardinality_limit_mode=cardinality_limit_mode,\n max_unique_values=max_unique_values,\n max_proportion_unique=max_proportion_unique,\n )\n\n if cardinality_limit_mode is not None:\n if isinstance(cardinality_limit_mode, str):\n try:\n return CardinalityLimitMode[cardinality_limit_mode.upper()].value\n except KeyError:\n raise ProfilerConfigurationError(\n f\"Please specify a supported cardinality mode. 
Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}\"\n )\n elif isinstance(cardinality_limit_mode, dict):\n validate_input_parameters(\n cardinality_limit_mode=cardinality_limit_mode.get(\"name\"),\n max_unique_values=cardinality_limit_mode.get(\"max_unique_values\"),\n max_proportion_unique=cardinality_limit_mode.get(\n \"max_proportion_unique\"\n ),\n required_num_supplied_params=2,\n )\n try:\n return AbsoluteCardinalityLimit(\n name=cardinality_limit_mode[\"name\"],\n max_unique_values=cardinality_limit_mode[\"max_unique_values\"],\n metric_name_defining_limit=cardinality_limit_mode[\n \"metric_name_defining_limit\"\n ],\n )\n except (KeyError, ValueError):\n try:\n return RelativeCardinalityLimit(\n name=cardinality_limit_mode[\"name\"],\n max_proportion_unique=cardinality_limit_mode[\n \"max_proportion_unique\"\n ],\n metric_name_defining_limit=cardinality_limit_mode[\n \"metric_name_defining_limit\"\n ],\n )\n except (KeyError, ValueError):\n raise ProfilerConfigurationError(\n f\"Please specify a supported cardinality mode. Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}\"\n )\n else:\n return cardinality_limit_mode.value\n\n if max_unique_values is not None:\n return AbsoluteCardinalityLimit(\n name=f\"CUSTOM_ABS_{max_unique_values}\",\n max_unique_values=max_unique_values,\n )\n\n if max_proportion_unique is not None:\n return RelativeCardinalityLimit(\n name=f\"CUSTOM_REL_{max_proportion_unique}\",\n max_proportion_unique=max_proportion_unique,\n )\n\n\ndef validate_input_parameters(\n cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None,\n max_unique_values: Optional[int] = None,\n max_proportion_unique: Optional[int] = None,\n required_num_supplied_params: int = 1,\n) -> None:\n num_supplied_params: int = sum(\n [\n 0 if param is None else 1\n for param in (\n cardinality_limit_mode,\n max_unique_values,\n max_proportion_unique,\n )\n ]\n )\n if num_supplied_params != required_num_supplied_params:\n raise ProfilerConfigurationError(\n f\"Please pass ONE of the following parameters: cardinality_limit_mode, max_unique_values, max_proportion_unique, you passed {num_supplied_params} parameters.\"\n )\n\n if cardinality_limit_mode is not None:\n if not isinstance(cardinality_limit_mode, (str, CardinalityLimitMode, dict)):\n raise ProfilerConfigurationError(\n f\"Please specify a supported cardinality limit type, supported classes are {','.join(CardinalityChecker.SUPPORTED_LIMIT_MODE_CLASS_NAMES)} and supported strings are {','.join(CardinalityChecker.SUPPORTED_CARDINALITY_LIMIT_MODE_STRINGS)}\"\n )\n\n if required_num_supplied_params == 2:\n try:\n return CardinalityLimitMode[cardinality_limit_mode.upper()].value\n except KeyError:\n raise ProfilerConfigurationError(\n f\"Please specify a supported cardinality mode. 
Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}\"\n )\n\n if max_unique_values is not None:\n if not isinstance(max_unique_values, int):\n raise ProfilerConfigurationError(\n f\"Please specify an int, you specified a {type(max_unique_values)}\"\n )\n\n if max_proportion_unique is not None:\n if not isinstance(max_proportion_unique, (float, int)):\n raise ProfilerConfigurationError(\n f\"Please specify a float or int, you specified a {type(max_proportion_unique)}\"\n )\n", "path": "great_expectations/rule_based_profiler/helpers/cardinality_checker.py"}], "after_files": [{"content": "import abc\nimport enum\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple, Union, cast\n\nfrom great_expectations.core.util import convert_to_json_serializable\nfrom great_expectations.exceptions import ProfilerConfigurationError\nfrom great_expectations.types import SerializableDictDot\n\n\n@dataclass(frozen=True)\nclass CardinalityLimit(abc.ABC, SerializableDictDot):\n name: str\n\n\n@dataclass(frozen=True)\nclass RelativeCardinalityLimit(CardinalityLimit):\n max_proportion_unique: float\n metric_name_defining_limit: str = \"column.unique_proportion\"\n\n def to_json_dict(self) -> dict:\n return convert_to_json_serializable(\n {\n \"name\": self.name,\n \"max_proportion_unique\": self.max_proportion_unique,\n \"metric_name_defining_limit\": self.metric_name_defining_limit,\n }\n )\n\n\n@dataclass(frozen=True)\nclass AbsoluteCardinalityLimit(CardinalityLimit):\n max_unique_values: int\n metric_name_defining_limit: str = \"column.distinct_values.count\"\n\n def to_json_dict(self) -> dict:\n return convert_to_json_serializable(\n {\n \"name\": self.name,\n \"max_proportion_unique\": self.max_unique_values,\n \"metric_name_defining_limit\": self.metric_name_defining_limit,\n }\n )\n\n\nclass CardinalityLimitMode(enum.Enum):\n \"\"\"Preset limits based on unique values (cardinality)\n\n Defines relative (ratio) and absolute number of records (table rows) that\n correspond to each cardinality category.\n\n Used to determine appropriate Expectation configurations based on data.\n \"\"\"\n\n ZERO = AbsoluteCardinalityLimit(\"ZERO\", 0)\n ONE = AbsoluteCardinalityLimit(\"ONE\", 1)\n TWO = AbsoluteCardinalityLimit(\"TWO\", 2)\n VERY_FEW = AbsoluteCardinalityLimit(\"VERY_FEW\", 10)\n FEW = AbsoluteCardinalityLimit(\"FEW\", 100)\n SOME = AbsoluteCardinalityLimit(\"SOME\", 1000)\n MANY = AbsoluteCardinalityLimit(\"MANY\", 10000)\n VERY_MANY = AbsoluteCardinalityLimit(\"VERY_MANY\", 100000)\n UNIQUE = RelativeCardinalityLimit(\"UNIQUE\", 1.0)\n ABS_10 = AbsoluteCardinalityLimit(\"ABS_10\", 10)\n ABS_100 = AbsoluteCardinalityLimit(\"ABS_100\", 100)\n ABS_1000 = AbsoluteCardinalityLimit(\"ABS_1000\", 1000)\n ABS_10_000 = AbsoluteCardinalityLimit(\"ABS_10_000\", int(1e4))\n ABS_100_000 = AbsoluteCardinalityLimit(\"ABS_100_000\", int(1e5))\n ABS_1_000_000 = AbsoluteCardinalityLimit(\"ABS_1_000_000\", int(1e6))\n ABS_10_000_000 = AbsoluteCardinalityLimit(\"ABS_10_000_000\", int(1e7))\n ABS_100_000_000 = AbsoluteCardinalityLimit(\"ABS_100_000_000\", int(1e8))\n ABS_1_000_000_000 = AbsoluteCardinalityLimit(\"ABS_1_000_000_000\", int(1e9))\n REL_0 = RelativeCardinalityLimit(\"REL_0\", 0.0)\n REL_001 = RelativeCardinalityLimit(\"REL_001\", 1e-5)\n REL_01 = RelativeCardinalityLimit(\"REL_01\", 1e-4)\n REL_0_1 = RelativeCardinalityLimit(\"REL_0_1\", 1e-3)\n REL_1 = RelativeCardinalityLimit(\"REL_1\", 1e-2)\n REL_10 = RelativeCardinalityLimit(\"REL_10\", 0.10)\n REL_25 = 
RelativeCardinalityLimit(\"REL_25\", 0.25)\n REL_50 = RelativeCardinalityLimit(\"REL_50\", 0.50)\n REL_75 = RelativeCardinalityLimit(\"REL_75\", 0.75)\n REL_100 = RelativeCardinalityLimit(\"REL_100\", 1.0)\n ONE_PCT = RelativeCardinalityLimit(\"ONE_PCT\", 0.01)\n TEN_PCT = RelativeCardinalityLimit(\"TEN_PCT\", 0.10)\n\n\nclass CardinalityChecker:\n \"\"\"Handles cardinality checking given cardinality limit mode and measured value.\n\n This class also validates cardinality limit settings and converts from\n various types of settings. You can choose one of the attributes listed\n below to create an instance.\n\n Attributes:\n cardinality_limit_mode: CardinalityLimitMode or string name of the mode\n defining the maximum allowable cardinality.\n max_unique_values: number of max unique rows for a custom\n cardinality limit.\n max_proportion_unique: proportion of unique values for a\n custom cardinality limit.\n \"\"\"\n\n SUPPORTED_CARDINALITY_LIMIT_MODE_CLASSES: Tuple[\n Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit]\n ] = (\n AbsoluteCardinalityLimit,\n RelativeCardinalityLimit,\n )\n SUPPORTED_LIMIT_MODE_CLASS_NAMES: Tuple[str] = (\n mode.__name__ for mode in SUPPORTED_CARDINALITY_LIMIT_MODE_CLASSES\n )\n SUPPORTED_CARDINALITY_LIMIT_MODE_STRINGS: Tuple[str] = (\n mode.name for mode in CardinalityLimitMode\n )\n\n def __init__(\n self,\n cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None,\n max_unique_values: Optional[int] = None,\n max_proportion_unique: Optional[float] = None,\n ) -> None:\n self._cardinality_limit_mode = self._convert_to_cardinality_limit_mode(\n cardinality_limit_mode=cardinality_limit_mode,\n max_unique_values=max_unique_values,\n max_proportion_unique=max_proportion_unique,\n )\n\n @property\n def cardinality_limit_mode(\n self,\n ) -> Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit]:\n return self._cardinality_limit_mode\n\n def cardinality_within_limit(self, metric_value: float) -> bool:\n \"\"\"Determine if the cardinality is within configured limit.\n\n The metric_value supplied should be either a proportion of unique values\n or number of unique values based on the configured cardinality limit.\n\n Args:\n metric_value: int if number of unique values, float if proportion\n of unique values.\n\n Returns:\n Boolean of whether the cardinality is within the configured limit\n \"\"\"\n self._validate_metric_value(metric_value=metric_value)\n if isinstance(self._cardinality_limit_mode, AbsoluteCardinalityLimit):\n return metric_value <= self._cardinality_limit_mode.max_unique_values\n\n if isinstance(self._cardinality_limit_mode, RelativeCardinalityLimit):\n return metric_value <= self._cardinality_limit_mode.max_proportion_unique\n\n raise ValueError(\n f'Unknown \"cardinality_limit_mode\" mode \"{self._cardinality_limit_mode}\" encountered.'\n )\n\n @staticmethod\n def _validate_metric_value(metric_value: float) -> None:\n if not isinstance(metric_value, (int, float)):\n raise ProfilerConfigurationError(\n f\"Value of measured cardinality must be of type int or float, you provided {type(metric_value)}\"\n )\n\n if metric_value < 0.00:\n raise ProfilerConfigurationError(\n f\"Value of cardinality (number of rows or percent unique) should be greater than 0.00, your value is {metric_value}\"\n )\n\n @staticmethod\n def _convert_to_cardinality_limit_mode(\n cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None,\n max_unique_values: Optional[int] = None,\n max_proportion_unique: Optional[float] = 
None,\n ) -> Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit]:\n validate_input_parameters(\n cardinality_limit_mode=cardinality_limit_mode,\n max_unique_values=max_unique_values,\n max_proportion_unique=max_proportion_unique,\n )\n\n if cardinality_limit_mode is not None:\n if isinstance(cardinality_limit_mode, str):\n try:\n return CardinalityLimitMode[cardinality_limit_mode.upper()].value\n except KeyError:\n raise ProfilerConfigurationError(\n f\"Please specify a supported cardinality mode. Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}\"\n )\n elif isinstance(cardinality_limit_mode, dict):\n validate_input_parameters(\n cardinality_limit_mode=cardinality_limit_mode.get(\"name\"),\n max_unique_values=cardinality_limit_mode.get(\"max_unique_values\"),\n max_proportion_unique=cardinality_limit_mode.get(\n \"max_proportion_unique\"\n ),\n required_num_supplied_params=2,\n )\n try:\n return AbsoluteCardinalityLimit(\n name=cardinality_limit_mode[\"name\"],\n max_unique_values=cardinality_limit_mode[\"max_unique_values\"],\n metric_name_defining_limit=cardinality_limit_mode[\n \"metric_name_defining_limit\"\n ],\n )\n except (KeyError, ValueError):\n try:\n return RelativeCardinalityLimit(\n name=cardinality_limit_mode[\"name\"],\n max_proportion_unique=cardinality_limit_mode[\n \"max_proportion_unique\"\n ],\n metric_name_defining_limit=cardinality_limit_mode[\n \"metric_name_defining_limit\"\n ],\n )\n except (KeyError, ValueError):\n raise ProfilerConfigurationError(\n f\"Please specify a supported cardinality mode. Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}\"\n )\n else:\n return cast(CardinalityLimitMode, cardinality_limit_mode).value\n\n if max_unique_values is not None:\n return AbsoluteCardinalityLimit(\n name=f\"CUSTOM_ABS_{max_unique_values}\",\n max_unique_values=max_unique_values,\n )\n\n if max_proportion_unique is not None:\n return RelativeCardinalityLimit(\n name=f\"CUSTOM_REL_{max_proportion_unique}\",\n max_proportion_unique=max_proportion_unique,\n )\n\n\ndef validate_input_parameters(\n cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None,\n max_unique_values: Optional[int] = None,\n max_proportion_unique: Optional[int] = None,\n required_num_supplied_params: int = 1,\n) -> None:\n num_supplied_params: int = sum(\n [\n 0 if param is None else 1\n for param in (\n cardinality_limit_mode,\n max_unique_values,\n max_proportion_unique,\n )\n ]\n )\n if num_supplied_params != required_num_supplied_params:\n raise ProfilerConfigurationError(\n f\"Please pass ONE of the following parameters: cardinality_limit_mode, max_unique_values, max_proportion_unique, you passed {num_supplied_params} parameters.\"\n )\n\n if cardinality_limit_mode is not None:\n if not isinstance(cardinality_limit_mode, (str, CardinalityLimitMode, dict)):\n raise ProfilerConfigurationError(\n f\"Please specify a supported cardinality limit type, supported classes are {','.join(CardinalityChecker.SUPPORTED_LIMIT_MODE_CLASS_NAMES)} and supported strings are {','.join(CardinalityChecker.SUPPORTED_CARDINALITY_LIMIT_MODE_STRINGS)}\"\n )\n\n if required_num_supplied_params == 2:\n try:\n return CardinalityLimitMode[cardinality_limit_mode.upper()].value\n except KeyError:\n raise ProfilerConfigurationError(\n f\"Please specify a supported cardinality mode. 
Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}\"\n )\n\n if max_unique_values is not None:\n if not isinstance(max_unique_values, int):\n raise ProfilerConfigurationError(\n f\"Please specify an int, you specified a {type(max_unique_values)}\"\n )\n\n if max_proportion_unique is not None:\n if not isinstance(max_proportion_unique, (float, int)):\n raise ProfilerConfigurationError(\n f\"Please specify a float or int, you specified a {type(max_proportion_unique)}\"\n )\n", "path": "great_expectations/rule_based_profiler/helpers/cardinality_checker.py"}]}
3,704
391
gh_patches_debug_32588
rasdani/github-patches
git_diff
sopel-irc__sopel-1503
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- reddit: shortlinks are not handled Reddit supports (and promotes in the comments page sidebar) a short link format for threads, `http(s)://redd.it/{id}`, where the `id` comes from here in a full-size link: `https://www.reddit.com/r/subreddit/comments/{id}/slugified_thread_title/`. It should be trivial to add detection and handling of these links in the `reddit` module. Not assigning to a milestone immediately, because I'd like to release this in a hypothetical version 6.7.0 *if* one happens before 7.0 (but none is planned). It does seem slightly more significant a change than would belong in one of the planned 6.6.x releases, unfortunately. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sopel/modules/reddit.py` Content: ``` 1 # coding=utf-8 2 # Author: Elsie Powell, embolalia.com 3 from __future__ import unicode_literals, absolute_import, print_function, division 4 5 from sopel.module import commands, rule, example, require_chanmsg, NOLIMIT, OP 6 from sopel.formatting import bold, color, colors 7 from sopel.web import USER_AGENT 8 from sopel.tools import time 9 import datetime as dt 10 import praw 11 import re 12 import sys 13 if sys.version_info.major >= 3: 14 unicode = str 15 if sys.version_info.minor >= 4: 16 from html import unescape 17 else: 18 from html.parser import HTMLParser 19 unescape = HTMLParser().unescape 20 else: 21 from HTMLParser import HTMLParser 22 unescape = HTMLParser().unescape 23 24 25 domain = r'https?://(?:www\.|old\.|pay\.|ssl\.|[a-z]{2}\.)?reddit\.com' 26 post_url = r'%s/r/(.*?)/comments/([\w-]+)' % domain 27 user_url = r'%s/u(ser)?/([\w-]+)' % domain 28 post_regex = re.compile(post_url) 29 user_regex = re.compile(user_url) 30 spoiler_subs = [ 31 'stevenuniverse', 32 'onepunchman', 33 ] 34 35 36 def setup(bot): 37 bot.register_url_callback(post_regex, rpost_info) 38 bot.register_url_callback(user_regex, redditor_info) 39 40 41 def shutdown(bot): 42 bot.unregister_url_callback(post_regex) 43 bot.unregister_url_callback(user_regex) 44 45 46 @rule('.*%s.*' % post_url) 47 def rpost_info(bot, trigger, match=None): 48 match = match or trigger 49 try: 50 r = praw.Reddit( 51 user_agent=USER_AGENT, 52 client_id='6EiphT6SSQq7FQ', 53 client_secret=None, 54 ) 55 s = r.submission(id=match.group(2)) 56 except Exception: 57 r = praw.Reddit(user_agent=USER_AGENT) 58 s = r.get_submission(submission_id=match.group(2)) 59 60 message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | ' 61 '{comments} comments | Posted by {author} | ' 62 'Created at {created}') 63 64 subreddit = s.subreddit.display_name 65 if s.is_self: 66 link = '(self.{})'.format(subreddit) 67 else: 68 link = '({}) to r/{}'.format(s.url, subreddit) 69 70 if s.over_18: 71 if subreddit.lower() in spoiler_subs: 72 nsfw = bold(color(' [SPOILERS]', colors.RED)) 73 else: 74 nsfw = bold(color(' [NSFW]', colors.RED)) 75 76 sfw = bot.db.get_channel_value(trigger.sender, 'sfw') 77 if sfw: 78 link = '(link hidden)' 79 bot.write(['KICK', trigger.sender, trigger.nick, 80 'Linking to NSFW content in a SFW channel.']) 81 else: 82 nsfw = '' 83 84 if s.author: 85 author = s.author.name 86 else: 87 author = '[deleted]' 88 89 tz = time.get_timezone(bot.db, bot.config, None, trigger.nick, 90 trigger.sender) 91 time_created = dt.datetime.utcfromtimestamp(s.created_utc) 92 created = time.format_time(bot.db, 
bot.config, tz, trigger.nick, 93 trigger.sender, time_created) 94 95 if s.score > 0: 96 point_color = colors.GREEN 97 else: 98 point_color = colors.RED 99 100 percent = color(unicode(s.upvote_ratio * 100) + '%', point_color) 101 102 title = unescape(s.title) 103 message = message.format( 104 title=title, link=link, nsfw=nsfw, points=s.score, percent=percent, 105 comments=s.num_comments, author=author, created=created) 106 107 bot.say(message) 108 109 110 # If you change this, you'll have to change some other things... 111 @commands('redditor') 112 @example('.redditor poem_for_your_sprog') 113 def redditor_info(bot, trigger, match=None): 114 """Show information about the given Redditor""" 115 commanded = re.match(bot.config.core.prefix + 'redditor', trigger) 116 r = praw.Reddit( 117 user_agent=USER_AGENT, 118 client_id='6EiphT6SSQq7FQ', 119 client_secret=None, 120 ) 121 match = match or trigger 122 try: 123 u = r.get_redditor(match.group(2)) 124 except Exception: # TODO: Be specific 125 if commanded: 126 bot.say('No such Redditor.') 127 return NOLIMIT 128 else: 129 return 130 # Fail silently if it wasn't an explicit command. 131 132 message = '[REDDITOR] ' + u.name 133 now = dt.datetime.utcnow() 134 cakeday_start = dt.datetime.utcfromtimestamp(u.created_utc) 135 cakeday_start = cakeday_start.replace(year=now.year) 136 day = dt.timedelta(days=1) 137 year_div_by_400 = now.year % 400 == 0 138 year_div_by_100 = now.year % 100 == 0 139 year_div_by_4 = now.year % 4 == 0 140 is_leap = year_div_by_400 or ((not year_div_by_100) and year_div_by_4) 141 if (not is_leap) and ((cakeday_start.month, cakeday_start.day) == (2, 29)): 142 # If cake day is 2/29 and it's not a leap year, cake day is 1/3. 143 # Cake day begins at exact account creation time. 144 is_cakeday = cakeday_start + day <= now <= cakeday_start + (2 * day) 145 else: 146 is_cakeday = cakeday_start <= now <= cakeday_start + day 147 148 if is_cakeday: 149 message = message + ' | 13Cake day' 150 if commanded: 151 message = message + ' | https://reddit.com/u/' + u.name 152 if u.is_gold: 153 message = message + ' | 08Gold' 154 if u.is_mod: 155 message = message + ' | 05Mod' 156 message = message + (' | Link: ' + str(u.link_karma) + 157 ' | Comment: ' + str(u.comment_karma)) 158 159 bot.say(message) 160 161 162 # If you change the groups here, you'll have to change some things above. 163 @rule('.*%s.*' % user_url) 164 def auto_redditor_info(bot, trigger): 165 redditor_info(bot, trigger) 166 167 168 @require_chanmsg('.setsfw is only permitted in channels') 169 @commands('setsafeforwork', 'setsfw') 170 @example('.setsfw true') 171 @example('.setsfw false') 172 def update_channel(bot, trigger): 173 """ 174 Sets the Safe for Work status (true or false) for the current 175 channel. Defaults to false. 176 """ 177 if bot.channels[trigger.sender].privileges[trigger.nick] < OP: 178 return 179 else: 180 param = 'true' 181 if trigger.group(2) and trigger.group(3): 182 param = trigger.group(3).strip().lower() 183 sfw = param == 'true' 184 bot.db.set_channel_value(trigger.sender, 'sfw', sfw) 185 if sfw: 186 bot.reply('Got it. %s is now flagged as SFW.' % trigger.sender) 187 else: 188 bot.reply('Got it. %s is now flagged as NSFW.' % trigger.sender) 189 190 191 @commands('getsafeforwork', 'getsfw') 192 @example('.getsfw [channel]') 193 def get_channel_sfw(bot, trigger): 194 """ 195 Gets the preferred channel's Safe for Work status, or the current 196 channel's status if no channel given. 
197 """ 198 channel = trigger.group(2) 199 if not channel: 200 channel = trigger.sender 201 if channel.is_nick(): 202 return bot.say('.getsfw with no channel param is only permitted in channels') 203 204 channel = channel.strip() 205 206 sfw = bot.db.get_channel_value(channel, 'sfw') 207 if sfw: 208 bot.say('%s is flagged as SFW' % channel) 209 else: 210 bot.say('%s is flagged as NSFW' % channel) 211 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sopel/modules/reddit.py b/sopel/modules/reddit.py --- a/sopel/modules/reddit.py +++ b/sopel/modules/reddit.py @@ -23,9 +23,11 @@ domain = r'https?://(?:www\.|old\.|pay\.|ssl\.|[a-z]{2}\.)?reddit\.com' -post_url = r'%s/r/(.*?)/comments/([\w-]+)' % domain +post_url = r'%s/r/.*?/comments/([\w-]+)' % domain +short_post_url = r'https?://redd.it/([\w-]+)' user_url = r'%s/u(ser)?/([\w-]+)' % domain post_regex = re.compile(post_url) +short_post_regex = re.compile(short_post_url) user_regex = re.compile(user_url) spoiler_subs = [ 'stevenuniverse', @@ -35,15 +37,18 @@ def setup(bot): bot.register_url_callback(post_regex, rpost_info) + bot.register_url_callback(short_post_regex, rpost_info) bot.register_url_callback(user_regex, redditor_info) def shutdown(bot): bot.unregister_url_callback(post_regex) + bot.unregister_url_callback(short_post_regex) bot.unregister_url_callback(user_regex) @rule('.*%s.*' % post_url) +@rule('.*%s.*' % short_post_url) def rpost_info(bot, trigger, match=None): match = match or trigger try: @@ -52,10 +57,10 @@ client_id='6EiphT6SSQq7FQ', client_secret=None, ) - s = r.submission(id=match.group(2)) + s = r.submission(id=match.group(1)) except Exception: r = praw.Reddit(user_agent=USER_AGENT) - s = r.get_submission(submission_id=match.group(2)) + s = r.get_submission(submission_id=match.group(1)) message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | ' '{comments} comments | Posted by {author} | '
{"golden_diff": "diff --git a/sopel/modules/reddit.py b/sopel/modules/reddit.py\n--- a/sopel/modules/reddit.py\n+++ b/sopel/modules/reddit.py\n@@ -23,9 +23,11 @@\n \n \n domain = r'https?://(?:www\\.|old\\.|pay\\.|ssl\\.|[a-z]{2}\\.)?reddit\\.com'\n-post_url = r'%s/r/(.*?)/comments/([\\w-]+)' % domain\n+post_url = r'%s/r/.*?/comments/([\\w-]+)' % domain\n+short_post_url = r'https?://redd.it/([\\w-]+)'\n user_url = r'%s/u(ser)?/([\\w-]+)' % domain\n post_regex = re.compile(post_url)\n+short_post_regex = re.compile(short_post_url)\n user_regex = re.compile(user_url)\n spoiler_subs = [\n 'stevenuniverse',\n@@ -35,15 +37,18 @@\n \n def setup(bot):\n bot.register_url_callback(post_regex, rpost_info)\n+ bot.register_url_callback(short_post_regex, rpost_info)\n bot.register_url_callback(user_regex, redditor_info)\n \n \n def shutdown(bot):\n bot.unregister_url_callback(post_regex)\n+ bot.unregister_url_callback(short_post_regex)\n bot.unregister_url_callback(user_regex)\n \n \n @rule('.*%s.*' % post_url)\n+@rule('.*%s.*' % short_post_url)\n def rpost_info(bot, trigger, match=None):\n match = match or trigger\n try:\n@@ -52,10 +57,10 @@\n client_id='6EiphT6SSQq7FQ',\n client_secret=None,\n )\n- s = r.submission(id=match.group(2))\n+ s = r.submission(id=match.group(1))\n except Exception:\n r = praw.Reddit(user_agent=USER_AGENT)\n- s = r.get_submission(submission_id=match.group(2))\n+ s = r.get_submission(submission_id=match.group(1))\n \n message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | '\n '{comments} comments | Posted by {author} | '\n", "issue": "reddit: shortlinks are not handled\nReddit supports (and promotes in the comments page sidebar) a short link format for threads, `http(s)://redd.it/{id}`, where the `id` comes from here in a full-size link: `https://www.reddit.com/r/subreddit/comments/{id}/slugified_thread_title/`.\n\nIt should be trivial to add detection and handling of these links in the `reddit` module.\n\nNot assigning to a milestone immediately, because I'd like to release this in a hypothetical version 6.7.0 *if* one happens before 7.0 (but none is planned). 
It does seem slightly more significant a change than would belong in one of the planned 6.6.x releases, unfortunately.\n", "before_files": [{"content": "# coding=utf-8\n# Author: Elsie Powell, embolalia.com\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.module import commands, rule, example, require_chanmsg, NOLIMIT, OP\nfrom sopel.formatting import bold, color, colors\nfrom sopel.web import USER_AGENT\nfrom sopel.tools import time\nimport datetime as dt\nimport praw\nimport re\nimport sys\nif sys.version_info.major >= 3:\n unicode = str\n if sys.version_info.minor >= 4:\n from html import unescape\n else:\n from html.parser import HTMLParser\n unescape = HTMLParser().unescape\nelse:\n from HTMLParser import HTMLParser\n unescape = HTMLParser().unescape\n\n\ndomain = r'https?://(?:www\\.|old\\.|pay\\.|ssl\\.|[a-z]{2}\\.)?reddit\\.com'\npost_url = r'%s/r/(.*?)/comments/([\\w-]+)' % domain\nuser_url = r'%s/u(ser)?/([\\w-]+)' % domain\npost_regex = re.compile(post_url)\nuser_regex = re.compile(user_url)\nspoiler_subs = [\n 'stevenuniverse',\n 'onepunchman',\n]\n\n\ndef setup(bot):\n bot.register_url_callback(post_regex, rpost_info)\n bot.register_url_callback(user_regex, redditor_info)\n\n\ndef shutdown(bot):\n bot.unregister_url_callback(post_regex)\n bot.unregister_url_callback(user_regex)\n\n\n@rule('.*%s.*' % post_url)\ndef rpost_info(bot, trigger, match=None):\n match = match or trigger\n try:\n r = praw.Reddit(\n user_agent=USER_AGENT,\n client_id='6EiphT6SSQq7FQ',\n client_secret=None,\n )\n s = r.submission(id=match.group(2))\n except Exception:\n r = praw.Reddit(user_agent=USER_AGENT)\n s = r.get_submission(submission_id=match.group(2))\n\n message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | '\n '{comments} comments | Posted by {author} | '\n 'Created at {created}')\n\n subreddit = s.subreddit.display_name\n if s.is_self:\n link = '(self.{})'.format(subreddit)\n else:\n link = '({}) to r/{}'.format(s.url, subreddit)\n\n if s.over_18:\n if subreddit.lower() in spoiler_subs:\n nsfw = bold(color(' [SPOILERS]', colors.RED))\n else:\n nsfw = bold(color(' [NSFW]', colors.RED))\n\n sfw = bot.db.get_channel_value(trigger.sender, 'sfw')\n if sfw:\n link = '(link hidden)'\n bot.write(['KICK', trigger.sender, trigger.nick,\n 'Linking to NSFW content in a SFW channel.'])\n else:\n nsfw = ''\n\n if s.author:\n author = s.author.name\n else:\n author = '[deleted]'\n\n tz = time.get_timezone(bot.db, bot.config, None, trigger.nick,\n trigger.sender)\n time_created = dt.datetime.utcfromtimestamp(s.created_utc)\n created = time.format_time(bot.db, bot.config, tz, trigger.nick,\n trigger.sender, time_created)\n\n if s.score > 0:\n point_color = colors.GREEN\n else:\n point_color = colors.RED\n\n percent = color(unicode(s.upvote_ratio * 100) + '%', point_color)\n\n title = unescape(s.title)\n message = message.format(\n title=title, link=link, nsfw=nsfw, points=s.score, percent=percent,\n comments=s.num_comments, author=author, created=created)\n\n bot.say(message)\n\n\n# If you change this, you'll have to change some other things...\n@commands('redditor')\n@example('.redditor poem_for_your_sprog')\ndef redditor_info(bot, trigger, match=None):\n \"\"\"Show information about the given Redditor\"\"\"\n commanded = re.match(bot.config.core.prefix + 'redditor', trigger)\n r = praw.Reddit(\n user_agent=USER_AGENT,\n client_id='6EiphT6SSQq7FQ',\n client_secret=None,\n )\n match = match or trigger\n try:\n u = 
r.get_redditor(match.group(2))\n except Exception: # TODO: Be specific\n if commanded:\n bot.say('No such Redditor.')\n return NOLIMIT\n else:\n return\n # Fail silently if it wasn't an explicit command.\n\n message = '[REDDITOR] ' + u.name\n now = dt.datetime.utcnow()\n cakeday_start = dt.datetime.utcfromtimestamp(u.created_utc)\n cakeday_start = cakeday_start.replace(year=now.year)\n day = dt.timedelta(days=1)\n year_div_by_400 = now.year % 400 == 0\n year_div_by_100 = now.year % 100 == 0\n year_div_by_4 = now.year % 4 == 0\n is_leap = year_div_by_400 or ((not year_div_by_100) and year_div_by_4)\n if (not is_leap) and ((cakeday_start.month, cakeday_start.day) == (2, 29)):\n # If cake day is 2/29 and it's not a leap year, cake day is 1/3.\n # Cake day begins at exact account creation time.\n is_cakeday = cakeday_start + day <= now <= cakeday_start + (2 * day)\n else:\n is_cakeday = cakeday_start <= now <= cakeday_start + day\n\n if is_cakeday:\n message = message + ' | \u0002\u000313Cake day\u0003\u0002'\n if commanded:\n message = message + ' | https://reddit.com/u/' + u.name\n if u.is_gold:\n message = message + ' | \u0002\u000308Gold\u0003\u0002'\n if u.is_mod:\n message = message + ' | \u0002\u000305Mod\u0003\u0002'\n message = message + (' | Link: ' + str(u.link_karma) +\n ' | Comment: ' + str(u.comment_karma))\n\n bot.say(message)\n\n\n# If you change the groups here, you'll have to change some things above.\n@rule('.*%s.*' % user_url)\ndef auto_redditor_info(bot, trigger):\n redditor_info(bot, trigger)\n\n\n@require_chanmsg('.setsfw is only permitted in channels')\n@commands('setsafeforwork', 'setsfw')\n@example('.setsfw true')\n@example('.setsfw false')\ndef update_channel(bot, trigger):\n \"\"\"\n Sets the Safe for Work status (true or false) for the current\n channel. Defaults to false.\n \"\"\"\n if bot.channels[trigger.sender].privileges[trigger.nick] < OP:\n return\n else:\n param = 'true'\n if trigger.group(2) and trigger.group(3):\n param = trigger.group(3).strip().lower()\n sfw = param == 'true'\n bot.db.set_channel_value(trigger.sender, 'sfw', sfw)\n if sfw:\n bot.reply('Got it. %s is now flagged as SFW.' % trigger.sender)\n else:\n bot.reply('Got it. %s is now flagged as NSFW.' 
% trigger.sender)\n\n\n@commands('getsafeforwork', 'getsfw')\n@example('.getsfw [channel]')\ndef get_channel_sfw(bot, trigger):\n \"\"\"\n Gets the preferred channel's Safe for Work status, or the current\n channel's status if no channel given.\n \"\"\"\n channel = trigger.group(2)\n if not channel:\n channel = trigger.sender\n if channel.is_nick():\n return bot.say('.getsfw with no channel param is only permitted in channels')\n\n channel = channel.strip()\n\n sfw = bot.db.get_channel_value(channel, 'sfw')\n if sfw:\n bot.say('%s is flagged as SFW' % channel)\n else:\n bot.say('%s is flagged as NSFW' % channel)\n", "path": "sopel/modules/reddit.py"}], "after_files": [{"content": "# coding=utf-8\n# Author: Elsie Powell, embolalia.com\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.module import commands, rule, example, require_chanmsg, NOLIMIT, OP\nfrom sopel.formatting import bold, color, colors\nfrom sopel.web import USER_AGENT\nfrom sopel.tools import time\nimport datetime as dt\nimport praw\nimport re\nimport sys\nif sys.version_info.major >= 3:\n unicode = str\n if sys.version_info.minor >= 4:\n from html import unescape\n else:\n from html.parser import HTMLParser\n unescape = HTMLParser().unescape\nelse:\n from HTMLParser import HTMLParser\n unescape = HTMLParser().unescape\n\n\ndomain = r'https?://(?:www\\.|old\\.|pay\\.|ssl\\.|[a-z]{2}\\.)?reddit\\.com'\npost_url = r'%s/r/.*?/comments/([\\w-]+)' % domain\nshort_post_url = r'https?://redd.it/([\\w-]+)'\nuser_url = r'%s/u(ser)?/([\\w-]+)' % domain\npost_regex = re.compile(post_url)\nshort_post_regex = re.compile(short_post_url)\nuser_regex = re.compile(user_url)\nspoiler_subs = [\n 'stevenuniverse',\n 'onepunchman',\n]\n\n\ndef setup(bot):\n bot.register_url_callback(post_regex, rpost_info)\n bot.register_url_callback(short_post_regex, rpost_info)\n bot.register_url_callback(user_regex, redditor_info)\n\n\ndef shutdown(bot):\n bot.unregister_url_callback(post_regex)\n bot.unregister_url_callback(short_post_regex)\n bot.unregister_url_callback(user_regex)\n\n\n@rule('.*%s.*' % post_url)\n@rule('.*%s.*' % short_post_url)\ndef rpost_info(bot, trigger, match=None):\n match = match or trigger\n try:\n r = praw.Reddit(\n user_agent=USER_AGENT,\n client_id='6EiphT6SSQq7FQ',\n client_secret=None,\n )\n s = r.submission(id=match.group(1))\n except Exception:\n r = praw.Reddit(user_agent=USER_AGENT)\n s = r.get_submission(submission_id=match.group(1))\n\n message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | '\n '{comments} comments | Posted by {author} | '\n 'Created at {created}')\n\n subreddit = s.subreddit.display_name\n if s.is_self:\n link = '(self.{})'.format(subreddit)\n else:\n link = '({}) to r/{}'.format(s.url, subreddit)\n\n if s.over_18:\n if subreddit.lower() in spoiler_subs:\n nsfw = bold(color(' [SPOILERS]', colors.RED))\n else:\n nsfw = bold(color(' [NSFW]', colors.RED))\n\n sfw = bot.db.get_channel_value(trigger.sender, 'sfw')\n if sfw:\n link = '(link hidden)'\n bot.write(['KICK', trigger.sender, trigger.nick,\n 'Linking to NSFW content in a SFW channel.'])\n else:\n nsfw = ''\n\n if s.author:\n author = s.author.name\n else:\n author = '[deleted]'\n\n tz = time.get_timezone(bot.db, bot.config, None, trigger.nick,\n trigger.sender)\n time_created = dt.datetime.utcfromtimestamp(s.created_utc)\n created = time.format_time(bot.db, bot.config, tz, trigger.nick,\n trigger.sender, time_created)\n\n if s.score > 0:\n point_color = colors.GREEN\n else:\n 
point_color = colors.RED\n\n percent = color(unicode(s.upvote_ratio * 100) + '%', point_color)\n\n title = unescape(s.title)\n message = message.format(\n title=title, link=link, nsfw=nsfw, points=s.score, percent=percent,\n comments=s.num_comments, author=author, created=created)\n\n bot.say(message)\n\n\n# If you change this, you'll have to change some other things...\n@commands('redditor')\n@example('.redditor poem_for_your_sprog')\ndef redditor_info(bot, trigger, match=None):\n \"\"\"Show information about the given Redditor\"\"\"\n commanded = re.match(bot.config.core.prefix + 'redditor', trigger)\n r = praw.Reddit(\n user_agent=USER_AGENT,\n client_id='6EiphT6SSQq7FQ',\n client_secret=None,\n )\n match = match or trigger\n try:\n u = r.get_redditor(match.group(2))\n except Exception: # TODO: Be specific\n if commanded:\n bot.say('No such Redditor.')\n return NOLIMIT\n else:\n return\n # Fail silently if it wasn't an explicit command.\n\n message = '[REDDITOR] ' + u.name\n now = dt.datetime.utcnow()\n cakeday_start = dt.datetime.utcfromtimestamp(u.created_utc)\n cakeday_start = cakeday_start.replace(year=now.year)\n day = dt.timedelta(days=1)\n year_div_by_400 = now.year % 400 == 0\n year_div_by_100 = now.year % 100 == 0\n year_div_by_4 = now.year % 4 == 0\n is_leap = year_div_by_400 or ((not year_div_by_100) and year_div_by_4)\n if (not is_leap) and ((cakeday_start.month, cakeday_start.day) == (2, 29)):\n # If cake day is 2/29 and it's not a leap year, cake day is 1/3.\n # Cake day begins at exact account creation time.\n is_cakeday = cakeday_start + day <= now <= cakeday_start + (2 * day)\n else:\n is_cakeday = cakeday_start <= now <= cakeday_start + day\n\n if is_cakeday:\n message = message + ' | \u0002\u000313Cake day\u0003\u0002'\n if commanded:\n message = message + ' | https://reddit.com/u/' + u.name\n if u.is_gold:\n message = message + ' | \u0002\u000308Gold\u0003\u0002'\n if u.is_mod:\n message = message + ' | \u0002\u000305Mod\u0003\u0002'\n message = message + (' | Link: ' + str(u.link_karma) +\n ' | Comment: ' + str(u.comment_karma))\n\n bot.say(message)\n\n\n# If you change the groups here, you'll have to change some things above.\n@rule('.*%s.*' % user_url)\ndef auto_redditor_info(bot, trigger):\n redditor_info(bot, trigger)\n\n\n@require_chanmsg('.setsfw is only permitted in channels')\n@commands('setsafeforwork', 'setsfw')\n@example('.setsfw true')\n@example('.setsfw false')\ndef update_channel(bot, trigger):\n \"\"\"\n Sets the Safe for Work status (true or false) for the current\n channel. Defaults to false.\n \"\"\"\n if bot.channels[trigger.sender].privileges[trigger.nick] < OP:\n return\n else:\n param = 'true'\n if trigger.group(2) and trigger.group(3):\n param = trigger.group(3).strip().lower()\n sfw = param == 'true'\n bot.db.set_channel_value(trigger.sender, 'sfw', sfw)\n if sfw:\n bot.reply('Got it. %s is now flagged as SFW.' % trigger.sender)\n else:\n bot.reply('Got it. %s is now flagged as NSFW.' 
% trigger.sender)\n\n\n@commands('getsafeforwork', 'getsfw')\n@example('.getsfw [channel]')\ndef get_channel_sfw(bot, trigger):\n \"\"\"\n Gets the preferred channel's Safe for Work status, or the current\n channel's status if no channel given.\n \"\"\"\n channel = trigger.group(2)\n if not channel:\n channel = trigger.sender\n if channel.is_nick():\n return bot.say('.getsfw with no channel param is only permitted in channels')\n\n channel = channel.strip()\n\n sfw = bot.db.get_channel_value(channel, 'sfw')\n if sfw:\n bot.say('%s is flagged as SFW' % channel)\n else:\n bot.say('%s is flagged as NSFW' % channel)\n", "path": "sopel/modules/reddit.py"}]}
2,769
492
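A short illustration of the regex change at the core of the sopel row above: once the subreddit group is dropped from `post_url` and the new `short_post_url` pattern is added, both the full comments URL and the `redd.it` shortlink expose the submission id as `group(1)`, so one callback can serve both rules. This is a standalone sketch with made-up example URLs and a simplified domain pattern, not code from the dataset row itself.

```python
import re

# Simplified versions of the two patterns from the golden diff above.
post_url = r'https?://(?:www\.)?reddit\.com/r/.*?/comments/([\w-]+)'
short_post_url = r'https?://redd.it/([\w-]+)'

for url in ('https://www.reddit.com/r/python/comments/abc123/some_title/',
            'https://redd.it/abc123'):
    match = re.search(post_url, url) or re.search(short_post_url, url)
    print(match.group(1))  # prints 'abc123' for both URL forms
```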
gh_patches_debug_11964
rasdani/github-patches
git_diff
urllib3__urllib3-1439
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 'Failed to parse headers' warning logged when getting message/rfc822 content I've been investigating an [issue](https://github.com/boto/botocore/issues/1551) I've recently discovered when retrieving objects from S3. I've now tracked it to `urllib3`; this test case (which I've added to `urllib3/test/with_dummyserver/test_socketlevel.py`) demonstrates it: ```py class TestOkayHeaders(SocketDummyServerTestCase): def _test_okay_header_parsing(self, header): self.start_response_handler(( b'HTTP/1.1 200 OK\r\n' b'Content-Length: 0\r\n' ) + header + b'\r\n\r\n' ) pool = HTTPConnectionPool(self.host, self.port, retries=False) self.addCleanup(pool.close) with LogRecorder() as logs: pool.request('GET', '/') for record in logs: assert 'Failed to parse headers' not in record.msg def test_header_text_plain(self): self._test_okay_header_parsing(b'Content-type: text/plain') def test_header_message_rfc822(self): self._test_okay_header_parsing(b'Content-type: message/rfc822') ``` The test with `text/plain` passes, while the test with `message/rfc822` fails, and this is logged: ```py Failed to parse headers (url=http://localhost:36732/): Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>] Traceback (most recent call last): File "/home/user/git/urllib3/src/urllib3/connectionpool.py", line 396, in _make_request assert_header_parsing(httplib_response.msg) File "/home/user/git/urllib3/src/urllib3/util/response.py", line 68, in assert_header_parsing raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) urllib3.exceptions.HeaderParsingError: Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>] ``` While retrieving content of type `message/rfc822` still works, the warning message being logged is incorrect and unhelpful. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/urllib3/util/response.py` Content: ``` 1 from __future__ import absolute_import 2 from ..packages.six.moves import http_client as httplib 3 4 from ..exceptions import HeaderParsingError 5 6 7 def is_fp_closed(obj): 8 """ 9 Checks whether a given file-like object is closed. 10 11 :param obj: 12 The file-like object to check. 13 """ 14 15 try: 16 # Check `isclosed()` first, in case Python3 doesn't set `closed`. 17 # GH Issue #928 18 return obj.isclosed() 19 except AttributeError: 20 pass 21 22 try: 23 # Check via the official file-like-object way. 24 return obj.closed 25 except AttributeError: 26 pass 27 28 try: 29 # Check if the object is a container for another file-like object that 30 # gets released on exhaustion (e.g. HTTPResponse). 31 return obj.fp is None 32 except AttributeError: 33 pass 34 35 raise ValueError("Unable to determine whether fp is closed.") 36 37 38 def assert_header_parsing(headers): 39 """ 40 Asserts whether all headers have been successfully parsed. 41 Extracts encountered errors from the result of parsing headers. 42 43 Only works on Python 3. 44 45 :param headers: Headers to verify. 46 :type headers: `httplib.HTTPMessage`. 47 48 :raises urllib3.exceptions.HeaderParsingError: 49 If parsing errors are found. 50 """ 51 52 # This will fail silently if we pass in the wrong kind of parameter. 53 # To make debugging easier add an explicit check. 
54 if not isinstance(headers, httplib.HTTPMessage): 55 raise TypeError('expected httplib.Message, got {0}.'.format( 56 type(headers))) 57 58 defects = getattr(headers, 'defects', None) 59 get_payload = getattr(headers, 'get_payload', None) 60 61 unparsed_data = None 62 if get_payload: # Platform-specific: Python 3. 63 unparsed_data = get_payload() 64 65 if defects or unparsed_data: 66 raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) 67 68 69 def is_response_to_head(response): 70 """ 71 Checks whether the request of a response has been a HEAD-request. 72 Handles the quirks of AppEngine. 73 74 :param conn: 75 :type conn: :class:`httplib.HTTPResponse` 76 """ 77 # FIXME: Can we do this somehow without accessing private httplib _method? 78 method = response._method 79 if isinstance(method, int): # Platform-specific: Appengine 80 return method == 3 81 return method.upper() == 'HEAD' 82 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py --- a/src/urllib3/util/response.py +++ b/src/urllib3/util/response.py @@ -59,8 +59,14 @@ get_payload = getattr(headers, 'get_payload', None) unparsed_data = None - if get_payload: # Platform-specific: Python 3. - unparsed_data = get_payload() + if get_payload: + # get_payload is actually email.message.Message.get_payload; + # we're only interested in the result if it's not a multipart message + if not headers.is_multipart(): + payload = get_payload() + + if isinstance(payload, (bytes, str)): + unparsed_data = payload if defects or unparsed_data: raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
{"golden_diff": "diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py\n--- a/src/urllib3/util/response.py\n+++ b/src/urllib3/util/response.py\n@@ -59,8 +59,14 @@\n get_payload = getattr(headers, 'get_payload', None)\n \n unparsed_data = None\n- if get_payload: # Platform-specific: Python 3.\n- unparsed_data = get_payload()\n+ if get_payload:\n+ # get_payload is actually email.message.Message.get_payload;\n+ # we're only interested in the result if it's not a multipart message\n+ if not headers.is_multipart():\n+ payload = get_payload()\n+\n+ if isinstance(payload, (bytes, str)):\n+ unparsed_data = payload\n \n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n", "issue": "'Failed to parse headers' warning logged when getting message/rfc822 content\nI've been investigating an [issue](https://github.com/boto/botocore/issues/1551) I've recently discovered when retrieving objects from S3. I've now tracked it to `urllib3`; this test case (which I've added to `urllib3/test/with_dummyserver/test_socketlevel.py`) demonstrates it:\r\n\r\n```py\r\nclass TestOkayHeaders(SocketDummyServerTestCase):\r\n\r\n def _test_okay_header_parsing(self, header):\r\n self.start_response_handler((\r\n b'HTTP/1.1 200 OK\\r\\n'\r\n b'Content-Length: 0\\r\\n'\r\n ) + header + b'\\r\\n\\r\\n'\r\n )\r\n\r\n pool = HTTPConnectionPool(self.host, self.port, retries=False)\r\n self.addCleanup(pool.close)\r\n\r\n with LogRecorder() as logs:\r\n pool.request('GET', '/')\r\n\r\n for record in logs:\r\n assert 'Failed to parse headers' not in record.msg\r\n\r\n def test_header_text_plain(self):\r\n self._test_okay_header_parsing(b'Content-type: text/plain')\r\n\r\n def test_header_message_rfc822(self):\r\n self._test_okay_header_parsing(b'Content-type: message/rfc822')\r\n```\r\n\r\nThe test with `text/plain` passes, while the test with `message/rfc822` fails, and this is logged:\r\n\r\n```py\r\nFailed to parse headers (url=http://localhost:36732/): Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>]\r\nTraceback (most recent call last):\r\n File \"/home/user/git/urllib3/src/urllib3/connectionpool.py\", line 396, in _make_request\r\n assert_header_parsing(httplib_response.msg)\r\n File \"/home/user/git/urllib3/src/urllib3/util/response.py\", line 68, in assert_header_parsing\r\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\r\nurllib3.exceptions.HeaderParsingError: Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>]\r\n```\r\n\r\nWhile retrieving content of type `message/rfc822` still works, the warning message being logged is incorrect and unhelpful.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom ..packages.six.moves import http_client as httplib\n\nfrom ..exceptions import HeaderParsingError\n\n\ndef is_fp_closed(obj):\n \"\"\"\n Checks whether a given file-like object is closed.\n\n :param obj:\n The file-like object to check.\n \"\"\"\n\n try:\n # Check `isclosed()` first, in case Python3 doesn't set `closed`.\n # GH Issue #928\n return obj.isclosed()\n except AttributeError:\n pass\n\n try:\n # Check via the official file-like-object way.\n return obj.closed\n except AttributeError:\n pass\n\n try:\n # Check if the object is a container for another file-like object that\n # gets released on exhaustion (e.g. 
HTTPResponse).\n return obj.fp is None\n except AttributeError:\n pass\n\n raise ValueError(\"Unable to determine whether fp is closed.\")\n\n\ndef assert_header_parsing(headers):\n \"\"\"\n Asserts whether all headers have been successfully parsed.\n Extracts encountered errors from the result of parsing headers.\n\n Only works on Python 3.\n\n :param headers: Headers to verify.\n :type headers: `httplib.HTTPMessage`.\n\n :raises urllib3.exceptions.HeaderParsingError:\n If parsing errors are found.\n \"\"\"\n\n # This will fail silently if we pass in the wrong kind of parameter.\n # To make debugging easier add an explicit check.\n if not isinstance(headers, httplib.HTTPMessage):\n raise TypeError('expected httplib.Message, got {0}.'.format(\n type(headers)))\n\n defects = getattr(headers, 'defects', None)\n get_payload = getattr(headers, 'get_payload', None)\n\n unparsed_data = None\n if get_payload: # Platform-specific: Python 3.\n unparsed_data = get_payload()\n\n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n\n\ndef is_response_to_head(response):\n \"\"\"\n Checks whether the request of a response has been a HEAD-request.\n Handles the quirks of AppEngine.\n\n :param conn:\n :type conn: :class:`httplib.HTTPResponse`\n \"\"\"\n # FIXME: Can we do this somehow without accessing private httplib _method?\n method = response._method\n if isinstance(method, int): # Platform-specific: Appengine\n return method == 3\n return method.upper() == 'HEAD'\n", "path": "src/urllib3/util/response.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom ..packages.six.moves import http_client as httplib\n\nfrom ..exceptions import HeaderParsingError\n\n\ndef is_fp_closed(obj):\n \"\"\"\n Checks whether a given file-like object is closed.\n\n :param obj:\n The file-like object to check.\n \"\"\"\n\n try:\n # Check `isclosed()` first, in case Python3 doesn't set `closed`.\n # GH Issue #928\n return obj.isclosed()\n except AttributeError:\n pass\n\n try:\n # Check via the official file-like-object way.\n return obj.closed\n except AttributeError:\n pass\n\n try:\n # Check if the object is a container for another file-like object that\n # gets released on exhaustion (e.g. 
HTTPResponse).\n return obj.fp is None\n except AttributeError:\n pass\n\n raise ValueError(\"Unable to determine whether fp is closed.\")\n\n\ndef assert_header_parsing(headers):\n \"\"\"\n Asserts whether all headers have been successfully parsed.\n Extracts encountered errors from the result of parsing headers.\n\n Only works on Python 3.\n\n :param headers: Headers to verify.\n :type headers: `httplib.HTTPMessage`.\n\n :raises urllib3.exceptions.HeaderParsingError:\n If parsing errors are found.\n \"\"\"\n\n # This will fail silently if we pass in the wrong kind of parameter.\n # To make debugging easier add an explicit check.\n if not isinstance(headers, httplib.HTTPMessage):\n raise TypeError('expected httplib.Message, got {0}.'.format(\n type(headers)))\n\n defects = getattr(headers, 'defects', None)\n get_payload = getattr(headers, 'get_payload', None)\n\n unparsed_data = None\n if get_payload:\n # get_payload is actually email.message.Message.get_payload;\n # we're only interested in the result if it's not a multipart message\n if not headers.is_multipart():\n payload = get_payload()\n\n if isinstance(payload, (bytes, str)):\n unparsed_data = payload\n\n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n\n\ndef is_response_to_head(response):\n \"\"\"\n Checks whether the request of a response has been a HEAD-request.\n Handles the quirks of AppEngine.\n\n :param conn:\n :type conn: :class:`httplib.HTTPResponse`\n \"\"\"\n # FIXME: Can we do this somehow without accessing private httplib _method?\n method = response._method\n if isinstance(method, int): # Platform-specific: Appengine\n return method == 3\n return method.upper() == 'HEAD'\n", "path": "src/urllib3/util/response.py"}]}
1,475
204
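The urllib3 row above turns on how Python's `email` parser treats a `message/rfc822` content type: the payload becomes a nested message object rather than a string, which the old `assert_header_parsing` reported as unparsed data. Below is a minimal standalone sketch of that parser behaviour, using `email.parser` directly rather than urllib3's dummy server from the test case.

```python
from email.parser import Parser

plain = Parser().parsestr('Content-Type: text/plain\r\n\r\n')
rfc822 = Parser().parsestr('Content-Type: message/rfc822\r\n\r\n')

# text/plain: payload is a plain (here empty) string, is_multipart() is False.
print(plain.is_multipart(), repr(plain.get_payload()))

# message/rfc822: payload is a list holding a nested Message object, which is
# why the patch checks is_multipart() and isinstance(payload, (bytes, str))
# before treating the payload as unparsed header data.
print(rfc822.is_multipart(), rfc822.get_payload())
```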
gh_patches_debug_26233
rasdani/github-patches
git_diff
cupy__cupy-5828
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unexpected NaN when using big-endian arrays When a big-endian array is loaded on the GPU using cp.array(), random NaNs appear in the data and calculations will start returning NaN. No errors or warnings are given to the user. ### Conditions CuPy Version : 7.6.0 CUDA Root : /usr/local/cuda CUDA Build Version : 9010 CUDA Driver Version : 10010 CUDA Runtime Version : 9010 cuBLAS Version : 9010 cuFFT Version : 9010 cuRAND Version : 9010 cuSOLVER Version : (9, 1, 0) cuSPARSE Version : 9010 NVRTC Version : (9, 1) cuDNN Build Version : 7102 cuDNN Version : 7102 NCCL Build Version : 2115 NCCL Runtime Version : (unknown) CUB Version : None cuTENSOR Version : None ### Code to reproduce ```import cupy as cp import numpy as np data = np.arange(1000*1000, dtype='>f4')/1e9 print(' numpy:', type(data), data.shape, data.dtype) print(' nan:', np.where(np.isnan(data))) print(' total:', np.sum(data)) arr = cp.array(data) print('-----') print(' cupy:' , type(arr), arr.shape, arr.dtype) print(' nan:' , cp.where(cp.isnan(arr))) print(' total:', cp.sum(arr))` ``` ### Output of the above code: ``` numpy: <class 'numpy.ndarray'> (1000000,) >f4 nan: (array([], dtype=int64),) total: 499.99963 ----- cupy: <class 'cupy.core.core.ndarray'> (1000000,) >f4 nan: (array([ 213, 385, 426, ..., 999227, 999242, 999391]),) total: nan ``` The numpy array shows no NaNs as expected, while the cupy array on the GPU shows several NaNs and functions like sum() that operate on the whole array return NaN as well. ### Scenarios A fairly common occurence in a scientific environment is when readings FITS files, which store data big-endian, for example using the astropy.io module: ``` import cupy as cp from astropy.io import fits data = fits.getdata(filename) gpu_data = cp.array(data) # Results in NaN ``` A workaround is to convert the array to little endian before using it with cupy: `data = data.astype(np.float32)` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cupy/_creation/from_data.py` Content: ``` 1 import numpy 2 3 from cupy import _core 4 from cupy._core import fusion 5 6 7 def array(obj, dtype=None, copy=True, order='K', subok=False, ndmin=0): 8 """Creates an array on the current device. 9 10 This function currently does not support the ``subok`` option. 11 12 Args: 13 obj: :class:`cupy.ndarray` object or any other object that can be 14 passed to :func:`numpy.array`. 15 dtype: Data type specifier. 16 copy (bool): If ``False``, this function returns ``obj`` if possible. 17 Otherwise this function always returns a new array. 18 order ({'C', 'F', 'A', 'K'}): Row-major (C-style) or column-major 19 (Fortran-style) order. 20 When ``order`` is ``'A'``, it uses ``'F'`` if ``a`` is column-major 21 and uses ``'C'`` otherwise. 22 And when ``order`` is ``'K'``, it keeps strides as closely as 23 possible. 24 If ``obj`` is :class:`numpy.ndarray`, the function returns ``'C'`` 25 or ``'F'`` order array. 26 subok (bool): If ``True``, then sub-classes will be passed-through, 27 otherwise the returned array will be forced to be a base-class 28 array (default). 29 ndmin (int): Minimum number of dimensions. Ones are inserted to the 30 head of the shape if needed. 31 32 Returns: 33 cupy.ndarray: An array on the current device. 34 35 .. note:: 36 This method currently does not support ``subok`` argument. 37 38 .. 
seealso:: :func:`numpy.array` 39 40 """ 41 return _core.array(obj, dtype, copy, order, subok, ndmin) 42 43 44 def asarray(a, dtype=None, order=None): 45 """Converts an object to array. 46 47 This is equivalent to ``array(a, dtype, copy=False)``. 48 This function currently does not support the ``order`` option. 49 50 Args: 51 a: The source object. 52 dtype: Data type specifier. It is inferred from the input by default. 53 order ({'C', 'F'}): 54 Whether to use row-major (C-style) or column-major (Fortran-style) 55 memory representation. Defaults to ``'C'``. ``order`` is ignored 56 for objects that are not :class:`cupy.ndarray`, but have the 57 ``__cuda_array_interface__`` attribute. 58 59 Returns: 60 cupy.ndarray: An array on the current device. If ``a`` is already on 61 the device, no copy is performed. 62 63 .. seealso:: :func:`numpy.asarray` 64 65 """ 66 return _core.array(a, dtype, False, order) 67 68 69 def asanyarray(a, dtype=None, order=None): 70 """Converts an object to array. 71 72 This is currently equivalent to :func:`cupy.asarray`, since there is no 73 subclass of :class:`cupy.ndarray` in CuPy. Note that the original 74 :func:`numpy.asanyarray` returns the input array as is if it is an instance 75 of a subtype of :class:`numpy.ndarray`. 76 77 .. seealso:: :func:`cupy.asarray`, :func:`numpy.asanyarray` 78 79 """ 80 return _core.array(a, dtype, False, order) 81 82 83 def ascontiguousarray(a, dtype=None): 84 """Returns a C-contiguous array. 85 86 Args: 87 a (cupy.ndarray): Source array. 88 dtype: Data type specifier. 89 90 Returns: 91 cupy.ndarray: If no copy is required, it returns ``a``. Otherwise, it 92 returns a copy of ``a``. 93 94 .. seealso:: :func:`numpy.ascontiguousarray` 95 96 """ 97 return _core.ascontiguousarray(a, dtype) 98 99 100 # TODO(okuta): Implement asmatrix 101 102 103 def copy(a, order='K'): 104 """Creates a copy of a given array on the current device. 105 106 This function allocates the new array on the current device. If the given 107 array is allocated on the different device, then this function tries to 108 copy the contents over the devices. 109 110 Args: 111 a (cupy.ndarray): The source array. 112 order ({'C', 'F', 'A', 'K'}): Row-major (C-style) or column-major 113 (Fortran-style) order. 114 When ``order`` is ``'A'``, it uses ``'F'`` if ``a`` is column-major 115 and uses ``'C'`` otherwise. 116 And when ``order`` is ``'K'``, it keeps strides as closely as 117 possible. 118 119 Returns: 120 cupy.ndarray: The copy of ``a`` on the current device. 121 122 .. seealso:: :func:`numpy.copy`, :meth:`cupy.ndarray.copy` 123 124 """ 125 if fusion._is_fusing(): 126 if order != 'K': 127 raise NotImplementedError( 128 'cupy.copy does not support `order` in fusion yet.') 129 return fusion._call_ufunc(_core.elementwise_copy, a) 130 131 # If the current device is different from the device of ``a``, then this 132 # function allocates a new array on the current device, and copies the 133 # contents over the devices. 134 return a.copy(order=order) 135 136 137 # TODO(okuta): Implement frombuffer 138 139 140 def fromfile(*args, **kwargs): 141 """Reads an array from a file. 142 143 .. note:: 144 Uses NumPy's ``fromfile`` and coerces the result to a CuPy array. 145 146 .. 
seealso:: :func:`numpy.fromfile` 147 148 """ 149 return asarray(numpy.fromfile(*args, **kwargs)) 150 151 152 # TODO(okuta): Implement fromfunction 153 154 155 # TODO(okuta): Implement fromiter 156 157 158 # TODO(okuta): Implement fromstring 159 160 161 # TODO(okuta): Implement loadtxt 162 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cupy/_creation/from_data.py b/cupy/_creation/from_data.py --- a/cupy/_creation/from_data.py +++ b/cupy/_creation/from_data.py @@ -35,6 +35,11 @@ .. note:: This method currently does not support ``subok`` argument. + .. note:: + If ``obj`` is an `numpy.ndarray` instance that contains big-endian data, + this function automatically swaps its byte order to little-endian, + which is the NVIDIA and AMD GPU architecture's native use. + .. seealso:: :func:`numpy.array` """ @@ -60,6 +65,11 @@ cupy.ndarray: An array on the current device. If ``a`` is already on the device, no copy is performed. + .. note:: + If ``a`` is an `numpy.ndarray` instance that contains big-endian data, + this function automatically swaps its byte order to little-endian, + which is the NVIDIA and AMD GPU architecture's native use. + .. seealso:: :func:`numpy.asarray` """ @@ -143,6 +153,11 @@ .. note:: Uses NumPy's ``fromfile`` and coerces the result to a CuPy array. + .. note:: + If you let NumPy's ``fromfile`` read the file in big-endian, CuPy + automatically swaps its byte order to little-endian, which is the NVIDIA + and AMD GPU architecture's native use. + .. seealso:: :func:`numpy.fromfile` """
{"golden_diff": "diff --git a/cupy/_creation/from_data.py b/cupy/_creation/from_data.py\n--- a/cupy/_creation/from_data.py\n+++ b/cupy/_creation/from_data.py\n@@ -35,6 +35,11 @@\n .. note::\n This method currently does not support ``subok`` argument.\n \n+ .. note::\n+ If ``obj`` is an `numpy.ndarray` instance that contains big-endian data,\n+ this function automatically swaps its byte order to little-endian,\n+ which is the NVIDIA and AMD GPU architecture's native use.\n+\n .. seealso:: :func:`numpy.array`\n \n \"\"\"\n@@ -60,6 +65,11 @@\n cupy.ndarray: An array on the current device. If ``a`` is already on\n the device, no copy is performed.\n \n+ .. note::\n+ If ``a`` is an `numpy.ndarray` instance that contains big-endian data,\n+ this function automatically swaps its byte order to little-endian,\n+ which is the NVIDIA and AMD GPU architecture's native use.\n+\n .. seealso:: :func:`numpy.asarray`\n \n \"\"\"\n@@ -143,6 +153,11 @@\n .. note::\n Uses NumPy's ``fromfile`` and coerces the result to a CuPy array.\n \n+ .. note::\n+ If you let NumPy's ``fromfile`` read the file in big-endian, CuPy\n+ automatically swaps its byte order to little-endian, which is the NVIDIA\n+ and AMD GPU architecture's native use.\n+\n .. seealso:: :func:`numpy.fromfile`\n \n \"\"\"\n", "issue": "Unexpected NaN when using big-endian arrays\nWhen a big-endian array is loaded on the GPU using cp.array(), random NaNs appear in the data and calculations will start returning NaN. No errors or warnings are given to the user.\r\n\r\n\r\n### Conditions\r\nCuPy Version : 7.6.0\r\nCUDA Root : /usr/local/cuda\r\nCUDA Build Version : 9010\r\nCUDA Driver Version : 10010\r\nCUDA Runtime Version : 9010\r\ncuBLAS Version : 9010\r\ncuFFT Version : 9010\r\ncuRAND Version : 9010\r\ncuSOLVER Version : (9, 1, 0)\r\ncuSPARSE Version : 9010\r\nNVRTC Version : (9, 1)\r\ncuDNN Build Version : 7102\r\ncuDNN Version : 7102\r\nNCCL Build Version : 2115\r\nNCCL Runtime Version : (unknown)\r\nCUB Version : None\r\ncuTENSOR Version : None\r\n\r\n### Code to reproduce\r\n```import cupy as cp\r\nimport numpy as np\r\n\r\ndata = np.arange(1000*1000, dtype='>f4')/1e9\r\nprint(' numpy:', type(data), data.shape, data.dtype)\r\nprint(' nan:', np.where(np.isnan(data)))\r\nprint(' total:', np.sum(data))\r\n\r\narr = cp.array(data)\r\nprint('-----')\r\nprint(' cupy:' , type(arr), arr.shape, arr.dtype)\r\nprint(' nan:' , cp.where(cp.isnan(arr)))\r\nprint(' total:', cp.sum(arr))`\r\n```\r\n\r\n### Output of the above code:\r\n\r\n```\r\n numpy: <class 'numpy.ndarray'> (1000000,) >f4\r\n nan: (array([], dtype=int64),)\r\n total: 499.99963\r\n-----\r\n cupy: <class 'cupy.core.core.ndarray'> (1000000,) >f4\r\n nan: (array([ 213, 385, 426, ..., 999227, 999242, 999391]),)\r\n total: nan\r\n```\r\n\r\nThe numpy array shows no NaNs as expected, while the cupy array on the GPU shows several NaNs and functions like sum() that operate on the whole array return NaN as well.\r\n\r\n### Scenarios\r\n\r\nA fairly common occurence in a scientific environment is when readings FITS files, which store data big-endian, for example using the astropy.io module:\r\n\r\n```\r\nimport cupy as cp\r\nfrom astropy.io import fits\r\n\r\ndata = fits.getdata(filename)\r\ngpu_data = cp.array(data) # Results in NaN\r\n```\r\n\r\nA workaround is to convert the array to little endian before using it with cupy:\r\n\r\n`data = data.astype(np.float32)`\r\n\r\n\n", "before_files": [{"content": "import numpy\n\nfrom cupy import _core\nfrom cupy._core import fusion\n\n\ndef array(obj, 
dtype=None, copy=True, order='K', subok=False, ndmin=0):\n \"\"\"Creates an array on the current device.\n\n This function currently does not support the ``subok`` option.\n\n Args:\n obj: :class:`cupy.ndarray` object or any other object that can be\n passed to :func:`numpy.array`.\n dtype: Data type specifier.\n copy (bool): If ``False``, this function returns ``obj`` if possible.\n Otherwise this function always returns a new array.\n order ({'C', 'F', 'A', 'K'}): Row-major (C-style) or column-major\n (Fortran-style) order.\n When ``order`` is ``'A'``, it uses ``'F'`` if ``a`` is column-major\n and uses ``'C'`` otherwise.\n And when ``order`` is ``'K'``, it keeps strides as closely as\n possible.\n If ``obj`` is :class:`numpy.ndarray`, the function returns ``'C'``\n or ``'F'`` order array.\n subok (bool): If ``True``, then sub-classes will be passed-through,\n otherwise the returned array will be forced to be a base-class\n array (default).\n ndmin (int): Minimum number of dimensions. Ones are inserted to the\n head of the shape if needed.\n\n Returns:\n cupy.ndarray: An array on the current device.\n\n .. note::\n This method currently does not support ``subok`` argument.\n\n .. seealso:: :func:`numpy.array`\n\n \"\"\"\n return _core.array(obj, dtype, copy, order, subok, ndmin)\n\n\ndef asarray(a, dtype=None, order=None):\n \"\"\"Converts an object to array.\n\n This is equivalent to ``array(a, dtype, copy=False)``.\n This function currently does not support the ``order`` option.\n\n Args:\n a: The source object.\n dtype: Data type specifier. It is inferred from the input by default.\n order ({'C', 'F'}):\n Whether to use row-major (C-style) or column-major (Fortran-style)\n memory representation. Defaults to ``'C'``. ``order`` is ignored\n for objects that are not :class:`cupy.ndarray`, but have the\n ``__cuda_array_interface__`` attribute.\n\n Returns:\n cupy.ndarray: An array on the current device. If ``a`` is already on\n the device, no copy is performed.\n\n .. seealso:: :func:`numpy.asarray`\n\n \"\"\"\n return _core.array(a, dtype, False, order)\n\n\ndef asanyarray(a, dtype=None, order=None):\n \"\"\"Converts an object to array.\n\n This is currently equivalent to :func:`cupy.asarray`, since there is no\n subclass of :class:`cupy.ndarray` in CuPy. Note that the original\n :func:`numpy.asanyarray` returns the input array as is if it is an instance\n of a subtype of :class:`numpy.ndarray`.\n\n .. seealso:: :func:`cupy.asarray`, :func:`numpy.asanyarray`\n\n \"\"\"\n return _core.array(a, dtype, False, order)\n\n\ndef ascontiguousarray(a, dtype=None):\n \"\"\"Returns a C-contiguous array.\n\n Args:\n a (cupy.ndarray): Source array.\n dtype: Data type specifier.\n\n Returns:\n cupy.ndarray: If no copy is required, it returns ``a``. Otherwise, it\n returns a copy of ``a``.\n\n .. seealso:: :func:`numpy.ascontiguousarray`\n\n \"\"\"\n return _core.ascontiguousarray(a, dtype)\n\n\n# TODO(okuta): Implement asmatrix\n\n\ndef copy(a, order='K'):\n \"\"\"Creates a copy of a given array on the current device.\n\n This function allocates the new array on the current device. 
If the given\n array is allocated on the different device, then this function tries to\n copy the contents over the devices.\n\n Args:\n a (cupy.ndarray): The source array.\n order ({'C', 'F', 'A', 'K'}): Row-major (C-style) or column-major\n (Fortran-style) order.\n When ``order`` is ``'A'``, it uses ``'F'`` if ``a`` is column-major\n and uses ``'C'`` otherwise.\n And when ``order`` is ``'K'``, it keeps strides as closely as\n possible.\n\n Returns:\n cupy.ndarray: The copy of ``a`` on the current device.\n\n .. seealso:: :func:`numpy.copy`, :meth:`cupy.ndarray.copy`\n\n \"\"\"\n if fusion._is_fusing():\n if order != 'K':\n raise NotImplementedError(\n 'cupy.copy does not support `order` in fusion yet.')\n return fusion._call_ufunc(_core.elementwise_copy, a)\n\n # If the current device is different from the device of ``a``, then this\n # function allocates a new array on the current device, and copies the\n # contents over the devices.\n return a.copy(order=order)\n\n\n# TODO(okuta): Implement frombuffer\n\n\ndef fromfile(*args, **kwargs):\n \"\"\"Reads an array from a file.\n\n .. note::\n Uses NumPy's ``fromfile`` and coerces the result to a CuPy array.\n\n .. seealso:: :func:`numpy.fromfile`\n\n \"\"\"\n return asarray(numpy.fromfile(*args, **kwargs))\n\n\n# TODO(okuta): Implement fromfunction\n\n\n# TODO(okuta): Implement fromiter\n\n\n# TODO(okuta): Implement fromstring\n\n\n# TODO(okuta): Implement loadtxt\n", "path": "cupy/_creation/from_data.py"}], "after_files": [{"content": "import numpy\n\nfrom cupy import _core\nfrom cupy._core import fusion\n\n\ndef array(obj, dtype=None, copy=True, order='K', subok=False, ndmin=0):\n \"\"\"Creates an array on the current device.\n\n This function currently does not support the ``subok`` option.\n\n Args:\n obj: :class:`cupy.ndarray` object or any other object that can be\n passed to :func:`numpy.array`.\n dtype: Data type specifier.\n copy (bool): If ``False``, this function returns ``obj`` if possible.\n Otherwise this function always returns a new array.\n order ({'C', 'F', 'A', 'K'}): Row-major (C-style) or column-major\n (Fortran-style) order.\n When ``order`` is ``'A'``, it uses ``'F'`` if ``a`` is column-major\n and uses ``'C'`` otherwise.\n And when ``order`` is ``'K'``, it keeps strides as closely as\n possible.\n If ``obj`` is :class:`numpy.ndarray`, the function returns ``'C'``\n or ``'F'`` order array.\n subok (bool): If ``True``, then sub-classes will be passed-through,\n otherwise the returned array will be forced to be a base-class\n array (default).\n ndmin (int): Minimum number of dimensions. Ones are inserted to the\n head of the shape if needed.\n\n Returns:\n cupy.ndarray: An array on the current device.\n\n .. note::\n This method currently does not support ``subok`` argument.\n\n .. note::\n If ``obj`` is an `numpy.ndarray` instance that contains big-endian data,\n this function automatically swaps its byte order to little-endian,\n which is the NVIDIA and AMD GPU architecture's native use.\n\n .. seealso:: :func:`numpy.array`\n\n \"\"\"\n return _core.array(obj, dtype, copy, order, subok, ndmin)\n\n\ndef asarray(a, dtype=None, order=None):\n \"\"\"Converts an object to array.\n\n This is equivalent to ``array(a, dtype, copy=False)``.\n This function currently does not support the ``order`` option.\n\n Args:\n a: The source object.\n dtype: Data type specifier. It is inferred from the input by default.\n order ({'C', 'F'}):\n Whether to use row-major (C-style) or column-major (Fortran-style)\n memory representation. 
Defaults to ``'C'``. ``order`` is ignored\n for objects that are not :class:`cupy.ndarray`, but have the\n ``__cuda_array_interface__`` attribute.\n\n Returns:\n cupy.ndarray: An array on the current device. If ``a`` is already on\n the device, no copy is performed.\n\n .. note::\n If ``a`` is an `numpy.ndarray` instance that contains big-endian data,\n this function automatically swaps its byte order to little-endian,\n which is the NVIDIA and AMD GPU architecture's native use.\n\n .. seealso:: :func:`numpy.asarray`\n\n \"\"\"\n return _core.array(a, dtype, False, order)\n\n\ndef asanyarray(a, dtype=None, order=None):\n \"\"\"Converts an object to array.\n\n This is currently equivalent to :func:`cupy.asarray`, since there is no\n subclass of :class:`cupy.ndarray` in CuPy. Note that the original\n :func:`numpy.asanyarray` returns the input array as is if it is an instance\n of a subtype of :class:`numpy.ndarray`.\n\n .. seealso:: :func:`cupy.asarray`, :func:`numpy.asanyarray`\n\n \"\"\"\n return _core.array(a, dtype, False, order)\n\n\ndef ascontiguousarray(a, dtype=None):\n \"\"\"Returns a C-contiguous array.\n\n Args:\n a (cupy.ndarray): Source array.\n dtype: Data type specifier.\n\n Returns:\n cupy.ndarray: If no copy is required, it returns ``a``. Otherwise, it\n returns a copy of ``a``.\n\n .. seealso:: :func:`numpy.ascontiguousarray`\n\n \"\"\"\n return _core.ascontiguousarray(a, dtype)\n\n\n# TODO(okuta): Implement asmatrix\n\n\ndef copy(a, order='K'):\n \"\"\"Creates a copy of a given array on the current device.\n\n This function allocates the new array on the current device. If the given\n array is allocated on the different device, then this function tries to\n copy the contents over the devices.\n\n Args:\n a (cupy.ndarray): The source array.\n order ({'C', 'F', 'A', 'K'}): Row-major (C-style) or column-major\n (Fortran-style) order.\n When ``order`` is ``'A'``, it uses ``'F'`` if ``a`` is column-major\n and uses ``'C'`` otherwise.\n And when ``order`` is ``'K'``, it keeps strides as closely as\n possible.\n\n Returns:\n cupy.ndarray: The copy of ``a`` on the current device.\n\n .. seealso:: :func:`numpy.copy`, :meth:`cupy.ndarray.copy`\n\n \"\"\"\n if fusion._is_fusing():\n if order != 'K':\n raise NotImplementedError(\n 'cupy.copy does not support `order` in fusion yet.')\n return fusion._call_ufunc(_core.elementwise_copy, a)\n\n # If the current device is different from the device of ``a``, then this\n # function allocates a new array on the current device, and copies the\n # contents over the devices.\n return a.copy(order=order)\n\n\n# TODO(okuta): Implement frombuffer\n\n\ndef fromfile(*args, **kwargs):\n \"\"\"Reads an array from a file.\n\n .. note::\n Uses NumPy's ``fromfile`` and coerces the result to a CuPy array.\n\n .. note::\n If you let NumPy's ``fromfile`` read the file in big-endian, CuPy\n automatically swaps its byte order to little-endian, which is the NVIDIA\n and AMD GPU architecture's native use.\n\n .. seealso:: :func:`numpy.fromfile`\n\n \"\"\"\n return asarray(numpy.fromfile(*args, **kwargs))\n\n\n# TODO(okuta): Implement fromfunction\n\n\n# TODO(okuta): Implement fromiter\n\n\n# TODO(okuta): Implement fromstring\n\n\n# TODO(okuta): Implement loadtxt\n", "path": "cupy/_creation/from_data.py"}]}
2,581
361
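The after_files in the CuPy record above add docstring notes stating that `cupy.array`/`cupy.asarray` automatically swap big-endian host data to little-endian on the device. A minimal sketch of that documented behavior — the byte-swap claim is taken from the patched docstrings, not independently verified, and running it assumes a CUDA-capable GPU with CuPy installed:

```python
import numpy
import cupy

host = numpy.arange(4, dtype='>f4')   # big-endian float32 on the host
dev = cupy.asarray(host)              # per the patched docstring, the device copy
                                      # is stored in native little-endian byte order
print(host.dtype)                     # >f4
print(dev.dtype)                      # expected to be native float32 per the note
print(cupy.asnumpy(dev))              # [0. 1. 2. 3.]
```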
gh_patches_debug_42411
rasdani/github-patches
git_diff
wright-group__WrightTools-120
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- better WMEL defaults --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `WrightTools/diagrams/WMEL.py` Content: ``` 1 ### import #################################################################### 2 3 4 import numpy as np 5 import matplotlib.pyplot as plt 6 7 8 ### define #################################################################### 9 10 11 ### subplot ################################################################### 12 13 14 class Subplot: 15 16 def __init__(self, ax, energies, number_of_interactions=4, title='', 17 title_font_size=16, state_names=None, virtual=[None], 18 state_font_size=8, state_text_buffer=0.5, label_side='left'): 19 ''' 20 Subplot. 21 22 Parameters 23 ---------- 24 ax : matplotlib axis 25 The axis. 26 energies : 1D array-like 27 Energies (scaled between 0 and 1) 28 number_of_interactions : integer 29 Number of interactions in diagram. 30 title : string (optional) 31 Title of subplot. Default is empty string. 32 state_names: list of str (optional) 33 list of the names of the states 34 virtual: list of ints (optional) 35 list of indexes of any vitual energy states 36 state_font_size: numtype (optional) 37 font size for the state lables 38 state_text_buffer: numtype (optional) 39 space between the energy level bars and the state labels 40 ''' 41 self.ax = ax 42 self.energies = energies 43 self.interactions = number_of_interactions 44 self.state_names = state_names 45 46 # Plot Energy Levels 47 for i in range(len(self.energies)): 48 if i in virtual: 49 linestyle = '--' 50 else: 51 linestyle = '-' 52 self.ax.axhline(self.energies[i], color='k', linewidth=2, 53 ls=linestyle, zorder=5) 54 55 # add state names 56 if isinstance(state_names, list): 57 for i in range(len(self.energies)): 58 if label_side == 'left': 59 ax.text(-state_text_buffer, energies[i], state_names[i], 60 fontsize=state_font_size, verticalalignment='center', 61 horizontalalignment='center') 62 elif label_side == 'right': 63 ax.text(1 + state_text_buffer, energies[i], state_names[i], 64 fontsize=state_font_size, verticalalignment='center', 65 horizontalalignment='center') 66 # calculate interaction_positons 67 self.x_pos = np.linspace(0, 1, number_of_interactions) 68 # set limits 69 self.ax.set_xlim(-0.1, 1.1) 70 self.ax.set_ylim(-0.01, 1.01) 71 # remove guff 72 self.ax.axis('off') 73 # title 74 self.ax.set_title(title, fontsize=title_font_size) 75 76 def add_arrow(self, index, between, kind, label='', head_length=0.075, 77 head_aspect=2, font_size=7, color='k'): 78 ''' 79 Add an arrow to the WMEL diagram. 80 81 Parameters 82 ---------- 83 index : integer 84 The interaction, or start and stop interaction for the arrow. 85 between : 2-element iterable of integers 86 The inital and final state of the arrow 87 kind : {'ket', 'bra'} 88 The kind of interaction. 89 label : string (optional) 90 Interaction label. Default is empty string. 91 head_length: number (optional) 92 size of arrow head 93 font_size : number (optional) 94 Label font size. Default is 14. 95 color : matplotlib color (optional) 96 Arrow color. Default is black. 
97 98 Returns 99 ------- 100 [line,arrow_head,text] 101 ''' 102 if hasattr(index, 'index'): 103 x_pos = list(index) 104 else: 105 x_pos = [index] * 2 106 x_pos = [np.linspace(0, 1, self.interactions)[i] for i in x_pos] 107 108 # calculate arrow length 109 arrow_length = self.energies[between[1]] - self.energies[between[0]] 110 arrow_end = self.energies[between[1]] 111 if arrow_length > 0: 112 direction = 1 113 y_poss = [self.energies[between[0]], self.energies[between[1]] - head_length] 114 elif arrow_length < 0: 115 direction = -1 116 y_poss = [self.energies[between[0]], self.energies[between[1]] + head_length] 117 else: 118 raise ValueError('between invalid!') 119 120 length = abs(y_poss[0] - y_poss[1]) 121 if kind == 'ket': 122 line = self.ax.plot(x_pos, y_poss, linestyle='-', color=color, 123 linewidth=2, zorder=9) 124 elif kind == 'bra': 125 line = self.ax.plot(x_pos, y_poss, linestyle='--', color=color, 126 linewidth=2, zorder=9) 127 elif kind == 'out': 128 yi = np.linspace(y_poss[0], y_poss[1], 100) 129 xi = np.sin((yi - y_poss[0]) * int((1 / length) * 20) 130 * 2 * np.pi * length) / 40 + x_pos[0] 131 line = self.ax.plot(xi, yi, linestyle='-', color=color, 132 linewidth=2, solid_capstyle='butt', zorder=9) 133 else: 134 raise ValueError('kind is not \'ket\', \'out\', or \'bra\'.') 135 # add arrow head 136 arrow_head = self.ax.arrow(x_pos[1], arrow_end - head_length * direction, 137 0, 0.0001 * direction, 138 head_width=head_length * head_aspect, 139 head_length=head_length, 140 fc=color, ec=color, linestyle='solid', 141 linewidth=0, zorder=10) 142 # add text 143 text = self.ax.text(np.mean(x_pos), -0.1, label, fontsize=font_size, 144 horizontalalignment='center') 145 return line, arrow_head, text 146 147 148 ### artist #################################################################### 149 150 151 class Artist: 152 153 def __init__(self, size, energies, state_names=None, 154 number_of_interactions=4, virtual=[None], 155 state_font_size=8, state_text_buffer=0.5): 156 ''' 157 virtual a list of indicies 158 ''' 159 # create figure 160 figsize = [int(size[0] * ((number_of_interactions + 1.) 
/ 6.)), size[1] * 2.5] 161 fig, (subplots) = plt.subplots(size[1], size[0], figsize=figsize) 162 self.fig = fig 163 # wrap subplots if need be 164 if size == [1, 1]: 165 self.subplots = np.array([[subplots]]) 166 plt.subplots_adjust(left=0.3) 167 elif size[1] == 1: 168 self.subplots = np.array([subplots]) 169 else: 170 self.subplots = subplots 171 # add energy levels 172 self.energies = energies 173 for plot in self.subplots.flatten(): 174 for i in range(len(self.energies)): 175 if i in virtual: 176 linestyle = '--' 177 else: 178 linestyle = '-' 179 plot.axhline(energies[i], color='k', linewidth=2, linestyle=linestyle) 180 # add state names to leftmost plots 181 if state_names: 182 for i in range(size[1]): 183 plot = self.subplots[i][0] 184 for i in range(len(self.energies)): 185 plot.text(-state_text_buffer, energies[i], state_names[i], fontsize=state_font_size, 186 verticalalignment='center', horizontalalignment='center') 187 # calculate interaction_positons 188 self.x_pos = np.linspace(0, 1, number_of_interactions) 189 # plot cleans up a bunch - call it now as well as later 190 self.plot() 191 192 def label_rows(self, labels, font_size=15, text_buffer=1.5): 193 for i in range(len(self.subplots)): 194 plot = self.subplots[i][-1] 195 plot.text(text_buffer, 0.5, labels[i], fontsize=font_size, 196 verticalalignment='center', horizontalalignment='center') 197 198 def label_columns(self, labels, font_size=15, text_buffer=1.15): 199 for i in range(len(labels)): 200 plot = self.subplots[0][i] 201 plot.text(0.5, text_buffer, labels[i], fontsize=font_size, 202 verticalalignment='center', horizontalalignment='center') 203 204 def clear_diagram(self, diagram): 205 plot = self.subplots[diagram[1]][diagram[0]] 206 plot.cla() 207 208 def add_arrow(self, diagram, number, between, kind, label='', 209 head_length=0.075, font_size=7, color='k'): 210 ''' 211 kind one in [ket, bra, out] \n 212 returns [line, arrow_head, text] 213 ''' 214 column, row = diagram 215 x_pos = self.x_pos[number] 216 # calculate arrow length 217 arrow_length = self.energies[between[1]] - self.energies[between[0]] 218 arrow_end = self.energies[between[1]] 219 if arrow_length > 0: 220 direction = 1 221 y_poss = [self.energies[between[0]], self.energies[between[1]] - head_length] 222 elif arrow_length < 0: 223 direction = -1 224 y_poss = [self.energies[between[0]], self.energies[between[1]] + head_length] 225 else: 226 print('between invalid!') 227 return 228 subplot = self.subplots[row][column] 229 # add line 230 length = abs(y_poss[0] - y_poss[1]) 231 if kind == 'ket': 232 line = subplot.plot([x_pos, x_pos], y_poss, linestyle='-', color=color, linewidth=2) 233 elif kind == 'bra': 234 line = subplot.plot([x_pos, x_pos], y_poss, linestyle='--', color=color, linewidth=2) 235 elif kind == 'out': 236 yi = np.linspace(y_poss[0], y_poss[1], 100) 237 xi = np.sin((yi - y_poss[0]) * int((1 / length) * 20) 238 * 2 * np.pi * length) / 40 + x_pos 239 line = subplot.plot(xi, yi, linestyle='-', color=color, 240 linewidth=2, solid_capstyle='butt') 241 # add arrow head 242 arrow_head = subplot.arrow(self.x_pos[number], arrow_end - head_length * direction, 243 0, 0.0001 * direction, 244 head_width=head_length * 2, 245 head_length=head_length, 246 fc=color, ec=color, linestyle='solid', linewidth=0) 247 # add text 248 text = subplot.text(self.x_pos[number], -0.1, label, 249 fontsize=font_size, horizontalalignment='center') 250 return line, arrow_head, text 251 252 def plot(self, save_path=None, close=False, bbox_inches='tight', pad_inches=1): 253 # 
final manipulations 254 for plot in self.subplots.flatten(): 255 # set limits 256 plot.set_xlim(-0.1, 1.1) 257 plot.set_ylim(-0.1, 1.1) 258 # remove guff 259 plot.axis('off') 260 # save 261 if save_path: 262 plt.savefig(save_path, transparent=True, dpi=300, 263 bbox_inches=bbox_inches, pad_inches=pad_inches) 264 # close 265 if close: 266 plt.close() 267 268 269 ### testing ################################################################### 270 271 272 if __name__ == '__main__': 273 # testing code 274 275 plt.close('all') 276 277 diagram = Artist(size=[6, 3], 278 energies=[0., 0.4, 0.6, 1.], 279 state_names=['g', 'a', 'b', 'a+b']) 280 281 diagram.label_rows([r'$\mathrm{\alpha}$', r'$\mathrm{\beta}$', r'$\mathrm{\gamma}$']) 282 diagram.label_columns(['I', 'II', 'III', 'IV', 'V', 'VI']) 283 284 # pw1 alpha 285 diagram.add_arrow([0, 0], 0, [0, 2], 'ket', '1') 286 diagram.add_arrow([0, 0], 1, [0, 1], 'bra', '-2') 287 diagram.add_arrow([0, 0], 2, [1, 0], 'bra', '2\'') 288 diagram.add_arrow([0, 0], 3, [3, 0], 'out') 289 290 diagram.add_arrow([1, 0], 3, [2, 0], 'out') 291 292 diagram.clear_diagram([2, 1]) 293 294 diagram.plot('WMEL_out.png') 295 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/WrightTools/diagrams/WMEL.py b/WrightTools/diagrams/WMEL.py --- a/WrightTools/diagrams/WMEL.py +++ b/WrightTools/diagrams/WMEL.py @@ -15,9 +15,8 @@ def __init__(self, ax, energies, number_of_interactions=4, title='', title_font_size=16, state_names=None, virtual=[None], - state_font_size=8, state_text_buffer=0.5, label_side='left'): - ''' - Subplot. + state_font_size=14, state_text_buffer=0.5, label_side='left'): + """ Subplot. Parameters ---------- @@ -37,7 +36,7 @@ font size for the state lables state_text_buffer: numtype (optional) space between the energy level bars and the state labels - ''' + """ self.ax = ax self.energies = energies self.interactions = number_of_interactions @@ -73,10 +72,9 @@ # title self.ax.set_title(title, fontsize=title_font_size) - def add_arrow(self, index, between, kind, label='', head_length=0.075, - head_aspect=2, font_size=7, color='k'): - ''' - Add an arrow to the WMEL diagram. + def add_arrow(self, index, between, kind, label='', head_length=0.1, + head_aspect=2, font_size=14, color='k'): + """ Add an arrow to the WMEL diagram. Parameters ---------- @@ -98,7 +96,7 @@ Returns ------- [line,arrow_head,text] - ''' + """ if hasattr(index, 'index'): x_pos = list(index) else: @@ -140,7 +138,7 @@ fc=color, ec=color, linestyle='solid', linewidth=0, zorder=10) # add text - text = self.ax.text(np.mean(x_pos), -0.1, label, fontsize=font_size, + text = self.ax.text(np.mean(x_pos), -0.15, label, fontsize=font_size, horizontalalignment='center') return line, arrow_head, text @@ -153,9 +151,7 @@ def __init__(self, size, energies, state_names=None, number_of_interactions=4, virtual=[None], state_font_size=8, state_text_buffer=0.5): - ''' - virtual a list of indicies - ''' + """ virtual a list of indicies """ # create figure figsize = [int(size[0] * ((number_of_interactions + 1.) / 6.)), size[1] * 2.5] fig, (subplots) = plt.subplots(size[1], size[0], figsize=figsize) @@ -207,10 +203,10 @@ def add_arrow(self, diagram, number, between, kind, label='', head_length=0.075, font_size=7, color='k'): - ''' + """ kind one in [ket, bra, out] \n returns [line, arrow_head, text] - ''' + """ column, row = diagram x_pos = self.x_pos[number] # calculate arrow length
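The patch above only raises the plotting defaults (`state_font_size` 8 → 14, arrow `head_length` 0.075 → 0.1, arrow label `font_size` 7 → 14, a slightly lower label baseline) and tidies the docstrings. A hypothetical usage sketch of the patched `Subplot` API follows; the import path `WrightTools.diagrams.WMEL` is assumed from the file location above, and matplotlib is assumed to be installed:

```python
import matplotlib.pyplot as plt
from WrightTools.diagrams import WMEL  # assumed import path based on the file location

fig, ax = plt.subplots()
sub = WMEL.Subplot(ax, energies=[0.0, 0.4, 0.6, 1.0],
                   number_of_interactions=4,
                   state_names=['g', 'a', 'b', 'a+b'])  # state labels now default to fontsize 14
sub.add_arrow(0, [0, 2], 'ket', label='1')   # head_length=0.1, font_size=14 by default
sub.add_arrow(1, [0, 1], 'bra', label='-2')
sub.add_arrow(3, [3, 0], 'out')
fig.savefig('wmel_defaults.png')
```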
{"golden_diff": "diff --git a/WrightTools/diagrams/WMEL.py b/WrightTools/diagrams/WMEL.py\n--- a/WrightTools/diagrams/WMEL.py\n+++ b/WrightTools/diagrams/WMEL.py\n@@ -15,9 +15,8 @@\n \n def __init__(self, ax, energies, number_of_interactions=4, title='',\n title_font_size=16, state_names=None, virtual=[None],\n- state_font_size=8, state_text_buffer=0.5, label_side='left'):\n- '''\n- Subplot.\n+ state_font_size=14, state_text_buffer=0.5, label_side='left'):\n+ \"\"\" Subplot.\n \n Parameters\n ----------\n@@ -37,7 +36,7 @@\n font size for the state lables\n state_text_buffer: numtype (optional)\n space between the energy level bars and the state labels\n- '''\n+ \"\"\"\n self.ax = ax\n self.energies = energies\n self.interactions = number_of_interactions\n@@ -73,10 +72,9 @@\n # title\n self.ax.set_title(title, fontsize=title_font_size)\n \n- def add_arrow(self, index, between, kind, label='', head_length=0.075,\n- head_aspect=2, font_size=7, color='k'):\n- '''\n- Add an arrow to the WMEL diagram.\n+ def add_arrow(self, index, between, kind, label='', head_length=0.1,\n+ head_aspect=2, font_size=14, color='k'):\n+ \"\"\" Add an arrow to the WMEL diagram.\n \n Parameters\n ----------\n@@ -98,7 +96,7 @@\n Returns\n -------\n [line,arrow_head,text]\n- '''\n+ \"\"\"\n if hasattr(index, 'index'):\n x_pos = list(index)\n else:\n@@ -140,7 +138,7 @@\n fc=color, ec=color, linestyle='solid',\n linewidth=0, zorder=10)\n # add text\n- text = self.ax.text(np.mean(x_pos), -0.1, label, fontsize=font_size,\n+ text = self.ax.text(np.mean(x_pos), -0.15, label, fontsize=font_size,\n horizontalalignment='center')\n return line, arrow_head, text\n \n@@ -153,9 +151,7 @@\n def __init__(self, size, energies, state_names=None,\n number_of_interactions=4, virtual=[None],\n state_font_size=8, state_text_buffer=0.5):\n- '''\n- virtual a list of indicies\n- '''\n+ \"\"\" virtual a list of indicies \"\"\"\n # create figure\n figsize = [int(size[0] * ((number_of_interactions + 1.) / 6.)), size[1] * 2.5]\n fig, (subplots) = plt.subplots(size[1], size[0], figsize=figsize)\n@@ -207,10 +203,10 @@\n \n def add_arrow(self, diagram, number, between, kind, label='',\n head_length=0.075, font_size=7, color='k'):\n- '''\n+ \"\"\"\n kind one in [ket, bra, out] \\n\n returns [line, arrow_head, text]\n- '''\n+ \"\"\"\n column, row = diagram\n x_pos = self.x_pos[number]\n # calculate arrow length\n", "issue": "better WMEL defaults\n\n", "before_files": [{"content": "### import ####################################################################\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n### define ####################################################################\n\n\n### subplot ###################################################################\n\n\nclass Subplot:\n\n def __init__(self, ax, energies, number_of_interactions=4, title='',\n title_font_size=16, state_names=None, virtual=[None],\n state_font_size=8, state_text_buffer=0.5, label_side='left'):\n '''\n Subplot.\n\n Parameters\n ----------\n ax : matplotlib axis\n The axis.\n energies : 1D array-like\n Energies (scaled between 0 and 1)\n number_of_interactions : integer\n Number of interactions in diagram.\n title : string (optional)\n Title of subplot. 
Default is empty string.\n state_names: list of str (optional)\n list of the names of the states\n virtual: list of ints (optional)\n list of indexes of any vitual energy states\n state_font_size: numtype (optional)\n font size for the state lables\n state_text_buffer: numtype (optional)\n space between the energy level bars and the state labels\n '''\n self.ax = ax\n self.energies = energies\n self.interactions = number_of_interactions\n self.state_names = state_names\n\n # Plot Energy Levels\n for i in range(len(self.energies)):\n if i in virtual:\n linestyle = '--'\n else:\n linestyle = '-'\n self.ax.axhline(self.energies[i], color='k', linewidth=2,\n ls=linestyle, zorder=5)\n\n # add state names\n if isinstance(state_names, list):\n for i in range(len(self.energies)):\n if label_side == 'left':\n ax.text(-state_text_buffer, energies[i], state_names[i],\n fontsize=state_font_size, verticalalignment='center',\n horizontalalignment='center')\n elif label_side == 'right':\n ax.text(1 + state_text_buffer, energies[i], state_names[i],\n fontsize=state_font_size, verticalalignment='center',\n horizontalalignment='center')\n # calculate interaction_positons\n self.x_pos = np.linspace(0, 1, number_of_interactions)\n # set limits\n self.ax.set_xlim(-0.1, 1.1)\n self.ax.set_ylim(-0.01, 1.01)\n # remove guff\n self.ax.axis('off')\n # title\n self.ax.set_title(title, fontsize=title_font_size)\n\n def add_arrow(self, index, between, kind, label='', head_length=0.075,\n head_aspect=2, font_size=7, color='k'):\n '''\n Add an arrow to the WMEL diagram.\n\n Parameters\n ----------\n index : integer\n The interaction, or start and stop interaction for the arrow.\n between : 2-element iterable of integers\n The inital and final state of the arrow\n kind : {'ket', 'bra'}\n The kind of interaction.\n label : string (optional)\n Interaction label. Default is empty string.\n head_length: number (optional)\n size of arrow head\n font_size : number (optional)\n Label font size. Default is 14.\n color : matplotlib color (optional)\n Arrow color. 
Default is black.\n\n Returns\n -------\n [line,arrow_head,text]\n '''\n if hasattr(index, 'index'):\n x_pos = list(index)\n else:\n x_pos = [index] * 2\n x_pos = [np.linspace(0, 1, self.interactions)[i] for i in x_pos]\n\n # calculate arrow length\n arrow_length = self.energies[between[1]] - self.energies[between[0]]\n arrow_end = self.energies[between[1]]\n if arrow_length > 0:\n direction = 1\n y_poss = [self.energies[between[0]], self.energies[between[1]] - head_length]\n elif arrow_length < 0:\n direction = -1\n y_poss = [self.energies[between[0]], self.energies[between[1]] + head_length]\n else:\n raise ValueError('between invalid!')\n\n length = abs(y_poss[0] - y_poss[1])\n if kind == 'ket':\n line = self.ax.plot(x_pos, y_poss, linestyle='-', color=color,\n linewidth=2, zorder=9)\n elif kind == 'bra':\n line = self.ax.plot(x_pos, y_poss, linestyle='--', color=color,\n linewidth=2, zorder=9)\n elif kind == 'out':\n yi = np.linspace(y_poss[0], y_poss[1], 100)\n xi = np.sin((yi - y_poss[0]) * int((1 / length) * 20)\n * 2 * np.pi * length) / 40 + x_pos[0]\n line = self.ax.plot(xi, yi, linestyle='-', color=color,\n linewidth=2, solid_capstyle='butt', zorder=9)\n else:\n raise ValueError('kind is not \\'ket\\', \\'out\\', or \\'bra\\'.')\n # add arrow head\n arrow_head = self.ax.arrow(x_pos[1], arrow_end - head_length * direction,\n 0, 0.0001 * direction,\n head_width=head_length * head_aspect,\n head_length=head_length,\n fc=color, ec=color, linestyle='solid',\n linewidth=0, zorder=10)\n # add text\n text = self.ax.text(np.mean(x_pos), -0.1, label, fontsize=font_size,\n horizontalalignment='center')\n return line, arrow_head, text\n\n\n### artist ####################################################################\n\n\nclass Artist:\n\n def __init__(self, size, energies, state_names=None,\n number_of_interactions=4, virtual=[None],\n state_font_size=8, state_text_buffer=0.5):\n '''\n virtual a list of indicies\n '''\n # create figure\n figsize = [int(size[0] * ((number_of_interactions + 1.) 
/ 6.)), size[1] * 2.5]\n fig, (subplots) = plt.subplots(size[1], size[0], figsize=figsize)\n self.fig = fig\n # wrap subplots if need be\n if size == [1, 1]:\n self.subplots = np.array([[subplots]])\n plt.subplots_adjust(left=0.3)\n elif size[1] == 1:\n self.subplots = np.array([subplots])\n else:\n self.subplots = subplots\n # add energy levels\n self.energies = energies\n for plot in self.subplots.flatten():\n for i in range(len(self.energies)):\n if i in virtual:\n linestyle = '--'\n else:\n linestyle = '-'\n plot.axhline(energies[i], color='k', linewidth=2, linestyle=linestyle)\n # add state names to leftmost plots\n if state_names:\n for i in range(size[1]):\n plot = self.subplots[i][0]\n for i in range(len(self.energies)):\n plot.text(-state_text_buffer, energies[i], state_names[i], fontsize=state_font_size,\n verticalalignment='center', horizontalalignment='center')\n # calculate interaction_positons\n self.x_pos = np.linspace(0, 1, number_of_interactions)\n # plot cleans up a bunch - call it now as well as later\n self.plot()\n\n def label_rows(self, labels, font_size=15, text_buffer=1.5):\n for i in range(len(self.subplots)):\n plot = self.subplots[i][-1]\n plot.text(text_buffer, 0.5, labels[i], fontsize=font_size,\n verticalalignment='center', horizontalalignment='center')\n\n def label_columns(self, labels, font_size=15, text_buffer=1.15):\n for i in range(len(labels)):\n plot = self.subplots[0][i]\n plot.text(0.5, text_buffer, labels[i], fontsize=font_size,\n verticalalignment='center', horizontalalignment='center')\n\n def clear_diagram(self, diagram):\n plot = self.subplots[diagram[1]][diagram[0]]\n plot.cla()\n\n def add_arrow(self, diagram, number, between, kind, label='',\n head_length=0.075, font_size=7, color='k'):\n '''\n kind one in [ket, bra, out] \\n\n returns [line, arrow_head, text]\n '''\n column, row = diagram\n x_pos = self.x_pos[number]\n # calculate arrow length\n arrow_length = self.energies[between[1]] - self.energies[between[0]]\n arrow_end = self.energies[between[1]]\n if arrow_length > 0:\n direction = 1\n y_poss = [self.energies[between[0]], self.energies[between[1]] - head_length]\n elif arrow_length < 0:\n direction = -1\n y_poss = [self.energies[between[0]], self.energies[between[1]] + head_length]\n else:\n print('between invalid!')\n return\n subplot = self.subplots[row][column]\n # add line\n length = abs(y_poss[0] - y_poss[1])\n if kind == 'ket':\n line = subplot.plot([x_pos, x_pos], y_poss, linestyle='-', color=color, linewidth=2)\n elif kind == 'bra':\n line = subplot.plot([x_pos, x_pos], y_poss, linestyle='--', color=color, linewidth=2)\n elif kind == 'out':\n yi = np.linspace(y_poss[0], y_poss[1], 100)\n xi = np.sin((yi - y_poss[0]) * int((1 / length) * 20)\n * 2 * np.pi * length) / 40 + x_pos\n line = subplot.plot(xi, yi, linestyle='-', color=color,\n linewidth=2, solid_capstyle='butt')\n # add arrow head\n arrow_head = subplot.arrow(self.x_pos[number], arrow_end - head_length * direction,\n 0, 0.0001 * direction,\n head_width=head_length * 2,\n head_length=head_length,\n fc=color, ec=color, linestyle='solid', linewidth=0)\n # add text\n text = subplot.text(self.x_pos[number], -0.1, label,\n fontsize=font_size, horizontalalignment='center')\n return line, arrow_head, text\n\n def plot(self, save_path=None, close=False, bbox_inches='tight', pad_inches=1):\n # final manipulations\n for plot in self.subplots.flatten():\n # set limits\n plot.set_xlim(-0.1, 1.1)\n plot.set_ylim(-0.1, 1.1)\n # remove guff\n plot.axis('off')\n # save\n if 
save_path:\n plt.savefig(save_path, transparent=True, dpi=300,\n bbox_inches=bbox_inches, pad_inches=pad_inches)\n # close\n if close:\n plt.close()\n\n\n### testing ###################################################################\n\n\nif __name__ == '__main__':\n # testing code\n\n plt.close('all')\n\n diagram = Artist(size=[6, 3],\n energies=[0., 0.4, 0.6, 1.],\n state_names=['g', 'a', 'b', 'a+b'])\n\n diagram.label_rows([r'$\\mathrm{\\alpha}$', r'$\\mathrm{\\beta}$', r'$\\mathrm{\\gamma}$'])\n diagram.label_columns(['I', 'II', 'III', 'IV', 'V', 'VI'])\n\n # pw1 alpha\n diagram.add_arrow([0, 0], 0, [0, 2], 'ket', '1')\n diagram.add_arrow([0, 0], 1, [0, 1], 'bra', '-2')\n diagram.add_arrow([0, 0], 2, [1, 0], 'bra', '2\\'')\n diagram.add_arrow([0, 0], 3, [3, 0], 'out')\n\n diagram.add_arrow([1, 0], 3, [2, 0], 'out')\n\n diagram.clear_diagram([2, 1])\n\n diagram.plot('WMEL_out.png')\n", "path": "WrightTools/diagrams/WMEL.py"}], "after_files": [{"content": "### import ####################################################################\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n### define ####################################################################\n\n\n### subplot ###################################################################\n\n\nclass Subplot:\n\n def __init__(self, ax, energies, number_of_interactions=4, title='',\n title_font_size=16, state_names=None, virtual=[None],\n state_font_size=14, state_text_buffer=0.5, label_side='left'):\n \"\"\" Subplot.\n\n Parameters\n ----------\n ax : matplotlib axis\n The axis.\n energies : 1D array-like\n Energies (scaled between 0 and 1)\n number_of_interactions : integer\n Number of interactions in diagram.\n title : string (optional)\n Title of subplot. Default is empty string.\n state_names: list of str (optional)\n list of the names of the states\n virtual: list of ints (optional)\n list of indexes of any vitual energy states\n state_font_size: numtype (optional)\n font size for the state lables\n state_text_buffer: numtype (optional)\n space between the energy level bars and the state labels\n \"\"\"\n self.ax = ax\n self.energies = energies\n self.interactions = number_of_interactions\n self.state_names = state_names\n\n # Plot Energy Levels\n for i in range(len(self.energies)):\n if i in virtual:\n linestyle = '--'\n else:\n linestyle = '-'\n self.ax.axhline(self.energies[i], color='k', linewidth=2,\n ls=linestyle, zorder=5)\n\n # add state names\n if isinstance(state_names, list):\n for i in range(len(self.energies)):\n if label_side == 'left':\n ax.text(-state_text_buffer, energies[i], state_names[i],\n fontsize=state_font_size, verticalalignment='center',\n horizontalalignment='center')\n elif label_side == 'right':\n ax.text(1 + state_text_buffer, energies[i], state_names[i],\n fontsize=state_font_size, verticalalignment='center',\n horizontalalignment='center')\n # calculate interaction_positons\n self.x_pos = np.linspace(0, 1, number_of_interactions)\n # set limits\n self.ax.set_xlim(-0.1, 1.1)\n self.ax.set_ylim(-0.01, 1.01)\n # remove guff\n self.ax.axis('off')\n # title\n self.ax.set_title(title, fontsize=title_font_size)\n\n def add_arrow(self, index, between, kind, label='', head_length=0.1,\n head_aspect=2, font_size=14, color='k'):\n \"\"\" Add an arrow to the WMEL diagram.\n\n Parameters\n ----------\n index : integer\n The interaction, or start and stop interaction for the arrow.\n between : 2-element iterable of integers\n The inital and final state of the arrow\n kind : {'ket', 'bra'}\n The 
kind of interaction.\n label : string (optional)\n Interaction label. Default is empty string.\n head_length: number (optional)\n size of arrow head\n font_size : number (optional)\n Label font size. Default is 14.\n color : matplotlib color (optional)\n Arrow color. Default is black.\n\n Returns\n -------\n [line,arrow_head,text]\n \"\"\"\n if hasattr(index, 'index'):\n x_pos = list(index)\n else:\n x_pos = [index] * 2\n x_pos = [np.linspace(0, 1, self.interactions)[i] for i in x_pos]\n\n # calculate arrow length\n arrow_length = self.energies[between[1]] - self.energies[between[0]]\n arrow_end = self.energies[between[1]]\n if arrow_length > 0:\n direction = 1\n y_poss = [self.energies[between[0]], self.energies[between[1]] - head_length]\n elif arrow_length < 0:\n direction = -1\n y_poss = [self.energies[between[0]], self.energies[between[1]] + head_length]\n else:\n raise ValueError('between invalid!')\n\n length = abs(y_poss[0] - y_poss[1])\n if kind == 'ket':\n line = self.ax.plot(x_pos, y_poss, linestyle='-', color=color,\n linewidth=2, zorder=9)\n elif kind == 'bra':\n line = self.ax.plot(x_pos, y_poss, linestyle='--', color=color,\n linewidth=2, zorder=9)\n elif kind == 'out':\n yi = np.linspace(y_poss[0], y_poss[1], 100)\n xi = np.sin((yi - y_poss[0]) * int((1 / length) * 20)\n * 2 * np.pi * length) / 40 + x_pos[0]\n line = self.ax.plot(xi, yi, linestyle='-', color=color,\n linewidth=2, solid_capstyle='butt', zorder=9)\n else:\n raise ValueError('kind is not \\'ket\\', \\'out\\', or \\'bra\\'.')\n # add arrow head\n arrow_head = self.ax.arrow(x_pos[1], arrow_end - head_length * direction,\n 0, 0.0001 * direction,\n head_width=head_length * head_aspect,\n head_length=head_length,\n fc=color, ec=color, linestyle='solid',\n linewidth=0, zorder=10)\n # add text\n text = self.ax.text(np.mean(x_pos), -0.15, label, fontsize=font_size,\n horizontalalignment='center')\n return line, arrow_head, text\n\n\n### artist ####################################################################\n\n\nclass Artist:\n\n def __init__(self, size, energies, state_names=None,\n number_of_interactions=4, virtual=[None],\n state_font_size=8, state_text_buffer=0.5):\n \"\"\" virtual a list of indicies \"\"\"\n # create figure\n figsize = [int(size[0] * ((number_of_interactions + 1.) 
/ 6.)), size[1] * 2.5]\n fig, (subplots) = plt.subplots(size[1], size[0], figsize=figsize)\n self.fig = fig\n # wrap subplots if need be\n if size == [1, 1]:\n self.subplots = np.array([[subplots]])\n plt.subplots_adjust(left=0.3)\n elif size[1] == 1:\n self.subplots = np.array([subplots])\n else:\n self.subplots = subplots\n # add energy levels\n self.energies = energies\n for plot in self.subplots.flatten():\n for i in range(len(self.energies)):\n if i in virtual:\n linestyle = '--'\n else:\n linestyle = '-'\n plot.axhline(energies[i], color='k', linewidth=2, linestyle=linestyle)\n # add state names to leftmost plots\n if state_names:\n for i in range(size[1]):\n plot = self.subplots[i][0]\n for i in range(len(self.energies)):\n plot.text(-state_text_buffer, energies[i], state_names[i], fontsize=state_font_size,\n verticalalignment='center', horizontalalignment='center')\n # calculate interaction_positons\n self.x_pos = np.linspace(0, 1, number_of_interactions)\n # plot cleans up a bunch - call it now as well as later\n self.plot()\n\n def label_rows(self, labels, font_size=15, text_buffer=1.5):\n for i in range(len(self.subplots)):\n plot = self.subplots[i][-1]\n plot.text(text_buffer, 0.5, labels[i], fontsize=font_size,\n verticalalignment='center', horizontalalignment='center')\n\n def label_columns(self, labels, font_size=15, text_buffer=1.15):\n for i in range(len(labels)):\n plot = self.subplots[0][i]\n plot.text(0.5, text_buffer, labels[i], fontsize=font_size,\n verticalalignment='center', horizontalalignment='center')\n\n def clear_diagram(self, diagram):\n plot = self.subplots[diagram[1]][diagram[0]]\n plot.cla()\n\n def add_arrow(self, diagram, number, between, kind, label='',\n head_length=0.075, font_size=7, color='k'):\n \"\"\"\n kind one in [ket, bra, out] \\n\n returns [line, arrow_head, text]\n \"\"\"\n column, row = diagram\n x_pos = self.x_pos[number]\n # calculate arrow length\n arrow_length = self.energies[between[1]] - self.energies[between[0]]\n arrow_end = self.energies[between[1]]\n if arrow_length > 0:\n direction = 1\n y_poss = [self.energies[between[0]], self.energies[between[1]] - head_length]\n elif arrow_length < 0:\n direction = -1\n y_poss = [self.energies[between[0]], self.energies[between[1]] + head_length]\n else:\n print('between invalid!')\n return\n subplot = self.subplots[row][column]\n # add line\n length = abs(y_poss[0] - y_poss[1])\n if kind == 'ket':\n line = subplot.plot([x_pos, x_pos], y_poss, linestyle='-', color=color, linewidth=2)\n elif kind == 'bra':\n line = subplot.plot([x_pos, x_pos], y_poss, linestyle='--', color=color, linewidth=2)\n elif kind == 'out':\n yi = np.linspace(y_poss[0], y_poss[1], 100)\n xi = np.sin((yi - y_poss[0]) * int((1 / length) * 20)\n * 2 * np.pi * length) / 40 + x_pos\n line = subplot.plot(xi, yi, linestyle='-', color=color,\n linewidth=2, solid_capstyle='butt')\n # add arrow head\n arrow_head = subplot.arrow(self.x_pos[number], arrow_end - head_length * direction,\n 0, 0.0001 * direction,\n head_width=head_length * 2,\n head_length=head_length,\n fc=color, ec=color, linestyle='solid', linewidth=0)\n # add text\n text = subplot.text(self.x_pos[number], -0.1, label,\n fontsize=font_size, horizontalalignment='center')\n return line, arrow_head, text\n\n def plot(self, save_path=None, close=False, bbox_inches='tight', pad_inches=1):\n # final manipulations\n for plot in self.subplots.flatten():\n # set limits\n plot.set_xlim(-0.1, 1.1)\n plot.set_ylim(-0.1, 1.1)\n # remove guff\n plot.axis('off')\n # save\n if 
save_path:\n plt.savefig(save_path, transparent=True, dpi=300,\n bbox_inches=bbox_inches, pad_inches=pad_inches)\n # close\n if close:\n plt.close()\n\n\n### testing ###################################################################\n\n\nif __name__ == '__main__':\n # testing code\n\n plt.close('all')\n\n diagram = Artist(size=[6, 3],\n energies=[0., 0.4, 0.6, 1.],\n state_names=['g', 'a', 'b', 'a+b'])\n\n diagram.label_rows([r'$\\mathrm{\\alpha}$', r'$\\mathrm{\\beta}$', r'$\\mathrm{\\gamma}$'])\n diagram.label_columns(['I', 'II', 'III', 'IV', 'V', 'VI'])\n\n # pw1 alpha\n diagram.add_arrow([0, 0], 0, [0, 2], 'ket', '1')\n diagram.add_arrow([0, 0], 1, [0, 1], 'bra', '-2')\n diagram.add_arrow([0, 0], 2, [1, 0], 'bra', '2\\'')\n diagram.add_arrow([0, 0], 3, [3, 0], 'out')\n\n diagram.add_arrow([1, 0], 3, [2, 0], 'out')\n\n diagram.clear_diagram([2, 1])\n\n diagram.plot('WMEL_out.png')\n", "path": "WrightTools/diagrams/WMEL.py"}]}
3,826
785
gh_patches_debug_2822
rasdani/github-patches
git_diff
StackStorm__st2-5092
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove obsolete 'bin/st2-check-license' The project ships https://github.com/StackStorm/st2/blob/master/st2common/bin/st2-check-license which is irrelevant now, considering ongoing EWC features integration in the st2 core. The task is to find all the places: https://github.com/search?q=org%3AStackStorm+st2-check-license&type=code and remove the `st2-check-license` scripts. This is an easy `good first issue` for someone willing to start contributing and exploring the st2 system. Bonus points to find other obsolete, outdated, irrelevant scripts in st2 core. Help wanted! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `st2common/setup.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Copyright 2020 The StackStorm Authors. 3 # Copyright 2019 Extreme Networks, Inc. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 from __future__ import absolute_import 18 import os.path 19 20 from setuptools import setup, find_packages 21 22 from dist_utils import fetch_requirements 23 from dist_utils import apply_vagrant_workaround 24 from dist_utils import get_version_string 25 26 ST2_COMPONENT = 'st2common' 27 BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 28 REQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt') 29 INIT_FILE = os.path.join(BASE_DIR, 'st2common/__init__.py') 30 31 install_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE) 32 33 apply_vagrant_workaround() 34 setup( 35 name=ST2_COMPONENT, 36 version=get_version_string(INIT_FILE), 37 description='{} StackStorm event-driven automation platform component'.format(ST2_COMPONENT), 38 author='StackStorm', 39 author_email='[email protected]', 40 license='Apache License (2.0)', 41 url='https://stackstorm.com/', 42 install_requires=install_reqs, 43 dependency_links=dep_links, 44 test_suite=ST2_COMPONENT, 45 zip_safe=False, 46 include_package_data=True, 47 packages=find_packages(exclude=['setuptools', 'tests']), 48 scripts=[ 49 'bin/st2-bootstrap-rmq', 50 'bin/st2-cleanup-db', 51 'bin/st2-register-content', 52 'bin/st2-purge-executions', 53 'bin/st2-purge-trigger-instances', 54 'bin/st2-run-pack-tests', 55 'bin/st2ctl', 56 'bin/st2-generate-symmetric-crypto-key', 57 'bin/st2-self-check', 58 'bin/st2-track-result', 59 'bin/st2-validate-pack-config', 60 'bin/st2-check-license', 61 'bin/st2-pack-install', 62 'bin/st2-pack-download', 63 'bin/st2-pack-setup-virtualenv' 64 ], 65 entry_points={ 66 'st2common.metrics.driver': [ 67 'statsd = st2common.metrics.drivers.statsd_driver:StatsdDriver', 68 'noop = st2common.metrics.drivers.noop_driver:NoopDriver', 69 'echo = st2common.metrics.drivers.echo_driver:EchoDriver' 70 ], 71 'st2common.rbac.backend': [ 72 'noop = st2common.rbac.backends.noop:NoOpRBACBackend' 73 ], 74 } 75 ) 76 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then 
generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/st2common/setup.py b/st2common/setup.py --- a/st2common/setup.py +++ b/st2common/setup.py @@ -57,7 +57,6 @@ 'bin/st2-self-check', 'bin/st2-track-result', 'bin/st2-validate-pack-config', - 'bin/st2-check-license', 'bin/st2-pack-install', 'bin/st2-pack-download', 'bin/st2-pack-setup-virtualenv'
{"golden_diff": "diff --git a/st2common/setup.py b/st2common/setup.py\n--- a/st2common/setup.py\n+++ b/st2common/setup.py\n@@ -57,7 +57,6 @@\n 'bin/st2-self-check',\n 'bin/st2-track-result',\n 'bin/st2-validate-pack-config',\n- 'bin/st2-check-license',\n 'bin/st2-pack-install',\n 'bin/st2-pack-download',\n 'bin/st2-pack-setup-virtualenv'\n", "issue": "Remove obsolete 'bin/st2-check-license'\nThe project ships https://github.com/StackStorm/st2/blob/master/st2common/bin/st2-check-license which is irrelevant now, considering ongoing EWC features integration in the st2 core.\r\n\r\nThe task is to find all the places: https://github.com/search?q=org%3AStackStorm+st2-check-license&type=code and remove the `st2-check-license` scripts.\r\n\r\nThis is an easy `good first issue` for someone willing to start contributing and exploring the st2 system.\r\nBonus points to find other obsolete, outdated, irrelevant scripts in st2 core.\r\nHelp wanted!\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport os.path\n\nfrom setuptools import setup, find_packages\n\nfrom dist_utils import fetch_requirements\nfrom dist_utils import apply_vagrant_workaround\nfrom dist_utils import get_version_string\n\nST2_COMPONENT = 'st2common'\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nREQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt')\nINIT_FILE = os.path.join(BASE_DIR, 'st2common/__init__.py')\n\ninstall_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)\n\napply_vagrant_workaround()\nsetup(\n name=ST2_COMPONENT,\n version=get_version_string(INIT_FILE),\n description='{} StackStorm event-driven automation platform component'.format(ST2_COMPONENT),\n author='StackStorm',\n author_email='[email protected]',\n license='Apache License (2.0)',\n url='https://stackstorm.com/',\n install_requires=install_reqs,\n dependency_links=dep_links,\n test_suite=ST2_COMPONENT,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=['setuptools', 'tests']),\n scripts=[\n 'bin/st2-bootstrap-rmq',\n 'bin/st2-cleanup-db',\n 'bin/st2-register-content',\n 'bin/st2-purge-executions',\n 'bin/st2-purge-trigger-instances',\n 'bin/st2-run-pack-tests',\n 'bin/st2ctl',\n 'bin/st2-generate-symmetric-crypto-key',\n 'bin/st2-self-check',\n 'bin/st2-track-result',\n 'bin/st2-validate-pack-config',\n 'bin/st2-check-license',\n 'bin/st2-pack-install',\n 'bin/st2-pack-download',\n 'bin/st2-pack-setup-virtualenv'\n ],\n entry_points={\n 'st2common.metrics.driver': [\n 'statsd = st2common.metrics.drivers.statsd_driver:StatsdDriver',\n 'noop = st2common.metrics.drivers.noop_driver:NoopDriver',\n 'echo = st2common.metrics.drivers.echo_driver:EchoDriver'\n ],\n 'st2common.rbac.backend': [\n 'noop = st2common.rbac.backends.noop:NoOpRBACBackend'\n ],\n }\n)\n", "path": "st2common/setup.py"}], "after_files": [{"content": "# -*- 
coding: utf-8 -*-\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport os.path\n\nfrom setuptools import setup, find_packages\n\nfrom dist_utils import fetch_requirements\nfrom dist_utils import apply_vagrant_workaround\nfrom dist_utils import get_version_string\n\nST2_COMPONENT = 'st2common'\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nREQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt')\nINIT_FILE = os.path.join(BASE_DIR, 'st2common/__init__.py')\n\ninstall_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)\n\napply_vagrant_workaround()\nsetup(\n name=ST2_COMPONENT,\n version=get_version_string(INIT_FILE),\n description='{} StackStorm event-driven automation platform component'.format(ST2_COMPONENT),\n author='StackStorm',\n author_email='[email protected]',\n license='Apache License (2.0)',\n url='https://stackstorm.com/',\n install_requires=install_reqs,\n dependency_links=dep_links,\n test_suite=ST2_COMPONENT,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=['setuptools', 'tests']),\n scripts=[\n 'bin/st2-bootstrap-rmq',\n 'bin/st2-cleanup-db',\n 'bin/st2-register-content',\n 'bin/st2-purge-executions',\n 'bin/st2-purge-trigger-instances',\n 'bin/st2-run-pack-tests',\n 'bin/st2ctl',\n 'bin/st2-generate-symmetric-crypto-key',\n 'bin/st2-self-check',\n 'bin/st2-track-result',\n 'bin/st2-validate-pack-config',\n 'bin/st2-pack-install',\n 'bin/st2-pack-download',\n 'bin/st2-pack-setup-virtualenv'\n ],\n entry_points={\n 'st2common.metrics.driver': [\n 'statsd = st2common.metrics.drivers.statsd_driver:StatsdDriver',\n 'noop = st2common.metrics.drivers.noop_driver:NoopDriver',\n 'echo = st2common.metrics.drivers.echo_driver:EchoDriver'\n ],\n 'st2common.rbac.backend': [\n 'noop = st2common.rbac.backends.noop:NoOpRBACBackend'\n ],\n }\n)\n", "path": "st2common/setup.py"}]}
1,168
105
gh_patches_debug_743
rasdani/github-patches
git_diff
HypothesisWorks__hypothesis-3148
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- clarification on `note` https://hypothesis.readthedocs.io/en/latest/details.html#hypothesis.note states `Report this value in the final execution.` From my test, `note` wasn't printed on successful run and was printed on falsified run. Please help me understand this functionality --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `hypothesis-python/src/hypothesis/control.py` Content: ``` 1 # This file is part of Hypothesis, which may be found at 2 # https://github.com/HypothesisWorks/hypothesis/ 3 # 4 # Most of this work is copyright (C) 2013-2021 David R. MacIver 5 # ([email protected]), but it contains contributions by others. See 6 # CONTRIBUTING.rst for a full list of people who may hold copyright, and 7 # consult the git log if you need to determine who owns an individual 8 # contribution. 9 # 10 # This Source Code Form is subject to the terms of the Mozilla Public License, 11 # v. 2.0. If a copy of the MPL was not distributed with this file, You can 12 # obtain one at https://mozilla.org/MPL/2.0/. 13 # 14 # END HEADER 15 16 import math 17 import traceback 18 from typing import NoReturn, Union 19 20 from hypothesis import Verbosity, settings 21 from hypothesis.errors import CleanupFailed, InvalidArgument, UnsatisfiedAssumption 22 from hypothesis.internal.conjecture.data import ConjectureData 23 from hypothesis.internal.validation import check_type 24 from hypothesis.reporting import report, verbose_report 25 from hypothesis.utils.dynamicvariables import DynamicVariable 26 27 28 def reject() -> NoReturn: 29 raise UnsatisfiedAssumption() 30 31 32 def assume(condition: object) -> bool: 33 """Calling ``assume`` is like an :ref:`assert <python:assert>` that marks 34 the example as bad, rather than failing the test. 35 36 This allows you to specify properties that you *assume* will be 37 true, and let Hypothesis try to avoid similar examples in future. 38 """ 39 if not condition: 40 raise UnsatisfiedAssumption() 41 return True 42 43 44 _current_build_context = DynamicVariable(None) 45 46 47 def currently_in_test_context() -> bool: 48 """Return ``True`` if the calling code is currently running inside an 49 :func:`@given <hypothesis.given>` or :doc:`stateful <stateful>` test, 50 ``False`` otherwise. 51 52 This is useful for third-party integrations and assertion helpers which 53 may be called from traditional or property-based tests, but can only use 54 :func:`~hypothesis.assume` or :func:`~hypothesis.target` in the latter case. 
55 """ 56 return _current_build_context.value is not None 57 58 59 def current_build_context(): 60 context = _current_build_context.value 61 if context is None: 62 raise InvalidArgument("No build context registered") 63 return context 64 65 66 class BuildContext: 67 def __init__(self, data, is_final=False, close_on_capture=True): 68 assert isinstance(data, ConjectureData) 69 self.data = data 70 self.tasks = [] 71 self.is_final = is_final 72 self.close_on_capture = close_on_capture 73 self.close_on_del = False 74 75 def __enter__(self): 76 self.assign_variable = _current_build_context.with_value(self) 77 self.assign_variable.__enter__() 78 return self 79 80 def __exit__(self, exc_type, exc_value, tb): 81 self.assign_variable.__exit__(exc_type, exc_value, tb) 82 if self.close() and exc_type is None: 83 raise CleanupFailed() 84 85 def close(self): 86 any_failed = False 87 for task in self.tasks: 88 try: 89 task() 90 except BaseException: 91 any_failed = True 92 report(traceback.format_exc()) 93 return any_failed 94 95 96 def cleanup(teardown): 97 """Register a function to be called when the current test has finished 98 executing. Any exceptions thrown in teardown will be printed but not 99 rethrown. 100 101 Inside a test this isn't very interesting, because you can just use 102 a finally block, but note that you can use this inside map, flatmap, 103 etc. in order to e.g. insist that a value is closed at the end. 104 """ 105 context = _current_build_context.value 106 if context is None: 107 raise InvalidArgument("Cannot register cleanup outside of build context") 108 context.tasks.append(teardown) 109 110 111 def should_note(): 112 context = _current_build_context.value 113 if context is None: 114 raise InvalidArgument("Cannot make notes outside of a test") 115 return context.is_final or settings.default.verbosity >= Verbosity.verbose 116 117 118 def note(value: str) -> None: 119 """Report this value in the final execution.""" 120 if should_note(): 121 report(value) 122 123 124 def event(value: str) -> None: 125 """Record an event that occurred this test. Statistics on number of test 126 runs with each event will be reported at the end if you run Hypothesis in 127 statistics reporting mode. 128 129 Events should be strings or convertible to them. 130 """ 131 context = _current_build_context.value 132 if context is None: 133 raise InvalidArgument("Cannot make record events outside of a test") 134 135 context.data.note_event(value) 136 137 138 def target(observation: Union[int, float], *, label: str = "") -> Union[int, float]: 139 """Calling this function with an ``int`` or ``float`` observation gives it feedback 140 with which to guide our search for inputs that will cause an error, in 141 addition to all the usual heuristics. Observations must always be finite. 142 143 Hypothesis will try to maximize the observed value over several examples; 144 almost any metric will work so long as it makes sense to increase it. 145 For example, ``-abs(error)`` is a metric that increases as ``error`` 146 approaches zero. 147 148 Example metrics: 149 150 - Number of elements in a collection, or tasks in a queue 151 - Mean or maximum runtime of a task (or both, if you use ``label``) 152 - Compression ratio for data (perhaps per-algorithm or per-level) 153 - Number of steps taken by a state machine 154 155 The optional ``label`` argument can be used to distinguish between 156 and therefore separately optimise distinct observations, such as the 157 mean and standard deviation of a dataset. 
It is an error to call 158 ``target()`` with any label more than once per test case. 159 160 .. note:: 161 **The more examples you run, the better this technique works.** 162 163 As a rule of thumb, the targeting effect is noticeable above 164 :obj:`max_examples=1000 <hypothesis.settings.max_examples>`, 165 and immediately obvious by around ten thousand examples 166 *per label* used by your test. 167 168 :ref:`statistics` include the best score seen for each label, 169 which can help avoid `the threshold problem 170 <https://hypothesis.works/articles/threshold-problem/>`__ when the minimal 171 example shrinks right down to the threshold of failure (:issue:`2180`). 172 """ 173 check_type((int, float), observation, "observation") 174 if not math.isfinite(observation): 175 raise InvalidArgument(f"observation={observation!r} must be a finite float.") 176 check_type(str, label, "label") 177 178 context = _current_build_context.value 179 if context is None: 180 raise InvalidArgument( 181 "Calling target() outside of a test is invalid. " 182 "Consider guarding this call with `if currently_in_test_context(): ...`" 183 ) 184 verbose_report(f"Saw target(observation={observation!r}, label={label!r})") 185 186 if label in context.data.target_observations: 187 raise InvalidArgument( 188 f"Calling target({observation!r}, label={label!r}) would overwrite " 189 f"target({context.data.target_observations[label]!r}, label={label!r})" 190 ) 191 else: 192 context.data.target_observations[label] = observation 193 194 return observation 195 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/hypothesis-python/src/hypothesis/control.py b/hypothesis-python/src/hypothesis/control.py --- a/hypothesis-python/src/hypothesis/control.py +++ b/hypothesis-python/src/hypothesis/control.py @@ -116,7 +116,7 @@ def note(value: str) -> None: - """Report this value in the final execution.""" + """Report this value for the minimal failing example.""" if should_note(): report(value)
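The one-line docstring fix above captures the behavior the issue asks about: `note` output is reported only for the minimal failing example, or on every example when verbosity is at least `Verbosity.verbose` (see `should_note` in the file above). A small illustrative test, assuming Hypothesis and pytest are installed; the assertion is deliberately falsifiable so the note appears in the falsifying-example report:

```python
from hypothesis import given, note, strategies as st


@given(st.lists(st.integers()))
def test_note_is_reported_on_failure(xs):
    total = sum(xs)
    note(f"sum(xs) = {total}")  # silent on passing runs (unless verbosity >= verbose)
    assert total < 100          # fails eventually; the note is then printed
                                # alongside the shrunk, minimal failing example
```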
{"golden_diff": "diff --git a/hypothesis-python/src/hypothesis/control.py b/hypothesis-python/src/hypothesis/control.py\n--- a/hypothesis-python/src/hypothesis/control.py\n+++ b/hypothesis-python/src/hypothesis/control.py\n@@ -116,7 +116,7 @@\n \n \n def note(value: str) -> None:\n- \"\"\"Report this value in the final execution.\"\"\"\n+ \"\"\"Report this value for the minimal failing example.\"\"\"\n if should_note():\n report(value)\n", "issue": "clarification on `note`\nhttps://hypothesis.readthedocs.io/en/latest/details.html#hypothesis.note states\r\n\r\n`Report this value in the final execution.`\r\n\r\nFrom my test, `note` wasn't printed on successful run and was printed on falsified run.\r\n\r\nPlease help me understand this functionality\r\n\r\n\n", "before_files": [{"content": "# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis/\n#\n# Most of this work is copyright (C) 2013-2021 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at https://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nimport math\nimport traceback\nfrom typing import NoReturn, Union\n\nfrom hypothesis import Verbosity, settings\nfrom hypothesis.errors import CleanupFailed, InvalidArgument, UnsatisfiedAssumption\nfrom hypothesis.internal.conjecture.data import ConjectureData\nfrom hypothesis.internal.validation import check_type\nfrom hypothesis.reporting import report, verbose_report\nfrom hypothesis.utils.dynamicvariables import DynamicVariable\n\n\ndef reject() -> NoReturn:\n raise UnsatisfiedAssumption()\n\n\ndef assume(condition: object) -> bool:\n \"\"\"Calling ``assume`` is like an :ref:`assert <python:assert>` that marks\n the example as bad, rather than failing the test.\n\n This allows you to specify properties that you *assume* will be\n true, and let Hypothesis try to avoid similar examples in future.\n \"\"\"\n if not condition:\n raise UnsatisfiedAssumption()\n return True\n\n\n_current_build_context = DynamicVariable(None)\n\n\ndef currently_in_test_context() -> bool:\n \"\"\"Return ``True`` if the calling code is currently running inside an\n :func:`@given <hypothesis.given>` or :doc:`stateful <stateful>` test,\n ``False`` otherwise.\n\n This is useful for third-party integrations and assertion helpers which\n may be called from traditional or property-based tests, but can only use\n :func:`~hypothesis.assume` or :func:`~hypothesis.target` in the latter case.\n \"\"\"\n return _current_build_context.value is not None\n\n\ndef current_build_context():\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\"No build context registered\")\n return context\n\n\nclass BuildContext:\n def __init__(self, data, is_final=False, close_on_capture=True):\n assert isinstance(data, ConjectureData)\n self.data = data\n self.tasks = []\n self.is_final = is_final\n self.close_on_capture = close_on_capture\n self.close_on_del = False\n\n def __enter__(self):\n self.assign_variable = _current_build_context.with_value(self)\n self.assign_variable.__enter__()\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n self.assign_variable.__exit__(exc_type, exc_value, 
tb)\n if self.close() and exc_type is None:\n raise CleanupFailed()\n\n def close(self):\n any_failed = False\n for task in self.tasks:\n try:\n task()\n except BaseException:\n any_failed = True\n report(traceback.format_exc())\n return any_failed\n\n\ndef cleanup(teardown):\n \"\"\"Register a function to be called when the current test has finished\n executing. Any exceptions thrown in teardown will be printed but not\n rethrown.\n\n Inside a test this isn't very interesting, because you can just use\n a finally block, but note that you can use this inside map, flatmap,\n etc. in order to e.g. insist that a value is closed at the end.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\"Cannot register cleanup outside of build context\")\n context.tasks.append(teardown)\n\n\ndef should_note():\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\"Cannot make notes outside of a test\")\n return context.is_final or settings.default.verbosity >= Verbosity.verbose\n\n\ndef note(value: str) -> None:\n \"\"\"Report this value in the final execution.\"\"\"\n if should_note():\n report(value)\n\n\ndef event(value: str) -> None:\n \"\"\"Record an event that occurred this test. Statistics on number of test\n runs with each event will be reported at the end if you run Hypothesis in\n statistics reporting mode.\n\n Events should be strings or convertible to them.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\"Cannot make record events outside of a test\")\n\n context.data.note_event(value)\n\n\ndef target(observation: Union[int, float], *, label: str = \"\") -> Union[int, float]:\n \"\"\"Calling this function with an ``int`` or ``float`` observation gives it feedback\n with which to guide our search for inputs that will cause an error, in\n addition to all the usual heuristics. Observations must always be finite.\n\n Hypothesis will try to maximize the observed value over several examples;\n almost any metric will work so long as it makes sense to increase it.\n For example, ``-abs(error)`` is a metric that increases as ``error``\n approaches zero.\n\n Example metrics:\n\n - Number of elements in a collection, or tasks in a queue\n - Mean or maximum runtime of a task (or both, if you use ``label``)\n - Compression ratio for data (perhaps per-algorithm or per-level)\n - Number of steps taken by a state machine\n\n The optional ``label`` argument can be used to distinguish between\n and therefore separately optimise distinct observations, such as the\n mean and standard deviation of a dataset. It is an error to call\n ``target()`` with any label more than once per test case.\n\n .. 
note::\n **The more examples you run, the better this technique works.**\n\n As a rule of thumb, the targeting effect is noticeable above\n :obj:`max_examples=1000 <hypothesis.settings.max_examples>`,\n and immediately obvious by around ten thousand examples\n *per label* used by your test.\n\n :ref:`statistics` include the best score seen for each label,\n which can help avoid `the threshold problem\n <https://hypothesis.works/articles/threshold-problem/>`__ when the minimal\n example shrinks right down to the threshold of failure (:issue:`2180`).\n \"\"\"\n check_type((int, float), observation, \"observation\")\n if not math.isfinite(observation):\n raise InvalidArgument(f\"observation={observation!r} must be a finite float.\")\n check_type(str, label, \"label\")\n\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n \"Calling target() outside of a test is invalid. \"\n \"Consider guarding this call with `if currently_in_test_context(): ...`\"\n )\n verbose_report(f\"Saw target(observation={observation!r}, label={label!r})\")\n\n if label in context.data.target_observations:\n raise InvalidArgument(\n f\"Calling target({observation!r}, label={label!r}) would overwrite \"\n f\"target({context.data.target_observations[label]!r}, label={label!r})\"\n )\n else:\n context.data.target_observations[label] = observation\n\n return observation\n", "path": "hypothesis-python/src/hypothesis/control.py"}], "after_files": [{"content": "# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis/\n#\n# Most of this work is copyright (C) 2013-2021 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at https://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nimport math\nimport traceback\nfrom typing import NoReturn, Union\n\nfrom hypothesis import Verbosity, settings\nfrom hypothesis.errors import CleanupFailed, InvalidArgument, UnsatisfiedAssumption\nfrom hypothesis.internal.conjecture.data import ConjectureData\nfrom hypothesis.internal.validation import check_type\nfrom hypothesis.reporting import report, verbose_report\nfrom hypothesis.utils.dynamicvariables import DynamicVariable\n\n\ndef reject() -> NoReturn:\n raise UnsatisfiedAssumption()\n\n\ndef assume(condition: object) -> bool:\n \"\"\"Calling ``assume`` is like an :ref:`assert <python:assert>` that marks\n the example as bad, rather than failing the test.\n\n This allows you to specify properties that you *assume* will be\n true, and let Hypothesis try to avoid similar examples in future.\n \"\"\"\n if not condition:\n raise UnsatisfiedAssumption()\n return True\n\n\n_current_build_context = DynamicVariable(None)\n\n\ndef currently_in_test_context() -> bool:\n \"\"\"Return ``True`` if the calling code is currently running inside an\n :func:`@given <hypothesis.given>` or :doc:`stateful <stateful>` test,\n ``False`` otherwise.\n\n This is useful for third-party integrations and assertion helpers which\n may be called from traditional or property-based tests, but can only use\n :func:`~hypothesis.assume` or :func:`~hypothesis.target` in the latter case.\n \"\"\"\n return _current_build_context.value is not None\n\n\ndef current_build_context():\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\"No build context registered\")\n return context\n\n\nclass BuildContext:\n def __init__(self, data, is_final=False, close_on_capture=True):\n assert isinstance(data, ConjectureData)\n self.data = data\n self.tasks = []\n self.is_final = is_final\n self.close_on_capture = close_on_capture\n self.close_on_del = False\n\n def __enter__(self):\n self.assign_variable = _current_build_context.with_value(self)\n self.assign_variable.__enter__()\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n self.assign_variable.__exit__(exc_type, exc_value, tb)\n if self.close() and exc_type is None:\n raise CleanupFailed()\n\n def close(self):\n any_failed = False\n for task in self.tasks:\n try:\n task()\n except BaseException:\n any_failed = True\n report(traceback.format_exc())\n return any_failed\n\n\ndef cleanup(teardown):\n \"\"\"Register a function to be called when the current test has finished\n executing. Any exceptions thrown in teardown will be printed but not\n rethrown.\n\n Inside a test this isn't very interesting, because you can just use\n a finally block, but note that you can use this inside map, flatmap,\n etc. in order to e.g. insist that a value is closed at the end.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\"Cannot register cleanup outside of build context\")\n context.tasks.append(teardown)\n\n\ndef should_note():\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\"Cannot make notes outside of a test\")\n return context.is_final or settings.default.verbosity >= Verbosity.verbose\n\n\ndef note(value: str) -> None:\n \"\"\"Report this value for the minimal failing example.\"\"\"\n if should_note():\n report(value)\n\n\ndef event(value: str) -> None:\n \"\"\"Record an event that occurred this test. 
Statistics on number of test\n runs with each event will be reported at the end if you run Hypothesis in\n statistics reporting mode.\n\n Events should be strings or convertible to them.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\"Cannot make record events outside of a test\")\n\n context.data.note_event(value)\n\n\ndef target(observation: Union[int, float], *, label: str = \"\") -> Union[int, float]:\n \"\"\"Calling this function with an ``int`` or ``float`` observation gives it feedback\n with which to guide our search for inputs that will cause an error, in\n addition to all the usual heuristics. Observations must always be finite.\n\n Hypothesis will try to maximize the observed value over several examples;\n almost any metric will work so long as it makes sense to increase it.\n For example, ``-abs(error)`` is a metric that increases as ``error``\n approaches zero.\n\n Example metrics:\n\n - Number of elements in a collection, or tasks in a queue\n - Mean or maximum runtime of a task (or both, if you use ``label``)\n - Compression ratio for data (perhaps per-algorithm or per-level)\n - Number of steps taken by a state machine\n\n The optional ``label`` argument can be used to distinguish between\n and therefore separately optimise distinct observations, such as the\n mean and standard deviation of a dataset. It is an error to call\n ``target()`` with any label more than once per test case.\n\n .. note::\n **The more examples you run, the better this technique works.**\n\n As a rule of thumb, the targeting effect is noticeable above\n :obj:`max_examples=1000 <hypothesis.settings.max_examples>`,\n and immediately obvious by around ten thousand examples\n *per label* used by your test.\n\n :ref:`statistics` include the best score seen for each label,\n which can help avoid `the threshold problem\n <https://hypothesis.works/articles/threshold-problem/>`__ when the minimal\n example shrinks right down to the threshold of failure (:issue:`2180`).\n \"\"\"\n check_type((int, float), observation, \"observation\")\n if not math.isfinite(observation):\n raise InvalidArgument(f\"observation={observation!r} must be a finite float.\")\n check_type(str, label, \"label\")\n\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n \"Calling target() outside of a test is invalid. \"\n \"Consider guarding this call with `if currently_in_test_context(): ...`\"\n )\n verbose_report(f\"Saw target(observation={observation!r}, label={label!r})\")\n\n if label in context.data.target_observations:\n raise InvalidArgument(\n f\"Calling target({observation!r}, label={label!r}) would overwrite \"\n f\"target({context.data.target_observations[label]!r}, label={label!r})\"\n )\n else:\n context.data.target_observations[label] = observation\n\n return observation\n", "path": "hypothesis-python/src/hypothesis/control.py"}]}
2,455
108
problem_id: gh_patches_debug_27438
source: rasdani/github-patches
task_type: git_diff
in_source_id: huggingface__dataset-viewer-2389
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Don't ignore webhooks when a dataset changes visibility See: - https://github.com/huggingface/moon-landing/issues/8779 - https://github.com/huggingface/moon-landing/pull/8825 A webhook is sent when a dataset is toggle between public and private. Currently, we ignore them due to https://github.com/huggingface/datasets-server/blob/66c1e089e204ab33195b957e1b99b0da6a4dd2de/services/api/src/api/routes/webhook.py#L84-L89 Instead, I think we should delete+update the dataset --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `services/api/src/api/routes/webhook.py` Content: ``` 1 # SPDX-License-Identifier: Apache-2.0 2 # Copyright 2022 The HuggingFace Authors. 3 4 import logging 5 from typing import Any, Literal, Optional, TypedDict 6 7 from jsonschema import ValidationError, validate 8 from libapi.utils import Endpoint, get_response 9 from libcommon.dtos import Priority 10 from libcommon.exceptions import CustomError 11 from libcommon.operations import delete_dataset, get_current_revision, update_dataset 12 from libcommon.prometheus import StepProfiler 13 from libcommon.storage_client import StorageClient 14 from starlette.requests import Request 15 from starlette.responses import Response 16 17 schema = { 18 "$schema": "https://json-schema.org/draft/2020-12/schema", 19 "type": "object", 20 "properties": { 21 "event": {"type": "string", "enum": ["add", "remove", "update", "move"]}, 22 "movedTo": {"type": "string"}, 23 "repo": { 24 "type": "object", 25 "properties": { 26 "headSha": {"type": "string"}, 27 "name": {"type": "string"}, 28 "type": {"type": "string", "enum": ["dataset", "model", "space"]}, 29 }, 30 "required": ["type", "name"], 31 }, 32 "scope": { 33 "type": "string", 34 }, 35 }, 36 "required": ["event", "repo", "scope"], 37 } 38 39 40 class _MoonWebhookV2PayloadRepo(TypedDict): 41 type: Literal["model", "dataset", "space"] 42 name: str 43 44 45 class MoonWebhookV2PayloadRepo(_MoonWebhookV2PayloadRepo, total=False): 46 headSha: Optional[str] 47 48 49 class MoonWebhookV2Payload(TypedDict): 50 """ 51 Payload from a moon-landing webhook call, v2. 
52 """ 53 54 event: Literal["add", "remove", "update", "move"] 55 movedTo: Optional[str] 56 repo: MoonWebhookV2PayloadRepo 57 scope: str 58 59 60 def parse_payload(json: Any) -> MoonWebhookV2Payload: 61 validate(instance=json, schema=schema) 62 return json # type: ignore 63 # ^ validate() ensures the content is correct, but does not give the type 64 65 66 def process_payload( 67 payload: MoonWebhookV2Payload, 68 blocked_datasets: list[str], 69 hf_endpoint: str, 70 hf_token: Optional[str] = None, 71 hf_timeout_seconds: Optional[float] = None, 72 storage_clients: Optional[list[StorageClient]] = None, 73 ) -> None: 74 if payload["repo"]["type"] != "dataset" or payload["scope"] not in ("repo", "repo.content"): 75 # ^ it filters out the webhook calls for non-dataset repos and discussions in dataset repos 76 return 77 dataset = payload["repo"]["name"] 78 if dataset is None: 79 return 80 event = payload["event"] 81 if event == "remove": 82 delete_dataset(dataset=dataset, storage_clients=storage_clients) 83 elif event in ["add", "update", "move"]: 84 if event == "update" and get_current_revision(dataset) == payload["repo"]["headSha"]: 85 # ^ it filters out the webhook calls when the refs/convert/parquet branch is updated 86 logging.warning( 87 f"Webhook revision for {dataset} is the same as the current revision in the db - skipping update." 88 ) 89 return 90 delete_dataset(dataset=dataset, storage_clients=storage_clients) 91 # ^ delete the old contents (cache + jobs + assets) to avoid mixed content 92 new_dataset = (event == "move" and payload["movedTo"]) or dataset 93 update_dataset( 94 dataset=new_dataset, 95 priority=Priority.NORMAL, 96 blocked_datasets=blocked_datasets, 97 hf_endpoint=hf_endpoint, 98 hf_token=hf_token, 99 hf_timeout_seconds=hf_timeout_seconds, 100 storage_clients=storage_clients, 101 ) 102 103 104 def create_webhook_endpoint( 105 blocked_datasets: list[str], 106 hf_endpoint: str, 107 hf_token: Optional[str] = None, 108 hf_timeout_seconds: Optional[float] = None, 109 hf_webhook_secret: Optional[str] = None, 110 storage_clients: Optional[list[StorageClient]] = None, 111 ) -> Endpoint: 112 async def webhook_endpoint(request: Request) -> Response: 113 with StepProfiler(method="webhook_endpoint", step="all"): 114 with StepProfiler(method="webhook_endpoint", step="get JSON"): 115 try: 116 json = await request.json() 117 except Exception: 118 content = {"status": "error", "error": "the body could not be parsed as a JSON"} 119 logging.info("/webhook: the body could not be parsed as a JSON.") 120 return get_response(content, 400) 121 logging.info(f"/webhook: {json}") 122 with StepProfiler(method="webhook_endpoint", step="parse payload and headers"): 123 try: 124 payload = parse_payload(json) 125 except ValidationError as e: 126 content = {"status": "error", "error": "the JSON payload is invalid"} 127 logging.info(f"/webhook: the JSON body is invalid. JSON: {json}. Error: {e}") 128 return get_response(content, 400) 129 except Exception as e: 130 logging.exception("Unexpected error", exc_info=e) 131 content = {"status": "error", "error": "unexpected error"} 132 logging.warning(f"/webhook: unexpected error while parsing the JSON body is invalid. Error: {e}") 133 return get_response(content, 500) 134 135 HEADER = "x-webhook-secret" 136 trust_sender = ( 137 hf_webhook_secret is not None 138 and (secret := request.headers.get(HEADER)) is not None 139 and secret == hf_webhook_secret 140 ) 141 if not trust_sender: 142 logging.info(f"/webhook: the sender is not trusted. 
JSON: {json}") 143 return get_response( 144 {"status": "error", "error": "The sender is not trusted. Retry with a valid secret."}, 400 145 ) 146 147 with StepProfiler(method="webhook_endpoint", step="process payload"): 148 try: 149 process_payload( 150 payload=payload, 151 blocked_datasets=blocked_datasets, 152 hf_endpoint=hf_endpoint, 153 hf_token=hf_token, 154 hf_timeout_seconds=hf_timeout_seconds, 155 storage_clients=storage_clients, 156 ) 157 except CustomError as e: 158 content = {"status": "error", "error": "the dataset is not supported"} 159 dataset = payload["repo"]["name"] 160 logging.debug(f"/webhook: the dataset {dataset} is not supported. JSON: {json}. Error: {e}") 161 return get_response(content, 400) 162 content = {"status": "ok"} 163 return get_response(content, 200) 164 165 return webhook_endpoint 166 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/services/api/src/api/routes/webhook.py b/services/api/src/api/routes/webhook.py --- a/services/api/src/api/routes/webhook.py +++ b/services/api/src/api/routes/webhook.py @@ -71,7 +71,7 @@ hf_timeout_seconds: Optional[float] = None, storage_clients: Optional[list[StorageClient]] = None, ) -> None: - if payload["repo"]["type"] != "dataset" or payload["scope"] not in ("repo", "repo.content"): + if payload["repo"]["type"] != "dataset" or payload["scope"] not in ("repo", "repo.content", "repo.config"): # ^ it filters out the webhook calls for non-dataset repos and discussions in dataset repos return dataset = payload["repo"]["name"] @@ -81,7 +81,11 @@ if event == "remove": delete_dataset(dataset=dataset, storage_clients=storage_clients) elif event in ["add", "update", "move"]: - if event == "update" and get_current_revision(dataset) == payload["repo"]["headSha"]: + if ( + event == "update" + and get_current_revision(dataset) == payload["repo"]["headSha"] + and not payload["scope"] == "repo.config" + ): # ^ it filters out the webhook calls when the refs/convert/parquet branch is updated logging.warning( f"Webhook revision for {dataset} is the same as the current revision in the db - skipping update."
{"golden_diff": "diff --git a/services/api/src/api/routes/webhook.py b/services/api/src/api/routes/webhook.py\n--- a/services/api/src/api/routes/webhook.py\n+++ b/services/api/src/api/routes/webhook.py\n@@ -71,7 +71,7 @@\n hf_timeout_seconds: Optional[float] = None,\n storage_clients: Optional[list[StorageClient]] = None,\n ) -> None:\n- if payload[\"repo\"][\"type\"] != \"dataset\" or payload[\"scope\"] not in (\"repo\", \"repo.content\"):\n+ if payload[\"repo\"][\"type\"] != \"dataset\" or payload[\"scope\"] not in (\"repo\", \"repo.content\", \"repo.config\"):\n # ^ it filters out the webhook calls for non-dataset repos and discussions in dataset repos\n return\n dataset = payload[\"repo\"][\"name\"]\n@@ -81,7 +81,11 @@\n if event == \"remove\":\n delete_dataset(dataset=dataset, storage_clients=storage_clients)\n elif event in [\"add\", \"update\", \"move\"]:\n- if event == \"update\" and get_current_revision(dataset) == payload[\"repo\"][\"headSha\"]:\n+ if (\n+ event == \"update\"\n+ and get_current_revision(dataset) == payload[\"repo\"][\"headSha\"]\n+ and not payload[\"scope\"] == \"repo.config\"\n+ ):\n # ^ it filters out the webhook calls when the refs/convert/parquet branch is updated\n logging.warning(\n f\"Webhook revision for {dataset} is the same as the current revision in the db - skipping update.\"\n", "issue": "Don't ignore webhooks when a dataset changes visibility\nSee:\r\n- https://github.com/huggingface/moon-landing/issues/8779\r\n- https://github.com/huggingface/moon-landing/pull/8825\r\n\r\nA webhook is sent when a dataset is toggle between public and private.\r\n\r\nCurrently, we ignore them due to \r\n\r\nhttps://github.com/huggingface/datasets-server/blob/66c1e089e204ab33195b957e1b99b0da6a4dd2de/services/api/src/api/routes/webhook.py#L84-L89\r\n\r\nInstead, I think we should delete+update the dataset\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nimport logging\nfrom typing import Any, Literal, Optional, TypedDict\n\nfrom jsonschema import ValidationError, validate\nfrom libapi.utils import Endpoint, get_response\nfrom libcommon.dtos import Priority\nfrom libcommon.exceptions import CustomError\nfrom libcommon.operations import delete_dataset, get_current_revision, update_dataset\nfrom libcommon.prometheus import StepProfiler\nfrom libcommon.storage_client import StorageClient\nfrom starlette.requests import Request\nfrom starlette.responses import Response\n\nschema = {\n \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n \"type\": \"object\",\n \"properties\": {\n \"event\": {\"type\": \"string\", \"enum\": [\"add\", \"remove\", \"update\", \"move\"]},\n \"movedTo\": {\"type\": \"string\"},\n \"repo\": {\n \"type\": \"object\",\n \"properties\": {\n \"headSha\": {\"type\": \"string\"},\n \"name\": {\"type\": \"string\"},\n \"type\": {\"type\": \"string\", \"enum\": [\"dataset\", \"model\", \"space\"]},\n },\n \"required\": [\"type\", \"name\"],\n },\n \"scope\": {\n \"type\": \"string\",\n },\n },\n \"required\": [\"event\", \"repo\", \"scope\"],\n}\n\n\nclass _MoonWebhookV2PayloadRepo(TypedDict):\n type: Literal[\"model\", \"dataset\", \"space\"]\n name: str\n\n\nclass MoonWebhookV2PayloadRepo(_MoonWebhookV2PayloadRepo, total=False):\n headSha: Optional[str]\n\n\nclass MoonWebhookV2Payload(TypedDict):\n \"\"\"\n Payload from a moon-landing webhook call, v2.\n \"\"\"\n\n event: Literal[\"add\", \"remove\", \"update\", \"move\"]\n movedTo: Optional[str]\n repo: 
MoonWebhookV2PayloadRepo\n scope: str\n\n\ndef parse_payload(json: Any) -> MoonWebhookV2Payload:\n validate(instance=json, schema=schema)\n return json # type: ignore\n # ^ validate() ensures the content is correct, but does not give the type\n\n\ndef process_payload(\n payload: MoonWebhookV2Payload,\n blocked_datasets: list[str],\n hf_endpoint: str,\n hf_token: Optional[str] = None,\n hf_timeout_seconds: Optional[float] = None,\n storage_clients: Optional[list[StorageClient]] = None,\n) -> None:\n if payload[\"repo\"][\"type\"] != \"dataset\" or payload[\"scope\"] not in (\"repo\", \"repo.content\"):\n # ^ it filters out the webhook calls for non-dataset repos and discussions in dataset repos\n return\n dataset = payload[\"repo\"][\"name\"]\n if dataset is None:\n return\n event = payload[\"event\"]\n if event == \"remove\":\n delete_dataset(dataset=dataset, storage_clients=storage_clients)\n elif event in [\"add\", \"update\", \"move\"]:\n if event == \"update\" and get_current_revision(dataset) == payload[\"repo\"][\"headSha\"]:\n # ^ it filters out the webhook calls when the refs/convert/parquet branch is updated\n logging.warning(\n f\"Webhook revision for {dataset} is the same as the current revision in the db - skipping update.\"\n )\n return\n delete_dataset(dataset=dataset, storage_clients=storage_clients)\n # ^ delete the old contents (cache + jobs + assets) to avoid mixed content\n new_dataset = (event == \"move\" and payload[\"movedTo\"]) or dataset\n update_dataset(\n dataset=new_dataset,\n priority=Priority.NORMAL,\n blocked_datasets=blocked_datasets,\n hf_endpoint=hf_endpoint,\n hf_token=hf_token,\n hf_timeout_seconds=hf_timeout_seconds,\n storage_clients=storage_clients,\n )\n\n\ndef create_webhook_endpoint(\n blocked_datasets: list[str],\n hf_endpoint: str,\n hf_token: Optional[str] = None,\n hf_timeout_seconds: Optional[float] = None,\n hf_webhook_secret: Optional[str] = None,\n storage_clients: Optional[list[StorageClient]] = None,\n) -> Endpoint:\n async def webhook_endpoint(request: Request) -> Response:\n with StepProfiler(method=\"webhook_endpoint\", step=\"all\"):\n with StepProfiler(method=\"webhook_endpoint\", step=\"get JSON\"):\n try:\n json = await request.json()\n except Exception:\n content = {\"status\": \"error\", \"error\": \"the body could not be parsed as a JSON\"}\n logging.info(\"/webhook: the body could not be parsed as a JSON.\")\n return get_response(content, 400)\n logging.info(f\"/webhook: {json}\")\n with StepProfiler(method=\"webhook_endpoint\", step=\"parse payload and headers\"):\n try:\n payload = parse_payload(json)\n except ValidationError as e:\n content = {\"status\": \"error\", \"error\": \"the JSON payload is invalid\"}\n logging.info(f\"/webhook: the JSON body is invalid. JSON: {json}. Error: {e}\")\n return get_response(content, 400)\n except Exception as e:\n logging.exception(\"Unexpected error\", exc_info=e)\n content = {\"status\": \"error\", \"error\": \"unexpected error\"}\n logging.warning(f\"/webhook: unexpected error while parsing the JSON body is invalid. Error: {e}\")\n return get_response(content, 500)\n\n HEADER = \"x-webhook-secret\"\n trust_sender = (\n hf_webhook_secret is not None\n and (secret := request.headers.get(HEADER)) is not None\n and secret == hf_webhook_secret\n )\n if not trust_sender:\n logging.info(f\"/webhook: the sender is not trusted. JSON: {json}\")\n return get_response(\n {\"status\": \"error\", \"error\": \"The sender is not trusted. 
Retry with a valid secret.\"}, 400\n )\n\n with StepProfiler(method=\"webhook_endpoint\", step=\"process payload\"):\n try:\n process_payload(\n payload=payload,\n blocked_datasets=blocked_datasets,\n hf_endpoint=hf_endpoint,\n hf_token=hf_token,\n hf_timeout_seconds=hf_timeout_seconds,\n storage_clients=storage_clients,\n )\n except CustomError as e:\n content = {\"status\": \"error\", \"error\": \"the dataset is not supported\"}\n dataset = payload[\"repo\"][\"name\"]\n logging.debug(f\"/webhook: the dataset {dataset} is not supported. JSON: {json}. Error: {e}\")\n return get_response(content, 400)\n content = {\"status\": \"ok\"}\n return get_response(content, 200)\n\n return webhook_endpoint\n", "path": "services/api/src/api/routes/webhook.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nimport logging\nfrom typing import Any, Literal, Optional, TypedDict\n\nfrom jsonschema import ValidationError, validate\nfrom libapi.utils import Endpoint, get_response\nfrom libcommon.dtos import Priority\nfrom libcommon.exceptions import CustomError\nfrom libcommon.operations import delete_dataset, get_current_revision, update_dataset\nfrom libcommon.prometheus import StepProfiler\nfrom libcommon.storage_client import StorageClient\nfrom starlette.requests import Request\nfrom starlette.responses import Response\n\nschema = {\n \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n \"type\": \"object\",\n \"properties\": {\n \"event\": {\"type\": \"string\", \"enum\": [\"add\", \"remove\", \"update\", \"move\"]},\n \"movedTo\": {\"type\": \"string\"},\n \"repo\": {\n \"type\": \"object\",\n \"properties\": {\n \"headSha\": {\"type\": \"string\"},\n \"name\": {\"type\": \"string\"},\n \"type\": {\"type\": \"string\", \"enum\": [\"dataset\", \"model\", \"space\"]},\n },\n \"required\": [\"type\", \"name\"],\n },\n \"scope\": {\n \"type\": \"string\",\n },\n },\n \"required\": [\"event\", \"repo\", \"scope\"],\n}\n\n\nclass _MoonWebhookV2PayloadRepo(TypedDict):\n type: Literal[\"model\", \"dataset\", \"space\"]\n name: str\n\n\nclass MoonWebhookV2PayloadRepo(_MoonWebhookV2PayloadRepo, total=False):\n headSha: Optional[str]\n\n\nclass MoonWebhookV2Payload(TypedDict):\n \"\"\"\n Payload from a moon-landing webhook call, v2.\n \"\"\"\n\n event: Literal[\"add\", \"remove\", \"update\", \"move\"]\n movedTo: Optional[str]\n repo: MoonWebhookV2PayloadRepo\n scope: str\n\n\ndef parse_payload(json: Any) -> MoonWebhookV2Payload:\n validate(instance=json, schema=schema)\n return json # type: ignore\n # ^ validate() ensures the content is correct, but does not give the type\n\n\ndef process_payload(\n payload: MoonWebhookV2Payload,\n blocked_datasets: list[str],\n hf_endpoint: str,\n hf_token: Optional[str] = None,\n hf_timeout_seconds: Optional[float] = None,\n storage_clients: Optional[list[StorageClient]] = None,\n) -> None:\n if payload[\"repo\"][\"type\"] != \"dataset\" or payload[\"scope\"] not in (\"repo\", \"repo.content\", \"repo.config\"):\n # ^ it filters out the webhook calls for non-dataset repos and discussions in dataset repos\n return\n dataset = payload[\"repo\"][\"name\"]\n if dataset is None:\n return\n event = payload[\"event\"]\n if event == \"remove\":\n delete_dataset(dataset=dataset, storage_clients=storage_clients)\n elif event in [\"add\", \"update\", \"move\"]:\n if (\n event == \"update\"\n and get_current_revision(dataset) == payload[\"repo\"][\"headSha\"]\n and not payload[\"scope\"] == \"repo.config\"\n 
):\n # ^ it filters out the webhook calls when the refs/convert/parquet branch is updated\n logging.warning(\n f\"Webhook revision for {dataset} is the same as the current revision in the db - skipping update.\"\n )\n return\n delete_dataset(dataset=dataset, storage_clients=storage_clients)\n # ^ delete the old contents (cache + jobs + assets) to avoid mixed content\n new_dataset = (event == \"move\" and payload[\"movedTo\"]) or dataset\n update_dataset(\n dataset=new_dataset,\n priority=Priority.NORMAL,\n blocked_datasets=blocked_datasets,\n hf_endpoint=hf_endpoint,\n hf_token=hf_token,\n hf_timeout_seconds=hf_timeout_seconds,\n storage_clients=storage_clients,\n )\n\n\ndef create_webhook_endpoint(\n blocked_datasets: list[str],\n hf_endpoint: str,\n hf_token: Optional[str] = None,\n hf_timeout_seconds: Optional[float] = None,\n hf_webhook_secret: Optional[str] = None,\n storage_clients: Optional[list[StorageClient]] = None,\n) -> Endpoint:\n async def webhook_endpoint(request: Request) -> Response:\n with StepProfiler(method=\"webhook_endpoint\", step=\"all\"):\n with StepProfiler(method=\"webhook_endpoint\", step=\"get JSON\"):\n try:\n json = await request.json()\n except Exception:\n content = {\"status\": \"error\", \"error\": \"the body could not be parsed as a JSON\"}\n logging.info(\"/webhook: the body could not be parsed as a JSON.\")\n return get_response(content, 400)\n logging.info(f\"/webhook: {json}\")\n with StepProfiler(method=\"webhook_endpoint\", step=\"parse payload and headers\"):\n try:\n payload = parse_payload(json)\n except ValidationError as e:\n content = {\"status\": \"error\", \"error\": \"the JSON payload is invalid\"}\n logging.info(f\"/webhook: the JSON body is invalid. JSON: {json}. Error: {e}\")\n return get_response(content, 400)\n except Exception as e:\n logging.exception(\"Unexpected error\", exc_info=e)\n content = {\"status\": \"error\", \"error\": \"unexpected error\"}\n logging.warning(f\"/webhook: unexpected error while parsing the JSON body is invalid. Error: {e}\")\n return get_response(content, 500)\n\n HEADER = \"x-webhook-secret\"\n trust_sender = (\n hf_webhook_secret is not None\n and (secret := request.headers.get(HEADER)) is not None\n and secret == hf_webhook_secret\n )\n if not trust_sender:\n logging.info(f\"/webhook: the sender is not trusted. JSON: {json}\")\n return get_response(\n {\"status\": \"error\", \"error\": \"The sender is not trusted. Retry with a valid secret.\"}, 400\n )\n\n with StepProfiler(method=\"webhook_endpoint\", step=\"process payload\"):\n try:\n process_payload(\n payload=payload,\n blocked_datasets=blocked_datasets,\n hf_endpoint=hf_endpoint,\n hf_token=hf_token,\n hf_timeout_seconds=hf_timeout_seconds,\n storage_clients=storage_clients,\n )\n except CustomError as e:\n content = {\"status\": \"error\", \"error\": \"the dataset is not supported\"}\n dataset = payload[\"repo\"][\"name\"]\n logging.debug(f\"/webhook: the dataset {dataset} is not supported. JSON: {json}. Error: {e}\")\n return get_response(content, 400)\n content = {\"status\": \"ok\"}\n return get_response(content, 200)\n\n return webhook_endpoint\n", "path": "services/api/src/api/routes/webhook.py"}]}
2,262
334
problem_id: gh_patches_debug_33210
source: rasdani/github-patches
task_type: git_diff
in_source_id: zigpy__zha-device-handlers-1462
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] IKEA E1812 shortcut button does not use quirks **Describe the bug** I have two IKEA E1812 Tradfri shortcut buttons that started to behave differently. ZHA claims that they have the same firmware (I also bought them together), but they now have different clusters: The one that is not picked up correctly has an extra `in_cluster` with ID 0xfc7c in its device signature. This causes `zhaquirks.ikea.shortcutbtn.IkeaTradfriShortcutBtn` to not pick this button up, producing a *Fail because input cluster mismatch on at least one endpoint* message in the log. The `zha_event` still fires as it should, so by using the raw event one can work around this, but maybe there's a way to get the quirks to work. **To Reproduce** - pair a shortcut button that has the 0xfc7c input cluster - go to the device page and see that no quirks are applied - try to add an automation, see that triggers for button press are missing (Unfortunately I have no clue what caused the two buttons to present different signatures) **Expected behavior** The quirks should still apply **Additional context** signature of the button with the extra cluster (this is the only difference between my buttons) **EDIT:** The WindowCovering output cluster is also missing on the weird button ``` { "node_descriptor": "NodeDescriptor(logical_type=<LogicalType.EndDevice: 2>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress: 128>, manufacturer_code=4476, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=True, *is_full_function_device=False, *is_mains_powered=False, *is_receiver_on_when_idle=False, *is_router=False, *is_security_capable=False)", "endpoints": { "1": { "profile_id": 260, "device_type": "0x0820", "in_clusters": [ "0x0000", "0x0001", "0x0003", "0x0009", "0x0020", "0x1000", "0xfc7c" ], "out_clusters": [ "0x0003", "0x0004", "0x0006", "0x0008", "0x0019", "0x1000" ] } }, "manufacturer": "IKEA of Sweden", "model": "TRADFRI SHORTCUT Button", "class": "zigpy.device.Device" } ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `zhaquirks/ikea/shortcutbtn.py` Content: ``` 1 """Device handler for IKEA of Sweden TRADFRI shortcut button.""" 2 from zigpy.profiles import zha 3 from zigpy.quirks import CustomDevice 4 from zigpy.zcl.clusters.closures import WindowCovering 5 from zigpy.zcl.clusters.general import ( 6 Alarms, 7 Basic, 8 Groups, 9 Identify, 10 LevelControl, 11 OnOff, 12 Ota, 13 PollControl, 14 PowerConfiguration, 15 ) 16 from zigpy.zcl.clusters.lightlink import LightLink 17 18 from zhaquirks.const import ( 19 ARGS, 20 CLUSTER_ID, 21 COMMAND, 22 COMMAND_MOVE_ON_OFF, 23 COMMAND_OFF, 24 COMMAND_ON, 25 COMMAND_STOP, 26 DEVICE_TYPE, 27 DIM_UP, 28 DOUBLE_PRESS, 29 ENDPOINT_ID, 30 ENDPOINTS, 31 INPUT_CLUSTERS, 32 LONG_PRESS, 33 LONG_RELEASE, 34 MODELS_INFO, 35 OUTPUT_CLUSTERS, 36 PROFILE_ID, 37 SHORT_PRESS, 38 TURN_ON, 39 ) 40 from zhaquirks.ikea import IKEA, LightLinkCluster, PowerConfiguration1CRCluster 41 42 43 class IkeaTradfriShortcutBtn(CustomDevice): 44 """Custom device representing IKEA of Sweden TRADFRI shortcut button.""" 45 46 signature = { 47 # <SimpleDescriptor endpoint=1 profile=260 device_type=2080 48 # device_version=1 49 # input_clusters=[0, 1, 3, 9, 32, 4096] 50 # output_clusters=[3, 4, 6, 8, 25, 258, 4096]> 51 MODELS_INFO: [(IKEA, "TRADFRI SHORTCUT Button")], 52 ENDPOINTS: { 53 1: { 54 PROFILE_ID: zha.PROFILE_ID, 55 DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER, 56 INPUT_CLUSTERS: [ 57 Basic.cluster_id, 58 PowerConfiguration.cluster_id, 59 Identify.cluster_id, 60 Alarms.cluster_id, 61 PollControl.cluster_id, 62 LightLink.cluster_id, 63 ], 64 OUTPUT_CLUSTERS: [ 65 Identify.cluster_id, 66 Groups.cluster_id, 67 OnOff.cluster_id, 68 LevelControl.cluster_id, 69 Ota.cluster_id, 70 WindowCovering.cluster_id, 71 LightLink.cluster_id, 72 ], 73 } 74 }, 75 } 76 77 replacement = { 78 ENDPOINTS: { 79 1: { 80 PROFILE_ID: zha.PROFILE_ID, 81 DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER, 82 INPUT_CLUSTERS: [ 83 Basic.cluster_id, 84 PowerConfiguration1CRCluster, 85 Identify.cluster_id, 86 Alarms.cluster_id, 87 PollControl.cluster_id, 88 LightLinkCluster, 89 ], 90 OUTPUT_CLUSTERS: [ 91 Identify.cluster_id, 92 Groups.cluster_id, 93 OnOff.cluster_id, 94 LevelControl.cluster_id, 95 Ota.cluster_id, 96 WindowCovering.cluster_id, 97 LightLink.cluster_id, 98 ], 99 } 100 } 101 } 102 103 device_automation_triggers = { 104 (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, CLUSTER_ID: 6, ENDPOINT_ID: 1}, 105 (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1}, 106 (LONG_PRESS, DIM_UP): { 107 COMMAND: COMMAND_MOVE_ON_OFF, 108 CLUSTER_ID: 8, 109 ENDPOINT_ID: 1, 110 ARGS: [0, 83], 111 }, 112 (LONG_RELEASE, DIM_UP): { 113 COMMAND: COMMAND_STOP, 114 CLUSTER_ID: 8, 115 ENDPOINT_ID: 1, 116 }, 117 } 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/zhaquirks/ikea/shortcutbtn.py b/zhaquirks/ikea/shortcutbtn.py --- a/zhaquirks/ikea/shortcutbtn.py +++ b/zhaquirks/ikea/shortcutbtn.py @@ -115,3 +115,79 @@ ENDPOINT_ID: 1, }, } + + +class IkeaTradfriShortcutBtn2(CustomDevice): + """Custom device representing IKEA of Sweden TRADFRI shortcut button with IKEA cluster.""" + + signature = { + # <SimpleDescriptor endpoint=1 profile=260 device_type=2080 + # device_version=1 + # input_clusters=[0, 1, 3, 9, 32, 4096, 64636] + # output_clusters=[3, 4, 6, 8, 25, 4096]> + MODELS_INFO: [(IKEA, "TRADFRI SHORTCUT Button")], + ENDPOINTS: { + 1: { + PROFILE_ID: zha.PROFILE_ID, + DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER, + INPUT_CLUSTERS: [ + Basic.cluster_id, + PowerConfiguration.cluster_id, + Identify.cluster_id, + Alarms.cluster_id, + PollControl.cluster_id, + LightLink.cluster_id, + 0xFC7C, # IKEA Cluster + ], + OUTPUT_CLUSTERS: [ + Identify.cluster_id, + Groups.cluster_id, + OnOff.cluster_id, + LevelControl.cluster_id, + Ota.cluster_id, + LightLink.cluster_id, + ], + } + }, + } + + replacement = { + ENDPOINTS: { + 1: { + PROFILE_ID: zha.PROFILE_ID, + DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER, + INPUT_CLUSTERS: [ + Basic.cluster_id, + PowerConfiguration1CRCluster, + Identify.cluster_id, + Alarms.cluster_id, + PollControl.cluster_id, + LightLinkCluster, + ], + OUTPUT_CLUSTERS: [ + Identify.cluster_id, + Groups.cluster_id, + OnOff.cluster_id, + LevelControl.cluster_id, + Ota.cluster_id, + LightLink.cluster_id, + ], + } + } + } + + device_automation_triggers = { + (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, CLUSTER_ID: 6, ENDPOINT_ID: 1}, + (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1}, + (LONG_PRESS, DIM_UP): { + COMMAND: COMMAND_MOVE_ON_OFF, + CLUSTER_ID: 8, + ENDPOINT_ID: 1, + ARGS: [0, 83], + }, + (LONG_RELEASE, DIM_UP): { + COMMAND: COMMAND_STOP, + CLUSTER_ID: 8, + ENDPOINT_ID: 1, + }, + }
{"golden_diff": "diff --git a/zhaquirks/ikea/shortcutbtn.py b/zhaquirks/ikea/shortcutbtn.py\n--- a/zhaquirks/ikea/shortcutbtn.py\n+++ b/zhaquirks/ikea/shortcutbtn.py\n@@ -115,3 +115,79 @@\n ENDPOINT_ID: 1,\n },\n }\n+\n+\n+class IkeaTradfriShortcutBtn2(CustomDevice):\n+ \"\"\"Custom device representing IKEA of Sweden TRADFRI shortcut button with IKEA cluster.\"\"\"\n+\n+ signature = {\n+ # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n+ # device_version=1\n+ # input_clusters=[0, 1, 3, 9, 32, 4096, 64636]\n+ # output_clusters=[3, 4, 6, 8, 25, 4096]>\n+ MODELS_INFO: [(IKEA, \"TRADFRI SHORTCUT Button\")],\n+ ENDPOINTS: {\n+ 1: {\n+ PROFILE_ID: zha.PROFILE_ID,\n+ DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n+ INPUT_CLUSTERS: [\n+ Basic.cluster_id,\n+ PowerConfiguration.cluster_id,\n+ Identify.cluster_id,\n+ Alarms.cluster_id,\n+ PollControl.cluster_id,\n+ LightLink.cluster_id,\n+ 0xFC7C, # IKEA Cluster\n+ ],\n+ OUTPUT_CLUSTERS: [\n+ Identify.cluster_id,\n+ Groups.cluster_id,\n+ OnOff.cluster_id,\n+ LevelControl.cluster_id,\n+ Ota.cluster_id,\n+ LightLink.cluster_id,\n+ ],\n+ }\n+ },\n+ }\n+\n+ replacement = {\n+ ENDPOINTS: {\n+ 1: {\n+ PROFILE_ID: zha.PROFILE_ID,\n+ DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n+ INPUT_CLUSTERS: [\n+ Basic.cluster_id,\n+ PowerConfiguration1CRCluster,\n+ Identify.cluster_id,\n+ Alarms.cluster_id,\n+ PollControl.cluster_id,\n+ LightLinkCluster,\n+ ],\n+ OUTPUT_CLUSTERS: [\n+ Identify.cluster_id,\n+ Groups.cluster_id,\n+ OnOff.cluster_id,\n+ LevelControl.cluster_id,\n+ Ota.cluster_id,\n+ LightLink.cluster_id,\n+ ],\n+ }\n+ }\n+ }\n+\n+ device_automation_triggers = {\n+ (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n+ (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n+ (LONG_PRESS, DIM_UP): {\n+ COMMAND: COMMAND_MOVE_ON_OFF,\n+ CLUSTER_ID: 8,\n+ ENDPOINT_ID: 1,\n+ ARGS: [0, 83],\n+ },\n+ (LONG_RELEASE, DIM_UP): {\n+ COMMAND: COMMAND_STOP,\n+ CLUSTER_ID: 8,\n+ ENDPOINT_ID: 1,\n+ },\n+ }\n", "issue": "[BUG] IKEA E1812 shortcut button does not use quirks\n**Describe the bug**\r\nI have two IKEA E1812 Tradfri shortcut buttons that started to behave differently. ZHA claims that they have the same firmware (I also bought them together), but they now have different clusters: The one that is not picked up correctly has an extra `in_cluster` with ID 0xfc7c in its device signature. 
This causes `zhaquirks.ikea.shortcutbtn.IkeaTradfriShortcutBtn` to not pick this button up, producing a *Fail because input cluster mismatch on at least one endpoint* message in the log.\r\n\r\nThe `zha_event` still fires as it should, so by using the raw event one can work around this, but maybe there's a way to get the quirks to work.\r\n\r\n**To Reproduce**\r\n- pair a shortcut button that has the 0xfc7c input cluster\r\n- go to the device page and see that no quirks are applied\r\n- try to add an automation, see that triggers for button press are missing\r\n\r\n(Unfortunately I have no clue what caused the two buttons to present different signatures)\r\n\r\n**Expected behavior**\r\nThe quirks should still apply\r\n\r\n**Additional context**\r\nsignature of the button with the extra cluster (this is the only difference between my buttons)\r\n**EDIT:** The WindowCovering output cluster is also missing on the weird button\r\n```\r\n{\r\n \"node_descriptor\": \"NodeDescriptor(logical_type=<LogicalType.EndDevice: 2>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress: 128>, manufacturer_code=4476, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=True, *is_full_function_device=False, *is_mains_powered=False, *is_receiver_on_when_idle=False, *is_router=False, *is_security_capable=False)\",\r\n \"endpoints\": {\r\n \"1\": {\r\n \"profile_id\": 260,\r\n \"device_type\": \"0x0820\",\r\n \"in_clusters\": [\r\n \"0x0000\",\r\n \"0x0001\",\r\n \"0x0003\",\r\n \"0x0009\",\r\n \"0x0020\",\r\n \"0x1000\",\r\n \"0xfc7c\"\r\n ],\r\n \"out_clusters\": [\r\n \"0x0003\",\r\n \"0x0004\",\r\n \"0x0006\",\r\n \"0x0008\",\r\n \"0x0019\",\r\n \"0x1000\"\r\n ]\r\n }\r\n },\r\n \"manufacturer\": \"IKEA of Sweden\",\r\n \"model\": \"TRADFRI SHORTCUT Button\",\r\n \"class\": \"zigpy.device.Device\"\r\n}\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI shortcut button.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Alarms,\n Basic,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n PollControl,\n PowerConfiguration,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n ARGS,\n CLUSTER_ID,\n COMMAND,\n COMMAND_MOVE_ON_OFF,\n COMMAND_OFF,\n COMMAND_ON,\n COMMAND_STOP,\n DEVICE_TYPE,\n DIM_UP,\n DOUBLE_PRESS,\n ENDPOINT_ID,\n ENDPOINTS,\n INPUT_CLUSTERS,\n LONG_PRESS,\n LONG_RELEASE,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n SHORT_PRESS,\n TURN_ON,\n)\nfrom zhaquirks.ikea import IKEA, LightLinkCluster, PowerConfiguration1CRCluster\n\n\nclass IkeaTradfriShortcutBtn(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI shortcut button.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n # device_version=1\n # input_clusters=[0, 1, 3, 9, 32, 4096]\n # output_clusters=[3, 4, 6, 8, 25, 258, 4096]>\n MODELS_INFO: [(IKEA, \"TRADFRI SHORTCUT Button\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n 
PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration1CRCluster,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLinkCluster,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n }\n }\n\n device_automation_triggers = {\n (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n (LONG_PRESS, DIM_UP): {\n COMMAND: COMMAND_MOVE_ON_OFF,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [0, 83],\n },\n (LONG_RELEASE, DIM_UP): {\n COMMAND: COMMAND_STOP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n },\n }\n", "path": "zhaquirks/ikea/shortcutbtn.py"}], "after_files": [{"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI shortcut button.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Alarms,\n Basic,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n PollControl,\n PowerConfiguration,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n ARGS,\n CLUSTER_ID,\n COMMAND,\n COMMAND_MOVE_ON_OFF,\n COMMAND_OFF,\n COMMAND_ON,\n COMMAND_STOP,\n DEVICE_TYPE,\n DIM_UP,\n DOUBLE_PRESS,\n ENDPOINT_ID,\n ENDPOINTS,\n INPUT_CLUSTERS,\n LONG_PRESS,\n LONG_RELEASE,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n SHORT_PRESS,\n TURN_ON,\n)\nfrom zhaquirks.ikea import IKEA, LightLinkCluster, PowerConfiguration1CRCluster\n\n\nclass IkeaTradfriShortcutBtn(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI shortcut button.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n # device_version=1\n # input_clusters=[0, 1, 3, 9, 32, 4096]\n # output_clusters=[3, 4, 6, 8, 25, 258, 4096]>\n MODELS_INFO: [(IKEA, \"TRADFRI SHORTCUT Button\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration1CRCluster,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLinkCluster,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n }\n }\n\n device_automation_triggers = {\n (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, 
CLUSTER_ID: 6, ENDPOINT_ID: 1},\n (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n (LONG_PRESS, DIM_UP): {\n COMMAND: COMMAND_MOVE_ON_OFF,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [0, 83],\n },\n (LONG_RELEASE, DIM_UP): {\n COMMAND: COMMAND_STOP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n },\n }\n\n\nclass IkeaTradfriShortcutBtn2(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI shortcut button with IKEA cluster.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n # device_version=1\n # input_clusters=[0, 1, 3, 9, 32, 4096, 64636]\n # output_clusters=[3, 4, 6, 8, 25, 4096]>\n MODELS_INFO: [(IKEA, \"TRADFRI SHORTCUT Button\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n 0xFC7C, # IKEA Cluster\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration1CRCluster,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLinkCluster,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n LightLink.cluster_id,\n ],\n }\n }\n }\n\n device_automation_triggers = {\n (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n (LONG_PRESS, DIM_UP): {\n COMMAND: COMMAND_MOVE_ON_OFF,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [0, 83],\n },\n (LONG_RELEASE, DIM_UP): {\n COMMAND: COMMAND_STOP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n },\n }\n", "path": "zhaquirks/ikea/shortcutbtn.py"}]}
1,979
711
problem_id: gh_patches_debug_22525
source: rasdani/github-patches
task_type: git_diff
in_source_id: dj-stripe__dj-stripe-956
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PaymentMethod (and potentially other new SCA models) don't sync from web hooks **Describe the bug** I'm using 4a828a48092a3904094917776ce725ec9aa3fce5 (after the #914 merge) and trying to set up an SCA complaint flow though I'm encountering issues with the web hooks, they don't seem to sync. The reason, I assume, is that the stripe object name in the event is "payment_method" while the name in the PR is "paymentmethod". I assume it's the same issue for the other new models. **To Reproduce** Steps to reproduce the behavior: 1. Create a new payment method directly through the stripe API. 2. Wait for the webhook to be called **Expected behavior** The object should be created from the event but it isn't. Example event: ``` { "object": { "id": "pm_1F8uMTHa6wE0PhFmIrAlCMuB", "object": "payment_method", "billing_details": { "address": { "city": null, "country": "GB", "line1": null, "line2": null, "postal_code": "W1", "state": null }, "email": "[email protected]", "name": "Jeo", "phone": null }, "card": { "brand": "visa", "checks": { "address_line1_check": null, "address_postal_code_check": "pass", "cvc_check": "pass" }, "country": "US", "exp_month": 2, "exp_year": 2022, "fingerprint": "No6qZ6uMjc9xCthT", "funding": "credit", "generated_from": null, "last4": "4242", "three_d_secure_usage": { "supported": true }, "wallet": null }, "created": 1566157701, "customer": "cus_FXgo0IlbgH2cQS", "livemode": false, "metadata": { }, "type": "card" } } ``` **Environment** - dj-stripe version: master at 4a828a48092a3904094917776ce725ec9aa3fce5 - Your Stripe account's default API version: [e.g. 2019-02-19 - shown as "default" on https://dashboard.stripe.com/developers] - Database: Postgres - Python version: 3.7.4 - Django version: 2.2.4 **Can you reproduce the issue with the latest version of master?** Yes **Additional context** Add any other context about the problem here. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `djstripe/event_handlers.py` Content: ``` 1 """ 2 Webhook event handlers for the various models 3 4 Stripe docs for Events: https://stripe.com/docs/api/events 5 Stripe docs for Webhooks: https://stripe.com/docs/webhooks 6 7 TODO: Implement webhook event handlers for all the models that need to 8 respond to webhook events. 9 10 NOTE: 11 Event data is not guaranteed to be in the correct API version format. 12 See #116. When writing a webhook handler, make sure to first 13 re-retrieve the object you wish to process. 14 15 """ 16 import logging 17 18 from . import models, webhooks 19 from .enums import SourceType 20 from .utils import convert_tstamp 21 22 logger = logging.getLogger(__name__) 23 24 25 @webhooks.handler("customer") 26 def customer_webhook_handler(event): 27 """Handle updates to customer objects. 28 29 First determines the crud_type and then handles the event if a customer 30 exists locally. 31 As customers are tied to local users, djstripe will not create customers that 32 do not already exist locally. 33 34 Docs and an example customer webhook response: 35 https://stripe.com/docs/api#customer_object 36 """ 37 if event.customer: 38 # As customers are tied to local users, djstripe will not create 39 # customers that do not already exist locally. 
40 _handle_crud_like_event( 41 target_cls=models.Customer, event=event, crud_exact=True, crud_valid=True 42 ) 43 44 45 @webhooks.handler("customer.discount") 46 def customer_discount_webhook_handler(event): 47 """Handle updates to customer discount objects. 48 49 Docs: https://stripe.com/docs/api#discounts 50 51 Because there is no concept of a "Discount" model in dj-stripe (due to the 52 lack of a stripe id on them), this is a little different to the other 53 handlers. 54 """ 55 56 crud_type = CrudType.determine(event=event) 57 discount_data = event.data.get("object", {}) 58 coupon_data = discount_data.get("coupon", {}) 59 customer = event.customer 60 61 if crud_type.created or crud_type.updated: 62 coupon, _ = _handle_crud_like_event( 63 target_cls=models.Coupon, 64 event=event, 65 data=coupon_data, 66 id=coupon_data.get("id"), 67 ) 68 coupon_start = discount_data.get("start") 69 coupon_end = discount_data.get("end") 70 else: 71 coupon = None 72 coupon_start = None 73 coupon_end = None 74 75 customer.coupon = coupon 76 customer.coupon_start = convert_tstamp(coupon_start) 77 customer.coupon_end = convert_tstamp(coupon_end) 78 customer.save() 79 80 81 @webhooks.handler("customer.source") 82 def customer_source_webhook_handler(event): 83 """Handle updates to customer payment-source objects. 84 85 Docs: https://stripe.com/docs/api#customer_object-sources. 86 """ 87 customer_data = event.data.get("object", {}) 88 source_type = customer_data.get("object", {}) 89 90 # TODO: handle other types of sources 91 # (https://stripe.com/docs/api#customer_object-sources) 92 if source_type == SourceType.card: 93 if event.verb.endswith("deleted") and customer_data: 94 # On customer.source.deleted, we do not delete the object, 95 # we merely unlink it. 96 # customer = Customer.objects.get(id=customer_data["id"]) 97 # NOTE: for now, customer.sources still points to Card 98 # Also, https://github.com/dj-stripe/dj-stripe/issues/576 99 models.Card.objects.filter(id=customer_data.get("id", "")).delete() 100 models.DjstripePaymentMethod.objects.filter( 101 id=customer_data.get("id", "") 102 ).delete() 103 else: 104 _handle_crud_like_event(target_cls=models.Card, event=event) 105 106 107 @webhooks.handler("customer.subscription") 108 def customer_subscription_webhook_handler(event): 109 """Handle updates to customer subscription objects. 110 111 Docs an example subscription webhook response: 112 https://stripe.com/docs/api#subscription_object 113 """ 114 _handle_crud_like_event(target_cls=models.Subscription, event=event) 115 116 117 @webhooks.handler( 118 "transfer", 119 "charge", 120 "coupon", 121 "invoice", 122 "invoiceitem", 123 "paymentintent", 124 "paymentmethod", 125 "plan", 126 "product", 127 "setupintent", 128 "source", 129 ) 130 def other_object_webhook_handler(event): 131 """ 132 Handle updates to transfer, charge, invoice, invoiceitem, plan, product 133 and source objects. 134 135 Docs for: 136 - charge: https://stripe.com/docs/api#charges 137 - coupon: https://stripe.com/docs/api#coupons 138 - invoice: https://stripe.com/docs/api#invoices 139 - invoiceitem: https://stripe.com/docs/api#invoiceitems 140 - plan: https://stripe.com/docs/api#plans 141 - product: https://stripe.com/docs/api#products 142 - source: https://stripe.com/docs/api#sources 143 - payment_method: https://stripe.com/docs/api/payment_methods 144 - payment_intent: https://stripe.com/docs/api/payment_intents 145 """ 146 147 if event.parts[:2] == ["charge", "dispute"]: 148 # Do not attempt to handle charge.dispute.* events. 
149 # We do not have a Dispute model yet. 150 target_cls = models.Dispute 151 else: 152 target_cls = { 153 "charge": models.Charge, 154 "coupon": models.Coupon, 155 "invoice": models.Invoice, 156 "invoiceitem": models.InvoiceItem, 157 "paymentintent": models.PaymentIntent, 158 "paymentmethod": models.PaymentMethod, 159 "plan": models.Plan, 160 "product": models.Product, 161 "transfer": models.Transfer, 162 "setupintent": models.SetupIntent, 163 "source": models.Source, 164 }.get(event.category) 165 166 _handle_crud_like_event(target_cls=target_cls, event=event) 167 168 169 # 170 # Helpers 171 # 172 173 174 class CrudType(object): 175 """Helper object to determine CRUD-like event state.""" 176 177 created = False 178 updated = False 179 deleted = False 180 181 def __init__(self, **kwargs): 182 """Set attributes.""" 183 for k, v in kwargs.items(): 184 setattr(self, k, v) 185 186 @property 187 def valid(self): 188 """Return True if this is a CRUD-like event.""" 189 return self.created or self.updated or self.deleted 190 191 @classmethod 192 def determine(cls, event, verb=None, exact=False): 193 """ 194 Determine if the event verb is a crud_type (without the 'R') event. 195 196 :param verb: The event verb to examine. 197 :type verb: string (``str``/`unicode``) 198 :param exact: If True, match crud_type to event verb string exactly. 199 :param type: ``bool`` 200 :returns: The CrudType state object. 201 :rtype: ``CrudType`` 202 """ 203 verb = verb or event.verb 204 205 def check(crud_type_event): 206 if exact: 207 return verb == crud_type_event 208 else: 209 return verb.endswith(crud_type_event) 210 211 created = updated = deleted = False 212 213 if check("updated"): 214 updated = True 215 elif check("created"): 216 created = True 217 elif check("deleted"): 218 deleted = True 219 220 return cls(created=created, updated=updated, deleted=deleted) 221 222 223 def _handle_crud_like_event( 224 target_cls, 225 event, 226 data=None, 227 verb=None, 228 id=None, 229 customer=None, 230 crud_type=None, 231 crud_exact=False, 232 crud_valid=False, 233 ): 234 """ 235 Helper to process crud_type-like events for objects. 236 237 Non-deletes (creates, updates and "anything else" events) are treated as 238 update_or_create events - The object will be retrieved locally, then it is 239 synchronised with the Stripe API for parity. 240 241 Deletes only occur for delete events and cause the object to be deleted 242 from the local database, if it existed. If it doesn't exist then it is 243 ignored (but the event processing still succeeds). 244 245 :param target_cls: The djstripe model being handled. 246 :type: ``djstripe.models.StripeModel`` 247 :param data: The event object data (defaults to ``event.data``). 248 :param verb: The event verb (defaults to ``event.verb``). 249 :param id: The object Stripe ID (defaults to ``object.id``). 250 :param customer: The customer object (defaults to ``event.customer``). 251 :param crud_type: The CrudType object (determined by default). 252 :param crud_exact: If True, match verb against CRUD type exactly. 253 :param crud_valid: If True, CRUD type must match valid type. 254 :returns: The object (if any) and the event CrudType. 255 :rtype: ``tuple(obj, CrudType)`` 256 """ 257 data = data or event.data 258 id = id or data.get("object", {}).get("id", None) 259 260 if not id: 261 # We require an object when applying CRUD-like events, so if there's 262 # no ID the event is ignored/dropped. 
This happens in events such as 263 # invoice.upcoming, which refer to a future (non-existant) invoice. 264 logger.debug( 265 "Ignoring %r Stripe event without object ID: %r", event.type, event 266 ) 267 return 268 269 verb = verb or event.verb 270 customer = customer or event.customer 271 crud_type = crud_type or CrudType.determine( 272 event=event, verb=verb, exact=crud_exact 273 ) 274 obj = None 275 276 if crud_valid and not crud_type.valid: 277 logger.debug( 278 "Ignoring %r Stripe event without valid CRUD type: %r", event.type, event 279 ) 280 return 281 282 if crud_type.deleted: 283 qs = target_cls.objects.filter(id=id) 284 if target_cls is models.Customer and qs.exists(): 285 qs.get().purge() 286 else: 287 obj = target_cls.objects.filter(id=id).delete() 288 else: 289 # Any other event type (creates, updates, etc.) - This can apply to 290 # verbs that aren't strictly CRUD but Stripe do intend an update. Such 291 # as invoice.payment_failed. 292 kwargs = {"id": id} 293 if hasattr(target_cls, "customer"): 294 kwargs["customer"] = customer 295 data = target_cls(**kwargs).api_retrieve() 296 obj = target_cls.sync_from_stripe_data(data) 297 298 return obj, crud_type 299 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/djstripe/event_handlers.py b/djstripe/event_handlers.py --- a/djstripe/event_handlers.py +++ b/djstripe/event_handlers.py @@ -120,11 +120,11 @@ "coupon", "invoice", "invoiceitem", - "paymentintent", - "paymentmethod", + "payment_intent", + "payment_method", "plan", "product", - "setupintent", + "setup_intent", "source", ) def other_object_webhook_handler(event): @@ -154,12 +154,12 @@ "coupon": models.Coupon, "invoice": models.Invoice, "invoiceitem": models.InvoiceItem, - "paymentintent": models.PaymentIntent, - "paymentmethod": models.PaymentMethod, + "payment_intent": models.PaymentIntent, + "payment_method": models.PaymentMethod, "plan": models.Plan, "product": models.Product, "transfer": models.Transfer, - "setupintent": models.SetupIntent, + "setup_intent": models.SetupIntent, "source": models.Source, }.get(event.category)
{"golden_diff": "diff --git a/djstripe/event_handlers.py b/djstripe/event_handlers.py\n--- a/djstripe/event_handlers.py\n+++ b/djstripe/event_handlers.py\n@@ -120,11 +120,11 @@\n \"coupon\",\n \"invoice\",\n \"invoiceitem\",\n- \"paymentintent\",\n- \"paymentmethod\",\n+ \"payment_intent\",\n+ \"payment_method\",\n \"plan\",\n \"product\",\n- \"setupintent\",\n+ \"setup_intent\",\n \"source\",\n )\n def other_object_webhook_handler(event):\n@@ -154,12 +154,12 @@\n \"coupon\": models.Coupon,\n \"invoice\": models.Invoice,\n \"invoiceitem\": models.InvoiceItem,\n- \"paymentintent\": models.PaymentIntent,\n- \"paymentmethod\": models.PaymentMethod,\n+ \"payment_intent\": models.PaymentIntent,\n+ \"payment_method\": models.PaymentMethod,\n \"plan\": models.Plan,\n \"product\": models.Product,\n \"transfer\": models.Transfer,\n- \"setupintent\": models.SetupIntent,\n+ \"setup_intent\": models.SetupIntent,\n \"source\": models.Source,\n }.get(event.category)\n", "issue": "PaymentMethod (and potentially other new SCA models) don't sync from web hooks\n**Describe the bug**\r\nI'm using 4a828a48092a3904094917776ce725ec9aa3fce5 (after the #914 merge) and trying to set up an SCA complaint flow though I'm encountering issues with the web hooks, they don't seem to sync.\r\nThe reason, I assume, is that the stripe object name in the event is \"payment_method\" while the name in the PR is \"paymentmethod\". I assume it's the same issue for the other new models.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create a new payment method directly through the stripe API.\r\n2. Wait for the webhook to be called\r\n\r\n**Expected behavior**\r\nThe object should be created from the event but it isn't.\r\n\r\nExample event:\r\n```\r\n{\r\n \"object\": {\r\n \"id\": \"pm_1F8uMTHa6wE0PhFmIrAlCMuB\",\r\n \"object\": \"payment_method\",\r\n \"billing_details\": {\r\n \"address\": {\r\n \"city\": null,\r\n \"country\": \"GB\",\r\n \"line1\": null,\r\n \"line2\": null,\r\n \"postal_code\": \"W1\",\r\n \"state\": null\r\n },\r\n \"email\": \"[email protected]\",\r\n \"name\": \"Jeo\",\r\n \"phone\": null\r\n },\r\n \"card\": {\r\n \"brand\": \"visa\",\r\n \"checks\": {\r\n \"address_line1_check\": null,\r\n \"address_postal_code_check\": \"pass\",\r\n \"cvc_check\": \"pass\"\r\n },\r\n \"country\": \"US\",\r\n \"exp_month\": 2,\r\n \"exp_year\": 2022,\r\n \"fingerprint\": \"No6qZ6uMjc9xCthT\",\r\n \"funding\": \"credit\",\r\n \"generated_from\": null,\r\n \"last4\": \"4242\",\r\n \"three_d_secure_usage\": {\r\n \"supported\": true\r\n },\r\n \"wallet\": null\r\n },\r\n \"created\": 1566157701,\r\n \"customer\": \"cus_FXgo0IlbgH2cQS\",\r\n \"livemode\": false,\r\n \"metadata\": {\r\n },\r\n \"type\": \"card\"\r\n }\r\n}\r\n```\r\n\r\n**Environment**\r\n- dj-stripe version: master at 4a828a48092a3904094917776ce725ec9aa3fce5\r\n- Your Stripe account's default API version: [e.g. 
2019-02-19 - shown as \"default\" on https://dashboard.stripe.com/developers]\r\n- Database: Postgres\r\n- Python version: 3.7.4\r\n- Django version: 2.2.4\r\n\r\n**Can you reproduce the issue with the latest version of master?**\r\n\r\nYes\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "\"\"\"\nWebhook event handlers for the various models\n\nStripe docs for Events: https://stripe.com/docs/api/events\nStripe docs for Webhooks: https://stripe.com/docs/webhooks\n\nTODO: Implement webhook event handlers for all the models that need to\n respond to webhook events.\n\nNOTE:\n Event data is not guaranteed to be in the correct API version format.\n See #116. When writing a webhook handler, make sure to first\n re-retrieve the object you wish to process.\n\n\"\"\"\nimport logging\n\nfrom . import models, webhooks\nfrom .enums import SourceType\nfrom .utils import convert_tstamp\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](\"customer\")\ndef customer_webhook_handler(event):\n \"\"\"Handle updates to customer objects.\n\n First determines the crud_type and then handles the event if a customer\n exists locally.\n As customers are tied to local users, djstripe will not create customers that\n do not already exist locally.\n\n Docs and an example customer webhook response:\n https://stripe.com/docs/api#customer_object\n \"\"\"\n if event.customer:\n # As customers are tied to local users, djstripe will not create\n # customers that do not already exist locally.\n _handle_crud_like_event(\n target_cls=models.Customer, event=event, crud_exact=True, crud_valid=True\n )\n\n\[email protected](\"customer.discount\")\ndef customer_discount_webhook_handler(event):\n \"\"\"Handle updates to customer discount objects.\n\n Docs: https://stripe.com/docs/api#discounts\n\n Because there is no concept of a \"Discount\" model in dj-stripe (due to the\n lack of a stripe id on them), this is a little different to the other\n handlers.\n \"\"\"\n\n crud_type = CrudType.determine(event=event)\n discount_data = event.data.get(\"object\", {})\n coupon_data = discount_data.get(\"coupon\", {})\n customer = event.customer\n\n if crud_type.created or crud_type.updated:\n coupon, _ = _handle_crud_like_event(\n target_cls=models.Coupon,\n event=event,\n data=coupon_data,\n id=coupon_data.get(\"id\"),\n )\n coupon_start = discount_data.get(\"start\")\n coupon_end = discount_data.get(\"end\")\n else:\n coupon = None\n coupon_start = None\n coupon_end = None\n\n customer.coupon = coupon\n customer.coupon_start = convert_tstamp(coupon_start)\n customer.coupon_end = convert_tstamp(coupon_end)\n customer.save()\n\n\[email protected](\"customer.source\")\ndef customer_source_webhook_handler(event):\n \"\"\"Handle updates to customer payment-source objects.\n\n Docs: https://stripe.com/docs/api#customer_object-sources.\n \"\"\"\n customer_data = event.data.get(\"object\", {})\n source_type = customer_data.get(\"object\", {})\n\n # TODO: handle other types of sources\n # (https://stripe.com/docs/api#customer_object-sources)\n if source_type == SourceType.card:\n if event.verb.endswith(\"deleted\") and customer_data:\n # On customer.source.deleted, we do not delete the object,\n # we merely unlink it.\n # customer = Customer.objects.get(id=customer_data[\"id\"])\n # NOTE: for now, customer.sources still points to Card\n # Also, https://github.com/dj-stripe/dj-stripe/issues/576\n models.Card.objects.filter(id=customer_data.get(\"id\", \"\")).delete()\n 
models.DjstripePaymentMethod.objects.filter(\n id=customer_data.get(\"id\", \"\")\n ).delete()\n else:\n _handle_crud_like_event(target_cls=models.Card, event=event)\n\n\[email protected](\"customer.subscription\")\ndef customer_subscription_webhook_handler(event):\n \"\"\"Handle updates to customer subscription objects.\n\n Docs an example subscription webhook response:\n https://stripe.com/docs/api#subscription_object\n \"\"\"\n _handle_crud_like_event(target_cls=models.Subscription, event=event)\n\n\[email protected](\n \"transfer\",\n \"charge\",\n \"coupon\",\n \"invoice\",\n \"invoiceitem\",\n \"paymentintent\",\n \"paymentmethod\",\n \"plan\",\n \"product\",\n \"setupintent\",\n \"source\",\n)\ndef other_object_webhook_handler(event):\n \"\"\"\n Handle updates to transfer, charge, invoice, invoiceitem, plan, product\n and source objects.\n\n Docs for:\n - charge: https://stripe.com/docs/api#charges\n - coupon: https://stripe.com/docs/api#coupons\n - invoice: https://stripe.com/docs/api#invoices\n - invoiceitem: https://stripe.com/docs/api#invoiceitems\n - plan: https://stripe.com/docs/api#plans\n - product: https://stripe.com/docs/api#products\n - source: https://stripe.com/docs/api#sources\n - payment_method: https://stripe.com/docs/api/payment_methods\n - payment_intent: https://stripe.com/docs/api/payment_intents\n \"\"\"\n\n if event.parts[:2] == [\"charge\", \"dispute\"]:\n # Do not attempt to handle charge.dispute.* events.\n # We do not have a Dispute model yet.\n target_cls = models.Dispute\n else:\n target_cls = {\n \"charge\": models.Charge,\n \"coupon\": models.Coupon,\n \"invoice\": models.Invoice,\n \"invoiceitem\": models.InvoiceItem,\n \"paymentintent\": models.PaymentIntent,\n \"paymentmethod\": models.PaymentMethod,\n \"plan\": models.Plan,\n \"product\": models.Product,\n \"transfer\": models.Transfer,\n \"setupintent\": models.SetupIntent,\n \"source\": models.Source,\n }.get(event.category)\n\n _handle_crud_like_event(target_cls=target_cls, event=event)\n\n\n#\n# Helpers\n#\n\n\nclass CrudType(object):\n \"\"\"Helper object to determine CRUD-like event state.\"\"\"\n\n created = False\n updated = False\n deleted = False\n\n def __init__(self, **kwargs):\n \"\"\"Set attributes.\"\"\"\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n @property\n def valid(self):\n \"\"\"Return True if this is a CRUD-like event.\"\"\"\n return self.created or self.updated or self.deleted\n\n @classmethod\n def determine(cls, event, verb=None, exact=False):\n \"\"\"\n Determine if the event verb is a crud_type (without the 'R') event.\n\n :param verb: The event verb to examine.\n :type verb: string (``str``/`unicode``)\n :param exact: If True, match crud_type to event verb string exactly.\n :param type: ``bool``\n :returns: The CrudType state object.\n :rtype: ``CrudType``\n \"\"\"\n verb = verb or event.verb\n\n def check(crud_type_event):\n if exact:\n return verb == crud_type_event\n else:\n return verb.endswith(crud_type_event)\n\n created = updated = deleted = False\n\n if check(\"updated\"):\n updated = True\n elif check(\"created\"):\n created = True\n elif check(\"deleted\"):\n deleted = True\n\n return cls(created=created, updated=updated, deleted=deleted)\n\n\ndef _handle_crud_like_event(\n target_cls,\n event,\n data=None,\n verb=None,\n id=None,\n customer=None,\n crud_type=None,\n crud_exact=False,\n crud_valid=False,\n):\n \"\"\"\n Helper to process crud_type-like events for objects.\n\n Non-deletes (creates, updates and \"anything else\" events) are treated as\n 
update_or_create events - The object will be retrieved locally, then it is\n synchronised with the Stripe API for parity.\n\n Deletes only occur for delete events and cause the object to be deleted\n from the local database, if it existed. If it doesn't exist then it is\n ignored (but the event processing still succeeds).\n\n :param target_cls: The djstripe model being handled.\n :type: ``djstripe.models.StripeModel``\n :param data: The event object data (defaults to ``event.data``).\n :param verb: The event verb (defaults to ``event.verb``).\n :param id: The object Stripe ID (defaults to ``object.id``).\n :param customer: The customer object (defaults to ``event.customer``).\n :param crud_type: The CrudType object (determined by default).\n :param crud_exact: If True, match verb against CRUD type exactly.\n :param crud_valid: If True, CRUD type must match valid type.\n :returns: The object (if any) and the event CrudType.\n :rtype: ``tuple(obj, CrudType)``\n \"\"\"\n data = data or event.data\n id = id or data.get(\"object\", {}).get(\"id\", None)\n\n if not id:\n # We require an object when applying CRUD-like events, so if there's\n # no ID the event is ignored/dropped. This happens in events such as\n # invoice.upcoming, which refer to a future (non-existant) invoice.\n logger.debug(\n \"Ignoring %r Stripe event without object ID: %r\", event.type, event\n )\n return\n\n verb = verb or event.verb\n customer = customer or event.customer\n crud_type = crud_type or CrudType.determine(\n event=event, verb=verb, exact=crud_exact\n )\n obj = None\n\n if crud_valid and not crud_type.valid:\n logger.debug(\n \"Ignoring %r Stripe event without valid CRUD type: %r\", event.type, event\n )\n return\n\n if crud_type.deleted:\n qs = target_cls.objects.filter(id=id)\n if target_cls is models.Customer and qs.exists():\n qs.get().purge()\n else:\n obj = target_cls.objects.filter(id=id).delete()\n else:\n # Any other event type (creates, updates, etc.) - This can apply to\n # verbs that aren't strictly CRUD but Stripe do intend an update. Such\n # as invoice.payment_failed.\n kwargs = {\"id\": id}\n if hasattr(target_cls, \"customer\"):\n kwargs[\"customer\"] = customer\n data = target_cls(**kwargs).api_retrieve()\n obj = target_cls.sync_from_stripe_data(data)\n\n return obj, crud_type\n", "path": "djstripe/event_handlers.py"}], "after_files": [{"content": "\"\"\"\nWebhook event handlers for the various models\n\nStripe docs for Events: https://stripe.com/docs/api/events\nStripe docs for Webhooks: https://stripe.com/docs/webhooks\n\nTODO: Implement webhook event handlers for all the models that need to\n respond to webhook events.\n\nNOTE:\n Event data is not guaranteed to be in the correct API version format.\n See #116. When writing a webhook handler, make sure to first\n re-retrieve the object you wish to process.\n\n\"\"\"\nimport logging\n\nfrom . 
import models, webhooks\nfrom .enums import SourceType\nfrom .utils import convert_tstamp\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](\"customer\")\ndef customer_webhook_handler(event):\n \"\"\"Handle updates to customer objects.\n\n First determines the crud_type and then handles the event if a customer\n exists locally.\n As customers are tied to local users, djstripe will not create customers that\n do not already exist locally.\n\n Docs and an example customer webhook response:\n https://stripe.com/docs/api#customer_object\n \"\"\"\n if event.customer:\n # As customers are tied to local users, djstripe will not create\n # customers that do not already exist locally.\n _handle_crud_like_event(\n target_cls=models.Customer, event=event, crud_exact=True, crud_valid=True\n )\n\n\[email protected](\"customer.discount\")\ndef customer_discount_webhook_handler(event):\n \"\"\"Handle updates to customer discount objects.\n\n Docs: https://stripe.com/docs/api#discounts\n\n Because there is no concept of a \"Discount\" model in dj-stripe (due to the\n lack of a stripe id on them), this is a little different to the other\n handlers.\n \"\"\"\n\n crud_type = CrudType.determine(event=event)\n discount_data = event.data.get(\"object\", {})\n coupon_data = discount_data.get(\"coupon\", {})\n customer = event.customer\n\n if crud_type.created or crud_type.updated:\n coupon, _ = _handle_crud_like_event(\n target_cls=models.Coupon,\n event=event,\n data=coupon_data,\n id=coupon_data.get(\"id\"),\n )\n coupon_start = discount_data.get(\"start\")\n coupon_end = discount_data.get(\"end\")\n else:\n coupon = None\n coupon_start = None\n coupon_end = None\n\n customer.coupon = coupon\n customer.coupon_start = convert_tstamp(coupon_start)\n customer.coupon_end = convert_tstamp(coupon_end)\n customer.save()\n\n\[email protected](\"customer.source\")\ndef customer_source_webhook_handler(event):\n \"\"\"Handle updates to customer payment-source objects.\n\n Docs: https://stripe.com/docs/api#customer_object-sources.\n \"\"\"\n customer_data = event.data.get(\"object\", {})\n source_type = customer_data.get(\"object\", {})\n\n # TODO: handle other types of sources\n # (https://stripe.com/docs/api#customer_object-sources)\n if source_type == SourceType.card:\n if event.verb.endswith(\"deleted\") and customer_data:\n # On customer.source.deleted, we do not delete the object,\n # we merely unlink it.\n # customer = Customer.objects.get(id=customer_data[\"id\"])\n # NOTE: for now, customer.sources still points to Card\n # Also, https://github.com/dj-stripe/dj-stripe/issues/576\n models.Card.objects.filter(id=customer_data.get(\"id\", \"\")).delete()\n models.DjstripePaymentMethod.objects.filter(\n id=customer_data.get(\"id\", \"\")\n ).delete()\n else:\n _handle_crud_like_event(target_cls=models.Card, event=event)\n\n\[email protected](\"customer.subscription\")\ndef customer_subscription_webhook_handler(event):\n \"\"\"Handle updates to customer subscription objects.\n\n Docs an example subscription webhook response:\n https://stripe.com/docs/api#subscription_object\n \"\"\"\n _handle_crud_like_event(target_cls=models.Subscription, event=event)\n\n\[email protected](\n \"transfer\",\n \"charge\",\n \"coupon\",\n \"invoice\",\n \"invoiceitem\",\n \"payment_intent\",\n \"payment_method\",\n \"plan\",\n \"product\",\n \"setup_intent\",\n \"source\",\n)\ndef other_object_webhook_handler(event):\n \"\"\"\n Handle updates to transfer, charge, invoice, invoiceitem, plan, product\n and source objects.\n\n 
Docs for:\n - charge: https://stripe.com/docs/api#charges\n - coupon: https://stripe.com/docs/api#coupons\n - invoice: https://stripe.com/docs/api#invoices\n - invoiceitem: https://stripe.com/docs/api#invoiceitems\n - plan: https://stripe.com/docs/api#plans\n - product: https://stripe.com/docs/api#products\n - source: https://stripe.com/docs/api#sources\n - payment_method: https://stripe.com/docs/api/payment_methods\n - payment_intent: https://stripe.com/docs/api/payment_intents\n \"\"\"\n\n if event.parts[:2] == [\"charge\", \"dispute\"]:\n # Do not attempt to handle charge.dispute.* events.\n # We do not have a Dispute model yet.\n target_cls = models.Dispute\n else:\n target_cls = {\n \"charge\": models.Charge,\n \"coupon\": models.Coupon,\n \"invoice\": models.Invoice,\n \"invoiceitem\": models.InvoiceItem,\n \"payment_intent\": models.PaymentIntent,\n \"payment_method\": models.PaymentMethod,\n \"plan\": models.Plan,\n \"product\": models.Product,\n \"transfer\": models.Transfer,\n \"setup_intent\": models.SetupIntent,\n \"source\": models.Source,\n }.get(event.category)\n\n _handle_crud_like_event(target_cls=target_cls, event=event)\n\n\n#\n# Helpers\n#\n\n\nclass CrudType(object):\n \"\"\"Helper object to determine CRUD-like event state.\"\"\"\n\n created = False\n updated = False\n deleted = False\n\n def __init__(self, **kwargs):\n \"\"\"Set attributes.\"\"\"\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n @property\n def valid(self):\n \"\"\"Return True if this is a CRUD-like event.\"\"\"\n return self.created or self.updated or self.deleted\n\n @classmethod\n def determine(cls, event, verb=None, exact=False):\n \"\"\"\n Determine if the event verb is a crud_type (without the 'R') event.\n\n :param verb: The event verb to examine.\n :type verb: string (``str``/`unicode``)\n :param exact: If True, match crud_type to event verb string exactly.\n :param type: ``bool``\n :returns: The CrudType state object.\n :rtype: ``CrudType``\n \"\"\"\n verb = verb or event.verb\n\n def check(crud_type_event):\n if exact:\n return verb == crud_type_event\n else:\n return verb.endswith(crud_type_event)\n\n created = updated = deleted = False\n\n if check(\"updated\"):\n updated = True\n elif check(\"created\"):\n created = True\n elif check(\"deleted\"):\n deleted = True\n\n return cls(created=created, updated=updated, deleted=deleted)\n\n\ndef _handle_crud_like_event(\n target_cls,\n event,\n data=None,\n verb=None,\n id=None,\n customer=None,\n crud_type=None,\n crud_exact=False,\n crud_valid=False,\n):\n \"\"\"\n Helper to process crud_type-like events for objects.\n\n Non-deletes (creates, updates and \"anything else\" events) are treated as\n update_or_create events - The object will be retrieved locally, then it is\n synchronised with the Stripe API for parity.\n\n Deletes only occur for delete events and cause the object to be deleted\n from the local database, if it existed. 
If it doesn't exist then it is\n ignored (but the event processing still succeeds).\n\n :param target_cls: The djstripe model being handled.\n :type: ``djstripe.models.StripeModel``\n :param data: The event object data (defaults to ``event.data``).\n :param verb: The event verb (defaults to ``event.verb``).\n :param id: The object Stripe ID (defaults to ``object.id``).\n :param customer: The customer object (defaults to ``event.customer``).\n :param crud_type: The CrudType object (determined by default).\n :param crud_exact: If True, match verb against CRUD type exactly.\n :param crud_valid: If True, CRUD type must match valid type.\n :returns: The object (if any) and the event CrudType.\n :rtype: ``tuple(obj, CrudType)``\n \"\"\"\n data = data or event.data\n id = id or data.get(\"object\", {}).get(\"id\", None)\n\n if not id:\n # We require an object when applying CRUD-like events, so if there's\n # no ID the event is ignored/dropped. This happens in events such as\n # invoice.upcoming, which refer to a future (non-existant) invoice.\n logger.debug(\n \"Ignoring %r Stripe event without object ID: %r\", event.type, event\n )\n return\n\n verb = verb or event.verb\n customer = customer or event.customer\n crud_type = crud_type or CrudType.determine(\n event=event, verb=verb, exact=crud_exact\n )\n obj = None\n\n if crud_valid and not crud_type.valid:\n logger.debug(\n \"Ignoring %r Stripe event without valid CRUD type: %r\", event.type, event\n )\n return\n\n if crud_type.deleted:\n qs = target_cls.objects.filter(id=id)\n if target_cls is models.Customer and qs.exists():\n qs.get().purge()\n else:\n obj = target_cls.objects.filter(id=id).delete()\n else:\n # Any other event type (creates, updates, etc.) - This can apply to\n # verbs that aren't strictly CRUD but Stripe do intend an update. Such\n # as invoice.payment_failed.\n kwargs = {\"id\": id}\n if hasattr(target_cls, \"customer\"):\n kwargs[\"customer\"] = customer\n data = target_cls(**kwargs).api_retrieve()\n obj = target_cls.sync_from_stripe_data(data)\n\n return obj, crud_type\n", "path": "djstripe/event_handlers.py"}]}
3,966
259
gh_patches_debug_16823
rasdani/github-patches
git_diff
bids-standard__pybids-505
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 0.9.4 Release @tyarkoni We've built up a [changelog](https://github.com/bids-standard/pybids/releases/tag/untagged-aa777a4ea34ce82990cd), so it'd be good to release soon. Any blocking issues? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/prep_zenodo.py` Content: ``` 1 #!/usr/bin/env python3 2 import git 3 import json 4 from subprocess import run, PIPE, CalledProcessError 5 from pathlib import Path 6 from tempfile import TemporaryDirectory 7 8 9 def decommify(name): 10 return ' '.join(name.split(', ')[::-1]) 11 12 13 # List of repositories whose commits should be counted as contributions 14 codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')] 15 16 # Last shablona commit 17 origin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f' 18 19 git_root = Path(git.Repo('.', search_parent_directories=True).working_dir) 20 zenodo_file = git_root / '.zenodo.json' 21 22 zenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {} 23 24 orig_creators = zenodo.get('creators', []) 25 creator_map = {decommify(creator['name']): creator 26 for creator in orig_creators} 27 28 shortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE) 29 counts = [line.split('\t', 1)[::-1] 30 for line in shortlog.stdout.decode().split('\n') if line] 31 32 # Get additional commit counts from dependencies 33 with TemporaryDirectory() as tmpdir: 34 tmppath = Path(tmpdir) 35 for repo in codependents: 36 try: 37 repo, ref = repo 38 except (TypeError, ValueError): 39 ref = None 40 repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0]) 41 try: 42 clone = run(['git', 'clone', '-q', repo, repo_dir], check=True) 43 except CalledProcessError as err: 44 raise RuntimeError("Could not clone {}".format(repo)) from err 45 46 if ref is None: 47 tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE) 48 # latest tag 49 ref = tag.stdout.decode().strip().rsplit('\n', 1)[1] 50 51 dep_shortlog = run( 52 ['git', '-C', repo_dir, 'shortlog', '-ns', ref], 53 stdout=PIPE) 54 counts.extend(line.split('\t', 1)[::-1] 55 for line in dep_shortlog.stdout.decode().split('\n') 56 if line) 57 58 commit_counts = {} 59 for committer, commits in counts: 60 commit_counts[committer] = commit_counts.get(committer, 0) + int(commits) 61 62 # Stable sort: 63 # Number of commits in reverse order 64 # Ties broken by alphabetical order of first name 65 committers = [committer 66 for committer, _ in sorted(commit_counts.items(), 67 key=lambda x: (-x[1], x[0]))] 68 69 # Tal to the top 70 first_author = 'Tal Yarkoni' 71 if committers[0] != first_author: 72 committers.remove(first_author) 73 committers.insert(0, first_author) 74 75 creators = [ 76 creator_map.get(committer, {'name': committer}) 77 for committer in committers 78 ] 79 80 zenodo['creators'] = creators 81 zenodo_file.write_text(json.dumps(zenodo, indent=2) + '\n') 82 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/prep_zenodo.py b/tools/prep_zenodo.py --- a/tools/prep_zenodo.py +++ b/tools/prep_zenodo.py @@ -9,6 +9,11 @@ def decommify(name): return ' '.join(name.split(', ')[::-1]) +# Users who have asked not to be cited at this time +# XXX We should add a shortlog since the most recent tag and explicitly note +# that a blacklisted user has contributed again recently, and verify they still +# do not want to be cited. +blacklist = {'Cecile Madjar'} # List of repositories whose commits should be counted as contributions codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')] @@ -75,6 +80,7 @@ creators = [ creator_map.get(committer, {'name': committer}) for committer in committers + if committer not in blacklist ] zenodo['creators'] = creators
{"golden_diff": "diff --git a/tools/prep_zenodo.py b/tools/prep_zenodo.py\n--- a/tools/prep_zenodo.py\n+++ b/tools/prep_zenodo.py\n@@ -9,6 +9,11 @@\n def decommify(name):\n return ' '.join(name.split(', ')[::-1])\n \n+# Users who have asked not to be cited at this time\n+# XXX We should add a shortlog since the most recent tag and explicitly note\n+# that a blacklisted user has contributed again recently, and verify they still\n+# do not want to be cited.\n+blacklist = {'Cecile Madjar'}\n \n # List of repositories whose commits should be counted as contributions\n codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]\n@@ -75,6 +80,7 @@\n creators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n+ if committer not in blacklist\n ]\n \n zenodo['creators'] = creators\n", "issue": "0.9.4 Release\n@tyarkoni We've built up a [changelog](https://github.com/bids-standard/pybids/releases/tag/untagged-aa777a4ea34ce82990cd), so it'd be good to release soon. Any blocking issues?\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport git\nimport json\nfrom subprocess import run, PIPE, CalledProcessError\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\n\ndef decommify(name):\n return ' '.join(name.split(', ')[::-1])\n\n\n# List of repositories whose commits should be counted as contributions\ncodependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]\n\n# Last shablona commit\norigin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'\n\ngit_root = Path(git.Repo('.', search_parent_directories=True).working_dir)\nzenodo_file = git_root / '.zenodo.json'\n\nzenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}\n\norig_creators = zenodo.get('creators', [])\ncreator_map = {decommify(creator['name']): creator\n for creator in orig_creators}\n\nshortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)\ncounts = [line.split('\\t', 1)[::-1]\n for line in shortlog.stdout.decode().split('\\n') if line]\n\n# Get additional commit counts from dependencies\nwith TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n for repo in codependents:\n try:\n repo, ref = repo\n except (TypeError, ValueError):\n ref = None\n repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])\n try:\n clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)\n except CalledProcessError as err:\n raise RuntimeError(\"Could not clone {}\".format(repo)) from err\n\n if ref is None:\n tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)\n # latest tag\n ref = tag.stdout.decode().strip().rsplit('\\n', 1)[1]\n\n dep_shortlog = run(\n ['git', '-C', repo_dir, 'shortlog', '-ns', ref],\n stdout=PIPE)\n counts.extend(line.split('\\t', 1)[::-1]\n for line in dep_shortlog.stdout.decode().split('\\n')\n if line)\n\ncommit_counts = {}\nfor committer, commits in counts:\n commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)\n\n# Stable sort:\n# Number of commits in reverse order\n# Ties broken by alphabetical order of first name\ncommitters = [committer\n for committer, _ in sorted(commit_counts.items(),\n key=lambda x: (-x[1], x[0]))]\n\n# Tal to the top\nfirst_author = 'Tal Yarkoni'\nif committers[0] != first_author:\n committers.remove(first_author)\n committers.insert(0, first_author)\n\ncreators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n ]\n\nzenodo['creators'] = creators\nzenodo_file.write_text(json.dumps(zenodo, indent=2) + 
'\\n')\n", "path": "tools/prep_zenodo.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport git\nimport json\nfrom subprocess import run, PIPE, CalledProcessError\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\n\ndef decommify(name):\n return ' '.join(name.split(', ')[::-1])\n\n# Users who have asked not to be cited at this time\n# XXX We should add a shortlog since the most recent tag and explicitly note\n# that a blacklisted user has contributed again recently, and verify they still\n# do not want to be cited.\nblacklist = {'Cecile Madjar'}\n\n# List of repositories whose commits should be counted as contributions\ncodependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]\n\n# Last shablona commit\norigin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'\n\ngit_root = Path(git.Repo('.', search_parent_directories=True).working_dir)\nzenodo_file = git_root / '.zenodo.json'\n\nzenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}\n\norig_creators = zenodo.get('creators', [])\ncreator_map = {decommify(creator['name']): creator\n for creator in orig_creators}\n\nshortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)\ncounts = [line.split('\\t', 1)[::-1]\n for line in shortlog.stdout.decode().split('\\n') if line]\n\n# Get additional commit counts from dependencies\nwith TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n for repo in codependents:\n try:\n repo, ref = repo\n except (TypeError, ValueError):\n ref = None\n repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])\n try:\n clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)\n except CalledProcessError as err:\n raise RuntimeError(\"Could not clone {}\".format(repo)) from err\n\n if ref is None:\n tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)\n # latest tag\n ref = tag.stdout.decode().strip().rsplit('\\n', 1)[1]\n\n dep_shortlog = run(\n ['git', '-C', repo_dir, 'shortlog', '-ns', ref],\n stdout=PIPE)\n counts.extend(line.split('\\t', 1)[::-1]\n for line in dep_shortlog.stdout.decode().split('\\n')\n if line)\n\ncommit_counts = {}\nfor committer, commits in counts:\n commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)\n\n# Stable sort:\n# Number of commits in reverse order\n# Ties broken by alphabetical order of first name\ncommitters = [committer\n for committer, _ in sorted(commit_counts.items(),\n key=lambda x: (-x[1], x[0]))]\n\n# Tal to the top\nfirst_author = 'Tal Yarkoni'\nif committers[0] != first_author:\n committers.remove(first_author)\n committers.insert(0, first_author)\n\ncreators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n if committer not in blacklist\n ]\n\nzenodo['creators'] = creators\nzenodo_file.write_text(json.dumps(zenodo, indent=2) + '\\n')\n", "path": "tools/prep_zenodo.py"}]}
1,197
228
gh_patches_debug_26541
rasdani/github-patches
git_diff
OpenMined__PySyft-5732
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add tests for domain's RoleManager ## Description Add unit tests for grid domain's RoleManager at the module `grid/apps/domain/src/main/core/manager` ## Type of Test - [x] **Unit test** (e.g. checking a loop, method, or function is working as intended) - [ ] **Integration test** (e.g. checking if a certain group or set of functionality is working as intended) - [ ] **Regression test** (e.g. checking if by adding or removing a module of code allows other systems to continue to function as intended) - [ ] **Stress test** (e.g. checking to see how well a system performs under various situations, including heavy usage) - [ ] **Performance test** (e.g. checking to see how efficient a system is as performing the intended task) - [ ] Other... ## Expected Behavior Ideally, the tests should cover as many methods as possible and within reason. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `packages/grid/apps/domain/src/main/core/manager/role_manager.py` Content: ``` 1 # stdlib 2 from typing import List 3 from typing import Union 4 5 # grid relative 6 from ..database.tables.roles import Role 7 from ..exceptions import RoleNotFoundError 8 from .database_manager import DatabaseManager 9 10 11 class RoleManager(DatabaseManager): 12 13 schema = Role 14 15 def __init__(self, database): 16 self._schema = RoleManager.schema 17 self.db = database 18 19 @property 20 def user_role(self): 21 return self.first(name="User") 22 23 @property 24 def owner_role(self): 25 return self.first(name="Owner") 26 27 @property 28 def compliance_officer_role(self): 29 return self.first(name="Compliance Officer") 30 31 @property 32 def admin_role(self): 33 return self.first(name="Administrator") 34 35 @property 36 def common_roles(self): 37 return self.db.session.query(self._schema).filter_by( 38 can_triage_requests=False, 39 can_edit_settings=False, 40 can_create_users=False, 41 can_create_groups=False, 42 can_upload_data=False, 43 can_edit_roles=False, 44 can_manage_infrastructure=False, 45 ) 46 47 @property 48 def org_roles(self): 49 return self.db.session.query(self._schema).except_(self.common_roles) 50 51 def first(self, **kwargs) -> Union[None, List]: 52 result = super().first(**kwargs) 53 if not result: 54 raise RoleNotFoundError 55 return result 56 57 def query(self, **kwargs) -> Union[None, List]: 58 results = super().query(**kwargs) 59 if len(results) == 0: 60 raise RoleNotFoundError 61 return results 62 63 def set(self, role_id, params): 64 if self.contain(id=role_id): 65 self.modify({"id": role_id}, params) 66 else: 67 raise RoleNotFoundError 68 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/packages/grid/apps/domain/src/main/core/manager/role_manager.py b/packages/grid/apps/domain/src/main/core/manager/role_manager.py --- a/packages/grid/apps/domain/src/main/core/manager/role_manager.py +++ b/packages/grid/apps/domain/src/main/core/manager/role_manager.py @@ -2,6 +2,8 @@ from typing import List from typing import Union +from flask_sqlalchemy import BaseQuery + # grid relative from ..database.tables.roles import Role from ..exceptions import RoleNotFoundError @@ -32,8 +34,7 @@ def admin_role(self): return self.first(name="Administrator") - @property - def common_roles(self): + def _common_roles(self) -> BaseQuery: return self.db.session.query(self._schema).filter_by( can_triage_requests=False, can_edit_settings=False, @@ -44,9 +45,13 @@ can_manage_infrastructure=False, ) + @property + def common_roles(self): + return self._common_roles().all() + @property def org_roles(self): - return self.db.session.query(self._schema).except_(self.common_roles) + return self.db.session.query(self._schema).except_(self._common_roles).all() def first(self, **kwargs) -> Union[None, List]: result = super().first(**kwargs)
{"golden_diff": "diff --git a/packages/grid/apps/domain/src/main/core/manager/role_manager.py b/packages/grid/apps/domain/src/main/core/manager/role_manager.py\n--- a/packages/grid/apps/domain/src/main/core/manager/role_manager.py\n+++ b/packages/grid/apps/domain/src/main/core/manager/role_manager.py\n@@ -2,6 +2,8 @@\n from typing import List\n from typing import Union\n \n+from flask_sqlalchemy import BaseQuery\n+\n # grid relative\n from ..database.tables.roles import Role\n from ..exceptions import RoleNotFoundError\n@@ -32,8 +34,7 @@\n def admin_role(self):\n return self.first(name=\"Administrator\")\n \n- @property\n- def common_roles(self):\n+ def _common_roles(self) -> BaseQuery:\n return self.db.session.query(self._schema).filter_by(\n can_triage_requests=False,\n can_edit_settings=False,\n@@ -44,9 +45,13 @@\n can_manage_infrastructure=False,\n )\n \n+ @property\n+ def common_roles(self):\n+ return self._common_roles().all()\n+\n @property\n def org_roles(self):\n- return self.db.session.query(self._schema).except_(self.common_roles)\n+ return self.db.session.query(self._schema).except_(self._common_roles).all()\n \n def first(self, **kwargs) -> Union[None, List]:\n result = super().first(**kwargs)\n", "issue": "Add tests for domain's RoleManager\n## Description\r\nAdd unit tests for grid domain's RoleManager at the module `grid/apps/domain/src/main/core/manager`\r\n\r\n## Type of Test\r\n- [x] **Unit test** (e.g. checking a loop, method, or function is working as intended)\r\n- [ ] **Integration test** (e.g. checking if a certain group or set of functionality is working as intended)\r\n- [ ] **Regression test** (e.g. checking if by adding or removing a module of code allows other systems to continue to function as intended)\r\n- [ ] **Stress test** (e.g. checking to see how well a system performs under various situations, including heavy usage)\r\n- [ ] **Performance test** (e.g. 
checking to see how efficient a system is as performing the intended task)\r\n- [ ] Other...\r\n\r\n## Expected Behavior\r\nIdeally, the tests should cover as many methods as possible and within reason.\r\n\n", "before_files": [{"content": "# stdlib\nfrom typing import List\nfrom typing import Union\n\n# grid relative\nfrom ..database.tables.roles import Role\nfrom ..exceptions import RoleNotFoundError\nfrom .database_manager import DatabaseManager\n\n\nclass RoleManager(DatabaseManager):\n\n schema = Role\n\n def __init__(self, database):\n self._schema = RoleManager.schema\n self.db = database\n\n @property\n def user_role(self):\n return self.first(name=\"User\")\n\n @property\n def owner_role(self):\n return self.first(name=\"Owner\")\n\n @property\n def compliance_officer_role(self):\n return self.first(name=\"Compliance Officer\")\n\n @property\n def admin_role(self):\n return self.first(name=\"Administrator\")\n\n @property\n def common_roles(self):\n return self.db.session.query(self._schema).filter_by(\n can_triage_requests=False,\n can_edit_settings=False,\n can_create_users=False,\n can_create_groups=False,\n can_upload_data=False,\n can_edit_roles=False,\n can_manage_infrastructure=False,\n )\n\n @property\n def org_roles(self):\n return self.db.session.query(self._schema).except_(self.common_roles)\n\n def first(self, **kwargs) -> Union[None, List]:\n result = super().first(**kwargs)\n if not result:\n raise RoleNotFoundError\n return result\n\n def query(self, **kwargs) -> Union[None, List]:\n results = super().query(**kwargs)\n if len(results) == 0:\n raise RoleNotFoundError\n return results\n\n def set(self, role_id, params):\n if self.contain(id=role_id):\n self.modify({\"id\": role_id}, params)\n else:\n raise RoleNotFoundError\n", "path": "packages/grid/apps/domain/src/main/core/manager/role_manager.py"}], "after_files": [{"content": "# stdlib\nfrom typing import List\nfrom typing import Union\n\nfrom flask_sqlalchemy import BaseQuery\n\n# grid relative\nfrom ..database.tables.roles import Role\nfrom ..exceptions import RoleNotFoundError\nfrom .database_manager import DatabaseManager\n\n\nclass RoleManager(DatabaseManager):\n\n schema = Role\n\n def __init__(self, database):\n self._schema = RoleManager.schema\n self.db = database\n\n @property\n def user_role(self):\n return self.first(name=\"User\")\n\n @property\n def owner_role(self):\n return self.first(name=\"Owner\")\n\n @property\n def compliance_officer_role(self):\n return self.first(name=\"Compliance Officer\")\n\n @property\n def admin_role(self):\n return self.first(name=\"Administrator\")\n\n def _common_roles(self) -> BaseQuery:\n return self.db.session.query(self._schema).filter_by(\n can_triage_requests=False,\n can_edit_settings=False,\n can_create_users=False,\n can_create_groups=False,\n can_upload_data=False,\n can_edit_roles=False,\n can_manage_infrastructure=False,\n )\n\n @property\n def common_roles(self):\n return self._common_roles().all()\n\n @property\n def org_roles(self):\n return self.db.session.query(self._schema).except_(self._common_roles).all()\n\n def first(self, **kwargs) -> Union[None, List]:\n result = super().first(**kwargs)\n if not result:\n raise RoleNotFoundError\n return result\n\n def query(self, **kwargs) -> Union[None, List]:\n results = super().query(**kwargs)\n if len(results) == 0:\n raise RoleNotFoundError\n return results\n\n def set(self, role_id, params):\n if self.contain(id=role_id):\n self.modify({\"id\": role_id}, params)\n else:\n raise RoleNotFoundError\n", 
"path": "packages/grid/apps/domain/src/main/core/manager/role_manager.py"}]}
975
308
gh_patches_debug_45891
rasdani/github-patches
git_diff
pypa__pip-3213
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pip list outdated I install Flask version 0.8 with pip in editable mode `pip install -e git+https://github.com/mitsuhiko/[email protected]#egg=flask` Output of pip list ``` argparse (1.2.1) Flask (0.8dev-20130506, /home/pratz/VirtualEnv/learnflask/src/flask) Jinja2 (2.6) Werkzeug (0.8.3) wsgiref (0.1.2) ``` Ouput of pip list -o Nothing ( returns to console without any output ) Should not the output of pip list -o be `Flask (Current: 0.8dev Latest: 0.9)` Or is this how pip list -o works for editable mode ? NOTE: pip version is 1.3.1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pip/commands/list.py` Content: ``` 1 from __future__ import absolute_import 2 3 import logging 4 import warnings 5 6 from pip.basecommand import Command 7 from pip.exceptions import DistributionNotFound 8 from pip.index import FormatControl, fmt_ctl_formats, PackageFinder, Search 9 from pip.req import InstallRequirement 10 from pip.utils import ( 11 get_installed_distributions, dist_is_editable, canonicalize_name) 12 from pip.utils.deprecation import RemovedInPip10Warning 13 from pip.wheel import WheelCache 14 from pip.cmdoptions import make_option_group, index_group 15 16 17 logger = logging.getLogger(__name__) 18 19 20 class ListCommand(Command): 21 """ 22 List installed packages, including editables. 23 24 Packages are listed in a case-insensitive sorted order. 25 """ 26 name = 'list' 27 usage = """ 28 %prog [options]""" 29 summary = 'List installed packages.' 30 31 def __init__(self, *args, **kw): 32 super(ListCommand, self).__init__(*args, **kw) 33 34 cmd_opts = self.cmd_opts 35 36 cmd_opts.add_option( 37 '-o', '--outdated', 38 action='store_true', 39 default=False, 40 help='List outdated packages (excluding editables)') 41 cmd_opts.add_option( 42 '-u', '--uptodate', 43 action='store_true', 44 default=False, 45 help='List uptodate packages (excluding editables)') 46 cmd_opts.add_option( 47 '-e', '--editable', 48 action='store_true', 49 default=False, 50 help='List editable projects.') 51 cmd_opts.add_option( 52 '-l', '--local', 53 action='store_true', 54 default=False, 55 help=('If in a virtualenv that has global access, do not list ' 56 'globally-installed packages.'), 57 ) 58 self.cmd_opts.add_option( 59 '--user', 60 dest='user', 61 action='store_true', 62 default=False, 63 help='Only output packages installed in user-site.') 64 65 cmd_opts.add_option( 66 '--pre', 67 action='store_true', 68 default=False, 69 help=("Include pre-release and development versions. By default, " 70 "pip only finds stable versions."), 71 ) 72 73 index_opts = make_option_group(index_group, self.parser) 74 75 self.parser.insert_option_group(0, index_opts) 76 self.parser.insert_option_group(0, cmd_opts) 77 78 def _build_package_finder(self, options, index_urls, session): 79 """ 80 Create a package finder appropriate to this list command. 81 """ 82 return PackageFinder( 83 find_links=options.find_links, 84 index_urls=index_urls, 85 allow_all_prereleases=options.pre, 86 trusted_hosts=options.trusted_hosts, 87 process_dependency_links=options.process_dependency_links, 88 session=session, 89 ) 90 91 def run(self, options, args): 92 if options.allow_external: 93 warnings.warn( 94 "--allow-external has been deprecated and will be removed in " 95 "the future. 
Due to changes in the repository protocol, it no " 96 "longer has any effect.", 97 RemovedInPip10Warning, 98 ) 99 100 if options.allow_all_external: 101 warnings.warn( 102 "--allow-all-external has been deprecated and will be removed " 103 "in the future. Due to changes in the repository protocol, it " 104 "no longer has any effect.", 105 RemovedInPip10Warning, 106 ) 107 108 if options.allow_unverified: 109 warnings.warn( 110 "--allow-unverified has been deprecated and will be removed " 111 "in the future. Due to changes in the repository protocol, it " 112 "no longer has any effect.", 113 RemovedInPip10Warning, 114 ) 115 116 if options.outdated: 117 self.run_outdated(options) 118 elif options.uptodate: 119 self.run_uptodate(options) 120 elif options.editable: 121 self.run_editables(options) 122 else: 123 self.run_listing(options) 124 125 def run_outdated(self, options): 126 for dist, version, typ in self.find_packages_latest_versions(options): 127 if version > dist.parsed_version: 128 logger.info( 129 '%s (Current: %s Latest: %s [%s])', 130 dist.project_name, dist.version, version, typ, 131 ) 132 133 def find_packages_latest_versions(self, options): 134 index_urls = [options.index_url] + options.extra_index_urls 135 if options.no_index: 136 logger.info('Ignoring indexes: %s', ','.join(index_urls)) 137 index_urls = [] 138 139 dependency_links = [] 140 for dist in get_installed_distributions(local_only=options.local, 141 user_only=options.user): 142 if dist.has_metadata('dependency_links.txt'): 143 dependency_links.extend( 144 dist.get_metadata_lines('dependency_links.txt'), 145 ) 146 147 with self._build_session(options) as session: 148 finder = self._build_package_finder(options, index_urls, session) 149 finder.add_dependency_links(dependency_links) 150 151 installed_packages = get_installed_distributions( 152 local_only=options.local, 153 user_only=options.user, 154 include_editables=False, 155 ) 156 format_control = FormatControl(set(), set()) 157 wheel_cache = WheelCache(options.cache_dir, format_control) 158 for dist in installed_packages: 159 req = InstallRequirement.from_line( 160 dist.key, None, isolated=options.isolated_mode, 161 wheel_cache=wheel_cache 162 ) 163 typ = 'unknown' 164 try: 165 link = finder.find_requirement(req, True) 166 167 # If link is None, means installed version is most 168 # up-to-date 169 if link is None: 170 continue 171 except DistributionNotFound: 172 continue 173 else: 174 canonical_name = canonicalize_name(req.name) 175 formats = fmt_ctl_formats(format_control, canonical_name) 176 search = Search( 177 req.name, 178 canonical_name, 179 formats) 180 remote_version = finder._link_package_versions( 181 link, search).version 182 if link.is_wheel: 183 typ = 'wheel' 184 else: 185 typ = 'sdist' 186 yield dist, remote_version, typ 187 188 def run_listing(self, options): 189 installed_packages = get_installed_distributions( 190 local_only=options.local, 191 user_only=options.user, 192 ) 193 self.output_package_listing(installed_packages) 194 195 def run_editables(self, options): 196 installed_packages = get_installed_distributions( 197 local_only=options.local, 198 user_only=options.user, 199 editables_only=True, 200 ) 201 self.output_package_listing(installed_packages) 202 203 def output_package_listing(self, installed_packages): 204 installed_packages = sorted( 205 installed_packages, 206 key=lambda dist: dist.project_name.lower(), 207 ) 208 for dist in installed_packages: 209 if dist_is_editable(dist): 210 line = '%s (%s, %s)' % ( 211 dist.project_name, 212 
dist.version, 213 dist.location, 214 ) 215 else: 216 line = '%s (%s)' % (dist.project_name, dist.version) 217 logger.info(line) 218 219 def run_uptodate(self, options): 220 uptodate = [] 221 for dist, version, typ in self.find_packages_latest_versions(options): 222 if dist.parsed_version == version: 223 uptodate.append(dist) 224 self.output_package_listing(uptodate) 225 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pip/commands/list.py b/pip/commands/list.py --- a/pip/commands/list.py +++ b/pip/commands/list.py @@ -4,7 +4,7 @@ import warnings from pip.basecommand import Command -from pip.exceptions import DistributionNotFound +from pip.exceptions import CommandError, DistributionNotFound from pip.index import FormatControl, fmt_ctl_formats, PackageFinder, Search from pip.req import InstallRequirement from pip.utils import ( @@ -37,12 +37,12 @@ '-o', '--outdated', action='store_true', default=False, - help='List outdated packages (excluding editables)') + help='List outdated packages') cmd_opts.add_option( '-u', '--uptodate', action='store_true', default=False, - help='List uptodate packages (excluding editables)') + help='List uptodate packages') cmd_opts.add_option( '-e', '--editable', action='store_true', @@ -112,22 +112,25 @@ "no longer has any effect.", RemovedInPip10Warning, ) + if options.outdated and options.uptodate: + raise CommandError( + "Options --outdated and --uptodate cannot be combined.") if options.outdated: self.run_outdated(options) elif options.uptodate: self.run_uptodate(options) - elif options.editable: - self.run_editables(options) else: self.run_listing(options) def run_outdated(self, options): - for dist, version, typ in self.find_packages_latest_versions(options): - if version > dist.parsed_version: + for dist, latest_version, typ in sorted( + self.find_packages_latest_versions(options), + key=lambda p: p[0].project_name.lower()): + if latest_version > dist.parsed_version: logger.info( - '%s (Current: %s Latest: %s [%s])', - dist.project_name, dist.version, version, typ, + '%s - Latest: %s [%s]', + self.output_package(dist), latest_version, typ, ) def find_packages_latest_versions(self, options): @@ -137,8 +140,10 @@ index_urls = [] dependency_links = [] - for dist in get_installed_distributions(local_only=options.local, - user_only=options.user): + for dist in get_installed_distributions( + local_only=options.local, + user_only=options.user, + editables_only=options.editable): if dist.has_metadata('dependency_links.txt'): dependency_links.extend( dist.get_metadata_lines('dependency_links.txt'), @@ -151,7 +156,7 @@ installed_packages = get_installed_distributions( local_only=options.local, user_only=options.user, - include_editables=False, + editables_only=options.editable, ) format_control = FormatControl(set(), set()) wheel_cache = WheelCache(options.cache_dir, format_control) @@ -189,16 +194,19 @@ installed_packages = get_installed_distributions( local_only=options.local, user_only=options.user, + editables_only=options.editable, ) self.output_package_listing(installed_packages) - def run_editables(self, options): - installed_packages = get_installed_distributions( - local_only=options.local, - user_only=options.user, - editables_only=True, - ) - self.output_package_listing(installed_packages) + def output_package(self, dist): + if dist_is_editable(dist): + return '%s (%s, %s)' % ( + dist.project_name, + dist.version, + dist.location, + ) + else: + return '%s (%s)' % (dist.project_name, dist.version) def output_package_listing(self, installed_packages): installed_packages = sorted( @@ -206,15 +214,7 @@ key=lambda dist: dist.project_name.lower(), ) for dist in installed_packages: - if dist_is_editable(dist): - line = '%s (%s, %s)' % ( - dist.project_name, - dist.version, - dist.location, - ) - else: - line = '%s (%s)' % (dist.project_name, dist.version) - logger.info(line) + logger.info(self.output_package(dist)) def run_uptodate(self, options): uptodate = []
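For reference, a minimal self-contained sketch of the listing format the patch above settles on. It mirrors the `output_package` helper introduced in the diff, but `FakeDist` and the sample values are stand-ins invented for illustration; real pip code receives `pkg_resources` distribution objects and uses `dist_is_editable` rather than a plain boolean flag.

```
# Sketch of the output format introduced by the patch; FakeDist and the
# sample values below are placeholders, not pip internals.
from collections import namedtuple

FakeDist = namedtuple("FakeDist", ["project_name", "version", "location"])


def output_package(dist, editable):
    # Editable installs show their checkout location next to the version.
    if editable:
        return "%s (%s, %s)" % (dist.project_name, dist.version, dist.location)
    return "%s (%s)" % (dist.project_name, dist.version)


flask = FakeDist("Flask", "0.8dev-20130506", "/home/user/src/flask")
print(output_package(flask, editable=True))
# -> Flask (0.8dev-20130506, /home/user/src/flask)
print("%s - Latest: %s [%s]" % (output_package(flask, editable=True), "0.9", "sdist"))
# -> Flask (0.8dev-20130506, /home/user/src/flask) - Latest: 0.9 [sdist]
```

With editables included in `find_packages_latest_versions`, the scenario from the issue (an editable Flask 0.8dev checkout while 0.9 is on PyPI) now yields an outdated entry instead of empty output.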
{"golden_diff": "diff --git a/pip/commands/list.py b/pip/commands/list.py\n--- a/pip/commands/list.py\n+++ b/pip/commands/list.py\n@@ -4,7 +4,7 @@\n import warnings\n \n from pip.basecommand import Command\n-from pip.exceptions import DistributionNotFound\n+from pip.exceptions import CommandError, DistributionNotFound\n from pip.index import FormatControl, fmt_ctl_formats, PackageFinder, Search\n from pip.req import InstallRequirement\n from pip.utils import (\n@@ -37,12 +37,12 @@\n '-o', '--outdated',\n action='store_true',\n default=False,\n- help='List outdated packages (excluding editables)')\n+ help='List outdated packages')\n cmd_opts.add_option(\n '-u', '--uptodate',\n action='store_true',\n default=False,\n- help='List uptodate packages (excluding editables)')\n+ help='List uptodate packages')\n cmd_opts.add_option(\n '-e', '--editable',\n action='store_true',\n@@ -112,22 +112,25 @@\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n+ if options.outdated and options.uptodate:\n+ raise CommandError(\n+ \"Options --outdated and --uptodate cannot be combined.\")\n \n if options.outdated:\n self.run_outdated(options)\n elif options.uptodate:\n self.run_uptodate(options)\n- elif options.editable:\n- self.run_editables(options)\n else:\n self.run_listing(options)\n \n def run_outdated(self, options):\n- for dist, version, typ in self.find_packages_latest_versions(options):\n- if version > dist.parsed_version:\n+ for dist, latest_version, typ in sorted(\n+ self.find_packages_latest_versions(options),\n+ key=lambda p: p[0].project_name.lower()):\n+ if latest_version > dist.parsed_version:\n logger.info(\n- '%s (Current: %s Latest: %s [%s])',\n- dist.project_name, dist.version, version, typ,\n+ '%s - Latest: %s [%s]',\n+ self.output_package(dist), latest_version, typ,\n )\n \n def find_packages_latest_versions(self, options):\n@@ -137,8 +140,10 @@\n index_urls = []\n \n dependency_links = []\n- for dist in get_installed_distributions(local_only=options.local,\n- user_only=options.user):\n+ for dist in get_installed_distributions(\n+ local_only=options.local,\n+ user_only=options.user,\n+ editables_only=options.editable):\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(\n dist.get_metadata_lines('dependency_links.txt'),\n@@ -151,7 +156,7 @@\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n- include_editables=False,\n+ editables_only=options.editable,\n )\n format_control = FormatControl(set(), set())\n wheel_cache = WheelCache(options.cache_dir, format_control)\n@@ -189,16 +194,19 @@\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n+ editables_only=options.editable,\n )\n self.output_package_listing(installed_packages)\n \n- def run_editables(self, options):\n- installed_packages = get_installed_distributions(\n- local_only=options.local,\n- user_only=options.user,\n- editables_only=True,\n- )\n- self.output_package_listing(installed_packages)\n+ def output_package(self, dist):\n+ if dist_is_editable(dist):\n+ return '%s (%s, %s)' % (\n+ dist.project_name,\n+ dist.version,\n+ dist.location,\n+ )\n+ else:\n+ return '%s (%s)' % (dist.project_name, dist.version)\n \n def output_package_listing(self, installed_packages):\n installed_packages = sorted(\n@@ -206,15 +214,7 @@\n key=lambda dist: dist.project_name.lower(),\n )\n for dist in installed_packages:\n- if dist_is_editable(dist):\n- line = '%s (%s, %s)' % (\n- dist.project_name,\n- 
dist.version,\n- dist.location,\n- )\n- else:\n- line = '%s (%s)' % (dist.project_name, dist.version)\n- logger.info(line)\n+ logger.info(self.output_package(dist))\n \n def run_uptodate(self, options):\n uptodate = []\n", "issue": "pip list outdated\nI install Flask version 0.8 with pip in editable mode \n\n`pip install -e git+https://github.com/mitsuhiko/[email protected]#egg=flask`\n\nOutput of pip list \n\n```\nargparse (1.2.1)\nFlask (0.8dev-20130506, /home/pratz/VirtualEnv/learnflask/src/flask)\nJinja2 (2.6)\nWerkzeug (0.8.3)\nwsgiref (0.1.2)\n```\n\nOuput of pip list -o\nNothing ( returns to console without any output )\n\nShould not the output of pip list -o be\n`Flask (Current: 0.8dev Latest: 0.9)`\n\nOr is this how pip list -o works for editable mode ?\n\nNOTE: pip version is 1.3.1\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport warnings\n\nfrom pip.basecommand import Command\nfrom pip.exceptions import DistributionNotFound\nfrom pip.index import FormatControl, fmt_ctl_formats, PackageFinder, Search\nfrom pip.req import InstallRequirement\nfrom pip.utils import (\n get_installed_distributions, dist_is_editable, canonicalize_name)\nfrom pip.utils.deprecation import RemovedInPip10Warning\nfrom pip.wheel import WheelCache\nfrom pip.cmdoptions import make_option_group, index_group\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ListCommand(Command):\n \"\"\"\n List installed packages, including editables.\n\n Packages are listed in a case-insensitive sorted order.\n \"\"\"\n name = 'list'\n usage = \"\"\"\n %prog [options]\"\"\"\n summary = 'List installed packages.'\n\n def __init__(self, *args, **kw):\n super(ListCommand, self).__init__(*args, **kw)\n\n cmd_opts = self.cmd_opts\n\n cmd_opts.add_option(\n '-o', '--outdated',\n action='store_true',\n default=False,\n help='List outdated packages (excluding editables)')\n cmd_opts.add_option(\n '-u', '--uptodate',\n action='store_true',\n default=False,\n help='List uptodate packages (excluding editables)')\n cmd_opts.add_option(\n '-e', '--editable',\n action='store_true',\n default=False,\n help='List editable projects.')\n cmd_opts.add_option(\n '-l', '--local',\n action='store_true',\n default=False,\n help=('If in a virtualenv that has global access, do not list '\n 'globally-installed packages.'),\n )\n self.cmd_opts.add_option(\n '--user',\n dest='user',\n action='store_true',\n default=False,\n help='Only output packages installed in user-site.')\n\n cmd_opts.add_option(\n '--pre',\n action='store_true',\n default=False,\n help=(\"Include pre-release and development versions. By default, \"\n \"pip only finds stable versions.\"),\n )\n\n index_opts = make_option_group(index_group, self.parser)\n\n self.parser.insert_option_group(0, index_opts)\n self.parser.insert_option_group(0, cmd_opts)\n\n def _build_package_finder(self, options, index_urls, session):\n \"\"\"\n Create a package finder appropriate to this list command.\n \"\"\"\n return PackageFinder(\n find_links=options.find_links,\n index_urls=index_urls,\n allow_all_prereleases=options.pre,\n trusted_hosts=options.trusted_hosts,\n process_dependency_links=options.process_dependency_links,\n session=session,\n )\n\n def run(self, options, args):\n if options.allow_external:\n warnings.warn(\n \"--allow-external has been deprecated and will be removed in \"\n \"the future. 
Due to changes in the repository protocol, it no \"\n \"longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if options.allow_all_external:\n warnings.warn(\n \"--allow-all-external has been deprecated and will be removed \"\n \"in the future. Due to changes in the repository protocol, it \"\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if options.allow_unverified:\n warnings.warn(\n \"--allow-unverified has been deprecated and will be removed \"\n \"in the future. Due to changes in the repository protocol, it \"\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if options.outdated:\n self.run_outdated(options)\n elif options.uptodate:\n self.run_uptodate(options)\n elif options.editable:\n self.run_editables(options)\n else:\n self.run_listing(options)\n\n def run_outdated(self, options):\n for dist, version, typ in self.find_packages_latest_versions(options):\n if version > dist.parsed_version:\n logger.info(\n '%s (Current: %s Latest: %s [%s])',\n dist.project_name, dist.version, version, typ,\n )\n\n def find_packages_latest_versions(self, options):\n index_urls = [options.index_url] + options.extra_index_urls\n if options.no_index:\n logger.info('Ignoring indexes: %s', ','.join(index_urls))\n index_urls = []\n\n dependency_links = []\n for dist in get_installed_distributions(local_only=options.local,\n user_only=options.user):\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(\n dist.get_metadata_lines('dependency_links.txt'),\n )\n\n with self._build_session(options) as session:\n finder = self._build_package_finder(options, index_urls, session)\n finder.add_dependency_links(dependency_links)\n\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n include_editables=False,\n )\n format_control = FormatControl(set(), set())\n wheel_cache = WheelCache(options.cache_dir, format_control)\n for dist in installed_packages:\n req = InstallRequirement.from_line(\n dist.key, None, isolated=options.isolated_mode,\n wheel_cache=wheel_cache\n )\n typ = 'unknown'\n try:\n link = finder.find_requirement(req, True)\n\n # If link is None, means installed version is most\n # up-to-date\n if link is None:\n continue\n except DistributionNotFound:\n continue\n else:\n canonical_name = canonicalize_name(req.name)\n formats = fmt_ctl_formats(format_control, canonical_name)\n search = Search(\n req.name,\n canonical_name,\n formats)\n remote_version = finder._link_package_versions(\n link, search).version\n if link.is_wheel:\n typ = 'wheel'\n else:\n typ = 'sdist'\n yield dist, remote_version, typ\n\n def run_listing(self, options):\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n )\n self.output_package_listing(installed_packages)\n\n def run_editables(self, options):\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n editables_only=True,\n )\n self.output_package_listing(installed_packages)\n\n def output_package_listing(self, installed_packages):\n installed_packages = sorted(\n installed_packages,\n key=lambda dist: dist.project_name.lower(),\n )\n for dist in installed_packages:\n if dist_is_editable(dist):\n line = '%s (%s, %s)' % (\n dist.project_name,\n dist.version,\n dist.location,\n )\n else:\n line = '%s (%s)' % (dist.project_name, dist.version)\n logger.info(line)\n\n def run_uptodate(self, options):\n uptodate = []\n for dist, version, typ in 
self.find_packages_latest_versions(options):\n if dist.parsed_version == version:\n uptodate.append(dist)\n self.output_package_listing(uptodate)\n", "path": "pip/commands/list.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport warnings\n\nfrom pip.basecommand import Command\nfrom pip.exceptions import CommandError, DistributionNotFound\nfrom pip.index import FormatControl, fmt_ctl_formats, PackageFinder, Search\nfrom pip.req import InstallRequirement\nfrom pip.utils import (\n get_installed_distributions, dist_is_editable, canonicalize_name)\nfrom pip.utils.deprecation import RemovedInPip10Warning\nfrom pip.wheel import WheelCache\nfrom pip.cmdoptions import make_option_group, index_group\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ListCommand(Command):\n \"\"\"\n List installed packages, including editables.\n\n Packages are listed in a case-insensitive sorted order.\n \"\"\"\n name = 'list'\n usage = \"\"\"\n %prog [options]\"\"\"\n summary = 'List installed packages.'\n\n def __init__(self, *args, **kw):\n super(ListCommand, self).__init__(*args, **kw)\n\n cmd_opts = self.cmd_opts\n\n cmd_opts.add_option(\n '-o', '--outdated',\n action='store_true',\n default=False,\n help='List outdated packages')\n cmd_opts.add_option(\n '-u', '--uptodate',\n action='store_true',\n default=False,\n help='List uptodate packages')\n cmd_opts.add_option(\n '-e', '--editable',\n action='store_true',\n default=False,\n help='List editable projects.')\n cmd_opts.add_option(\n '-l', '--local',\n action='store_true',\n default=False,\n help=('If in a virtualenv that has global access, do not list '\n 'globally-installed packages.'),\n )\n self.cmd_opts.add_option(\n '--user',\n dest='user',\n action='store_true',\n default=False,\n help='Only output packages installed in user-site.')\n\n cmd_opts.add_option(\n '--pre',\n action='store_true',\n default=False,\n help=(\"Include pre-release and development versions. By default, \"\n \"pip only finds stable versions.\"),\n )\n\n index_opts = make_option_group(index_group, self.parser)\n\n self.parser.insert_option_group(0, index_opts)\n self.parser.insert_option_group(0, cmd_opts)\n\n def _build_package_finder(self, options, index_urls, session):\n \"\"\"\n Create a package finder appropriate to this list command.\n \"\"\"\n return PackageFinder(\n find_links=options.find_links,\n index_urls=index_urls,\n allow_all_prereleases=options.pre,\n trusted_hosts=options.trusted_hosts,\n process_dependency_links=options.process_dependency_links,\n session=session,\n )\n\n def run(self, options, args):\n if options.allow_external:\n warnings.warn(\n \"--allow-external has been deprecated and will be removed in \"\n \"the future. Due to changes in the repository protocol, it no \"\n \"longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if options.allow_all_external:\n warnings.warn(\n \"--allow-all-external has been deprecated and will be removed \"\n \"in the future. Due to changes in the repository protocol, it \"\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if options.allow_unverified:\n warnings.warn(\n \"--allow-unverified has been deprecated and will be removed \"\n \"in the future. 
Due to changes in the repository protocol, it \"\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n if options.outdated and options.uptodate:\n raise CommandError(\n \"Options --outdated and --uptodate cannot be combined.\")\n\n if options.outdated:\n self.run_outdated(options)\n elif options.uptodate:\n self.run_uptodate(options)\n else:\n self.run_listing(options)\n\n def run_outdated(self, options):\n for dist, latest_version, typ in sorted(\n self.find_packages_latest_versions(options),\n key=lambda p: p[0].project_name.lower()):\n if latest_version > dist.parsed_version:\n logger.info(\n '%s - Latest: %s [%s]',\n self.output_package(dist), latest_version, typ,\n )\n\n def find_packages_latest_versions(self, options):\n index_urls = [options.index_url] + options.extra_index_urls\n if options.no_index:\n logger.info('Ignoring indexes: %s', ','.join(index_urls))\n index_urls = []\n\n dependency_links = []\n for dist in get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n editables_only=options.editable):\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(\n dist.get_metadata_lines('dependency_links.txt'),\n )\n\n with self._build_session(options) as session:\n finder = self._build_package_finder(options, index_urls, session)\n finder.add_dependency_links(dependency_links)\n\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n editables_only=options.editable,\n )\n format_control = FormatControl(set(), set())\n wheel_cache = WheelCache(options.cache_dir, format_control)\n for dist in installed_packages:\n req = InstallRequirement.from_line(\n dist.key, None, isolated=options.isolated_mode,\n wheel_cache=wheel_cache\n )\n typ = 'unknown'\n try:\n link = finder.find_requirement(req, True)\n\n # If link is None, means installed version is most\n # up-to-date\n if link is None:\n continue\n except DistributionNotFound:\n continue\n else:\n canonical_name = canonicalize_name(req.name)\n formats = fmt_ctl_formats(format_control, canonical_name)\n search = Search(\n req.name,\n canonical_name,\n formats)\n remote_version = finder._link_package_versions(\n link, search).version\n if link.is_wheel:\n typ = 'wheel'\n else:\n typ = 'sdist'\n yield dist, remote_version, typ\n\n def run_listing(self, options):\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n editables_only=options.editable,\n )\n self.output_package_listing(installed_packages)\n\n def output_package(self, dist):\n if dist_is_editable(dist):\n return '%s (%s, %s)' % (\n dist.project_name,\n dist.version,\n dist.location,\n )\n else:\n return '%s (%s)' % (dist.project_name, dist.version)\n\n def output_package_listing(self, installed_packages):\n installed_packages = sorted(\n installed_packages,\n key=lambda dist: dist.project_name.lower(),\n )\n for dist in installed_packages:\n logger.info(self.output_package(dist))\n\n def run_uptodate(self, options):\n uptodate = []\n for dist, version, typ in self.find_packages_latest_versions(options):\n if dist.parsed_version == version:\n uptodate.append(dist)\n self.output_package_listing(uptodate)\n", "path": "pip/commands/list.py"}]}
2,518
1,003
gh_patches_debug_59199
rasdani/github-patches
git_diff
Nitrate__Nitrate-380
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add more information links to package Add these links to `project_urls` argument of `setup.py`. - Source Code: https://github.com/Nitrate/Nitrate - Issue Tracker: https://github.com/Nitrate/Nitrate/issues - Documentation: https://nitrate.readthedocs.io/ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 import sys 4 5 from setuptools import setup, find_packages 6 7 8 with open('VERSION.txt', 'r') as f: 9 pkg_version = f.read().strip() 10 11 12 def get_long_description(): 13 with open('README.rst', 'r') as f: 14 return f.read() 15 16 17 install_requires = [ 18 'beautifulsoup4 >= 4.1.1', 19 'django >= 1.11,<3.0', 20 'django-contrib-comments == 1.8.0', 21 'django-tinymce == 2.7.0', 22 'django-uuslug == 1.1.8', 23 'html2text', 24 'odfpy >= 0.9.6', 25 'python-bugzilla', 26 'six', 27 'xmltodict', 28 'kobo == 0.9.0' 29 ] 30 31 if sys.version_info.major < 3: 32 install_requires += [ 33 'enum34', 34 ] 35 36 extras_require = { 37 'mysql': ['PyMySQL == 0.9.2'], 38 'pgsql': ['psycopg2 == 2.7.5'], 39 40 # Required for tcms.core.contrib.auth.backends.KerberosBackend 41 'krbauth': [ 42 'kerberos == 1.2.5' 43 ], 44 45 # Packages for building documentation 46 'docs': [ 47 'Sphinx >= 1.1.2', 48 'sphinx_rtd_theme', 49 ], 50 51 # Necessary packages for running tests 52 'tests': [ 53 'beautifulsoup4', 54 'coverage', 55 'factory_boy', 56 'flake8', 57 'mock', 58 'pytest', 59 'pytest-cov', 60 'pytest-django', 61 ], 62 63 # Contain tools that assists the development 64 'devtools': [ 65 'django-debug-toolbar == 1.7', 66 'tox', 67 'django-extensions', 68 'pygraphviz', 69 'future-breakpoint', 70 ], 71 72 # Required packages required to run async tasks 73 'async': [ 74 'celery == 4.2.0', 75 ] 76 } 77 78 setup( 79 name='Nitrate', 80 version=pkg_version, 81 description='Test Case Management System', 82 long_description=get_long_description(), 83 author='Nitrate Team', 84 maintainer='Chenxiong Qi', 85 maintainer_email='[email protected]', 86 url='https://github.com/Nitrate/Nitrate/', 87 license='GPLv2+', 88 keywords='test case', 89 install_requires=install_requires, 90 extras_require=extras_require, 91 packages=find_packages(), 92 include_package_data=True, 93 classifiers=[ 94 'Framework :: Django', 95 'Framework :: Django :: 1.11', 96 'Framework :: Django :: 2.0', 97 'Framework :: Django :: 2.1', 98 'Intended Audience :: Developers', 99 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 100 'Programming Language :: Python :: 2', 101 'Programming Language :: Python :: 2.7', 102 'Programming Language :: Python :: 3', 103 'Programming Language :: Python :: 3.6', 104 'Programming Language :: Python :: 3.7', 105 'Topic :: Software Development :: Quality Assurance', 106 'Topic :: Software Development :: Testing', 107 ], 108 ) 109 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -105,4 +105,9 @@ 'Topic :: Software Development :: Quality Assurance', 'Topic :: Software Development :: Testing', ], + project_urls={ + 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues', + 'Source Code': 'https://github.com/Nitrate/Nitrate', + 'Documentation': 'https://nitrate.readthedocs.io/', + }, )
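As a usage illustration, a trimmed-down `setup.py` showing the `project_urls` keyword the patch adds. Apart from the three URLs taken from the issue text, the name and version are placeholders rather than Nitrate's real metadata.

```
# Minimal setuptools example of project_urls; name and version are placeholders.
from setuptools import setup

setup(
    name="example-package",
    version="0.0.1",
    url="https://github.com/Nitrate/Nitrate/",
    project_urls={
        "Issue Tracker": "https://github.com/Nitrate/Nitrate/issues",
        "Source Code": "https://github.com/Nitrate/Nitrate",
        "Documentation": "https://nitrate.readthedocs.io/",
    },
)
```

Once published with a reasonably recent setuptools, these entries appear as extra project links alongside the homepage URL on PyPI.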
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -105,4 +105,9 @@\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n+ project_urls={\n+ 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n+ 'Source Code': 'https://github.com/Nitrate/Nitrate',\n+ 'Documentation': 'https://nitrate.readthedocs.io/',\n+ },\n )\n", "issue": "Add more information links to package\nAdd these links to `project_urls` argument of `setup.py`.\r\n\r\n- Source Code: https://github.com/Nitrate/Nitrate\r\n- Issue Tracker: https://github.com/Nitrate/Nitrate/issues\r\n- Documentation: https://nitrate.readthedocs.io/\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 1.11,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'six',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n 'mysql': ['PyMySQL == 0.9.2'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 1.11,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 
'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'six',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n 'mysql': ['PyMySQL == 0.9.2'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}]}
num_tokens: 1,267
num_tokens_diff: 116
problem_id: gh_patches_debug_17383
source: rasdani/github-patches
task_type: git_diff
in_source_id: conan-io__conan-center-index-5508
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [request] sqlcipher/4.4.3 ### Package Details * Package Name/Version: ** sqlcipher/4.4.3** * Changelog: **https://github.com/sqlcipher/sqlcipher/blob/master/CHANGELOG.md** ---- * works on Mac Catalyst The above mentioned version is newly released by the upstream project and not yet available as a recipe. Please add this version. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `recipes/sqlcipher/all/conanfile.py` Content: ``` 1 from conans import tools, ConanFile, AutoToolsBuildEnvironment, RunEnvironment 2 import platform 3 import sys 4 import os 5 6 7 class SqlcipherConan(ConanFile): 8 name = "sqlcipher" 9 license = "BSD-3-Clause" 10 url = "https://github.com/conan-io/conan-center-index" 11 homepage = "https://www.zetetic.net/sqlcipher/" 12 description = "SQLite extension that provides 256 bit AES encryption of database files." 13 settings = "os", "compiler", "build_type", "arch" 14 options = { 15 "shared": [True, False], 16 "fPIC": [True, False], 17 "crypto_library": ["openssl", "libressl"], 18 "with_largefile": [True, False], 19 "temporary_store": ["always_file", "default_file", "default_memory", "always_memory"] 20 } 21 default_options = { 22 "shared": False, 23 "fPIC": True, 24 "crypto_library": "openssl", 25 "with_largefile": True, 26 "temporary_store": "default_memory" 27 } 28 topics = ("database", "encryption", "SQLite") 29 exports_sources = "patches/*" 30 generators = "cmake" 31 _source_subfolder = "source_subfolder" 32 33 34 def config_options(self): 35 del self.settings.compiler.libcxx 36 del self.settings.compiler.cppstd 37 if self.settings.os != "Linux": 38 del self.options.with_largefile 39 if self.settings.os == "Windows": 40 del self.options.fPIC 41 42 def build_requirements(self): 43 # It is possible to have a MinGW cross-build toolchain (Linux to Windows) 44 # Only require msys2 when building on an actual Windows system 45 if self.settings.os == "Windows" and self.settings.compiler == "gcc" and tools.os_info.is_windows: 46 self.build_requires("msys2/20190524") 47 self.build_requires("tcl/8.6.10") 48 49 def requirements(self): 50 if self.options.crypto_library == "openssl": 51 self.requires("openssl/1.1.1h") 52 else: 53 self.requires("libressl/3.2.0") 54 55 def source(self): 56 tools.get(**self.conan_data["sources"][self.version]) 57 extracted_dir = self.name + "-" + self.version 58 os.rename(extracted_dir, self._source_subfolder) 59 60 @property 61 def _temp_store_nmake_value(self): 62 return {"always_file": "0", 63 "default_file": "1", 64 "default_memory": "2", 65 "always_memory": "3"}.get(str(self.options.temporary_store)) 66 67 @property 68 def _temp_store_autotools_value(self): 69 return {"always_file": "never", 70 "default_file": "no", 71 "default_memory": "yes", 72 "always_memory": "always"}.get(str(self.options.temporary_store)) 73 74 def _build_visual(self): 75 crypto_dep = self.deps_cpp_info[str(self.options.crypto_library)] 76 crypto_incdir = crypto_dep.include_paths[0] 77 crypto_libdir = crypto_dep.lib_paths[0] 78 libs = map(lambda lib : lib + ".lib", crypto_dep.libs) 79 system_libs = map(lambda lib : lib + ".lib", crypto_dep.system_libs) 80 81 nmake_flags = [ 82 "TLIBS=\"%s %s\"" % (" ".join(libs), " ".join(system_libs)), 83 "LTLIBPATHS=/LIBPATH:%s" % crypto_libdir, 84 "OPTS=\"-I%s -DSQLITE_HAS_CODEC\"" % (crypto_incdir), 85 "NO_TCL=1", 86 
"USE_AMALGAMATION=1", 87 "OPT_FEATURE_FLAGS=-DSQLCIPHER_CRYPTO_OPENSSL", 88 "SQLITE_TEMP_STORE=%s" % self._temp_store_nmake_value, 89 "TCLSH_CMD=%s" % self.deps_env_info.TCLSH, 90 ] 91 92 main_target = "dll" if self.options.shared else "sqlcipher.lib" 93 94 if self.settings.compiler.runtime in ["MD", "MDd"]: 95 nmake_flags.append("USE_CRT_DLL=1") 96 if self.settings.build_type == "Debug": 97 nmake_flags.append("DEBUG=2") 98 nmake_flags.append("FOR_WIN10=1") 99 platforms = {"x86": "x86", "x86_64": "x64"} 100 nmake_flags.append("PLATFORM=%s" % platforms[self.settings.arch.value]) 101 vcvars = tools.vcvars_command(self.settings) 102 self.run("%s && nmake /f Makefile.msc %s %s" % (vcvars, main_target, " ".join(nmake_flags)), cwd=self._source_subfolder) 103 104 def _build_autotools(self): 105 self.run('chmod +x configure', cwd=self._source_subfolder) 106 absolute_install_dir = os.path.abspath(os.path.join(".", "install")) 107 absolute_install_dir = absolute_install_dir.replace("\\", "/") 108 autotools_env = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows) 109 if self.settings.os == "Linux": 110 autotools_env.libs.append("dl") 111 if not self.options.with_largefile: 112 autotools_env.defines.append("SQLITE_DISABLE_LFS=1") 113 autotools_env.defines.extend(["SQLITE_HAS_CODEC", "SQLCIPHER_CRYPTO_OPENSSL"]) 114 115 # sqlcipher config.sub does not contain android configurations... 116 # elf is the most basic `os' for Android 117 host = None 118 if self.settings.os == "Android": 119 host = "%s-linux-elf" % self._arch_id_str_compiler 120 elif self.settings.os == "Windows": 121 arch = str(self.settings.arch) 122 if arch == "x86": 123 arch = "i386" 124 host = "%s-pc-mingw32" % arch 125 elif self.settings.os == "iOS": 126 host = "%s-apple-darwin" % self.settings.arch 127 128 configure_args = self._get_configure_args(absolute_install_dir) 129 with tools.chdir(self._source_subfolder): 130 # Hack, uname -p returns i386, configure guesses x86_64, we must force i386 so that cross-compilation is correctly detected. 131 # Otherwise host/build are the same, and configure tries to launch a sample executable, and fails miserably. 132 env_vars = autotools_env.vars 133 if self.settings.os == "iOS": 134 build = "i386-apple-darwin" 135 # same for mingw... 136 elif self.settings.os == "Windows": 137 build = "x86_64-linux" 138 env_vars["config_TARGET_EXEEXT"] = ".exe" 139 else: 140 build = None 141 tclsh_cmd = self.deps_env_info.TCLSH 142 env_vars["TCLSH_CMD"] = tclsh_cmd.replace("\\", "/") 143 autotools_env.configure(args=configure_args, host=host, build=build, vars=env_vars) 144 if self.settings.os == "Windows": 145 # sqlcipher will create .exe for the build machine, which we defined to Linux... 
146 tools.replace_in_file(os.path.join(self.build_folder, self._source_subfolder, "Makefile"), "BEXE = .exe", "BEXE = ") 147 autotools_env.make(args=["install"]) 148 149 @property 150 def _arch_id_str_compiler(self): 151 return {"x86": "i686", 152 "armv6": "arm", 153 "armv7": "arm", 154 "armv7hf": "arm", 155 # Hack: config.guess of sqlcipher does not like aarch64 156 "armv8": "armv8", 157 "mips64": "mips64"}.get(str(self.settings.arch), 158 str(self.settings.arch)) 159 160 def _get_configure_args(self, absolute_install_dir): 161 args = [ 162 "--prefix=%s" % absolute_install_dir, 163 164 self._autotools_bool_arg("shared", self.options.shared), 165 self._autotools_bool_arg("static", not self.options.shared), 166 "--enable-tempstore=%s" % self._temp_store_autotools_value, 167 "--disable-tcl", 168 ] 169 if self.settings.os == "Windows": 170 args.extend(["config_BUILD_EXEEXT='.exe'", "config_TARGET_EXEEXT='.exe'"]) 171 return args 172 173 def _autotools_bool_arg(self, arg_base_name, value): 174 prefix = "--enable-" if value else "--disable-" 175 176 return prefix + arg_base_name 177 178 def build(self): 179 for patch in self.conan_data["patches"][self.version]: 180 tools.patch(**patch) 181 if self.settings.os == "Macos": 182 tools.replace_in_file(os.path.join(self._source_subfolder, "configure"), r"-install_name \$rpath/", "-install_name ") 183 184 if self.settings.compiler == "Visual Studio": 185 self._build_visual() 186 else: 187 self._build_autotools() 188 189 def _package_unix(self): 190 self.copy("*sqlite3.h", src="install") 191 self.copy("*.so*", dst="lib", src="install", keep_path=False, symlinks=True) 192 self.copy("*.a", dst="lib", src="install", keep_path=False) 193 self.copy("*.lib", dst="lib", src="install", keep_path=False) 194 self.copy("*.dll", dst="bin", src="install", keep_path=False) 195 self.copy("*.dylib", dst="lib", src="install", keep_path=False) 196 self.copy("*LICENSE", dst="licenses", keep_path=False) 197 198 def _package_visual(self): 199 self.copy("*.dll", dst="bin", keep_path=False) 200 self.copy("*.lib", dst="lib", keep_path=False) 201 self.copy("*LICENSE", dst="licenses", keep_path=False) 202 self.copy("sqlite3.h", src=self._source_subfolder, dst=os.path.join("include", "sqlcipher")) 203 204 def package(self): 205 if self.settings.compiler == "Visual Studio": 206 self._package_visual() 207 else: 208 self._package_unix() 209 210 def package_info(self): 211 self.cpp_info.libs = ["sqlcipher"] 212 if self.settings.os == "Linux": 213 self.cpp_info.system_libs.extend(["pthread", "dl"]) 214 self.cpp_info.defines = ["SQLITE_HAS_CODEC", 'SQLCIPHER_CRYPTO_OPENSSL', 'SQLITE_TEMP_STORE=%s' % self._temp_store_nmake_value] 215 # Allow using #include <sqlite3.h> even with sqlcipher (for libs like sqlpp11-connector-sqlite3) 216 self.cpp_info.includedirs.append(os.path.join("include", "sqlcipher")) 217 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/recipes/sqlcipher/all/conanfile.py b/recipes/sqlcipher/all/conanfile.py --- a/recipes/sqlcipher/all/conanfile.py +++ b/recipes/sqlcipher/all/conanfile.py @@ -39,6 +39,10 @@ if self.settings.os == "Windows": del self.options.fPIC + def configure(self): + if self.options.shared: + del self.options.fPIC + def build_requirements(self): # It is possible to have a MinGW cross-build toolchain (Linux to Windows) # Only require msys2 when building on an actual Windows system @@ -48,7 +52,7 @@ def requirements(self): if self.options.crypto_library == "openssl": - self.requires("openssl/1.1.1h") + self.requires("openssl/1.1.1k") else: self.requires("libressl/3.2.0")
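The `configure()` hook in the patch follows a common Conan 1.x idiom for shared/fPIC handling; a stripped-down sketch of just that logic is shown below, with a placeholder recipe name and everything unrelated to the options omitted.

```
# Sketch of the shared/fPIC option handling the patch adds; "example" is a
# placeholder name and the rest of the sqlcipher recipe is left out.
from conans import ConanFile


class ExampleConan(ConanFile):
    name = "example"
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False], "fPIC": [True, False]}
    default_options = {"shared": False, "fPIC": True}

    def config_options(self):
        # fPIC has no meaning on Windows, so the option is removed there.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # Shared builds imply position-independent code, so dropping the
        # option avoids spurious package ID variations.
        if self.options.shared:
            del self.options.fPIC
```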
{"golden_diff": "diff --git a/recipes/sqlcipher/all/conanfile.py b/recipes/sqlcipher/all/conanfile.py\n--- a/recipes/sqlcipher/all/conanfile.py\n+++ b/recipes/sqlcipher/all/conanfile.py\n@@ -39,6 +39,10 @@\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n \n+ def configure(self):\n+ if self.options.shared:\n+ del self.options.fPIC\n+\n def build_requirements(self):\n # It is possible to have a MinGW cross-build toolchain (Linux to Windows)\n # Only require msys2 when building on an actual Windows system\n@@ -48,7 +52,7 @@\n \n def requirements(self):\n if self.options.crypto_library == \"openssl\":\n- self.requires(\"openssl/1.1.1h\")\n+ self.requires(\"openssl/1.1.1k\")\n else:\n self.requires(\"libressl/3.2.0\")\n", "issue": "[request] sqlcipher/4.4.3\n### Package Details\r\n * Package Name/Version: ** sqlcipher/4.4.3**\r\n * Changelog: **https://github.com/sqlcipher/sqlcipher/blob/master/CHANGELOG.md**\r\n----\r\n * works on Mac Catalyst \r\n\r\nThe above mentioned version is newly released by the upstream project and not yet available as a recipe. Please add this version.\r\n\n", "before_files": [{"content": "from conans import tools, ConanFile, AutoToolsBuildEnvironment, RunEnvironment\nimport platform\nimport sys\nimport os\n\n\nclass SqlcipherConan(ConanFile):\n name = \"sqlcipher\"\n license = \"BSD-3-Clause\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.zetetic.net/sqlcipher/\"\n description = \"SQLite extension that provides 256 bit AES encryption of database files.\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"crypto_library\": [\"openssl\", \"libressl\"],\n \"with_largefile\": [True, False],\n \"temporary_store\": [\"always_file\", \"default_file\", \"default_memory\", \"always_memory\"]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"crypto_library\": \"openssl\",\n \"with_largefile\": True,\n \"temporary_store\": \"default_memory\"\n }\n topics = (\"database\", \"encryption\", \"SQLite\")\n exports_sources = \"patches/*\"\n generators = \"cmake\"\n _source_subfolder = \"source_subfolder\"\n\n\n def config_options(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n if self.settings.os != \"Linux\":\n del self.options.with_largefile\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def build_requirements(self):\n # It is possible to have a MinGW cross-build toolchain (Linux to Windows)\n # Only require msys2 when building on an actual Windows system\n if self.settings.os == \"Windows\" and self.settings.compiler == \"gcc\" and tools.os_info.is_windows:\n self.build_requires(\"msys2/20190524\")\n self.build_requires(\"tcl/8.6.10\")\n\n def requirements(self):\n if self.options.crypto_library == \"openssl\":\n self.requires(\"openssl/1.1.1h\")\n else:\n self.requires(\"libressl/3.2.0\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n @property\n def _temp_store_nmake_value(self):\n return {\"always_file\": \"0\",\n \"default_file\": \"1\",\n \"default_memory\": \"2\",\n \"always_memory\": \"3\"}.get(str(self.options.temporary_store))\n\n @property\n def _temp_store_autotools_value(self):\n return {\"always_file\": \"never\",\n \"default_file\": \"no\",\n \"default_memory\": \"yes\",\n \"always_memory\": 
\"always\"}.get(str(self.options.temporary_store))\n\n def _build_visual(self):\n crypto_dep = self.deps_cpp_info[str(self.options.crypto_library)]\n crypto_incdir = crypto_dep.include_paths[0]\n crypto_libdir = crypto_dep.lib_paths[0]\n libs = map(lambda lib : lib + \".lib\", crypto_dep.libs)\n system_libs = map(lambda lib : lib + \".lib\", crypto_dep.system_libs)\n\n nmake_flags = [\n \"TLIBS=\\\"%s %s\\\"\" % (\" \".join(libs), \" \".join(system_libs)),\n \"LTLIBPATHS=/LIBPATH:%s\" % crypto_libdir,\n \"OPTS=\\\"-I%s -DSQLITE_HAS_CODEC\\\"\" % (crypto_incdir),\n \"NO_TCL=1\",\n \"USE_AMALGAMATION=1\",\n \"OPT_FEATURE_FLAGS=-DSQLCIPHER_CRYPTO_OPENSSL\",\n \"SQLITE_TEMP_STORE=%s\" % self._temp_store_nmake_value,\n \"TCLSH_CMD=%s\" % self.deps_env_info.TCLSH,\n ]\n\n main_target = \"dll\" if self.options.shared else \"sqlcipher.lib\"\n\n if self.settings.compiler.runtime in [\"MD\", \"MDd\"]:\n nmake_flags.append(\"USE_CRT_DLL=1\")\n if self.settings.build_type == \"Debug\":\n nmake_flags.append(\"DEBUG=2\")\n nmake_flags.append(\"FOR_WIN10=1\")\n platforms = {\"x86\": \"x86\", \"x86_64\": \"x64\"}\n nmake_flags.append(\"PLATFORM=%s\" % platforms[self.settings.arch.value])\n vcvars = tools.vcvars_command(self.settings)\n self.run(\"%s && nmake /f Makefile.msc %s %s\" % (vcvars, main_target, \" \".join(nmake_flags)), cwd=self._source_subfolder)\n\n def _build_autotools(self):\n self.run('chmod +x configure', cwd=self._source_subfolder)\n absolute_install_dir = os.path.abspath(os.path.join(\".\", \"install\"))\n absolute_install_dir = absolute_install_dir.replace(\"\\\\\", \"/\")\n autotools_env = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n if self.settings.os == \"Linux\":\n autotools_env.libs.append(\"dl\")\n if not self.options.with_largefile:\n autotools_env.defines.append(\"SQLITE_DISABLE_LFS=1\")\n autotools_env.defines.extend([\"SQLITE_HAS_CODEC\", \"SQLCIPHER_CRYPTO_OPENSSL\"])\n\n # sqlcipher config.sub does not contain android configurations...\n # elf is the most basic `os' for Android\n host = None\n if self.settings.os == \"Android\":\n host = \"%s-linux-elf\" % self._arch_id_str_compiler\n elif self.settings.os == \"Windows\":\n arch = str(self.settings.arch)\n if arch == \"x86\":\n arch = \"i386\"\n host = \"%s-pc-mingw32\" % arch\n elif self.settings.os == \"iOS\":\n host = \"%s-apple-darwin\" % self.settings.arch\n\n configure_args = self._get_configure_args(absolute_install_dir)\n with tools.chdir(self._source_subfolder):\n # Hack, uname -p returns i386, configure guesses x86_64, we must force i386 so that cross-compilation is correctly detected.\n # Otherwise host/build are the same, and configure tries to launch a sample executable, and fails miserably.\n env_vars = autotools_env.vars\n if self.settings.os == \"iOS\":\n build = \"i386-apple-darwin\"\n # same for mingw...\n elif self.settings.os == \"Windows\":\n build = \"x86_64-linux\"\n env_vars[\"config_TARGET_EXEEXT\"] = \".exe\"\n else:\n build = None\n tclsh_cmd = self.deps_env_info.TCLSH\n env_vars[\"TCLSH_CMD\"] = tclsh_cmd.replace(\"\\\\\", \"/\")\n autotools_env.configure(args=configure_args, host=host, build=build, vars=env_vars)\n if self.settings.os == \"Windows\":\n # sqlcipher will create .exe for the build machine, which we defined to Linux...\n tools.replace_in_file(os.path.join(self.build_folder, self._source_subfolder, \"Makefile\"), \"BEXE = .exe\", \"BEXE = \")\n autotools_env.make(args=[\"install\"])\n\n @property\n def _arch_id_str_compiler(self):\n return {\"x86\": 
\"i686\",\n \"armv6\": \"arm\",\n \"armv7\": \"arm\",\n \"armv7hf\": \"arm\",\n # Hack: config.guess of sqlcipher does not like aarch64\n \"armv8\": \"armv8\",\n \"mips64\": \"mips64\"}.get(str(self.settings.arch),\n str(self.settings.arch))\n\n def _get_configure_args(self, absolute_install_dir):\n args = [\n \"--prefix=%s\" % absolute_install_dir,\n\n self._autotools_bool_arg(\"shared\", self.options.shared),\n self._autotools_bool_arg(\"static\", not self.options.shared),\n \"--enable-tempstore=%s\" % self._temp_store_autotools_value,\n \"--disable-tcl\",\n ]\n if self.settings.os == \"Windows\":\n args.extend([\"config_BUILD_EXEEXT='.exe'\", \"config_TARGET_EXEEXT='.exe'\"])\n return args\n\n def _autotools_bool_arg(self, arg_base_name, value):\n prefix = \"--enable-\" if value else \"--disable-\"\n\n return prefix + arg_base_name\n\n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n if self.settings.os == \"Macos\":\n tools.replace_in_file(os.path.join(self._source_subfolder, \"configure\"), r\"-install_name \\$rpath/\", \"-install_name \")\n\n if self.settings.compiler == \"Visual Studio\":\n self._build_visual()\n else:\n self._build_autotools()\n\n def _package_unix(self):\n self.copy(\"*sqlite3.h\", src=\"install\")\n self.copy(\"*.so*\", dst=\"lib\", src=\"install\", keep_path=False, symlinks=True)\n self.copy(\"*.a\", dst=\"lib\", src=\"install\", keep_path=False)\n self.copy(\"*.lib\", dst=\"lib\", src=\"install\", keep_path=False)\n self.copy(\"*.dll\", dst=\"bin\", src=\"install\", keep_path=False)\n self.copy(\"*.dylib\", dst=\"lib\", src=\"install\", keep_path=False)\n self.copy(\"*LICENSE\", dst=\"licenses\", keep_path=False)\n\n def _package_visual(self):\n self.copy(\"*.dll\", dst=\"bin\", keep_path=False)\n self.copy(\"*.lib\", dst=\"lib\", keep_path=False)\n self.copy(\"*LICENSE\", dst=\"licenses\", keep_path=False)\n self.copy(\"sqlite3.h\", src=self._source_subfolder, dst=os.path.join(\"include\", \"sqlcipher\"))\n\n def package(self):\n if self.settings.compiler == \"Visual Studio\":\n self._package_visual()\n else:\n self._package_unix()\n\n def package_info(self):\n self.cpp_info.libs = [\"sqlcipher\"]\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"pthread\", \"dl\"])\n self.cpp_info.defines = [\"SQLITE_HAS_CODEC\", 'SQLCIPHER_CRYPTO_OPENSSL', 'SQLITE_TEMP_STORE=%s' % self._temp_store_nmake_value]\n # Allow using #include <sqlite3.h> even with sqlcipher (for libs like sqlpp11-connector-sqlite3)\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"sqlcipher\"))\n", "path": "recipes/sqlcipher/all/conanfile.py"}], "after_files": [{"content": "from conans import tools, ConanFile, AutoToolsBuildEnvironment, RunEnvironment\nimport platform\nimport sys\nimport os\n\n\nclass SqlcipherConan(ConanFile):\n name = \"sqlcipher\"\n license = \"BSD-3-Clause\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.zetetic.net/sqlcipher/\"\n description = \"SQLite extension that provides 256 bit AES encryption of database files.\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"crypto_library\": [\"openssl\", \"libressl\"],\n \"with_largefile\": [True, False],\n \"temporary_store\": [\"always_file\", \"default_file\", \"default_memory\", \"always_memory\"]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"crypto_library\": \"openssl\",\n \"with_largefile\": 
True,\n \"temporary_store\": \"default_memory\"\n }\n topics = (\"database\", \"encryption\", \"SQLite\")\n exports_sources = \"patches/*\"\n generators = \"cmake\"\n _source_subfolder = \"source_subfolder\"\n\n\n def config_options(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n if self.settings.os != \"Linux\":\n del self.options.with_largefile\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def build_requirements(self):\n # It is possible to have a MinGW cross-build toolchain (Linux to Windows)\n # Only require msys2 when building on an actual Windows system\n if self.settings.os == \"Windows\" and self.settings.compiler == \"gcc\" and tools.os_info.is_windows:\n self.build_requires(\"msys2/20190524\")\n self.build_requires(\"tcl/8.6.10\")\n\n def requirements(self):\n if self.options.crypto_library == \"openssl\":\n self.requires(\"openssl/1.1.1k\")\n else:\n self.requires(\"libressl/3.2.0\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n @property\n def _temp_store_nmake_value(self):\n return {\"always_file\": \"0\",\n \"default_file\": \"1\",\n \"default_memory\": \"2\",\n \"always_memory\": \"3\"}.get(str(self.options.temporary_store))\n\n @property\n def _temp_store_autotools_value(self):\n return {\"always_file\": \"never\",\n \"default_file\": \"no\",\n \"default_memory\": \"yes\",\n \"always_memory\": \"always\"}.get(str(self.options.temporary_store))\n\n def _build_visual(self):\n crypto_dep = self.deps_cpp_info[str(self.options.crypto_library)]\n crypto_incdir = crypto_dep.include_paths[0]\n crypto_libdir = crypto_dep.lib_paths[0]\n libs = map(lambda lib : lib + \".lib\", crypto_dep.libs)\n system_libs = map(lambda lib : lib + \".lib\", crypto_dep.system_libs)\n\n nmake_flags = [\n \"TLIBS=\\\"%s %s\\\"\" % (\" \".join(libs), \" \".join(system_libs)),\n \"LTLIBPATHS=/LIBPATH:%s\" % crypto_libdir,\n \"OPTS=\\\"-I%s -DSQLITE_HAS_CODEC\\\"\" % (crypto_incdir),\n \"NO_TCL=1\",\n \"USE_AMALGAMATION=1\",\n \"OPT_FEATURE_FLAGS=-DSQLCIPHER_CRYPTO_OPENSSL\",\n \"SQLITE_TEMP_STORE=%s\" % self._temp_store_nmake_value,\n \"TCLSH_CMD=%s\" % self.deps_env_info.TCLSH,\n ]\n\n main_target = \"dll\" if self.options.shared else \"sqlcipher.lib\"\n\n if self.settings.compiler.runtime in [\"MD\", \"MDd\"]:\n nmake_flags.append(\"USE_CRT_DLL=1\")\n if self.settings.build_type == \"Debug\":\n nmake_flags.append(\"DEBUG=2\")\n nmake_flags.append(\"FOR_WIN10=1\")\n platforms = {\"x86\": \"x86\", \"x86_64\": \"x64\"}\n nmake_flags.append(\"PLATFORM=%s\" % platforms[self.settings.arch.value])\n vcvars = tools.vcvars_command(self.settings)\n self.run(\"%s && nmake /f Makefile.msc %s %s\" % (vcvars, main_target, \" \".join(nmake_flags)), cwd=self._source_subfolder)\n\n def _build_autotools(self):\n self.run('chmod +x configure', cwd=self._source_subfolder)\n absolute_install_dir = os.path.abspath(os.path.join(\".\", \"install\"))\n absolute_install_dir = absolute_install_dir.replace(\"\\\\\", \"/\")\n autotools_env = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n if self.settings.os == \"Linux\":\n autotools_env.libs.append(\"dl\")\n if not self.options.with_largefile:\n autotools_env.defines.append(\"SQLITE_DISABLE_LFS=1\")\n autotools_env.defines.extend([\"SQLITE_HAS_CODEC\", \"SQLCIPHER_CRYPTO_OPENSSL\"])\n\n # 
sqlcipher config.sub does not contain android configurations...\n # elf is the most basic `os' for Android\n host = None\n if self.settings.os == \"Android\":\n host = \"%s-linux-elf\" % self._arch_id_str_compiler\n elif self.settings.os == \"Windows\":\n arch = str(self.settings.arch)\n if arch == \"x86\":\n arch = \"i386\"\n host = \"%s-pc-mingw32\" % arch\n elif self.settings.os == \"iOS\":\n host = \"%s-apple-darwin\" % self.settings.arch\n\n configure_args = self._get_configure_args(absolute_install_dir)\n with tools.chdir(self._source_subfolder):\n # Hack, uname -p returns i386, configure guesses x86_64, we must force i386 so that cross-compilation is correctly detected.\n # Otherwise host/build are the same, and configure tries to launch a sample executable, and fails miserably.\n env_vars = autotools_env.vars\n if self.settings.os == \"iOS\":\n build = \"i386-apple-darwin\"\n # same for mingw...\n elif self.settings.os == \"Windows\":\n build = \"x86_64-linux\"\n env_vars[\"config_TARGET_EXEEXT\"] = \".exe\"\n else:\n build = None\n tclsh_cmd = self.deps_env_info.TCLSH\n env_vars[\"TCLSH_CMD\"] = tclsh_cmd.replace(\"\\\\\", \"/\")\n autotools_env.configure(args=configure_args, host=host, build=build, vars=env_vars)\n if self.settings.os == \"Windows\":\n # sqlcipher will create .exe for the build machine, which we defined to Linux...\n tools.replace_in_file(os.path.join(self.build_folder, self._source_subfolder, \"Makefile\"), \"BEXE = .exe\", \"BEXE = \")\n autotools_env.make(args=[\"install\"])\n\n @property\n def _arch_id_str_compiler(self):\n return {\"x86\": \"i686\",\n \"armv6\": \"arm\",\n \"armv7\": \"arm\",\n \"armv7hf\": \"arm\",\n # Hack: config.guess of sqlcipher does not like aarch64\n \"armv8\": \"armv8\",\n \"mips64\": \"mips64\"}.get(str(self.settings.arch),\n str(self.settings.arch))\n\n def _get_configure_args(self, absolute_install_dir):\n args = [\n \"--prefix=%s\" % absolute_install_dir,\n\n self._autotools_bool_arg(\"shared\", self.options.shared),\n self._autotools_bool_arg(\"static\", not self.options.shared),\n \"--enable-tempstore=%s\" % self._temp_store_autotools_value,\n \"--disable-tcl\",\n ]\n if self.settings.os == \"Windows\":\n args.extend([\"config_BUILD_EXEEXT='.exe'\", \"config_TARGET_EXEEXT='.exe'\"])\n return args\n\n def _autotools_bool_arg(self, arg_base_name, value):\n prefix = \"--enable-\" if value else \"--disable-\"\n\n return prefix + arg_base_name\n\n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n if self.settings.os == \"Macos\":\n tools.replace_in_file(os.path.join(self._source_subfolder, \"configure\"), r\"-install_name \\$rpath/\", \"-install_name \")\n\n if self.settings.compiler == \"Visual Studio\":\n self._build_visual()\n else:\n self._build_autotools()\n\n def _package_unix(self):\n self.copy(\"*sqlite3.h\", src=\"install\")\n self.copy(\"*.so*\", dst=\"lib\", src=\"install\", keep_path=False, symlinks=True)\n self.copy(\"*.a\", dst=\"lib\", src=\"install\", keep_path=False)\n self.copy(\"*.lib\", dst=\"lib\", src=\"install\", keep_path=False)\n self.copy(\"*.dll\", dst=\"bin\", src=\"install\", keep_path=False)\n self.copy(\"*.dylib\", dst=\"lib\", src=\"install\", keep_path=False)\n self.copy(\"*LICENSE\", dst=\"licenses\", keep_path=False)\n\n def _package_visual(self):\n self.copy(\"*.dll\", dst=\"bin\", keep_path=False)\n self.copy(\"*.lib\", dst=\"lib\", keep_path=False)\n self.copy(\"*LICENSE\", dst=\"licenses\", keep_path=False)\n self.copy(\"sqlite3.h\", 
src=self._source_subfolder, dst=os.path.join(\"include\", \"sqlcipher\"))\n\n def package(self):\n if self.settings.compiler == \"Visual Studio\":\n self._package_visual()\n else:\n self._package_unix()\n\n def package_info(self):\n self.cpp_info.libs = [\"sqlcipher\"]\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"pthread\", \"dl\"])\n self.cpp_info.defines = [\"SQLITE_HAS_CODEC\", 'SQLCIPHER_CRYPTO_OPENSSL', 'SQLITE_TEMP_STORE=%s' % self._temp_store_nmake_value]\n # Allow using #include <sqlite3.h> even with sqlcipher (for libs like sqlpp11-connector-sqlite3)\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"sqlcipher\"))\n", "path": "recipes/sqlcipher/all/conanfile.py"}]}
3,238
212
gh_patches_debug_21139
rasdani/github-patches
git_diff
conan-io__conan-3600
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Listing package content Maybe a stupid question, but is there a quick way to list all the files (artifacts: binaries and header files) contained in a package (i.e. files added through the package method)? TIA --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conans/client/local_file_getter.py` Content: ``` 1 import os 2 3 from conans.errors import NotFoundException 4 from conans.model.manifest import discarded_file 5 from conans.model.ref import PackageReference 6 from conans.util.files import load 7 8 9 def get_path(client_cache, conan_ref, package_id, path): 10 """ 11 :param client_cache: Conan's client cache 12 :param conan_ref: Specified reference in the conan get command 13 :param package_id: Specified package id (can be None) 14 :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well) 15 :return: The real path in the local cache for the specified parameters 16 """ 17 if package_id is None: # Get the file in the exported files 18 folder = client_cache.export(conan_ref) 19 else: 20 folder = client_cache.package(PackageReference(conan_ref, package_id)) 21 22 abs_path = os.path.join(folder, path) 23 if not os.path.exists(abs_path): 24 raise NotFoundException("The specified path doesn't exist") 25 if os.path.isdir(abs_path): 26 return sorted([path for path in os.listdir(abs_path) if not discarded_file(path)]) 27 else: 28 return load(abs_path) 29 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conans/client/local_file_getter.py b/conans/client/local_file_getter.py --- a/conans/client/local_file_getter.py +++ b/conans/client/local_file_getter.py @@ -11,13 +11,15 @@ :param client_cache: Conan's client cache :param conan_ref: Specified reference in the conan get command :param package_id: Specified package id (can be None) - :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well) + :param path: Path to a file, subfolder of exports (if only ref) + or package (if package_id defined) :return: The real path in the local cache for the specified parameters """ if package_id is None: # Get the file in the exported files folder = client_cache.export(conan_ref) else: - folder = client_cache.package(PackageReference(conan_ref, package_id)) + folder = client_cache.package(PackageReference(conan_ref, package_id), + short_paths=None) abs_path = os.path.join(folder, path) if not os.path.exists(abs_path):
{"golden_diff": "diff --git a/conans/client/local_file_getter.py b/conans/client/local_file_getter.py\n--- a/conans/client/local_file_getter.py\n+++ b/conans/client/local_file_getter.py\n@@ -11,13 +11,15 @@\n :param client_cache: Conan's client cache\n :param conan_ref: Specified reference in the conan get command\n :param package_id: Specified package id (can be None)\n- :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)\n+ :param path: Path to a file, subfolder of exports (if only ref)\n+ or package (if package_id defined)\n :return: The real path in the local cache for the specified parameters\n \"\"\"\n if package_id is None: # Get the file in the exported files\n folder = client_cache.export(conan_ref)\n else:\n- folder = client_cache.package(PackageReference(conan_ref, package_id))\n+ folder = client_cache.package(PackageReference(conan_ref, package_id),\n+ short_paths=None)\n \n abs_path = os.path.join(folder, path)\n if not os.path.exists(abs_path):\n", "issue": "Listing package content\nMaybe a stupid question, but is there a quick way to list all the files (artifacts: binaries and header files) contained in a package (i.e. files added through the package method)?\r\nTIA\r\n\n", "before_files": [{"content": "import os\n\nfrom conans.errors import NotFoundException\nfrom conans.model.manifest import discarded_file\nfrom conans.model.ref import PackageReference\nfrom conans.util.files import load\n\n\ndef get_path(client_cache, conan_ref, package_id, path):\n \"\"\"\n :param client_cache: Conan's client cache\n :param conan_ref: Specified reference in the conan get command\n :param package_id: Specified package id (can be None)\n :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)\n :return: The real path in the local cache for the specified parameters\n \"\"\"\n if package_id is None: # Get the file in the exported files\n folder = client_cache.export(conan_ref)\n else:\n folder = client_cache.package(PackageReference(conan_ref, package_id))\n\n abs_path = os.path.join(folder, path)\n if not os.path.exists(abs_path):\n raise NotFoundException(\"The specified path doesn't exist\")\n if os.path.isdir(abs_path):\n return sorted([path for path in os.listdir(abs_path) if not discarded_file(path)])\n else:\n return load(abs_path)\n", "path": "conans/client/local_file_getter.py"}], "after_files": [{"content": "import os\n\nfrom conans.errors import NotFoundException\nfrom conans.model.manifest import discarded_file\nfrom conans.model.ref import PackageReference\nfrom conans.util.files import load\n\n\ndef get_path(client_cache, conan_ref, package_id, path):\n \"\"\"\n :param client_cache: Conan's client cache\n :param conan_ref: Specified reference in the conan get command\n :param package_id: Specified package id (can be None)\n :param path: Path to a file, subfolder of exports (if only ref)\n or package (if package_id defined)\n :return: The real path in the local cache for the specified parameters\n \"\"\"\n if package_id is None: # Get the file in the exported files\n folder = client_cache.export(conan_ref)\n else:\n folder = client_cache.package(PackageReference(conan_ref, package_id),\n short_paths=None)\n\n abs_path = os.path.join(folder, path)\n if not os.path.exists(abs_path):\n raise NotFoundException(\"The specified path doesn't exist\")\n if os.path.isdir(abs_path):\n return sorted([path for path in os.listdir(abs_path) if not discarded_file(path)])\n else:\n return 
load(abs_path)\n", "path": "conans/client/local_file_getter.py"}]}
621
270
gh_patches_debug_16510
rasdani/github-patches
git_diff
cupy__cupy-5771
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [RFC] Drop Python 3.6 support in v10 We are now discussing to drop support for Python 3.6 in CuPy v10. Feel free to leave a comment here if you have any concerns. Background: * CUDA Python is unlikely to provide a wheel for Python 3.6, although it can be built from the source without any issue. CUDA Python currently requires [`-std=c++14`](https://github.com/NVIDIA/cuda-python/blob/427c597959e6fe1409195a30d42fc4a1886bc89a/setup.py#L38) so recent versions of gcc, which is not in RHEL/CentOS 7 by default, is needed. We want to avoid requiring CuPy wheel users to manually install non-default GCC. * NumPy dropped Python 3.6 support in June 2020: https://numpy.org/neps/nep-0029-deprecation_policy.html * Python 3.6 support become EOL in December 2021. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 import glob 4 import os 5 from setuptools import setup, find_packages 6 import sys 7 8 source_root = os.path.abspath(os.path.dirname(__file__)) 9 sys.path.append(os.path.join(source_root, 'install')) 10 11 import cupy_builder # NOQA 12 from cupy_builder import cupy_setup_build # NOQA 13 14 ctx = cupy_builder.Context(source_root) 15 cupy_builder.initialize(ctx) 16 if not cupy_builder.preflight_check(ctx): 17 sys.exit(1) 18 19 20 # TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619) 21 setup_requires = [ 22 'Cython>=0.29.22,<3', 23 'fastrlock>=0.5', 24 ] 25 install_requires = [ 26 'numpy>=1.17,<1.24', # see #4773 27 'fastrlock>=0.5', 28 ] 29 extras_require = { 30 'all': [ 31 'scipy>=1.4,<1.10', # see #4773 32 'Cython>=0.29.22,<3', 33 'optuna>=2.0', 34 ], 35 'stylecheck': [ 36 'autopep8==1.5.5', 37 'flake8==3.8.4', 38 'pbr==5.5.1', 39 'pycodestyle==2.6.0', 40 ], 41 'test': [ 42 # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI. 43 'pytest>=6.2', 44 ], 45 # TODO(kmaehashi): Remove 'jenkins' requirements. 46 'jenkins': [ 47 'pytest>=6.2', 48 'pytest-timeout', 49 'pytest-cov', 50 'coveralls', 51 'codecov', 52 'coverage<5', # Otherwise, Python must be built with sqlite 53 ], 54 } 55 tests_require = extras_require['test'] 56 57 58 # List of files that needs to be in the distribution (sdist/wheel). 59 # Notes: 60 # - Files only needed in sdist should be added to `MANIFEST.in`. 61 # - The following glob (`**`) ignores items starting with `.`. 
62 cupy_package_data = [ 63 'cupy/cuda/cupy_thrust.cu', 64 'cupy/cuda/cupy_cub.cu', 65 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback 66 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback 67 'cupy/cuda/cupy_cufft.h', # for cuFFT callback 68 'cupy/cuda/cufft.pxd', # for cuFFT callback 69 'cupy/cuda/cufft.pyx', # for cuFFT callback 70 'cupy/random/cupy_distributions.cu', 71 'cupy/random/cupy_distributions.cuh', 72 ] + [ 73 x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True) 74 if os.path.isfile(x) 75 ] 76 77 package_data = { 78 'cupy': [ 79 os.path.relpath(x, 'cupy') for x in cupy_package_data 80 ], 81 } 82 83 package_data['cupy'] += cupy_setup_build.prepare_wheel_libs(ctx) 84 85 ext_modules = cupy_setup_build.get_ext_modules(False, ctx) 86 build_ext = cupy_setup_build.custom_build_ext 87 88 # Get __version__ variable 89 with open(os.path.join(source_root, 'cupy', '_version.py')) as f: 90 exec(f.read()) 91 92 long_description = None 93 if ctx.long_description_path is not None: 94 with open(ctx.long_description_path) as f: 95 long_description = f.read() 96 97 98 CLASSIFIERS = """\ 99 Development Status :: 5 - Production/Stable 100 Intended Audience :: Science/Research 101 Intended Audience :: Developers 102 License :: OSI Approved :: MIT License 103 Programming Language :: Python 104 Programming Language :: Python :: 3 105 Programming Language :: Python :: 3.6 106 Programming Language :: Python :: 3.7 107 Programming Language :: Python :: 3.8 108 Programming Language :: Python :: 3.9 109 Programming Language :: Python :: 3 :: Only 110 Programming Language :: Cython 111 Topic :: Software Development 112 Topic :: Scientific/Engineering 113 Operating System :: POSIX 114 Operating System :: Microsoft :: Windows 115 """ 116 117 118 setup( 119 name=ctx.package_name, 120 version=__version__, # NOQA 121 description='CuPy: NumPy & SciPy for GPU', 122 long_description=long_description, 123 author='Seiya Tokui', 124 author_email='[email protected]', 125 maintainer='CuPy Developers', 126 url='https://cupy.dev/', 127 license='MIT License', 128 project_urls={ 129 "Bug Tracker": "https://github.com/cupy/cupy/issues", 130 "Documentation": "https://docs.cupy.dev/", 131 "Source Code": "https://github.com/cupy/cupy", 132 }, 133 classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], 134 packages=find_packages(exclude=['install', 'tests']), 135 package_data=package_data, 136 zip_safe=False, 137 python_requires='>=3.6.0', 138 setup_requires=setup_requires, 139 install_requires=install_requires, 140 tests_require=tests_require, 141 extras_require=extras_require, 142 ext_modules=ext_modules, 143 cmdclass={'build_ext': build_ext}, 144 ) 145 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -102,7 +102,6 @@ License :: OSI Approved :: MIT License Programming Language :: Python Programming Language :: Python :: 3 -Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 @@ -134,7 +133,7 @@ packages=find_packages(exclude=['install', 'tests']), package_data=package_data, zip_safe=False, - python_requires='>=3.6.0', + python_requires='>=3.7', setup_requires=setup_requires, install_requires=install_requires, tests_require=tests_require,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -102,7 +102,6 @@\n License :: OSI Approved :: MIT License\n Programming Language :: Python\n Programming Language :: Python :: 3\n-Programming Language :: Python :: 3.6\n Programming Language :: Python :: 3.7\n Programming Language :: Python :: 3.8\n Programming Language :: Python :: 3.9\n@@ -134,7 +133,7 @@\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n- python_requires='>=3.6.0',\n+ python_requires='>=3.7',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n", "issue": "[RFC] Drop Python 3.6 support in v10\nWe are now discussing to drop support for Python 3.6 in CuPy v10. Feel free to leave a comment here if you have any concerns.\r\n\r\nBackground:\r\n* CUDA Python is unlikely to provide a wheel for Python 3.6, although it can be built from the source without any issue. CUDA Python currently requires [`-std=c++14`](https://github.com/NVIDIA/cuda-python/blob/427c597959e6fe1409195a30d42fc4a1886bc89a/setup.py#L38) so recent versions of gcc, which is not in RHEL/CentOS 7 by default, is needed. We want to avoid requiring CuPy wheel users to manually install non-default GCC.\r\n* NumPy dropped Python 3.6 support in June 2020: https://numpy.org/neps/nep-0029-deprecation_policy.html\r\n* Python 3.6 support become EOL in December 2021.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nsource_root = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.join(source_root, 'install'))\n\nimport cupy_builder # NOQA\nfrom cupy_builder import cupy_setup_build # NOQA\n\nctx = cupy_builder.Context(source_root)\ncupy_builder.initialize(ctx)\nif not cupy_builder.preflight_check(ctx):\n sys.exit(1)\n\n\n# TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)\nsetup_requires = [\n 'Cython>=0.29.22,<3',\n 'fastrlock>=0.5',\n]\ninstall_requires = [\n 'numpy>=1.17,<1.24', # see #4773\n 'fastrlock>=0.5',\n]\nextras_require = {\n 'all': [\n 'scipy>=1.4,<1.10', # see #4773\n 'Cython>=0.29.22,<3',\n 'optuna>=2.0',\n ],\n 'stylecheck': [\n 'autopep8==1.5.5',\n 'flake8==3.8.4',\n 'pbr==5.5.1',\n 'pycodestyle==2.6.0',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n 'pytest>=6.2',\n ],\n # TODO(kmaehashi): Remove 'jenkins' requirements.\n 'jenkins': [\n 'pytest>=6.2',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n 'coverage<5', # Otherwise, Python must be built with sqlite\n ],\n}\ntests_require = extras_require['test']\n\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)\n if os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] += 
cupy_setup_build.prepare_wheel_libs(ctx)\n\next_modules = cupy_setup_build.get_ext_modules(False, ctx)\nbuild_ext = cupy_setup_build.custom_build_ext\n\n# Get __version__ variable\nwith open(os.path.join(source_root, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nlong_description = None\nif ctx.long_description_path is not None:\n with open(ctx.long_description_path) as f:\n long_description = f.read()\n\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3.9\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n\nsetup(\n name=ctx.package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy & SciPy for GPU',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n maintainer='CuPy Developers',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.6.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nsource_root = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.join(source_root, 'install'))\n\nimport cupy_builder # NOQA\nfrom cupy_builder import cupy_setup_build # NOQA\n\nctx = cupy_builder.Context(source_root)\ncupy_builder.initialize(ctx)\nif not cupy_builder.preflight_check(ctx):\n sys.exit(1)\n\n\n# TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)\nsetup_requires = [\n 'Cython>=0.29.22,<3',\n 'fastrlock>=0.5',\n]\ninstall_requires = [\n 'numpy>=1.17,<1.24', # see #4773\n 'fastrlock>=0.5',\n]\nextras_require = {\n 'all': [\n 'scipy>=1.4,<1.10', # see #4773\n 'Cython>=0.29.22,<3',\n 'optuna>=2.0',\n ],\n 'stylecheck': [\n 'autopep8==1.5.5',\n 'flake8==3.8.4',\n 'pbr==5.5.1',\n 'pycodestyle==2.6.0',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n 'pytest>=6.2',\n ],\n # TODO(kmaehashi): Remove 'jenkins' requirements.\n 'jenkins': [\n 'pytest>=6.2',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n 'coverage<5', # Otherwise, Python must be built with sqlite\n ],\n}\ntests_require = extras_require['test']\n\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 
'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)\n if os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs(ctx)\n\next_modules = cupy_setup_build.get_ext_modules(False, ctx)\nbuild_ext = cupy_setup_build.custom_build_ext\n\n# Get __version__ variable\nwith open(os.path.join(source_root, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nlong_description = None\nif ctx.long_description_path is not None:\n with open(ctx.long_description_path) as f:\n long_description = f.read()\n\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3.9\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n\nsetup(\n name=ctx.package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy & SciPy for GPU',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n maintainer='CuPy Developers',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.7',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext},\n)\n", "path": "setup.py"}]}
2,006
177
gh_patches_debug_56202
rasdani/github-patches
git_diff
svthalia__concrexit-3558
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Separate promotion permissions in eventadmin inline from the main promotion perms ### What? Currently, people need add/change_promotionrequest permission to make promotionrequests for their events. But with this permission they also get full access to all other promotionrequests. So we should make the inline in the eventadmin bypass that check. For all I care, anyone who can change an event can make a promorquest from the eventadmin (by virtue of their 'change_event' permission and being an organizer of the event), without having the add/change_promotionrequest permission, and thus without seeing the main Promotion Requests changelist page. ### Why? <!-- A clear and concise motivation why we should consider implementing this. --> Least privilege principle: many people should be allowed to request promotion for their own events, but don't need to be able to edit unrelated requests. And this way we can have promocie be able to bypass the requirements in #3529, without normal organizers being able to do the same. ### How? Override has_xxx_permission() on the inline class. Read the inlinemodeladmin docs for guidance. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/events/admin/inlines.py` Content: ``` 1 from django.contrib import admin 2 3 from events import models 4 from pizzas.models import FoodEvent 5 from promotion.models import PromotionRequest 6 7 from .forms import RegistrationInformationFieldForm 8 9 10 class RegistrationInformationFieldInline(admin.TabularInline): 11 """The inline for registration information fields in the Event admin.""" 12 13 form = RegistrationInformationFieldForm 14 extra = 0 15 model = models.RegistrationInformationField 16 ordering = ("_order",) 17 18 radio_fields = {"type": admin.VERTICAL} 19 20 def get_formset(self, request, obj=None, **kwargs): 21 formset = super().get_formset(request, obj, **kwargs) 22 if obj is not None: 23 count = obj.registrationinformationfield_set.count() 24 formset.form.declared_fields["order"].initial = count 25 return formset 26 27 28 class PizzaEventInline(admin.StackedInline): 29 """The inline for pizza events in the Event admin.""" 30 31 model = FoodEvent 32 extra = 0 33 max_num = 1 34 35 36 class PromotionRequestInline(admin.StackedInline): 37 model = PromotionRequest 38 readonly_fields = ( 39 "assigned_to", 40 "status", 41 "drive_folder", 42 ) 43 extra = 0 44 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/events/admin/inlines.py b/website/events/admin/inlines.py --- a/website/events/admin/inlines.py +++ b/website/events/admin/inlines.py @@ -39,5 +39,19 @@ "assigned_to", "status", "drive_folder", + "status_updated", ) + + def has_add_permission(self, request, obj=None): + return True + + def has_view_permission(self, request, obj=None): + return True + + def has_change_permission(self, request, obj=None): + return True + + def has_delete_permission(self, request, obj=None): + return True + extra = 0
{"golden_diff": "diff --git a/website/events/admin/inlines.py b/website/events/admin/inlines.py\n--- a/website/events/admin/inlines.py\n+++ b/website/events/admin/inlines.py\n@@ -39,5 +39,19 @@\n \"assigned_to\",\n \"status\",\n \"drive_folder\",\n+ \"status_updated\",\n )\n+\n+ def has_add_permission(self, request, obj=None):\n+ return True\n+\n+ def has_view_permission(self, request, obj=None):\n+ return True\n+\n+ def has_change_permission(self, request, obj=None):\n+ return True\n+\n+ def has_delete_permission(self, request, obj=None):\n+ return True\n+\n extra = 0\n", "issue": "Separate promotion permissions in eventadmin inline from the main promotion perms\n### What?\r\nCurrently, people need add/change_promotionrequest permission to make promotionrequests for their events. But with this permission they also get full access to all other promotionrequests. So we should make the inline in the eventadmin bypass that check. For all I care, anyone who can change an event can make a promorquest from the eventadmin (by virtue of their 'change_event' permission and being an organizer of the event), without having the add/change_promotionrequest permission, and thus without seeing the main Promotion Requests changelist page.\r\n\r\n### Why?\r\n<!-- A clear and concise motivation why we should consider implementing this. -->\r\nLeast privilege principle: many people should be allowed to request promotion for their own events, but don't need to be able to edit unrelated requests. And this way we can have promocie be able to bypass the requirements in #3529, without normal organizers being able to do the same.\r\n\r\n### How?\r\nOverride has_xxx_permission() on the inline class. Read the inlinemodeladmin docs for guidance.\r\n\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom events import models\nfrom pizzas.models import FoodEvent\nfrom promotion.models import PromotionRequest\n\nfrom .forms import RegistrationInformationFieldForm\n\n\nclass RegistrationInformationFieldInline(admin.TabularInline):\n \"\"\"The inline for registration information fields in the Event admin.\"\"\"\n\n form = RegistrationInformationFieldForm\n extra = 0\n model = models.RegistrationInformationField\n ordering = (\"_order\",)\n\n radio_fields = {\"type\": admin.VERTICAL}\n\n def get_formset(self, request, obj=None, **kwargs):\n formset = super().get_formset(request, obj, **kwargs)\n if obj is not None:\n count = obj.registrationinformationfield_set.count()\n formset.form.declared_fields[\"order\"].initial = count\n return formset\n\n\nclass PizzaEventInline(admin.StackedInline):\n \"\"\"The inline for pizza events in the Event admin.\"\"\"\n\n model = FoodEvent\n extra = 0\n max_num = 1\n\n\nclass PromotionRequestInline(admin.StackedInline):\n model = PromotionRequest\n readonly_fields = (\n \"assigned_to\",\n \"status\",\n \"drive_folder\",\n )\n extra = 0\n", "path": "website/events/admin/inlines.py"}], "after_files": [{"content": "from django.contrib import admin\n\nfrom events import models\nfrom pizzas.models import FoodEvent\nfrom promotion.models import PromotionRequest\n\nfrom .forms import RegistrationInformationFieldForm\n\n\nclass RegistrationInformationFieldInline(admin.TabularInline):\n \"\"\"The inline for registration information fields in the Event admin.\"\"\"\n\n form = RegistrationInformationFieldForm\n extra = 0\n model = models.RegistrationInformationField\n ordering = (\"_order\",)\n\n radio_fields = {\"type\": admin.VERTICAL}\n\n def get_formset(self, request, 
obj=None, **kwargs):\n formset = super().get_formset(request, obj, **kwargs)\n if obj is not None:\n count = obj.registrationinformationfield_set.count()\n formset.form.declared_fields[\"order\"].initial = count\n return formset\n\n\nclass PizzaEventInline(admin.StackedInline):\n \"\"\"The inline for pizza events in the Event admin.\"\"\"\n\n model = FoodEvent\n extra = 0\n max_num = 1\n\n\nclass PromotionRequestInline(admin.StackedInline):\n model = PromotionRequest\n readonly_fields = (\n \"assigned_to\",\n \"status\",\n \"drive_folder\",\n \"status_updated\",\n )\n\n def has_add_permission(self, request, obj=None):\n return True\n\n def has_view_permission(self, request, obj=None):\n return True\n\n def has_change_permission(self, request, obj=None):\n return True\n\n def has_delete_permission(self, request, obj=None):\n return True\n\n extra = 0\n", "path": "website/events/admin/inlines.py"}]}
827
158
gh_patches_debug_32880
rasdani/github-patches
git_diff
learningequality__kolibri-7214
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Learner activity records partially not visible after upgrade to 0.14b3 # Observed Behaviour Upgraded from 0.13.3 to 0.14b3. Learner activity records were partially not visible in Class Home -> Class activity and Reports. Downgraded back to 0.13.3 and they are all visible again. # Expected behavior All learner activity should be visible as prior to upgrade # User-facing consequences Confusion and fear of data loss. # Errors and logs None on screen # Steps to reproduce Upgrade from 0.13.3 to 0.14.0-b3 and check learner activity records. # Context Ubuntu 18.04.3 Package : 0.14.beta3 .deb --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kolibri/core/query.py` Content: ``` 1 from django.db import connection 2 from django.db.models import Aggregate 3 from django.db.models import CharField 4 from django.db.models import IntegerField 5 from django.db.models import Subquery 6 7 try: 8 from django.contrib.postgres.aggregates import ArrayAgg 9 10 class NotNullArrayAgg(ArrayAgg): 11 def convert_value(self, value, expression, connection, context): 12 if not value: 13 return [] 14 return filter(lambda x: x is not None, value) 15 16 17 except ImportError: 18 NotNullArrayAgg = None 19 20 21 class SQCount(Subquery): 22 # Include ALIAS at the end to support Postgres 23 template = "(SELECT COUNT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)" 24 output_field = IntegerField() 25 26 27 class SQSum(Subquery): 28 # Include ALIAS at the end to support Postgres 29 template = "(SELECT SUM(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)" 30 output_field = IntegerField() 31 32 33 class GroupConcatSubquery(Subquery): 34 template = "(SELECT GROUP_CONCAT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)" 35 output_field = CharField() 36 37 def as_postgresql(self, compiler, connection): 38 self.template = ( 39 "(SELECT STRING_AGG(%(field)s, ',') FROM (%(subquery)s) AS %(field)s__sum)" 40 ) 41 return super(GroupConcatSubquery, self).as_sql(compiler, connection) 42 43 44 class GroupConcat(Aggregate): 45 template = "GROUP_CONCAT(%(field)s)" 46 output_field = CharField() 47 48 def __init__(self, *args, **kwargs): 49 self.result_field = kwargs.pop("result_field", None) 50 super(GroupConcat, self).__init__(*args, **kwargs) 51 52 def convert_value(self, value, expression, connection, context): 53 if not value: 54 return [] 55 results = value.split(",") 56 if self.result_field is not None: 57 return map(self.result_field.to_python, results) 58 return results 59 60 61 def get_source_field(model, field_path): 62 # Get the source field from the model so that we can properly coerce values 63 # this is necessary when we are using GroupConcat to return non-string fields. 64 paths = field_path.split("__") 65 while len(paths) > 1: 66 model = model._meta.get_field(paths.pop(0)).related_model 67 return model._meta.get_field(paths[0]) 68 69 70 def annotate_array_aggregate(queryset, **kwargs): 71 if connection.vendor == "postgresql" and NotNullArrayAgg is not None: 72 return queryset.annotate( 73 **{target: NotNullArrayAgg(source) for target, source in kwargs.items()} 74 ) 75 model = queryset.model 76 # Call values on "pk" to insert a GROUP BY to ensure the GROUP CONCAT 77 # is called by row and not across the entire queryset. 
78 return queryset.values("pk").annotate( 79 **{ 80 target: GroupConcat(source, result_field=get_source_field(model, source)) 81 for target, source in kwargs.items() 82 } 83 ) 84 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kolibri/core/query.py b/kolibri/core/query.py --- a/kolibri/core/query.py +++ b/kolibri/core/query.py @@ -8,10 +8,17 @@ from django.contrib.postgres.aggregates import ArrayAgg class NotNullArrayAgg(ArrayAgg): + def __init__(self, *args, **kwargs): + self.result_field = kwargs.pop("result_field", None) + super(NotNullArrayAgg, self).__init__(*args, **kwargs) + def convert_value(self, value, expression, connection, context): if not value: return [] - return filter(lambda x: x is not None, value) + results = list(filter(lambda x: x is not None, value)) + if self.result_field is not None: + return list(map(self.result_field.to_python, results)) + return results except ImportError: @@ -54,7 +61,7 @@ return [] results = value.split(",") if self.result_field is not None: - return map(self.result_field.to_python, results) + return list(map(self.result_field.to_python, results)) return results @@ -68,11 +75,16 @@ def annotate_array_aggregate(queryset, **kwargs): + model = queryset.model if connection.vendor == "postgresql" and NotNullArrayAgg is not None: return queryset.annotate( - **{target: NotNullArrayAgg(source) for target, source in kwargs.items()} + **{ + target: NotNullArrayAgg( + source, result_field=get_source_field(model, source) + ) + for target, source in kwargs.items() + } ) - model = queryset.model # Call values on "pk" to insert a GROUP BY to ensure the GROUP CONCAT # is called by row and not across the entire queryset. return queryset.values("pk").annotate(
{"golden_diff": "diff --git a/kolibri/core/query.py b/kolibri/core/query.py\n--- a/kolibri/core/query.py\n+++ b/kolibri/core/query.py\n@@ -8,10 +8,17 @@\n from django.contrib.postgres.aggregates import ArrayAgg\n \n class NotNullArrayAgg(ArrayAgg):\n+ def __init__(self, *args, **kwargs):\n+ self.result_field = kwargs.pop(\"result_field\", None)\n+ super(NotNullArrayAgg, self).__init__(*args, **kwargs)\n+\n def convert_value(self, value, expression, connection, context):\n if not value:\n return []\n- return filter(lambda x: x is not None, value)\n+ results = list(filter(lambda x: x is not None, value))\n+ if self.result_field is not None:\n+ return list(map(self.result_field.to_python, results))\n+ return results\n \n \n except ImportError:\n@@ -54,7 +61,7 @@\n return []\n results = value.split(\",\")\n if self.result_field is not None:\n- return map(self.result_field.to_python, results)\n+ return list(map(self.result_field.to_python, results))\n return results\n \n \n@@ -68,11 +75,16 @@\n \n \n def annotate_array_aggregate(queryset, **kwargs):\n+ model = queryset.model\n if connection.vendor == \"postgresql\" and NotNullArrayAgg is not None:\n return queryset.annotate(\n- **{target: NotNullArrayAgg(source) for target, source in kwargs.items()}\n+ **{\n+ target: NotNullArrayAgg(\n+ source, result_field=get_source_field(model, source)\n+ )\n+ for target, source in kwargs.items()\n+ }\n )\n- model = queryset.model\n # Call values on \"pk\" to insert a GROUP BY to ensure the GROUP CONCAT\n # is called by row and not across the entire queryset.\n return queryset.values(\"pk\").annotate(\n", "issue": "Learner activity records partially not visible after upgrade to 0.14b3\n# Observed Behaviour\n\nUpgraded from 0.13.3 to 0.14b3. Learner activity records were partially not visible in Class Home -> Class activity and Reports. 
Downgraded back to 0.13.3 and they are all visible again.\n\n# Expected behavior\n\nAll learner activity should be visible as prior to upgrade\n\n\n# User-facing consequences\n\n Confusion and fear of data loss.\n\n# Errors and logs\n\nNone on screen\n\n# Steps to reproduce\n\nUpgrade from 0.13.3 to 0.14.0-b3 and check learner activity records.\n\n\n\n# Context\nUbuntu 18.04.3\nPackage : 0.14.beta3 .deb\n\n", "before_files": [{"content": "from django.db import connection\nfrom django.db.models import Aggregate\nfrom django.db.models import CharField\nfrom django.db.models import IntegerField\nfrom django.db.models import Subquery\n\ntry:\n from django.contrib.postgres.aggregates import ArrayAgg\n\n class NotNullArrayAgg(ArrayAgg):\n def convert_value(self, value, expression, connection, context):\n if not value:\n return []\n return filter(lambda x: x is not None, value)\n\n\nexcept ImportError:\n NotNullArrayAgg = None\n\n\nclass SQCount(Subquery):\n # Include ALIAS at the end to support Postgres\n template = \"(SELECT COUNT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = IntegerField()\n\n\nclass SQSum(Subquery):\n # Include ALIAS at the end to support Postgres\n template = \"(SELECT SUM(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = IntegerField()\n\n\nclass GroupConcatSubquery(Subquery):\n template = \"(SELECT GROUP_CONCAT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = CharField()\n\n def as_postgresql(self, compiler, connection):\n self.template = (\n \"(SELECT STRING_AGG(%(field)s, ',') FROM (%(subquery)s) AS %(field)s__sum)\"\n )\n return super(GroupConcatSubquery, self).as_sql(compiler, connection)\n\n\nclass GroupConcat(Aggregate):\n template = \"GROUP_CONCAT(%(field)s)\"\n output_field = CharField()\n\n def __init__(self, *args, **kwargs):\n self.result_field = kwargs.pop(\"result_field\", None)\n super(GroupConcat, self).__init__(*args, **kwargs)\n\n def convert_value(self, value, expression, connection, context):\n if not value:\n return []\n results = value.split(\",\")\n if self.result_field is not None:\n return map(self.result_field.to_python, results)\n return results\n\n\ndef get_source_field(model, field_path):\n # Get the source field from the model so that we can properly coerce values\n # this is necessary when we are using GroupConcat to return non-string fields.\n paths = field_path.split(\"__\")\n while len(paths) > 1:\n model = model._meta.get_field(paths.pop(0)).related_model\n return model._meta.get_field(paths[0])\n\n\ndef annotate_array_aggregate(queryset, **kwargs):\n if connection.vendor == \"postgresql\" and NotNullArrayAgg is not None:\n return queryset.annotate(\n **{target: NotNullArrayAgg(source) for target, source in kwargs.items()}\n )\n model = queryset.model\n # Call values on \"pk\" to insert a GROUP BY to ensure the GROUP CONCAT\n # is called by row and not across the entire queryset.\n return queryset.values(\"pk\").annotate(\n **{\n target: GroupConcat(source, result_field=get_source_field(model, source))\n for target, source in kwargs.items()\n }\n )\n", "path": "kolibri/core/query.py"}], "after_files": [{"content": "from django.db import connection\nfrom django.db.models import Aggregate\nfrom django.db.models import CharField\nfrom django.db.models import IntegerField\nfrom django.db.models import Subquery\n\ntry:\n from django.contrib.postgres.aggregates import ArrayAgg\n\n class NotNullArrayAgg(ArrayAgg):\n def __init__(self, *args, **kwargs):\n self.result_field = 
kwargs.pop(\"result_field\", None)\n super(NotNullArrayAgg, self).__init__(*args, **kwargs)\n\n def convert_value(self, value, expression, connection, context):\n if not value:\n return []\n results = list(filter(lambda x: x is not None, value))\n if self.result_field is not None:\n return list(map(self.result_field.to_python, results))\n return results\n\n\nexcept ImportError:\n NotNullArrayAgg = None\n\n\nclass SQCount(Subquery):\n # Include ALIAS at the end to support Postgres\n template = \"(SELECT COUNT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = IntegerField()\n\n\nclass SQSum(Subquery):\n # Include ALIAS at the end to support Postgres\n template = \"(SELECT SUM(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = IntegerField()\n\n\nclass GroupConcatSubquery(Subquery):\n template = \"(SELECT GROUP_CONCAT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = CharField()\n\n def as_postgresql(self, compiler, connection):\n self.template = (\n \"(SELECT STRING_AGG(%(field)s, ',') FROM (%(subquery)s) AS %(field)s__sum)\"\n )\n return super(GroupConcatSubquery, self).as_sql(compiler, connection)\n\n\nclass GroupConcat(Aggregate):\n template = \"GROUP_CONCAT(%(field)s)\"\n output_field = CharField()\n\n def __init__(self, *args, **kwargs):\n self.result_field = kwargs.pop(\"result_field\", None)\n super(GroupConcat, self).__init__(*args, **kwargs)\n\n def convert_value(self, value, expression, connection, context):\n if not value:\n return []\n results = value.split(\",\")\n if self.result_field is not None:\n return list(map(self.result_field.to_python, results))\n return results\n\n\ndef get_source_field(model, field_path):\n # Get the source field from the model so that we can properly coerce values\n # this is necessary when we are using GroupConcat to return non-string fields.\n paths = field_path.split(\"__\")\n while len(paths) > 1:\n model = model._meta.get_field(paths.pop(0)).related_model\n return model._meta.get_field(paths[0])\n\n\ndef annotate_array_aggregate(queryset, **kwargs):\n model = queryset.model\n if connection.vendor == \"postgresql\" and NotNullArrayAgg is not None:\n return queryset.annotate(\n **{\n target: NotNullArrayAgg(\n source, result_field=get_source_field(model, source)\n )\n for target, source in kwargs.items()\n }\n )\n # Call values on \"pk\" to insert a GROUP BY to ensure the GROUP CONCAT\n # is called by row and not across the entire queryset.\n return queryset.values(\"pk\").annotate(\n **{\n target: GroupConcat(source, result_field=get_source_field(model, source))\n for target, source in kwargs.items()\n }\n )\n", "path": "kolibri/core/query.py"}]}
1,264
434
gh_patches_debug_28157
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-1833
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- gRPC server throws ValueError for Unix sockets **Describe your environment** ``` Ubuntu 20.04 x86_64 Python 3.10.9 ``` **Steps to reproduce** I'll post a PR that addresses the issue with a test that reproduces the problem shortly. This problem can be triggered by running the `grpc` instrumentation and utilizing Unix sockets for connections. ```py server.add_insecure_port("unix:///tmp/grpc.sock") channel = grpc.insecure_channel("unix:///tmp/grpc.sock") ``` **What is the expected behavior?** The `grpc` instrumentation should collect traces when running on a Unix socket. **What is the actual behavior?** A `ValueError` is thrown here: https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py#L254-L256 `context.peer()` is `unix:` which results in `ip, port = [""]`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 # pylint:disable=relative-beyond-top-level 16 # pylint:disable=arguments-differ 17 # pylint:disable=no-member 18 # pylint:disable=signature-differs 19 20 """ 21 Implementation of the service-side open-telemetry interceptor. 
22 """ 23 24 import logging 25 from contextlib import contextmanager 26 from urllib.parse import unquote 27 28 import grpc 29 30 from opentelemetry import trace 31 from opentelemetry.context import attach, detach 32 from opentelemetry.propagate import extract 33 from opentelemetry.semconv.trace import SpanAttributes 34 from opentelemetry.trace.status import Status, StatusCode 35 36 logger = logging.getLogger(__name__) 37 38 39 # wrap an RPC call 40 # see https://github.com/grpc/grpc/issues/18191 41 def _wrap_rpc_behavior(handler, continuation): 42 if handler is None: 43 return None 44 45 if handler.request_streaming and handler.response_streaming: 46 behavior_fn = handler.stream_stream 47 handler_factory = grpc.stream_stream_rpc_method_handler 48 elif handler.request_streaming and not handler.response_streaming: 49 behavior_fn = handler.stream_unary 50 handler_factory = grpc.stream_unary_rpc_method_handler 51 elif not handler.request_streaming and handler.response_streaming: 52 behavior_fn = handler.unary_stream 53 handler_factory = grpc.unary_stream_rpc_method_handler 54 else: 55 behavior_fn = handler.unary_unary 56 handler_factory = grpc.unary_unary_rpc_method_handler 57 58 return handler_factory( 59 continuation( 60 behavior_fn, handler.request_streaming, handler.response_streaming 61 ), 62 request_deserializer=handler.request_deserializer, 63 response_serializer=handler.response_serializer, 64 ) 65 66 67 # pylint:disable=abstract-method 68 class _OpenTelemetryServicerContext(grpc.ServicerContext): 69 def __init__(self, servicer_context, active_span): 70 self._servicer_context = servicer_context 71 self._active_span = active_span 72 self._code = grpc.StatusCode.OK 73 self._details = None 74 super().__init__() 75 76 def __getattr__(self, attr): 77 return getattr(self._servicer_context, attr) 78 79 def is_active(self, *args, **kwargs): 80 return self._servicer_context.is_active(*args, **kwargs) 81 82 def time_remaining(self, *args, **kwargs): 83 return self._servicer_context.time_remaining(*args, **kwargs) 84 85 def cancel(self, *args, **kwargs): 86 return self._servicer_context.cancel(*args, **kwargs) 87 88 def add_callback(self, *args, **kwargs): 89 return self._servicer_context.add_callback(*args, **kwargs) 90 91 def disable_next_message_compression(self): 92 return self._service_context.disable_next_message_compression() 93 94 def invocation_metadata(self, *args, **kwargs): 95 return self._servicer_context.invocation_metadata(*args, **kwargs) 96 97 def peer(self): 98 return self._servicer_context.peer() 99 100 def peer_identities(self): 101 return self._servicer_context.peer_identities() 102 103 def peer_identity_key(self): 104 return self._servicer_context.peer_identity_key() 105 106 def auth_context(self): 107 return self._servicer_context.auth_context() 108 109 def set_compression(self, compression): 110 return self._servicer_context.set_compression(compression) 111 112 def send_initial_metadata(self, *args, **kwargs): 113 return self._servicer_context.send_initial_metadata(*args, **kwargs) 114 115 def set_trailing_metadata(self, *args, **kwargs): 116 return self._servicer_context.set_trailing_metadata(*args, **kwargs) 117 118 def trailing_metadata(self): 119 return self._servicer_context.trailing_metadata() 120 121 def abort(self, code, details): 122 self._code = code 123 self._details = details 124 self._active_span.set_attribute( 125 SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0] 126 ) 127 self._active_span.set_status( 128 Status( 129 status_code=StatusCode.ERROR, 130 
description=f"{code}:{details}", 131 ) 132 ) 133 return self._servicer_context.abort(code, details) 134 135 def abort_with_status(self, status): 136 return self._servicer_context.abort_with_status(status) 137 138 def code(self): 139 if not hasattr(self._servicer_context, "code"): 140 raise RuntimeError( 141 "code() is not supported with the installed version of grpcio" 142 ) 143 return self._servicer_context.code() 144 145 def details(self): 146 if not hasattr(self._servicer_context, "details"): 147 raise RuntimeError( 148 "details() is not supported with the installed version of " 149 "grpcio" 150 ) 151 return self._servicer_context.details() 152 153 def set_code(self, code): 154 self._code = code 155 # use details if we already have it, otherwise the status description 156 details = self._details or code.value[1] 157 self._active_span.set_attribute( 158 SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0] 159 ) 160 if code != grpc.StatusCode.OK: 161 self._active_span.set_status( 162 Status( 163 status_code=StatusCode.ERROR, 164 description=f"{code}:{details}", 165 ) 166 ) 167 return self._servicer_context.set_code(code) 168 169 def set_details(self, details): 170 self._details = details 171 if self._code != grpc.StatusCode.OK: 172 self._active_span.set_status( 173 Status( 174 status_code=StatusCode.ERROR, 175 description=f"{self._code}:{details}", 176 ) 177 ) 178 return self._servicer_context.set_details(details) 179 180 181 # pylint:disable=abstract-method 182 # pylint:disable=no-self-use 183 # pylint:disable=unused-argument 184 class OpenTelemetryServerInterceptor(grpc.ServerInterceptor): 185 """ 186 A gRPC server interceptor, to add OpenTelemetry. 187 188 Usage:: 189 190 tracer = some OpenTelemetry tracer 191 filter = filters.negate(filters.method_name("service.Foo")) 192 193 interceptors = [ 194 OpenTelemetryServerInterceptor(tracer, filter), 195 ] 196 197 server = grpc.server( 198 futures.ThreadPoolExecutor(max_workers=concurrency), 199 interceptors = interceptors) 200 201 """ 202 203 def __init__(self, tracer, filter_=None): 204 self._tracer = tracer 205 self._filter = filter_ 206 207 @contextmanager 208 def _set_remote_context(self, servicer_context): 209 metadata = servicer_context.invocation_metadata() 210 if metadata: 211 md_dict = {md.key: md.value for md in metadata} 212 ctx = extract(md_dict) 213 token = attach(ctx) 214 try: 215 yield 216 finally: 217 detach(token) 218 else: 219 yield 220 221 def _start_span( 222 self, handler_call_details, context, set_status_on_exception=False 223 ): 224 # standard attributes 225 attributes = { 226 SpanAttributes.RPC_SYSTEM: "grpc", 227 SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.OK.value[0], 228 } 229 230 # if we have details about the call, split into service and method 231 if handler_call_details.method: 232 service, method = handler_call_details.method.lstrip("/").split( 233 "/", 1 234 ) 235 attributes.update( 236 { 237 SpanAttributes.RPC_METHOD: method, 238 SpanAttributes.RPC_SERVICE: service, 239 } 240 ) 241 242 # add some attributes from the metadata 243 metadata = dict(context.invocation_metadata()) 244 if "user-agent" in metadata: 245 attributes["rpc.user_agent"] = metadata["user-agent"] 246 247 # Split up the peer to keep with how other telemetry sources 248 # do it. 
This looks like: 249 # * ipv6:[::1]:57284 250 # * ipv4:127.0.0.1:57284 251 # * ipv4:10.2.1.1:57284,127.0.0.1:57284 252 # 253 try: 254 ip, port = ( 255 context.peer().split(",")[0].split(":", 1)[1].rsplit(":", 1) 256 ) 257 ip = unquote(ip) 258 attributes.update( 259 { 260 SpanAttributes.NET_PEER_IP: ip, 261 SpanAttributes.NET_PEER_PORT: port, 262 } 263 ) 264 265 # other telemetry sources add this, so we will too 266 if ip in ("[::1]", "127.0.0.1"): 267 attributes[SpanAttributes.NET_PEER_NAME] = "localhost" 268 269 except IndexError: 270 logger.warning("Failed to parse peer address '%s'", context.peer()) 271 272 return self._tracer.start_as_current_span( 273 name=handler_call_details.method, 274 kind=trace.SpanKind.SERVER, 275 attributes=attributes, 276 set_status_on_exception=set_status_on_exception, 277 ) 278 279 def intercept_service(self, continuation, handler_call_details): 280 if self._filter is not None and not self._filter(handler_call_details): 281 return continuation(handler_call_details) 282 283 def telemetry_wrapper(behavior, request_streaming, response_streaming): 284 def telemetry_interceptor(request_or_iterator, context): 285 # handle streaming responses specially 286 if response_streaming: 287 return self._intercept_server_stream( 288 behavior, 289 handler_call_details, 290 request_or_iterator, 291 context, 292 ) 293 294 with self._set_remote_context(context): 295 with self._start_span( 296 handler_call_details, 297 context, 298 set_status_on_exception=False, 299 ) as span: 300 # wrap the context 301 context = _OpenTelemetryServicerContext(context, span) 302 303 # And now we run the actual RPC. 304 try: 305 return behavior(request_or_iterator, context) 306 307 except Exception as error: 308 # Bare exceptions are likely to be gRPC aborts, which 309 # we handle in our context wrapper. 310 # Here, we're interested in uncaught exceptions. 311 # pylint:disable=unidiomatic-typecheck 312 if type(error) != Exception: 313 span.record_exception(error) 314 raise error 315 316 return telemetry_interceptor 317 318 return _wrap_rpc_behavior( 319 continuation(handler_call_details), telemetry_wrapper 320 ) 321 322 # Handle streaming responses separately - we have to do this 323 # to return a *new* generator or various upstream things 324 # get confused, or we'll lose the consistent trace 325 def _intercept_server_stream( 326 self, behavior, handler_call_details, request_or_iterator, context 327 ): 328 with self._set_remote_context(context): 329 with self._start_span( 330 handler_call_details, context, set_status_on_exception=False 331 ) as span: 332 context = _OpenTelemetryServicerContext(context, span) 333 334 try: 335 yield from behavior(request_or_iterator, context) 336 337 except Exception as error: 338 # pylint:disable=unidiomatic-typecheck 339 if type(error) != Exception: 340 span.record_exception(error) 341 raise error 342 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py --- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py +++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py @@ -250,24 +250,30 @@ # * ipv4:127.0.0.1:57284 # * ipv4:10.2.1.1:57284,127.0.0.1:57284 # - try: - ip, port = ( - context.peer().split(",")[0].split(":", 1)[1].rsplit(":", 1) - ) - ip = unquote(ip) - attributes.update( - { - SpanAttributes.NET_PEER_IP: ip, - SpanAttributes.NET_PEER_PORT: port, - } - ) + if context.peer() != "unix:": + try: + ip, port = ( + context.peer() + .split(",")[0] + .split(":", 1)[1] + .rsplit(":", 1) + ) + ip = unquote(ip) + attributes.update( + { + SpanAttributes.NET_PEER_IP: ip, + SpanAttributes.NET_PEER_PORT: port, + } + ) - # other telemetry sources add this, so we will too - if ip in ("[::1]", "127.0.0.1"): - attributes[SpanAttributes.NET_PEER_NAME] = "localhost" + # other telemetry sources add this, so we will too + if ip in ("[::1]", "127.0.0.1"): + attributes[SpanAttributes.NET_PEER_NAME] = "localhost" - except IndexError: - logger.warning("Failed to parse peer address '%s'", context.peer()) + except IndexError: + logger.warning( + "Failed to parse peer address '%s'", context.peer() + ) return self._tracer.start_as_current_span( name=handler_call_details.method,
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py\n--- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py\n+++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py\n@@ -250,24 +250,30 @@\n # * ipv4:127.0.0.1:57284\n # * ipv4:10.2.1.1:57284,127.0.0.1:57284\n #\n- try:\n- ip, port = (\n- context.peer().split(\",\")[0].split(\":\", 1)[1].rsplit(\":\", 1)\n- )\n- ip = unquote(ip)\n- attributes.update(\n- {\n- SpanAttributes.NET_PEER_IP: ip,\n- SpanAttributes.NET_PEER_PORT: port,\n- }\n- )\n+ if context.peer() != \"unix:\":\n+ try:\n+ ip, port = (\n+ context.peer()\n+ .split(\",\")[0]\n+ .split(\":\", 1)[1]\n+ .rsplit(\":\", 1)\n+ )\n+ ip = unquote(ip)\n+ attributes.update(\n+ {\n+ SpanAttributes.NET_PEER_IP: ip,\n+ SpanAttributes.NET_PEER_PORT: port,\n+ }\n+ )\n \n- # other telemetry sources add this, so we will too\n- if ip in (\"[::1]\", \"127.0.0.1\"):\n- attributes[SpanAttributes.NET_PEER_NAME] = \"localhost\"\n+ # other telemetry sources add this, so we will too\n+ if ip in (\"[::1]\", \"127.0.0.1\"):\n+ attributes[SpanAttributes.NET_PEER_NAME] = \"localhost\"\n \n- except IndexError:\n- logger.warning(\"Failed to parse peer address '%s'\", context.peer())\n+ except IndexError:\n+ logger.warning(\n+ \"Failed to parse peer address '%s'\", context.peer()\n+ )\n \n return self._tracer.start_as_current_span(\n name=handler_call_details.method,\n", "issue": "gRPC server throws ValueError for Unix sockets\n**Describe your environment**\r\n\r\n```\r\nUbuntu 20.04\r\nx86_64\r\nPython 3.10.9\r\n```\r\n\r\n**Steps to reproduce**\r\n\r\nI'll post a PR that addresses the issue with a test that reproduces the problem shortly.\r\nThis problem can be triggered by running the `grpc` instrumentation and utilizing Unix sockets for connections.\r\n\r\n```py\r\nserver.add_insecure_port(\"unix:///tmp/grpc.sock\")\r\nchannel = grpc.insecure_channel(\"unix:///tmp/grpc.sock\")\r\n```\r\n\r\n**What is the expected behavior?**\r\n\r\nThe `grpc` instrumentation should collect traces when running on a Unix socket.\r\n\r\n**What is the actual behavior?**\r\n\r\nA `ValueError` is thrown here:\r\nhttps://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py#L254-L256\r\n\r\n`context.peer()` is `unix:` which results in `ip, port = [\"\"]`.\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint:disable=relative-beyond-top-level\n# pylint:disable=arguments-differ\n# pylint:disable=no-member\n# pylint:disable=signature-differs\n\n\"\"\"\nImplementation of the service-side open-telemetry interceptor.\n\"\"\"\n\nimport logging\nfrom contextlib import 
contextmanager\nfrom urllib.parse import unquote\n\nimport grpc\n\nfrom opentelemetry import trace\nfrom opentelemetry.context import attach, detach\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\n\nlogger = logging.getLogger(__name__)\n\n\n# wrap an RPC call\n# see https://github.com/grpc/grpc/issues/18191\ndef _wrap_rpc_behavior(handler, continuation):\n if handler is None:\n return None\n\n if handler.request_streaming and handler.response_streaming:\n behavior_fn = handler.stream_stream\n handler_factory = grpc.stream_stream_rpc_method_handler\n elif handler.request_streaming and not handler.response_streaming:\n behavior_fn = handler.stream_unary\n handler_factory = grpc.stream_unary_rpc_method_handler\n elif not handler.request_streaming and handler.response_streaming:\n behavior_fn = handler.unary_stream\n handler_factory = grpc.unary_stream_rpc_method_handler\n else:\n behavior_fn = handler.unary_unary\n handler_factory = grpc.unary_unary_rpc_method_handler\n\n return handler_factory(\n continuation(\n behavior_fn, handler.request_streaming, handler.response_streaming\n ),\n request_deserializer=handler.request_deserializer,\n response_serializer=handler.response_serializer,\n )\n\n\n# pylint:disable=abstract-method\nclass _OpenTelemetryServicerContext(grpc.ServicerContext):\n def __init__(self, servicer_context, active_span):\n self._servicer_context = servicer_context\n self._active_span = active_span\n self._code = grpc.StatusCode.OK\n self._details = None\n super().__init__()\n\n def __getattr__(self, attr):\n return getattr(self._servicer_context, attr)\n\n def is_active(self, *args, **kwargs):\n return self._servicer_context.is_active(*args, **kwargs)\n\n def time_remaining(self, *args, **kwargs):\n return self._servicer_context.time_remaining(*args, **kwargs)\n\n def cancel(self, *args, **kwargs):\n return self._servicer_context.cancel(*args, **kwargs)\n\n def add_callback(self, *args, **kwargs):\n return self._servicer_context.add_callback(*args, **kwargs)\n\n def disable_next_message_compression(self):\n return self._service_context.disable_next_message_compression()\n\n def invocation_metadata(self, *args, **kwargs):\n return self._servicer_context.invocation_metadata(*args, **kwargs)\n\n def peer(self):\n return self._servicer_context.peer()\n\n def peer_identities(self):\n return self._servicer_context.peer_identities()\n\n def peer_identity_key(self):\n return self._servicer_context.peer_identity_key()\n\n def auth_context(self):\n return self._servicer_context.auth_context()\n\n def set_compression(self, compression):\n return self._servicer_context.set_compression(compression)\n\n def send_initial_metadata(self, *args, **kwargs):\n return self._servicer_context.send_initial_metadata(*args, **kwargs)\n\n def set_trailing_metadata(self, *args, **kwargs):\n return self._servicer_context.set_trailing_metadata(*args, **kwargs)\n\n def trailing_metadata(self):\n return self._servicer_context.trailing_metadata()\n\n def abort(self, code, details):\n self._code = code\n self._details = details\n self._active_span.set_attribute(\n SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0]\n )\n self._active_span.set_status(\n Status(\n status_code=StatusCode.ERROR,\n description=f\"{code}:{details}\",\n )\n )\n return self._servicer_context.abort(code, details)\n\n def abort_with_status(self, status):\n return self._servicer_context.abort_with_status(status)\n\n def 
code(self):\n if not hasattr(self._servicer_context, \"code\"):\n raise RuntimeError(\n \"code() is not supported with the installed version of grpcio\"\n )\n return self._servicer_context.code()\n\n def details(self):\n if not hasattr(self._servicer_context, \"details\"):\n raise RuntimeError(\n \"details() is not supported with the installed version of \"\n \"grpcio\"\n )\n return self._servicer_context.details()\n\n def set_code(self, code):\n self._code = code\n # use details if we already have it, otherwise the status description\n details = self._details or code.value[1]\n self._active_span.set_attribute(\n SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0]\n )\n if code != grpc.StatusCode.OK:\n self._active_span.set_status(\n Status(\n status_code=StatusCode.ERROR,\n description=f\"{code}:{details}\",\n )\n )\n return self._servicer_context.set_code(code)\n\n def set_details(self, details):\n self._details = details\n if self._code != grpc.StatusCode.OK:\n self._active_span.set_status(\n Status(\n status_code=StatusCode.ERROR,\n description=f\"{self._code}:{details}\",\n )\n )\n return self._servicer_context.set_details(details)\n\n\n# pylint:disable=abstract-method\n# pylint:disable=no-self-use\n# pylint:disable=unused-argument\nclass OpenTelemetryServerInterceptor(grpc.ServerInterceptor):\n \"\"\"\n A gRPC server interceptor, to add OpenTelemetry.\n\n Usage::\n\n tracer = some OpenTelemetry tracer\n filter = filters.negate(filters.method_name(\"service.Foo\"))\n\n interceptors = [\n OpenTelemetryServerInterceptor(tracer, filter),\n ]\n\n server = grpc.server(\n futures.ThreadPoolExecutor(max_workers=concurrency),\n interceptors = interceptors)\n\n \"\"\"\n\n def __init__(self, tracer, filter_=None):\n self._tracer = tracer\n self._filter = filter_\n\n @contextmanager\n def _set_remote_context(self, servicer_context):\n metadata = servicer_context.invocation_metadata()\n if metadata:\n md_dict = {md.key: md.value for md in metadata}\n ctx = extract(md_dict)\n token = attach(ctx)\n try:\n yield\n finally:\n detach(token)\n else:\n yield\n\n def _start_span(\n self, handler_call_details, context, set_status_on_exception=False\n ):\n # standard attributes\n attributes = {\n SpanAttributes.RPC_SYSTEM: \"grpc\",\n SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.OK.value[0],\n }\n\n # if we have details about the call, split into service and method\n if handler_call_details.method:\n service, method = handler_call_details.method.lstrip(\"/\").split(\n \"/\", 1\n )\n attributes.update(\n {\n SpanAttributes.RPC_METHOD: method,\n SpanAttributes.RPC_SERVICE: service,\n }\n )\n\n # add some attributes from the metadata\n metadata = dict(context.invocation_metadata())\n if \"user-agent\" in metadata:\n attributes[\"rpc.user_agent\"] = metadata[\"user-agent\"]\n\n # Split up the peer to keep with how other telemetry sources\n # do it. 
This looks like:\n # * ipv6:[::1]:57284\n # * ipv4:127.0.0.1:57284\n # * ipv4:10.2.1.1:57284,127.0.0.1:57284\n #\n try:\n ip, port = (\n context.peer().split(\",\")[0].split(\":\", 1)[1].rsplit(\":\", 1)\n )\n ip = unquote(ip)\n attributes.update(\n {\n SpanAttributes.NET_PEER_IP: ip,\n SpanAttributes.NET_PEER_PORT: port,\n }\n )\n\n # other telemetry sources add this, so we will too\n if ip in (\"[::1]\", \"127.0.0.1\"):\n attributes[SpanAttributes.NET_PEER_NAME] = \"localhost\"\n\n except IndexError:\n logger.warning(\"Failed to parse peer address '%s'\", context.peer())\n\n return self._tracer.start_as_current_span(\n name=handler_call_details.method,\n kind=trace.SpanKind.SERVER,\n attributes=attributes,\n set_status_on_exception=set_status_on_exception,\n )\n\n def intercept_service(self, continuation, handler_call_details):\n if self._filter is not None and not self._filter(handler_call_details):\n return continuation(handler_call_details)\n\n def telemetry_wrapper(behavior, request_streaming, response_streaming):\n def telemetry_interceptor(request_or_iterator, context):\n # handle streaming responses specially\n if response_streaming:\n return self._intercept_server_stream(\n behavior,\n handler_call_details,\n request_or_iterator,\n context,\n )\n\n with self._set_remote_context(context):\n with self._start_span(\n handler_call_details,\n context,\n set_status_on_exception=False,\n ) as span:\n # wrap the context\n context = _OpenTelemetryServicerContext(context, span)\n\n # And now we run the actual RPC.\n try:\n return behavior(request_or_iterator, context)\n\n except Exception as error:\n # Bare exceptions are likely to be gRPC aborts, which\n # we handle in our context wrapper.\n # Here, we're interested in uncaught exceptions.\n # pylint:disable=unidiomatic-typecheck\n if type(error) != Exception:\n span.record_exception(error)\n raise error\n\n return telemetry_interceptor\n\n return _wrap_rpc_behavior(\n continuation(handler_call_details), telemetry_wrapper\n )\n\n # Handle streaming responses separately - we have to do this\n # to return a *new* generator or various upstream things\n # get confused, or we'll lose the consistent trace\n def _intercept_server_stream(\n self, behavior, handler_call_details, request_or_iterator, context\n ):\n with self._set_remote_context(context):\n with self._start_span(\n handler_call_details, context, set_status_on_exception=False\n ) as span:\n context = _OpenTelemetryServicerContext(context, span)\n\n try:\n yield from behavior(request_or_iterator, context)\n\n except Exception as error:\n # pylint:disable=unidiomatic-typecheck\n if type(error) != Exception:\n span.record_exception(error)\n raise error\n", "path": "instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint:disable=relative-beyond-top-level\n# pylint:disable=arguments-differ\n# 
pylint:disable=no-member\n# pylint:disable=signature-differs\n\n\"\"\"\nImplementation of the service-side open-telemetry interceptor.\n\"\"\"\n\nimport logging\nfrom contextlib import contextmanager\nfrom urllib.parse import unquote\n\nimport grpc\n\nfrom opentelemetry import trace\nfrom opentelemetry.context import attach, detach\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\n\nlogger = logging.getLogger(__name__)\n\n\n# wrap an RPC call\n# see https://github.com/grpc/grpc/issues/18191\ndef _wrap_rpc_behavior(handler, continuation):\n if handler is None:\n return None\n\n if handler.request_streaming and handler.response_streaming:\n behavior_fn = handler.stream_stream\n handler_factory = grpc.stream_stream_rpc_method_handler\n elif handler.request_streaming and not handler.response_streaming:\n behavior_fn = handler.stream_unary\n handler_factory = grpc.stream_unary_rpc_method_handler\n elif not handler.request_streaming and handler.response_streaming:\n behavior_fn = handler.unary_stream\n handler_factory = grpc.unary_stream_rpc_method_handler\n else:\n behavior_fn = handler.unary_unary\n handler_factory = grpc.unary_unary_rpc_method_handler\n\n return handler_factory(\n continuation(\n behavior_fn, handler.request_streaming, handler.response_streaming\n ),\n request_deserializer=handler.request_deserializer,\n response_serializer=handler.response_serializer,\n )\n\n\n# pylint:disable=abstract-method\nclass _OpenTelemetryServicerContext(grpc.ServicerContext):\n def __init__(self, servicer_context, active_span):\n self._servicer_context = servicer_context\n self._active_span = active_span\n self._code = grpc.StatusCode.OK\n self._details = None\n super().__init__()\n\n def __getattr__(self, attr):\n return getattr(self._servicer_context, attr)\n\n def is_active(self, *args, **kwargs):\n return self._servicer_context.is_active(*args, **kwargs)\n\n def time_remaining(self, *args, **kwargs):\n return self._servicer_context.time_remaining(*args, **kwargs)\n\n def cancel(self, *args, **kwargs):\n return self._servicer_context.cancel(*args, **kwargs)\n\n def add_callback(self, *args, **kwargs):\n return self._servicer_context.add_callback(*args, **kwargs)\n\n def disable_next_message_compression(self):\n return self._service_context.disable_next_message_compression()\n\n def invocation_metadata(self, *args, **kwargs):\n return self._servicer_context.invocation_metadata(*args, **kwargs)\n\n def peer(self):\n return self._servicer_context.peer()\n\n def peer_identities(self):\n return self._servicer_context.peer_identities()\n\n def peer_identity_key(self):\n return self._servicer_context.peer_identity_key()\n\n def auth_context(self):\n return self._servicer_context.auth_context()\n\n def set_compression(self, compression):\n return self._servicer_context.set_compression(compression)\n\n def send_initial_metadata(self, *args, **kwargs):\n return self._servicer_context.send_initial_metadata(*args, **kwargs)\n\n def set_trailing_metadata(self, *args, **kwargs):\n return self._servicer_context.set_trailing_metadata(*args, **kwargs)\n\n def trailing_metadata(self):\n return self._servicer_context.trailing_metadata()\n\n def abort(self, code, details):\n self._code = code\n self._details = details\n self._active_span.set_attribute(\n SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0]\n )\n self._active_span.set_status(\n Status(\n status_code=StatusCode.ERROR,\n 
description=f\"{code}:{details}\",\n )\n )\n return self._servicer_context.abort(code, details)\n\n def abort_with_status(self, status):\n return self._servicer_context.abort_with_status(status)\n\n def code(self):\n if not hasattr(self._servicer_context, \"code\"):\n raise RuntimeError(\n \"code() is not supported with the installed version of grpcio\"\n )\n return self._servicer_context.code()\n\n def details(self):\n if not hasattr(self._servicer_context, \"details\"):\n raise RuntimeError(\n \"details() is not supported with the installed version of \"\n \"grpcio\"\n )\n return self._servicer_context.details()\n\n def set_code(self, code):\n self._code = code\n # use details if we already have it, otherwise the status description\n details = self._details or code.value[1]\n self._active_span.set_attribute(\n SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0]\n )\n if code != grpc.StatusCode.OK:\n self._active_span.set_status(\n Status(\n status_code=StatusCode.ERROR,\n description=f\"{code}:{details}\",\n )\n )\n return self._servicer_context.set_code(code)\n\n def set_details(self, details):\n self._details = details\n if self._code != grpc.StatusCode.OK:\n self._active_span.set_status(\n Status(\n status_code=StatusCode.ERROR,\n description=f\"{self._code}:{details}\",\n )\n )\n return self._servicer_context.set_details(details)\n\n\n# pylint:disable=abstract-method\n# pylint:disable=no-self-use\n# pylint:disable=unused-argument\nclass OpenTelemetryServerInterceptor(grpc.ServerInterceptor):\n \"\"\"\n A gRPC server interceptor, to add OpenTelemetry.\n\n Usage::\n\n tracer = some OpenTelemetry tracer\n filter = filters.negate(filters.method_name(\"service.Foo\"))\n\n interceptors = [\n OpenTelemetryServerInterceptor(tracer, filter),\n ]\n\n server = grpc.server(\n futures.ThreadPoolExecutor(max_workers=concurrency),\n interceptors = interceptors)\n\n \"\"\"\n\n def __init__(self, tracer, filter_=None):\n self._tracer = tracer\n self._filter = filter_\n\n @contextmanager\n def _set_remote_context(self, servicer_context):\n metadata = servicer_context.invocation_metadata()\n if metadata:\n md_dict = {md.key: md.value for md in metadata}\n ctx = extract(md_dict)\n token = attach(ctx)\n try:\n yield\n finally:\n detach(token)\n else:\n yield\n\n def _start_span(\n self, handler_call_details, context, set_status_on_exception=False\n ):\n # standard attributes\n attributes = {\n SpanAttributes.RPC_SYSTEM: \"grpc\",\n SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.OK.value[0],\n }\n\n # if we have details about the call, split into service and method\n if handler_call_details.method:\n service, method = handler_call_details.method.lstrip(\"/\").split(\n \"/\", 1\n )\n attributes.update(\n {\n SpanAttributes.RPC_METHOD: method,\n SpanAttributes.RPC_SERVICE: service,\n }\n )\n\n # add some attributes from the metadata\n metadata = dict(context.invocation_metadata())\n if \"user-agent\" in metadata:\n attributes[\"rpc.user_agent\"] = metadata[\"user-agent\"]\n\n # Split up the peer to keep with how other telemetry sources\n # do it. 
This looks like:\n # * ipv6:[::1]:57284\n # * ipv4:127.0.0.1:57284\n # * ipv4:10.2.1.1:57284,127.0.0.1:57284\n #\n if context.peer() != \"unix:\":\n try:\n ip, port = (\n context.peer()\n .split(\",\")[0]\n .split(\":\", 1)[1]\n .rsplit(\":\", 1)\n )\n ip = unquote(ip)\n attributes.update(\n {\n SpanAttributes.NET_PEER_IP: ip,\n SpanAttributes.NET_PEER_PORT: port,\n }\n )\n\n # other telemetry sources add this, so we will too\n if ip in (\"[::1]\", \"127.0.0.1\"):\n attributes[SpanAttributes.NET_PEER_NAME] = \"localhost\"\n\n except IndexError:\n logger.warning(\n \"Failed to parse peer address '%s'\", context.peer()\n )\n\n return self._tracer.start_as_current_span(\n name=handler_call_details.method,\n kind=trace.SpanKind.SERVER,\n attributes=attributes,\n set_status_on_exception=set_status_on_exception,\n )\n\n def intercept_service(self, continuation, handler_call_details):\n if self._filter is not None and not self._filter(handler_call_details):\n return continuation(handler_call_details)\n\n def telemetry_wrapper(behavior, request_streaming, response_streaming):\n def telemetry_interceptor(request_or_iterator, context):\n # handle streaming responses specially\n if response_streaming:\n return self._intercept_server_stream(\n behavior,\n handler_call_details,\n request_or_iterator,\n context,\n )\n\n with self._set_remote_context(context):\n with self._start_span(\n handler_call_details,\n context,\n set_status_on_exception=False,\n ) as span:\n # wrap the context\n context = _OpenTelemetryServicerContext(context, span)\n\n # And now we run the actual RPC.\n try:\n return behavior(request_or_iterator, context)\n\n except Exception as error:\n # Bare exceptions are likely to be gRPC aborts, which\n # we handle in our context wrapper.\n # Here, we're interested in uncaught exceptions.\n # pylint:disable=unidiomatic-typecheck\n if type(error) != Exception:\n span.record_exception(error)\n raise error\n\n return telemetry_interceptor\n\n return _wrap_rpc_behavior(\n continuation(handler_call_details), telemetry_wrapper\n )\n\n # Handle streaming responses separately - we have to do this\n # to return a *new* generator or various upstream things\n # get confused, or we'll lose the consistent trace\n def _intercept_server_stream(\n self, behavior, handler_call_details, request_or_iterator, context\n ):\n with self._set_remote_context(context):\n with self._start_span(\n handler_call_details, context, set_status_on_exception=False\n ) as span:\n context = _OpenTelemetryServicerContext(context, span)\n\n try:\n yield from behavior(request_or_iterator, context)\n\n except Exception as error:\n # pylint:disable=unidiomatic-typecheck\n if type(error) != Exception:\n span.record_exception(error)\n raise error\n", "path": "instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py"}]}
num_tokens: 3,857
num_tokens_diff: 526

problem_id: gh_patches_debug_9104
source: rasdani/github-patches
task_type: git_diff
in_source_id: mesonbuild/meson-3943
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Default value of UserFeatureOption is `enabled` instead of `auto` According to `meson --help`: ``` --auto-features {enabled,disabled,auto} Override value of all 'auto' features (default: auto). ``` However, in reality the default value is `enabled`. We should fix it to be `auto`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mesonbuild/optinterpreter.py` Content: ``` 1 # Copyright 2013-2014 The Meson development team 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os, re 16 import functools 17 18 from . import mparser 19 from . import coredata 20 from . import mesonlib 21 from . import compilers 22 23 forbidden_option_names = coredata.get_builtin_options() 24 forbidden_prefixes = [lang + '_' for lang in compilers.all_languages] + ['b_', 'backend_'] 25 26 def is_invalid_name(name): 27 if name in forbidden_option_names: 28 return True 29 pref = name.split('_')[0] + '_' 30 if pref in forbidden_prefixes: 31 return True 32 return False 33 34 class OptionException(mesonlib.MesonException): 35 pass 36 37 38 def permitted_kwargs(permitted): 39 """Function that validates kwargs for options.""" 40 def _wraps(func): 41 @functools.wraps(func) 42 def _inner(name, description, kwargs): 43 bad = [a for a in kwargs.keys() if a not in permitted] 44 if bad: 45 raise OptionException('Invalid kwargs for option "{}": "{}"'.format( 46 name, ' '.join(bad))) 47 return func(name, description, kwargs) 48 return _inner 49 return _wraps 50 51 52 optname_regex = re.compile('[^a-zA-Z0-9_-]') 53 54 @permitted_kwargs({'value', 'yield'}) 55 def StringParser(name, description, kwargs): 56 return coredata.UserStringOption(name, 57 description, 58 kwargs.get('value', ''), 59 kwargs.get('choices', []), 60 kwargs.get('yield', coredata.default_yielding)) 61 62 @permitted_kwargs({'value', 'yield'}) 63 def BooleanParser(name, description, kwargs): 64 return coredata.UserBooleanOption(name, description, 65 kwargs.get('value', True), 66 kwargs.get('yield', coredata.default_yielding)) 67 68 @permitted_kwargs({'value', 'yield', 'choices'}) 69 def ComboParser(name, description, kwargs): 70 if 'choices' not in kwargs: 71 raise OptionException('Combo option missing "choices" keyword.') 72 choices = kwargs['choices'] 73 if not isinstance(choices, list): 74 raise OptionException('Combo choices must be an array.') 75 for i in choices: 76 if not isinstance(i, str): 77 raise OptionException('Combo choice elements must be strings.') 78 return coredata.UserComboOption(name, 79 description, 80 choices, 81 kwargs.get('value', choices[0]), 82 kwargs.get('yield', coredata.default_yielding),) 83 84 85 @permitted_kwargs({'value', 'min', 'max', 'yield'}) 86 def IntegerParser(name, description, kwargs): 87 if 'value' not in kwargs: 88 raise OptionException('Integer option must contain value argument.') 89 return 
coredata.UserIntegerOption(name, 90 description, 91 kwargs.get('min', None), 92 kwargs.get('max', None), 93 kwargs['value'], 94 kwargs.get('yield', coredata.default_yielding)) 95 96 # FIXME: Cannot use FeatureNew while parsing options because we parse it before 97 # reading options in project(). See func_project() in interpreter.py 98 #@FeatureNew('array type option()', '0.44.0') 99 @permitted_kwargs({'value', 'yield', 'choices'}) 100 def string_array_parser(name, description, kwargs): 101 if 'choices' in kwargs: 102 choices = kwargs['choices'] 103 if not isinstance(choices, list): 104 raise OptionException('Array choices must be an array.') 105 for i in choices: 106 if not isinstance(i, str): 107 raise OptionException('Array choice elements must be strings.') 108 value = kwargs.get('value', choices) 109 else: 110 choices = None 111 value = kwargs.get('value', []) 112 if not isinstance(value, list): 113 raise OptionException('Array choices must be passed as an array.') 114 return coredata.UserArrayOption(name, 115 description, 116 value, 117 choices=choices, 118 yielding=kwargs.get('yield', coredata.default_yielding)) 119 120 @permitted_kwargs({'value', 'yield'}) 121 def FeatureParser(name, description, kwargs): 122 return coredata.UserFeatureOption(name, 123 description, 124 kwargs.get('value', 'enabled'), 125 yielding=kwargs.get('yield', coredata.default_yielding)) 126 127 option_types = {'string': StringParser, 128 'boolean': BooleanParser, 129 'combo': ComboParser, 130 'integer': IntegerParser, 131 'array': string_array_parser, 132 'feature': FeatureParser, 133 } 134 135 class OptionInterpreter: 136 def __init__(self, subproject): 137 self.options = {} 138 self.subproject = subproject 139 140 def process(self, option_file): 141 try: 142 with open(option_file, 'r', encoding='utf8') as f: 143 ast = mparser.Parser(f.read(), '').parse() 144 except mesonlib.MesonException as me: 145 me.file = option_file 146 raise me 147 if not isinstance(ast, mparser.CodeBlockNode): 148 e = OptionException('Option file is malformed.') 149 e.lineno = ast.lineno() 150 raise e 151 for cur in ast.lines: 152 try: 153 self.evaluate_statement(cur) 154 except Exception as e: 155 e.lineno = cur.lineno 156 e.colno = cur.colno 157 e.file = os.path.join('meson_options.txt') 158 raise e 159 160 def reduce_single(self, arg): 161 if isinstance(arg, str): 162 return arg 163 elif isinstance(arg, (mparser.StringNode, mparser.BooleanNode, 164 mparser.NumberNode)): 165 return arg.value 166 elif isinstance(arg, mparser.ArrayNode): 167 return [self.reduce_single(curarg) for curarg in arg.args.arguments] 168 else: 169 raise OptionException('Arguments may only be string, int, bool, or array of those.') 170 171 def reduce_arguments(self, args): 172 assert(isinstance(args, mparser.ArgumentNode)) 173 if args.incorrect_order(): 174 raise OptionException('All keyword arguments must be after positional arguments.') 175 reduced_pos = [self.reduce_single(arg) for arg in args.arguments] 176 reduced_kw = {} 177 for key in args.kwargs.keys(): 178 if not isinstance(key, str): 179 raise OptionException('Keyword argument name is not a string.') 180 a = args.kwargs[key] 181 reduced_kw[key] = self.reduce_single(a) 182 return reduced_pos, reduced_kw 183 184 def evaluate_statement(self, node): 185 if not isinstance(node, mparser.FunctionNode): 186 raise OptionException('Option file may only contain option definitions') 187 func_name = node.func_name 188 if func_name != 'option': 189 raise OptionException('Only calls to option() are allowed in 
option files.') 190 (posargs, kwargs) = self.reduce_arguments(node.args) 191 192 # FIXME: Cannot use FeatureNew while parsing options because we parse 193 # it before reading options in project(). See func_project() in 194 # interpreter.py 195 #if 'yield' in kwargs: 196 # FeatureNew('option yield', '0.45.0').use(self.subproject) 197 198 if 'type' not in kwargs: 199 raise OptionException('Option call missing mandatory "type" keyword argument') 200 opt_type = kwargs.pop('type') 201 if opt_type not in option_types: 202 raise OptionException('Unknown type %s.' % opt_type) 203 if len(posargs) != 1: 204 raise OptionException('Option() must have one (and only one) positional argument') 205 opt_name = posargs[0] 206 if not isinstance(opt_name, str): 207 raise OptionException('Positional argument must be a string.') 208 if optname_regex.search(opt_name) is not None: 209 raise OptionException('Option names can only contain letters, numbers or dashes.') 210 if is_invalid_name(opt_name): 211 raise OptionException('Option name %s is reserved.' % opt_name) 212 if self.subproject != '': 213 opt_name = self.subproject + ':' + opt_name 214 opt = option_types[opt_type](opt_name, kwargs.pop('description', ''), kwargs) 215 if opt.description == '': 216 opt.description = opt_name 217 self.options[opt_name] = opt 218 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mesonbuild/optinterpreter.py b/mesonbuild/optinterpreter.py --- a/mesonbuild/optinterpreter.py +++ b/mesonbuild/optinterpreter.py @@ -121,7 +121,7 @@ def FeatureParser(name, description, kwargs): return coredata.UserFeatureOption(name, description, - kwargs.get('value', 'enabled'), + kwargs.get('value', 'auto'), yielding=kwargs.get('yield', coredata.default_yielding)) option_types = {'string': StringParser,
{"golden_diff": "diff --git a/mesonbuild/optinterpreter.py b/mesonbuild/optinterpreter.py\n--- a/mesonbuild/optinterpreter.py\n+++ b/mesonbuild/optinterpreter.py\n@@ -121,7 +121,7 @@\n def FeatureParser(name, description, kwargs):\n return coredata.UserFeatureOption(name,\n description,\n- kwargs.get('value', 'enabled'),\n+ kwargs.get('value', 'auto'),\n yielding=kwargs.get('yield', coredata.default_yielding))\n \n option_types = {'string': StringParser,\n", "issue": "Default value of UserFeatureOption is `enabled` instead of `auto`\nAccording to `meson --help`:\r\n\r\n```\r\n --auto-features {enabled,disabled,auto}\r\n Override value of all 'auto' features (default: auto).\r\n```\r\n\r\nHowever, in reality the default value is `enabled`. We should fix it to be `auto`.\n", "before_files": [{"content": "# Copyright 2013-2014 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os, re\nimport functools\n\nfrom . import mparser\nfrom . import coredata\nfrom . import mesonlib\nfrom . import compilers\n\nforbidden_option_names = coredata.get_builtin_options()\nforbidden_prefixes = [lang + '_' for lang in compilers.all_languages] + ['b_', 'backend_']\n\ndef is_invalid_name(name):\n if name in forbidden_option_names:\n return True\n pref = name.split('_')[0] + '_'\n if pref in forbidden_prefixes:\n return True\n return False\n\nclass OptionException(mesonlib.MesonException):\n pass\n\n\ndef permitted_kwargs(permitted):\n \"\"\"Function that validates kwargs for options.\"\"\"\n def _wraps(func):\n @functools.wraps(func)\n def _inner(name, description, kwargs):\n bad = [a for a in kwargs.keys() if a not in permitted]\n if bad:\n raise OptionException('Invalid kwargs for option \"{}\": \"{}\"'.format(\n name, ' '.join(bad)))\n return func(name, description, kwargs)\n return _inner\n return _wraps\n\n\noptname_regex = re.compile('[^a-zA-Z0-9_-]')\n\n@permitted_kwargs({'value', 'yield'})\ndef StringParser(name, description, kwargs):\n return coredata.UserStringOption(name,\n description,\n kwargs.get('value', ''),\n kwargs.get('choices', []),\n kwargs.get('yield', coredata.default_yielding))\n\n@permitted_kwargs({'value', 'yield'})\ndef BooleanParser(name, description, kwargs):\n return coredata.UserBooleanOption(name, description,\n kwargs.get('value', True),\n kwargs.get('yield', coredata.default_yielding))\n\n@permitted_kwargs({'value', 'yield', 'choices'})\ndef ComboParser(name, description, kwargs):\n if 'choices' not in kwargs:\n raise OptionException('Combo option missing \"choices\" keyword.')\n choices = kwargs['choices']\n if not isinstance(choices, list):\n raise OptionException('Combo choices must be an array.')\n for i in choices:\n if not isinstance(i, str):\n raise OptionException('Combo choice elements must be strings.')\n return coredata.UserComboOption(name,\n description,\n choices,\n kwargs.get('value', choices[0]),\n kwargs.get('yield', coredata.default_yielding),)\n\n\n@permitted_kwargs({'value', 'min', 'max', 'yield'})\ndef IntegerParser(name, 
description, kwargs):\n if 'value' not in kwargs:\n raise OptionException('Integer option must contain value argument.')\n return coredata.UserIntegerOption(name,\n description,\n kwargs.get('min', None),\n kwargs.get('max', None),\n kwargs['value'],\n kwargs.get('yield', coredata.default_yielding))\n\n# FIXME: Cannot use FeatureNew while parsing options because we parse it before\n# reading options in project(). See func_project() in interpreter.py\n#@FeatureNew('array type option()', '0.44.0')\n@permitted_kwargs({'value', 'yield', 'choices'})\ndef string_array_parser(name, description, kwargs):\n if 'choices' in kwargs:\n choices = kwargs['choices']\n if not isinstance(choices, list):\n raise OptionException('Array choices must be an array.')\n for i in choices:\n if not isinstance(i, str):\n raise OptionException('Array choice elements must be strings.')\n value = kwargs.get('value', choices)\n else:\n choices = None\n value = kwargs.get('value', [])\n if not isinstance(value, list):\n raise OptionException('Array choices must be passed as an array.')\n return coredata.UserArrayOption(name,\n description,\n value,\n choices=choices,\n yielding=kwargs.get('yield', coredata.default_yielding))\n\n@permitted_kwargs({'value', 'yield'})\ndef FeatureParser(name, description, kwargs):\n return coredata.UserFeatureOption(name,\n description,\n kwargs.get('value', 'enabled'),\n yielding=kwargs.get('yield', coredata.default_yielding))\n\noption_types = {'string': StringParser,\n 'boolean': BooleanParser,\n 'combo': ComboParser,\n 'integer': IntegerParser,\n 'array': string_array_parser,\n 'feature': FeatureParser,\n }\n\nclass OptionInterpreter:\n def __init__(self, subproject):\n self.options = {}\n self.subproject = subproject\n\n def process(self, option_file):\n try:\n with open(option_file, 'r', encoding='utf8') as f:\n ast = mparser.Parser(f.read(), '').parse()\n except mesonlib.MesonException as me:\n me.file = option_file\n raise me\n if not isinstance(ast, mparser.CodeBlockNode):\n e = OptionException('Option file is malformed.')\n e.lineno = ast.lineno()\n raise e\n for cur in ast.lines:\n try:\n self.evaluate_statement(cur)\n except Exception as e:\n e.lineno = cur.lineno\n e.colno = cur.colno\n e.file = os.path.join('meson_options.txt')\n raise e\n\n def reduce_single(self, arg):\n if isinstance(arg, str):\n return arg\n elif isinstance(arg, (mparser.StringNode, mparser.BooleanNode,\n mparser.NumberNode)):\n return arg.value\n elif isinstance(arg, mparser.ArrayNode):\n return [self.reduce_single(curarg) for curarg in arg.args.arguments]\n else:\n raise OptionException('Arguments may only be string, int, bool, or array of those.')\n\n def reduce_arguments(self, args):\n assert(isinstance(args, mparser.ArgumentNode))\n if args.incorrect_order():\n raise OptionException('All keyword arguments must be after positional arguments.')\n reduced_pos = [self.reduce_single(arg) for arg in args.arguments]\n reduced_kw = {}\n for key in args.kwargs.keys():\n if not isinstance(key, str):\n raise OptionException('Keyword argument name is not a string.')\n a = args.kwargs[key]\n reduced_kw[key] = self.reduce_single(a)\n return reduced_pos, reduced_kw\n\n def evaluate_statement(self, node):\n if not isinstance(node, mparser.FunctionNode):\n raise OptionException('Option file may only contain option definitions')\n func_name = node.func_name\n if func_name != 'option':\n raise OptionException('Only calls to option() are allowed in option files.')\n (posargs, kwargs) = 
self.reduce_arguments(node.args)\n\n # FIXME: Cannot use FeatureNew while parsing options because we parse\n # it before reading options in project(). See func_project() in\n # interpreter.py\n #if 'yield' in kwargs:\n # FeatureNew('option yield', '0.45.0').use(self.subproject)\n\n if 'type' not in kwargs:\n raise OptionException('Option call missing mandatory \"type\" keyword argument')\n opt_type = kwargs.pop('type')\n if opt_type not in option_types:\n raise OptionException('Unknown type %s.' % opt_type)\n if len(posargs) != 1:\n raise OptionException('Option() must have one (and only one) positional argument')\n opt_name = posargs[0]\n if not isinstance(opt_name, str):\n raise OptionException('Positional argument must be a string.')\n if optname_regex.search(opt_name) is not None:\n raise OptionException('Option names can only contain letters, numbers or dashes.')\n if is_invalid_name(opt_name):\n raise OptionException('Option name %s is reserved.' % opt_name)\n if self.subproject != '':\n opt_name = self.subproject + ':' + opt_name\n opt = option_types[opt_type](opt_name, kwargs.pop('description', ''), kwargs)\n if opt.description == '':\n opt.description = opt_name\n self.options[opt_name] = opt\n", "path": "mesonbuild/optinterpreter.py"}], "after_files": [{"content": "# Copyright 2013-2014 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os, re\nimport functools\n\nfrom . import mparser\nfrom . import coredata\nfrom . import mesonlib\nfrom . 
import compilers\n\nforbidden_option_names = coredata.get_builtin_options()\nforbidden_prefixes = [lang + '_' for lang in compilers.all_languages] + ['b_', 'backend_']\n\ndef is_invalid_name(name):\n if name in forbidden_option_names:\n return True\n pref = name.split('_')[0] + '_'\n if pref in forbidden_prefixes:\n return True\n return False\n\nclass OptionException(mesonlib.MesonException):\n pass\n\n\ndef permitted_kwargs(permitted):\n \"\"\"Function that validates kwargs for options.\"\"\"\n def _wraps(func):\n @functools.wraps(func)\n def _inner(name, description, kwargs):\n bad = [a for a in kwargs.keys() if a not in permitted]\n if bad:\n raise OptionException('Invalid kwargs for option \"{}\": \"{}\"'.format(\n name, ' '.join(bad)))\n return func(name, description, kwargs)\n return _inner\n return _wraps\n\n\noptname_regex = re.compile('[^a-zA-Z0-9_-]')\n\n@permitted_kwargs({'value', 'yield'})\ndef StringParser(name, description, kwargs):\n return coredata.UserStringOption(name,\n description,\n kwargs.get('value', ''),\n kwargs.get('choices', []),\n kwargs.get('yield', coredata.default_yielding))\n\n@permitted_kwargs({'value', 'yield'})\ndef BooleanParser(name, description, kwargs):\n return coredata.UserBooleanOption(name, description,\n kwargs.get('value', True),\n kwargs.get('yield', coredata.default_yielding))\n\n@permitted_kwargs({'value', 'yield', 'choices'})\ndef ComboParser(name, description, kwargs):\n if 'choices' not in kwargs:\n raise OptionException('Combo option missing \"choices\" keyword.')\n choices = kwargs['choices']\n if not isinstance(choices, list):\n raise OptionException('Combo choices must be an array.')\n for i in choices:\n if not isinstance(i, str):\n raise OptionException('Combo choice elements must be strings.')\n return coredata.UserComboOption(name,\n description,\n choices,\n kwargs.get('value', choices[0]),\n kwargs.get('yield', coredata.default_yielding),)\n\n\n@permitted_kwargs({'value', 'min', 'max', 'yield'})\ndef IntegerParser(name, description, kwargs):\n if 'value' not in kwargs:\n raise OptionException('Integer option must contain value argument.')\n return coredata.UserIntegerOption(name,\n description,\n kwargs.get('min', None),\n kwargs.get('max', None),\n kwargs['value'],\n kwargs.get('yield', coredata.default_yielding))\n\n# FIXME: Cannot use FeatureNew while parsing options because we parse it before\n# reading options in project(). 
See func_project() in interpreter.py\n#@FeatureNew('array type option()', '0.44.0')\n@permitted_kwargs({'value', 'yield', 'choices'})\ndef string_array_parser(name, description, kwargs):\n if 'choices' in kwargs:\n choices = kwargs['choices']\n if not isinstance(choices, list):\n raise OptionException('Array choices must be an array.')\n for i in choices:\n if not isinstance(i, str):\n raise OptionException('Array choice elements must be strings.')\n value = kwargs.get('value', choices)\n else:\n choices = None\n value = kwargs.get('value', [])\n if not isinstance(value, list):\n raise OptionException('Array choices must be passed as an array.')\n return coredata.UserArrayOption(name,\n description,\n value,\n choices=choices,\n yielding=kwargs.get('yield', coredata.default_yielding))\n\n@permitted_kwargs({'value', 'yield'})\ndef FeatureParser(name, description, kwargs):\n return coredata.UserFeatureOption(name,\n description,\n kwargs.get('value', 'auto'),\n yielding=kwargs.get('yield', coredata.default_yielding))\n\noption_types = {'string': StringParser,\n 'boolean': BooleanParser,\n 'combo': ComboParser,\n 'integer': IntegerParser,\n 'array': string_array_parser,\n 'feature': FeatureParser,\n }\n\nclass OptionInterpreter:\n def __init__(self, subproject):\n self.options = {}\n self.subproject = subproject\n\n def process(self, option_file):\n try:\n with open(option_file, 'r', encoding='utf8') as f:\n ast = mparser.Parser(f.read(), '').parse()\n except mesonlib.MesonException as me:\n me.file = option_file\n raise me\n if not isinstance(ast, mparser.CodeBlockNode):\n e = OptionException('Option file is malformed.')\n e.lineno = ast.lineno()\n raise e\n for cur in ast.lines:\n try:\n self.evaluate_statement(cur)\n except Exception as e:\n e.lineno = cur.lineno\n e.colno = cur.colno\n e.file = os.path.join('meson_options.txt')\n raise e\n\n def reduce_single(self, arg):\n if isinstance(arg, str):\n return arg\n elif isinstance(arg, (mparser.StringNode, mparser.BooleanNode,\n mparser.NumberNode)):\n return arg.value\n elif isinstance(arg, mparser.ArrayNode):\n return [self.reduce_single(curarg) for curarg in arg.args.arguments]\n else:\n raise OptionException('Arguments may only be string, int, bool, or array of those.')\n\n def reduce_arguments(self, args):\n assert(isinstance(args, mparser.ArgumentNode))\n if args.incorrect_order():\n raise OptionException('All keyword arguments must be after positional arguments.')\n reduced_pos = [self.reduce_single(arg) for arg in args.arguments]\n reduced_kw = {}\n for key in args.kwargs.keys():\n if not isinstance(key, str):\n raise OptionException('Keyword argument name is not a string.')\n a = args.kwargs[key]\n reduced_kw[key] = self.reduce_single(a)\n return reduced_pos, reduced_kw\n\n def evaluate_statement(self, node):\n if not isinstance(node, mparser.FunctionNode):\n raise OptionException('Option file may only contain option definitions')\n func_name = node.func_name\n if func_name != 'option':\n raise OptionException('Only calls to option() are allowed in option files.')\n (posargs, kwargs) = self.reduce_arguments(node.args)\n\n # FIXME: Cannot use FeatureNew while parsing options because we parse\n # it before reading options in project(). 
See func_project() in\n # interpreter.py\n #if 'yield' in kwargs:\n # FeatureNew('option yield', '0.45.0').use(self.subproject)\n\n if 'type' not in kwargs:\n raise OptionException('Option call missing mandatory \"type\" keyword argument')\n opt_type = kwargs.pop('type')\n if opt_type not in option_types:\n raise OptionException('Unknown type %s.' % opt_type)\n if len(posargs) != 1:\n raise OptionException('Option() must have one (and only one) positional argument')\n opt_name = posargs[0]\n if not isinstance(opt_name, str):\n raise OptionException('Positional argument must be a string.')\n if optname_regex.search(opt_name) is not None:\n raise OptionException('Option names can only contain letters, numbers or dashes.')\n if is_invalid_name(opt_name):\n raise OptionException('Option name %s is reserved.' % opt_name)\n if self.subproject != '':\n opt_name = self.subproject + ':' + opt_name\n opt = option_types[opt_type](opt_name, kwargs.pop('description', ''), kwargs)\n if opt.description == '':\n opt.description = opt_name\n self.options[opt_name] = opt\n", "path": "mesonbuild/optinterpreter.py"}]}
num_tokens: 2,702
num_tokens_diff: 115

problem_id: gh_patches_debug_38923
source: rasdani/github-patches
task_type: git_diff
in_source_id: goauthentik/authentik-8858
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- False version status in admin dashboard for AirGapped environments **Describe the bug** In an AirGapped environment with `AUTHENTIK_DISABLE_UPDATE_CHECK=true`, or when the version check has not yet been performed, the version tile on the admin dashboard will always state its `Up-to-date!` which may not actually be the case. **To Reproduce** Steps to reproduce the behavior: 1. Deploy a fresh Authentik installation on an older version (e.g. `2023.10.7`) and ensure `AUTHENTIK_DISABLE_UPDATE_CHECK=true` is set. 2. Go to the admin dashboard 3. Observe the version tile stating proudly that its up-to-date. **Expected behavior** Not incorrectly stating its up-to-date as that would give a false sense of security. **Screenshots** ![image](https://github.com/goauthentik/authentik/assets/68224306/0a470c2b-73f7-4ba1-ad83-7e35a7f9a053) **Logs** N/a **Version and Deployment (please complete the following information):** - authentik version: 2024.2.0 - Deployment: docker-compose **Additional context** N/a --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `authentik/admin/api/version.py` Content: ``` 1 """authentik administration overview""" 2 3 from django.core.cache import cache 4 from drf_spectacular.utils import extend_schema 5 from packaging.version import parse 6 from rest_framework.fields import SerializerMethodField 7 from rest_framework.permissions import IsAuthenticated 8 from rest_framework.request import Request 9 from rest_framework.response import Response 10 from rest_framework.views import APIView 11 12 from authentik import __version__, get_build_hash 13 from authentik.admin.tasks import VERSION_CACHE_KEY, update_latest_version 14 from authentik.core.api.utils import PassiveSerializer 15 16 17 class VersionSerializer(PassiveSerializer): 18 """Get running and latest version.""" 19 20 version_current = SerializerMethodField() 21 version_latest = SerializerMethodField() 22 build_hash = SerializerMethodField() 23 outdated = SerializerMethodField() 24 25 def get_build_hash(self, _) -> str: 26 """Get build hash, if version is not latest or released""" 27 return get_build_hash() 28 29 def get_version_current(self, _) -> str: 30 """Get current version""" 31 return __version__ 32 33 def get_version_latest(self, _) -> str: 34 """Get latest version from cache""" 35 version_in_cache = cache.get(VERSION_CACHE_KEY) 36 if not version_in_cache: # pragma: no cover 37 update_latest_version.delay() 38 return __version__ 39 return version_in_cache 40 41 def get_outdated(self, instance) -> bool: 42 """Check if we're running the latest version""" 43 return parse(self.get_version_current(instance)) < parse(self.get_version_latest(instance)) 44 45 46 class VersionView(APIView): 47 """Get running and latest version.""" 48 49 permission_classes = [IsAuthenticated] 50 pagination_class = None 51 filter_backends = [] 52 53 @extend_schema(responses={200: VersionSerializer(many=False)}) 54 def get(self, request: Request) -> Response: 55 """Get running and latest version.""" 56 return Response(VersionSerializer(True).data) 57 ``` Path: `authentik/admin/tasks.py` Content: ``` 1 """authentik admin tasks""" 2 3 import re 4 5 from django.core.cache import cache 6 from django.core.validators import URLValidator 7 from django.db import DatabaseError, InternalError, ProgrammingError 8 from packaging.version import parse 9 
from requests import RequestException 10 from structlog.stdlib import get_logger 11 12 from authentik import __version__, get_build_hash 13 from authentik.admin.apps import PROM_INFO 14 from authentik.events.models import Event, EventAction, Notification 15 from authentik.events.system_tasks import SystemTask, TaskStatus, prefill_task 16 from authentik.lib.config import CONFIG 17 from authentik.lib.utils.http import get_http_session 18 from authentik.root.celery import CELERY_APP 19 20 LOGGER = get_logger() 21 VERSION_CACHE_KEY = "authentik_latest_version" 22 VERSION_CACHE_TIMEOUT = 8 * 60 * 60 # 8 hours 23 # Chop of the first ^ because we want to search the entire string 24 URL_FINDER = URLValidator.regex.pattern[1:] 25 LOCAL_VERSION = parse(__version__) 26 27 28 def _set_prom_info(): 29 """Set prometheus info for version""" 30 PROM_INFO.info( 31 { 32 "version": __version__, 33 "latest": cache.get(VERSION_CACHE_KEY, ""), 34 "build_hash": get_build_hash(), 35 } 36 ) 37 38 39 @CELERY_APP.task( 40 throws=(DatabaseError, ProgrammingError, InternalError), 41 ) 42 def clear_update_notifications(): 43 """Clear update notifications on startup if the notification was for the version 44 we're running now.""" 45 for notification in Notification.objects.filter(event__action=EventAction.UPDATE_AVAILABLE): 46 if "new_version" not in notification.event.context: 47 continue 48 notification_version = notification.event.context["new_version"] 49 if LOCAL_VERSION >= parse(notification_version): 50 notification.delete() 51 52 53 @CELERY_APP.task(bind=True, base=SystemTask) 54 @prefill_task 55 def update_latest_version(self: SystemTask): 56 """Update latest version info""" 57 if CONFIG.get_bool("disable_update_check"): 58 cache.set(VERSION_CACHE_KEY, "0.0.0", VERSION_CACHE_TIMEOUT) 59 self.set_status(TaskStatus.WARNING, "Version check disabled.") 60 return 61 try: 62 response = get_http_session().get( 63 "https://version.goauthentik.io/version.json", 64 ) 65 response.raise_for_status() 66 data = response.json() 67 upstream_version = data.get("stable", {}).get("version") 68 cache.set(VERSION_CACHE_KEY, upstream_version, VERSION_CACHE_TIMEOUT) 69 self.set_status(TaskStatus.SUCCESSFUL, "Successfully updated latest Version") 70 _set_prom_info() 71 # Check if upstream version is newer than what we're running, 72 # and if no event exists yet, create one. 73 if LOCAL_VERSION < parse(upstream_version): 74 # Event has already been created, don't create duplicate 75 if Event.objects.filter( 76 action=EventAction.UPDATE_AVAILABLE, 77 context__new_version=upstream_version, 78 ).exists(): 79 return 80 event_dict = {"new_version": upstream_version} 81 if match := re.search(URL_FINDER, data.get("stable", {}).get("changelog", "")): 82 event_dict["message"] = f"Changelog: {match.group()}" 83 Event.new(EventAction.UPDATE_AVAILABLE, **event_dict).save() 84 except (RequestException, IndexError) as exc: 85 cache.set(VERSION_CACHE_KEY, "0.0.0", VERSION_CACHE_TIMEOUT) 86 self.set_error(exc) 87 88 89 _set_prom_info() 90 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/authentik/admin/api/version.py b/authentik/admin/api/version.py --- a/authentik/admin/api/version.py +++ b/authentik/admin/api/version.py @@ -10,7 +10,7 @@ from rest_framework.views import APIView from authentik import __version__, get_build_hash -from authentik.admin.tasks import VERSION_CACHE_KEY, update_latest_version +from authentik.admin.tasks import VERSION_CACHE_KEY, VERSION_NULL, update_latest_version from authentik.core.api.utils import PassiveSerializer @@ -19,6 +19,7 @@ version_current = SerializerMethodField() version_latest = SerializerMethodField() + version_latest_valid = SerializerMethodField() build_hash = SerializerMethodField() outdated = SerializerMethodField() @@ -38,6 +39,10 @@ return __version__ return version_in_cache + def get_version_latest_valid(self, _) -> str: + """Check if latest version is valid""" + return cache.get(VERSION_CACHE_KEY) != VERSION_NULL + def get_outdated(self, instance) -> bool: """Check if we're running the latest version""" return parse(self.get_version_current(instance)) < parse(self.get_version_latest(instance)) diff --git a/authentik/admin/tasks.py b/authentik/admin/tasks.py --- a/authentik/admin/tasks.py +++ b/authentik/admin/tasks.py @@ -18,6 +18,7 @@ from authentik.root.celery import CELERY_APP LOGGER = get_logger() +VERSION_NULL = "0.0.0" VERSION_CACHE_KEY = "authentik_latest_version" VERSION_CACHE_TIMEOUT = 8 * 60 * 60 # 8 hours # Chop of the first ^ because we want to search the entire string @@ -55,7 +56,7 @@ def update_latest_version(self: SystemTask): """Update latest version info""" if CONFIG.get_bool("disable_update_check"): - cache.set(VERSION_CACHE_KEY, "0.0.0", VERSION_CACHE_TIMEOUT) + cache.set(VERSION_CACHE_KEY, VERSION_NULL, VERSION_CACHE_TIMEOUT) self.set_status(TaskStatus.WARNING, "Version check disabled.") return try: @@ -82,7 +83,7 @@ event_dict["message"] = f"Changelog: {match.group()}" Event.new(EventAction.UPDATE_AVAILABLE, **event_dict).save() except (RequestException, IndexError) as exc: - cache.set(VERSION_CACHE_KEY, "0.0.0", VERSION_CACHE_TIMEOUT) + cache.set(VERSION_CACHE_KEY, VERSION_NULL, VERSION_CACHE_TIMEOUT) self.set_error(exc)
{"golden_diff": "diff --git a/authentik/admin/api/version.py b/authentik/admin/api/version.py\n--- a/authentik/admin/api/version.py\n+++ b/authentik/admin/api/version.py\n@@ -10,7 +10,7 @@\n from rest_framework.views import APIView\n \n from authentik import __version__, get_build_hash\n-from authentik.admin.tasks import VERSION_CACHE_KEY, update_latest_version\n+from authentik.admin.tasks import VERSION_CACHE_KEY, VERSION_NULL, update_latest_version\n from authentik.core.api.utils import PassiveSerializer\n \n \n@@ -19,6 +19,7 @@\n \n version_current = SerializerMethodField()\n version_latest = SerializerMethodField()\n+ version_latest_valid = SerializerMethodField()\n build_hash = SerializerMethodField()\n outdated = SerializerMethodField()\n \n@@ -38,6 +39,10 @@\n return __version__\n return version_in_cache\n \n+ def get_version_latest_valid(self, _) -> str:\n+ \"\"\"Check if latest version is valid\"\"\"\n+ return cache.get(VERSION_CACHE_KEY) != VERSION_NULL\n+\n def get_outdated(self, instance) -> bool:\n \"\"\"Check if we're running the latest version\"\"\"\n return parse(self.get_version_current(instance)) < parse(self.get_version_latest(instance))\ndiff --git a/authentik/admin/tasks.py b/authentik/admin/tasks.py\n--- a/authentik/admin/tasks.py\n+++ b/authentik/admin/tasks.py\n@@ -18,6 +18,7 @@\n from authentik.root.celery import CELERY_APP\n \n LOGGER = get_logger()\n+VERSION_NULL = \"0.0.0\"\n VERSION_CACHE_KEY = \"authentik_latest_version\"\n VERSION_CACHE_TIMEOUT = 8 * 60 * 60 # 8 hours\n # Chop of the first ^ because we want to search the entire string\n@@ -55,7 +56,7 @@\n def update_latest_version(self: SystemTask):\n \"\"\"Update latest version info\"\"\"\n if CONFIG.get_bool(\"disable_update_check\"):\n- cache.set(VERSION_CACHE_KEY, \"0.0.0\", VERSION_CACHE_TIMEOUT)\n+ cache.set(VERSION_CACHE_KEY, VERSION_NULL, VERSION_CACHE_TIMEOUT)\n self.set_status(TaskStatus.WARNING, \"Version check disabled.\")\n return\n try:\n@@ -82,7 +83,7 @@\n event_dict[\"message\"] = f\"Changelog: {match.group()}\"\n Event.new(EventAction.UPDATE_AVAILABLE, **event_dict).save()\n except (RequestException, IndexError) as exc:\n- cache.set(VERSION_CACHE_KEY, \"0.0.0\", VERSION_CACHE_TIMEOUT)\n+ cache.set(VERSION_CACHE_KEY, VERSION_NULL, VERSION_CACHE_TIMEOUT)\n self.set_error(exc)\n", "issue": "False version status in admin dashboard for AirGapped environments\n**Describe the bug**\r\nIn an AirGapped environment with `AUTHENTIK_DISABLE_UPDATE_CHECK=true`, or when the version check has not yet been performed, the version tile on the admin dashboard will always state its `Up-to-date!` which may not actually be the case.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Deploy a fresh Authentik installation on an older version (e.g. `2023.10.7`) and ensure `AUTHENTIK_DISABLE_UPDATE_CHECK=true` is set.\r\n2. Go to the admin dashboard\r\n3. 
Observe the version tile stating proudly that its up-to-date.\r\n\r\n**Expected behavior**\r\nNot incorrectly stating its up-to-date as that would give a false sense of security.\r\n\r\n**Screenshots**\r\n![image](https://github.com/goauthentik/authentik/assets/68224306/0a470c2b-73f7-4ba1-ad83-7e35a7f9a053)\r\n\r\n**Logs**\r\nN/a\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2024.2.0\r\n- Deployment: docker-compose\r\n\r\n**Additional context**\r\nN/a\r\n\n", "before_files": [{"content": "\"\"\"authentik administration overview\"\"\"\n\nfrom django.core.cache import cache\nfrom drf_spectacular.utils import extend_schema\nfrom packaging.version import parse\nfrom rest_framework.fields import SerializerMethodField\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom authentik import __version__, get_build_hash\nfrom authentik.admin.tasks import VERSION_CACHE_KEY, update_latest_version\nfrom authentik.core.api.utils import PassiveSerializer\n\n\nclass VersionSerializer(PassiveSerializer):\n \"\"\"Get running and latest version.\"\"\"\n\n version_current = SerializerMethodField()\n version_latest = SerializerMethodField()\n build_hash = SerializerMethodField()\n outdated = SerializerMethodField()\n\n def get_build_hash(self, _) -> str:\n \"\"\"Get build hash, if version is not latest or released\"\"\"\n return get_build_hash()\n\n def get_version_current(self, _) -> str:\n \"\"\"Get current version\"\"\"\n return __version__\n\n def get_version_latest(self, _) -> str:\n \"\"\"Get latest version from cache\"\"\"\n version_in_cache = cache.get(VERSION_CACHE_KEY)\n if not version_in_cache: # pragma: no cover\n update_latest_version.delay()\n return __version__\n return version_in_cache\n\n def get_outdated(self, instance) -> bool:\n \"\"\"Check if we're running the latest version\"\"\"\n return parse(self.get_version_current(instance)) < parse(self.get_version_latest(instance))\n\n\nclass VersionView(APIView):\n \"\"\"Get running and latest version.\"\"\"\n\n permission_classes = [IsAuthenticated]\n pagination_class = None\n filter_backends = []\n\n @extend_schema(responses={200: VersionSerializer(many=False)})\n def get(self, request: Request) -> Response:\n \"\"\"Get running and latest version.\"\"\"\n return Response(VersionSerializer(True).data)\n", "path": "authentik/admin/api/version.py"}, {"content": "\"\"\"authentik admin tasks\"\"\"\n\nimport re\n\nfrom django.core.cache import cache\nfrom django.core.validators import URLValidator\nfrom django.db import DatabaseError, InternalError, ProgrammingError\nfrom packaging.version import parse\nfrom requests import RequestException\nfrom structlog.stdlib import get_logger\n\nfrom authentik import __version__, get_build_hash\nfrom authentik.admin.apps import PROM_INFO\nfrom authentik.events.models import Event, EventAction, Notification\nfrom authentik.events.system_tasks import SystemTask, TaskStatus, prefill_task\nfrom authentik.lib.config import CONFIG\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.root.celery import CELERY_APP\n\nLOGGER = get_logger()\nVERSION_CACHE_KEY = \"authentik_latest_version\"\nVERSION_CACHE_TIMEOUT = 8 * 60 * 60 # 8 hours\n# Chop of the first ^ because we want to search the entire string\nURL_FINDER = URLValidator.regex.pattern[1:]\nLOCAL_VERSION = parse(__version__)\n\n\ndef 
_set_prom_info():\n \"\"\"Set prometheus info for version\"\"\"\n PROM_INFO.info(\n {\n \"version\": __version__,\n \"latest\": cache.get(VERSION_CACHE_KEY, \"\"),\n \"build_hash\": get_build_hash(),\n }\n )\n\n\n@CELERY_APP.task(\n throws=(DatabaseError, ProgrammingError, InternalError),\n)\ndef clear_update_notifications():\n \"\"\"Clear update notifications on startup if the notification was for the version\n we're running now.\"\"\"\n for notification in Notification.objects.filter(event__action=EventAction.UPDATE_AVAILABLE):\n if \"new_version\" not in notification.event.context:\n continue\n notification_version = notification.event.context[\"new_version\"]\n if LOCAL_VERSION >= parse(notification_version):\n notification.delete()\n\n\n@CELERY_APP.task(bind=True, base=SystemTask)\n@prefill_task\ndef update_latest_version(self: SystemTask):\n \"\"\"Update latest version info\"\"\"\n if CONFIG.get_bool(\"disable_update_check\"):\n cache.set(VERSION_CACHE_KEY, \"0.0.0\", VERSION_CACHE_TIMEOUT)\n self.set_status(TaskStatus.WARNING, \"Version check disabled.\")\n return\n try:\n response = get_http_session().get(\n \"https://version.goauthentik.io/version.json\",\n )\n response.raise_for_status()\n data = response.json()\n upstream_version = data.get(\"stable\", {}).get(\"version\")\n cache.set(VERSION_CACHE_KEY, upstream_version, VERSION_CACHE_TIMEOUT)\n self.set_status(TaskStatus.SUCCESSFUL, \"Successfully updated latest Version\")\n _set_prom_info()\n # Check if upstream version is newer than what we're running,\n # and if no event exists yet, create one.\n if LOCAL_VERSION < parse(upstream_version):\n # Event has already been created, don't create duplicate\n if Event.objects.filter(\n action=EventAction.UPDATE_AVAILABLE,\n context__new_version=upstream_version,\n ).exists():\n return\n event_dict = {\"new_version\": upstream_version}\n if match := re.search(URL_FINDER, data.get(\"stable\", {}).get(\"changelog\", \"\")):\n event_dict[\"message\"] = f\"Changelog: {match.group()}\"\n Event.new(EventAction.UPDATE_AVAILABLE, **event_dict).save()\n except (RequestException, IndexError) as exc:\n cache.set(VERSION_CACHE_KEY, \"0.0.0\", VERSION_CACHE_TIMEOUT)\n self.set_error(exc)\n\n\n_set_prom_info()\n", "path": "authentik/admin/tasks.py"}], "after_files": [{"content": "\"\"\"authentik administration overview\"\"\"\n\nfrom django.core.cache import cache\nfrom drf_spectacular.utils import extend_schema\nfrom packaging.version import parse\nfrom rest_framework.fields import SerializerMethodField\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom authentik import __version__, get_build_hash\nfrom authentik.admin.tasks import VERSION_CACHE_KEY, VERSION_NULL, update_latest_version\nfrom authentik.core.api.utils import PassiveSerializer\n\n\nclass VersionSerializer(PassiveSerializer):\n \"\"\"Get running and latest version.\"\"\"\n\n version_current = SerializerMethodField()\n version_latest = SerializerMethodField()\n version_latest_valid = SerializerMethodField()\n build_hash = SerializerMethodField()\n outdated = SerializerMethodField()\n\n def get_build_hash(self, _) -> str:\n \"\"\"Get build hash, if version is not latest or released\"\"\"\n return get_build_hash()\n\n def get_version_current(self, _) -> str:\n \"\"\"Get current version\"\"\"\n return __version__\n\n def get_version_latest(self, _) -> str:\n \"\"\"Get latest version from 
cache\"\"\"\n version_in_cache = cache.get(VERSION_CACHE_KEY)\n if not version_in_cache: # pragma: no cover\n update_latest_version.delay()\n return __version__\n return version_in_cache\n\n def get_version_latest_valid(self, _) -> str:\n \"\"\"Check if latest version is valid\"\"\"\n return cache.get(VERSION_CACHE_KEY) != VERSION_NULL\n\n def get_outdated(self, instance) -> bool:\n \"\"\"Check if we're running the latest version\"\"\"\n return parse(self.get_version_current(instance)) < parse(self.get_version_latest(instance))\n\n\nclass VersionView(APIView):\n \"\"\"Get running and latest version.\"\"\"\n\n permission_classes = [IsAuthenticated]\n pagination_class = None\n filter_backends = []\n\n @extend_schema(responses={200: VersionSerializer(many=False)})\n def get(self, request: Request) -> Response:\n \"\"\"Get running and latest version.\"\"\"\n return Response(VersionSerializer(True).data)\n", "path": "authentik/admin/api/version.py"}, {"content": "\"\"\"authentik admin tasks\"\"\"\n\nimport re\n\nfrom django.core.cache import cache\nfrom django.core.validators import URLValidator\nfrom django.db import DatabaseError, InternalError, ProgrammingError\nfrom packaging.version import parse\nfrom requests import RequestException\nfrom structlog.stdlib import get_logger\n\nfrom authentik import __version__, get_build_hash\nfrom authentik.admin.apps import PROM_INFO\nfrom authentik.events.models import Event, EventAction, Notification\nfrom authentik.events.system_tasks import SystemTask, TaskStatus, prefill_task\nfrom authentik.lib.config import CONFIG\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.root.celery import CELERY_APP\n\nLOGGER = get_logger()\nVERSION_NULL = \"0.0.0\"\nVERSION_CACHE_KEY = \"authentik_latest_version\"\nVERSION_CACHE_TIMEOUT = 8 * 60 * 60 # 8 hours\n# Chop of the first ^ because we want to search the entire string\nURL_FINDER = URLValidator.regex.pattern[1:]\nLOCAL_VERSION = parse(__version__)\n\n\ndef _set_prom_info():\n \"\"\"Set prometheus info for version\"\"\"\n PROM_INFO.info(\n {\n \"version\": __version__,\n \"latest\": cache.get(VERSION_CACHE_KEY, \"\"),\n \"build_hash\": get_build_hash(),\n }\n )\n\n\n@CELERY_APP.task(\n throws=(DatabaseError, ProgrammingError, InternalError),\n)\ndef clear_update_notifications():\n \"\"\"Clear update notifications on startup if the notification was for the version\n we're running now.\"\"\"\n for notification in Notification.objects.filter(event__action=EventAction.UPDATE_AVAILABLE):\n if \"new_version\" not in notification.event.context:\n continue\n notification_version = notification.event.context[\"new_version\"]\n if LOCAL_VERSION >= parse(notification_version):\n notification.delete()\n\n\n@CELERY_APP.task(bind=True, base=SystemTask)\n@prefill_task\ndef update_latest_version(self: SystemTask):\n \"\"\"Update latest version info\"\"\"\n if CONFIG.get_bool(\"disable_update_check\"):\n cache.set(VERSION_CACHE_KEY, VERSION_NULL, VERSION_CACHE_TIMEOUT)\n self.set_status(TaskStatus.WARNING, \"Version check disabled.\")\n return\n try:\n response = get_http_session().get(\n \"https://version.goauthentik.io/version.json\",\n )\n response.raise_for_status()\n data = response.json()\n upstream_version = data.get(\"stable\", {}).get(\"version\")\n cache.set(VERSION_CACHE_KEY, upstream_version, VERSION_CACHE_TIMEOUT)\n self.set_status(TaskStatus.SUCCESSFUL, \"Successfully updated latest Version\")\n _set_prom_info()\n # Check if upstream version is newer than what we're running,\n # and if no event 
exists yet, create one.\n if LOCAL_VERSION < parse(upstream_version):\n # Event has already been created, don't create duplicate\n if Event.objects.filter(\n action=EventAction.UPDATE_AVAILABLE,\n context__new_version=upstream_version,\n ).exists():\n return\n event_dict = {\"new_version\": upstream_version}\n if match := re.search(URL_FINDER, data.get(\"stable\", {}).get(\"changelog\", \"\")):\n event_dict[\"message\"] = f\"Changelog: {match.group()}\"\n Event.new(EventAction.UPDATE_AVAILABLE, **event_dict).save()\n except (RequestException, IndexError) as exc:\n cache.set(VERSION_CACHE_KEY, VERSION_NULL, VERSION_CACHE_TIMEOUT)\n self.set_error(exc)\n\n\n_set_prom_info()\n", "path": "authentik/admin/tasks.py"}]}
1,975
587
gh_patches_debug_5058
rasdani/github-patches
git_diff
google__jax-7572
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- script "examples/advi.py" throws "ValueError" exception Run: ```python3 jax/examples/advi.py``` Output: ``` Optimizing variational parameters... Iteration 0 lower bound 0.4957694113254547 Traceback (most recent call last): File "jax/examples/advi.py", line 138, in <module> callback(params, t) File "jax/examples/advi.py", line 98, in callback X, Y, Z = mesh_eval(target_dist, x_limits, y_limits, 1) File "jax/examples/advi.py", line 67, in mesh_eval return _mesh_eval(func, x_limits, y_limits, params, num_ticks) ValueError: Non-hashable static arguments are not supported. An error occured while trying to hash an object of type <class 'list'>, [-2, 2]. The error was: TypeError: unhashable type: 'list' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/advi.py` Content: ``` 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Automatic differentiation variational inference in Numpy and JAX. 16 17 This demo fits a Gaussian approximation to an intractable, unnormalized 18 density, by differentiating through a Monte Carlo estimate of the 19 variational evidence lower bound (ELBO).""" 20 21 22 from functools import partial 23 import matplotlib.pyplot as plt 24 25 from jax import jit, grad, vmap 26 from jax import random 27 from jax.experimental import optimizers 28 import jax.numpy as jnp 29 import jax.scipy.stats.norm as norm 30 31 32 # ========= Functions to define the evidence lower bound. ========= 33 34 def diag_gaussian_sample(rng, mean, log_std): 35 # Take a single sample from a diagonal multivariate Gaussian. 36 return mean + jnp.exp(log_std) * random.normal(rng, mean.shape) 37 38 def diag_gaussian_logpdf(x, mean, log_std): 39 # Evaluate a single point on a diagonal multivariate Gaussian. 40 return jnp.sum(vmap(norm.logpdf)(x, mean, jnp.exp(log_std))) 41 42 def elbo(logprob, rng, mean, log_std): 43 # Single-sample Monte Carlo estimate of the variational lower bound. 44 sample = diag_gaussian_sample(rng, mean, log_std) 45 return logprob(sample) - diag_gaussian_logpdf(sample, mean, log_std) 46 47 def batch_elbo(logprob, rng, params, num_samples): 48 # Average over a batch of random samples. 49 rngs = random.split(rng, num_samples) 50 vectorized_elbo = vmap(partial(elbo, logprob), in_axes=(0, None, None)) 51 return jnp.mean(vectorized_elbo(rngs, *params)) 52 53 54 # ========= Helper function for plotting. ========= 55 56 @partial(jit, static_argnums=(0, 1, 2, 4)) 57 def _mesh_eval(func, x_limits, y_limits, params, num_ticks): 58 # Evaluate func on a 2D grid defined by x_limits and y_limits. 
59 x = jnp.linspace(*x_limits, num=num_ticks) 60 y = jnp.linspace(*y_limits, num=num_ticks) 61 X, Y = jnp.meshgrid(x, y) 62 xy_vec = jnp.stack([X.ravel(), Y.ravel()]).T 63 zs = vmap(func, in_axes=(0, None))(xy_vec, params) 64 return X, Y, zs.reshape(X.shape) 65 66 def mesh_eval(func, x_limits, y_limits, params, num_ticks=101): 67 return _mesh_eval(func, x_limits, y_limits, params, num_ticks) 68 69 # ========= Define an intractable unnormalized density ========= 70 71 def funnel_log_density(params): 72 return norm.logpdf(params[0], 0, jnp.exp(params[1])) + \ 73 norm.logpdf(params[1], 0, 1.35) 74 75 76 if __name__ == "__main__": 77 num_samples = 40 78 79 @jit 80 def objective(params, t): 81 rng = random.PRNGKey(t) 82 return -batch_elbo(funnel_log_density, rng, params, num_samples) 83 84 # Set up figure. 85 fig = plt.figure(figsize=(8,8), facecolor='white') 86 ax = fig.add_subplot(111, frameon=False) 87 plt.ion() 88 plt.show(block=False) 89 x_limits = [-2, 2] 90 y_limits = [-4, 2] 91 target_dist = lambda x, _: jnp.exp(funnel_log_density(x)) 92 approx_dist = lambda x, params: jnp.exp(diag_gaussian_logpdf(x, *params)) 93 94 def callback(params, t): 95 print("Iteration {} lower bound {}".format(t, objective(params, t))) 96 97 plt.cla() 98 X, Y, Z = mesh_eval(target_dist, x_limits, y_limits, 1) 99 ax.contour(X, Y, Z, cmap='summer') 100 X, Y, Z = mesh_eval(approx_dist, x_limits, y_limits, params) 101 ax.contour(X, Y, Z, cmap='winter') 102 ax.set_xlim(x_limits) 103 ax.set_ylim(y_limits) 104 ax.set_yticks([]) 105 ax.set_xticks([]) 106 107 # Plot random samples from variational distribution. 108 # Here we clone the rng used in computing the objective 109 # so that we can show exactly the same samples. 110 rngs = random.split(random.PRNGKey(t), num_samples) 111 samples = vmap(diag_gaussian_sample, in_axes=(0, None, None))(rngs, *params) 112 ax.plot(samples[:, 0], samples[:, 1], 'b.') 113 114 plt.draw() 115 plt.pause(1.0/60.0) 116 117 118 # Set up optimizer. 119 D = 2 120 init_mean = jnp.zeros(D) 121 init_std = jnp.zeros(D) 122 init_params = (init_mean, init_std) 123 opt_init, opt_update, get_params = optimizers.momentum(step_size=0.1, mass=0.9) 124 opt_state = opt_init(init_params) 125 126 @jit 127 def update(i, opt_state): 128 params = get_params(opt_state) 129 gradient = grad(objective)(params, i) 130 return opt_update(i, gradient, opt_state) 131 132 133 # Main loop. 134 print("Optimizing variational parameters...") 135 for t in range(100): 136 opt_state = update(t, opt_state) 137 params = get_params(opt_state) 138 callback(params, t) 139 plt.show(block=True) 140 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/advi.py b/examples/advi.py --- a/examples/advi.py +++ b/examples/advi.py @@ -86,8 +86,8 @@ ax = fig.add_subplot(111, frameon=False) plt.ion() plt.show(block=False) - x_limits = [-2, 2] - y_limits = [-4, 2] + x_limits = (-2, 2) + y_limits = (-4, 2) target_dist = lambda x, _: jnp.exp(funnel_log_density(x)) approx_dist = lambda x, params: jnp.exp(diag_gaussian_logpdf(x, *params))
{"golden_diff": "diff --git a/examples/advi.py b/examples/advi.py\n--- a/examples/advi.py\n+++ b/examples/advi.py\n@@ -86,8 +86,8 @@\n ax = fig.add_subplot(111, frameon=False)\n plt.ion()\n plt.show(block=False)\n- x_limits = [-2, 2]\n- y_limits = [-4, 2]\n+ x_limits = (-2, 2)\n+ y_limits = (-4, 2)\n target_dist = lambda x, _: jnp.exp(funnel_log_density(x))\n approx_dist = lambda x, params: jnp.exp(diag_gaussian_logpdf(x, *params))\n", "issue": "script \"examples/advi.py\" throws \"ValueError\" exception\nRun:\r\n ```python3 jax/examples/advi.py```\r\nOutput:\r\n```\r\nOptimizing variational parameters...\r\nIteration 0 lower bound 0.4957694113254547\r\nTraceback (most recent call last):\r\n File \"jax/examples/advi.py\", line 138, in <module>\r\n callback(params, t)\r\n File \"jax/examples/advi.py\", line 98, in callback\r\n X, Y, Z = mesh_eval(target_dist, x_limits, y_limits, 1)\r\n File \"jax/examples/advi.py\", line 67, in mesh_eval\r\n return _mesh_eval(func, x_limits, y_limits, params, num_ticks)\r\nValueError: Non-hashable static arguments are not supported. An error occured while trying to hash an object of type <class 'list'>, [-2, 2]. The error was:\r\nTypeError: unhashable type: 'list'\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Automatic differentiation variational inference in Numpy and JAX.\n\nThis demo fits a Gaussian approximation to an intractable, unnormalized\ndensity, by differentiating through a Monte Carlo estimate of the\nvariational evidence lower bound (ELBO).\"\"\"\n\n\nfrom functools import partial\nimport matplotlib.pyplot as plt\n\nfrom jax import jit, grad, vmap\nfrom jax import random\nfrom jax.experimental import optimizers\nimport jax.numpy as jnp\nimport jax.scipy.stats.norm as norm\n\n\n# ========= Functions to define the evidence lower bound. =========\n\ndef diag_gaussian_sample(rng, mean, log_std):\n # Take a single sample from a diagonal multivariate Gaussian.\n return mean + jnp.exp(log_std) * random.normal(rng, mean.shape)\n\ndef diag_gaussian_logpdf(x, mean, log_std):\n # Evaluate a single point on a diagonal multivariate Gaussian.\n return jnp.sum(vmap(norm.logpdf)(x, mean, jnp.exp(log_std)))\n\ndef elbo(logprob, rng, mean, log_std):\n # Single-sample Monte Carlo estimate of the variational lower bound.\n sample = diag_gaussian_sample(rng, mean, log_std)\n return logprob(sample) - diag_gaussian_logpdf(sample, mean, log_std)\n\ndef batch_elbo(logprob, rng, params, num_samples):\n # Average over a batch of random samples.\n rngs = random.split(rng, num_samples)\n vectorized_elbo = vmap(partial(elbo, logprob), in_axes=(0, None, None))\n return jnp.mean(vectorized_elbo(rngs, *params))\n\n\n# ========= Helper function for plotting. 
=========\n\n@partial(jit, static_argnums=(0, 1, 2, 4))\ndef _mesh_eval(func, x_limits, y_limits, params, num_ticks):\n # Evaluate func on a 2D grid defined by x_limits and y_limits.\n x = jnp.linspace(*x_limits, num=num_ticks)\n y = jnp.linspace(*y_limits, num=num_ticks)\n X, Y = jnp.meshgrid(x, y)\n xy_vec = jnp.stack([X.ravel(), Y.ravel()]).T\n zs = vmap(func, in_axes=(0, None))(xy_vec, params)\n return X, Y, zs.reshape(X.shape)\n\ndef mesh_eval(func, x_limits, y_limits, params, num_ticks=101):\n return _mesh_eval(func, x_limits, y_limits, params, num_ticks)\n\n# ========= Define an intractable unnormalized density =========\n\ndef funnel_log_density(params):\n return norm.logpdf(params[0], 0, jnp.exp(params[1])) + \\\n norm.logpdf(params[1], 0, 1.35)\n\n\nif __name__ == \"__main__\":\n num_samples = 40\n\n @jit\n def objective(params, t):\n rng = random.PRNGKey(t)\n return -batch_elbo(funnel_log_density, rng, params, num_samples)\n\n # Set up figure.\n fig = plt.figure(figsize=(8,8), facecolor='white')\n ax = fig.add_subplot(111, frameon=False)\n plt.ion()\n plt.show(block=False)\n x_limits = [-2, 2]\n y_limits = [-4, 2]\n target_dist = lambda x, _: jnp.exp(funnel_log_density(x))\n approx_dist = lambda x, params: jnp.exp(diag_gaussian_logpdf(x, *params))\n\n def callback(params, t):\n print(\"Iteration {} lower bound {}\".format(t, objective(params, t)))\n\n plt.cla()\n X, Y, Z = mesh_eval(target_dist, x_limits, y_limits, 1)\n ax.contour(X, Y, Z, cmap='summer')\n X, Y, Z = mesh_eval(approx_dist, x_limits, y_limits, params)\n ax.contour(X, Y, Z, cmap='winter')\n ax.set_xlim(x_limits)\n ax.set_ylim(y_limits)\n ax.set_yticks([])\n ax.set_xticks([])\n\n # Plot random samples from variational distribution.\n # Here we clone the rng used in computing the objective\n # so that we can show exactly the same samples.\n rngs = random.split(random.PRNGKey(t), num_samples)\n samples = vmap(diag_gaussian_sample, in_axes=(0, None, None))(rngs, *params)\n ax.plot(samples[:, 0], samples[:, 1], 'b.')\n\n plt.draw()\n plt.pause(1.0/60.0)\n\n\n # Set up optimizer.\n D = 2\n init_mean = jnp.zeros(D)\n init_std = jnp.zeros(D)\n init_params = (init_mean, init_std)\n opt_init, opt_update, get_params = optimizers.momentum(step_size=0.1, mass=0.9)\n opt_state = opt_init(init_params)\n\n @jit\n def update(i, opt_state):\n params = get_params(opt_state)\n gradient = grad(objective)(params, i)\n return opt_update(i, gradient, opt_state)\n\n\n # Main loop.\n print(\"Optimizing variational parameters...\")\n for t in range(100):\n opt_state = update(t, opt_state)\n params = get_params(opt_state)\n callback(params, t)\n plt.show(block=True)\n", "path": "examples/advi.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Automatic differentiation variational inference in Numpy and JAX.\n\nThis demo fits a Gaussian approximation to an intractable, unnormalized\ndensity, by differentiating through a Monte Carlo estimate of the\nvariational evidence 
lower bound (ELBO).\"\"\"\n\n\nfrom functools import partial\nimport matplotlib.pyplot as plt\n\nfrom jax import jit, grad, vmap\nfrom jax import random\nfrom jax.experimental import optimizers\nimport jax.numpy as jnp\nimport jax.scipy.stats.norm as norm\n\n\n# ========= Functions to define the evidence lower bound. =========\n\ndef diag_gaussian_sample(rng, mean, log_std):\n # Take a single sample from a diagonal multivariate Gaussian.\n return mean + jnp.exp(log_std) * random.normal(rng, mean.shape)\n\ndef diag_gaussian_logpdf(x, mean, log_std):\n # Evaluate a single point on a diagonal multivariate Gaussian.\n return jnp.sum(vmap(norm.logpdf)(x, mean, jnp.exp(log_std)))\n\ndef elbo(logprob, rng, mean, log_std):\n # Single-sample Monte Carlo estimate of the variational lower bound.\n sample = diag_gaussian_sample(rng, mean, log_std)\n return logprob(sample) - diag_gaussian_logpdf(sample, mean, log_std)\n\ndef batch_elbo(logprob, rng, params, num_samples):\n # Average over a batch of random samples.\n rngs = random.split(rng, num_samples)\n vectorized_elbo = vmap(partial(elbo, logprob), in_axes=(0, None, None))\n return jnp.mean(vectorized_elbo(rngs, *params))\n\n\n# ========= Helper function for plotting. =========\n\n@partial(jit, static_argnums=(0, 1, 2, 4))\ndef _mesh_eval(func, x_limits, y_limits, params, num_ticks):\n # Evaluate func on a 2D grid defined by x_limits and y_limits.\n x = jnp.linspace(*x_limits, num=num_ticks)\n y = jnp.linspace(*y_limits, num=num_ticks)\n X, Y = jnp.meshgrid(x, y)\n xy_vec = jnp.stack([X.ravel(), Y.ravel()]).T\n zs = vmap(func, in_axes=(0, None))(xy_vec, params)\n return X, Y, zs.reshape(X.shape)\n\ndef mesh_eval(func, x_limits, y_limits, params, num_ticks=101):\n return _mesh_eval(func, x_limits, y_limits, params, num_ticks)\n\n# ========= Define an intractable unnormalized density =========\n\ndef funnel_log_density(params):\n return norm.logpdf(params[0], 0, jnp.exp(params[1])) + \\\n norm.logpdf(params[1], 0, 1.35)\n\n\nif __name__ == \"__main__\":\n num_samples = 40\n\n @jit\n def objective(params, t):\n rng = random.PRNGKey(t)\n return -batch_elbo(funnel_log_density, rng, params, num_samples)\n\n # Set up figure.\n fig = plt.figure(figsize=(8,8), facecolor='white')\n ax = fig.add_subplot(111, frameon=False)\n plt.ion()\n plt.show(block=False)\n x_limits = (-2, 2)\n y_limits = (-4, 2)\n target_dist = lambda x, _: jnp.exp(funnel_log_density(x))\n approx_dist = lambda x, params: jnp.exp(diag_gaussian_logpdf(x, *params))\n\n def callback(params, t):\n print(\"Iteration {} lower bound {}\".format(t, objective(params, t)))\n\n plt.cla()\n X, Y, Z = mesh_eval(target_dist, x_limits, y_limits, 1)\n ax.contour(X, Y, Z, cmap='summer')\n X, Y, Z = mesh_eval(approx_dist, x_limits, y_limits, params)\n ax.contour(X, Y, Z, cmap='winter')\n ax.set_xlim(x_limits)\n ax.set_ylim(y_limits)\n ax.set_yticks([])\n ax.set_xticks([])\n\n # Plot random samples from variational distribution.\n # Here we clone the rng used in computing the objective\n # so that we can show exactly the same samples.\n rngs = random.split(random.PRNGKey(t), num_samples)\n samples = vmap(diag_gaussian_sample, in_axes=(0, None, None))(rngs, *params)\n ax.plot(samples[:, 0], samples[:, 1], 'b.')\n\n plt.draw()\n plt.pause(1.0/60.0)\n\n\n # Set up optimizer.\n D = 2\n init_mean = jnp.zeros(D)\n init_std = jnp.zeros(D)\n init_params = (init_mean, init_std)\n opt_init, opt_update, get_params = optimizers.momentum(step_size=0.1, mass=0.9)\n opt_state = opt_init(init_params)\n\n @jit\n def update(i, 
opt_state):\n params = get_params(opt_state)\n gradient = grad(objective)(params, i)\n return opt_update(i, gradient, opt_state)\n\n\n # Main loop.\n print(\"Optimizing variational parameters...\")\n for t in range(100):\n opt_state = update(t, opt_state)\n params = get_params(opt_state)\n callback(params, t)\n plt.show(block=True)\n", "path": "examples/advi.py"}]}
2,114
150
gh_patches_debug_32419
rasdani/github-patches
git_diff
xonsh__xonsh-5388
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Feature: Search substring in env variable completer ```xsh $TRA<Tab> # Show all variables with `*TRA*` ``` ## For community ⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment** --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `xonsh/completers/environment.py` Content: ``` 1 from xonsh.built_ins import XSH 2 from xonsh.completers.tools import ( 3 RichCompletion, 4 contextual_completer, 5 get_filter_function, 6 non_exclusive_completer, 7 ) 8 from xonsh.parsers.completion_context import CompletionContext 9 10 11 @contextual_completer 12 @non_exclusive_completer 13 def complete_environment_vars(context: CompletionContext): 14 """Completes environment variables.""" 15 if context.command: 16 prefix = context.command.prefix 17 elif context.python: 18 prefix = context.python.prefix 19 else: 20 return None 21 22 dollar_location = prefix.rfind("$") 23 if dollar_location == -1: 24 return None 25 26 key = prefix[dollar_location + 1 :] 27 lprefix = len(key) + 1 28 if context.command is not None and context.command.is_after_closing_quote: 29 lprefix += 1 30 filter_func = get_filter_function() 31 env = XSH.env 32 33 return ( 34 RichCompletion( 35 "$" + k, 36 display=f"${k} [{type(v).__name__}]", 37 description=env.get_docs(k).doc, 38 ) 39 for k, v in env.items() 40 if filter_func(k, key) 41 ), lprefix 42 ``` Path: `xonsh/completer.py` Content: ``` 1 """A (tab-)completer for xonsh.""" 2 3 import collections.abc as cabc 4 import sys 5 import typing as tp 6 7 from xonsh.built_ins import XSH 8 from xonsh.completers.tools import ( 9 Completion, 10 RichCompletion, 11 apply_lprefix, 12 get_filter_function, 13 is_contextual_completer, 14 is_exclusive_completer, 15 ) 16 from xonsh.parsers.completion_context import CompletionContext, CompletionContextParser 17 from xonsh.tools import print_exception 18 19 20 class Completer: 21 """This provides a list of optional completions for the xonsh shell.""" 22 23 def __init__(self): 24 self.context_parser = CompletionContextParser() 25 26 def parse( 27 self, text: str, cursor_index: "None|int" = None, ctx=None 28 ) -> "CompletionContext": 29 """Parse the given text 30 31 Parameters 32 ---------- 33 text 34 multi-line text 35 cursor_index 36 position of the cursor. If not given, then it is considered to be at the end. 37 ctx 38 Execution context 39 """ 40 cursor_index = len(text) if cursor_index is None else cursor_index 41 return self.context_parser.parse(text, cursor_index, ctx) 42 43 def complete_line(self, text: str): 44 """Handy wrapper to build command-completion-context when cursor is at the end. 45 46 Notes 47 ----- 48 suffix is not supported; text after last space is parsed as prefix. 49 """ 50 ctx = self.parse(text) 51 cmd_ctx = ctx.command 52 if not cmd_ctx: 53 raise RuntimeError("Only Command context is empty") 54 prefix = cmd_ctx.prefix 55 56 line = text 57 begidx = text.rfind(prefix) 58 endidx = begidx + len(prefix) 59 60 return self.complete( 61 prefix, 62 line, 63 begidx, 64 endidx, 65 cursor_index=len(line), 66 multiline_text=line, 67 completion_context=ctx, 68 ) 69 70 def complete( 71 self, 72 prefix, 73 line, 74 begidx, 75 endidx, 76 ctx=None, 77 multiline_text=None, 78 cursor_index=None, 79 completion_context=None, 80 ): 81 """Complete the string, given a possible execution context. 
82 83 Parameters 84 ---------- 85 prefix : str 86 The string to match 87 line : str 88 The line that prefix appears on. 89 begidx : int 90 The index in line that prefix starts on. 91 endidx : int 92 The index in line that prefix ends on. 93 ctx : dict, optional 94 Names in the current execution context. 95 multiline_text : str 96 The complete multiline text. Needed to get completion context. 97 cursor_index : int 98 The current cursor's index in the multiline text. 99 May be ``len(multiline_text)`` for cursor at the end. 100 Needed to get completion context. 101 102 Returns 103 ------- 104 rtn : list of str 105 Possible completions of prefix, sorted alphabetically. 106 lprefix : int 107 Length of the prefix to be replaced in the completion. 108 """ 109 110 if ( 111 (multiline_text is not None) 112 and (cursor_index is not None) 113 and (completion_context is None) 114 ): 115 completion_context: tp.Optional[CompletionContext] = self.parse( 116 multiline_text, 117 cursor_index, 118 ctx, 119 ) 120 121 ctx = ctx or {} 122 return self.complete_from_context( 123 completion_context, 124 (prefix, line, begidx, endidx, ctx), 125 ) 126 127 @staticmethod 128 def _format_completion( 129 completion, 130 completion_context, 131 completing_contextual_command: bool, 132 lprefix: int, 133 custom_lprefix: bool, 134 ) -> tuple[Completion, int]: 135 if ( 136 completing_contextual_command 137 and completion_context.command.is_after_closing_quote 138 ): 139 """ 140 The cursor is appending to a closed string literal, i.e. cursor at the end of ``ls "/usr/"``. 141 1. The closing quote will be appended to all completions. 142 I.e the completion ``/usr/bin`` will turn into ``/usr/bin"`` 143 To prevent this behavior, a completer can return a ``RichCompletion`` with ``append_closing_quote=False``. 144 2. If not specified, lprefix will cover the closing prefix. 145 I.e for ``ls "/usr/"``, the default lprefix will be 6 to include the closing quote. 146 To prevent this behavior, a completer can return a different lprefix or specify it inside ``RichCompletion``. 
147 """ 148 closing_quote = completion_context.command.closing_quote 149 if not custom_lprefix: 150 lprefix += len(closing_quote) 151 if closing_quote: 152 if isinstance(completion, RichCompletion): 153 if completion.append_closing_quote: 154 completion = completion.replace( 155 value=completion.value + closing_quote 156 ) 157 else: 158 completion = completion + closing_quote 159 160 completion = list(apply_lprefix([completion], lprefix))[0] 161 162 if ( 163 isinstance(completion, RichCompletion) 164 and completion.append_space 165 and not completion.value.endswith(" ") 166 ): 167 # append spaces AFTER appending closing quote 168 completion = completion.replace(value=completion.value + " ") 169 170 return completion, lprefix 171 172 @staticmethod 173 def generate_completions( 174 completion_context, old_completer_args, trace: bool 175 ) -> tp.Iterator[tuple[Completion, int]]: 176 filter_func = get_filter_function() 177 178 for name, func in XSH.completers.items(): 179 try: 180 if is_contextual_completer(func): 181 if completion_context is None: 182 continue 183 out = func(completion_context) 184 else: 185 if old_completer_args is None: 186 continue 187 out = func(*old_completer_args) 188 except StopIteration: 189 # completer requested to stop collecting completions 190 break 191 except Exception as e: 192 name = func.__name__ if hasattr(func, "__name__") else str(func) 193 print_exception( 194 f"Completer {name} raises exception when gets " 195 f"old_args={old_completer_args[:-1]} / completion_context={completion_context!r}:\n" 196 f"{type(e)} - {e}" 197 ) 198 continue 199 200 completing_contextual_command = ( 201 is_contextual_completer(func) 202 and completion_context is not None 203 and completion_context.command is not None 204 ) 205 206 # -- set comp-defaults -- 207 208 # the default is that the completer function filters out as necessary 209 # we can change that once fuzzy/substring matches are added 210 is_filtered = True 211 custom_lprefix = False 212 prefix = "" 213 if completing_contextual_command: 214 prefix = completion_context.command.prefix 215 elif old_completer_args is not None: 216 prefix = old_completer_args[0] 217 lprefix = len(prefix) 218 219 if isinstance(out, cabc.Sequence): 220 # update comp-defaults from 221 res, lprefix_filtered = out 222 if isinstance(lprefix_filtered, bool): 223 is_filtered = lprefix_filtered 224 else: 225 lprefix = lprefix_filtered 226 custom_lprefix = True 227 else: 228 res = out 229 230 if res is None: 231 continue 232 233 items = [] 234 for comp in res: 235 if (not is_filtered) and (not filter_func(comp, prefix)): 236 continue 237 comp = Completer._format_completion( 238 comp, 239 completion_context, 240 completing_contextual_command, 241 lprefix or 0, 242 custom_lprefix, 243 ) 244 items.append(comp) 245 yield comp 246 247 if not items: # empty completion 248 continue 249 250 if trace: 251 print( 252 f"TRACE COMPLETIONS: Got {len(items)} results" 253 f" from {'' if is_exclusive_completer(func) else 'non-'}exclusive completer '{name}':" 254 ) 255 sys.displayhook(items) 256 257 if is_exclusive_completer(func): 258 # we got completions for an exclusive completer 259 break 260 261 def complete_from_context(self, completion_context, old_completer_args=None): 262 trace = XSH.env.get("XONSH_TRACE_COMPLETIONS") 263 if trace: 264 print("\nTRACE COMPLETIONS: Getting completions with context:") 265 sys.displayhook(completion_context) 266 lprefix = 0 267 268 # using dict to keep order py3.6+ 269 completions = {} 270 271 query_limit = 
XSH.env.get("COMPLETION_QUERY_LIMIT") 272 273 for comp in self.generate_completions( 274 completion_context, 275 old_completer_args, 276 trace, 277 ): 278 completion, lprefix = comp 279 completions[completion] = None 280 if query_limit and len(completions) >= query_limit: 281 if trace: 282 print( 283 "TRACE COMPLETIONS: Stopped after $COMPLETION_QUERY_LIMIT reached." 284 ) 285 break 286 287 def sortkey(s): 288 # todo: should sort with prefix > substring > fuzzy 289 return s.lstrip(''''"''').lower() 290 291 # the last completer's lprefix is returned. other lprefix values are inside the RichCompletions. 292 return tuple(sorted(completions, key=sortkey)), lprefix 293 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/xonsh/completer.py b/xonsh/completer.py --- a/xonsh/completer.py +++ b/xonsh/completer.py @@ -284,9 +284,18 @@ ) break - def sortkey(s): - # todo: should sort with prefix > substring > fuzzy - return s.lstrip(''''"''').lower() + prefix = None + if completion_context: + prefix = completion_context.command.prefix + if prefix.startswith("$"): + prefix = prefix[1:] + + def sortkey(s): + """Sort values by prefix position and then alphabetically.""" + return (s.lower().find(prefix.lower()), s.lower()) + else: + # Fallback sort. + sortkey = lambda s: s.lstrip(''''"''').lower() # the last completer's lprefix is returned. other lprefix values are inside the RichCompletions. return tuple(sorted(completions, key=sortkey)), lprefix diff --git a/xonsh/completers/environment.py b/xonsh/completers/environment.py --- a/xonsh/completers/environment.py +++ b/xonsh/completers/environment.py @@ -2,7 +2,6 @@ from xonsh.completers.tools import ( RichCompletion, contextual_completer, - get_filter_function, non_exclusive_completer, ) from xonsh.parsers.completion_context import CompletionContext @@ -27,15 +26,14 @@ lprefix = len(key) + 1 if context.command is not None and context.command.is_after_closing_quote: lprefix += 1 - filter_func = get_filter_function() env = XSH.env + vars = [k for k, v in env.items() if key.lower() in k.lower()] return ( RichCompletion( "$" + k, - display=f"${k} [{type(v).__name__}]", + display=f"${k} [{type(env[k]).__name__}]", description=env.get_docs(k).doc, ) - for k, v in env.items() - if filter_func(k, key) + for k in vars ), lprefix
{"golden_diff": "diff --git a/xonsh/completer.py b/xonsh/completer.py\n--- a/xonsh/completer.py\n+++ b/xonsh/completer.py\n@@ -284,9 +284,18 @@\n )\n break\n \n- def sortkey(s):\n- # todo: should sort with prefix > substring > fuzzy\n- return s.lstrip(''''\"''').lower()\n+ prefix = None\n+ if completion_context:\n+ prefix = completion_context.command.prefix\n+ if prefix.startswith(\"$\"):\n+ prefix = prefix[1:]\n+\n+ def sortkey(s):\n+ \"\"\"Sort values by prefix position and then alphabetically.\"\"\"\n+ return (s.lower().find(prefix.lower()), s.lower())\n+ else:\n+ # Fallback sort.\n+ sortkey = lambda s: s.lstrip(''''\"''').lower()\n \n # the last completer's lprefix is returned. other lprefix values are inside the RichCompletions.\n return tuple(sorted(completions, key=sortkey)), lprefix\ndiff --git a/xonsh/completers/environment.py b/xonsh/completers/environment.py\n--- a/xonsh/completers/environment.py\n+++ b/xonsh/completers/environment.py\n@@ -2,7 +2,6 @@\n from xonsh.completers.tools import (\n RichCompletion,\n contextual_completer,\n- get_filter_function,\n non_exclusive_completer,\n )\n from xonsh.parsers.completion_context import CompletionContext\n@@ -27,15 +26,14 @@\n lprefix = len(key) + 1\n if context.command is not None and context.command.is_after_closing_quote:\n lprefix += 1\n- filter_func = get_filter_function()\n env = XSH.env\n \n+ vars = [k for k, v in env.items() if key.lower() in k.lower()]\n return (\n RichCompletion(\n \"$\" + k,\n- display=f\"${k} [{type(v).__name__}]\",\n+ display=f\"${k} [{type(env[k]).__name__}]\",\n description=env.get_docs(k).doc,\n )\n- for k, v in env.items()\n- if filter_func(k, key)\n+ for k in vars\n ), lprefix\n", "issue": "Feature: Search substring in env variable completer\n```xsh\r\n$TRA<Tab>\r\n# Show all variables with `*TRA*`\r\n```\r\n\r\n## For community\r\n\u2b07\ufe0f **Please click the \ud83d\udc4d reaction instead of leaving a `+1` or \ud83d\udc4d comment**\r\n\n", "before_files": [{"content": "from xonsh.built_ins import XSH\nfrom xonsh.completers.tools import (\n RichCompletion,\n contextual_completer,\n get_filter_function,\n non_exclusive_completer,\n)\nfrom xonsh.parsers.completion_context import CompletionContext\n\n\n@contextual_completer\n@non_exclusive_completer\ndef complete_environment_vars(context: CompletionContext):\n \"\"\"Completes environment variables.\"\"\"\n if context.command:\n prefix = context.command.prefix\n elif context.python:\n prefix = context.python.prefix\n else:\n return None\n\n dollar_location = prefix.rfind(\"$\")\n if dollar_location == -1:\n return None\n\n key = prefix[dollar_location + 1 :]\n lprefix = len(key) + 1\n if context.command is not None and context.command.is_after_closing_quote:\n lprefix += 1\n filter_func = get_filter_function()\n env = XSH.env\n\n return (\n RichCompletion(\n \"$\" + k,\n display=f\"${k} [{type(v).__name__}]\",\n description=env.get_docs(k).doc,\n )\n for k, v in env.items()\n if filter_func(k, key)\n ), lprefix\n", "path": "xonsh/completers/environment.py"}, {"content": "\"\"\"A (tab-)completer for xonsh.\"\"\"\n\nimport collections.abc as cabc\nimport sys\nimport typing as tp\n\nfrom xonsh.built_ins import XSH\nfrom xonsh.completers.tools import (\n Completion,\n RichCompletion,\n apply_lprefix,\n get_filter_function,\n is_contextual_completer,\n is_exclusive_completer,\n)\nfrom xonsh.parsers.completion_context import CompletionContext, CompletionContextParser\nfrom xonsh.tools import print_exception\n\n\nclass Completer:\n \"\"\"This provides a list 
of optional completions for the xonsh shell.\"\"\"\n\n def __init__(self):\n self.context_parser = CompletionContextParser()\n\n def parse(\n self, text: str, cursor_index: \"None|int\" = None, ctx=None\n ) -> \"CompletionContext\":\n \"\"\"Parse the given text\n\n Parameters\n ----------\n text\n multi-line text\n cursor_index\n position of the cursor. If not given, then it is considered to be at the end.\n ctx\n Execution context\n \"\"\"\n cursor_index = len(text) if cursor_index is None else cursor_index\n return self.context_parser.parse(text, cursor_index, ctx)\n\n def complete_line(self, text: str):\n \"\"\"Handy wrapper to build command-completion-context when cursor is at the end.\n\n Notes\n -----\n suffix is not supported; text after last space is parsed as prefix.\n \"\"\"\n ctx = self.parse(text)\n cmd_ctx = ctx.command\n if not cmd_ctx:\n raise RuntimeError(\"Only Command context is empty\")\n prefix = cmd_ctx.prefix\n\n line = text\n begidx = text.rfind(prefix)\n endidx = begidx + len(prefix)\n\n return self.complete(\n prefix,\n line,\n begidx,\n endidx,\n cursor_index=len(line),\n multiline_text=line,\n completion_context=ctx,\n )\n\n def complete(\n self,\n prefix,\n line,\n begidx,\n endidx,\n ctx=None,\n multiline_text=None,\n cursor_index=None,\n completion_context=None,\n ):\n \"\"\"Complete the string, given a possible execution context.\n\n Parameters\n ----------\n prefix : str\n The string to match\n line : str\n The line that prefix appears on.\n begidx : int\n The index in line that prefix starts on.\n endidx : int\n The index in line that prefix ends on.\n ctx : dict, optional\n Names in the current execution context.\n multiline_text : str\n The complete multiline text. Needed to get completion context.\n cursor_index : int\n The current cursor's index in the multiline text.\n May be ``len(multiline_text)`` for cursor at the end.\n Needed to get completion context.\n\n Returns\n -------\n rtn : list of str\n Possible completions of prefix, sorted alphabetically.\n lprefix : int\n Length of the prefix to be replaced in the completion.\n \"\"\"\n\n if (\n (multiline_text is not None)\n and (cursor_index is not None)\n and (completion_context is None)\n ):\n completion_context: tp.Optional[CompletionContext] = self.parse(\n multiline_text,\n cursor_index,\n ctx,\n )\n\n ctx = ctx or {}\n return self.complete_from_context(\n completion_context,\n (prefix, line, begidx, endidx, ctx),\n )\n\n @staticmethod\n def _format_completion(\n completion,\n completion_context,\n completing_contextual_command: bool,\n lprefix: int,\n custom_lprefix: bool,\n ) -> tuple[Completion, int]:\n if (\n completing_contextual_command\n and completion_context.command.is_after_closing_quote\n ):\n \"\"\"\n The cursor is appending to a closed string literal, i.e. cursor at the end of ``ls \"/usr/\"``.\n 1. The closing quote will be appended to all completions.\n I.e the completion ``/usr/bin`` will turn into ``/usr/bin\"``\n To prevent this behavior, a completer can return a ``RichCompletion`` with ``append_closing_quote=False``.\n 2. 
If not specified, lprefix will cover the closing prefix.\n I.e for ``ls \"/usr/\"``, the default lprefix will be 6 to include the closing quote.\n To prevent this behavior, a completer can return a different lprefix or specify it inside ``RichCompletion``.\n \"\"\"\n closing_quote = completion_context.command.closing_quote\n if not custom_lprefix:\n lprefix += len(closing_quote)\n if closing_quote:\n if isinstance(completion, RichCompletion):\n if completion.append_closing_quote:\n completion = completion.replace(\n value=completion.value + closing_quote\n )\n else:\n completion = completion + closing_quote\n\n completion = list(apply_lprefix([completion], lprefix))[0]\n\n if (\n isinstance(completion, RichCompletion)\n and completion.append_space\n and not completion.value.endswith(\" \")\n ):\n # append spaces AFTER appending closing quote\n completion = completion.replace(value=completion.value + \" \")\n\n return completion, lprefix\n\n @staticmethod\n def generate_completions(\n completion_context, old_completer_args, trace: bool\n ) -> tp.Iterator[tuple[Completion, int]]:\n filter_func = get_filter_function()\n\n for name, func in XSH.completers.items():\n try:\n if is_contextual_completer(func):\n if completion_context is None:\n continue\n out = func(completion_context)\n else:\n if old_completer_args is None:\n continue\n out = func(*old_completer_args)\n except StopIteration:\n # completer requested to stop collecting completions\n break\n except Exception as e:\n name = func.__name__ if hasattr(func, \"__name__\") else str(func)\n print_exception(\n f\"Completer {name} raises exception when gets \"\n f\"old_args={old_completer_args[:-1]} / completion_context={completion_context!r}:\\n\"\n f\"{type(e)} - {e}\"\n )\n continue\n\n completing_contextual_command = (\n is_contextual_completer(func)\n and completion_context is not None\n and completion_context.command is not None\n )\n\n # -- set comp-defaults --\n\n # the default is that the completer function filters out as necessary\n # we can change that once fuzzy/substring matches are added\n is_filtered = True\n custom_lprefix = False\n prefix = \"\"\n if completing_contextual_command:\n prefix = completion_context.command.prefix\n elif old_completer_args is not None:\n prefix = old_completer_args[0]\n lprefix = len(prefix)\n\n if isinstance(out, cabc.Sequence):\n # update comp-defaults from\n res, lprefix_filtered = out\n if isinstance(lprefix_filtered, bool):\n is_filtered = lprefix_filtered\n else:\n lprefix = lprefix_filtered\n custom_lprefix = True\n else:\n res = out\n\n if res is None:\n continue\n\n items = []\n for comp in res:\n if (not is_filtered) and (not filter_func(comp, prefix)):\n continue\n comp = Completer._format_completion(\n comp,\n completion_context,\n completing_contextual_command,\n lprefix or 0,\n custom_lprefix,\n )\n items.append(comp)\n yield comp\n\n if not items: # empty completion\n continue\n\n if trace:\n print(\n f\"TRACE COMPLETIONS: Got {len(items)} results\"\n f\" from {'' if is_exclusive_completer(func) else 'non-'}exclusive completer '{name}':\"\n )\n sys.displayhook(items)\n\n if is_exclusive_completer(func):\n # we got completions for an exclusive completer\n break\n\n def complete_from_context(self, completion_context, old_completer_args=None):\n trace = XSH.env.get(\"XONSH_TRACE_COMPLETIONS\")\n if trace:\n print(\"\\nTRACE COMPLETIONS: Getting completions with context:\")\n sys.displayhook(completion_context)\n lprefix = 0\n\n # using dict to keep order py3.6+\n completions = {}\n\n 
query_limit = XSH.env.get(\"COMPLETION_QUERY_LIMIT\")\n\n for comp in self.generate_completions(\n completion_context,\n old_completer_args,\n trace,\n ):\n completion, lprefix = comp\n completions[completion] = None\n if query_limit and len(completions) >= query_limit:\n if trace:\n print(\n \"TRACE COMPLETIONS: Stopped after $COMPLETION_QUERY_LIMIT reached.\"\n )\n break\n\n def sortkey(s):\n # todo: should sort with prefix > substring > fuzzy\n return s.lstrip(''''\"''').lower()\n\n # the last completer's lprefix is returned. other lprefix values are inside the RichCompletions.\n return tuple(sorted(completions, key=sortkey)), lprefix\n", "path": "xonsh/completer.py"}], "after_files": [{"content": "from xonsh.built_ins import XSH\nfrom xonsh.completers.tools import (\n RichCompletion,\n contextual_completer,\n non_exclusive_completer,\n)\nfrom xonsh.parsers.completion_context import CompletionContext\n\n\n@contextual_completer\n@non_exclusive_completer\ndef complete_environment_vars(context: CompletionContext):\n \"\"\"Completes environment variables.\"\"\"\n if context.command:\n prefix = context.command.prefix\n elif context.python:\n prefix = context.python.prefix\n else:\n return None\n\n dollar_location = prefix.rfind(\"$\")\n if dollar_location == -1:\n return None\n\n key = prefix[dollar_location + 1 :]\n lprefix = len(key) + 1\n if context.command is not None and context.command.is_after_closing_quote:\n lprefix += 1\n env = XSH.env\n\n vars = [k for k, v in env.items() if key.lower() in k.lower()]\n return (\n RichCompletion(\n \"$\" + k,\n display=f\"${k} [{type(env[k]).__name__}]\",\n description=env.get_docs(k).doc,\n )\n for k in vars\n ), lprefix\n", "path": "xonsh/completers/environment.py"}, {"content": "\"\"\"A (tab-)completer for xonsh.\"\"\"\n\nimport collections.abc as cabc\nimport sys\nimport typing as tp\n\nfrom xonsh.built_ins import XSH\nfrom xonsh.completers.tools import (\n Completion,\n RichCompletion,\n apply_lprefix,\n get_filter_function,\n is_contextual_completer,\n is_exclusive_completer,\n)\nfrom xonsh.parsers.completion_context import CompletionContext, CompletionContextParser\nfrom xonsh.tools import print_exception\n\n\nclass Completer:\n \"\"\"This provides a list of optional completions for the xonsh shell.\"\"\"\n\n def __init__(self):\n self.context_parser = CompletionContextParser()\n\n def parse(\n self, text: str, cursor_index: \"None|int\" = None, ctx=None\n ) -> \"CompletionContext\":\n \"\"\"Parse the given text\n\n Parameters\n ----------\n text\n multi-line text\n cursor_index\n position of the cursor. 
If not given, then it is considered to be at the end.\n ctx\n Execution context\n \"\"\"\n cursor_index = len(text) if cursor_index is None else cursor_index\n return self.context_parser.parse(text, cursor_index, ctx)\n\n def complete_line(self, text: str):\n \"\"\"Handy wrapper to build command-completion-context when cursor is at the end.\n\n Notes\n -----\n suffix is not supported; text after last space is parsed as prefix.\n \"\"\"\n ctx = self.parse(text)\n cmd_ctx = ctx.command\n if not cmd_ctx:\n raise RuntimeError(\"Only Command context is empty\")\n prefix = cmd_ctx.prefix\n\n line = text\n begidx = text.rfind(prefix)\n endidx = begidx + len(prefix)\n\n return self.complete(\n prefix,\n line,\n begidx,\n endidx,\n cursor_index=len(line),\n multiline_text=line,\n completion_context=ctx,\n )\n\n def complete(\n self,\n prefix,\n line,\n begidx,\n endidx,\n ctx=None,\n multiline_text=None,\n cursor_index=None,\n completion_context=None,\n ):\n \"\"\"Complete the string, given a possible execution context.\n\n Parameters\n ----------\n prefix : str\n The string to match\n line : str\n The line that prefix appears on.\n begidx : int\n The index in line that prefix starts on.\n endidx : int\n The index in line that prefix ends on.\n ctx : dict, optional\n Names in the current execution context.\n multiline_text : str\n The complete multiline text. Needed to get completion context.\n cursor_index : int\n The current cursor's index in the multiline text.\n May be ``len(multiline_text)`` for cursor at the end.\n Needed to get completion context.\n\n Returns\n -------\n rtn : list of str\n Possible completions of prefix, sorted alphabetically.\n lprefix : int\n Length of the prefix to be replaced in the completion.\n \"\"\"\n\n if (\n (multiline_text is not None)\n and (cursor_index is not None)\n and (completion_context is None)\n ):\n completion_context: tp.Optional[CompletionContext] = self.parse(\n multiline_text,\n cursor_index,\n ctx,\n )\n\n ctx = ctx or {}\n return self.complete_from_context(\n completion_context,\n (prefix, line, begidx, endidx, ctx),\n )\n\n @staticmethod\n def _format_completion(\n completion,\n completion_context,\n completing_contextual_command: bool,\n lprefix: int,\n custom_lprefix: bool,\n ) -> tuple[Completion, int]:\n if (\n completing_contextual_command\n and completion_context.command.is_after_closing_quote\n ):\n \"\"\"\n The cursor is appending to a closed string literal, i.e. cursor at the end of ``ls \"/usr/\"``.\n 1. The closing quote will be appended to all completions.\n I.e the completion ``/usr/bin`` will turn into ``/usr/bin\"``\n To prevent this behavior, a completer can return a ``RichCompletion`` with ``append_closing_quote=False``.\n 2. 
If not specified, lprefix will cover the closing prefix.\n I.e for ``ls \"/usr/\"``, the default lprefix will be 6 to include the closing quote.\n To prevent this behavior, a completer can return a different lprefix or specify it inside ``RichCompletion``.\n \"\"\"\n closing_quote = completion_context.command.closing_quote\n if not custom_lprefix:\n lprefix += len(closing_quote)\n if closing_quote:\n if isinstance(completion, RichCompletion):\n if completion.append_closing_quote:\n completion = completion.replace(\n value=completion.value + closing_quote\n )\n else:\n completion = completion + closing_quote\n\n completion = list(apply_lprefix([completion], lprefix))[0]\n\n if (\n isinstance(completion, RichCompletion)\n and completion.append_space\n and not completion.value.endswith(\" \")\n ):\n # append spaces AFTER appending closing quote\n completion = completion.replace(value=completion.value + \" \")\n\n return completion, lprefix\n\n @staticmethod\n def generate_completions(\n completion_context, old_completer_args, trace: bool\n ) -> tp.Iterator[tuple[Completion, int]]:\n filter_func = get_filter_function()\n\n for name, func in XSH.completers.items():\n try:\n if is_contextual_completer(func):\n if completion_context is None:\n continue\n out = func(completion_context)\n else:\n if old_completer_args is None:\n continue\n out = func(*old_completer_args)\n except StopIteration:\n # completer requested to stop collecting completions\n break\n except Exception as e:\n name = func.__name__ if hasattr(func, \"__name__\") else str(func)\n print_exception(\n f\"Completer {name} raises exception when gets \"\n f\"old_args={old_completer_args[:-1]} / completion_context={completion_context!r}:\\n\"\n f\"{type(e)} - {e}\"\n )\n continue\n\n completing_contextual_command = (\n is_contextual_completer(func)\n and completion_context is not None\n and completion_context.command is not None\n )\n\n # -- set comp-defaults --\n\n # the default is that the completer function filters out as necessary\n # we can change that once fuzzy/substring matches are added\n is_filtered = True\n custom_lprefix = False\n prefix = \"\"\n if completing_contextual_command:\n prefix = completion_context.command.prefix\n elif old_completer_args is not None:\n prefix = old_completer_args[0]\n lprefix = len(prefix)\n\n if isinstance(out, cabc.Sequence):\n # update comp-defaults from\n res, lprefix_filtered = out\n if isinstance(lprefix_filtered, bool):\n is_filtered = lprefix_filtered\n else:\n lprefix = lprefix_filtered\n custom_lprefix = True\n else:\n res = out\n\n if res is None:\n continue\n\n items = []\n for comp in res:\n if (not is_filtered) and (not filter_func(comp, prefix)):\n continue\n comp = Completer._format_completion(\n comp,\n completion_context,\n completing_contextual_command,\n lprefix or 0,\n custom_lprefix,\n )\n items.append(comp)\n yield comp\n\n if not items: # empty completion\n continue\n\n if trace:\n print(\n f\"TRACE COMPLETIONS: Got {len(items)} results\"\n f\" from {'' if is_exclusive_completer(func) else 'non-'}exclusive completer '{name}':\"\n )\n sys.displayhook(items)\n\n if is_exclusive_completer(func):\n # we got completions for an exclusive completer\n break\n\n def complete_from_context(self, completion_context, old_completer_args=None):\n trace = XSH.env.get(\"XONSH_TRACE_COMPLETIONS\")\n if trace:\n print(\"\\nTRACE COMPLETIONS: Getting completions with context:\")\n sys.displayhook(completion_context)\n lprefix = 0\n\n # using dict to keep order py3.6+\n completions = {}\n\n 
query_limit = XSH.env.get(\"COMPLETION_QUERY_LIMIT\")\n\n for comp in self.generate_completions(\n completion_context,\n old_completer_args,\n trace,\n ):\n completion, lprefix = comp\n completions[completion] = None\n if query_limit and len(completions) >= query_limit:\n if trace:\n print(\n \"TRACE COMPLETIONS: Stopped after $COMPLETION_QUERY_LIMIT reached.\"\n )\n break\n\n prefix = None\n if completion_context:\n prefix = completion_context.command.prefix\n if prefix.startswith(\"$\"):\n prefix = prefix[1:]\n\n def sortkey(s):\n \"\"\"Sort values by prefix position and then alphabetically.\"\"\"\n return (s.lower().find(prefix.lower()), s.lower())\n else:\n # Fallback sort.\n sortkey = lambda s: s.lstrip(''''\"''').lower()\n\n # the last completer's lprefix is returned. other lprefix values are inside the RichCompletions.\n return tuple(sorted(completions, key=sortkey)), lprefix\n", "path": "xonsh/completer.py"}]}
3,443
508
gh_patches_debug_7473
rasdani/github-patches
git_diff
praw-dev__praw-1327
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PRAW installed by pip is missing the `images` directory and its contents ## Issue Description PRAW's video submit method uses a placeholder image as a video thumbnail when the user doesn't provide a thumbnail. Here's the relevant code: https://github.com/praw-dev/praw/blob/54f8b3f998008b81988aac057d33fe38d5ac7739/praw/models/reddit/subreddit.py#L511-L514 That image is at [`praw/images/PRAW logo.png`](https://github.com/praw-dev/praw/blob/master/praw/images/PRAW%20logo.png). Unfortunately the current release on PyPI is missing the file and the entire `images` directory, so the `submit_video` method fails when another thumbnail isn't provided. It isn't just the wheel on PyPI that is missing the image. The source tarball is as well. I suspect that a change to [`MANIFEST.in`](https://github.com/praw-dev/praw/blob/master/MANIFEST.in) might solve this problem. Or maybe not, as [this Stack Overflow answer](https://stackoverflow.com/a/25964691/8033766) suggests that `MANIFEST.in` is just for Python 2.6 and earlier. Adding an `__init__.py` to the `images` folder would probably [make `find_packages()` in `setup.py`](https://setuptools.readthedocs.io/en/latest/setuptools.html#using-find-packages) notice the folder, but this would be a mis-use of an `__init__.py` since that folder is not a Python package. [This page](https://docs.python.org/3.3/distutils/setupscript.html#installing-additional-files) suggests using the `data_files` argument in `setup.py`, but I tried that and couldn't get it to work ([branch here](https://github.com/jarhill0/praw/tree/image-upload-fix), [PyPI testing push here](https://test.pypi.org/project/praw/)). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 """praw setup.py""" 2 3 import re 4 from codecs import open 5 from os import path 6 from setuptools import find_packages, setup 7 8 9 PACKAGE_NAME = "praw" 10 HERE = path.abspath(path.dirname(__file__)) 11 with open(path.join(HERE, "README.rst"), encoding="utf-8") as fp: 12 README = fp.read() 13 with open(path.join(HERE, PACKAGE_NAME, "const.py"), encoding="utf-8") as fp: 14 VERSION = re.search('__version__ = "([^"]+)"', fp.read()).group(1) 15 16 extras = { 17 "ci": ["coveralls"], 18 "dev": ["pre-commit"], 19 "lint": ["black", "flake8", "pydocstyle", "sphinx", "sphinx_rtd_theme"], 20 "test": [ 21 "betamax >=0.8, <0.9", 22 "betamax-matchers >=0.3.0, <0.5", 23 "betamax-serializers >=0.2, <0.3", 24 "mock >=0.8", 25 "pytest >=2.7.3", 26 ], 27 } 28 extras["dev"] += extras["lint"] + extras["test"] 29 30 setup( 31 name=PACKAGE_NAME, 32 author="Bryce Boe", 33 author_email="[email protected]", 34 python_requires=">=3.5", 35 classifiers=[ 36 "Development Status :: 5 - Production/Stable", 37 "Environment :: Console", 38 "Intended Audience :: Developers", 39 "License :: OSI Approved :: BSD License", 40 "Natural Language :: English", 41 "Operating System :: OS Independent", 42 "Programming Language :: Python", 43 "Programming Language :: Python :: 3", 44 "Programming Language :: Python :: 3.5", 45 "Programming Language :: Python :: 3.6", 46 "Programming Language :: Python :: 3.7", 47 "Programming Language :: Python :: 3.8", 48 "Topic :: Utilities", 49 ], 50 description=( 51 "PRAW, an acronym for `Python Reddit API Wrapper`, is a " 52 "python package that allows for simple access to " 53 "reddit's API." 
54 ), 55 extras_require=extras, 56 install_requires=[ 57 "prawcore >=1.0.1, <2.0", 58 "update_checker >=0.16", 59 "websocket-client >=0.54.0", 60 ], 61 keywords="reddit api wrapper", 62 license="Simplified BSD License", 63 long_description=README, 64 package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini"]}, 65 packages=find_packages(exclude=["tests", "tests.*", "tools", "tools.*"]), 66 url="https://praw.readthedocs.org/", 67 version=VERSION, 68 ) 69 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -61,7 +61,10 @@ keywords="reddit api wrapper", license="Simplified BSD License", long_description=README, - package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini"]}, + package_data={ + "": ["LICENSE.txt"], + PACKAGE_NAME: ["*.ini", "images/*.jpg"], + }, packages=find_packages(exclude=["tests", "tests.*", "tools", "tools.*"]), url="https://praw.readthedocs.org/", version=VERSION,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,7 +61,10 @@\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n- package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\"]},\n+ package_data={\n+ \"\": [\"LICENSE.txt\"],\n+ PACKAGE_NAME: [\"*.ini\", \"images/*.jpg\"],\n+ },\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n url=\"https://praw.readthedocs.org/\",\n version=VERSION,\n", "issue": "PRAW installed by pip is missing the `images` directory and its contents\n## Issue Description\r\n\r\nPRAW's video submit method uses a placeholder image as a video thumbnail when the user doesn't provide a thumbnail. Here's the relevant code:\r\n\r\nhttps://github.com/praw-dev/praw/blob/54f8b3f998008b81988aac057d33fe38d5ac7739/praw/models/reddit/subreddit.py#L511-L514\r\n\r\nThat image is at [`praw/images/PRAW logo.png`](https://github.com/praw-dev/praw/blob/master/praw/images/PRAW%20logo.png). Unfortunately the current release on PyPI is missing the file and the entire `images` directory, so the `submit_video` method fails when another thumbnail isn't provided.\r\n\r\nIt isn't just the wheel on PyPI that is missing the image. The source tarball is as well.\r\n\r\nI suspect that a change to [`MANIFEST.in`](https://github.com/praw-dev/praw/blob/master/MANIFEST.in) might solve this problem. Or maybe not, as [this Stack Overflow answer](https://stackoverflow.com/a/25964691/8033766) suggests that `MANIFEST.in` is just for Python 2.6 and earlier.\r\n\r\nAdding an `__init__.py` to the `images` folder would probably [make `find_packages()` in `setup.py`](https://setuptools.readthedocs.io/en/latest/setuptools.html#using-find-packages) notice the folder, but this would be a mis-use of an `__init__.py` since that folder is not a Python package.\r\n\r\n[This page](https://docs.python.org/3.3/distutils/setupscript.html#installing-additional-files) suggests using the `data_files` argument in `setup.py`, but I tried that and couldn't get it to work ([branch here](https://github.com/jarhill0/praw/tree/image-upload-fix), [PyPI testing push here](https://test.pypi.org/project/praw/)).\n", "before_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\nfrom setuptools import find_packages, setup\n\n\nPACKAGE_NAME = \"praw\"\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, \"README.rst\"), encoding=\"utf-8\") as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, \"const.py\"), encoding=\"utf-8\") as fp:\n VERSION = re.search('__version__ = \"([^\"]+)\"', fp.read()).group(1)\n\nextras = {\n \"ci\": [\"coveralls\"],\n \"dev\": [\"pre-commit\"],\n \"lint\": [\"black\", \"flake8\", \"pydocstyle\", \"sphinx\", \"sphinx_rtd_theme\"],\n \"test\": [\n \"betamax >=0.8, <0.9\",\n \"betamax-matchers >=0.3.0, <0.5\",\n \"betamax-serializers >=0.2, <0.3\",\n \"mock >=0.8\",\n \"pytest >=2.7.3\",\n ],\n}\nextras[\"dev\"] += extras[\"lint\"] + extras[\"test\"]\n\nsetup(\n name=PACKAGE_NAME,\n author=\"Bryce Boe\",\n author_email=\"[email protected]\",\n python_requires=\">=3.5\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 
3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Utilities\",\n ],\n description=(\n \"PRAW, an acronym for `Python Reddit API Wrapper`, is a \"\n \"python package that allows for simple access to \"\n \"reddit's API.\"\n ),\n extras_require=extras,\n install_requires=[\n \"prawcore >=1.0.1, <2.0\",\n \"update_checker >=0.16\",\n \"websocket-client >=0.54.0\",\n ],\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\"]},\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n url=\"https://praw.readthedocs.org/\",\n version=VERSION,\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\nfrom setuptools import find_packages, setup\n\n\nPACKAGE_NAME = \"praw\"\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, \"README.rst\"), encoding=\"utf-8\") as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, \"const.py\"), encoding=\"utf-8\") as fp:\n VERSION = re.search('__version__ = \"([^\"]+)\"', fp.read()).group(1)\n\nextras = {\n \"ci\": [\"coveralls\"],\n \"dev\": [\"pre-commit\"],\n \"lint\": [\"black\", \"flake8\", \"pydocstyle\", \"sphinx\", \"sphinx_rtd_theme\"],\n \"test\": [\n \"betamax >=0.8, <0.9\",\n \"betamax-matchers >=0.3.0, <0.5\",\n \"betamax-serializers >=0.2, <0.3\",\n \"mock >=0.8\",\n \"pytest >=2.7.3\",\n ],\n}\nextras[\"dev\"] += extras[\"lint\"] + extras[\"test\"]\n\nsetup(\n name=PACKAGE_NAME,\n author=\"Bryce Boe\",\n author_email=\"[email protected]\",\n python_requires=\">=3.5\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Utilities\",\n ],\n description=(\n \"PRAW, an acronym for `Python Reddit API Wrapper`, is a \"\n \"python package that allows for simple access to \"\n \"reddit's API.\"\n ),\n extras_require=extras,\n install_requires=[\n \"prawcore >=1.0.1, <2.0\",\n \"update_checker >=0.16\",\n \"websocket-client >=0.54.0\",\n ],\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n package_data={\n \"\": [\"LICENSE.txt\"],\n PACKAGE_NAME: [\"*.ini\", \"images/*.jpg\"],\n },\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n url=\"https://praw.readthedocs.org/\",\n version=VERSION,\n)\n", "path": "setup.py"}]}
1,420
139
gh_patches_debug_28202
rasdani/github-patches
git_diff
open-mmlab__mmpose-493
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Seed in sampler https://github.com/open-mmlab/mmdetection/pull/4665 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mmpose/datasets/samplers/distributed_sampler.py` Content: ``` 1 import torch 2 from torch.utils.data import DistributedSampler as _DistributedSampler 3 4 5 class DistributedSampler(_DistributedSampler): 6 """DistributedSampler inheriting from 7 `torch.utils.data.DistributedSampler`. 8 9 In pytorch of lower versions, there is no `shuffle` argument. This child 10 class will port one to DistributedSampler. 11 """ 12 13 def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): 14 super().__init__(dataset, num_replicas=num_replicas, rank=rank) 15 self.shuffle = shuffle 16 17 def __iter__(self): 18 """Deterministically shuffle based on epoch.""" 19 if self.shuffle: 20 g = torch.Generator() 21 g.manual_seed(self.epoch) 22 indices = torch.randperm(len(self.dataset), generator=g).tolist() 23 else: 24 indices = torch.arange(len(self.dataset)).tolist() 25 26 # add extra samples to make it evenly divisible 27 indices += indices[:(self.total_size - len(indices))] 28 assert len(indices) == self.total_size 29 30 # subsample 31 indices = indices[self.rank:self.total_size:self.num_replicas] 32 assert len(indices) == self.num_samples 33 return iter(indices) 34 ``` Path: `mmpose/datasets/builder.py` Content: ``` 1 import platform 2 import random 3 from functools import partial 4 5 import numpy as np 6 from mmcv.parallel import collate 7 from mmcv.runner import get_dist_info 8 from mmcv.utils import build_from_cfg 9 from mmcv.utils.parrots_wrapper import _get_dataloader 10 11 from .dataset_wrappers import RepeatDataset 12 from .registry import DATASETS 13 from .samplers import DistributedSampler 14 15 if platform.system() != 'Windows': 16 # https://github.com/pytorch/pytorch/issues/973 17 import resource 18 rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) 19 hard_limit = rlimit[1] 20 soft_limit = min(4096, hard_limit) 21 resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) 22 23 24 def build_dataset(cfg, default_args=None): 25 """Build a dataset from config dict. 26 27 Args: 28 cfg (dict): Config dict. It should at least contain the key "type". 29 default_args (dict, optional): Default initialization arguments. 30 Default: None. 31 32 Returns: 33 Dataset: The constructed dataset. 34 """ 35 if cfg['type'] == 'RepeatDataset': 36 dataset = RepeatDataset( 37 build_dataset(cfg['dataset'], default_args), cfg['times']) 38 else: 39 dataset = build_from_cfg(cfg, DATASETS, default_args) 40 return dataset 41 42 43 def build_dataloader(dataset, 44 samples_per_gpu, 45 workers_per_gpu, 46 num_gpus=1, 47 dist=True, 48 shuffle=True, 49 seed=None, 50 drop_last=True, 51 pin_memory=True, 52 **kwargs): 53 """Build PyTorch DataLoader. 54 55 In distributed training, each GPU/process has a dataloader. 56 In non-distributed training, there is only one dataloader for all GPUs. 57 58 Args: 59 dataset (Dataset): A PyTorch dataset. 60 samples_per_gpu (int): Number of training samples on each GPU, i.e., 61 batch size of each GPU. 62 workers_per_gpu (int): How many subprocesses to use for data loading 63 for each GPU. 64 num_gpus (int): Number of GPUs. Only used in non-distributed training. 65 dist (bool): Distributed training/test or not. Default: True. 
66 shuffle (bool): Whether to shuffle the data at every epoch. 67 Default: True. 68 drop_last (bool): Whether to drop the last incomplete batch in epoch. 69 Default: True 70 pin_memory (bool): Whether to use pin_memory in DataLoader. 71 Default: True 72 kwargs: any keyword argument to be used to initialize DataLoader 73 74 Returns: 75 DataLoader: A PyTorch dataloader. 76 """ 77 rank, world_size = get_dist_info() 78 if dist: 79 sampler = DistributedSampler( 80 dataset, world_size, rank, shuffle=shuffle) 81 shuffle = False 82 batch_size = samples_per_gpu 83 num_workers = workers_per_gpu 84 else: 85 sampler = None 86 batch_size = num_gpus * samples_per_gpu 87 num_workers = num_gpus * workers_per_gpu 88 89 init_fn = partial( 90 worker_init_fn, num_workers=num_workers, rank=rank, 91 seed=seed) if seed is not None else None 92 93 _, DataLoader = _get_dataloader() 94 data_loader = DataLoader( 95 dataset, 96 batch_size=batch_size, 97 sampler=sampler, 98 num_workers=num_workers, 99 collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), 100 pin_memory=pin_memory, 101 shuffle=shuffle, 102 worker_init_fn=init_fn, 103 drop_last=drop_last, 104 **kwargs) 105 106 return data_loader 107 108 109 def worker_init_fn(worker_id, num_workers, rank, seed): 110 """Init the random seed for various workers.""" 111 # The seed of each worker equals to 112 # num_worker * rank + worker_id + user_seed 113 worker_seed = num_workers * rank + worker_id + seed 114 np.random.seed(worker_seed) 115 random.seed(worker_seed) 116 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py --- a/mmpose/datasets/builder.py +++ b/mmpose/datasets/builder.py @@ -77,7 +77,7 @@ rank, world_size = get_dist_info() if dist: sampler = DistributedSampler( - dataset, world_size, rank, shuffle=shuffle) + dataset, world_size, rank, shuffle=shuffle, seed=seed) shuffle = False batch_size = samples_per_gpu num_workers = workers_per_gpu diff --git a/mmpose/datasets/samplers/distributed_sampler.py b/mmpose/datasets/samplers/distributed_sampler.py --- a/mmpose/datasets/samplers/distributed_sampler.py +++ b/mmpose/datasets/samplers/distributed_sampler.py @@ -10,15 +10,22 @@ class will port one to DistributedSampler. """ - def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): - super().__init__(dataset, num_replicas=num_replicas, rank=rank) - self.shuffle = shuffle + def __init__(self, + dataset, + num_replicas=None, + rank=None, + shuffle=True, + seed=0): + super().__init__( + dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) + # for the compatibility from PyTorch 1.3+ + self.seed = seed if seed is not None else 0 def __iter__(self): """Deterministically shuffle based on epoch.""" if self.shuffle: g = torch.Generator() - g.manual_seed(self.epoch) + g.manual_seed(self.epoch + self.seed) indices = torch.randperm(len(self.dataset), generator=g).tolist() else: indices = torch.arange(len(self.dataset)).tolist()
{"golden_diff": "diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py\n--- a/mmpose/datasets/builder.py\n+++ b/mmpose/datasets/builder.py\n@@ -77,7 +77,7 @@\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n- dataset, world_size, rank, shuffle=shuffle)\n+ dataset, world_size, rank, shuffle=shuffle, seed=seed)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\ndiff --git a/mmpose/datasets/samplers/distributed_sampler.py b/mmpose/datasets/samplers/distributed_sampler.py\n--- a/mmpose/datasets/samplers/distributed_sampler.py\n+++ b/mmpose/datasets/samplers/distributed_sampler.py\n@@ -10,15 +10,22 @@\n class will port one to DistributedSampler.\n \"\"\"\n \n- def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):\n- super().__init__(dataset, num_replicas=num_replicas, rank=rank)\n- self.shuffle = shuffle\n+ def __init__(self,\n+ dataset,\n+ num_replicas=None,\n+ rank=None,\n+ shuffle=True,\n+ seed=0):\n+ super().__init__(\n+ dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n+ # for the compatibility from PyTorch 1.3+\n+ self.seed = seed if seed is not None else 0\n \n def __iter__(self):\n \"\"\"Deterministically shuffle based on epoch.\"\"\"\n if self.shuffle:\n g = torch.Generator()\n- g.manual_seed(self.epoch)\n+ g.manual_seed(self.epoch + self.seed)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n", "issue": "Seed in sampler\nhttps://github.com/open-mmlab/mmdetection/pull/4665\n", "before_files": [{"content": "import torch\nfrom torch.utils.data import DistributedSampler as _DistributedSampler\n\n\nclass DistributedSampler(_DistributedSampler):\n \"\"\"DistributedSampler inheriting from\n `torch.utils.data.DistributedSampler`.\n\n In pytorch of lower versions, there is no `shuffle` argument. 
This child\n class will port one to DistributedSampler.\n \"\"\"\n\n def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank)\n self.shuffle = shuffle\n\n def __iter__(self):\n \"\"\"Deterministically shuffle based on epoch.\"\"\"\n if self.shuffle:\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n return iter(indices)\n", "path": "mmpose/datasets/samplers/distributed_sampler.py"}, {"content": "import platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import build_from_cfg\nfrom mmcv.utils.parrots_wrapper import _get_dataloader\n\nfrom .dataset_wrappers import RepeatDataset\nfrom .registry import DATASETS\nfrom .samplers import DistributedSampler\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n hard_limit = rlimit[1]\n soft_limit = min(4096, hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n\n\ndef build_dataset(cfg, default_args=None):\n \"\"\"Build a dataset from config dict.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key \"type\".\n default_args (dict, optional): Default initialization arguments.\n Default: None.\n\n Returns:\n Dataset: The constructed dataset.\n \"\"\"\n if cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n return dataset\n\n\ndef build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=True,\n pin_memory=True,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. 
Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: True\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n _, DataLoader = _get_dataloader()\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n \"\"\"Init the random seed for various workers.\"\"\"\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n", "path": "mmpose/datasets/builder.py"}], "after_files": [{"content": "import torch\nfrom torch.utils.data import DistributedSampler as _DistributedSampler\n\n\nclass DistributedSampler(_DistributedSampler):\n \"\"\"DistributedSampler inheriting from\n `torch.utils.data.DistributedSampler`.\n\n In pytorch of lower versions, there is no `shuffle` argument. 
This child\n class will port one to DistributedSampler.\n \"\"\"\n\n def __init__(self,\n dataset,\n num_replicas=None,\n rank=None,\n shuffle=True,\n seed=0):\n super().__init__(\n dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n # for the compatibility from PyTorch 1.3+\n self.seed = seed if seed is not None else 0\n\n def __iter__(self):\n \"\"\"Deterministically shuffle based on epoch.\"\"\"\n if self.shuffle:\n g = torch.Generator()\n g.manual_seed(self.epoch + self.seed)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n return iter(indices)\n", "path": "mmpose/datasets/samplers/distributed_sampler.py"}, {"content": "import platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import build_from_cfg\nfrom mmcv.utils.parrots_wrapper import _get_dataloader\n\nfrom .dataset_wrappers import RepeatDataset\nfrom .registry import DATASETS\nfrom .samplers import DistributedSampler\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n hard_limit = rlimit[1]\n soft_limit = min(4096, hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n\n\ndef build_dataset(cfg, default_args=None):\n \"\"\"Build a dataset from config dict.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key \"type\".\n default_args (dict, optional): Default initialization arguments.\n Default: None.\n\n Returns:\n Dataset: The constructed dataset.\n \"\"\"\n if cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n return dataset\n\n\ndef build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=True,\n pin_memory=True,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. 
Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: True\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle, seed=seed)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n _, DataLoader = _get_dataloader()\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n \"\"\"Init the random seed for various workers.\"\"\"\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n", "path": "mmpose/datasets/builder.py"}]}
1,715
426
gh_patches_debug_11774
rasdani/github-patches
git_diff
fossasia__open-event-server-6770
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Task send_event_fee_notification failing ``` raised unexpected: AttributeError("'Ticket' object has no attribute 'ticket_id'") Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/celery/app/trace.py", line 385, in trace_task R = retval = fun(*args, **kwargs) File "/usr/local/lib/python3.7/site-packages/flask_celeryext/app.py", line 101, in __call__ res = Task.__call__(self, *args, **kwargs) File "/usr/local/lib/python3.7/site-packages/celery/app/trace.py", line 650, in __protected_call__ return self.run(*args, **kwargs) File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 161, in _inner reraise(*exc_info) File "/usr/local/lib/python3.7/site-packages/sentry_sdk/_compat.py", line 57, in reraise raise value File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py", line 156, in _inner return f(*args, **kwargs) File "/data/app/app/api/helpers/scheduled_jobs.py", line 90, in send_event_fee_notification ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id') AttributeError: 'Ticket' object has no attribute 'ticket_id' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/api/helpers/scheduled_jobs.py` Content: ``` 1 import datetime 2 3 import pytz 4 from dateutil.relativedelta import relativedelta 5 from flask import render_template 6 from flask_celeryext import RequestContextTask 7 from app.instance import celery 8 9 from app.api.helpers.db import safe_query, save_to_db 10 from app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \ 11 send_followup_email_for_monthly_fee_payment 12 from app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \ 13 send_notif_after_event 14 from app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name 15 from app.api.helpers.utilities import monthdelta 16 from app.api.helpers.files import create_save_pdf 17 from app.api.helpers.storage import UPLOAD_PATHS 18 from app.models import db 19 from app.models.event import Event 20 from app.models.event_invoice import EventInvoice 21 from app.models.order import Order 22 from app.models.speaker import Speaker 23 from app.models.session import Session 24 from app.models.ticket import Ticket 25 from app.models.ticket_fee import TicketFees, get_fee 26 from app.models.ticket_holder import TicketHolder 27 28 from app.settings import get_settings 29 30 31 @celery.task(base=RequestContextTask, name='send.after.event.mail') 32 def send_after_event_mail(): 33 from app.instance import current_app as app 34 with app.app_context(): 35 events = Event.query.filter_by(state='published', deleted_at=None).all() 36 for event in events: 37 organizers = get_user_event_roles_by_role_name(event.id, 'organizer') 38 speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all() 39 owner = get_user_event_roles_by_role_name(event.id, 'owner').first() 40 current_time = datetime.datetime.now(pytz.timezone(event.timezone)) 41 time_difference = current_time - event.ends_at 42 time_difference_minutes = (time_difference.days * 24 * 60) + \ 43 (time_difference.seconds / 60) 44 frontend_url = get_settings()['frontend_url'] 45 if current_time > event.ends_at and time_difference_minutes < 1440: 46 for speaker in speakers: 
47 if not speaker.is_email_overridden: 48 send_email_after_event(speaker.user.email, event.name, frontend_url) 49 send_notif_after_event(speaker.user, event.name) 50 for organizer in organizers: 51 send_email_after_event(organizer.user.email, event.name, frontend_url) 52 send_notif_after_event(organizer.user, event.name) 53 if owner: 54 send_email_after_event(owner.user.email, event.name, frontend_url) 55 send_notif_after_event(owner.user, event.name) 56 57 58 @celery.task(base=RequestContextTask, name='change.session.state.on.event.completion') 59 def change_session_state_on_event_completion(): 60 from app.instance import current_app as app 61 with app.app_context(): 62 sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\ 63 .filter(Event.ends_at < datetime.datetime.now()) 64 for session in sessions_to_be_changed: 65 session.state = 'rejected' 66 save_to_db(session, 'Changed {} session state to rejected'.format(session.title)) 67 68 69 @celery.task(base=RequestContextTask, name='send.event.fee.notification') 70 def send_event_fee_notification(): 71 from app.instance import current_app as app 72 with app.app_context(): 73 events = Event.query.filter_by(deleted_at=None, state='published').all() 74 for event in events: 75 latest_invoice = EventInvoice.query.filter_by( 76 event_id=event.id).order_by(EventInvoice.created_at.desc()).first() 77 78 if latest_invoice: 79 orders = Order.query \ 80 .filter_by(event_id=event.id) \ 81 .filter_by(status='completed') \ 82 .filter(Order.completed_at > latest_invoice.created_at).all() 83 else: 84 orders = Order.query.filter_by( 85 event_id=event.id).filter_by(status='completed').all() 86 87 fee_total = 0 88 for order in orders: 89 for order_ticket in order.tickets: 90 ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id') 91 if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0: 92 fee = ticket.price * (get_fee(event.payment_country, order.event.payment_currency) / 100.0) 93 fee_total += fee 94 95 if fee_total > 0: 96 owner = get_user_event_roles_by_role_name(event.id, 'owner').first() 97 new_invoice = EventInvoice( 98 amount=fee_total, event_id=event.id, user_id=owner.user.id) 99 100 if event.discount_code_id and event.discount_code: 101 r = relativedelta(datetime.datetime.utcnow(), event.created_at) 102 if r <= event.discount_code.valid_till: 103 new_invoice.amount = fee_total - \ 104 (fee_total * (event.discount_code.value / 100.0)) 105 new_invoice.discount_code_id = event.discount_code_id 106 107 save_to_db(new_invoice) 108 prev_month = monthdelta(new_invoice.created_at, 1).strftime( 109 "%b %Y") # Displayed as Aug 2016 110 app_name = get_settings()['app_name'] 111 frontend_url = get_settings()['frontend_url'] 112 link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier) 113 send_email_for_monthly_fee_payment(new_invoice.user.email, 114 event.name, 115 prev_month, 116 new_invoice.amount, 117 app_name, 118 link) 119 send_notif_monthly_fee_payment(new_invoice.user, 120 event.name, 121 prev_month, 122 new_invoice.amount, 123 app_name, 124 link, 125 new_invoice.event_id) 126 127 128 @celery.task(base=RequestContextTask, name='send.event.fee.notification.followup') 129 def send_event_fee_notification_followup(): 130 from app.instance import current_app as app 131 with app.app_context(): 132 incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'paid').all() 133 for incomplete_invoice in incomplete_invoices: 134 if incomplete_invoice.amount > 0: 135 prev_month 
= monthdelta(incomplete_invoice.created_at, 1).strftime( 136 "%b %Y") # Displayed as Aug 2016 137 app_name = get_settings()['app_name'] 138 frontend_url = get_settings()['frontend_url'] 139 link = '{}/event-invoice/{}/review'.format(frontend_url, 140 incomplete_invoice.identifier) 141 send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email, 142 incomplete_invoice.event.name, 143 prev_month, 144 incomplete_invoice.amount, 145 app_name, 146 link) 147 send_followup_notif_monthly_fee_payment(incomplete_invoice.user, 148 incomplete_invoice.event.name, 149 prev_month, 150 incomplete_invoice.amount, 151 app_name, 152 link, 153 incomplete_invoice.event.id) 154 155 156 @celery.task(base=RequestContextTask, name='expire.pending.tickets') 157 def expire_pending_tickets(): 158 from app.instance import current_app as app 159 with app.app_context(): 160 db.session.query(Order).filter(Order.status == 'pending', 161 (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\ 162 update({'status': 'expired'}) 163 db.session.commit() 164 165 166 @celery.task(base=RequestContextTask, name='delete.ticket.holders.no.order.id') 167 def delete_ticket_holders_no_order_id(): 168 from app.instance import current_app as app 169 with app.app_context(): 170 order_expiry_time = get_settings()['order_expiry_time'] 171 TicketHolder.query.filter(TicketHolder.order_id == None, TicketHolder.deleted_at.is_(None), 172 TicketHolder.created_at + datetime.timedelta(minutes=order_expiry_time) 173 < datetime.datetime.utcnow()).delete(synchronize_session=False) 174 db.session.commit() 175 176 177 @celery.task(base=RequestContextTask, name='event.invoices.mark.due') 178 def event_invoices_mark_due(): 179 from app.instance import current_app as app 180 with app.app_context(): 181 db.session.query(EventInvoice).filter( 182 EventInvoice.status == 'upcoming', 183 Event.id == EventInvoice.event_id, 184 Event.ends_at >= datetime.datetime.now(), 185 (EventInvoice.created_at + datetime.timedelta(days=30) <= datetime.datetime.now()) 186 ).update({EventInvoice.status: 'due'}, synchronize_session=False) 187 188 189 @celery.task(base=RequestContextTask, name='send.monthly.event.invoice') 190 def send_monthly_event_invoice(): 191 from app.instance import current_app as app 192 with app.app_context(): 193 events = Event.query.filter_by(deleted_at=None, state='published').all() 194 for event in events: 195 # calculate net & gross revenues 196 user = event.owner 197 admin_info = get_settings() 198 currency = event.payment_currency 199 ticket_fee_object = db.session.query(TicketFees).filter_by(currency=currency).one() 200 ticket_fee_percentage = ticket_fee_object.service_fee 201 ticket_fee_maximum = ticket_fee_object.maximum_fee 202 orders = Order.query.filter_by(event=event).all() 203 gross_revenue = event.calc_monthly_revenue() 204 ticket_fees = event.tickets_sold * (ticket_fee_percentage / 100) 205 if ticket_fees > ticket_fee_maximum: 206 ticket_fees = ticket_fee_maximum 207 net_revenue = gross_revenue - ticket_fees 208 payment_details = { 209 'tickets_sold': event.tickets_sold, 210 'gross_revenue': gross_revenue, 211 'net_revenue': net_revenue, 212 'amount_payable': ticket_fees 213 } 214 # save invoice as pdf 215 pdf = create_save_pdf(render_template('pdf/event_invoice.html', orders=orders, user=user, 216 admin_info=admin_info, currency=currency, event=event, 217 ticket_fee_object=ticket_fee_object, payment_details=payment_details, 218 net_revenue=net_revenue), UPLOAD_PATHS['pdf']['event_invoice'], 219 
dir_path='/static/uploads/pdf/event_invoices/', identifier=event.identifier) 220 # save event_invoice info to DB 221 222 event_invoice = EventInvoice(amount=net_revenue, invoice_pdf_url=pdf, event_id=event.id) 223 save_to_db(event_invoice) 224 225 226 @celery.on_after_configure.connect 227 def setup_scheduled_task(sender, **kwargs): 228 from celery.schedules import crontab 229 sender.add_periodic_task(crontab(hour='*/5', minute=30), send_after_event_mail) 230 sender.add_periodic_task(crontab(day_of_week='0-6'), send_event_fee_notification) 231 sender.add_periodic_task(crontab(minute=0, hour=0, day_of_month=1), send_event_fee_notification_followup) 232 sender.add_periodic_task(crontab(hour='*/5', minute=30), change_session_state_on_event_completion) 233 sender.add_periodic_task(crontab(minute='*/45'), expire_pending_tickets) 234 sender.add_periodic_task(crontab(minute=0, hour=0, day_of_month=1), send_monthly_event_invoice) 235 sender.add_periodic_task(crontab(minute=0, hour='*/5'), event_invoices_mark_due) 236 sender.add_periodic_task(crontab(minute='*/5'), delete_ticket_holders_no_order_id) 237 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/api/helpers/scheduled_jobs.py b/app/api/helpers/scheduled_jobs.py --- a/app/api/helpers/scheduled_jobs.py +++ b/app/api/helpers/scheduled_jobs.py @@ -86,8 +86,7 @@ fee_total = 0 for order in orders: - for order_ticket in order.tickets: - ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id') + for ticket in order.tickets: if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0: fee = ticket.price * (get_fee(event.payment_country, order.event.payment_currency) / 100.0) fee_total += fee
{"golden_diff": "diff --git a/app/api/helpers/scheduled_jobs.py b/app/api/helpers/scheduled_jobs.py\n--- a/app/api/helpers/scheduled_jobs.py\n+++ b/app/api/helpers/scheduled_jobs.py\n@@ -86,8 +86,7 @@\n \n fee_total = 0\n for order in orders:\n- for order_ticket in order.tickets:\n- ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')\n+ for ticket in order.tickets:\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(event.payment_country, order.event.payment_currency) / 100.0)\n fee_total += fee\n", "issue": "Task send_event_fee_notification failing\n```\r\nraised unexpected: AttributeError(\"'Ticket' object has no attribute 'ticket_id'\")\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/celery/app/trace.py\", line 385, in trace_task\r\n R = retval = fun(*args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/flask_celeryext/app.py\", line 101, in __call__\r\n res = Task.__call__(self, *args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/celery/app/trace.py\", line 650, in __protected_call__\r\n return self.run(*args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py\", line 161, in _inner\r\n reraise(*exc_info)\r\n File \"/usr/local/lib/python3.7/site-packages/sentry_sdk/_compat.py\", line 57, in reraise\r\n raise value\r\n File \"/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/celery.py\", line 156, in _inner\r\n return f(*args, **kwargs)\r\n File \"/data/app/app/api/helpers/scheduled_jobs.py\", line 90, in send_event_fee_notification\r\n ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')\r\nAttributeError: 'Ticket' object has no attribute 'ticket_id'\r\n\r\n```\n", "before_files": [{"content": "import datetime\n\nimport pytz\nfrom dateutil.relativedelta import relativedelta\nfrom flask import render_template\nfrom flask_celeryext import RequestContextTask\nfrom app.instance import celery\n\nfrom app.api.helpers.db import safe_query, save_to_db\nfrom app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \\\n send_followup_email_for_monthly_fee_payment\nfrom app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \\\n send_notif_after_event\nfrom app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name\nfrom app.api.helpers.utilities import monthdelta\nfrom app.api.helpers.files import create_save_pdf\nfrom app.api.helpers.storage import UPLOAD_PATHS\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.speaker import Speaker\nfrom app.models.session import Session\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_fee import TicketFees, get_fee\nfrom app.models.ticket_holder import TicketHolder\n\nfrom app.settings import get_settings\n\n\[email protected](base=RequestContextTask, name='send.after.event.mail')\ndef send_after_event_mail():\n from app.instance import current_app as app\n with app.app_context():\n events = Event.query.filter_by(state='published', deleted_at=None).all()\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()\n owner = get_user_event_roles_by_role_name(event.id, 'owner').first()\n 
current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + \\\n (time_difference.seconds / 60)\n frontend_url = get_settings()['frontend_url']\n if current_time > event.ends_at and time_difference_minutes < 1440:\n for speaker in speakers:\n if not speaker.is_email_overridden:\n send_email_after_event(speaker.user.email, event.name, frontend_url)\n send_notif_after_event(speaker.user, event.name)\n for organizer in organizers:\n send_email_after_event(organizer.user.email, event.name, frontend_url)\n send_notif_after_event(organizer.user, event.name)\n if owner:\n send_email_after_event(owner.user.email, event.name, frontend_url)\n send_notif_after_event(owner.user, event.name)\n\n\[email protected](base=RequestContextTask, name='change.session.state.on.event.completion')\ndef change_session_state_on_event_completion():\n from app.instance import current_app as app\n with app.app_context():\n sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\\\n .filter(Event.ends_at < datetime.datetime.now())\n for session in sessions_to_be_changed:\n session.state = 'rejected'\n save_to_db(session, 'Changed {} session state to rejected'.format(session.title))\n\n\[email protected](base=RequestContextTask, name='send.event.fee.notification')\ndef send_event_fee_notification():\n from app.instance import current_app as app\n with app.app_context():\n events = Event.query.filter_by(deleted_at=None, state='published').all()\n for event in events:\n latest_invoice = EventInvoice.query.filter_by(\n event_id=event.id).order_by(EventInvoice.created_at.desc()).first()\n\n if latest_invoice:\n orders = Order.query \\\n .filter_by(event_id=event.id) \\\n .filter_by(status='completed') \\\n .filter(Order.completed_at > latest_invoice.created_at).all()\n else:\n orders = Order.query.filter_by(\n event_id=event.id).filter_by(status='completed').all()\n\n fee_total = 0\n for order in orders:\n for order_ticket in order.tickets:\n ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(event.payment_country, order.event.payment_currency) / 100.0)\n fee_total += fee\n\n if fee_total > 0:\n owner = get_user_event_roles_by_role_name(event.id, 'owner').first()\n new_invoice = EventInvoice(\n amount=fee_total, event_id=event.id, user_id=owner.user.id)\n\n if event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.datetime.utcnow(), event.created_at)\n if r <= event.discount_code.valid_till:\n new_invoice.amount = fee_total - \\\n (fee_total * (event.discount_code.value / 100.0))\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)\n send_email_for_monthly_fee_payment(new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link)\n send_notif_monthly_fee_payment(new_invoice.user,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link,\n new_invoice.event_id)\n\n\[email protected](base=RequestContextTask, name='send.event.fee.notification.followup')\ndef send_event_fee_notification_followup():\n from 
app.instance import current_app as app\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'paid').all()\n for incomplete_invoice in incomplete_invoices:\n if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/event-invoice/{}/review'.format(frontend_url,\n incomplete_invoice.identifier)\n send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link)\n send_followup_notif_monthly_fee_payment(incomplete_invoice.user,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link,\n incomplete_invoice.event.id)\n\n\[email protected](base=RequestContextTask, name='expire.pending.tickets')\ndef expire_pending_tickets():\n from app.instance import current_app as app\n with app.app_context():\n db.session.query(Order).filter(Order.status == 'pending',\n (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\\\n update({'status': 'expired'})\n db.session.commit()\n\n\[email protected](base=RequestContextTask, name='delete.ticket.holders.no.order.id')\ndef delete_ticket_holders_no_order_id():\n from app.instance import current_app as app\n with app.app_context():\n order_expiry_time = get_settings()['order_expiry_time']\n TicketHolder.query.filter(TicketHolder.order_id == None, TicketHolder.deleted_at.is_(None),\n TicketHolder.created_at + datetime.timedelta(minutes=order_expiry_time)\n < datetime.datetime.utcnow()).delete(synchronize_session=False)\n db.session.commit()\n\n\[email protected](base=RequestContextTask, name='event.invoices.mark.due')\ndef event_invoices_mark_due():\n from app.instance import current_app as app\n with app.app_context():\n db.session.query(EventInvoice).filter(\n EventInvoice.status == 'upcoming',\n Event.id == EventInvoice.event_id,\n Event.ends_at >= datetime.datetime.now(),\n (EventInvoice.created_at + datetime.timedelta(days=30) <= datetime.datetime.now())\n ).update({EventInvoice.status: 'due'}, synchronize_session=False)\n\n\[email protected](base=RequestContextTask, name='send.monthly.event.invoice')\ndef send_monthly_event_invoice():\n from app.instance import current_app as app\n with app.app_context():\n events = Event.query.filter_by(deleted_at=None, state='published').all()\n for event in events:\n # calculate net & gross revenues\n user = event.owner\n admin_info = get_settings()\n currency = event.payment_currency\n ticket_fee_object = db.session.query(TicketFees).filter_by(currency=currency).one()\n ticket_fee_percentage = ticket_fee_object.service_fee\n ticket_fee_maximum = ticket_fee_object.maximum_fee\n orders = Order.query.filter_by(event=event).all()\n gross_revenue = event.calc_monthly_revenue()\n ticket_fees = event.tickets_sold * (ticket_fee_percentage / 100)\n if ticket_fees > ticket_fee_maximum:\n ticket_fees = ticket_fee_maximum\n net_revenue = gross_revenue - ticket_fees\n payment_details = {\n 'tickets_sold': event.tickets_sold,\n 'gross_revenue': gross_revenue,\n 'net_revenue': net_revenue,\n 'amount_payable': ticket_fees\n }\n # save invoice as pdf\n pdf = create_save_pdf(render_template('pdf/event_invoice.html', orders=orders, user=user,\n admin_info=admin_info, currency=currency, event=event,\n ticket_fee_object=ticket_fee_object, 
payment_details=payment_details,\n net_revenue=net_revenue), UPLOAD_PATHS['pdf']['event_invoice'],\n dir_path='/static/uploads/pdf/event_invoices/', identifier=event.identifier)\n # save event_invoice info to DB\n\n event_invoice = EventInvoice(amount=net_revenue, invoice_pdf_url=pdf, event_id=event.id)\n save_to_db(event_invoice)\n\n\[email protected]_after_configure.connect\ndef setup_scheduled_task(sender, **kwargs):\n from celery.schedules import crontab\n sender.add_periodic_task(crontab(hour='*/5', minute=30), send_after_event_mail)\n sender.add_periodic_task(crontab(day_of_week='0-6'), send_event_fee_notification)\n sender.add_periodic_task(crontab(minute=0, hour=0, day_of_month=1), send_event_fee_notification_followup)\n sender.add_periodic_task(crontab(hour='*/5', minute=30), change_session_state_on_event_completion)\n sender.add_periodic_task(crontab(minute='*/45'), expire_pending_tickets)\n sender.add_periodic_task(crontab(minute=0, hour=0, day_of_month=1), send_monthly_event_invoice)\n sender.add_periodic_task(crontab(minute=0, hour='*/5'), event_invoices_mark_due)\n sender.add_periodic_task(crontab(minute='*/5'), delete_ticket_holders_no_order_id)\n", "path": "app/api/helpers/scheduled_jobs.py"}], "after_files": [{"content": "import datetime\n\nimport pytz\nfrom dateutil.relativedelta import relativedelta\nfrom flask import render_template\nfrom flask_celeryext import RequestContextTask\nfrom app.instance import celery\n\nfrom app.api.helpers.db import safe_query, save_to_db\nfrom app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \\\n send_followup_email_for_monthly_fee_payment\nfrom app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \\\n send_notif_after_event\nfrom app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name\nfrom app.api.helpers.utilities import monthdelta\nfrom app.api.helpers.files import create_save_pdf\nfrom app.api.helpers.storage import UPLOAD_PATHS\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.speaker import Speaker\nfrom app.models.session import Session\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_fee import TicketFees, get_fee\nfrom app.models.ticket_holder import TicketHolder\n\nfrom app.settings import get_settings\n\n\[email protected](base=RequestContextTask, name='send.after.event.mail')\ndef send_after_event_mail():\n from app.instance import current_app as app\n with app.app_context():\n events = Event.query.filter_by(state='published', deleted_at=None).all()\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()\n owner = get_user_event_roles_by_role_name(event.id, 'owner').first()\n current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + \\\n (time_difference.seconds / 60)\n frontend_url = get_settings()['frontend_url']\n if current_time > event.ends_at and time_difference_minutes < 1440:\n for speaker in speakers:\n if not speaker.is_email_overridden:\n send_email_after_event(speaker.user.email, event.name, frontend_url)\n send_notif_after_event(speaker.user, event.name)\n for organizer in organizers:\n send_email_after_event(organizer.user.email, 
event.name, frontend_url)\n send_notif_after_event(organizer.user, event.name)\n if owner:\n send_email_after_event(owner.user.email, event.name, frontend_url)\n send_notif_after_event(owner.user, event.name)\n\n\[email protected](base=RequestContextTask, name='change.session.state.on.event.completion')\ndef change_session_state_on_event_completion():\n from app.instance import current_app as app\n with app.app_context():\n sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\\\n .filter(Event.ends_at < datetime.datetime.now())\n for session in sessions_to_be_changed:\n session.state = 'rejected'\n save_to_db(session, 'Changed {} session state to rejected'.format(session.title))\n\n\[email protected](base=RequestContextTask, name='send.event.fee.notification')\ndef send_event_fee_notification():\n from app.instance import current_app as app\n with app.app_context():\n events = Event.query.filter_by(deleted_at=None, state='published').all()\n for event in events:\n latest_invoice = EventInvoice.query.filter_by(\n event_id=event.id).order_by(EventInvoice.created_at.desc()).first()\n\n if latest_invoice:\n orders = Order.query \\\n .filter_by(event_id=event.id) \\\n .filter_by(status='completed') \\\n .filter(Order.completed_at > latest_invoice.created_at).all()\n else:\n orders = Order.query.filter_by(\n event_id=event.id).filter_by(status='completed').all()\n\n fee_total = 0\n for order in orders:\n for ticket in order.tickets:\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(event.payment_country, order.event.payment_currency) / 100.0)\n fee_total += fee\n\n if fee_total > 0:\n owner = get_user_event_roles_by_role_name(event.id, 'owner').first()\n new_invoice = EventInvoice(\n amount=fee_total, event_id=event.id, user_id=owner.user.id)\n\n if event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.datetime.utcnow(), event.created_at)\n if r <= event.discount_code.valid_till:\n new_invoice.amount = fee_total - \\\n (fee_total * (event.discount_code.value / 100.0))\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)\n send_email_for_monthly_fee_payment(new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link)\n send_notif_monthly_fee_payment(new_invoice.user,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link,\n new_invoice.event_id)\n\n\[email protected](base=RequestContextTask, name='send.event.fee.notification.followup')\ndef send_event_fee_notification_followup():\n from app.instance import current_app as app\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'paid').all()\n for incomplete_invoice in incomplete_invoices:\n if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/event-invoice/{}/review'.format(frontend_url,\n incomplete_invoice.identifier)\n send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n 
app_name,\n link)\n send_followup_notif_monthly_fee_payment(incomplete_invoice.user,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link,\n incomplete_invoice.event.id)\n\n\[email protected](base=RequestContextTask, name='expire.pending.tickets')\ndef expire_pending_tickets():\n from app.instance import current_app as app\n with app.app_context():\n db.session.query(Order).filter(Order.status == 'pending',\n (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\\\n update({'status': 'expired'})\n db.session.commit()\n\n\[email protected](base=RequestContextTask, name='delete.ticket.holders.no.order.id')\ndef delete_ticket_holders_no_order_id():\n from app.instance import current_app as app\n with app.app_context():\n order_expiry_time = get_settings()['order_expiry_time']\n TicketHolder.query.filter(TicketHolder.order_id == None, TicketHolder.deleted_at.is_(None),\n TicketHolder.created_at + datetime.timedelta(minutes=order_expiry_time)\n < datetime.datetime.utcnow()).delete(synchronize_session=False)\n db.session.commit()\n\n\[email protected](base=RequestContextTask, name='event.invoices.mark.due')\ndef event_invoices_mark_due():\n from app.instance import current_app as app\n with app.app_context():\n db.session.query(EventInvoice).filter(\n EventInvoice.status == 'upcoming',\n Event.id == EventInvoice.event_id,\n Event.ends_at >= datetime.datetime.now(),\n (EventInvoice.created_at + datetime.timedelta(days=30) <= datetime.datetime.now())\n ).update({EventInvoice.status: 'due'}, synchronize_session=False)\n\n\[email protected](base=RequestContextTask, name='send.monthly.event.invoice')\ndef send_monthly_event_invoice():\n from app.instance import current_app as app\n with app.app_context():\n events = Event.query.filter_by(deleted_at=None, state='published').all()\n for event in events:\n # calculate net & gross revenues\n user = event.owner\n admin_info = get_settings()\n currency = event.payment_currency\n ticket_fee_object = db.session.query(TicketFees).filter_by(currency=currency).one()\n ticket_fee_percentage = ticket_fee_object.service_fee\n ticket_fee_maximum = ticket_fee_object.maximum_fee\n orders = Order.query.filter_by(event=event).all()\n gross_revenue = event.calc_monthly_revenue()\n ticket_fees = event.tickets_sold * (ticket_fee_percentage / 100)\n if ticket_fees > ticket_fee_maximum:\n ticket_fees = ticket_fee_maximum\n net_revenue = gross_revenue - ticket_fees\n payment_details = {\n 'tickets_sold': event.tickets_sold,\n 'gross_revenue': gross_revenue,\n 'net_revenue': net_revenue,\n 'amount_payable': ticket_fees\n }\n # save invoice as pdf\n pdf = create_save_pdf(render_template('pdf/event_invoice.html', orders=orders, user=user,\n admin_info=admin_info, currency=currency, event=event,\n ticket_fee_object=ticket_fee_object, payment_details=payment_details,\n net_revenue=net_revenue), UPLOAD_PATHS['pdf']['event_invoice'],\n dir_path='/static/uploads/pdf/event_invoices/', identifier=event.identifier)\n # save event_invoice info to DB\n\n event_invoice = EventInvoice(amount=net_revenue, invoice_pdf_url=pdf, event_id=event.id)\n save_to_db(event_invoice)\n\n\[email protected]_after_configure.connect\ndef setup_scheduled_task(sender, **kwargs):\n from celery.schedules import crontab\n sender.add_periodic_task(crontab(hour='*/5', minute=30), send_after_event_mail)\n sender.add_periodic_task(crontab(day_of_week='0-6'), send_event_fee_notification)\n sender.add_periodic_task(crontab(minute=0, hour=0, 
day_of_month=1), send_event_fee_notification_followup)\n sender.add_periodic_task(crontab(hour='*/5', minute=30), change_session_state_on_event_completion)\n sender.add_periodic_task(crontab(minute='*/45'), expire_pending_tickets)\n sender.add_periodic_task(crontab(minute=0, hour=0, day_of_month=1), send_monthly_event_invoice)\n sender.add_periodic_task(crontab(minute=0, hour='*/5'), event_invoices_mark_due)\n sender.add_periodic_task(crontab(minute='*/5'), delete_ticket_holders_no_order_id)\n", "path": "app/api/helpers/scheduled_jobs.py"}]}
3,583
162
gh_patches_debug_11059
rasdani/github-patches
git_diff
pyca__cryptography-4133
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Figure out how to fix docs build on rtd It appears to be running into https://github.com/sphinx-doc/sphinx/issues/3976 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 # This file is dual licensed under the terms of the Apache License, Version 4 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 5 # for complete details. 6 7 from __future__ import absolute_import, division, print_function 8 9 import os 10 import platform 11 import subprocess 12 import sys 13 from distutils.command.build import build 14 15 import pkg_resources 16 17 import setuptools 18 from setuptools import find_packages, setup 19 from setuptools.command.install import install 20 from setuptools.command.test import test 21 22 23 if ( 24 pkg_resources.parse_version(setuptools.__version__) < 25 pkg_resources.parse_version("18.5") 26 ): 27 raise RuntimeError( 28 "cryptography requires setuptools 18.5 or newer, please upgrade to a " 29 "newer version of setuptools" 30 ) 31 32 base_dir = os.path.dirname(__file__) 33 src_dir = os.path.join(base_dir, "src") 34 35 # When executing the setup.py, we need to be able to import ourselves, this 36 # means that we need to add the src/ directory to the sys.path. 37 sys.path.insert(0, src_dir) 38 39 about = {} 40 with open(os.path.join(src_dir, "cryptography", "__about__.py")) as f: 41 exec(f.read(), about) 42 43 44 VECTORS_DEPENDENCY = "cryptography_vectors=={0}".format(about['__version__']) 45 46 setup_requirements = [] 47 48 if platform.python_implementation() == "PyPy": 49 if sys.pypy_version_info < (5, 3): 50 raise RuntimeError( 51 "cryptography 1.9 is not compatible with PyPy < 5.3. Please " 52 "upgrade PyPy to use this library." 53 ) 54 else: 55 setup_requirements.append("cffi>=1.7,!=1.11.3") 56 57 test_requirements = [ 58 "pytest>=3.2.1,!=3.3.0", 59 "pretend", 60 "iso8601", 61 "pytz", 62 "hypothesis>=1.11.4", 63 ] 64 65 66 # If there's no vectors locally that probably means we are in a tarball and 67 # need to go and get the matching vectors package from PyPi 68 if not os.path.exists(os.path.join(base_dir, "vectors/setup.py")): 69 test_requirements.append(VECTORS_DEPENDENCY) 70 71 72 class PyTest(test): 73 def finalize_options(self): 74 test.finalize_options(self) 75 self.test_args = [] 76 self.test_suite = True 77 78 # This means there's a vectors/ folder with the package in here. 79 # cd into it, install the vectors package and then refresh sys.path 80 if VECTORS_DEPENDENCY not in test_requirements: 81 subprocess.check_call( 82 [sys.executable, "setup.py", "install"], cwd="vectors" 83 ) 84 pkg_resources.get_distribution("cryptography_vectors").activate() 85 86 def run_tests(self): 87 # Import here because in module scope the eggs are not loaded. 88 import pytest 89 test_args = [os.path.join(base_dir, "tests")] 90 errno = pytest.main(test_args) 91 sys.exit(errno) 92 93 94 def keywords_with_side_effects(argv): 95 """ 96 Get a dictionary with setup keywords that (can) have side effects. 97 98 :param argv: A list of strings with command line arguments. 99 :returns: A dictionary with keyword arguments for the ``setup()`` function. 100 101 This setup.py script uses the setuptools 'setup_requires' feature because 102 this is required by the cffi package to compile extension modules. 
The 103 purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi 104 build process as a result of setup.py invocations that don't need the cffi 105 module to be built (setup.py serves the dual purpose of exposing package 106 metadata). 107 108 All of the options listed by ``python setup.py --help`` that print 109 information should be recognized here. The commands ``clean``, 110 ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized. 111 Any combination of these options and commands is also supported. 112 113 This function was originally based on the `setup.py script`_ of SciPy (see 114 also the discussion in `pip issue #25`_). 115 116 .. _pip issue #25: https://github.com/pypa/pip/issues/25 117 .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py 118 """ 119 no_setup_requires_arguments = ( 120 '-h', '--help', 121 '-n', '--dry-run', 122 '-q', '--quiet', 123 '-v', '--verbose', 124 '-V', '--version', 125 '--author', 126 '--author-email', 127 '--classifiers', 128 '--contact', 129 '--contact-email', 130 '--description', 131 '--egg-base', 132 '--fullname', 133 '--help-commands', 134 '--keywords', 135 '--licence', 136 '--license', 137 '--long-description', 138 '--maintainer', 139 '--maintainer-email', 140 '--name', 141 '--no-user-cfg', 142 '--obsoletes', 143 '--platforms', 144 '--provides', 145 '--requires', 146 '--url', 147 'clean', 148 'egg_info', 149 'register', 150 'sdist', 151 'upload', 152 ) 153 154 def is_short_option(argument): 155 """Check whether a command line argument is a short option.""" 156 return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-' 157 158 def expand_short_options(argument): 159 """Expand combined short options into canonical short options.""" 160 return ('-' + char for char in argument[1:]) 161 162 def argument_without_setup_requirements(argv, i): 163 """Check whether a command line argument needs setup requirements.""" 164 if argv[i] in no_setup_requires_arguments: 165 # Simple case: An argument which is either an option or a command 166 # which doesn't need setup requirements. 167 return True 168 elif (is_short_option(argv[i]) and 169 all(option in no_setup_requires_arguments 170 for option in expand_short_options(argv[i]))): 171 # Not so simple case: Combined short options none of which need 172 # setup requirements. 173 return True 174 elif argv[i - 1:i] == ['--egg-base']: 175 # Tricky case: --egg-info takes an argument which should not make 176 # us use setup_requires (defeating the purpose of this code). 
177 return True 178 else: 179 return False 180 181 if all(argument_without_setup_requirements(argv, i) 182 for i in range(1, len(argv))): 183 return { 184 "cmdclass": { 185 "build": DummyBuild, 186 "install": DummyInstall, 187 "test": DummyPyTest, 188 } 189 } 190 else: 191 cffi_modules = [ 192 "src/_cffi_src/build_openssl.py:ffi", 193 "src/_cffi_src/build_constant_time.py:ffi", 194 "src/_cffi_src/build_padding.py:ffi", 195 ] 196 197 return { 198 "setup_requires": setup_requirements, 199 "cmdclass": { 200 "test": PyTest, 201 }, 202 "cffi_modules": cffi_modules 203 } 204 205 206 setup_requires_error = ("Requested setup command that needs 'setup_requires' " 207 "while command line arguments implied a side effect " 208 "free command or option.") 209 210 211 class DummyBuild(build): 212 """ 213 This class makes it very obvious when ``keywords_with_side_effects()`` has 214 incorrectly interpreted the command line arguments to ``setup.py build`` as 215 one of the 'side effect free' commands or options. 216 """ 217 218 def run(self): 219 raise RuntimeError(setup_requires_error) 220 221 222 class DummyInstall(install): 223 """ 224 This class makes it very obvious when ``keywords_with_side_effects()`` has 225 incorrectly interpreted the command line arguments to ``setup.py install`` 226 as one of the 'side effect free' commands or options. 227 """ 228 229 def run(self): 230 raise RuntimeError(setup_requires_error) 231 232 233 class DummyPyTest(test): 234 """ 235 This class makes it very obvious when ``keywords_with_side_effects()`` has 236 incorrectly interpreted the command line arguments to ``setup.py test`` as 237 one of the 'side effect free' commands or options. 238 """ 239 240 def run_tests(self): 241 raise RuntimeError(setup_requires_error) 242 243 244 with open(os.path.join(base_dir, "README.rst")) as f: 245 long_description = f.read() 246 247 248 setup( 249 name=about["__title__"], 250 version=about["__version__"], 251 252 description=about["__summary__"], 253 long_description=long_description, 254 license=about["__license__"], 255 url=about["__uri__"], 256 257 author=about["__author__"], 258 author_email=about["__email__"], 259 260 classifiers=[ 261 "Intended Audience :: Developers", 262 "License :: OSI Approved :: Apache Software License", 263 "License :: OSI Approved :: BSD License", 264 "Natural Language :: English", 265 "Operating System :: MacOS :: MacOS X", 266 "Operating System :: POSIX", 267 "Operating System :: POSIX :: BSD", 268 "Operating System :: POSIX :: Linux", 269 "Operating System :: Microsoft :: Windows", 270 "Programming Language :: Python", 271 "Programming Language :: Python :: 2", 272 "Programming Language :: Python :: 2.7", 273 "Programming Language :: Python :: 3", 274 "Programming Language :: Python :: 3.4", 275 "Programming Language :: Python :: 3.5", 276 "Programming Language :: Python :: 3.6", 277 "Programming Language :: Python :: Implementation :: CPython", 278 "Programming Language :: Python :: Implementation :: PyPy", 279 "Topic :: Security :: Cryptography", 280 ], 281 282 package_dir={"": "src"}, 283 packages=find_packages(where="src", exclude=["_cffi_src", "_cffi_src.*"]), 284 include_package_data=True, 285 286 python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*', 287 288 install_requires=[ 289 "idna >= 2.1", 290 "asn1crypto >= 0.21.0", 291 "six >= 1.4.1", 292 ], 293 tests_require=test_requirements, 294 extras_require={ 295 ":python_version < '3'": ["enum34", "ipaddress"], 296 ":platform_python_implementation != 'PyPy'": ["cffi >= 1.7"], 297 298 "test": 
test_requirements, 299 "docstest": [ 300 "doc8", 301 "pyenchant >= 1.6.11", 302 "readme_renderer >= 16.0", 303 "sphinx >= 1.6.5", 304 "sphinx_rtd_theme", 305 "sphinxcontrib-spelling >= 4.0.1", 306 ], 307 "pep8test": [ 308 "flake8", 309 "flake8-import-order", 310 "pep8-naming", 311 ], 312 }, 313 314 # for cffi 315 zip_safe=False, 316 ext_package="cryptography.hazmat.bindings", 317 **keywords_with_side_effects(sys.argv) 318 ) 319 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -296,12 +296,14 @@ ":platform_python_implementation != 'PyPy'": ["cffi >= 1.7"], "test": test_requirements, + "docs": [ + "sphinx >= 1.6.5", + "sphinx_rtd_theme", + ], "docstest": [ "doc8", "pyenchant >= 1.6.11", "readme_renderer >= 16.0", - "sphinx >= 1.6.5", - "sphinx_rtd_theme", "sphinxcontrib-spelling >= 4.0.1", ], "pep8test": [
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -296,12 +296,14 @@\n \":platform_python_implementation != 'PyPy'\": [\"cffi >= 1.7\"],\n \n \"test\": test_requirements,\n+ \"docs\": [\n+ \"sphinx >= 1.6.5\",\n+ \"sphinx_rtd_theme\",\n+ ],\n \"docstest\": [\n \"doc8\",\n \"pyenchant >= 1.6.11\",\n \"readme_renderer >= 16.0\",\n- \"sphinx >= 1.6.5\",\n- \"sphinx_rtd_theme\",\n \"sphinxcontrib-spelling >= 4.0.1\",\n ],\n \"pep8test\": [\n", "issue": "Figure out how to fix docs build on rtd\nIt appears to be running into https://github.com/sphinx-doc/sphinx/issues/3976\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nimport setuptools\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nif (\n pkg_resources.parse_version(setuptools.__version__) <\n pkg_resources.parse_version(\"18.5\")\n):\n raise RuntimeError(\n \"cryptography requires setuptools 18.5 or newer, please upgrade to a \"\n \"newer version of setuptools\"\n )\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nsetup_requirements = []\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (5, 3):\n raise RuntimeError(\n \"cryptography 1.9 is not compatible with PyPy < 5.3. 
Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n setup_requirements.append(\"cffi>=1.7,!=1.11.3\")\n\ntest_requirements = [\n \"pytest>=3.2.1,!=3.3.0\",\n \"pretend\",\n \"iso8601\",\n \"pytz\",\n \"hypothesis>=1.11.4\",\n]\n\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. 
_setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n 
version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]),\n include_package_data=True,\n\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n\n install_requires=[\n \"idna >= 2.1\",\n \"asn1crypto >= 0.21.0\",\n \"six >= 1.4.1\",\n ],\n tests_require=test_requirements,\n extras_require={\n \":python_version < '3'\": [\"enum34\", \"ipaddress\"],\n \":platform_python_implementation != 'PyPy'\": [\"cffi >= 1.7\"],\n\n \"test\": test_requirements,\n \"docstest\": [\n \"doc8\",\n \"pyenchant >= 1.6.11\",\n \"readme_renderer >= 16.0\",\n \"sphinx >= 1.6.5\",\n \"sphinx_rtd_theme\",\n \"sphinxcontrib-spelling >= 4.0.1\",\n ],\n \"pep8test\": [\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n },\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nimport setuptools\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nif (\n pkg_resources.parse_version(setuptools.__version__) <\n pkg_resources.parse_version(\"18.5\")\n):\n raise RuntimeError(\n \"cryptography requires setuptools 18.5 or newer, please upgrade to a \"\n \"newer version of setuptools\"\n )\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nsetup_requirements = []\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (5, 3):\n raise RuntimeError(\n \"cryptography 1.9 is not compatible with PyPy < 5.3. 
Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n setup_requirements.append(\"cffi>=1.7,!=1.11.3\")\n\ntest_requirements = [\n \"pytest>=3.2.1,!=3.3.0\",\n \"pretend\",\n \"iso8601\",\n \"pytz\",\n \"hypothesis>=1.11.4\",\n]\n\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. 
_setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n 
version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]),\n include_package_data=True,\n\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n\n install_requires=[\n \"idna >= 2.1\",\n \"asn1crypto >= 0.21.0\",\n \"six >= 1.4.1\",\n ],\n tests_require=test_requirements,\n extras_require={\n \":python_version < '3'\": [\"enum34\", \"ipaddress\"],\n \":platform_python_implementation != 'PyPy'\": [\"cffi >= 1.7\"],\n\n \"test\": test_requirements,\n \"docs\": [\n \"sphinx >= 1.6.5\",\n \"sphinx_rtd_theme\",\n ],\n \"docstest\": [\n \"doc8\",\n \"pyenchant >= 1.6.11\",\n \"readme_renderer >= 16.0\",\n \"sphinxcontrib-spelling >= 4.0.1\",\n ],\n \"pep8test\": [\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n },\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py"}]}
3,524
181
gh_patches_debug_33771
rasdani/github-patches
git_diff
bridgecrewio__checkov-4750
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CKV_AWS_300 fails if there are more rules than one **Describe the issue** CKV_AWS_300 fails if there's more rules than one defined in `aws_s3_bucket_lifecycle_configuration` **Examples** ``` resource "aws_s3_bucket_lifecycle_configuration" "bucket" { bucket = aws_s3_bucket.bucket.bucket rule { id = "id-1" status = "Enabled" abort_incomplete_multipart_upload { days_after_initiation = 1 } } rule { id = "id-2" status = "Enabled" noncurrent_version_expiration { noncurrent_days = 1 } } } ``` **Version (please complete the following information):** - Checkov Version 2.3.111 Fails with: ``` Check: CKV_AWS_300: "Ensure S3 lifecycle configuration sets period for aborting failed uploads" FAILED for resource: aws_s3_bucket_lifecycle_configuration.bucket File: s3.tf:1-1 122 | resource "aws_s3_bucket_lifecycle_configuration" "bucket" { 123 | bucket = aws_s3_bucket.bucket.bucket 124 | 125 | rule { 126 | id = "id-1" 127 | 128 | abort_incomplete_multipart_upload { 129 | days_after_initiation = 1 130 | } 131 | 132 | status = "Enabled" 133 | } 134 | 135 | rule { 136 | id = "id-2" 137 | status = "Enabled" 138 | 139 | noncurrent_version_expiration { 140 | noncurrent_days = 1 141 | } 142 | } 143 | } ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py` Content: ``` 1 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 2 from checkov.common.models.enums import CheckCategories, CheckResult 3 4 5 class S3AbortIncompleteUploads(BaseResourceCheck): 6 def __init__(self): 7 """ 8 If you don't set this value in a lifecycle configuration you'll end up paying for s3 9 resources you never could use 10 """ 11 name = "Ensure S3 lifecycle configuration sets period for aborting failed uploads" 12 id = "CKV_AWS_300" 13 supported_resources = ('aws_s3_bucket_lifecycle_configuration',) 14 categories = (CheckCategories.GENERAL_SECURITY,) 15 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 16 17 def scan_resource_conf(self, conf): 18 self.evaluated_keys = ["rule"] 19 rules = conf.get("rule") 20 if rules and isinstance(rules, list): 21 for idx_rule, rule in enumerate(rules): 22 if not rule.get("abort_incomplete_multipart_upload"): 23 self.evaluated_keys = [f"rule/[{idx_rule}]/"] 24 return CheckResult.FAILED 25 return CheckResult.PASSED 26 return CheckResult.FAILED 27 28 29 check = S3AbortIncompleteUploads() 30 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py --- a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py +++ b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py @@ -1,28 +1,36 @@ +from __future__ import annotations + +from typing import Any + from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck from checkov.common.models.enums import CheckCategories, CheckResult class S3AbortIncompleteUploads(BaseResourceCheck): - def __init__(self): + def __init__(self) -> None: """ If you don't set this value in a lifecycle configuration you'll end up paying for s3 resources you never could use """ name = "Ensure S3 lifecycle configuration sets period for aborting failed uploads" id = "CKV_AWS_300" - supported_resources = ('aws_s3_bucket_lifecycle_configuration',) + supported_resources = ("aws_s3_bucket_lifecycle_configuration",) categories = (CheckCategories.GENERAL_SECURITY,) super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - def scan_resource_conf(self, conf): + def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult: self.evaluated_keys = ["rule"] rules = conf.get("rule") if rules and isinstance(rules, list): for idx_rule, rule in enumerate(rules): - if not rule.get("abort_incomplete_multipart_upload"): - self.evaluated_keys = [f"rule/[{idx_rule}]/"] - return CheckResult.FAILED - return CheckResult.PASSED + if ( + rule.get("abort_incomplete_multipart_upload") + and rule.get("status") == ["Enabled"] + and not rule.get("filter") + ): + self.evaluated_keys = [f"rule/[{idx_rule}]/abort_incomplete_multipart_upload"] + return CheckResult.PASSED + return CheckResult.FAILED
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n--- a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n+++ b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n@@ -1,28 +1,36 @@\n+from __future__ import annotations\n+\n+from typing import Any\n+\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n from checkov.common.models.enums import CheckCategories, CheckResult\n \n \n class S3AbortIncompleteUploads(BaseResourceCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n \"\"\"\n If you don't set this value in a lifecycle configuration you'll end up paying for s3\n resources you never could use\n \"\"\"\n name = \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\n id = \"CKV_AWS_300\"\n- supported_resources = ('aws_s3_bucket_lifecycle_configuration',)\n+ supported_resources = (\"aws_s3_bucket_lifecycle_configuration\",)\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n self.evaluated_keys = [\"rule\"]\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n- if not rule.get(\"abort_incomplete_multipart_upload\"):\n- self.evaluated_keys = [f\"rule/[{idx_rule}]/\"]\n- return CheckResult.FAILED\n- return CheckResult.PASSED\n+ if (\n+ rule.get(\"abort_incomplete_multipart_upload\")\n+ and rule.get(\"status\") == [\"Enabled\"]\n+ and not rule.get(\"filter\")\n+ ):\n+ self.evaluated_keys = [f\"rule/[{idx_rule}]/abort_incomplete_multipart_upload\"]\n+ return CheckResult.PASSED\n+\n return CheckResult.FAILED\n", "issue": "CKV_AWS_300 fails if there are more rules than one\n**Describe the issue**\r\nCKV_AWS_300 fails if there's more rules than one defined in `aws_s3_bucket_lifecycle_configuration`\r\n\r\n**Examples**\r\n```\r\nresource \"aws_s3_bucket_lifecycle_configuration\" \"bucket\" {\r\n bucket = aws_s3_bucket.bucket.bucket\r\n\r\n rule {\r\n id = \"id-1\"\r\n status = \"Enabled\"\r\n\r\n abort_incomplete_multipart_upload {\r\n days_after_initiation = 1\r\n }\r\n }\r\n\r\n rule {\r\n id = \"id-2\"\r\n status = \"Enabled\"\r\n\r\n noncurrent_version_expiration {\r\n noncurrent_days = 1\r\n }\r\n }\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.3.111\r\n\r\nFails with:\r\n\r\n```\r\nCheck: CKV_AWS_300: \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\r\n\tFAILED for resource: aws_s3_bucket_lifecycle_configuration.bucket\r\n\tFile: s3.tf:1-1\r\n\r\n\t\t122 | resource \"aws_s3_bucket_lifecycle_configuration\" \"bucket\" {\r\n\t\t123 | bucket = aws_s3_bucket.bucket.bucket\r\n\t\t124 | \r\n\t\t125 | rule {\r\n\t\t126 | id = \"id-1\"\r\n\t\t127 | \r\n\t\t128 | abort_incomplete_multipart_upload {\r\n\t\t129 | days_after_initiation = 1\r\n\t\t130 | }\r\n\t\t131 | \r\n\t\t132 | status = \"Enabled\"\r\n\t\t133 | }\r\n\t\t134 | \r\n\t\t135 | rule {\r\n\t\t136 | id = \"id-2\"\r\n\t\t137 | status = \"Enabled\"\r\n\t\t138 | \r\n\t\t139 | noncurrent_version_expiration {\r\n\t\t140 | noncurrent_days = 1\r\n\t\t141 | }\r\n\t\t142 | }\r\n\t\t143 | }\r\n```\n", "before_files": [{"content": "from 
checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\n\nclass S3AbortIncompleteUploads(BaseResourceCheck):\n def __init__(self):\n \"\"\"\n If you don't set this value in a lifecycle configuration you'll end up paying for s3\n resources you never could use\n \"\"\"\n name = \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\n id = \"CKV_AWS_300\"\n supported_resources = ('aws_s3_bucket_lifecycle_configuration',)\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n self.evaluated_keys = [\"rule\"]\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n if not rule.get(\"abort_incomplete_multipart_upload\"):\n self.evaluated_keys = [f\"rule/[{idx_rule}]/\"]\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = S3AbortIncompleteUploads()\n", "path": "checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\n\nclass S3AbortIncompleteUploads(BaseResourceCheck):\n def __init__(self) -> None:\n \"\"\"\n If you don't set this value in a lifecycle configuration you'll end up paying for s3\n resources you never could use\n \"\"\"\n name = \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\n id = \"CKV_AWS_300\"\n supported_resources = (\"aws_s3_bucket_lifecycle_configuration\",)\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n self.evaluated_keys = [\"rule\"]\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n if (\n rule.get(\"abort_incomplete_multipart_upload\")\n and rule.get(\"status\") == [\"Enabled\"]\n and not rule.get(\"filter\")\n ):\n self.evaluated_keys = [f\"rule/[{idx_rule}]/abort_incomplete_multipart_upload\"]\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = S3AbortIncompleteUploads()\n", "path": "checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py"}]}
1,061
482
gh_patches_debug_13279
rasdani/github-patches
git_diff
pypa__pip-3007
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pip 7.0.1: you should use "--trusted-host". Hey, no such option "--trusted-host"! ``` $ cat req.txt --extra-index-url http://pip.mycompany.com/simple mylib myanotherlib $ pip install -r req.txt Collecting mylib (from -r req.txt (line 2)) .../urllib3/util/ssl_.py:90: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning. InsecurePlatformWarning The repository located at pip.mycompany.com is not a trusted or secure host and is being ignored. If this repository is available via HTTPS it is recommended to use HTTPS instead, otherwise you may silence this warning and allow it anyways with '--trusted-host pip.mycompany.com'. Could not find a version that satisfies the requirement mylib (from -r req.txt (line 2)) (from versions: ) No matching distribution found for mylib (from -r req.txt (line 2)) $ cat req1.txt --extra-index-url http://pip.mycompany.com/simple --trusted-host pip.mycompany.com mylib myanotherlib $ pip install -r req1.txt Usage: pip [options] pip: error: no such option: --trusted-host $ ``` I know that i can run `pip install -r file.txt --trusted-host=mycompany.com` but I configure my servers with Chef, not running pip directly. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pip/req/req_file.py` Content: ``` 1 """ 2 Requirements file parsing 3 """ 4 5 from __future__ import absolute_import 6 7 import os 8 import re 9 import shlex 10 import optparse 11 import warnings 12 13 from pip._vendor.six.moves.urllib import parse as urllib_parse 14 from pip._vendor.six.moves import filterfalse 15 16 import pip 17 from pip.download import get_file_content 18 from pip.req.req_install import InstallRequirement 19 from pip.exceptions import (RequirementsFileParseError) 20 from pip.utils.deprecation import RemovedInPip10Warning 21 from pip import cmdoptions 22 23 __all__ = ['parse_requirements'] 24 25 SCHEME_RE = re.compile(r'^(http|https|file):', re.I) 26 COMMENT_RE = re.compile(r'(^|\s)+#.*$') 27 28 SUPPORTED_OPTIONS = [ 29 cmdoptions.constraints, 30 cmdoptions.editable, 31 cmdoptions.requirements, 32 cmdoptions.no_index, 33 cmdoptions.index_url, 34 cmdoptions.find_links, 35 cmdoptions.extra_index_url, 36 cmdoptions.allow_external, 37 cmdoptions.allow_all_external, 38 cmdoptions.no_allow_external, 39 cmdoptions.allow_unsafe, 40 cmdoptions.no_allow_unsafe, 41 cmdoptions.use_wheel, 42 cmdoptions.no_use_wheel, 43 cmdoptions.always_unzip, 44 cmdoptions.no_binary, 45 cmdoptions.only_binary, 46 ] 47 48 # options to be passed to requirements 49 SUPPORTED_OPTIONS_REQ = [ 50 cmdoptions.install_options, 51 cmdoptions.global_options 52 ] 53 54 # the 'dest' string values 55 SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ] 56 57 58 def parse_requirements(filename, finder=None, comes_from=None, options=None, 59 session=None, constraint=False, wheel_cache=None): 60 """Parse a requirements file and yield InstallRequirement instances. 61 62 :param filename: Path or url of requirements file. 63 :param finder: Instance of pip.index.PackageFinder. 64 :param comes_from: Origin description of requirements. 65 :param options: Global options. 
66 :param session: Instance of pip.download.PipSession. 67 :param constraint: If true, parsing a constraint file rather than 68 requirements file. 69 :param wheel_cache: Instance of pip.wheel.WheelCache 70 """ 71 if session is None: 72 raise TypeError( 73 "parse_requirements() missing 1 required keyword argument: " 74 "'session'" 75 ) 76 77 _, content = get_file_content( 78 filename, comes_from=comes_from, session=session 79 ) 80 81 lines = content.splitlines() 82 lines = ignore_comments(lines) 83 lines = join_lines(lines) 84 lines = skip_regex(lines, options) 85 86 for line_number, line in enumerate(lines, 1): 87 req_iter = process_line(line, filename, line_number, finder, 88 comes_from, options, session, wheel_cache, 89 constraint=constraint) 90 for req in req_iter: 91 yield req 92 93 94 def process_line(line, filename, line_number, finder=None, comes_from=None, 95 options=None, session=None, wheel_cache=None, 96 constraint=False): 97 """Process a single requirements line; This can result in creating/yielding 98 requirements, or updating the finder. 99 100 For lines that contain requirements, the only options that have an effect 101 are from SUPPORTED_OPTIONS_REQ, and they are scoped to the 102 requirement. Other options from SUPPORTED_OPTIONS may be present, but are 103 ignored. 104 105 For lines that do not contain requirements, the only options that have an 106 effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may 107 be present, but are ignored. These lines may contain multiple options 108 (although our docs imply only one is supported), and all our parsed and 109 affect the finder. 110 111 :param constraint: If True, parsing a constraints file. 112 """ 113 parser = build_parser() 114 defaults = parser.get_default_values() 115 defaults.index_url = None 116 if finder: 117 # `finder.format_control` will be updated during parsing 118 defaults.format_control = finder.format_control 119 args_str, options_str = break_args_options(line) 120 opts, _ = parser.parse_args(shlex.split(options_str), defaults) 121 122 # preserve for the nested code path 123 line_comes_from = '%s %s (line %s)' % ( 124 '-c' if constraint else '-r', filename, line_number) 125 126 # yield a line requirement 127 if args_str: 128 isolated = options.isolated_mode if options else False 129 if options: 130 cmdoptions.check_install_build_global(options, opts) 131 # get the options that apply to requirements 132 req_options = {} 133 for dest in SUPPORTED_OPTIONS_REQ_DEST: 134 if dest in opts.__dict__ and opts.__dict__[dest]: 135 req_options[dest] = opts.__dict__[dest] 136 yield InstallRequirement.from_line( 137 args_str, line_comes_from, constraint=constraint, 138 isolated=isolated, options=req_options, wheel_cache=wheel_cache 139 ) 140 141 # yield an editable requirement 142 elif opts.editables: 143 isolated = options.isolated_mode if options else False 144 default_vcs = options.default_vcs if options else None 145 yield InstallRequirement.from_editable( 146 opts.editables[0], comes_from=line_comes_from, 147 constraint=constraint, default_vcs=default_vcs, isolated=isolated, 148 wheel_cache=wheel_cache 149 ) 150 151 # parse a nested requirements file 152 elif opts.requirements or opts.constraints: 153 if opts.requirements: 154 req_path = opts.requirements[0] 155 nested_constraint = False 156 else: 157 req_path = opts.constraints[0] 158 nested_constraint = True 159 # original file is over http 160 if SCHEME_RE.search(filename): 161 # do a url join so relative paths work 162 req_path = 
urllib_parse.urljoin(filename, req_path) 163 # original file and nested file are paths 164 elif not SCHEME_RE.search(req_path): 165 # do a join so relative paths work 166 req_dir = os.path.dirname(filename) 167 req_path = os.path.join(os.path.dirname(filename), req_path) 168 # TODO: Why not use `comes_from='-r {} (line {})'` here as well? 169 parser = parse_requirements( 170 req_path, finder, comes_from, options, session, 171 constraint=nested_constraint, wheel_cache=wheel_cache 172 ) 173 for req in parser: 174 yield req 175 176 # set finder options 177 elif finder: 178 if opts.allow_external: 179 warnings.warn( 180 "--allow-external has been deprecated and will be removed in " 181 "the future. Due to changes in the repository protocol, it no " 182 "longer has any effect.", 183 RemovedInPip10Warning, 184 ) 185 186 if opts.allow_all_external: 187 warnings.warn( 188 "--allow-all-external has been deprecated and will be removed " 189 "in the future. Due to changes in the repository protocol, it " 190 "no longer has any effect.", 191 RemovedInPip10Warning, 192 ) 193 194 if opts.allow_unverified: 195 warnings.warn( 196 "--allow-unverified has been deprecated and will be removed " 197 "in the future. Due to changes in the repository protocol, it " 198 "no longer has any effect.", 199 RemovedInPip10Warning, 200 ) 201 202 if opts.index_url: 203 finder.index_urls = [opts.index_url] 204 if opts.use_wheel is False: 205 finder.use_wheel = False 206 pip.index.fmt_ctl_no_use_wheel(finder.format_control) 207 if opts.no_index is True: 208 finder.index_urls = [] 209 if opts.extra_index_urls: 210 finder.index_urls.extend(opts.extra_index_urls) 211 if opts.find_links: 212 # FIXME: it would be nice to keep track of the source 213 # of the find_links: support a find-links local path 214 # relative to a requirements file. 215 value = opts.find_links[0] 216 req_dir = os.path.dirname(os.path.abspath(filename)) 217 relative_to_reqs_file = os.path.join(req_dir, value) 218 if os.path.exists(relative_to_reqs_file): 219 value = relative_to_reqs_file 220 finder.find_links.append(value) 221 222 223 def break_args_options(line): 224 """Break up the line into an args and options string. We only want to shlex 225 (and then optparse) the options, not the args. args can contain markers 226 which are corrupted by shlex. 227 """ 228 tokens = line.split(' ') 229 args = [] 230 options = tokens[:] 231 for token in tokens: 232 if token.startswith('-') or token.startswith('--'): 233 break 234 else: 235 args.append(token) 236 options.pop(0) 237 return ' '.join(args), ' '.join(options) 238 239 240 def build_parser(): 241 """ 242 Return a parser for parsing requirement lines 243 """ 244 parser = optparse.OptionParser(add_help_option=False) 245 246 option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ 247 for option_factory in option_factories: 248 option = option_factory() 249 parser.add_option(option) 250 251 # By default optparse sys.exits on parsing errors. We want to wrap 252 # that in our own exception. 253 def parser_exit(self, msg): 254 raise RequirementsFileParseError(msg) 255 parser.exit = parser_exit 256 257 return parser 258 259 260 def join_lines(iterator): 261 """ 262 Joins a line ending in '\' with the previous line. 263 """ 264 lines = [] 265 for line in iterator: 266 if not line.endswith('\\'): 267 if lines: 268 lines.append(line) 269 yield ''.join(lines) 270 lines = [] 271 else: 272 yield line 273 else: 274 lines.append(line.strip('\\')) 275 276 # TODO: handle space after '\'. 
277 # TODO: handle '\' on last line. 278 279 280 def ignore_comments(iterator): 281 """ 282 Strips and filters empty or commented lines. 283 """ 284 for line in iterator: 285 line = COMMENT_RE.sub('', line) 286 line = line.strip() 287 if line: 288 yield line 289 290 291 def skip_regex(lines, options): 292 """ 293 Optionally exclude lines that match '--skip-requirements-regex' 294 """ 295 skip_regex = options.skip_requirements_regex if options else None 296 if skip_regex: 297 lines = filterfalse(re.compile(skip_regex).search, lines) 298 return lines 299 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pip/req/req_file.py b/pip/req/req_file.py --- a/pip/req/req_file.py +++ b/pip/req/req_file.py @@ -43,6 +43,7 @@ cmdoptions.always_unzip, cmdoptions.no_binary, cmdoptions.only_binary, + cmdoptions.trusted_host, ] # options to be passed to requirements @@ -218,6 +219,9 @@ if os.path.exists(relative_to_reqs_file): value = relative_to_reqs_file finder.find_links.append(value) + if opts.trusted_hosts: + finder.secure_origins.extend( + ("*", host, "*") for host in opts.trusted_hosts) def break_args_options(line):
{"golden_diff": "diff --git a/pip/req/req_file.py b/pip/req/req_file.py\n--- a/pip/req/req_file.py\n+++ b/pip/req/req_file.py\n@@ -43,6 +43,7 @@\n cmdoptions.always_unzip,\n cmdoptions.no_binary,\n cmdoptions.only_binary,\n+ cmdoptions.trusted_host,\n ]\n \n # options to be passed to requirements\n@@ -218,6 +219,9 @@\n if os.path.exists(relative_to_reqs_file):\n value = relative_to_reqs_file\n finder.find_links.append(value)\n+ if opts.trusted_hosts:\n+ finder.secure_origins.extend(\n+ (\"*\", host, \"*\") for host in opts.trusted_hosts)\n \n \n def break_args_options(line):\n", "issue": "pip 7.0.1: you should use \"--trusted-host\". Hey, no such option \"--trusted-host\"!\n```\n$ cat req.txt \n--extra-index-url http://pip.mycompany.com/simple \nmylib\nmyanotherlib\n\n$ pip install -r req.txt\nCollecting mylib (from -r req.txt (line 2))\n.../urllib3/util/ssl_.py:90: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.\n InsecurePlatformWarning\n The repository located at pip.mycompany.com is not a trusted or secure host and is being ignored. If this repository is available via HTTPS it is recommended to use HTTPS instead, otherwise you may silence this warning and allow it anyways with '--trusted-host pip.mycompany.com'.\n Could not find a version that satisfies the requirement mylib (from -r req.txt (line 2)) (from versions: )\nNo matching distribution found for mylib (from -r req.txt (line 2))\n\n$ cat req1.txt \n--extra-index-url http://pip.mycompany.com/simple \n--trusted-host pip.mycompany.com\nmylib\nmyanotherlib\n\n$ pip install -r req1.txt \nUsage: pip [options]\n\npip: error: no such option: --trusted-host\n$ \n```\n\nI know that i can run `pip install -r file.txt --trusted-host=mycompany.com` but I configure my servers with Chef, not running pip directly.\n\n", "before_files": [{"content": "\"\"\"\nRequirements file parsing\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport os\nimport re\nimport shlex\nimport optparse\nimport warnings\n\nfrom pip._vendor.six.moves.urllib import parse as urllib_parse\nfrom pip._vendor.six.moves import filterfalse\n\nimport pip\nfrom pip.download import get_file_content\nfrom pip.req.req_install import InstallRequirement\nfrom pip.exceptions import (RequirementsFileParseError)\nfrom pip.utils.deprecation import RemovedInPip10Warning\nfrom pip import cmdoptions\n\n__all__ = ['parse_requirements']\n\nSCHEME_RE = re.compile(r'^(http|https|file):', re.I)\nCOMMENT_RE = re.compile(r'(^|\\s)+#.*$')\n\nSUPPORTED_OPTIONS = [\n cmdoptions.constraints,\n cmdoptions.editable,\n cmdoptions.requirements,\n cmdoptions.no_index,\n cmdoptions.index_url,\n cmdoptions.find_links,\n cmdoptions.extra_index_url,\n cmdoptions.allow_external,\n cmdoptions.allow_all_external,\n cmdoptions.no_allow_external,\n cmdoptions.allow_unsafe,\n cmdoptions.no_allow_unsafe,\n cmdoptions.use_wheel,\n cmdoptions.no_use_wheel,\n cmdoptions.always_unzip,\n cmdoptions.no_binary,\n cmdoptions.only_binary,\n]\n\n# options to be passed to requirements\nSUPPORTED_OPTIONS_REQ = [\n cmdoptions.install_options,\n cmdoptions.global_options\n]\n\n# the 'dest' string values\nSUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]\n\n\ndef parse_requirements(filename, finder=None, comes_from=None, options=None,\n session=None, constraint=False, 
wheel_cache=None):\n \"\"\"Parse a requirements file and yield InstallRequirement instances.\n\n :param filename: Path or url of requirements file.\n :param finder: Instance of pip.index.PackageFinder.\n :param comes_from: Origin description of requirements.\n :param options: Global options.\n :param session: Instance of pip.download.PipSession.\n :param constraint: If true, parsing a constraint file rather than\n requirements file.\n :param wheel_cache: Instance of pip.wheel.WheelCache\n \"\"\"\n if session is None:\n raise TypeError(\n \"parse_requirements() missing 1 required keyword argument: \"\n \"'session'\"\n )\n\n _, content = get_file_content(\n filename, comes_from=comes_from, session=session\n )\n\n lines = content.splitlines()\n lines = ignore_comments(lines)\n lines = join_lines(lines)\n lines = skip_regex(lines, options)\n\n for line_number, line in enumerate(lines, 1):\n req_iter = process_line(line, filename, line_number, finder,\n comes_from, options, session, wheel_cache,\n constraint=constraint)\n for req in req_iter:\n yield req\n\n\ndef process_line(line, filename, line_number, finder=None, comes_from=None,\n options=None, session=None, wheel_cache=None,\n constraint=False):\n \"\"\"Process a single requirements line; This can result in creating/yielding\n requirements, or updating the finder.\n\n For lines that contain requirements, the only options that have an effect\n are from SUPPORTED_OPTIONS_REQ, and they are scoped to the\n requirement. Other options from SUPPORTED_OPTIONS may be present, but are\n ignored.\n\n For lines that do not contain requirements, the only options that have an\n effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may\n be present, but are ignored. These lines may contain multiple options\n (although our docs imply only one is supported), and all our parsed and\n affect the finder.\n\n :param constraint: If True, parsing a constraints file.\n \"\"\"\n parser = build_parser()\n defaults = parser.get_default_values()\n defaults.index_url = None\n if finder:\n # `finder.format_control` will be updated during parsing\n defaults.format_control = finder.format_control\n args_str, options_str = break_args_options(line)\n opts, _ = parser.parse_args(shlex.split(options_str), defaults)\n\n # preserve for the nested code path\n line_comes_from = '%s %s (line %s)' % (\n '-c' if constraint else '-r', filename, line_number)\n\n # yield a line requirement\n if args_str:\n isolated = options.isolated_mode if options else False\n if options:\n cmdoptions.check_install_build_global(options, opts)\n # get the options that apply to requirements\n req_options = {}\n for dest in SUPPORTED_OPTIONS_REQ_DEST:\n if dest in opts.__dict__ and opts.__dict__[dest]:\n req_options[dest] = opts.__dict__[dest]\n yield InstallRequirement.from_line(\n args_str, line_comes_from, constraint=constraint,\n isolated=isolated, options=req_options, wheel_cache=wheel_cache\n )\n\n # yield an editable requirement\n elif opts.editables:\n isolated = options.isolated_mode if options else False\n default_vcs = options.default_vcs if options else None\n yield InstallRequirement.from_editable(\n opts.editables[0], comes_from=line_comes_from,\n constraint=constraint, default_vcs=default_vcs, isolated=isolated,\n wheel_cache=wheel_cache\n )\n\n # parse a nested requirements file\n elif opts.requirements or opts.constraints:\n if opts.requirements:\n req_path = opts.requirements[0]\n nested_constraint = False\n else:\n req_path = opts.constraints[0]\n nested_constraint = 
True\n # original file is over http\n if SCHEME_RE.search(filename):\n # do a url join so relative paths work\n req_path = urllib_parse.urljoin(filename, req_path)\n # original file and nested file are paths\n elif not SCHEME_RE.search(req_path):\n # do a join so relative paths work\n req_dir = os.path.dirname(filename)\n req_path = os.path.join(os.path.dirname(filename), req_path)\n # TODO: Why not use `comes_from='-r {} (line {})'` here as well?\n parser = parse_requirements(\n req_path, finder, comes_from, options, session,\n constraint=nested_constraint, wheel_cache=wheel_cache\n )\n for req in parser:\n yield req\n\n # set finder options\n elif finder:\n if opts.allow_external:\n warnings.warn(\n \"--allow-external has been deprecated and will be removed in \"\n \"the future. Due to changes in the repository protocol, it no \"\n \"longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if opts.allow_all_external:\n warnings.warn(\n \"--allow-all-external has been deprecated and will be removed \"\n \"in the future. Due to changes in the repository protocol, it \"\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if opts.allow_unverified:\n warnings.warn(\n \"--allow-unverified has been deprecated and will be removed \"\n \"in the future. Due to changes in the repository protocol, it \"\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if opts.index_url:\n finder.index_urls = [opts.index_url]\n if opts.use_wheel is False:\n finder.use_wheel = False\n pip.index.fmt_ctl_no_use_wheel(finder.format_control)\n if opts.no_index is True:\n finder.index_urls = []\n if opts.extra_index_urls:\n finder.index_urls.extend(opts.extra_index_urls)\n if opts.find_links:\n # FIXME: it would be nice to keep track of the source\n # of the find_links: support a find-links local path\n # relative to a requirements file.\n value = opts.find_links[0]\n req_dir = os.path.dirname(os.path.abspath(filename))\n relative_to_reqs_file = os.path.join(req_dir, value)\n if os.path.exists(relative_to_reqs_file):\n value = relative_to_reqs_file\n finder.find_links.append(value)\n\n\ndef break_args_options(line):\n \"\"\"Break up the line into an args and options string. We only want to shlex\n (and then optparse) the options, not the args. args can contain markers\n which are corrupted by shlex.\n \"\"\"\n tokens = line.split(' ')\n args = []\n options = tokens[:]\n for token in tokens:\n if token.startswith('-') or token.startswith('--'):\n break\n else:\n args.append(token)\n options.pop(0)\n return ' '.join(args), ' '.join(options)\n\n\ndef build_parser():\n \"\"\"\n Return a parser for parsing requirement lines\n \"\"\"\n parser = optparse.OptionParser(add_help_option=False)\n\n option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ\n for option_factory in option_factories:\n option = option_factory()\n parser.add_option(option)\n\n # By default optparse sys.exits on parsing errors. 
We want to wrap\n # that in our own exception.\n def parser_exit(self, msg):\n raise RequirementsFileParseError(msg)\n parser.exit = parser_exit\n\n return parser\n\n\ndef join_lines(iterator):\n \"\"\"\n Joins a line ending in '\\' with the previous line.\n \"\"\"\n lines = []\n for line in iterator:\n if not line.endswith('\\\\'):\n if lines:\n lines.append(line)\n yield ''.join(lines)\n lines = []\n else:\n yield line\n else:\n lines.append(line.strip('\\\\'))\n\n # TODO: handle space after '\\'.\n # TODO: handle '\\' on last line.\n\n\ndef ignore_comments(iterator):\n \"\"\"\n Strips and filters empty or commented lines.\n \"\"\"\n for line in iterator:\n line = COMMENT_RE.sub('', line)\n line = line.strip()\n if line:\n yield line\n\n\ndef skip_regex(lines, options):\n \"\"\"\n Optionally exclude lines that match '--skip-requirements-regex'\n \"\"\"\n skip_regex = options.skip_requirements_regex if options else None\n if skip_regex:\n lines = filterfalse(re.compile(skip_regex).search, lines)\n return lines\n", "path": "pip/req/req_file.py"}], "after_files": [{"content": "\"\"\"\nRequirements file parsing\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport os\nimport re\nimport shlex\nimport optparse\nimport warnings\n\nfrom pip._vendor.six.moves.urllib import parse as urllib_parse\nfrom pip._vendor.six.moves import filterfalse\n\nimport pip\nfrom pip.download import get_file_content\nfrom pip.req.req_install import InstallRequirement\nfrom pip.exceptions import (RequirementsFileParseError)\nfrom pip.utils.deprecation import RemovedInPip10Warning\nfrom pip import cmdoptions\n\n__all__ = ['parse_requirements']\n\nSCHEME_RE = re.compile(r'^(http|https|file):', re.I)\nCOMMENT_RE = re.compile(r'(^|\\s)+#.*$')\n\nSUPPORTED_OPTIONS = [\n cmdoptions.constraints,\n cmdoptions.editable,\n cmdoptions.requirements,\n cmdoptions.no_index,\n cmdoptions.index_url,\n cmdoptions.find_links,\n cmdoptions.extra_index_url,\n cmdoptions.allow_external,\n cmdoptions.allow_all_external,\n cmdoptions.no_allow_external,\n cmdoptions.allow_unsafe,\n cmdoptions.no_allow_unsafe,\n cmdoptions.use_wheel,\n cmdoptions.no_use_wheel,\n cmdoptions.always_unzip,\n cmdoptions.no_binary,\n cmdoptions.only_binary,\n cmdoptions.trusted_host,\n]\n\n# options to be passed to requirements\nSUPPORTED_OPTIONS_REQ = [\n cmdoptions.install_options,\n cmdoptions.global_options\n]\n\n# the 'dest' string values\nSUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]\n\n\ndef parse_requirements(filename, finder=None, comes_from=None, options=None,\n session=None, constraint=False, wheel_cache=None):\n \"\"\"Parse a requirements file and yield InstallRequirement instances.\n\n :param filename: Path or url of requirements file.\n :param finder: Instance of pip.index.PackageFinder.\n :param comes_from: Origin description of requirements.\n :param options: Global options.\n :param session: Instance of pip.download.PipSession.\n :param constraint: If true, parsing a constraint file rather than\n requirements file.\n :param wheel_cache: Instance of pip.wheel.WheelCache\n \"\"\"\n if session is None:\n raise TypeError(\n \"parse_requirements() missing 1 required keyword argument: \"\n \"'session'\"\n )\n\n _, content = get_file_content(\n filename, comes_from=comes_from, session=session\n )\n\n lines = content.splitlines()\n lines = ignore_comments(lines)\n lines = join_lines(lines)\n lines = skip_regex(lines, options)\n\n for line_number, line in enumerate(lines, 1):\n req_iter = process_line(line, filename, 
line_number, finder,\n comes_from, options, session, wheel_cache,\n constraint=constraint)\n for req in req_iter:\n yield req\n\n\ndef process_line(line, filename, line_number, finder=None, comes_from=None,\n options=None, session=None, wheel_cache=None,\n constraint=False):\n \"\"\"Process a single requirements line; This can result in creating/yielding\n requirements, or updating the finder.\n\n For lines that contain requirements, the only options that have an effect\n are from SUPPORTED_OPTIONS_REQ, and they are scoped to the\n requirement. Other options from SUPPORTED_OPTIONS may be present, but are\n ignored.\n\n For lines that do not contain requirements, the only options that have an\n effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may\n be present, but are ignored. These lines may contain multiple options\n (although our docs imply only one is supported), and all our parsed and\n affect the finder.\n\n :param constraint: If True, parsing a constraints file.\n \"\"\"\n parser = build_parser()\n defaults = parser.get_default_values()\n defaults.index_url = None\n if finder:\n # `finder.format_control` will be updated during parsing\n defaults.format_control = finder.format_control\n args_str, options_str = break_args_options(line)\n opts, _ = parser.parse_args(shlex.split(options_str), defaults)\n\n # preserve for the nested code path\n line_comes_from = '%s %s (line %s)' % (\n '-c' if constraint else '-r', filename, line_number)\n\n # yield a line requirement\n if args_str:\n isolated = options.isolated_mode if options else False\n if options:\n cmdoptions.check_install_build_global(options, opts)\n # get the options that apply to requirements\n req_options = {}\n for dest in SUPPORTED_OPTIONS_REQ_DEST:\n if dest in opts.__dict__ and opts.__dict__[dest]:\n req_options[dest] = opts.__dict__[dest]\n yield InstallRequirement.from_line(\n args_str, line_comes_from, constraint=constraint,\n isolated=isolated, options=req_options, wheel_cache=wheel_cache\n )\n\n # yield an editable requirement\n elif opts.editables:\n isolated = options.isolated_mode if options else False\n default_vcs = options.default_vcs if options else None\n yield InstallRequirement.from_editable(\n opts.editables[0], comes_from=line_comes_from,\n constraint=constraint, default_vcs=default_vcs, isolated=isolated,\n wheel_cache=wheel_cache\n )\n\n # parse a nested requirements file\n elif opts.requirements or opts.constraints:\n if opts.requirements:\n req_path = opts.requirements[0]\n nested_constraint = False\n else:\n req_path = opts.constraints[0]\n nested_constraint = True\n # original file is over http\n if SCHEME_RE.search(filename):\n # do a url join so relative paths work\n req_path = urllib_parse.urljoin(filename, req_path)\n # original file and nested file are paths\n elif not SCHEME_RE.search(req_path):\n # do a join so relative paths work\n req_dir = os.path.dirname(filename)\n req_path = os.path.join(os.path.dirname(filename), req_path)\n # TODO: Why not use `comes_from='-r {} (line {})'` here as well?\n parser = parse_requirements(\n req_path, finder, comes_from, options, session,\n constraint=nested_constraint, wheel_cache=wheel_cache\n )\n for req in parser:\n yield req\n\n # set finder options\n elif finder:\n if opts.allow_external:\n warnings.warn(\n \"--allow-external has been deprecated and will be removed in \"\n \"the future. 
Due to changes in the repository protocol, it no \"\n \"longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if opts.allow_all_external:\n warnings.warn(\n \"--allow-all-external has been deprecated and will be removed \"\n \"in the future. Due to changes in the repository protocol, it \"\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if opts.allow_unverified:\n warnings.warn(\n \"--allow-unverified has been deprecated and will be removed \"\n \"in the future. Due to changes in the repository protocol, it \"\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if opts.index_url:\n finder.index_urls = [opts.index_url]\n if opts.use_wheel is False:\n finder.use_wheel = False\n pip.index.fmt_ctl_no_use_wheel(finder.format_control)\n if opts.no_index is True:\n finder.index_urls = []\n if opts.extra_index_urls:\n finder.index_urls.extend(opts.extra_index_urls)\n if opts.find_links:\n # FIXME: it would be nice to keep track of the source\n # of the find_links: support a find-links local path\n # relative to a requirements file.\n value = opts.find_links[0]\n req_dir = os.path.dirname(os.path.abspath(filename))\n relative_to_reqs_file = os.path.join(req_dir, value)\n if os.path.exists(relative_to_reqs_file):\n value = relative_to_reqs_file\n finder.find_links.append(value)\n if opts.trusted_hosts:\n finder.secure_origins.extend(\n (\"*\", host, \"*\") for host in opts.trusted_hosts)\n\n\ndef break_args_options(line):\n \"\"\"Break up the line into an args and options string. We only want to shlex\n (and then optparse) the options, not the args. args can contain markers\n which are corrupted by shlex.\n \"\"\"\n tokens = line.split(' ')\n args = []\n options = tokens[:]\n for token in tokens:\n if token.startswith('-') or token.startswith('--'):\n break\n else:\n args.append(token)\n options.pop(0)\n return ' '.join(args), ' '.join(options)\n\n\ndef build_parser():\n \"\"\"\n Return a parser for parsing requirement lines\n \"\"\"\n parser = optparse.OptionParser(add_help_option=False)\n\n option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ\n for option_factory in option_factories:\n option = option_factory()\n parser.add_option(option)\n\n # By default optparse sys.exits on parsing errors. We want to wrap\n # that in our own exception.\n def parser_exit(self, msg):\n raise RequirementsFileParseError(msg)\n parser.exit = parser_exit\n\n return parser\n\n\ndef join_lines(iterator):\n \"\"\"\n Joins a line ending in '\\' with the previous line.\n \"\"\"\n lines = []\n for line in iterator:\n if not line.endswith('\\\\'):\n if lines:\n lines.append(line)\n yield ''.join(lines)\n lines = []\n else:\n yield line\n else:\n lines.append(line.strip('\\\\'))\n\n # TODO: handle space after '\\'.\n # TODO: handle '\\' on last line.\n\n\ndef ignore_comments(iterator):\n \"\"\"\n Strips and filters empty or commented lines.\n \"\"\"\n for line in iterator:\n line = COMMENT_RE.sub('', line)\n line = line.strip()\n if line:\n yield line\n\n\ndef skip_regex(lines, options):\n \"\"\"\n Optionally exclude lines that match '--skip-requirements-regex'\n \"\"\"\n skip_regex = options.skip_requirements_regex if options else None\n if skip_regex:\n lines = filterfalse(re.compile(skip_regex).search, lines)\n return lines\n", "path": "pip/req/req_file.py"}]}
3,575
175
gh_patches_debug_22629
rasdani/github-patches
git_diff
yt-project__yt-3613
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- BUG: spurious log warning when saving a plot to png format ### Bug report **Bug summary** **Code for reproduction** ```python import yt yt.funcs.mylog.setLevel("warning") ds = yt.load_sample("IsolatedGalaxy") p = yt.SlicePlot(ds, "z", "density") p.save("/tmp/test.png") ``` **Actual outcome** ``` yt : [WARNING ] 2021-10-20 11:50:44,393 Received two valid image formats '.png' (from `filename`) and 'png' (from `suffix`). The former is ignored. ``` **Expected outcome** No log warning --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `yt/visualization/_commons.py` Content: ``` 1 import os 2 import sys 3 from typing import Optional, Type 4 5 import matplotlib 6 from packaging.version import Version 7 8 from yt.utilities.logger import ytLogger as mylog 9 10 from ._mpl_imports import ( 11 FigureCanvasAgg, 12 FigureCanvasBase, 13 FigureCanvasPdf, 14 FigureCanvasPS, 15 FigureCanvasSVG, 16 ) 17 18 MPL_VERSION = Version(matplotlib.__version__) 19 20 DEFAULT_FONT_PROPERTIES = { 21 "family": "stixgeneral", 22 "size": 18, 23 } 24 25 if MPL_VERSION >= Version("3.4"): 26 DEFAULT_FONT_PROPERTIES["math_fontfamily"] = "cm" 27 28 SUPPORTED_FORMATS = frozenset(FigureCanvasBase.get_supported_filetypes().keys()) 29 SUPPORTED_CANVAS_CLASSES = frozenset( 30 (FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS, FigureCanvasSVG) 31 ) 32 33 34 def get_canvas_class(suffix: str) -> Type[FigureCanvasBase]: 35 s = normalize_extension_string(suffix) 36 if s not in SUPPORTED_FORMATS: 37 raise ValueError(f"Unsupported file format '{suffix}'.") 38 for cls in SUPPORTED_CANVAS_CLASSES: 39 if s in cls.get_supported_filetypes(): 40 return cls 41 raise RuntimeError( 42 "Something went terribly wrong. " 43 f"File extension '{suffix}' is supposed to be supported " 44 "but no compatible backend was found." 45 ) 46 47 48 def normalize_extension_string(s: str) -> str: 49 if sys.version_info < (3, 9): 50 if s.startswith("."): 51 return s[1:] 52 return s 53 else: 54 return s.removeprefix(".") 55 56 57 def validate_image_name(filename, suffix: Optional[str] = None) -> str: 58 """ 59 Build a valid image filename with a specified extension (default to png). 60 The suffix parameter is ignored if the input filename has a valid extension already. 61 Otherwise, suffix is appended to the filename, replacing any existing extension. 62 """ 63 name, psuffix = os.path.splitext(filename) 64 if normalize_extension_string(psuffix) in SUPPORTED_FORMATS: 65 if suffix is not None: 66 suffix = normalize_extension_string(suffix) 67 if suffix in SUPPORTED_FORMATS and suffix != psuffix: 68 mylog.warning( 69 "Received two valid image formats '%s' (from `filename`) " 70 "and '%s' (from `suffix`). The former is ignored.", 71 psuffix, 72 suffix, 73 ) 74 return f"{name}.{suffix}" 75 return str(filename) 76 77 if suffix is None: 78 suffix = ".png" 79 80 suffix = normalize_extension_string(suffix) 81 82 if suffix not in SUPPORTED_FORMATS: 83 raise ValueError(f"Unsupported file format '{suffix}'.") 84 85 return f"{filename}.{suffix}" 86 87 88 def get_canvas(figure, filename): 89 90 name, suffix = os.path.splitext(filename) 91 92 if not suffix: 93 raise ValueError( 94 f"Can not determine canvas class from filename '{filename}' " 95 f"without an extension." 
96 ) 97 return get_canvas_class(suffix)(figure) 98 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/yt/visualization/_commons.py b/yt/visualization/_commons.py --- a/yt/visualization/_commons.py +++ b/yt/visualization/_commons.py @@ -61,9 +61,12 @@ Otherwise, suffix is appended to the filename, replacing any existing extension. """ name, psuffix = os.path.splitext(filename) - if normalize_extension_string(psuffix) in SUPPORTED_FORMATS: - if suffix is not None: - suffix = normalize_extension_string(suffix) + psuffix = normalize_extension_string(psuffix) + + if suffix is not None: + suffix = normalize_extension_string(suffix) + + if psuffix in SUPPORTED_FORMATS: if suffix in SUPPORTED_FORMATS and suffix != psuffix: mylog.warning( "Received two valid image formats '%s' (from `filename`) " @@ -75,9 +78,7 @@ return str(filename) if suffix is None: - suffix = ".png" - - suffix = normalize_extension_string(suffix) + suffix = "png" if suffix not in SUPPORTED_FORMATS: raise ValueError(f"Unsupported file format '{suffix}'.")
{"golden_diff": "diff --git a/yt/visualization/_commons.py b/yt/visualization/_commons.py\n--- a/yt/visualization/_commons.py\n+++ b/yt/visualization/_commons.py\n@@ -61,9 +61,12 @@\n Otherwise, suffix is appended to the filename, replacing any existing extension.\n \"\"\"\n name, psuffix = os.path.splitext(filename)\n- if normalize_extension_string(psuffix) in SUPPORTED_FORMATS:\n- if suffix is not None:\n- suffix = normalize_extension_string(suffix)\n+ psuffix = normalize_extension_string(psuffix)\n+\n+ if suffix is not None:\n+ suffix = normalize_extension_string(suffix)\n+\n+ if psuffix in SUPPORTED_FORMATS:\n if suffix in SUPPORTED_FORMATS and suffix != psuffix:\n mylog.warning(\n \"Received two valid image formats '%s' (from `filename`) \"\n@@ -75,9 +78,7 @@\n return str(filename)\n \n if suffix is None:\n- suffix = \".png\"\n-\n- suffix = normalize_extension_string(suffix)\n+ suffix = \"png\"\n \n if suffix not in SUPPORTED_FORMATS:\n raise ValueError(f\"Unsupported file format '{suffix}'.\")\n", "issue": "BUG: spurious log warning when saving a plot to png format\n### Bug report\r\n\r\n**Bug summary**\r\n\r\n**Code for reproduction**\r\n\r\n```python\r\nimport yt\r\n\r\nyt.funcs.mylog.setLevel(\"warning\")\r\n\r\nds = yt.load_sample(\"IsolatedGalaxy\")\r\np = yt.SlicePlot(ds, \"z\", \"density\")\r\np.save(\"/tmp/test.png\")\r\n```\r\n\r\n\r\n**Actual outcome**\r\n\r\n```\r\nyt : [WARNING ] 2021-10-20 11:50:44,393 Received two valid image formats '.png' (from `filename`) and 'png' (from `suffix`). The former is ignored.\r\n```\r\n\r\n**Expected outcome**\r\n\r\nNo log warning\n", "before_files": [{"content": "import os\nimport sys\nfrom typing import Optional, Type\n\nimport matplotlib\nfrom packaging.version import Version\n\nfrom yt.utilities.logger import ytLogger as mylog\n\nfrom ._mpl_imports import (\n FigureCanvasAgg,\n FigureCanvasBase,\n FigureCanvasPdf,\n FigureCanvasPS,\n FigureCanvasSVG,\n)\n\nMPL_VERSION = Version(matplotlib.__version__)\n\nDEFAULT_FONT_PROPERTIES = {\n \"family\": \"stixgeneral\",\n \"size\": 18,\n}\n\nif MPL_VERSION >= Version(\"3.4\"):\n DEFAULT_FONT_PROPERTIES[\"math_fontfamily\"] = \"cm\"\n\nSUPPORTED_FORMATS = frozenset(FigureCanvasBase.get_supported_filetypes().keys())\nSUPPORTED_CANVAS_CLASSES = frozenset(\n (FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS, FigureCanvasSVG)\n)\n\n\ndef get_canvas_class(suffix: str) -> Type[FigureCanvasBase]:\n s = normalize_extension_string(suffix)\n if s not in SUPPORTED_FORMATS:\n raise ValueError(f\"Unsupported file format '{suffix}'.\")\n for cls in SUPPORTED_CANVAS_CLASSES:\n if s in cls.get_supported_filetypes():\n return cls\n raise RuntimeError(\n \"Something went terribly wrong. 
\"\n f\"File extension '{suffix}' is supposed to be supported \"\n \"but no compatible backend was found.\"\n )\n\n\ndef normalize_extension_string(s: str) -> str:\n if sys.version_info < (3, 9):\n if s.startswith(\".\"):\n return s[1:]\n return s\n else:\n return s.removeprefix(\".\")\n\n\ndef validate_image_name(filename, suffix: Optional[str] = None) -> str:\n \"\"\"\n Build a valid image filename with a specified extension (default to png).\n The suffix parameter is ignored if the input filename has a valid extension already.\n Otherwise, suffix is appended to the filename, replacing any existing extension.\n \"\"\"\n name, psuffix = os.path.splitext(filename)\n if normalize_extension_string(psuffix) in SUPPORTED_FORMATS:\n if suffix is not None:\n suffix = normalize_extension_string(suffix)\n if suffix in SUPPORTED_FORMATS and suffix != psuffix:\n mylog.warning(\n \"Received two valid image formats '%s' (from `filename`) \"\n \"and '%s' (from `suffix`). The former is ignored.\",\n psuffix,\n suffix,\n )\n return f\"{name}.{suffix}\"\n return str(filename)\n\n if suffix is None:\n suffix = \".png\"\n\n suffix = normalize_extension_string(suffix)\n\n if suffix not in SUPPORTED_FORMATS:\n raise ValueError(f\"Unsupported file format '{suffix}'.\")\n\n return f\"{filename}.{suffix}\"\n\n\ndef get_canvas(figure, filename):\n\n name, suffix = os.path.splitext(filename)\n\n if not suffix:\n raise ValueError(\n f\"Can not determine canvas class from filename '{filename}' \"\n f\"without an extension.\"\n )\n return get_canvas_class(suffix)(figure)\n", "path": "yt/visualization/_commons.py"}], "after_files": [{"content": "import os\nimport sys\nfrom typing import Optional, Type\n\nimport matplotlib\nfrom packaging.version import Version\n\nfrom yt.utilities.logger import ytLogger as mylog\n\nfrom ._mpl_imports import (\n FigureCanvasAgg,\n FigureCanvasBase,\n FigureCanvasPdf,\n FigureCanvasPS,\n FigureCanvasSVG,\n)\n\nMPL_VERSION = Version(matplotlib.__version__)\n\nDEFAULT_FONT_PROPERTIES = {\n \"family\": \"stixgeneral\",\n \"size\": 18,\n}\n\nif MPL_VERSION >= Version(\"3.4\"):\n DEFAULT_FONT_PROPERTIES[\"math_fontfamily\"] = \"cm\"\n\nSUPPORTED_FORMATS = frozenset(FigureCanvasBase.get_supported_filetypes().keys())\nSUPPORTED_CANVAS_CLASSES = frozenset(\n (FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS, FigureCanvasSVG)\n)\n\n\ndef get_canvas_class(suffix: str) -> Type[FigureCanvasBase]:\n s = normalize_extension_string(suffix)\n if s not in SUPPORTED_FORMATS:\n raise ValueError(f\"Unsupported file format '{suffix}'.\")\n for cls in SUPPORTED_CANVAS_CLASSES:\n if s in cls.get_supported_filetypes():\n return cls\n raise RuntimeError(\n \"Something went terribly wrong. 
\"\n f\"File extension '{suffix}' is supposed to be supported \"\n \"but no compatible backend was found.\"\n )\n\n\ndef normalize_extension_string(s: str) -> str:\n if sys.version_info < (3, 9):\n if s.startswith(\".\"):\n return s[1:]\n return s\n else:\n return s.removeprefix(\".\")\n\n\ndef validate_image_name(filename, suffix: Optional[str] = None) -> str:\n \"\"\"\n Build a valid image filename with a specified extension (default to png).\n The suffix parameter is ignored if the input filename has a valid extension already.\n Otherwise, suffix is appended to the filename, replacing any existing extension.\n \"\"\"\n name, psuffix = os.path.splitext(filename)\n psuffix = normalize_extension_string(psuffix)\n\n if suffix is not None:\n suffix = normalize_extension_string(suffix)\n\n if psuffix in SUPPORTED_FORMATS:\n if suffix in SUPPORTED_FORMATS and suffix != psuffix:\n mylog.warning(\n \"Received two valid image formats '%s' (from `filename`) \"\n \"and '%s' (from `suffix`). The former is ignored.\",\n psuffix,\n suffix,\n )\n return f\"{name}.{suffix}\"\n return str(filename)\n\n if suffix is None:\n suffix = \"png\"\n\n if suffix not in SUPPORTED_FORMATS:\n raise ValueError(f\"Unsupported file format '{suffix}'.\")\n\n return f\"{filename}.{suffix}\"\n\n\ndef get_canvas(figure, filename):\n\n name, suffix = os.path.splitext(filename)\n\n if not suffix:\n raise ValueError(\n f\"Can not determine canvas class from filename '{filename}' \"\n f\"without an extension.\"\n )\n return get_canvas_class(suffix)(figure)\n", "path": "yt/visualization/_commons.py"}]}
1,235
260
gh_patches_debug_38815
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-424
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [WSGI] Replace span name callback with request and response hooks WSGI instrumentation accepts a span name callback which should be replaced with more generic request/response callbacks (hooks). Details: https://github.com/open-telemetry/opentelemetry-python-contrib/issues/408 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """ 15 This library provides a WSGI middleware that can be used on any WSGI framework 16 (such as Django / Flask) to track requests timing through OpenTelemetry. 17 18 Usage (Flask) 19 ------------- 20 21 .. code-block:: python 22 23 from flask import Flask 24 from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware 25 26 app = Flask(__name__) 27 app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app) 28 29 @app.route("/") 30 def hello(): 31 return "Hello!" 32 33 if __name__ == "__main__": 34 app.run(debug=True) 35 36 37 Usage (Django) 38 -------------- 39 40 Modify the application's ``wsgi.py`` file as shown below. 41 42 .. code-block:: python 43 44 import os 45 from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware 46 from django.core.wsgi import get_wsgi_application 47 48 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings') 49 50 application = get_wsgi_application() 51 application = OpenTelemetryMiddleware(application) 52 53 API 54 --- 55 """ 56 57 import functools 58 import typing 59 import wsgiref.util as wsgiref_util 60 61 from opentelemetry import context, trace 62 from opentelemetry.instrumentation.utils import http_status_to_status_code 63 from opentelemetry.instrumentation.wsgi.version import __version__ 64 from opentelemetry.propagate import extract 65 from opentelemetry.propagators.textmap import Getter 66 from opentelemetry.trace.status import Status, StatusCode 67 68 _HTTP_VERSION_PREFIX = "HTTP/" 69 _CARRIER_KEY_PREFIX = "HTTP_" 70 _CARRIER_KEY_PREFIX_LEN = len(_CARRIER_KEY_PREFIX) 71 72 73 class WSGIGetter(Getter): 74 def get( 75 self, carrier: dict, key: str 76 ) -> typing.Optional[typing.List[str]]: 77 """Getter implementation to retrieve a HTTP header value from the 78 PEP3333-conforming WSGI environ 79 80 Args: 81 carrier: WSGI environ object 82 key: header name in environ object 83 Returns: 84 A list with a single string with the header value if it exists, 85 else None. 
86 """ 87 environ_key = "HTTP_" + key.upper().replace("-", "_") 88 value = carrier.get(environ_key) 89 if value is not None: 90 return [value] 91 return None 92 93 def keys(self, carrier): 94 return [ 95 key[_CARRIER_KEY_PREFIX_LEN:].lower().replace("_", "-") 96 for key in carrier 97 if key.startswith(_CARRIER_KEY_PREFIX) 98 ] 99 100 101 wsgi_getter = WSGIGetter() 102 103 104 def setifnotnone(dic, key, value): 105 if value is not None: 106 dic[key] = value 107 108 109 def collect_request_attributes(environ): 110 """Collects HTTP request attributes from the PEP3333-conforming 111 WSGI environ and returns a dictionary to be used as span creation attributes.""" 112 113 result = { 114 "http.method": environ.get("REQUEST_METHOD"), 115 "http.server_name": environ.get("SERVER_NAME"), 116 "http.scheme": environ.get("wsgi.url_scheme"), 117 } 118 119 host_port = environ.get("SERVER_PORT") 120 if host_port is not None: 121 result.update({"net.host.port": int(host_port)}) 122 123 setifnotnone(result, "http.host", environ.get("HTTP_HOST")) 124 target = environ.get("RAW_URI") 125 if target is None: # Note: `"" or None is None` 126 target = environ.get("REQUEST_URI") 127 if target is not None: 128 result["http.target"] = target 129 else: 130 result["http.url"] = wsgiref_util.request_uri(environ) 131 132 remote_addr = environ.get("REMOTE_ADDR") 133 if remote_addr: 134 result["net.peer.ip"] = remote_addr 135 remote_host = environ.get("REMOTE_HOST") 136 if remote_host and remote_host != remote_addr: 137 result["net.peer.name"] = remote_host 138 139 user_agent = environ.get("HTTP_USER_AGENT") 140 if user_agent is not None and len(user_agent) > 0: 141 result["http.user_agent"] = user_agent 142 143 setifnotnone(result, "net.peer.port", environ.get("REMOTE_PORT")) 144 flavor = environ.get("SERVER_PROTOCOL", "") 145 if flavor.upper().startswith(_HTTP_VERSION_PREFIX): 146 flavor = flavor[len(_HTTP_VERSION_PREFIX) :] 147 if flavor: 148 result["http.flavor"] = flavor 149 150 return result 151 152 153 def add_response_attributes( 154 span, start_response_status, response_headers 155 ): # pylint: disable=unused-argument 156 """Adds HTTP response attributes to span using the arguments 157 passed to a PEP3333-conforming start_response callable.""" 158 if not span.is_recording(): 159 return 160 status_code, _ = start_response_status.split(" ", 1) 161 162 try: 163 status_code = int(status_code) 164 except ValueError: 165 span.set_status( 166 Status( 167 StatusCode.ERROR, 168 "Non-integer HTTP status: " + repr(status_code), 169 ) 170 ) 171 else: 172 span.set_attribute("http.status_code", status_code) 173 span.set_status(Status(http_status_to_status_code(status_code))) 174 175 176 def get_default_span_name(environ): 177 """Default implementation for name_callback, returns HTTP {METHOD_NAME}.""" 178 return "HTTP {}".format(environ.get("REQUEST_METHOD", "")).strip() 179 180 181 class OpenTelemetryMiddleware: 182 """The WSGI application middleware. 183 184 This class is a PEP 3333 conforming WSGI middleware that starts and 185 annotates spans for any requests it is invoked with. 186 187 Args: 188 wsgi: The WSGI application callable to forward requests to. 189 name_callback: Callback which calculates a generic span name for an 190 incoming HTTP request based on the PEP3333 WSGI environ. 191 Optional: Defaults to get_default_span_name. 
192 """ 193 194 def __init__(self, wsgi, name_callback=get_default_span_name): 195 self.wsgi = wsgi 196 self.tracer = trace.get_tracer(__name__, __version__) 197 self.name_callback = name_callback 198 199 @staticmethod 200 def _create_start_response(span, start_response): 201 @functools.wraps(start_response) 202 def _start_response(status, response_headers, *args, **kwargs): 203 add_response_attributes(span, status, response_headers) 204 return start_response(status, response_headers, *args, **kwargs) 205 206 return _start_response 207 208 def __call__(self, environ, start_response): 209 """The WSGI application 210 211 Args: 212 environ: A WSGI environment. 213 start_response: The WSGI start_response callable. 214 """ 215 216 token = context.attach(extract(environ, getter=wsgi_getter)) 217 span_name = self.name_callback(environ) 218 219 span = self.tracer.start_span( 220 span_name, 221 kind=trace.SpanKind.SERVER, 222 attributes=collect_request_attributes(environ), 223 ) 224 225 try: 226 with trace.use_span(span): 227 start_response = self._create_start_response( 228 span, start_response 229 ) 230 iterable = self.wsgi(environ, start_response) 231 return _end_span_after_iterating( 232 iterable, span, self.tracer, token 233 ) 234 except Exception as ex: 235 if span.is_recording(): 236 span.set_status(Status(StatusCode.ERROR, str(ex))) 237 span.end() 238 context.detach(token) 239 raise 240 241 242 # Put this in a subfunction to not delay the call to the wrapped 243 # WSGI application (instrumentation should change the application 244 # behavior as little as possible). 245 def _end_span_after_iterating(iterable, span, tracer, token): 246 try: 247 with trace.use_span(span): 248 for yielded in iterable: 249 yield yielded 250 finally: 251 close = getattr(iterable, "close", None) 252 if close: 253 close() 254 span.end() 255 context.detach(token) 256 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py --- a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py @@ -186,21 +186,26 @@ Args: wsgi: The WSGI application callable to forward requests to. - name_callback: Callback which calculates a generic span name for an - incoming HTTP request based on the PEP3333 WSGI environ. - Optional: Defaults to get_default_span_name. + request_hook: Optional callback which is called with the server span and WSGI + environ object for every incoming request. + response_hook: Optional callback which is called with the server span, + WSGI environ, status_code and response_headers for every + incoming request. """ - def __init__(self, wsgi, name_callback=get_default_span_name): + def __init__(self, wsgi, request_hook=None, response_hook=None): self.wsgi = wsgi self.tracer = trace.get_tracer(__name__, __version__) - self.name_callback = name_callback + self.request_hook = request_hook + self.response_hook = response_hook @staticmethod - def _create_start_response(span, start_response): + def _create_start_response(span, start_response, response_hook): @functools.wraps(start_response) def _start_response(status, response_headers, *args, **kwargs): add_response_attributes(span, status, response_headers) + if response_hook: + response_hook(status, response_headers) return start_response(status, response_headers, *args, **kwargs) return _start_response @@ -214,18 +219,24 @@ """ token = context.attach(extract(environ, getter=wsgi_getter)) - span_name = self.name_callback(environ) span = self.tracer.start_span( - span_name, + get_default_span_name(environ), kind=trace.SpanKind.SERVER, attributes=collect_request_attributes(environ), ) + if self.request_hook: + self.request_hook(span, environ) + + response_hook = self.response_hook + if response_hook: + response_hook = functools.partial(response_hook, span, environ) + try: with trace.use_span(span): start_response = self._create_start_response( - span, start_response + span, start_response, response_hook ) iterable = self.wsgi(environ, start_response) return _end_span_after_iterating(
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n@@ -186,21 +186,26 @@\n \n Args:\n wsgi: The WSGI application callable to forward requests to.\n- name_callback: Callback which calculates a generic span name for an\n- incoming HTTP request based on the PEP3333 WSGI environ.\n- Optional: Defaults to get_default_span_name.\n+ request_hook: Optional callback which is called with the server span and WSGI\n+ environ object for every incoming request.\n+ response_hook: Optional callback which is called with the server span,\n+ WSGI environ, status_code and response_headers for every\n+ incoming request.\n \"\"\"\n \n- def __init__(self, wsgi, name_callback=get_default_span_name):\n+ def __init__(self, wsgi, request_hook=None, response_hook=None):\n self.wsgi = wsgi\n self.tracer = trace.get_tracer(__name__, __version__)\n- self.name_callback = name_callback\n+ self.request_hook = request_hook\n+ self.response_hook = response_hook\n \n @staticmethod\n- def _create_start_response(span, start_response):\n+ def _create_start_response(span, start_response, response_hook):\n @functools.wraps(start_response)\n def _start_response(status, response_headers, *args, **kwargs):\n add_response_attributes(span, status, response_headers)\n+ if response_hook:\n+ response_hook(status, response_headers)\n return start_response(status, response_headers, *args, **kwargs)\n \n return _start_response\n@@ -214,18 +219,24 @@\n \"\"\"\n \n token = context.attach(extract(environ, getter=wsgi_getter))\n- span_name = self.name_callback(environ)\n \n span = self.tracer.start_span(\n- span_name,\n+ get_default_span_name(environ),\n kind=trace.SpanKind.SERVER,\n attributes=collect_request_attributes(environ),\n )\n \n+ if self.request_hook:\n+ self.request_hook(span, environ)\n+\n+ response_hook = self.response_hook\n+ if response_hook:\n+ response_hook = functools.partial(response_hook, span, environ)\n+\n try:\n with trace.use_span(span):\n start_response = self._create_start_response(\n- span, start_response\n+ span, start_response, response_hook\n )\n iterable = self.wsgi(environ, start_response)\n return _end_span_after_iterating(\n", "issue": "[WSGI] Replace span name callback with request and response hooks \nWSGI instrumentation accepts a span name callback which should be replaced with more generic request/response callbacks (hooks). 
\r\n\r\nDetails: https://github.com/open-telemetry/opentelemetry-python-contrib/issues/408\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis library provides a WSGI middleware that can be used on any WSGI framework\n(such as Django / Flask) to track requests timing through OpenTelemetry.\n\nUsage (Flask)\n-------------\n\n.. code-block:: python\n\n from flask import Flask\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n\n app = Flask(__name__)\n app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app)\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\n\nUsage (Django)\n--------------\n\nModify the application's ``wsgi.py`` file as shown below.\n\n.. code-block:: python\n\n import os\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n from django.core.wsgi import get_wsgi_application\n\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings')\n\n application = get_wsgi_application()\n application = OpenTelemetryMiddleware(application)\n\nAPI\n---\n\"\"\"\n\nimport functools\nimport typing\nimport wsgiref.util as wsgiref_util\n\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.instrumentation.wsgi.version import __version__\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.trace.status import Status, StatusCode\n\n_HTTP_VERSION_PREFIX = \"HTTP/\"\n_CARRIER_KEY_PREFIX = \"HTTP_\"\n_CARRIER_KEY_PREFIX_LEN = len(_CARRIER_KEY_PREFIX)\n\n\nclass WSGIGetter(Getter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the\n PEP3333-conforming WSGI environ\n\n Args:\n carrier: WSGI environ object\n key: header name in environ object\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n environ_key = \"HTTP_\" + key.upper().replace(\"-\", \"_\")\n value = carrier.get(environ_key)\n if value is not None:\n return [value]\n return None\n\n def keys(self, carrier):\n return [\n key[_CARRIER_KEY_PREFIX_LEN:].lower().replace(\"_\", \"-\")\n for key in carrier\n if key.startswith(_CARRIER_KEY_PREFIX)\n ]\n\n\nwsgi_getter = WSGIGetter()\n\n\ndef setifnotnone(dic, key, value):\n if value is not None:\n dic[key] = value\n\n\ndef collect_request_attributes(environ):\n \"\"\"Collects HTTP request attributes from the PEP3333-conforming\n WSGI environ and returns a dictionary to be used as span creation attributes.\"\"\"\n\n result = {\n \"http.method\": environ.get(\"REQUEST_METHOD\"),\n \"http.server_name\": environ.get(\"SERVER_NAME\"),\n \"http.scheme\": environ.get(\"wsgi.url_scheme\"),\n }\n\n host_port = environ.get(\"SERVER_PORT\")\n if host_port is not None:\n result.update({\"net.host.port\": 
int(host_port)})\n\n setifnotnone(result, \"http.host\", environ.get(\"HTTP_HOST\"))\n target = environ.get(\"RAW_URI\")\n if target is None: # Note: `\"\" or None is None`\n target = environ.get(\"REQUEST_URI\")\n if target is not None:\n result[\"http.target\"] = target\n else:\n result[\"http.url\"] = wsgiref_util.request_uri(environ)\n\n remote_addr = environ.get(\"REMOTE_ADDR\")\n if remote_addr:\n result[\"net.peer.ip\"] = remote_addr\n remote_host = environ.get(\"REMOTE_HOST\")\n if remote_host and remote_host != remote_addr:\n result[\"net.peer.name\"] = remote_host\n\n user_agent = environ.get(\"HTTP_USER_AGENT\")\n if user_agent is not None and len(user_agent) > 0:\n result[\"http.user_agent\"] = user_agent\n\n setifnotnone(result, \"net.peer.port\", environ.get(\"REMOTE_PORT\"))\n flavor = environ.get(\"SERVER_PROTOCOL\", \"\")\n if flavor.upper().startswith(_HTTP_VERSION_PREFIX):\n flavor = flavor[len(_HTTP_VERSION_PREFIX) :]\n if flavor:\n result[\"http.flavor\"] = flavor\n\n return result\n\n\ndef add_response_attributes(\n span, start_response_status, response_headers\n): # pylint: disable=unused-argument\n \"\"\"Adds HTTP response attributes to span using the arguments\n passed to a PEP3333-conforming start_response callable.\"\"\"\n if not span.is_recording():\n return\n status_code, _ = start_response_status.split(\" \", 1)\n\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(\"http.status_code\", status_code)\n span.set_status(Status(http_status_to_status_code(status_code)))\n\n\ndef get_default_span_name(environ):\n \"\"\"Default implementation for name_callback, returns HTTP {METHOD_NAME}.\"\"\"\n return \"HTTP {}\".format(environ.get(\"REQUEST_METHOD\", \"\")).strip()\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The WSGI application middleware.\n\n This class is a PEP 3333 conforming WSGI middleware that starts and\n annotates spans for any requests it is invoked with.\n\n Args:\n wsgi: The WSGI application callable to forward requests to.\n name_callback: Callback which calculates a generic span name for an\n incoming HTTP request based on the PEP3333 WSGI environ.\n Optional: Defaults to get_default_span_name.\n \"\"\"\n\n def __init__(self, wsgi, name_callback=get_default_span_name):\n self.wsgi = wsgi\n self.tracer = trace.get_tracer(__name__, __version__)\n self.name_callback = name_callback\n\n @staticmethod\n def _create_start_response(span, start_response):\n @functools.wraps(start_response)\n def _start_response(status, response_headers, *args, **kwargs):\n add_response_attributes(span, status, response_headers)\n return start_response(status, response_headers, *args, **kwargs)\n\n return _start_response\n\n def __call__(self, environ, start_response):\n \"\"\"The WSGI application\n\n Args:\n environ: A WSGI environment.\n start_response: The WSGI start_response callable.\n \"\"\"\n\n token = context.attach(extract(environ, getter=wsgi_getter))\n span_name = self.name_callback(environ)\n\n span = self.tracer.start_span(\n span_name,\n kind=trace.SpanKind.SERVER,\n attributes=collect_request_attributes(environ),\n )\n\n try:\n with trace.use_span(span):\n start_response = self._create_start_response(\n span, start_response\n )\n iterable = self.wsgi(environ, start_response)\n return _end_span_after_iterating(\n iterable, span, self.tracer, token\n )\n except Exception as ex:\n if span.is_recording():\n 
span.set_status(Status(StatusCode.ERROR, str(ex)))\n span.end()\n context.detach(token)\n raise\n\n\n# Put this in a subfunction to not delay the call to the wrapped\n# WSGI application (instrumentation should change the application\n# behavior as little as possible).\ndef _end_span_after_iterating(iterable, span, tracer, token):\n try:\n with trace.use_span(span):\n for yielded in iterable:\n yield yielded\n finally:\n close = getattr(iterable, \"close\", None)\n if close:\n close()\n span.end()\n context.detach(token)\n", "path": "instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis library provides a WSGI middleware that can be used on any WSGI framework\n(such as Django / Flask) to track requests timing through OpenTelemetry.\n\nUsage (Flask)\n-------------\n\n.. code-block:: python\n\n from flask import Flask\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n\n app = Flask(__name__)\n app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app)\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\n\nUsage (Django)\n--------------\n\nModify the application's ``wsgi.py`` file as shown below.\n\n.. 
code-block:: python\n\n import os\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n from django.core.wsgi import get_wsgi_application\n\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings')\n\n application = get_wsgi_application()\n application = OpenTelemetryMiddleware(application)\n\nAPI\n---\n\"\"\"\n\nimport functools\nimport typing\nimport wsgiref.util as wsgiref_util\n\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.instrumentation.wsgi.version import __version__\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.trace.status import Status, StatusCode\n\n_HTTP_VERSION_PREFIX = \"HTTP/\"\n_CARRIER_KEY_PREFIX = \"HTTP_\"\n_CARRIER_KEY_PREFIX_LEN = len(_CARRIER_KEY_PREFIX)\n\n\nclass WSGIGetter(Getter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the\n PEP3333-conforming WSGI environ\n\n Args:\n carrier: WSGI environ object\n key: header name in environ object\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n environ_key = \"HTTP_\" + key.upper().replace(\"-\", \"_\")\n value = carrier.get(environ_key)\n if value is not None:\n return [value]\n return None\n\n def keys(self, carrier):\n return [\n key[_CARRIER_KEY_PREFIX_LEN:].lower().replace(\"_\", \"-\")\n for key in carrier\n if key.startswith(_CARRIER_KEY_PREFIX)\n ]\n\n\nwsgi_getter = WSGIGetter()\n\n\ndef setifnotnone(dic, key, value):\n if value is not None:\n dic[key] = value\n\n\ndef collect_request_attributes(environ):\n \"\"\"Collects HTTP request attributes from the PEP3333-conforming\n WSGI environ and returns a dictionary to be used as span creation attributes.\"\"\"\n\n result = {\n \"http.method\": environ.get(\"REQUEST_METHOD\"),\n \"http.server_name\": environ.get(\"SERVER_NAME\"),\n \"http.scheme\": environ.get(\"wsgi.url_scheme\"),\n }\n\n host_port = environ.get(\"SERVER_PORT\")\n if host_port is not None:\n result.update({\"net.host.port\": int(host_port)})\n\n setifnotnone(result, \"http.host\", environ.get(\"HTTP_HOST\"))\n target = environ.get(\"RAW_URI\")\n if target is None: # Note: `\"\" or None is None`\n target = environ.get(\"REQUEST_URI\")\n if target is not None:\n result[\"http.target\"] = target\n else:\n result[\"http.url\"] = wsgiref_util.request_uri(environ)\n\n remote_addr = environ.get(\"REMOTE_ADDR\")\n if remote_addr:\n result[\"net.peer.ip\"] = remote_addr\n remote_host = environ.get(\"REMOTE_HOST\")\n if remote_host and remote_host != remote_addr:\n result[\"net.peer.name\"] = remote_host\n\n user_agent = environ.get(\"HTTP_USER_AGENT\")\n if user_agent is not None and len(user_agent) > 0:\n result[\"http.user_agent\"] = user_agent\n\n setifnotnone(result, \"net.peer.port\", environ.get(\"REMOTE_PORT\"))\n flavor = environ.get(\"SERVER_PROTOCOL\", \"\")\n if flavor.upper().startswith(_HTTP_VERSION_PREFIX):\n flavor = flavor[len(_HTTP_VERSION_PREFIX) :]\n if flavor:\n result[\"http.flavor\"] = flavor\n\n return result\n\n\ndef add_response_attributes(\n span, start_response_status, response_headers\n): # pylint: disable=unused-argument\n \"\"\"Adds HTTP response attributes to span using the arguments\n passed to a PEP3333-conforming start_response callable.\"\"\"\n if not span.is_recording():\n return\n status_code, _ = 
start_response_status.split(\" \", 1)\n\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(\"http.status_code\", status_code)\n span.set_status(Status(http_status_to_status_code(status_code)))\n\n\ndef get_default_span_name(environ):\n \"\"\"Default implementation for name_callback, returns HTTP {METHOD_NAME}.\"\"\"\n return \"HTTP {}\".format(environ.get(\"REQUEST_METHOD\", \"\")).strip()\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The WSGI application middleware.\n\n This class is a PEP 3333 conforming WSGI middleware that starts and\n annotates spans for any requests it is invoked with.\n\n Args:\n wsgi: The WSGI application callable to forward requests to.\n request_hook: Optional callback which is called with the server span and WSGI\n environ object for every incoming request.\n response_hook: Optional callback which is called with the server span,\n WSGI environ, status_code and response_headers for every\n incoming request.\n \"\"\"\n\n def __init__(self, wsgi, request_hook=None, response_hook=None):\n self.wsgi = wsgi\n self.tracer = trace.get_tracer(__name__, __version__)\n self.request_hook = request_hook\n self.response_hook = response_hook\n\n @staticmethod\n def _create_start_response(span, start_response, response_hook):\n @functools.wraps(start_response)\n def _start_response(status, response_headers, *args, **kwargs):\n add_response_attributes(span, status, response_headers)\n if response_hook:\n response_hook(status, response_headers)\n return start_response(status, response_headers, *args, **kwargs)\n\n return _start_response\n\n def __call__(self, environ, start_response):\n \"\"\"The WSGI application\n\n Args:\n environ: A WSGI environment.\n start_response: The WSGI start_response callable.\n \"\"\"\n\n token = context.attach(extract(environ, getter=wsgi_getter))\n\n span = self.tracer.start_span(\n get_default_span_name(environ),\n kind=trace.SpanKind.SERVER,\n attributes=collect_request_attributes(environ),\n )\n\n if self.request_hook:\n self.request_hook(span, environ)\n\n response_hook = self.response_hook\n if response_hook:\n response_hook = functools.partial(response_hook, span, environ)\n\n try:\n with trace.use_span(span):\n start_response = self._create_start_response(\n span, start_response, response_hook\n )\n iterable = self.wsgi(environ, start_response)\n return _end_span_after_iterating(\n iterable, span, self.tracer, token\n )\n except Exception as ex:\n if span.is_recording():\n span.set_status(Status(StatusCode.ERROR, str(ex)))\n span.end()\n context.detach(token)\n raise\n\n\n# Put this in a subfunction to not delay the call to the wrapped\n# WSGI application (instrumentation should change the application\n# behavior as little as possible).\ndef _end_span_after_iterating(iterable, span, tracer, token):\n try:\n with trace.use_span(span):\n for yielded in iterable:\n yield yielded\n finally:\n close = getattr(iterable, \"close\", None)\n if close:\n close()\n span.end()\n context.detach(token)\n", "path": "instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py"}]}
2,828
643
gh_patches_debug_5604
rasdani/github-patches
git_diff
bokeh__bokeh-9682
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [DOCUMENTATION] typo in texas.py Superfluous parenthesis on [line 34 of texas.py](https://github.com/bokeh/bokeh/blob/aa60b9c9d554fbd349a21da37d616c5b8eda8c09/examples/plotting/file/texas.py#L34). Shows up in the hover tool tip. Not sure whether you want a PR for something so small? So far I haven't found any other corrections that could be added in. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/plotting/file/texas.py` Content: ``` 1 from bokeh.io import show 2 from bokeh.models import LogColorMapper 3 from bokeh.palettes import Viridis6 as palette 4 from bokeh.plotting import figure 5 from bokeh.sampledata.unemployment import data as unemployment 6 from bokeh.sampledata.us_counties import data as counties 7 8 palette = tuple(reversed(palette)) 9 10 counties = { 11 code: county for code, county in counties.items() if county["state"] == "tx" 12 } 13 14 county_xs = [county["lons"] for county in counties.values()] 15 county_ys = [county["lats"] for county in counties.values()] 16 17 county_names = [county['name'] for county in counties.values()] 18 county_rates = [unemployment[county_id] for county_id in counties] 19 color_mapper = LogColorMapper(palette=palette) 20 21 data=dict( 22 x=county_xs, 23 y=county_ys, 24 name=county_names, 25 rate=county_rates, 26 ) 27 28 TOOLS = "pan,wheel_zoom,reset,hover,save" 29 30 p = figure( 31 title="Texas Unemployment, 2009", tools=TOOLS, 32 x_axis_location=None, y_axis_location=None, 33 tooltips=[ 34 ("Name", "@name"), ("Unemployment rate)", "@rate%"), ("(Long, Lat)", "($x, $y)") 35 ]) 36 p.grid.grid_line_color = None 37 p.hover.point_policy = "follow_mouse" 38 39 p.patches('x', 'y', source=data, 40 fill_color={'field': 'rate', 'transform': color_mapper}, 41 fill_alpha=0.7, line_color="white", line_width=0.5) 42 43 show(p) 44 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/plotting/file/texas.py b/examples/plotting/file/texas.py --- a/examples/plotting/file/texas.py +++ b/examples/plotting/file/texas.py @@ -31,7 +31,7 @@ title="Texas Unemployment, 2009", tools=TOOLS, x_axis_location=None, y_axis_location=None, tooltips=[ - ("Name", "@name"), ("Unemployment rate)", "@rate%"), ("(Long, Lat)", "($x, $y)") + ("Name", "@name"), ("Unemployment rate", "@rate%"), ("(Long, Lat)", "($x, $y)") ]) p.grid.grid_line_color = None p.hover.point_policy = "follow_mouse"
{"golden_diff": "diff --git a/examples/plotting/file/texas.py b/examples/plotting/file/texas.py\n--- a/examples/plotting/file/texas.py\n+++ b/examples/plotting/file/texas.py\n@@ -31,7 +31,7 @@\n title=\"Texas Unemployment, 2009\", tools=TOOLS,\n x_axis_location=None, y_axis_location=None,\n tooltips=[\n- (\"Name\", \"@name\"), (\"Unemployment rate)\", \"@rate%\"), (\"(Long, Lat)\", \"($x, $y)\")\n+ (\"Name\", \"@name\"), (\"Unemployment rate\", \"@rate%\"), (\"(Long, Lat)\", \"($x, $y)\")\n ])\n p.grid.grid_line_color = None\n p.hover.point_policy = \"follow_mouse\"\n", "issue": "[DOCUMENTATION] typo in texas.py\nSuperfluous parenthesis on [line 34 of texas.py](https://github.com/bokeh/bokeh/blob/aa60b9c9d554fbd349a21da37d616c5b8eda8c09/examples/plotting/file/texas.py#L34). Shows up in the hover tool tip.\r\n\r\nNot sure whether you want a PR for something so small? So far I haven't found any other corrections that could be added in.\n", "before_files": [{"content": "from bokeh.io import show\nfrom bokeh.models import LogColorMapper\nfrom bokeh.palettes import Viridis6 as palette\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.unemployment import data as unemployment\nfrom bokeh.sampledata.us_counties import data as counties\n\npalette = tuple(reversed(palette))\n\ncounties = {\n code: county for code, county in counties.items() if county[\"state\"] == \"tx\"\n}\n\ncounty_xs = [county[\"lons\"] for county in counties.values()]\ncounty_ys = [county[\"lats\"] for county in counties.values()]\n\ncounty_names = [county['name'] for county in counties.values()]\ncounty_rates = [unemployment[county_id] for county_id in counties]\ncolor_mapper = LogColorMapper(palette=palette)\n\ndata=dict(\n x=county_xs,\n y=county_ys,\n name=county_names,\n rate=county_rates,\n)\n\nTOOLS = \"pan,wheel_zoom,reset,hover,save\"\n\np = figure(\n title=\"Texas Unemployment, 2009\", tools=TOOLS,\n x_axis_location=None, y_axis_location=None,\n tooltips=[\n (\"Name\", \"@name\"), (\"Unemployment rate)\", \"@rate%\"), (\"(Long, Lat)\", \"($x, $y)\")\n ])\np.grid.grid_line_color = None\np.hover.point_policy = \"follow_mouse\"\n\np.patches('x', 'y', source=data,\n fill_color={'field': 'rate', 'transform': color_mapper},\n fill_alpha=0.7, line_color=\"white\", line_width=0.5)\n\nshow(p)\n", "path": "examples/plotting/file/texas.py"}], "after_files": [{"content": "from bokeh.io import show\nfrom bokeh.models import LogColorMapper\nfrom bokeh.palettes import Viridis6 as palette\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.unemployment import data as unemployment\nfrom bokeh.sampledata.us_counties import data as counties\n\npalette = tuple(reversed(palette))\n\ncounties = {\n code: county for code, county in counties.items() if county[\"state\"] == \"tx\"\n}\n\ncounty_xs = [county[\"lons\"] for county in counties.values()]\ncounty_ys = [county[\"lats\"] for county in counties.values()]\n\ncounty_names = [county['name'] for county in counties.values()]\ncounty_rates = [unemployment[county_id] for county_id in counties]\ncolor_mapper = LogColorMapper(palette=palette)\n\ndata=dict(\n x=county_xs,\n y=county_ys,\n name=county_names,\n rate=county_rates,\n)\n\nTOOLS = \"pan,wheel_zoom,reset,hover,save\"\n\np = figure(\n title=\"Texas Unemployment, 2009\", tools=TOOLS,\n x_axis_location=None, y_axis_location=None,\n tooltips=[\n (\"Name\", \"@name\"), (\"Unemployment rate\", \"@rate%\"), (\"(Long, Lat)\", \"($x, $y)\")\n ])\np.grid.grid_line_color = None\np.hover.point_policy = 
\"follow_mouse\"\n\np.patches('x', 'y', source=data,\n fill_color={'field': 'rate', 'transform': color_mapper},\n fill_alpha=0.7, line_color=\"white\", line_width=0.5)\n\nshow(p)\n", "path": "examples/plotting/file/texas.py"}]}
811
166
gh_patches_debug_50247
rasdani/github-patches
git_diff
sopel-irc__sopel-2154
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add `.clearpronouns` command ### The problem Users might set their pronouns by mistake or just to test the functionality and then they are stuck. ### The solution Add an "unsetpronouns" that deletes pronoun information for the nick. Something like this might work. ```python @plugin.command('unsetpronouns') def unset_pronouns(bot, trigger): bot.db.delete_nick_value(trigger.nick, 'pronouns') ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sopel/modules/pronouns.py` Content: ``` 1 """ 2 pronouns.py - Sopel Pronouns Plugin 3 Copyright © 2016, Elsie Powell 4 Licensed under the Eiffel Forum License 2. 5 6 https://sopel.chat 7 """ 8 from __future__ import generator_stop 9 10 from sopel import plugin 11 12 13 # Copied from pronoun.is, leaving a *lot* out. If 14 # https://github.com/witch-house/pronoun.is/pull/96 gets merged, using that 15 # would be a lot easier. 16 # If ambiguous, the earlier one will be used. 17 KNOWN_SETS = { 18 "ze/hir": "ze/hir/hir/hirs/hirself", 19 "ze/zir": "ze/zir/zir/zirs/zirself", 20 "they/.../themselves": "they/them/their/theirs/themselves", 21 "they/.../themself": "they/them/their/theirs/themself", 22 "she/her": "she/her/her/hers/herself", 23 "he/him": "he/him/his/his/himself", 24 "xey/xem": "xey/xem/xyr/xyrs/xemself", 25 "sie/hir": "sie/hir/hir/hirs/hirself", 26 "it/it": "it/it/its/its/itself", 27 "ey/em": "ey/em/eir/eirs/eirself", 28 } 29 30 31 @plugin.command('pronouns') 32 @plugin.example('.pronouns Embolalia') 33 def pronouns(bot, trigger): 34 """Show the pronouns for a given user, defaulting to the current user if left blank.""" 35 if not trigger.group(3): 36 pronouns = bot.db.get_nick_value(trigger.nick, 'pronouns') 37 if pronouns: 38 say_pronouns(bot, trigger.nick, pronouns) 39 else: 40 bot.reply("I don't know your pronouns! You can set them with " 41 "{}setpronouns".format(bot.config.core.help_prefix)) 42 else: 43 pronouns = bot.db.get_nick_value(trigger.group(3), 'pronouns') 44 if pronouns: 45 say_pronouns(bot, trigger.group(3), pronouns) 46 elif trigger.group(3) == bot.nick: 47 # You can stuff an entry into the database manually for your bot's 48 # gender, but like… it's a bot. 49 bot.say( 50 "I am a bot. Beep boop. My pronouns are it/it/its/its/itself. " 51 "See https://pronoun.is/it for examples." 52 ) 53 else: 54 bot.reply("I don't know {}'s pronouns. They can set them with " 55 "{}setpronouns".format(trigger.group(3), 56 bot.config.core.help_prefix)) 57 58 59 def say_pronouns(bot, nick, pronouns): 60 for short, set_ in KNOWN_SETS.items(): 61 if pronouns == set_: 62 break 63 short = pronouns 64 65 bot.say("{}'s pronouns are {}. 
See https://pronoun.is/{} for " 66 "examples.".format(nick, pronouns, short)) 67 68 69 @plugin.command('setpronouns') 70 @plugin.example('.setpronouns fae/faer/faer/faers/faerself') 71 @plugin.example('.setpronouns they/them/theirs') 72 @plugin.example('.setpronouns they/them') 73 def set_pronouns(bot, trigger): 74 """Set your pronouns.""" 75 pronouns = trigger.group(2) 76 if not pronouns: 77 bot.reply('What pronouns do you use?') 78 return 79 80 disambig = '' 81 requested_pronoun_split = pronouns.split("/") 82 if len(requested_pronoun_split) < 5: 83 matching = [] 84 for known_pronoun_set in KNOWN_SETS.values(): 85 known_pronoun_split = known_pronoun_set.split("/") 86 if known_pronoun_set.startswith(pronouns + "/") or ( 87 len(requested_pronoun_split) == 3 88 and ( 89 ( 90 # "they/.../themself" 91 requested_pronoun_split[1] == "..." 92 and requested_pronoun_split[0] == known_pronoun_split[0] 93 and requested_pronoun_split[2] == known_pronoun_split[4] 94 ) 95 or ( 96 # "they/them/theirs" 97 requested_pronoun_split[0:2] == known_pronoun_split[0:2] 98 and requested_pronoun_split[2] == known_pronoun_split[3] 99 ) 100 ) 101 ): 102 matching.append(known_pronoun_set) 103 104 if len(matching) == 0: 105 bot.reply( 106 "I'm sorry, I don't know those pronouns. " 107 "You can give me a set I don't know by formatting it " 108 "subject/object/possessive-determiner/possessive-pronoun/" 109 "reflexive, as in: they/them/their/theirs/themselves" 110 ) 111 return 112 113 pronouns = matching[0] 114 if len(matching) > 1: 115 disambig = " Or, if you meant one of these, please tell me: {}".format( 116 ", ".join(matching[1:]) 117 ) 118 119 bot.db.set_nick_value(trigger.nick, 'pronouns', pronouns) 120 bot.reply( 121 "Thanks for telling me! I'll remember you use {}.{}".format(pronouns, disambig) 122 ) 123 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sopel/modules/pronouns.py b/sopel/modules/pronouns.py --- a/sopel/modules/pronouns.py +++ b/sopel/modules/pronouns.py @@ -120,3 +120,10 @@ bot.reply( "Thanks for telling me! I'll remember you use {}.{}".format(pronouns, disambig) ) + + [email protected]('clearpronouns') +def unset_pronouns(bot, trigger): + """Clear pronouns for the given user.""" + bot.db.delete_nick_value(trigger.nick, 'pronouns') + bot.reply("Okay, I'll forget your pronouns.")
{"golden_diff": "diff --git a/sopel/modules/pronouns.py b/sopel/modules/pronouns.py\n--- a/sopel/modules/pronouns.py\n+++ b/sopel/modules/pronouns.py\n@@ -120,3 +120,10 @@\n bot.reply(\n \"Thanks for telling me! I'll remember you use {}.{}\".format(pronouns, disambig)\n )\n+\n+\[email protected]('clearpronouns')\n+def unset_pronouns(bot, trigger):\n+ \"\"\"Clear pronouns for the given user.\"\"\"\n+ bot.db.delete_nick_value(trigger.nick, 'pronouns')\n+ bot.reply(\"Okay, I'll forget your pronouns.\")\n", "issue": "Add `.clearpronouns` command\n### The problem\r\nUsers might set their pronouns by mistake or just to test the functionality and then they are stuck.\r\n\r\n### The solution\r\n\r\nAdd an \"unsetpronouns\" that deletes pronoun information for the nick. \r\nSomething like this might work. \r\n\r\n```python\r\[email protected]('unsetpronouns')\r\ndef unset_pronouns(bot, trigger):\r\n bot.db.delete_nick_value(trigger.nick, 'pronouns')\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\npronouns.py - Sopel Pronouns Plugin\nCopyright \u00a9 2016, Elsie Powell\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import generator_stop\n\nfrom sopel import plugin\n\n\n# Copied from pronoun.is, leaving a *lot* out. If\n# https://github.com/witch-house/pronoun.is/pull/96 gets merged, using that\n# would be a lot easier.\n# If ambiguous, the earlier one will be used.\nKNOWN_SETS = {\n \"ze/hir\": \"ze/hir/hir/hirs/hirself\",\n \"ze/zir\": \"ze/zir/zir/zirs/zirself\",\n \"they/.../themselves\": \"they/them/their/theirs/themselves\",\n \"they/.../themself\": \"they/them/their/theirs/themself\",\n \"she/her\": \"she/her/her/hers/herself\",\n \"he/him\": \"he/him/his/his/himself\",\n \"xey/xem\": \"xey/xem/xyr/xyrs/xemself\",\n \"sie/hir\": \"sie/hir/hir/hirs/hirself\",\n \"it/it\": \"it/it/its/its/itself\",\n \"ey/em\": \"ey/em/eir/eirs/eirself\",\n}\n\n\[email protected]('pronouns')\[email protected]('.pronouns Embolalia')\ndef pronouns(bot, trigger):\n \"\"\"Show the pronouns for a given user, defaulting to the current user if left blank.\"\"\"\n if not trigger.group(3):\n pronouns = bot.db.get_nick_value(trigger.nick, 'pronouns')\n if pronouns:\n say_pronouns(bot, trigger.nick, pronouns)\n else:\n bot.reply(\"I don't know your pronouns! You can set them with \"\n \"{}setpronouns\".format(bot.config.core.help_prefix))\n else:\n pronouns = bot.db.get_nick_value(trigger.group(3), 'pronouns')\n if pronouns:\n say_pronouns(bot, trigger.group(3), pronouns)\n elif trigger.group(3) == bot.nick:\n # You can stuff an entry into the database manually for your bot's\n # gender, but like\u2026 it's a bot.\n bot.say(\n \"I am a bot. Beep boop. My pronouns are it/it/its/its/itself. \"\n \"See https://pronoun.is/it for examples.\"\n )\n else:\n bot.reply(\"I don't know {}'s pronouns. They can set them with \"\n \"{}setpronouns\".format(trigger.group(3),\n bot.config.core.help_prefix))\n\n\ndef say_pronouns(bot, nick, pronouns):\n for short, set_ in KNOWN_SETS.items():\n if pronouns == set_:\n break\n short = pronouns\n\n bot.say(\"{}'s pronouns are {}. 
See https://pronoun.is/{} for \"\n \"examples.\".format(nick, pronouns, short))\n\n\[email protected]('setpronouns')\[email protected]('.setpronouns fae/faer/faer/faers/faerself')\[email protected]('.setpronouns they/them/theirs')\[email protected]('.setpronouns they/them')\ndef set_pronouns(bot, trigger):\n \"\"\"Set your pronouns.\"\"\"\n pronouns = trigger.group(2)\n if not pronouns:\n bot.reply('What pronouns do you use?')\n return\n\n disambig = ''\n requested_pronoun_split = pronouns.split(\"/\")\n if len(requested_pronoun_split) < 5:\n matching = []\n for known_pronoun_set in KNOWN_SETS.values():\n known_pronoun_split = known_pronoun_set.split(\"/\")\n if known_pronoun_set.startswith(pronouns + \"/\") or (\n len(requested_pronoun_split) == 3\n and (\n (\n # \"they/.../themself\"\n requested_pronoun_split[1] == \"...\"\n and requested_pronoun_split[0] == known_pronoun_split[0]\n and requested_pronoun_split[2] == known_pronoun_split[4]\n )\n or (\n # \"they/them/theirs\"\n requested_pronoun_split[0:2] == known_pronoun_split[0:2]\n and requested_pronoun_split[2] == known_pronoun_split[3]\n )\n )\n ):\n matching.append(known_pronoun_set)\n\n if len(matching) == 0:\n bot.reply(\n \"I'm sorry, I don't know those pronouns. \"\n \"You can give me a set I don't know by formatting it \"\n \"subject/object/possessive-determiner/possessive-pronoun/\"\n \"reflexive, as in: they/them/their/theirs/themselves\"\n )\n return\n\n pronouns = matching[0]\n if len(matching) > 1:\n disambig = \" Or, if you meant one of these, please tell me: {}\".format(\n \", \".join(matching[1:])\n )\n\n bot.db.set_nick_value(trigger.nick, 'pronouns', pronouns)\n bot.reply(\n \"Thanks for telling me! I'll remember you use {}.{}\".format(pronouns, disambig)\n )\n", "path": "sopel/modules/pronouns.py"}], "after_files": [{"content": "\"\"\"\npronouns.py - Sopel Pronouns Plugin\nCopyright \u00a9 2016, Elsie Powell\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import generator_stop\n\nfrom sopel import plugin\n\n\n# Copied from pronoun.is, leaving a *lot* out. If\n# https://github.com/witch-house/pronoun.is/pull/96 gets merged, using that\n# would be a lot easier.\n# If ambiguous, the earlier one will be used.\nKNOWN_SETS = {\n \"ze/hir\": \"ze/hir/hir/hirs/hirself\",\n \"ze/zir\": \"ze/zir/zir/zirs/zirself\",\n \"they/.../themselves\": \"they/them/their/theirs/themselves\",\n \"they/.../themself\": \"they/them/their/theirs/themself\",\n \"she/her\": \"she/her/her/hers/herself\",\n \"he/him\": \"he/him/his/his/himself\",\n \"xey/xem\": \"xey/xem/xyr/xyrs/xemself\",\n \"sie/hir\": \"sie/hir/hir/hirs/hirself\",\n \"it/it\": \"it/it/its/its/itself\",\n \"ey/em\": \"ey/em/eir/eirs/eirself\",\n}\n\n\[email protected]('pronouns')\[email protected]('.pronouns Embolalia')\ndef pronouns(bot, trigger):\n \"\"\"Show the pronouns for a given user, defaulting to the current user if left blank.\"\"\"\n if not trigger.group(3):\n pronouns = bot.db.get_nick_value(trigger.nick, 'pronouns')\n if pronouns:\n say_pronouns(bot, trigger.nick, pronouns)\n else:\n bot.reply(\"I don't know your pronouns! You can set them with \"\n \"{}setpronouns\".format(bot.config.core.help_prefix))\n else:\n pronouns = bot.db.get_nick_value(trigger.group(3), 'pronouns')\n if pronouns:\n say_pronouns(bot, trigger.group(3), pronouns)\n elif trigger.group(3) == bot.nick:\n # You can stuff an entry into the database manually for your bot's\n # gender, but like\u2026 it's a bot.\n bot.say(\n \"I am a bot. Beep boop. 
My pronouns are it/it/its/its/itself. \"\n \"See https://pronoun.is/it for examples.\"\n )\n else:\n bot.reply(\"I don't know {}'s pronouns. They can set them with \"\n \"{}setpronouns\".format(trigger.group(3),\n bot.config.core.help_prefix))\n\n\ndef say_pronouns(bot, nick, pronouns):\n for short, set_ in KNOWN_SETS.items():\n if pronouns == set_:\n break\n short = pronouns\n\n bot.say(\"{}'s pronouns are {}. See https://pronoun.is/{} for \"\n \"examples.\".format(nick, pronouns, short))\n\n\[email protected]('setpronouns')\[email protected]('.setpronouns fae/faer/faer/faers/faerself')\[email protected]('.setpronouns they/them/theirs')\[email protected]('.setpronouns they/them')\ndef set_pronouns(bot, trigger):\n \"\"\"Set your pronouns.\"\"\"\n pronouns = trigger.group(2)\n if not pronouns:\n bot.reply('What pronouns do you use?')\n return\n\n disambig = ''\n requested_pronoun_split = pronouns.split(\"/\")\n if len(requested_pronoun_split) < 5:\n matching = []\n for known_pronoun_set in KNOWN_SETS.values():\n known_pronoun_split = known_pronoun_set.split(\"/\")\n if known_pronoun_set.startswith(pronouns + \"/\") or (\n len(requested_pronoun_split) == 3\n and (\n (\n # \"they/.../themself\"\n requested_pronoun_split[1] == \"...\"\n and requested_pronoun_split[0] == known_pronoun_split[0]\n and requested_pronoun_split[2] == known_pronoun_split[4]\n )\n or (\n # \"they/them/theirs\"\n requested_pronoun_split[0:2] == known_pronoun_split[0:2]\n and requested_pronoun_split[2] == known_pronoun_split[3]\n )\n )\n ):\n matching.append(known_pronoun_set)\n\n if len(matching) == 0:\n bot.reply(\n \"I'm sorry, I don't know those pronouns. \"\n \"You can give me a set I don't know by formatting it \"\n \"subject/object/possessive-determiner/possessive-pronoun/\"\n \"reflexive, as in: they/them/their/theirs/themselves\"\n )\n return\n\n pronouns = matching[0]\n if len(matching) > 1:\n disambig = \" Or, if you meant one of these, please tell me: {}\".format(\n \", \".join(matching[1:])\n )\n\n bot.db.set_nick_value(trigger.nick, 'pronouns', pronouns)\n bot.reply(\n \"Thanks for telling me! I'll remember you use {}.{}\".format(pronouns, disambig)\n )\n\n\[email protected]('clearpronouns')\ndef unset_pronouns(bot, trigger):\n \"\"\"Clear pronouns for the given user.\"\"\"\n bot.db.delete_nick_value(trigger.nick, 'pronouns')\n bot.reply(\"Okay, I'll forget your pronouns.\")\n", "path": "sopel/modules/pronouns.py"}]}
1,817
148
gh_patches_debug_20340
rasdani/github-patches
git_diff
carpentries__amy-1458
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Include badge & badge date in member view In the member view [such as this](https://amy.software-carpentry.org/fiscal/membership/121/), we see a list of instructor training seats with the columns Event, Person, and Task. Can we also have two more columns in this view: Badge (SWC, DC, and/or LC) and Date Badged? Task appears to be a concatenation of Event, Person, and Role. If Role is always going to be Learner, we can remove this column to make room for the new columns suggested above. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `amy/fiscal/views.py` Content: ``` 1 from django.contrib.auth.mixins import ( 2 PermissionRequiredMixin, 3 ) 4 from django.db.models import ( 5 F, 6 Q, 7 Count, 8 Prefetch, 9 ) 10 from django.db.models.functions import Now 11 from django.urls import reverse, reverse_lazy 12 13 from fiscal.filters import ( 14 OrganizationFilter, 15 MembershipFilter, 16 ) 17 from fiscal.forms import ( 18 OrganizationForm, 19 OrganizationCreateForm, 20 MembershipForm, 21 MembershipCreateForm, 22 SponsorshipForm, 23 ) 24 from workshops.base_views import ( 25 AMYCreateView, 26 AMYUpdateView, 27 AMYDeleteView, 28 AMYListView, 29 RedirectSupportMixin, 30 PrepopulationSupportMixin, 31 AMYDetailView, 32 ) 33 from workshops.models import ( 34 Organization, 35 Membership, 36 Sponsorship, 37 ) 38 from workshops.util import ( 39 OnlyForAdminsMixin, 40 ) 41 42 43 # ------------------------------------------------------------ 44 # Organization related views 45 # ------------------------------------------------------------ 46 47 class AllOrganizations(OnlyForAdminsMixin, AMYListView): 48 context_object_name = 'all_organizations' 49 template_name = 'fiscal/all_organizations.html' 50 filter_class = OrganizationFilter 51 queryset = Organization.objects.prefetch_related(Prefetch( 52 'membership_set', 53 to_attr='current_memberships', 54 queryset=Membership.objects.filter( 55 agreement_start__lte=Now(), 56 agreement_end__gte=Now(), 57 ) 58 )) 59 title = 'All Organizations' 60 61 62 class OrganizationDetails(OnlyForAdminsMixin, AMYDetailView): 63 queryset = Organization.objects.all() 64 context_object_name = 'organization' 65 template_name = 'fiscal/organization.html' 66 slug_field = 'domain' 67 slug_url_kwarg = 'org_domain' 68 69 def get_context_data(self, **kwargs): 70 context = super().get_context_data(**kwargs) 71 context['title'] = 'Organization {0}'.format(self.object) 72 return context 73 74 75 class OrganizationCreate(OnlyForAdminsMixin, PermissionRequiredMixin, 76 AMYCreateView): 77 permission_required = 'workshops.add_organization' 78 model = Organization 79 form_class = OrganizationCreateForm 80 81 82 class OrganizationUpdate(OnlyForAdminsMixin, PermissionRequiredMixin, 83 AMYUpdateView): 84 permission_required = 'workshops.change_organization' 85 model = Organization 86 form_class = OrganizationForm 87 slug_field = 'domain' 88 slug_url_kwarg = 'org_domain' 89 template_name = 'generic_form_with_comments.html' 90 91 92 class OrganizationDelete(OnlyForAdminsMixin, PermissionRequiredMixin, 93 AMYDeleteView): 94 model = Organization 95 slug_field = 'domain' 96 slug_url_kwarg = 'org_domain' 97 permission_required = 'workshops.delete_organization' 98 success_url = reverse_lazy('all_organizations') 99 100 101 # ------------------------------------------------------------ 102 # Membership 
related views 103 # ------------------------------------------------------------ 104 105 class AllMemberships(OnlyForAdminsMixin, AMYListView): 106 context_object_name = 'all_memberships' 107 template_name = 'fiscal/all_memberships.html' 108 filter_class = MembershipFilter 109 queryset = Membership.objects.annotate( 110 instructor_training_seats_total=( 111 F('seats_instructor_training') + 112 F('additional_instructor_training_seats') 113 ), 114 # for future reference, in case someone would want to implement 115 # this annotation 116 # instructor_training_seats_utilized=( 117 # Count('task', filter=Q(task__role__name='learner')) 118 # ), 119 instructor_training_seats_remaining=( 120 F('seats_instructor_training') + 121 F('additional_instructor_training_seats') - 122 Count('task', filter=Q(task__role__name='learner')) 123 ), 124 ) 125 title = 'All Memberships' 126 127 128 class MembershipDetails(OnlyForAdminsMixin, AMYDetailView): 129 queryset = ( 130 Membership.objects 131 .select_related('organization') 132 .prefetch_related('task_set') 133 ) 134 context_object_name = 'membership' 135 template_name = 'fiscal/membership.html' 136 pk_url_kwarg = 'membership_id' 137 138 def get_context_data(self, **kwargs): 139 context = super().get_context_data(**kwargs) 140 context['title'] = '{0}'.format(self.object) 141 return context 142 143 144 class MembershipCreate(OnlyForAdminsMixin, PermissionRequiredMixin, 145 PrepopulationSupportMixin, AMYCreateView): 146 permission_required = [ 147 'workshops.add_membership', 148 'workshops.change_organization', 149 ] 150 model = Membership 151 form_class = MembershipCreateForm 152 populate_fields = ['organization'] 153 154 155 class MembershipUpdate(OnlyForAdminsMixin, PermissionRequiredMixin, 156 RedirectSupportMixin, AMYUpdateView): 157 permission_required = 'workshops.change_membership' 158 model = Membership 159 form_class = MembershipForm 160 pk_url_kwarg = 'membership_id' 161 template_name = 'generic_form_with_comments.html' 162 163 164 class MembershipDelete(OnlyForAdminsMixin, PermissionRequiredMixin, 165 AMYDeleteView): 166 model = Membership 167 permission_required = 'workshops.delete_membership' 168 pk_url_kwarg = 'membership_id' 169 170 def get_success_url(self): 171 return reverse('organization_details', args=[ 172 self.get_object().organization.domain]) 173 174 175 # ------------------------------------------------------------ 176 # Sponsorship related views 177 # ------------------------------------------------------------ 178 179 class SponsorshipCreate(OnlyForAdminsMixin, PermissionRequiredMixin, 180 AMYCreateView): 181 model = Sponsorship 182 permission_required = 'workshops.add_sponsorship' 183 form_class = SponsorshipForm 184 185 def get_success_url(self): 186 return reverse('event_edit', args=[self.object.event.slug]) + \ 187 '#sponsors' 188 189 190 class SponsorshipDelete(OnlyForAdminsMixin, PermissionRequiredMixin, 191 AMYDeleteView): 192 model = Sponsorship 193 permission_required = 'workshops.delete_sponsorship' 194 195 def get_success_url(self): 196 return reverse('event_edit', args=[self.get_object().event.slug]) + \ 197 '#sponsors' 198 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/amy/fiscal/views.py b/amy/fiscal/views.py --- a/amy/fiscal/views.py +++ b/amy/fiscal/views.py @@ -34,6 +34,8 @@ Organization, Membership, Sponsorship, + Task, + Award, ) from workshops.util import ( OnlyForAdminsMixin, @@ -126,11 +128,16 @@ class MembershipDetails(OnlyForAdminsMixin, AMYDetailView): - queryset = ( - Membership.objects - .select_related('organization') - .prefetch_related('task_set') - ) + prefetch_awards = Prefetch('person__award_set', + queryset=Award.objects.select_related('badge')) + queryset = Membership.objects.select_related('organization') \ + .prefetch_related( + Prefetch( + 'task_set', + queryset=Task.objects.select_related('event', 'person') + .prefetch_related(prefetch_awards) + ) + ) context_object_name = 'membership' template_name = 'fiscal/membership.html' pk_url_kwarg = 'membership_id'
{"golden_diff": "diff --git a/amy/fiscal/views.py b/amy/fiscal/views.py\n--- a/amy/fiscal/views.py\n+++ b/amy/fiscal/views.py\n@@ -34,6 +34,8 @@\n Organization,\n Membership,\n Sponsorship,\n+ Task,\n+ Award,\n )\n from workshops.util import (\n OnlyForAdminsMixin,\n@@ -126,11 +128,16 @@\n \n \n class MembershipDetails(OnlyForAdminsMixin, AMYDetailView):\n- queryset = (\n- Membership.objects\n- .select_related('organization')\n- .prefetch_related('task_set')\n- )\n+ prefetch_awards = Prefetch('person__award_set',\n+ queryset=Award.objects.select_related('badge'))\n+ queryset = Membership.objects.select_related('organization') \\\n+ .prefetch_related(\n+ Prefetch(\n+ 'task_set',\n+ queryset=Task.objects.select_related('event', 'person')\n+ .prefetch_related(prefetch_awards)\n+ )\n+ )\n context_object_name = 'membership'\n template_name = 'fiscal/membership.html'\n pk_url_kwarg = 'membership_id'\n", "issue": "Include badge & badge date in member view \nIn the member view [such as this](https://amy.software-carpentry.org/fiscal/membership/121/), we see a list of instructor training seats with the columns Event, Person, and Task.\r\n\r\nCan we also have two more columns in this view: Badge (SWC, DC, and/or LC) and Date Badged?\r\n\r\nTask appears to be a concatenation of Event, Person, and Role. If Role is always going to be Learner, we can remove this column to make room for the new columns suggested above.\n", "before_files": [{"content": "from django.contrib.auth.mixins import (\n PermissionRequiredMixin,\n)\nfrom django.db.models import (\n F,\n Q,\n Count,\n Prefetch,\n)\nfrom django.db.models.functions import Now\nfrom django.urls import reverse, reverse_lazy\n\nfrom fiscal.filters import (\n OrganizationFilter,\n MembershipFilter,\n)\nfrom fiscal.forms import (\n OrganizationForm,\n OrganizationCreateForm,\n MembershipForm,\n MembershipCreateForm,\n SponsorshipForm,\n)\nfrom workshops.base_views import (\n AMYCreateView,\n AMYUpdateView,\n AMYDeleteView,\n AMYListView,\n RedirectSupportMixin,\n PrepopulationSupportMixin,\n AMYDetailView,\n)\nfrom workshops.models import (\n Organization,\n Membership,\n Sponsorship,\n)\nfrom workshops.util import (\n OnlyForAdminsMixin,\n)\n\n\n# ------------------------------------------------------------\n# Organization related views\n# ------------------------------------------------------------\n\nclass AllOrganizations(OnlyForAdminsMixin, AMYListView):\n context_object_name = 'all_organizations'\n template_name = 'fiscal/all_organizations.html'\n filter_class = OrganizationFilter\n queryset = Organization.objects.prefetch_related(Prefetch(\n 'membership_set',\n to_attr='current_memberships',\n queryset=Membership.objects.filter(\n agreement_start__lte=Now(),\n agreement_end__gte=Now(),\n )\n ))\n title = 'All Organizations'\n\n\nclass OrganizationDetails(OnlyForAdminsMixin, AMYDetailView):\n queryset = Organization.objects.all()\n context_object_name = 'organization'\n template_name = 'fiscal/organization.html'\n slug_field = 'domain'\n slug_url_kwarg = 'org_domain'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Organization {0}'.format(self.object)\n return context\n\n\nclass OrganizationCreate(OnlyForAdminsMixin, PermissionRequiredMixin,\n AMYCreateView):\n permission_required = 'workshops.add_organization'\n model = Organization\n form_class = OrganizationCreateForm\n\n\nclass OrganizationUpdate(OnlyForAdminsMixin, PermissionRequiredMixin,\n AMYUpdateView):\n permission_required = 
'workshops.change_organization'\n model = Organization\n form_class = OrganizationForm\n slug_field = 'domain'\n slug_url_kwarg = 'org_domain'\n template_name = 'generic_form_with_comments.html'\n\n\nclass OrganizationDelete(OnlyForAdminsMixin, PermissionRequiredMixin,\n AMYDeleteView):\n model = Organization\n slug_field = 'domain'\n slug_url_kwarg = 'org_domain'\n permission_required = 'workshops.delete_organization'\n success_url = reverse_lazy('all_organizations')\n\n\n# ------------------------------------------------------------\n# Membership related views\n# ------------------------------------------------------------\n\nclass AllMemberships(OnlyForAdminsMixin, AMYListView):\n context_object_name = 'all_memberships'\n template_name = 'fiscal/all_memberships.html'\n filter_class = MembershipFilter\n queryset = Membership.objects.annotate(\n instructor_training_seats_total=(\n F('seats_instructor_training') +\n F('additional_instructor_training_seats')\n ),\n # for future reference, in case someone would want to implement\n # this annotation\n # instructor_training_seats_utilized=(\n # Count('task', filter=Q(task__role__name='learner'))\n # ),\n instructor_training_seats_remaining=(\n F('seats_instructor_training') +\n F('additional_instructor_training_seats') -\n Count('task', filter=Q(task__role__name='learner'))\n ),\n )\n title = 'All Memberships'\n\n\nclass MembershipDetails(OnlyForAdminsMixin, AMYDetailView):\n queryset = (\n Membership.objects\n .select_related('organization')\n .prefetch_related('task_set')\n )\n context_object_name = 'membership'\n template_name = 'fiscal/membership.html'\n pk_url_kwarg = 'membership_id'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = '{0}'.format(self.object)\n return context\n\n\nclass MembershipCreate(OnlyForAdminsMixin, PermissionRequiredMixin,\n PrepopulationSupportMixin, AMYCreateView):\n permission_required = [\n 'workshops.add_membership',\n 'workshops.change_organization',\n ]\n model = Membership\n form_class = MembershipCreateForm\n populate_fields = ['organization']\n\n\nclass MembershipUpdate(OnlyForAdminsMixin, PermissionRequiredMixin,\n RedirectSupportMixin, AMYUpdateView):\n permission_required = 'workshops.change_membership'\n model = Membership\n form_class = MembershipForm\n pk_url_kwarg = 'membership_id'\n template_name = 'generic_form_with_comments.html'\n\n\nclass MembershipDelete(OnlyForAdminsMixin, PermissionRequiredMixin,\n AMYDeleteView):\n model = Membership\n permission_required = 'workshops.delete_membership'\n pk_url_kwarg = 'membership_id'\n\n def get_success_url(self):\n return reverse('organization_details', args=[\n self.get_object().organization.domain])\n\n\n# ------------------------------------------------------------\n# Sponsorship related views\n# ------------------------------------------------------------\n\nclass SponsorshipCreate(OnlyForAdminsMixin, PermissionRequiredMixin,\n AMYCreateView):\n model = Sponsorship\n permission_required = 'workshops.add_sponsorship'\n form_class = SponsorshipForm\n\n def get_success_url(self):\n return reverse('event_edit', args=[self.object.event.slug]) + \\\n '#sponsors'\n\n\nclass SponsorshipDelete(OnlyForAdminsMixin, PermissionRequiredMixin,\n AMYDeleteView):\n model = Sponsorship\n permission_required = 'workshops.delete_sponsorship'\n\n def get_success_url(self):\n return reverse('event_edit', args=[self.get_object().event.slug]) + \\\n '#sponsors'\n", "path": "amy/fiscal/views.py"}], "after_files": 
[{"content": "from django.contrib.auth.mixins import (\n PermissionRequiredMixin,\n)\nfrom django.db.models import (\n F,\n Q,\n Count,\n Prefetch,\n)\nfrom django.db.models.functions import Now\nfrom django.urls import reverse, reverse_lazy\n\nfrom fiscal.filters import (\n OrganizationFilter,\n MembershipFilter,\n)\nfrom fiscal.forms import (\n OrganizationForm,\n OrganizationCreateForm,\n MembershipForm,\n MembershipCreateForm,\n SponsorshipForm,\n)\nfrom workshops.base_views import (\n AMYCreateView,\n AMYUpdateView,\n AMYDeleteView,\n AMYListView,\n RedirectSupportMixin,\n PrepopulationSupportMixin,\n AMYDetailView,\n)\nfrom workshops.models import (\n Organization,\n Membership,\n Sponsorship,\n Task,\n Award,\n)\nfrom workshops.util import (\n OnlyForAdminsMixin,\n)\n\n\n# ------------------------------------------------------------\n# Organization related views\n# ------------------------------------------------------------\n\nclass AllOrganizations(OnlyForAdminsMixin, AMYListView):\n context_object_name = 'all_organizations'\n template_name = 'fiscal/all_organizations.html'\n filter_class = OrganizationFilter\n queryset = Organization.objects.prefetch_related(Prefetch(\n 'membership_set',\n to_attr='current_memberships',\n queryset=Membership.objects.filter(\n agreement_start__lte=Now(),\n agreement_end__gte=Now(),\n )\n ))\n title = 'All Organizations'\n\n\nclass OrganizationDetails(OnlyForAdminsMixin, AMYDetailView):\n queryset = Organization.objects.all()\n context_object_name = 'organization'\n template_name = 'fiscal/organization.html'\n slug_field = 'domain'\n slug_url_kwarg = 'org_domain'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Organization {0}'.format(self.object)\n return context\n\n\nclass OrganizationCreate(OnlyForAdminsMixin, PermissionRequiredMixin,\n AMYCreateView):\n permission_required = 'workshops.add_organization'\n model = Organization\n form_class = OrganizationCreateForm\n\n\nclass OrganizationUpdate(OnlyForAdminsMixin, PermissionRequiredMixin,\n AMYUpdateView):\n permission_required = 'workshops.change_organization'\n model = Organization\n form_class = OrganizationForm\n slug_field = 'domain'\n slug_url_kwarg = 'org_domain'\n template_name = 'generic_form_with_comments.html'\n\n\nclass OrganizationDelete(OnlyForAdminsMixin, PermissionRequiredMixin,\n AMYDeleteView):\n model = Organization\n slug_field = 'domain'\n slug_url_kwarg = 'org_domain'\n permission_required = 'workshops.delete_organization'\n success_url = reverse_lazy('all_organizations')\n\n\n# ------------------------------------------------------------\n# Membership related views\n# ------------------------------------------------------------\n\nclass AllMemberships(OnlyForAdminsMixin, AMYListView):\n context_object_name = 'all_memberships'\n template_name = 'fiscal/all_memberships.html'\n filter_class = MembershipFilter\n queryset = Membership.objects.annotate(\n instructor_training_seats_total=(\n F('seats_instructor_training') +\n F('additional_instructor_training_seats')\n ),\n # for future reference, in case someone would want to implement\n # this annotation\n # instructor_training_seats_utilized=(\n # Count('task', filter=Q(task__role__name='learner'))\n # ),\n instructor_training_seats_remaining=(\n F('seats_instructor_training') +\n F('additional_instructor_training_seats') -\n Count('task', filter=Q(task__role__name='learner'))\n ),\n )\n title = 'All Memberships'\n\n\nclass MembershipDetails(OnlyForAdminsMixin, 
AMYDetailView):\n prefetch_awards = Prefetch('person__award_set',\n queryset=Award.objects.select_related('badge'))\n queryset = Membership.objects.select_related('organization') \\\n .prefetch_related(\n Prefetch(\n 'task_set',\n queryset=Task.objects.select_related('event', 'person')\n .prefetch_related(prefetch_awards)\n )\n )\n context_object_name = 'membership'\n template_name = 'fiscal/membership.html'\n pk_url_kwarg = 'membership_id'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = '{0}'.format(self.object)\n return context\n\n\nclass MembershipCreate(OnlyForAdminsMixin, PermissionRequiredMixin,\n PrepopulationSupportMixin, AMYCreateView):\n permission_required = [\n 'workshops.add_membership',\n 'workshops.change_organization',\n ]\n model = Membership\n form_class = MembershipCreateForm\n populate_fields = ['organization']\n\n\nclass MembershipUpdate(OnlyForAdminsMixin, PermissionRequiredMixin,\n RedirectSupportMixin, AMYUpdateView):\n permission_required = 'workshops.change_membership'\n model = Membership\n form_class = MembershipForm\n pk_url_kwarg = 'membership_id'\n template_name = 'generic_form_with_comments.html'\n\n\nclass MembershipDelete(OnlyForAdminsMixin, PermissionRequiredMixin,\n AMYDeleteView):\n model = Membership\n permission_required = 'workshops.delete_membership'\n pk_url_kwarg = 'membership_id'\n\n def get_success_url(self):\n return reverse('organization_details', args=[\n self.get_object().organization.domain])\n\n\n# ------------------------------------------------------------\n# Sponsorship related views\n# ------------------------------------------------------------\n\nclass SponsorshipCreate(OnlyForAdminsMixin, PermissionRequiredMixin,\n AMYCreateView):\n model = Sponsorship\n permission_required = 'workshops.add_sponsorship'\n form_class = SponsorshipForm\n\n def get_success_url(self):\n return reverse('event_edit', args=[self.object.event.slug]) + \\\n '#sponsors'\n\n\nclass SponsorshipDelete(OnlyForAdminsMixin, PermissionRequiredMixin,\n AMYDeleteView):\n model = Sponsorship\n permission_required = 'workshops.delete_sponsorship'\n\n def get_success_url(self):\n return reverse('event_edit', args=[self.get_object().event.slug]) + \\\n '#sponsors'\n", "path": "amy/fiscal/views.py"}]}
2,111
257
gh_patches_debug_41079
rasdani/github-patches
git_diff
carpentries__amy-637
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- API: add filtering by workshop type in published events We need to grab DC-only, or SWC-only published events. Probably there's no need to return type of the event in the structure, just filtering. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `api/serializers.py` Content: ``` 1 from rest_framework import serializers 2 3 from workshops.models import Badge, Airport, Person, Event, TodoItem 4 5 6 class PersonUsernameSerializer(serializers.ModelSerializer): 7 name = serializers.CharField(source='get_full_name') 8 user = serializers.CharField(source='username') 9 10 class Meta: 11 model = Person 12 fields = ('name', 'user', ) 13 14 15 class PersonNameEmailSerializer(serializers.ModelSerializer): 16 name = serializers.CharField(source='get_full_name') 17 18 class Meta: 19 model = Person 20 fields = ('name', 'email') 21 22 23 class ExportBadgesSerializer(serializers.ModelSerializer): 24 persons = PersonUsernameSerializer(many=True, source='person_set') 25 26 class Meta: 27 model = Badge 28 fields = ('name', 'persons') 29 30 31 class ExportInstructorLocationsSerializer(serializers.ModelSerializer): 32 name = serializers.CharField(source='fullname') 33 instructors = PersonUsernameSerializer(many=True, source='person_set') 34 35 class Meta: 36 model = Airport 37 fields = ('name', 'latitude', 'longitude', 'instructors', 'country') 38 39 40 class EventSerializer(serializers.ModelSerializer): 41 humandate = serializers.SerializerMethodField() 42 country = serializers.CharField() 43 start = serializers.DateField(format=None) 44 end = serializers.DateField(format=None) 45 url = serializers.URLField(source='website_url') 46 eventbrite_id = serializers.CharField(source='reg_key') 47 48 def get_humandate(self, obj): 49 """Render start and end dates as human-readable short date.""" 50 return EventSerializer.human_readable_date(obj.start, obj.end) 51 52 @staticmethod 53 def human_readable_date(date1, date2): 54 """Render start and end dates as human-readable short date.""" 55 if date1 and not date2: 56 return '{:%b %d, %Y}-???'.format(date1) 57 elif date2 and not date1: 58 return '???-{:%b %d, %Y}'.format(date2) 59 elif not date2 and not date1: 60 return '???-???' 61 62 if date1.year == date2.year: 63 if date1.month == date2.month: 64 return '{:%b %d}-{:%d, %Y}'.format(date1, date2) 65 else: 66 return '{:%b %d}-{:%b %d, %Y}'.format(date1, date2) 67 else: 68 return '{:%b %d, %Y}-{:%b %d, %Y}'.format(date1, date2) 69 70 class Meta: 71 model = Event 72 fields = ( 73 'slug', 'start', 'end', 'url', 'humandate', 'contact', 'country', 74 'venue', 'address', 'latitude', 'longitude', 'eventbrite_id', 75 ) 76 77 78 class TodoSerializer(serializers.ModelSerializer): 79 content = serializers.SerializerMethodField() 80 start = serializers.DateField(format=None, source='due') 81 82 class Meta: 83 model = TodoItem 84 fields = ( 85 'content', 'start', 86 ) 87 88 def get_content(self, obj): 89 """Return HTML containing interesting information for admins. 
This 90 will be displayed on labels in the timeline.""" 91 92 return '<a href="{url}">{event}</a><br><small>{todo}</small>'.format( 93 url=obj.event.get_absolute_url(), 94 event=obj.event.get_ident(), 95 todo=obj.title, 96 ) 97 ``` Path: `api/views.py` Content: ``` 1 import datetime 2 3 from rest_framework.generics import ListAPIView 4 from rest_framework.metadata import SimpleMetadata 5 from rest_framework.permissions import ( 6 IsAuthenticatedOrReadOnly, IsAuthenticated 7 ) 8 from rest_framework.response import Response 9 from rest_framework.reverse import reverse 10 from rest_framework.views import APIView 11 12 from workshops.models import Badge, Airport, Event, TodoItem 13 from workshops.util import get_members, default_membership_cutoff 14 15 from .serializers import ( 16 PersonNameEmailSerializer, 17 ExportBadgesSerializer, 18 ExportInstructorLocationsSerializer, 19 EventSerializer, 20 TodoSerializer, 21 ) 22 23 24 class QueryMetadata(SimpleMetadata): 25 """Additionally include info about query parameters.""" 26 27 def determine_metadata(self, request, view): 28 data = super().determine_metadata(request, view) 29 30 try: 31 data['query_params'] = view.get_query_params_description() 32 except AttributeError: 33 pass 34 35 return data 36 37 38 class ApiRoot(APIView): 39 def get(self, request, format=None): 40 return Response({ 41 'export-badges': reverse('api:export-badges', request=request, 42 format=format), 43 'export-instructors': reverse('api:export-instructors', 44 request=request, format=format), 45 'export-members': reverse('api:export-members', request=request, 46 format=format), 47 'events-published': reverse('api:events-published', 48 request=request, format=format), 49 'user-todos': reverse('api:user-todos', 50 request=request, format=format), 51 }) 52 53 54 class ExportBadgesView(ListAPIView): 55 """List all badges and people who have them.""" 56 permission_classes = (IsAuthenticatedOrReadOnly, ) 57 paginator = None # disable pagination 58 59 queryset = Badge.objects.prefetch_related('person_set') 60 serializer_class = ExportBadgesSerializer 61 62 63 class ExportInstructorLocationsView(ListAPIView): 64 """List all airports and instructors located near them.""" 65 permission_classes = (IsAuthenticatedOrReadOnly, ) 66 paginator = None # disable pagination 67 68 queryset = Airport.objects.exclude(person=None) \ 69 .prefetch_related('person_set') 70 serializer_class = ExportInstructorLocationsSerializer 71 72 73 class ExportMembersView(ListAPIView): 74 """Show everyone who qualifies as an SCF member.""" 75 permission_classes = (IsAuthenticatedOrReadOnly, ) 76 paginator = None # disable pagination 77 78 serializer_class = PersonNameEmailSerializer 79 80 def get_queryset(self): 81 earliest_default, latest_default = default_membership_cutoff() 82 83 earliest = self.request.query_params.get('earliest', None) 84 if earliest is not None: 85 try: 86 earliest = datetime.datetime.strptime(earliest, '%Y-%m-%d') \ 87 .date() 88 except ValueError: 89 earliest = earliest_default 90 else: 91 earliest = earliest_default 92 93 latest = self.request.query_params.get('latest', None) 94 if latest is not None: 95 try: 96 latest = datetime.datetime.strptime(latest, '%Y-%m-%d').date() 97 except ValueError: 98 latest = latest_default 99 else: 100 latest = latest_default 101 102 return get_members(earliest, latest) 103 104 def get_query_params_description(self): 105 return { 106 'earliest': 'Date of earliest workshop someone taught at.' 
107 ' Defaults to -2*365 days from current date.', 108 'latest': 'Date of latest workshop someone taught at.' 109 ' Defaults to current date.', 110 } 111 112 113 class PublishedEvents(ListAPIView): 114 """List published events.""" 115 116 # only events that have both a starting date and a URL 117 permission_classes = (IsAuthenticatedOrReadOnly, ) 118 paginator = None # disable pagination 119 120 serializer_class = EventSerializer 121 122 metadata_class = QueryMetadata 123 124 def get_queryset(self): 125 """Optionally restrict the returned event set to events hosted by 126 specific host or administered by specific admin.""" 127 queryset = Event.objects.published_events() 128 129 administrator = self.request.query_params.get('administrator', None) 130 if administrator is not None: 131 queryset = queryset.filter(administrator__pk=administrator) 132 133 host = self.request.query_params.get('host', None) 134 if host is not None: 135 queryset = queryset.filter(host__pk=host) 136 137 return queryset 138 139 def get_query_params_description(self): 140 return { 141 'administrator': 'ID of the organization responsible for admin ' 142 'work on events.', 143 'host': 'ID of the organization hosting the event.', 144 } 145 146 147 class UserTodoItems(ListAPIView): 148 permission_classes = (IsAuthenticated, ) 149 paginator = None 150 serializer_class = TodoSerializer 151 152 def get_queryset(self): 153 """Return current TODOs for currently logged in user.""" 154 return TodoItem.objects.user(self.request.user) \ 155 .incomplete() \ 156 .select_related('event') 157 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/api/serializers.py b/api/serializers.py --- a/api/serializers.py +++ b/api/serializers.py @@ -1,6 +1,6 @@ from rest_framework import serializers -from workshops.models import Badge, Airport, Person, Event, TodoItem +from workshops.models import Badge, Airport, Person, Event, TodoItem, Tag class PersonUsernameSerializer(serializers.ModelSerializer): @@ -37,6 +37,12 @@ fields = ('name', 'latitude', 'longitude', 'instructors', 'country') +class TagSerializer(serializers.ModelSerializer): + class Meta: + model = Tag + fields = ('name', ) + + class EventSerializer(serializers.ModelSerializer): humandate = serializers.SerializerMethodField() country = serializers.CharField() @@ -44,6 +50,7 @@ end = serializers.DateField(format=None) url = serializers.URLField(source='website_url') eventbrite_id = serializers.CharField(source='reg_key') + tags = TagSerializer(many=True) def get_humandate(self, obj): """Render start and end dates as human-readable short date.""" @@ -72,6 +79,7 @@ fields = ( 'slug', 'start', 'end', 'url', 'humandate', 'contact', 'country', 'venue', 'address', 'latitude', 'longitude', 'eventbrite_id', + 'tags', ) diff --git a/api/views.py b/api/views.py --- a/api/views.py +++ b/api/views.py @@ -1,5 +1,6 @@ import datetime +from django.db.models import Q from rest_framework.generics import ListAPIView from rest_framework.metadata import SimpleMetadata from rest_framework.permissions import ( @@ -9,7 +10,7 @@ from rest_framework.reverse import reverse from rest_framework.views import APIView -from workshops.models import Badge, Airport, Event, TodoItem +from workshops.models import Badge, Airport, Event, TodoItem, Tag from workshops.util import get_members, default_membership_cutoff from .serializers import ( @@ -134,6 +135,12 @@ if host is not None: queryset = queryset.filter(host__pk=host) + tags = self.request.query_params.getlist('tag', None) + if tags: + tags = Tag.objects.filter(name__in=tags) + for tag in tags: + queryset = queryset.filter(tags=tag) + return queryset def get_query_params_description(self): @@ -141,6 +148,8 @@ 'administrator': 'ID of the organization responsible for admin ' 'work on events.', 'host': 'ID of the organization hosting the event.', + 'tag': "Events' tag(s). You can use this parameter multiple " + "times.", }
{"golden_diff": "diff --git a/api/serializers.py b/api/serializers.py\n--- a/api/serializers.py\n+++ b/api/serializers.py\n@@ -1,6 +1,6 @@\n from rest_framework import serializers\n \n-from workshops.models import Badge, Airport, Person, Event, TodoItem\n+from workshops.models import Badge, Airport, Person, Event, TodoItem, Tag\n \n \n class PersonUsernameSerializer(serializers.ModelSerializer):\n@@ -37,6 +37,12 @@\n fields = ('name', 'latitude', 'longitude', 'instructors', 'country')\n \n \n+class TagSerializer(serializers.ModelSerializer):\n+ class Meta:\n+ model = Tag\n+ fields = ('name', )\n+\n+\n class EventSerializer(serializers.ModelSerializer):\n humandate = serializers.SerializerMethodField()\n country = serializers.CharField()\n@@ -44,6 +50,7 @@\n end = serializers.DateField(format=None)\n url = serializers.URLField(source='website_url')\n eventbrite_id = serializers.CharField(source='reg_key')\n+ tags = TagSerializer(many=True)\n \n def get_humandate(self, obj):\n \"\"\"Render start and end dates as human-readable short date.\"\"\"\n@@ -72,6 +79,7 @@\n fields = (\n 'slug', 'start', 'end', 'url', 'humandate', 'contact', 'country',\n 'venue', 'address', 'latitude', 'longitude', 'eventbrite_id',\n+ 'tags',\n )\n \n \ndiff --git a/api/views.py b/api/views.py\n--- a/api/views.py\n+++ b/api/views.py\n@@ -1,5 +1,6 @@\n import datetime\n \n+from django.db.models import Q\n from rest_framework.generics import ListAPIView\n from rest_framework.metadata import SimpleMetadata\n from rest_framework.permissions import (\n@@ -9,7 +10,7 @@\n from rest_framework.reverse import reverse\n from rest_framework.views import APIView\n \n-from workshops.models import Badge, Airport, Event, TodoItem\n+from workshops.models import Badge, Airport, Event, TodoItem, Tag\n from workshops.util import get_members, default_membership_cutoff\n \n from .serializers import (\n@@ -134,6 +135,12 @@\n if host is not None:\n queryset = queryset.filter(host__pk=host)\n \n+ tags = self.request.query_params.getlist('tag', None)\n+ if tags:\n+ tags = Tag.objects.filter(name__in=tags)\n+ for tag in tags:\n+ queryset = queryset.filter(tags=tag)\n+\n return queryset\n \n def get_query_params_description(self):\n@@ -141,6 +148,8 @@\n 'administrator': 'ID of the organization responsible for admin '\n 'work on events.',\n 'host': 'ID of the organization hosting the event.',\n+ 'tag': \"Events' tag(s). You can use this parameter multiple \"\n+ \"times.\",\n }\n", "issue": "API: add filtering by workshop type in published events\nWe need to grab DC-only, or SWC-only published events. 
Probably there's no need to return type of the event in the structure, just filtering.\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom workshops.models import Badge, Airport, Person, Event, TodoItem\n\n\nclass PersonUsernameSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source='get_full_name')\n user = serializers.CharField(source='username')\n\n class Meta:\n model = Person\n fields = ('name', 'user', )\n\n\nclass PersonNameEmailSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source='get_full_name')\n\n class Meta:\n model = Person\n fields = ('name', 'email')\n\n\nclass ExportBadgesSerializer(serializers.ModelSerializer):\n persons = PersonUsernameSerializer(many=True, source='person_set')\n\n class Meta:\n model = Badge\n fields = ('name', 'persons')\n\n\nclass ExportInstructorLocationsSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source='fullname')\n instructors = PersonUsernameSerializer(many=True, source='person_set')\n\n class Meta:\n model = Airport\n fields = ('name', 'latitude', 'longitude', 'instructors', 'country')\n\n\nclass EventSerializer(serializers.ModelSerializer):\n humandate = serializers.SerializerMethodField()\n country = serializers.CharField()\n start = serializers.DateField(format=None)\n end = serializers.DateField(format=None)\n url = serializers.URLField(source='website_url')\n eventbrite_id = serializers.CharField(source='reg_key')\n\n def get_humandate(self, obj):\n \"\"\"Render start and end dates as human-readable short date.\"\"\"\n return EventSerializer.human_readable_date(obj.start, obj.end)\n\n @staticmethod\n def human_readable_date(date1, date2):\n \"\"\"Render start and end dates as human-readable short date.\"\"\"\n if date1 and not date2:\n return '{:%b %d, %Y}-???'.format(date1)\n elif date2 and not date1:\n return '???-{:%b %d, %Y}'.format(date2)\n elif not date2 and not date1:\n return '???-???'\n\n if date1.year == date2.year:\n if date1.month == date2.month:\n return '{:%b %d}-{:%d, %Y}'.format(date1, date2)\n else:\n return '{:%b %d}-{:%b %d, %Y}'.format(date1, date2)\n else:\n return '{:%b %d, %Y}-{:%b %d, %Y}'.format(date1, date2)\n\n class Meta:\n model = Event\n fields = (\n 'slug', 'start', 'end', 'url', 'humandate', 'contact', 'country',\n 'venue', 'address', 'latitude', 'longitude', 'eventbrite_id',\n )\n\n\nclass TodoSerializer(serializers.ModelSerializer):\n content = serializers.SerializerMethodField()\n start = serializers.DateField(format=None, source='due')\n\n class Meta:\n model = TodoItem\n fields = (\n 'content', 'start',\n )\n\n def get_content(self, obj):\n \"\"\"Return HTML containing interesting information for admins. 
This\n will be displayed on labels in the timeline.\"\"\"\n\n return '<a href=\"{url}\">{event}</a><br><small>{todo}</small>'.format(\n url=obj.event.get_absolute_url(),\n event=obj.event.get_ident(),\n todo=obj.title,\n )\n", "path": "api/serializers.py"}, {"content": "import datetime\n\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.metadata import SimpleMetadata\nfrom rest_framework.permissions import (\n IsAuthenticatedOrReadOnly, IsAuthenticated\n)\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework.views import APIView\n\nfrom workshops.models import Badge, Airport, Event, TodoItem\nfrom workshops.util import get_members, default_membership_cutoff\n\nfrom .serializers import (\n PersonNameEmailSerializer,\n ExportBadgesSerializer,\n ExportInstructorLocationsSerializer,\n EventSerializer,\n TodoSerializer,\n)\n\n\nclass QueryMetadata(SimpleMetadata):\n \"\"\"Additionally include info about query parameters.\"\"\"\n\n def determine_metadata(self, request, view):\n data = super().determine_metadata(request, view)\n\n try:\n data['query_params'] = view.get_query_params_description()\n except AttributeError:\n pass\n\n return data\n\n\nclass ApiRoot(APIView):\n def get(self, request, format=None):\n return Response({\n 'export-badges': reverse('api:export-badges', request=request,\n format=format),\n 'export-instructors': reverse('api:export-instructors',\n request=request, format=format),\n 'export-members': reverse('api:export-members', request=request,\n format=format),\n 'events-published': reverse('api:events-published',\n request=request, format=format),\n 'user-todos': reverse('api:user-todos',\n request=request, format=format),\n })\n\n\nclass ExportBadgesView(ListAPIView):\n \"\"\"List all badges and people who have them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Badge.objects.prefetch_related('person_set')\n serializer_class = ExportBadgesSerializer\n\n\nclass ExportInstructorLocationsView(ListAPIView):\n \"\"\"List all airports and instructors located near them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Airport.objects.exclude(person=None) \\\n .prefetch_related('person_set')\n serializer_class = ExportInstructorLocationsSerializer\n\n\nclass ExportMembersView(ListAPIView):\n \"\"\"Show everyone who qualifies as an SCF member.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n serializer_class = PersonNameEmailSerializer\n\n def get_queryset(self):\n earliest_default, latest_default = default_membership_cutoff()\n\n earliest = self.request.query_params.get('earliest', None)\n if earliest is not None:\n try:\n earliest = datetime.datetime.strptime(earliest, '%Y-%m-%d') \\\n .date()\n except ValueError:\n earliest = earliest_default\n else:\n earliest = earliest_default\n\n latest = self.request.query_params.get('latest', None)\n if latest is not None:\n try:\n latest = datetime.datetime.strptime(latest, '%Y-%m-%d').date()\n except ValueError:\n latest = latest_default\n else:\n latest = latest_default\n\n return get_members(earliest, latest)\n\n def get_query_params_description(self):\n return {\n 'earliest': 'Date of earliest workshop someone taught at.'\n ' Defaults to -2*365 days from current date.',\n 'latest': 'Date of latest workshop someone taught at.'\n ' Defaults to current date.',\n }\n\n\nclass 
PublishedEvents(ListAPIView):\n \"\"\"List published events.\"\"\"\n\n # only events that have both a starting date and a URL\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n serializer_class = EventSerializer\n\n metadata_class = QueryMetadata\n\n def get_queryset(self):\n \"\"\"Optionally restrict the returned event set to events hosted by\n specific host or administered by specific admin.\"\"\"\n queryset = Event.objects.published_events()\n\n administrator = self.request.query_params.get('administrator', None)\n if administrator is not None:\n queryset = queryset.filter(administrator__pk=administrator)\n\n host = self.request.query_params.get('host', None)\n if host is not None:\n queryset = queryset.filter(host__pk=host)\n\n return queryset\n\n def get_query_params_description(self):\n return {\n 'administrator': 'ID of the organization responsible for admin '\n 'work on events.',\n 'host': 'ID of the organization hosting the event.',\n }\n\n\nclass UserTodoItems(ListAPIView):\n permission_classes = (IsAuthenticated, )\n paginator = None\n serializer_class = TodoSerializer\n\n def get_queryset(self):\n \"\"\"Return current TODOs for currently logged in user.\"\"\"\n return TodoItem.objects.user(self.request.user) \\\n .incomplete() \\\n .select_related('event')\n", "path": "api/views.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom workshops.models import Badge, Airport, Person, Event, TodoItem, Tag\n\n\nclass PersonUsernameSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source='get_full_name')\n user = serializers.CharField(source='username')\n\n class Meta:\n model = Person\n fields = ('name', 'user', )\n\n\nclass PersonNameEmailSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source='get_full_name')\n\n class Meta:\n model = Person\n fields = ('name', 'email')\n\n\nclass ExportBadgesSerializer(serializers.ModelSerializer):\n persons = PersonUsernameSerializer(many=True, source='person_set')\n\n class Meta:\n model = Badge\n fields = ('name', 'persons')\n\n\nclass ExportInstructorLocationsSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source='fullname')\n instructors = PersonUsernameSerializer(many=True, source='person_set')\n\n class Meta:\n model = Airport\n fields = ('name', 'latitude', 'longitude', 'instructors', 'country')\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = ('name', )\n\n\nclass EventSerializer(serializers.ModelSerializer):\n humandate = serializers.SerializerMethodField()\n country = serializers.CharField()\n start = serializers.DateField(format=None)\n end = serializers.DateField(format=None)\n url = serializers.URLField(source='website_url')\n eventbrite_id = serializers.CharField(source='reg_key')\n tags = TagSerializer(many=True)\n\n def get_humandate(self, obj):\n \"\"\"Render start and end dates as human-readable short date.\"\"\"\n return EventSerializer.human_readable_date(obj.start, obj.end)\n\n @staticmethod\n def human_readable_date(date1, date2):\n \"\"\"Render start and end dates as human-readable short date.\"\"\"\n if date1 and not date2:\n return '{:%b %d, %Y}-???'.format(date1)\n elif date2 and not date1:\n return '???-{:%b %d, %Y}'.format(date2)\n elif not date2 and not date1:\n return '???-???'\n\n if date1.year == date2.year:\n if date1.month == date2.month:\n return '{:%b %d}-{:%d, %Y}'.format(date1, date2)\n else:\n return '{:%b %d}-{:%b %d, 
%Y}'.format(date1, date2)\n else:\n return '{:%b %d, %Y}-{:%b %d, %Y}'.format(date1, date2)\n\n class Meta:\n model = Event\n fields = (\n 'slug', 'start', 'end', 'url', 'humandate', 'contact', 'country',\n 'venue', 'address', 'latitude', 'longitude', 'eventbrite_id',\n 'tags',\n )\n\n\nclass TodoSerializer(serializers.ModelSerializer):\n content = serializers.SerializerMethodField()\n start = serializers.DateField(format=None, source='due')\n\n class Meta:\n model = TodoItem\n fields = (\n 'content', 'start',\n )\n\n def get_content(self, obj):\n \"\"\"Return HTML containing interesting information for admins. This\n will be displayed on labels in the timeline.\"\"\"\n\n return '<a href=\"{url}\">{event}</a><br><small>{todo}</small>'.format(\n url=obj.event.get_absolute_url(),\n event=obj.event.get_ident(),\n todo=obj.title,\n )\n", "path": "api/serializers.py"}, {"content": "import datetime\n\nfrom django.db.models import Q\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.metadata import SimpleMetadata\nfrom rest_framework.permissions import (\n IsAuthenticatedOrReadOnly, IsAuthenticated\n)\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework.views import APIView\n\nfrom workshops.models import Badge, Airport, Event, TodoItem, Tag\nfrom workshops.util import get_members, default_membership_cutoff\n\nfrom .serializers import (\n PersonNameEmailSerializer,\n ExportBadgesSerializer,\n ExportInstructorLocationsSerializer,\n EventSerializer,\n TodoSerializer,\n)\n\n\nclass QueryMetadata(SimpleMetadata):\n \"\"\"Additionally include info about query parameters.\"\"\"\n\n def determine_metadata(self, request, view):\n data = super().determine_metadata(request, view)\n\n try:\n data['query_params'] = view.get_query_params_description()\n except AttributeError:\n pass\n\n return data\n\n\nclass ApiRoot(APIView):\n def get(self, request, format=None):\n return Response({\n 'export-badges': reverse('api:export-badges', request=request,\n format=format),\n 'export-instructors': reverse('api:export-instructors',\n request=request, format=format),\n 'export-members': reverse('api:export-members', request=request,\n format=format),\n 'events-published': reverse('api:events-published',\n request=request, format=format),\n 'user-todos': reverse('api:user-todos',\n request=request, format=format),\n })\n\n\nclass ExportBadgesView(ListAPIView):\n \"\"\"List all badges and people who have them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Badge.objects.prefetch_related('person_set')\n serializer_class = ExportBadgesSerializer\n\n\nclass ExportInstructorLocationsView(ListAPIView):\n \"\"\"List all airports and instructors located near them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Airport.objects.exclude(person=None) \\\n .prefetch_related('person_set')\n serializer_class = ExportInstructorLocationsSerializer\n\n\nclass ExportMembersView(ListAPIView):\n \"\"\"Show everyone who qualifies as an SCF member.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n serializer_class = PersonNameEmailSerializer\n\n def get_queryset(self):\n earliest_default, latest_default = default_membership_cutoff()\n\n earliest = self.request.query_params.get('earliest', None)\n if earliest is not None:\n try:\n earliest = datetime.datetime.strptime(earliest, 
'%Y-%m-%d') \\\n .date()\n except ValueError:\n earliest = earliest_default\n else:\n earliest = earliest_default\n\n latest = self.request.query_params.get('latest', None)\n if latest is not None:\n try:\n latest = datetime.datetime.strptime(latest, '%Y-%m-%d').date()\n except ValueError:\n latest = latest_default\n else:\n latest = latest_default\n\n return get_members(earliest, latest)\n\n def get_query_params_description(self):\n return {\n 'earliest': 'Date of earliest workshop someone taught at.'\n ' Defaults to -2*365 days from current date.',\n 'latest': 'Date of latest workshop someone taught at.'\n ' Defaults to current date.',\n }\n\n\nclass PublishedEvents(ListAPIView):\n \"\"\"List published events.\"\"\"\n\n # only events that have both a starting date and a URL\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n serializer_class = EventSerializer\n\n metadata_class = QueryMetadata\n\n def get_queryset(self):\n \"\"\"Optionally restrict the returned event set to events hosted by\n specific host or administered by specific admin.\"\"\"\n queryset = Event.objects.published_events()\n\n administrator = self.request.query_params.get('administrator', None)\n if administrator is not None:\n queryset = queryset.filter(administrator__pk=administrator)\n\n host = self.request.query_params.get('host', None)\n if host is not None:\n queryset = queryset.filter(host__pk=host)\n\n tags = self.request.query_params.getlist('tag', None)\n if tags:\n tags = Tag.objects.filter(name__in=tags)\n for tag in tags:\n queryset = queryset.filter(tags=tag)\n\n return queryset\n\n def get_query_params_description(self):\n return {\n 'administrator': 'ID of the organization responsible for admin '\n 'work on events.',\n 'host': 'ID of the organization hosting the event.',\n 'tag': \"Events' tag(s). You can use this parameter multiple \"\n \"times.\",\n }\n\n\nclass UserTodoItems(ListAPIView):\n permission_classes = (IsAuthenticated, )\n paginator = None\n serializer_class = TodoSerializer\n\n def get_queryset(self):\n \"\"\"Return current TODOs for currently logged in user.\"\"\"\n return TodoItem.objects.user(self.request.user) \\\n .incomplete() \\\n .select_related('event')\n", "path": "api/views.py"}]}
2,586
626
gh_patches_debug_29591
rasdani/github-patches
git_diff
cupy__cupy-1374
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- matrix power is missing from cupy.linalg. There is a corresponding `numpy.linalg.matrix_power` in NumPy. Is there a reason why this has not made it into CuPy? If it helps, I am willing to take a shot at this one, though I might need a bit of hand-holding as I've not much experience with Cython code. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cupy/linalg/__init__.py` Content: ``` 1 # Functions from the following NumPy document 2 # https://docs.scipy.org/doc/numpy/reference/routines.linalg.html 3 4 # "NOQA" to suppress flake8 warning 5 from cupy.linalg import decomposition # NOQA 6 from cupy.linalg import eigenvalue # NOQA 7 from cupy.linalg import einsum # NOQA 8 from cupy.linalg import norms # NOQA 9 from cupy.linalg.norms import det # NOQA 10 from cupy.linalg.norms import matrix_rank # NOQA 11 from cupy.linalg.norms import norm # NOQA 12 from cupy.linalg.norms import slogdet # NOQA 13 from cupy.linalg import product # NOQA 14 from cupy.linalg import solve # NOQA 15 16 from cupy.linalg.decomposition import cholesky # NOQA 17 from cupy.linalg.decomposition import qr # NOQA 18 from cupy.linalg.decomposition import svd # NOQA 19 20 from cupy.linalg.eigenvalue import eigh # NOQA 21 from cupy.linalg.eigenvalue import eigvalsh # NOQA 22 23 from cupy.linalg.solve import inv # NOQA 24 from cupy.linalg.solve import pinv # NOQA 25 from cupy.linalg.solve import solve # NOQA 26 from cupy.linalg.solve import tensorinv # NOQA 27 from cupy.linalg.solve import tensorsolve # NOQA 28 ``` Path: `cupy/linalg/product.py` Content: ``` 1 import collections 2 3 import numpy 4 import six 5 6 import cupy 7 from cupy import core 8 from cupy import internal 9 10 11 matmul = core.matmul 12 13 14 def dot(a, b, out=None): 15 """Returns a dot product of two arrays. 16 17 For arrays with more than one axis, it computes the dot product along the 18 last axis of ``a`` and the second-to-last axis of ``b``. This is just a 19 matrix product if the both arrays are 2-D. For 1-D arrays, it uses their 20 unique axis as an axis to take dot product over. 21 22 Args: 23 a (cupy.ndarray): The left argument. 24 b (cupy.ndarray): The right argument. 25 out (cupy.ndarray): Output array. 26 27 Returns: 28 cupy.ndarray: The dot product of ``a`` and ``b``. 29 30 .. seealso:: :func:`numpy.dot` 31 32 """ 33 # TODO(okuta): check type 34 return a.dot(b, out) 35 36 37 def vdot(a, b): 38 """Returns the dot product of two vectors. 39 40 The input arrays are flattened into 1-D vectors and then it performs inner 41 product of these vectors. 42 43 Args: 44 a (cupy.ndarray): The first argument. 45 b (cupy.ndarray): The second argument. 46 47 Returns: 48 cupy.ndarray: Zero-dimensional array of the dot product result. 49 50 .. seealso:: :func:`numpy.vdot` 51 52 """ 53 if a.size != b.size: 54 raise ValueError('Axis dimension mismatch') 55 if a.dtype.kind == 'c': 56 a = a.conj() 57 58 return core.tensordot_core(a, b, None, 1, 1, a.size, ()) 59 60 61 def inner(a, b): 62 """Returns the inner product of two arrays. 63 64 It uses the last axis of each argument to take sum product. 65 66 Args: 67 a (cupy.ndarray): The first argument. 68 b (cupy.ndarray): The second argument. 69 70 Returns: 71 cupy.ndarray: The inner product of ``a`` and ``b``. 72 73 .. 
seealso:: :func:`numpy.inner` 74 75 """ 76 a_ndim = a.ndim 77 b_ndim = b.ndim 78 if a_ndim == 0 or b_ndim == 0: 79 return cupy.multiply(a, b) 80 81 a_axis = a_ndim - 1 82 b_axis = b_ndim - 1 83 84 if a.shape[-1] != b.shape[-1]: 85 raise ValueError('Axis dimension mismatch') 86 87 if a_axis: 88 a = cupy.rollaxis(a, a_axis, 0) 89 if b_axis: 90 b = cupy.rollaxis(b, b_axis, 0) 91 92 ret_shape = a.shape[1:] + b.shape[1:] 93 94 k = a.shape[0] 95 n = a.size // k 96 m = b.size // k 97 98 return core.tensordot_core(a, b, None, n, m, k, ret_shape) 99 100 101 def outer(a, b, out=None): 102 """Returns the outer product of two vectors. 103 104 The input arrays are flattened into 1-D vectors and then it performs outer 105 product of these vectors. 106 107 Args: 108 a (cupy.ndarray): The first argument. 109 b (cupy.ndarray): The second argument. 110 out (cupy.ndarray): Output array. 111 112 Returns: 113 cupy.ndarray: 2-D array of the outer product of ``a`` and ``b``. 114 115 .. seealso:: :func:`numpy.outer` 116 117 """ 118 n = a.size 119 m = b.size 120 ret_shape = (n, m) 121 122 if out is None: 123 return core.tensordot_core(a, b, None, n, m, 1, ret_shape) 124 125 if out.size != n * m: 126 raise ValueError('Output array has an invalid size') 127 if out.flags.c_contiguous: 128 return core.tensordot_core(a, b, out, n, m, 1, ret_shape) 129 else: 130 out[:] = core.tensordot_core(a, b, None, n, m, 1, ret_shape) 131 return out 132 133 134 def tensordot(a, b, axes=2): 135 """Returns the tensor dot product of two arrays along specified axes. 136 137 This is equivalent to compute dot product along the specified axes which 138 are treated as one axis by reshaping. 139 140 Args: 141 a (cupy.ndarray): The first argument. 142 b (cupy.ndarray): The second argument. 143 axes: 144 - If it is an integer, then ``axes`` axes at the last of ``a`` and 145 the first of ``b`` are used. 146 - If it is a pair of sequences of integers, then these two 147 sequences specify the list of axes for ``a`` and ``b``. The 148 corresponding axes are paired for sum-product. 149 150 Returns: 151 cupy.ndarray: The tensor dot product of ``a`` and ``b`` along the 152 axes specified by ``axes``. 153 154 .. 
seealso:: :func:`numpy.tensordot` 155 156 """ 157 a_ndim = a.ndim 158 b_ndim = b.ndim 159 if a_ndim == 0 or b_ndim == 0: 160 if axes != 0 and axes != ((), ()): 161 raise ValueError('An input is zero-dim while axes has dimensions') 162 return cupy.multiply(a, b) 163 164 if isinstance(axes, collections.Sequence): 165 if len(axes) != 2: 166 raise ValueError('Axes must consist of two arrays.') 167 a_axes, b_axes = axes 168 if numpy.isscalar(a_axes): 169 a_axes = a_axes, 170 if numpy.isscalar(b_axes): 171 b_axes = b_axes, 172 else: 173 a_axes = tuple(six.moves.range(a_ndim - axes, a_ndim)) 174 b_axes = tuple(six.moves.range(axes)) 175 176 sum_ndim = len(a_axes) 177 if sum_ndim != len(b_axes): 178 raise ValueError('Axes length mismatch') 179 180 for a_axis, b_axis in zip(a_axes, b_axes): 181 if a.shape[a_axis] != b.shape[b_axis]: 182 raise ValueError('Axis dimension mismatch') 183 184 # Make the axes non-negative 185 a = _move_axes_to_head(a, [axis % a_ndim for axis in a_axes]) 186 b = _move_axes_to_head(b, [axis % b_ndim for axis in b_axes]) 187 188 ret_shape = a.shape[sum_ndim:] + b.shape[sum_ndim:] 189 190 k = internal.prod(a.shape[:sum_ndim]) 191 n = a.size // k 192 m = b.size // k 193 194 return core.tensordot_core(a, b, None, n, m, k, ret_shape) 195 196 197 # TODO(okuta): Implement matrix_power 198 199 200 def kron(a, b): 201 """Returns the kronecker product of two arrays. 202 203 Args: 204 a (~cupy.ndarray): The first argument. 205 b (~cupy.ndarray): The second argument. 206 207 Returns: 208 ~cupy.ndarray: Output array. 209 210 .. seealso:: :func:`numpy.kron` 211 212 """ 213 a_ndim = a.ndim 214 b_ndim = b.ndim 215 if a_ndim == 0 or b_ndim == 0: 216 return cupy.multiply(a, b) 217 218 ndim = b_ndim 219 a_shape = a.shape 220 b_shape = b.shape 221 if a_ndim != b_ndim: 222 if b_ndim > a_ndim: 223 a_shape = (1,) * (b_ndim - a_ndim) + a_shape 224 else: 225 b_shape = (1,) * (a_ndim - b_ndim) + b_shape 226 ndim = a_ndim 227 228 axis = ndim - 1 229 out = core.tensordot_core(a, b, None, a.size, b.size, 1, a_shape + b_shape) 230 for _ in six.moves.range(ndim): 231 out = core.concatenate_method(out, axis=axis) 232 233 return out 234 235 236 def _move_axes_to_head(a, axes): 237 # This function moves the axes of ``s`` to the head of the shape. 238 for idx, axis in enumerate(axes): 239 if idx != axis: 240 break 241 else: 242 return a 243 244 return a.transpose( 245 axes + [i for i in six.moves.range(a.ndim) if i not in axes]) 246 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cupy/linalg/__init__.py b/cupy/linalg/__init__.py --- a/cupy/linalg/__init__.py +++ b/cupy/linalg/__init__.py @@ -25,3 +25,5 @@ from cupy.linalg.solve import solve # NOQA from cupy.linalg.solve import tensorinv # NOQA from cupy.linalg.solve import tensorsolve # NOQA + +from cupy.linalg.product import matrix_power # NOQA diff --git a/cupy/linalg/product.py b/cupy/linalg/product.py --- a/cupy/linalg/product.py +++ b/cupy/linalg/product.py @@ -7,6 +7,7 @@ from cupy import core from cupy import internal +from cupy.linalg.solve import inv matmul = core.matmul @@ -194,7 +195,49 @@ return core.tensordot_core(a, b, None, n, m, k, ret_shape) -# TODO(okuta): Implement matrix_power +def matrix_power(M, n): + """Raise a square matrix to the (integer) power `n`. + + Args: + M (~cupy.ndarray): Matrix to raise by power n. + n (~int): Power to raise matrix to. + + Returns: + ~cupy.ndarray: Output array. + + .. note:: M must be of dtype `float32` or `float64`. + + ..seealso:: :func:`numpy.linalg.matrix_power` + """ + if M.ndim != 2 or M.shape[0] != M.shape[1]: + raise ValueError("input must be a square array") + if not isinstance(n, six.integer_types): + raise TypeError("exponent must be an integer") + + if n == 0: + return cupy.identity(M.shape[0], dtype=M.dtype) + elif n < 0: + M = inv(M) + n *= -1 + + # short-cuts + if n <= 3: + if n == 1: + return M + elif n == 2: + return cupy.matmul(M, M) + else: + return cupy.matmul(cupy.matmul(M, M), M) + + # binary decomposition to reduce the number of Matrix + # multiplications for n > 3. + result, Z = None, None + for b in cupy.binary_repr(n)[::-1]: + Z = M if Z is None else cupy.matmul(Z, Z) + if b == '1': + result = Z if result is None else cupy.matmul(result, Z) + + return result def kron(a, b):
{"golden_diff": "diff --git a/cupy/linalg/__init__.py b/cupy/linalg/__init__.py\n--- a/cupy/linalg/__init__.py\n+++ b/cupy/linalg/__init__.py\n@@ -25,3 +25,5 @@\n from cupy.linalg.solve import solve # NOQA\n from cupy.linalg.solve import tensorinv # NOQA\n from cupy.linalg.solve import tensorsolve # NOQA\n+\n+from cupy.linalg.product import matrix_power # NOQA\ndiff --git a/cupy/linalg/product.py b/cupy/linalg/product.py\n--- a/cupy/linalg/product.py\n+++ b/cupy/linalg/product.py\n@@ -7,6 +7,7 @@\n from cupy import core\n from cupy import internal\n \n+from cupy.linalg.solve import inv\n \n matmul = core.matmul\n \n@@ -194,7 +195,49 @@\n return core.tensordot_core(a, b, None, n, m, k, ret_shape)\n \n \n-# TODO(okuta): Implement matrix_power\n+def matrix_power(M, n):\n+ \"\"\"Raise a square matrix to the (integer) power `n`.\n+\n+ Args:\n+ M (~cupy.ndarray): Matrix to raise by power n.\n+ n (~int): Power to raise matrix to.\n+\n+ Returns:\n+ ~cupy.ndarray: Output array.\n+\n+ .. note:: M must be of dtype `float32` or `float64`.\n+\n+ ..seealso:: :func:`numpy.linalg.matrix_power`\n+ \"\"\"\n+ if M.ndim != 2 or M.shape[0] != M.shape[1]:\n+ raise ValueError(\"input must be a square array\")\n+ if not isinstance(n, six.integer_types):\n+ raise TypeError(\"exponent must be an integer\")\n+\n+ if n == 0:\n+ return cupy.identity(M.shape[0], dtype=M.dtype)\n+ elif n < 0:\n+ M = inv(M)\n+ n *= -1\n+\n+ # short-cuts\n+ if n <= 3:\n+ if n == 1:\n+ return M\n+ elif n == 2:\n+ return cupy.matmul(M, M)\n+ else:\n+ return cupy.matmul(cupy.matmul(M, M), M)\n+\n+ # binary decomposition to reduce the number of Matrix\n+ # multiplications for n > 3.\n+ result, Z = None, None\n+ for b in cupy.binary_repr(n)[::-1]:\n+ Z = M if Z is None else cupy.matmul(Z, Z)\n+ if b == '1':\n+ result = Z if result is None else cupy.matmul(result, Z)\n+\n+ return result\n \n \n def kron(a, b):\n", "issue": "matrix power is missing from cupy.linalg.\nThere is a corresponding `numpy.linalg.matrix_power` in NumPy. 
Is there a reason why this has not made it into CuPy?\r\n\r\nIf it helps, I am willing to take a shot at this one, though I might need a bit of hand-holding as I've not much experience with Cython code.\n", "before_files": [{"content": "# Functions from the following NumPy document\n# https://docs.scipy.org/doc/numpy/reference/routines.linalg.html\n\n# \"NOQA\" to suppress flake8 warning\nfrom cupy.linalg import decomposition # NOQA\nfrom cupy.linalg import eigenvalue # NOQA\nfrom cupy.linalg import einsum # NOQA\nfrom cupy.linalg import norms # NOQA\nfrom cupy.linalg.norms import det # NOQA\nfrom cupy.linalg.norms import matrix_rank # NOQA\nfrom cupy.linalg.norms import norm # NOQA\nfrom cupy.linalg.norms import slogdet # NOQA\nfrom cupy.linalg import product # NOQA\nfrom cupy.linalg import solve # NOQA\n\nfrom cupy.linalg.decomposition import cholesky # NOQA\nfrom cupy.linalg.decomposition import qr # NOQA\nfrom cupy.linalg.decomposition import svd # NOQA\n\nfrom cupy.linalg.eigenvalue import eigh # NOQA\nfrom cupy.linalg.eigenvalue import eigvalsh # NOQA\n\nfrom cupy.linalg.solve import inv # NOQA\nfrom cupy.linalg.solve import pinv # NOQA\nfrom cupy.linalg.solve import solve # NOQA\nfrom cupy.linalg.solve import tensorinv # NOQA\nfrom cupy.linalg.solve import tensorsolve # NOQA\n", "path": "cupy/linalg/__init__.py"}, {"content": "import collections\n\nimport numpy\nimport six\n\nimport cupy\nfrom cupy import core\nfrom cupy import internal\n\n\nmatmul = core.matmul\n\n\ndef dot(a, b, out=None):\n \"\"\"Returns a dot product of two arrays.\n\n For arrays with more than one axis, it computes the dot product along the\n last axis of ``a`` and the second-to-last axis of ``b``. This is just a\n matrix product if the both arrays are 2-D. For 1-D arrays, it uses their\n unique axis as an axis to take dot product over.\n\n Args:\n a (cupy.ndarray): The left argument.\n b (cupy.ndarray): The right argument.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: The dot product of ``a`` and ``b``.\n\n .. seealso:: :func:`numpy.dot`\n\n \"\"\"\n # TODO(okuta): check type\n return a.dot(b, out)\n\n\ndef vdot(a, b):\n \"\"\"Returns the dot product of two vectors.\n\n The input arrays are flattened into 1-D vectors and then it performs inner\n product of these vectors.\n\n Args:\n a (cupy.ndarray): The first argument.\n b (cupy.ndarray): The second argument.\n\n Returns:\n cupy.ndarray: Zero-dimensional array of the dot product result.\n\n .. seealso:: :func:`numpy.vdot`\n\n \"\"\"\n if a.size != b.size:\n raise ValueError('Axis dimension mismatch')\n if a.dtype.kind == 'c':\n a = a.conj()\n\n return core.tensordot_core(a, b, None, 1, 1, a.size, ())\n\n\ndef inner(a, b):\n \"\"\"Returns the inner product of two arrays.\n\n It uses the last axis of each argument to take sum product.\n\n Args:\n a (cupy.ndarray): The first argument.\n b (cupy.ndarray): The second argument.\n\n Returns:\n cupy.ndarray: The inner product of ``a`` and ``b``.\n\n .. 
seealso:: :func:`numpy.inner`\n\n \"\"\"\n a_ndim = a.ndim\n b_ndim = b.ndim\n if a_ndim == 0 or b_ndim == 0:\n return cupy.multiply(a, b)\n\n a_axis = a_ndim - 1\n b_axis = b_ndim - 1\n\n if a.shape[-1] != b.shape[-1]:\n raise ValueError('Axis dimension mismatch')\n\n if a_axis:\n a = cupy.rollaxis(a, a_axis, 0)\n if b_axis:\n b = cupy.rollaxis(b, b_axis, 0)\n\n ret_shape = a.shape[1:] + b.shape[1:]\n\n k = a.shape[0]\n n = a.size // k\n m = b.size // k\n\n return core.tensordot_core(a, b, None, n, m, k, ret_shape)\n\n\ndef outer(a, b, out=None):\n \"\"\"Returns the outer product of two vectors.\n\n The input arrays are flattened into 1-D vectors and then it performs outer\n product of these vectors.\n\n Args:\n a (cupy.ndarray): The first argument.\n b (cupy.ndarray): The second argument.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: 2-D array of the outer product of ``a`` and ``b``.\n\n .. seealso:: :func:`numpy.outer`\n\n \"\"\"\n n = a.size\n m = b.size\n ret_shape = (n, m)\n\n if out is None:\n return core.tensordot_core(a, b, None, n, m, 1, ret_shape)\n\n if out.size != n * m:\n raise ValueError('Output array has an invalid size')\n if out.flags.c_contiguous:\n return core.tensordot_core(a, b, out, n, m, 1, ret_shape)\n else:\n out[:] = core.tensordot_core(a, b, None, n, m, 1, ret_shape)\n return out\n\n\ndef tensordot(a, b, axes=2):\n \"\"\"Returns the tensor dot product of two arrays along specified axes.\n\n This is equivalent to compute dot product along the specified axes which\n are treated as one axis by reshaping.\n\n Args:\n a (cupy.ndarray): The first argument.\n b (cupy.ndarray): The second argument.\n axes:\n - If it is an integer, then ``axes`` axes at the last of ``a`` and\n the first of ``b`` are used.\n - If it is a pair of sequences of integers, then these two\n sequences specify the list of axes for ``a`` and ``b``. The\n corresponding axes are paired for sum-product.\n\n Returns:\n cupy.ndarray: The tensor dot product of ``a`` and ``b`` along the\n axes specified by ``axes``.\n\n .. seealso:: :func:`numpy.tensordot`\n\n \"\"\"\n a_ndim = a.ndim\n b_ndim = b.ndim\n if a_ndim == 0 or b_ndim == 0:\n if axes != 0 and axes != ((), ()):\n raise ValueError('An input is zero-dim while axes has dimensions')\n return cupy.multiply(a, b)\n\n if isinstance(axes, collections.Sequence):\n if len(axes) != 2:\n raise ValueError('Axes must consist of two arrays.')\n a_axes, b_axes = axes\n if numpy.isscalar(a_axes):\n a_axes = a_axes,\n if numpy.isscalar(b_axes):\n b_axes = b_axes,\n else:\n a_axes = tuple(six.moves.range(a_ndim - axes, a_ndim))\n b_axes = tuple(six.moves.range(axes))\n\n sum_ndim = len(a_axes)\n if sum_ndim != len(b_axes):\n raise ValueError('Axes length mismatch')\n\n for a_axis, b_axis in zip(a_axes, b_axes):\n if a.shape[a_axis] != b.shape[b_axis]:\n raise ValueError('Axis dimension mismatch')\n\n # Make the axes non-negative\n a = _move_axes_to_head(a, [axis % a_ndim for axis in a_axes])\n b = _move_axes_to_head(b, [axis % b_ndim for axis in b_axes])\n\n ret_shape = a.shape[sum_ndim:] + b.shape[sum_ndim:]\n\n k = internal.prod(a.shape[:sum_ndim])\n n = a.size // k\n m = b.size // k\n\n return core.tensordot_core(a, b, None, n, m, k, ret_shape)\n\n\n# TODO(okuta): Implement matrix_power\n\n\ndef kron(a, b):\n \"\"\"Returns the kronecker product of two arrays.\n\n Args:\n a (~cupy.ndarray): The first argument.\n b (~cupy.ndarray): The second argument.\n\n Returns:\n ~cupy.ndarray: Output array.\n\n .. 
seealso:: :func:`numpy.kron`\n\n \"\"\"\n a_ndim = a.ndim\n b_ndim = b.ndim\n if a_ndim == 0 or b_ndim == 0:\n return cupy.multiply(a, b)\n\n ndim = b_ndim\n a_shape = a.shape\n b_shape = b.shape\n if a_ndim != b_ndim:\n if b_ndim > a_ndim:\n a_shape = (1,) * (b_ndim - a_ndim) + a_shape\n else:\n b_shape = (1,) * (a_ndim - b_ndim) + b_shape\n ndim = a_ndim\n\n axis = ndim - 1\n out = core.tensordot_core(a, b, None, a.size, b.size, 1, a_shape + b_shape)\n for _ in six.moves.range(ndim):\n out = core.concatenate_method(out, axis=axis)\n\n return out\n\n\ndef _move_axes_to_head(a, axes):\n # This function moves the axes of ``s`` to the head of the shape.\n for idx, axis in enumerate(axes):\n if idx != axis:\n break\n else:\n return a\n\n return a.transpose(\n axes + [i for i in six.moves.range(a.ndim) if i not in axes])\n", "path": "cupy/linalg/product.py"}], "after_files": [{"content": "# Functions from the following NumPy document\n# https://docs.scipy.org/doc/numpy/reference/routines.linalg.html\n\n# \"NOQA\" to suppress flake8 warning\nfrom cupy.linalg import decomposition # NOQA\nfrom cupy.linalg import eigenvalue # NOQA\nfrom cupy.linalg import einsum # NOQA\nfrom cupy.linalg import norms # NOQA\nfrom cupy.linalg.norms import det # NOQA\nfrom cupy.linalg.norms import matrix_rank # NOQA\nfrom cupy.linalg.norms import norm # NOQA\nfrom cupy.linalg.norms import slogdet # NOQA\nfrom cupy.linalg import product # NOQA\nfrom cupy.linalg import solve # NOQA\n\nfrom cupy.linalg.decomposition import cholesky # NOQA\nfrom cupy.linalg.decomposition import qr # NOQA\nfrom cupy.linalg.decomposition import svd # NOQA\n\nfrom cupy.linalg.eigenvalue import eigh # NOQA\nfrom cupy.linalg.eigenvalue import eigvalsh # NOQA\n\nfrom cupy.linalg.solve import inv # NOQA\nfrom cupy.linalg.solve import pinv # NOQA\nfrom cupy.linalg.solve import solve # NOQA\nfrom cupy.linalg.solve import tensorinv # NOQA\nfrom cupy.linalg.solve import tensorsolve # NOQA\n\nfrom cupy.linalg.product import matrix_power # NOQA\n", "path": "cupy/linalg/__init__.py"}, {"content": "import collections\n\nimport numpy\nimport six\n\nimport cupy\nfrom cupy import core\nfrom cupy import internal\n\nfrom cupy.linalg.solve import inv\n\nmatmul = core.matmul\n\n\ndef dot(a, b, out=None):\n \"\"\"Returns a dot product of two arrays.\n\n For arrays with more than one axis, it computes the dot product along the\n last axis of ``a`` and the second-to-last axis of ``b``. This is just a\n matrix product if the both arrays are 2-D. For 1-D arrays, it uses their\n unique axis as an axis to take dot product over.\n\n Args:\n a (cupy.ndarray): The left argument.\n b (cupy.ndarray): The right argument.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: The dot product of ``a`` and ``b``.\n\n .. seealso:: :func:`numpy.dot`\n\n \"\"\"\n # TODO(okuta): check type\n return a.dot(b, out)\n\n\ndef vdot(a, b):\n \"\"\"Returns the dot product of two vectors.\n\n The input arrays are flattened into 1-D vectors and then it performs inner\n product of these vectors.\n\n Args:\n a (cupy.ndarray): The first argument.\n b (cupy.ndarray): The second argument.\n\n Returns:\n cupy.ndarray: Zero-dimensional array of the dot product result.\n\n .. 
seealso:: :func:`numpy.vdot`\n\n \"\"\"\n if a.size != b.size:\n raise ValueError('Axis dimension mismatch')\n if a.dtype.kind == 'c':\n a = a.conj()\n\n return core.tensordot_core(a, b, None, 1, 1, a.size, ())\n\n\ndef inner(a, b):\n \"\"\"Returns the inner product of two arrays.\n\n It uses the last axis of each argument to take sum product.\n\n Args:\n a (cupy.ndarray): The first argument.\n b (cupy.ndarray): The second argument.\n\n Returns:\n cupy.ndarray: The inner product of ``a`` and ``b``.\n\n .. seealso:: :func:`numpy.inner`\n\n \"\"\"\n a_ndim = a.ndim\n b_ndim = b.ndim\n if a_ndim == 0 or b_ndim == 0:\n return cupy.multiply(a, b)\n\n a_axis = a_ndim - 1\n b_axis = b_ndim - 1\n\n if a.shape[-1] != b.shape[-1]:\n raise ValueError('Axis dimension mismatch')\n\n if a_axis:\n a = cupy.rollaxis(a, a_axis, 0)\n if b_axis:\n b = cupy.rollaxis(b, b_axis, 0)\n\n ret_shape = a.shape[1:] + b.shape[1:]\n\n k = a.shape[0]\n n = a.size // k\n m = b.size // k\n\n return core.tensordot_core(a, b, None, n, m, k, ret_shape)\n\n\ndef outer(a, b, out=None):\n \"\"\"Returns the outer product of two vectors.\n\n The input arrays are flattened into 1-D vectors and then it performs outer\n product of these vectors.\n\n Args:\n a (cupy.ndarray): The first argument.\n b (cupy.ndarray): The second argument.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: 2-D array of the outer product of ``a`` and ``b``.\n\n .. seealso:: :func:`numpy.outer`\n\n \"\"\"\n n = a.size\n m = b.size\n ret_shape = (n, m)\n\n if out is None:\n return core.tensordot_core(a, b, None, n, m, 1, ret_shape)\n\n if out.size != n * m:\n raise ValueError('Output array has an invalid size')\n if out.flags.c_contiguous:\n return core.tensordot_core(a, b, out, n, m, 1, ret_shape)\n else:\n out[:] = core.tensordot_core(a, b, None, n, m, 1, ret_shape)\n return out\n\n\ndef tensordot(a, b, axes=2):\n \"\"\"Returns the tensor dot product of two arrays along specified axes.\n\n This is equivalent to compute dot product along the specified axes which\n are treated as one axis by reshaping.\n\n Args:\n a (cupy.ndarray): The first argument.\n b (cupy.ndarray): The second argument.\n axes:\n - If it is an integer, then ``axes`` axes at the last of ``a`` and\n the first of ``b`` are used.\n - If it is a pair of sequences of integers, then these two\n sequences specify the list of axes for ``a`` and ``b``. The\n corresponding axes are paired for sum-product.\n\n Returns:\n cupy.ndarray: The tensor dot product of ``a`` and ``b`` along the\n axes specified by ``axes``.\n\n .. 
seealso:: :func:`numpy.tensordot`\n\n \"\"\"\n a_ndim = a.ndim\n b_ndim = b.ndim\n if a_ndim == 0 or b_ndim == 0:\n if axes != 0 and axes != ((), ()):\n raise ValueError('An input is zero-dim while axes has dimensions')\n return cupy.multiply(a, b)\n\n if isinstance(axes, collections.Sequence):\n if len(axes) != 2:\n raise ValueError('Axes must consist of two arrays.')\n a_axes, b_axes = axes\n if numpy.isscalar(a_axes):\n a_axes = a_axes,\n if numpy.isscalar(b_axes):\n b_axes = b_axes,\n else:\n a_axes = tuple(six.moves.range(a_ndim - axes, a_ndim))\n b_axes = tuple(six.moves.range(axes))\n\n sum_ndim = len(a_axes)\n if sum_ndim != len(b_axes):\n raise ValueError('Axes length mismatch')\n\n for a_axis, b_axis in zip(a_axes, b_axes):\n if a.shape[a_axis] != b.shape[b_axis]:\n raise ValueError('Axis dimension mismatch')\n\n # Make the axes non-negative\n a = _move_axes_to_head(a, [axis % a_ndim for axis in a_axes])\n b = _move_axes_to_head(b, [axis % b_ndim for axis in b_axes])\n\n ret_shape = a.shape[sum_ndim:] + b.shape[sum_ndim:]\n\n k = internal.prod(a.shape[:sum_ndim])\n n = a.size // k\n m = b.size // k\n\n return core.tensordot_core(a, b, None, n, m, k, ret_shape)\n\n\ndef matrix_power(M, n):\n \"\"\"Raise a square matrix to the (integer) power `n`.\n\n Args:\n M (~cupy.ndarray): Matrix to raise by power n.\n n (~int): Power to raise matrix to.\n\n Returns:\n ~cupy.ndarray: Output array.\n\n .. note:: M must be of dtype `float32` or `float64`.\n\n ..seealso:: :func:`numpy.linalg.matrix_power`\n \"\"\"\n if M.ndim != 2 or M.shape[0] != M.shape[1]:\n raise ValueError(\"input must be a square array\")\n if not isinstance(n, six.integer_types):\n raise TypeError(\"exponent must be an integer\")\n\n if n == 0:\n return cupy.identity(M.shape[0], dtype=M.dtype)\n elif n < 0:\n M = inv(M)\n n *= -1\n\n # short-cuts\n if n <= 3:\n if n == 1:\n return M\n elif n == 2:\n return cupy.matmul(M, M)\n else:\n return cupy.matmul(cupy.matmul(M, M), M)\n\n # binary decomposition to reduce the number of Matrix\n # multiplications for n > 3.\n result, Z = None, None\n for b in cupy.binary_repr(n)[::-1]:\n Z = M if Z is None else cupy.matmul(Z, Z)\n if b == '1':\n result = Z if result is None else cupy.matmul(result, Z)\n\n return result\n\n\ndef kron(a, b):\n \"\"\"Returns the kronecker product of two arrays.\n\n Args:\n a (~cupy.ndarray): The first argument.\n b (~cupy.ndarray): The second argument.\n\n Returns:\n ~cupy.ndarray: Output array.\n\n .. seealso:: :func:`numpy.kron`\n\n \"\"\"\n a_ndim = a.ndim\n b_ndim = b.ndim\n if a_ndim == 0 or b_ndim == 0:\n return cupy.multiply(a, b)\n\n ndim = b_ndim\n a_shape = a.shape\n b_shape = b.shape\n if a_ndim != b_ndim:\n if b_ndim > a_ndim:\n a_shape = (1,) * (b_ndim - a_ndim) + a_shape\n else:\n b_shape = (1,) * (a_ndim - b_ndim) + b_shape\n ndim = a_ndim\n\n axis = ndim - 1\n out = core.tensordot_core(a, b, None, a.size, b.size, 1, a_shape + b_shape)\n for _ in six.moves.range(ndim):\n out = core.concatenate_method(out, axis=axis)\n\n return out\n\n\ndef _move_axes_to_head(a, axes):\n # This function moves the axes of ``s`` to the head of the shape.\n for idx, axis in enumerate(axes):\n if idx != axis:\n break\n else:\n return a\n\n return a.transpose(\n axes + [i for i in six.moves.range(a.ndim) if i not in axes])\n", "path": "cupy/linalg/product.py"}]}
3,201
611
gh_patches_debug_5488
rasdani/github-patches
git_diff
iterative__dvc-7908
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- config: option for `--jobs` value Several DVC commands have a `--jobs` option that has default values (e.g, https://dvc.org/doc/commands-reference/gc). Afaik there is no way to change the default value. Having the option to change it through `dvc config` would be useful. Can you consider adding it? Thanks --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dvc/fs/__init__.py` Content: ``` 1 from urllib.parse import urlparse 2 3 # pylint: disable=unused-import 4 from dvc_objects.fs import utils # noqa: F401 5 from dvc_objects.fs import ( # noqa: F401 6 FS_MAP, 7 AzureFileSystem, 8 GDriveFileSystem, 9 GSFileSystem, 10 HDFSFileSystem, 11 HTTPFileSystem, 12 HTTPSFileSystem, 13 LocalFileSystem, 14 MemoryFileSystem, 15 OSSFileSystem, 16 S3FileSystem, 17 Schemes, 18 SSHFileSystem, 19 WebDAVFileSystem, 20 WebDAVSFileSystem, 21 WebHDFSFileSystem, 22 generic, 23 get_fs_cls, 24 system, 25 ) 26 from dvc_objects.fs.base import AnyFSPath, FileSystem # noqa: F401 27 from dvc_objects.fs.errors import ( # noqa: F401 28 AuthError, 29 ConfigError, 30 RemoteMissingDepsError, 31 ) 32 from dvc_objects.fs.implementations.azure import AzureAuthError # noqa: F401 33 from dvc_objects.fs.implementations.gdrive import GDriveAuthError # noqa: F401 34 from dvc_objects.fs.implementations.local import localfs # noqa: F401 35 from dvc_objects.fs.implementations.ssh import ( # noqa: F401 36 DEFAULT_PORT as DEFAULT_SSH_PORT, 37 ) 38 from dvc_objects.fs.path import Path # noqa: F401 39 40 from .data import DataFileSystem # noqa: F401 41 from .dvc import DvcFileSystem # noqa: F401 42 from .git import GitFileSystem # noqa: F401 43 44 # pylint: enable=unused-import 45 46 47 def get_fs_config(repo, config, **kwargs): 48 name = kwargs.get("name") 49 if name: 50 try: 51 remote_conf = config["remote"][name.lower()] 52 except KeyError: 53 from dvc.config import RemoteNotFoundError 54 55 raise RemoteNotFoundError(f"remote '{name}' doesn't exist") 56 else: 57 remote_conf = kwargs 58 return _resolve_remote_refs(repo, config, remote_conf) 59 60 61 def _resolve_remote_refs(repo, config, remote_conf): 62 # Support for cross referenced remotes. 63 # This will merge the settings, shadowing base ref with remote_conf. 
64 # For example, having: 65 # 66 # dvc remote add server ssh://localhost 67 # dvc remote modify server user root 68 # dvc remote modify server ask_password true 69 # 70 # dvc remote add images remote://server/tmp/pictures 71 # dvc remote modify images user alice 72 # dvc remote modify images ask_password false 73 # dvc remote modify images password asdf1234 74 # 75 # Results on a config dictionary like: 76 # 77 # { 78 # "url": "ssh://localhost/tmp/pictures", 79 # "user": "alice", 80 # "password": "asdf1234", 81 # "ask_password": False, 82 # } 83 parsed = urlparse(remote_conf["url"]) 84 if parsed.scheme != "remote": 85 return remote_conf 86 87 base = get_fs_config(repo, config, name=parsed.netloc) 88 cls, _, _ = get_cloud_fs(repo, **base) 89 relpath = parsed.path.lstrip("/").replace("/", cls.sep) 90 url = cls.sep.join((base["url"], relpath)) 91 return {**base, **remote_conf, "url": url} 92 93 94 def get_cloud_fs(repo, **kwargs): 95 from dvc.config import ConfigError as RepoConfigError 96 from dvc.config_schema import SCHEMA, Invalid 97 98 repo_config = repo.config if repo else {} 99 core_config = repo_config.get("core", {}) 100 101 remote_conf = get_fs_config(repo, repo_config, **kwargs) 102 try: 103 remote_conf = SCHEMA["remote"][str](remote_conf) 104 except Invalid as exc: 105 raise RepoConfigError(str(exc)) from None 106 107 if "jobs" not in remote_conf: 108 jobs = core_config.get("jobs") 109 if jobs: 110 remote_conf["jobs"] = jobs 111 112 if "checksum_jobs" not in remote_conf: 113 checksum_jobs = core_config.get("checksum_jobs") 114 if checksum_jobs: 115 remote_conf["checksum_jobs"] = checksum_jobs 116 117 cls = get_fs_cls(remote_conf) 118 119 if cls == GDriveFileSystem and repo: 120 remote_conf["gdrive_credentials_tmp_dir"] = repo.tmp_dir 121 122 url = remote_conf.pop("url") 123 if issubclass(cls, WebDAVFileSystem): 124 # For WebDAVFileSystem, provided url is the base path itself, so it 125 # should be treated as being a root path. 126 fs_path = cls.root_marker 127 else: 128 fs_path = cls._strip_protocol(url) # pylint:disable=protected-access 129 130 extras = cls._get_kwargs_from_urls(url) # pylint:disable=protected-access 131 conf = {**extras, **remote_conf} # remote config takes priority 132 return cls, conf, fs_path 133 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dvc/fs/__init__.py b/dvc/fs/__init__.py --- a/dvc/fs/__init__.py +++ b/dvc/fs/__init__.py @@ -104,11 +104,6 @@ except Invalid as exc: raise RepoConfigError(str(exc)) from None - if "jobs" not in remote_conf: - jobs = core_config.get("jobs") - if jobs: - remote_conf["jobs"] = jobs - if "checksum_jobs" not in remote_conf: checksum_jobs = core_config.get("checksum_jobs") if checksum_jobs:
{"golden_diff": "diff --git a/dvc/fs/__init__.py b/dvc/fs/__init__.py\n--- a/dvc/fs/__init__.py\n+++ b/dvc/fs/__init__.py\n@@ -104,11 +104,6 @@\n except Invalid as exc:\n raise RepoConfigError(str(exc)) from None\n \n- if \"jobs\" not in remote_conf:\n- jobs = core_config.get(\"jobs\")\n- if jobs:\n- remote_conf[\"jobs\"] = jobs\n-\n if \"checksum_jobs\" not in remote_conf:\n checksum_jobs = core_config.get(\"checksum_jobs\")\n if checksum_jobs:\n", "issue": "config: option for `--jobs` value\nSeveral DVC commands have a `--jobs` option that has default values (e.g, https://dvc.org/doc/commands-reference/gc).\r\n\r\nAfaik there is no way to change the default value. Having the option to change it through `dvc config` would be useful.\r\n\r\nCan you consider adding it?\r\n\r\nThanks\n", "before_files": [{"content": "from urllib.parse import urlparse\n\n# pylint: disable=unused-import\nfrom dvc_objects.fs import utils # noqa: F401\nfrom dvc_objects.fs import ( # noqa: F401\n FS_MAP,\n AzureFileSystem,\n GDriveFileSystem,\n GSFileSystem,\n HDFSFileSystem,\n HTTPFileSystem,\n HTTPSFileSystem,\n LocalFileSystem,\n MemoryFileSystem,\n OSSFileSystem,\n S3FileSystem,\n Schemes,\n SSHFileSystem,\n WebDAVFileSystem,\n WebDAVSFileSystem,\n WebHDFSFileSystem,\n generic,\n get_fs_cls,\n system,\n)\nfrom dvc_objects.fs.base import AnyFSPath, FileSystem # noqa: F401\nfrom dvc_objects.fs.errors import ( # noqa: F401\n AuthError,\n ConfigError,\n RemoteMissingDepsError,\n)\nfrom dvc_objects.fs.implementations.azure import AzureAuthError # noqa: F401\nfrom dvc_objects.fs.implementations.gdrive import GDriveAuthError # noqa: F401\nfrom dvc_objects.fs.implementations.local import localfs # noqa: F401\nfrom dvc_objects.fs.implementations.ssh import ( # noqa: F401\n DEFAULT_PORT as DEFAULT_SSH_PORT,\n)\nfrom dvc_objects.fs.path import Path # noqa: F401\n\nfrom .data import DataFileSystem # noqa: F401\nfrom .dvc import DvcFileSystem # noqa: F401\nfrom .git import GitFileSystem # noqa: F401\n\n# pylint: enable=unused-import\n\n\ndef get_fs_config(repo, config, **kwargs):\n name = kwargs.get(\"name\")\n if name:\n try:\n remote_conf = config[\"remote\"][name.lower()]\n except KeyError:\n from dvc.config import RemoteNotFoundError\n\n raise RemoteNotFoundError(f\"remote '{name}' doesn't exist\")\n else:\n remote_conf = kwargs\n return _resolve_remote_refs(repo, config, remote_conf)\n\n\ndef _resolve_remote_refs(repo, config, remote_conf):\n # Support for cross referenced remotes.\n # This will merge the settings, shadowing base ref with remote_conf.\n # For example, having:\n #\n # dvc remote add server ssh://localhost\n # dvc remote modify server user root\n # dvc remote modify server ask_password true\n #\n # dvc remote add images remote://server/tmp/pictures\n # dvc remote modify images user alice\n # dvc remote modify images ask_password false\n # dvc remote modify images password asdf1234\n #\n # Results on a config dictionary like:\n #\n # {\n # \"url\": \"ssh://localhost/tmp/pictures\",\n # \"user\": \"alice\",\n # \"password\": \"asdf1234\",\n # \"ask_password\": False,\n # }\n parsed = urlparse(remote_conf[\"url\"])\n if parsed.scheme != \"remote\":\n return remote_conf\n\n base = get_fs_config(repo, config, name=parsed.netloc)\n cls, _, _ = get_cloud_fs(repo, **base)\n relpath = parsed.path.lstrip(\"/\").replace(\"/\", cls.sep)\n url = cls.sep.join((base[\"url\"], relpath))\n return {**base, **remote_conf, \"url\": url}\n\n\ndef get_cloud_fs(repo, **kwargs):\n from dvc.config import ConfigError as 
RepoConfigError\n from dvc.config_schema import SCHEMA, Invalid\n\n repo_config = repo.config if repo else {}\n core_config = repo_config.get(\"core\", {})\n\n remote_conf = get_fs_config(repo, repo_config, **kwargs)\n try:\n remote_conf = SCHEMA[\"remote\"][str](remote_conf)\n except Invalid as exc:\n raise RepoConfigError(str(exc)) from None\n\n if \"jobs\" not in remote_conf:\n jobs = core_config.get(\"jobs\")\n if jobs:\n remote_conf[\"jobs\"] = jobs\n\n if \"checksum_jobs\" not in remote_conf:\n checksum_jobs = core_config.get(\"checksum_jobs\")\n if checksum_jobs:\n remote_conf[\"checksum_jobs\"] = checksum_jobs\n\n cls = get_fs_cls(remote_conf)\n\n if cls == GDriveFileSystem and repo:\n remote_conf[\"gdrive_credentials_tmp_dir\"] = repo.tmp_dir\n\n url = remote_conf.pop(\"url\")\n if issubclass(cls, WebDAVFileSystem):\n # For WebDAVFileSystem, provided url is the base path itself, so it\n # should be treated as being a root path.\n fs_path = cls.root_marker\n else:\n fs_path = cls._strip_protocol(url) # pylint:disable=protected-access\n\n extras = cls._get_kwargs_from_urls(url) # pylint:disable=protected-access\n conf = {**extras, **remote_conf} # remote config takes priority\n return cls, conf, fs_path\n", "path": "dvc/fs/__init__.py"}], "after_files": [{"content": "from urllib.parse import urlparse\n\n# pylint: disable=unused-import\nfrom dvc_objects.fs import utils # noqa: F401\nfrom dvc_objects.fs import ( # noqa: F401\n FS_MAP,\n AzureFileSystem,\n GDriveFileSystem,\n GSFileSystem,\n HDFSFileSystem,\n HTTPFileSystem,\n HTTPSFileSystem,\n LocalFileSystem,\n MemoryFileSystem,\n OSSFileSystem,\n S3FileSystem,\n Schemes,\n SSHFileSystem,\n WebDAVFileSystem,\n WebDAVSFileSystem,\n WebHDFSFileSystem,\n generic,\n get_fs_cls,\n system,\n)\nfrom dvc_objects.fs.base import AnyFSPath, FileSystem # noqa: F401\nfrom dvc_objects.fs.errors import ( # noqa: F401\n AuthError,\n ConfigError,\n RemoteMissingDepsError,\n)\nfrom dvc_objects.fs.implementations.azure import AzureAuthError # noqa: F401\nfrom dvc_objects.fs.implementations.gdrive import GDriveAuthError # noqa: F401\nfrom dvc_objects.fs.implementations.local import localfs # noqa: F401\nfrom dvc_objects.fs.implementations.ssh import ( # noqa: F401\n DEFAULT_PORT as DEFAULT_SSH_PORT,\n)\nfrom dvc_objects.fs.path import Path # noqa: F401\n\nfrom .data import DataFileSystem # noqa: F401\nfrom .dvc import DvcFileSystem # noqa: F401\nfrom .git import GitFileSystem # noqa: F401\n\n# pylint: enable=unused-import\n\n\ndef get_fs_config(repo, config, **kwargs):\n name = kwargs.get(\"name\")\n if name:\n try:\n remote_conf = config[\"remote\"][name.lower()]\n except KeyError:\n from dvc.config import RemoteNotFoundError\n\n raise RemoteNotFoundError(f\"remote '{name}' doesn't exist\")\n else:\n remote_conf = kwargs\n return _resolve_remote_refs(repo, config, remote_conf)\n\n\ndef _resolve_remote_refs(repo, config, remote_conf):\n # Support for cross referenced remotes.\n # This will merge the settings, shadowing base ref with remote_conf.\n # For example, having:\n #\n # dvc remote add server ssh://localhost\n # dvc remote modify server user root\n # dvc remote modify server ask_password true\n #\n # dvc remote add images remote://server/tmp/pictures\n # dvc remote modify images user alice\n # dvc remote modify images ask_password false\n # dvc remote modify images password asdf1234\n #\n # Results on a config dictionary like:\n #\n # {\n # \"url\": \"ssh://localhost/tmp/pictures\",\n # \"user\": \"alice\",\n # \"password\": \"asdf1234\",\n # 
\"ask_password\": False,\n # }\n parsed = urlparse(remote_conf[\"url\"])\n if parsed.scheme != \"remote\":\n return remote_conf\n\n base = get_fs_config(repo, config, name=parsed.netloc)\n cls, _, _ = get_cloud_fs(repo, **base)\n relpath = parsed.path.lstrip(\"/\").replace(\"/\", cls.sep)\n url = cls.sep.join((base[\"url\"], relpath))\n return {**base, **remote_conf, \"url\": url}\n\n\ndef get_cloud_fs(repo, **kwargs):\n from dvc.config import ConfigError as RepoConfigError\n from dvc.config_schema import SCHEMA, Invalid\n\n repo_config = repo.config if repo else {}\n core_config = repo_config.get(\"core\", {})\n\n remote_conf = get_fs_config(repo, repo_config, **kwargs)\n try:\n remote_conf = SCHEMA[\"remote\"][str](remote_conf)\n except Invalid as exc:\n raise RepoConfigError(str(exc)) from None\n\n if \"checksum_jobs\" not in remote_conf:\n checksum_jobs = core_config.get(\"checksum_jobs\")\n if checksum_jobs:\n remote_conf[\"checksum_jobs\"] = checksum_jobs\n\n cls = get_fs_cls(remote_conf)\n\n if cls == GDriveFileSystem and repo:\n remote_conf[\"gdrive_credentials_tmp_dir\"] = repo.tmp_dir\n\n url = remote_conf.pop(\"url\")\n if issubclass(cls, WebDAVFileSystem):\n # For WebDAVFileSystem, provided url is the base path itself, so it\n # should be treated as being a root path.\n fs_path = cls.root_marker\n else:\n fs_path = cls._strip_protocol(url) # pylint:disable=protected-access\n\n extras = cls._get_kwargs_from_urls(url) # pylint:disable=protected-access\n conf = {**extras, **remote_conf} # remote config takes priority\n return cls, conf, fs_path\n", "path": "dvc/fs/__init__.py"}]}
1,724
136
gh_patches_debug_34627
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-2364
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update I1022 to only suggest sub if all values can be in the sub ### CloudFormation Lint Version 0.64.1 ### What operating system are you using? All ### Describe the bug Original feedback provided by @iann0036. Translated to an issue for tracking. ```yaml Fn::Join: - "" - - Fn::Select: - 0 - Fn::Split: - "/" - !Ref MySubnet1CIDR - !Ref MySubnetsCIDRSize ``` ``` I1022: Prefer using Fn::Sub over Fn::Join with an empty delimiter ``` ### Expected behavior Currently the way to make this comply would be ```yaml Fn::Sub: - ${CIDR}${MySubnetsCIDRSize} - CIDR: Fn::Select: - 0 - Fn::Split: - "/" - !Ref MySubnet1CIDR ``` which may not be as optimal ### Reproduction template ```yaml Fn::Join: - "" - - Fn::Select: - 0 - Fn::Split: - "/" - !Ref MySubnet1CIDR - !Ref MySubnetsCIDRSize ```` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cfnlint/rules/functions/SubNotJoin.py` Content: ``` 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 from cfnlint.rules import CloudFormationLintRule, RuleMatch 6 7 8 class SubNotJoin(CloudFormationLintRule): 9 """Check if Join is being used with no join characters""" 10 id = 'I1022' 11 shortdesc = 'Use Sub instead of Join' 12 description = 'Prefer a sub instead of Join when using a join delimiter that is empty' 13 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html' 14 tags = ['functions', 'sub', 'join'] 15 16 def match(self, cfn): 17 matches = [] 18 19 join_objs = cfn.search_deep_keys('Fn::Join') 20 21 for join_obj in join_objs: 22 if isinstance(join_obj[-1], list): 23 join_operator = join_obj[-1][0] 24 if isinstance(join_operator, str): 25 if join_operator == '': 26 matches.append(RuleMatch( 27 join_obj[0:-1], 'Prefer using Fn::Sub over Fn::Join with an empty delimiter')) 28 return matches 29 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cfnlint/rules/functions/SubNotJoin.py b/src/cfnlint/rules/functions/SubNotJoin.py --- a/src/cfnlint/rules/functions/SubNotJoin.py +++ b/src/cfnlint/rules/functions/SubNotJoin.py @@ -7,12 +7,34 @@ class SubNotJoin(CloudFormationLintRule): """Check if Join is being used with no join characters""" + id = 'I1022' shortdesc = 'Use Sub instead of Join' - description = 'Prefer a sub instead of Join when using a join delimiter that is empty' + description = ( + 'Prefer a sub instead of Join when using a join delimiter that is empty' + ) source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html' tags = ['functions', 'sub', 'join'] + def _check_element(self, element): + if isinstance(element, dict): + if len(element) == 1: + for key, value in element.items(): + if key in ['Fn::Sub']: + if not isinstance(value, str): + return False + elif key not in ['Ref', 'Fn::GetAtt']: + return False + + return True + + def _check_elements(self, elements): + for element in elements: + if not self._check_element(element): + return False + + return True + def match(self, cfn): matches = [] @@ -21,8 +43,15 @@ for join_obj in join_objs: if isinstance(join_obj[-1], list): join_operator = join_obj[-1][0] + join_elements = join_obj[-1][1] if isinstance(join_operator, str): if join_operator == '': - matches.append(RuleMatch( - join_obj[0:-1], 'Prefer using Fn::Sub over Fn::Join with an empty delimiter')) + if isinstance(join_elements, list): + if self._check_elements(join_elements): + matches.append( + RuleMatch( + join_obj[0:-1], + 'Prefer using Fn::Sub over Fn::Join with an empty delimiter', + ) + ) return matches
{"golden_diff": "diff --git a/src/cfnlint/rules/functions/SubNotJoin.py b/src/cfnlint/rules/functions/SubNotJoin.py\n--- a/src/cfnlint/rules/functions/SubNotJoin.py\n+++ b/src/cfnlint/rules/functions/SubNotJoin.py\n@@ -7,12 +7,34 @@\n \n class SubNotJoin(CloudFormationLintRule):\n \"\"\"Check if Join is being used with no join characters\"\"\"\n+\n id = 'I1022'\n shortdesc = 'Use Sub instead of Join'\n- description = 'Prefer a sub instead of Join when using a join delimiter that is empty'\n+ description = (\n+ 'Prefer a sub instead of Join when using a join delimiter that is empty'\n+ )\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub', 'join']\n \n+ def _check_element(self, element):\n+ if isinstance(element, dict):\n+ if len(element) == 1:\n+ for key, value in element.items():\n+ if key in ['Fn::Sub']:\n+ if not isinstance(value, str):\n+ return False\n+ elif key not in ['Ref', 'Fn::GetAtt']:\n+ return False\n+\n+ return True\n+\n+ def _check_elements(self, elements):\n+ for element in elements:\n+ if not self._check_element(element):\n+ return False\n+\n+ return True\n+\n def match(self, cfn):\n matches = []\n \n@@ -21,8 +43,15 @@\n for join_obj in join_objs:\n if isinstance(join_obj[-1], list):\n join_operator = join_obj[-1][0]\n+ join_elements = join_obj[-1][1]\n if isinstance(join_operator, str):\n if join_operator == '':\n- matches.append(RuleMatch(\n- join_obj[0:-1], 'Prefer using Fn::Sub over Fn::Join with an empty delimiter'))\n+ if isinstance(join_elements, list):\n+ if self._check_elements(join_elements):\n+ matches.append(\n+ RuleMatch(\n+ join_obj[0:-1],\n+ 'Prefer using Fn::Sub over Fn::Join with an empty delimiter',\n+ )\n+ )\n return matches\n", "issue": "Update I1022 to only suggest sub if all values can be in the sub\n### CloudFormation Lint Version\n\n0.64.1\n\n### What operating system are you using?\n\nAll\n\n### Describe the bug\n\nOriginal feedback provided by @iann0036. Translated to an issue for tracking.\r\n\r\n```yaml\r\nFn::Join:\r\n - \"\"\r\n - - Fn::Select:\r\n - 0\r\n - Fn::Split:\r\n - \"/\"\r\n - !Ref MySubnet1CIDR\r\n - !Ref MySubnetsCIDRSize\r\n```\r\n\r\n```\r\nI1022: Prefer using Fn::Sub over Fn::Join with an empty delimiter\r\n```\n\n### Expected behavior\n\nCurrently the way to make this comply would be\r\n\r\n```yaml\r\nFn::Sub:\r\n - ${CIDR}${MySubnetsCIDRSize}\r\n - CIDR:\r\n Fn::Select:\r\n - 0\r\n - Fn::Split:\r\n - \"/\"\r\n - !Ref MySubnet1CIDR\r\n```\r\n\r\nwhich may not be as optimal \n\n### Reproduction template\n\n```yaml\r\nFn::Join:\r\n - \"\"\r\n - - Fn::Select:\r\n - 0\r\n - Fn::Split:\r\n - \"/\"\r\n - !Ref MySubnet1CIDR\r\n - !Ref MySubnetsCIDRSize\r\n ````\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass SubNotJoin(CloudFormationLintRule):\n \"\"\"Check if Join is being used with no join characters\"\"\"\n id = 'I1022'\n shortdesc = 'Use Sub instead of Join'\n description = 'Prefer a sub instead of Join when using a join delimiter that is empty'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub', 'join']\n\n def match(self, cfn):\n matches = []\n\n join_objs = cfn.search_deep_keys('Fn::Join')\n\n for join_obj in join_objs:\n if isinstance(join_obj[-1], list):\n join_operator = join_obj[-1][0]\n if isinstance(join_operator, str):\n if join_operator == '':\n matches.append(RuleMatch(\n join_obj[0:-1], 'Prefer using Fn::Sub over Fn::Join with an empty delimiter'))\n return matches\n", "path": "src/cfnlint/rules/functions/SubNotJoin.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass SubNotJoin(CloudFormationLintRule):\n \"\"\"Check if Join is being used with no join characters\"\"\"\n\n id = 'I1022'\n shortdesc = 'Use Sub instead of Join'\n description = (\n 'Prefer a sub instead of Join when using a join delimiter that is empty'\n )\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub', 'join']\n\n def _check_element(self, element):\n if isinstance(element, dict):\n if len(element) == 1:\n for key, value in element.items():\n if key in ['Fn::Sub']:\n if not isinstance(value, str):\n return False\n elif key not in ['Ref', 'Fn::GetAtt']:\n return False\n\n return True\n\n def _check_elements(self, elements):\n for element in elements:\n if not self._check_element(element):\n return False\n\n return True\n\n def match(self, cfn):\n matches = []\n\n join_objs = cfn.search_deep_keys('Fn::Join')\n\n for join_obj in join_objs:\n if isinstance(join_obj[-1], list):\n join_operator = join_obj[-1][0]\n join_elements = join_obj[-1][1]\n if isinstance(join_operator, str):\n if join_operator == '':\n if isinstance(join_elements, list):\n if self._check_elements(join_elements):\n matches.append(\n RuleMatch(\n join_obj[0:-1],\n 'Prefer using Fn::Sub over Fn::Join with an empty delimiter',\n )\n )\n return matches\n", "path": "src/cfnlint/rules/functions/SubNotJoin.py"}]}
851
504
gh_patches_debug_15666
rasdani/github-patches
git_diff
PaddlePaddle__models-1971
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- icnet加载model_1000预训练模型进行训练报错 1.使用如下指令训练:python train.py --batch_size=16 --use_gpu=True --init_model="./model_1000/" --checkpoint_path="./chkpnt/" 其中:/model_1000为文档中提供的预训练模型,其中并无conv6参数。 <img width="970" alt="bfd08a979fcb4f72bb443fbca3f53d3f" src="https://user-images.githubusercontent.com/37854899/54199980-24704000-4505-11e9-9bf4-70dc09b0cc86.png"> 2. 文档中eval和infer中的model_path建议改为./chkpnt/100,与训练输出保持一致 ![image](https://user-images.githubusercontent.com/37854899/54200167-aceee080-4505-11e9-9d1a-535a8a09df74.png) ![image](https://user-images.githubusercontent.com/37854899/54200175-b2e4c180-4505-11e9-8cb7-68e5020ebc60.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `PaddleCV/icnet/infer.py` Content: ``` 1 """Infer for ICNet model.""" 2 from __future__ import print_function 3 import cityscape 4 import argparse 5 import functools 6 import sys 7 import os 8 import cv2 9 10 import paddle.fluid as fluid 11 import paddle 12 from icnet import icnet 13 from utils import add_arguments, print_arguments, get_feeder_data 14 from paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter 15 from paddle.fluid.initializer import init_on_cpu 16 import numpy as np 17 18 IMG_MEAN = np.array((103.939, 116.779, 123.68), dtype=np.float32) 19 parser = argparse.ArgumentParser(description=__doc__) 20 add_arg = functools.partial(add_arguments, argparser=parser) 21 # yapf: disable 22 add_arg('model_path', str, None, "Model path.") 23 add_arg('images_list', str, None, "List file with images to be infered.") 24 add_arg('images_path', str, None, "The images path.") 25 add_arg('out_path', str, "./output", "Output path.") 26 add_arg('use_gpu', bool, True, "Whether use GPU to test.") 27 # yapf: enable 28 29 data_shape = [3, 1024, 2048] 30 num_classes = 19 31 32 label_colours = [ 33 [128, 64, 128], 34 [244, 35, 231], 35 [69, 69, 69] 36 # 0 = road, 1 = sidewalk, 2 = building 37 , 38 [102, 102, 156], 39 [190, 153, 153], 40 [153, 153, 153] 41 # 3 = wall, 4 = fence, 5 = pole 42 , 43 [250, 170, 29], 44 [219, 219, 0], 45 [106, 142, 35] 46 # 6 = traffic light, 7 = traffic sign, 8 = vegetation 47 , 48 [152, 250, 152], 49 [69, 129, 180], 50 [219, 19, 60] 51 # 9 = terrain, 10 = sky, 11 = person 52 , 53 [255, 0, 0], 54 [0, 0, 142], 55 [0, 0, 69] 56 # 12 = rider, 13 = car, 14 = truck 57 , 58 [0, 60, 100], 59 [0, 79, 100], 60 [0, 0, 230] 61 # 15 = bus, 16 = train, 17 = motocycle 62 , 63 [119, 10, 32] 64 ] 65 66 # 18 = bicycle 67 68 69 def color(input): 70 """ 71 Convert infered result to color image. 
72 """ 73 result = [] 74 for i in input.flatten(): 75 result.append( 76 [label_colours[i][2], label_colours[i][1], label_colours[i][0]]) 77 result = np.array(result).reshape([input.shape[0], input.shape[1], 3]) 78 return result 79 80 81 def infer(args): 82 data_shape = cityscape.test_data_shape() 83 num_classes = cityscape.num_classes() 84 # define network 85 images = fluid.layers.data(name='image', shape=data_shape, dtype='float32') 86 _, _, sub124_out = icnet(images, num_classes, 87 np.array(data_shape[1:]).astype("float32")) 88 predict = fluid.layers.resize_bilinear( 89 sub124_out, out_shape=data_shape[1:3]) 90 predict = fluid.layers.transpose(predict, perm=[0, 2, 3, 1]) 91 predict = fluid.layers.reshape(predict, shape=[-1, num_classes]) 92 _, predict = fluid.layers.topk(predict, k=1) 93 predict = fluid.layers.reshape( 94 predict, 95 shape=[data_shape[1], data_shape[2], -1]) # batch_size should be 1 96 inference_program = fluid.default_main_program().clone(for_test=True) 97 # prepare environment 98 place = fluid.CPUPlace() 99 if args.use_gpu: 100 place = fluid.CUDAPlace(0) 101 exe = fluid.Executor(place) 102 exe.run(fluid.default_startup_program()) 103 assert os.path.exists(args.model_path) 104 fluid.io.load_params(exe, args.model_path) 105 print("loaded model from: %s" % args.model_path) 106 sys.stdout.flush() 107 108 if not os.path.isdir(args.out_path): 109 os.makedirs(args.out_path) 110 111 for line in open(args.images_list): 112 image_file = args.images_path + "/" + line.strip() 113 filename = os.path.basename(image_file) 114 image = paddle.dataset.image.load_image( 115 image_file, is_color=True).astype("float32") 116 image -= IMG_MEAN 117 img = paddle.dataset.image.to_chw(image)[np.newaxis, :] 118 image_t = fluid.core.LoDTensor() 119 image_t.set(img, place) 120 result = exe.run(inference_program, 121 feed={"image": image_t}, 122 fetch_list=[predict]) 123 cv2.imwrite(args.out_path + "/" + filename + "_result.png", 124 color(result[0])) 125 126 127 def main(): 128 args = parser.parse_args() 129 print_arguments(args) 130 infer(args) 131 132 133 if __name__ == "__main__": 134 main() 135 ``` Path: `PaddleCV/icnet/train.py` Content: ``` 1 """Trainer for ICNet model.""" 2 from __future__ import absolute_import 3 from __future__ import division 4 from __future__ import print_function 5 from icnet import icnet 6 import cityscape 7 import argparse 8 import functools 9 import sys 10 import os 11 import time 12 import paddle.fluid as fluid 13 import numpy as np 14 from utils import add_arguments, print_arguments, get_feeder_data 15 from paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter 16 from paddle.fluid.initializer import init_on_cpu 17 18 if 'ce_mode' in os.environ: 19 np.random.seed(10) 20 fluid.default_startup_program().random_seed = 90 21 22 parser = argparse.ArgumentParser(description=__doc__) 23 add_arg = functools.partial(add_arguments, argparser=parser) 24 # yapf: disable 25 add_arg('batch_size', int, 16, "Minibatch size.") 26 add_arg('checkpoint_path', str, None, "Checkpoint svae path.") 27 add_arg('init_model', str, None, "Pretrain model path.") 28 add_arg('use_gpu', bool, True, "Whether use GPU to train.") 29 add_arg('random_mirror', bool, True, "Whether prepare by random mirror.") 30 add_arg('random_scaling', bool, True, "Whether prepare by random scaling.") 31 # yapf: enable 32 33 LAMBDA1 = 0.16 34 LAMBDA2 = 0.4 35 LAMBDA3 = 1.0 36 LEARNING_RATE = 0.003 37 POWER = 0.9 38 LOG_PERIOD = 100 39 CHECKPOINT_PERIOD = 100 40 TOTAL_STEP = 100 41 42 no_grad_set = [] 
43 44 45 def create_loss(predict, label, mask, num_classes): 46 predict = fluid.layers.transpose(predict, perm=[0, 2, 3, 1]) 47 predict = fluid.layers.reshape(predict, shape=[-1, num_classes]) 48 label = fluid.layers.reshape(label, shape=[-1, 1]) 49 predict = fluid.layers.gather(predict, mask) 50 label = fluid.layers.gather(label, mask) 51 label = fluid.layers.cast(label, dtype="int64") 52 loss = fluid.layers.softmax_with_cross_entropy(predict, label) 53 no_grad_set.append(label.name) 54 return fluid.layers.reduce_mean(loss) 55 56 57 def poly_decay(): 58 global_step = _decay_step_counter() 59 with init_on_cpu(): 60 decayed_lr = LEARNING_RATE * (fluid.layers.pow( 61 (1 - global_step / TOTAL_STEP), POWER)) 62 return decayed_lr 63 64 65 def train(args): 66 data_shape = cityscape.train_data_shape() 67 num_classes = cityscape.num_classes() 68 # define network 69 images = fluid.layers.data(name='image', shape=data_shape, dtype='float32') 70 label_sub1 = fluid.layers.data(name='label_sub1', shape=[1], dtype='int32') 71 label_sub2 = fluid.layers.data(name='label_sub2', shape=[1], dtype='int32') 72 label_sub4 = fluid.layers.data(name='label_sub4', shape=[1], dtype='int32') 73 mask_sub1 = fluid.layers.data(name='mask_sub1', shape=[-1], dtype='int32') 74 mask_sub2 = fluid.layers.data(name='mask_sub2', shape=[-1], dtype='int32') 75 mask_sub4 = fluid.layers.data(name='mask_sub4', shape=[-1], dtype='int32') 76 77 sub4_out, sub24_out, sub124_out = icnet( 78 images, num_classes, np.array(data_shape[1:]).astype("float32")) 79 loss_sub4 = create_loss(sub4_out, label_sub4, mask_sub4, num_classes) 80 loss_sub24 = create_loss(sub24_out, label_sub2, mask_sub2, num_classes) 81 loss_sub124 = create_loss(sub124_out, label_sub1, mask_sub1, num_classes) 82 reduced_loss = LAMBDA1 * loss_sub4 + LAMBDA2 * loss_sub24 + LAMBDA3 * loss_sub124 83 84 regularizer = fluid.regularizer.L2Decay(0.0001) 85 optimizer = fluid.optimizer.Momentum( 86 learning_rate=poly_decay(), momentum=0.9, regularization=regularizer) 87 _, params_grads = optimizer.minimize(reduced_loss, no_grad_set=no_grad_set) 88 89 # prepare environment 90 place = fluid.CPUPlace() 91 if args.use_gpu: 92 place = fluid.CUDAPlace(0) 93 exe = fluid.Executor(place) 94 95 exe.run(fluid.default_startup_program()) 96 97 if args.init_model is not None: 98 print("load model from: %s" % args.init_model) 99 sys.stdout.flush() 100 fluid.io.load_params(exe, args.init_model) 101 102 iter_id = 0 103 t_loss = 0. 104 sub4_loss = 0. 105 sub24_loss = 0. 106 sub124_loss = 0. 107 train_reader = cityscape.train( 108 args.batch_size, flip=args.random_mirror, scaling=args.random_scaling) 109 start_time = time.time() 110 while True: 111 # train a pass 112 for data in train_reader(): 113 if iter_id > TOTAL_STEP: 114 end_time = time.time() 115 print("kpis train_duration %f" % (end_time - start_time)) 116 return 117 iter_id += 1 118 results = exe.run( 119 feed=get_feeder_data(data, place), 120 fetch_list=[reduced_loss, loss_sub4, loss_sub24, loss_sub124]) 121 t_loss += results[0] 122 sub4_loss += results[1] 123 sub24_loss += results[2] 124 sub124_loss += results[3] 125 # training log 126 if iter_id % LOG_PERIOD == 0: 127 print( 128 "Iter[%d]; train loss: %.3f; sub4_loss: %.3f; sub24_loss: %.3f; sub124_loss: %.3f" 129 % (iter_id, t_loss / LOG_PERIOD, sub4_loss / LOG_PERIOD, 130 sub24_loss / LOG_PERIOD, sub124_loss / LOG_PERIOD)) 131 print("kpis train_cost %f" % (t_loss / LOG_PERIOD)) 132 133 t_loss = 0. 134 sub4_loss = 0. 135 sub24_loss = 0. 136 sub124_loss = 0. 
137 sys.stdout.flush() 138 139 if iter_id % CHECKPOINT_PERIOD == 0 and args.checkpoint_path is not None: 140 dir_name = args.checkpoint_path + "/" + str(iter_id) 141 fluid.io.save_persistables(exe, dirname=dir_name) 142 print("Saved checkpoint: %s" % (dir_name)) 143 144 145 def main(): 146 args = parser.parse_args() 147 print_arguments(args) 148 train(args) 149 150 151 if __name__ == "__main__": 152 main() 153 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/PaddleCV/icnet/infer.py b/PaddleCV/icnet/infer.py --- a/PaddleCV/icnet/infer.py +++ b/PaddleCV/icnet/infer.py @@ -122,6 +122,7 @@ fetch_list=[predict]) cv2.imwrite(args.out_path + "/" + filename + "_result.png", color(result[0])) + print("Saved images into: %s" % args.out_path) def main(): diff --git a/PaddleCV/icnet/train.py b/PaddleCV/icnet/train.py --- a/PaddleCV/icnet/train.py +++ b/PaddleCV/icnet/train.py @@ -96,8 +96,11 @@ if args.init_model is not None: print("load model from: %s" % args.init_model) - sys.stdout.flush() - fluid.io.load_params(exe, args.init_model) + + def if_exist(var): + return os.path.exists(os.path.join(args.init_model, var.name)) + + fluid.io.load_vars(exe, args.init_model, predicate=if_exist) iter_id = 0 t_loss = 0.
{"golden_diff": "diff --git a/PaddleCV/icnet/infer.py b/PaddleCV/icnet/infer.py\n--- a/PaddleCV/icnet/infer.py\n+++ b/PaddleCV/icnet/infer.py\n@@ -122,6 +122,7 @@\n fetch_list=[predict])\n cv2.imwrite(args.out_path + \"/\" + filename + \"_result.png\",\n color(result[0]))\n+ print(\"Saved images into: %s\" % args.out_path)\n \n \n def main():\ndiff --git a/PaddleCV/icnet/train.py b/PaddleCV/icnet/train.py\n--- a/PaddleCV/icnet/train.py\n+++ b/PaddleCV/icnet/train.py\n@@ -96,8 +96,11 @@\n \n if args.init_model is not None:\n print(\"load model from: %s\" % args.init_model)\n- sys.stdout.flush()\n- fluid.io.load_params(exe, args.init_model)\n+\n+ def if_exist(var):\n+ return os.path.exists(os.path.join(args.init_model, var.name))\n+\n+ fluid.io.load_vars(exe, args.init_model, predicate=if_exist)\n \n iter_id = 0\n t_loss = 0.\n", "issue": "icnet\u52a0\u8f7dmodel_1000\u9884\u8bad\u7ec3\u6a21\u578b\u8fdb\u884c\u8bad\u7ec3\u62a5\u9519\n1.\u4f7f\u7528\u5982\u4e0b\u6307\u4ee4\u8bad\u7ec3\uff1apython train.py --batch_size=16 --use_gpu=True --init_model=\"./model_1000/\" --checkpoint_path=\"./chkpnt/\"\r\n\u5176\u4e2d\uff1a/model_1000\u4e3a\u6587\u6863\u4e2d\u63d0\u4f9b\u7684\u9884\u8bad\u7ec3\u6a21\u578b\uff0c\u5176\u4e2d\u5e76\u65e0conv6\u53c2\u6570\u3002\r\n<img width=\"970\" alt=\"bfd08a979fcb4f72bb443fbca3f53d3f\" src=\"https://user-images.githubusercontent.com/37854899/54199980-24704000-4505-11e9-9bf4-70dc09b0cc86.png\">\r\n2. \u6587\u6863\u4e2deval\u548cinfer\u4e2d\u7684model_path\u5efa\u8bae\u6539\u4e3a./chkpnt/100\uff0c\u4e0e\u8bad\u7ec3\u8f93\u51fa\u4fdd\u6301\u4e00\u81f4\r\n![image](https://user-images.githubusercontent.com/37854899/54200167-aceee080-4505-11e9-9d1a-535a8a09df74.png)\r\n![image](https://user-images.githubusercontent.com/37854899/54200175-b2e4c180-4505-11e9-8cb7-68e5020ebc60.png)\r\n\r\n\n", "before_files": [{"content": "\"\"\"Infer for ICNet model.\"\"\"\nfrom __future__ import print_function\nimport cityscape\nimport argparse\nimport functools\nimport sys\nimport os\nimport cv2\n\nimport paddle.fluid as fluid\nimport paddle\nfrom icnet import icnet\nfrom utils import add_arguments, print_arguments, get_feeder_data\nfrom paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter\nfrom paddle.fluid.initializer import init_on_cpu\nimport numpy as np\n\nIMG_MEAN = np.array((103.939, 116.779, 123.68), dtype=np.float32)\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('model_path', str, None, \"Model path.\")\nadd_arg('images_list', str, None, \"List file with images to be infered.\")\nadd_arg('images_path', str, None, \"The images path.\")\nadd_arg('out_path', str, \"./output\", \"Output path.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU to test.\")\n# yapf: enable\n\ndata_shape = [3, 1024, 2048]\nnum_classes = 19\n\nlabel_colours = [\n [128, 64, 128],\n [244, 35, 231],\n [69, 69, 69]\n # 0 = road, 1 = sidewalk, 2 = building\n ,\n [102, 102, 156],\n [190, 153, 153],\n [153, 153, 153]\n # 3 = wall, 4 = fence, 5 = pole\n ,\n [250, 170, 29],\n [219, 219, 0],\n [106, 142, 35]\n # 6 = traffic light, 7 = traffic sign, 8 = vegetation\n ,\n [152, 250, 152],\n [69, 129, 180],\n [219, 19, 60]\n # 9 = terrain, 10 = sky, 11 = person\n ,\n [255, 0, 0],\n [0, 0, 142],\n [0, 0, 69]\n # 12 = rider, 13 = car, 14 = truck\n ,\n [0, 60, 100],\n [0, 79, 100],\n [0, 0, 230]\n # 15 = bus, 16 = train, 17 = motocycle\n ,\n [119, 10, 32]\n]\n\n# 18 = bicycle\n\n\ndef color(input):\n 
\"\"\"\n Convert infered result to color image.\n \"\"\"\n result = []\n for i in input.flatten():\n result.append(\n [label_colours[i][2], label_colours[i][1], label_colours[i][0]])\n result = np.array(result).reshape([input.shape[0], input.shape[1], 3])\n return result\n\n\ndef infer(args):\n data_shape = cityscape.test_data_shape()\n num_classes = cityscape.num_classes()\n # define network\n images = fluid.layers.data(name='image', shape=data_shape, dtype='float32')\n _, _, sub124_out = icnet(images, num_classes,\n np.array(data_shape[1:]).astype(\"float32\"))\n predict = fluid.layers.resize_bilinear(\n sub124_out, out_shape=data_shape[1:3])\n predict = fluid.layers.transpose(predict, perm=[0, 2, 3, 1])\n predict = fluid.layers.reshape(predict, shape=[-1, num_classes])\n _, predict = fluid.layers.topk(predict, k=1)\n predict = fluid.layers.reshape(\n predict,\n shape=[data_shape[1], data_shape[2], -1]) # batch_size should be 1\n inference_program = fluid.default_main_program().clone(for_test=True)\n # prepare environment\n place = fluid.CPUPlace()\n if args.use_gpu:\n place = fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n assert os.path.exists(args.model_path)\n fluid.io.load_params(exe, args.model_path)\n print(\"loaded model from: %s\" % args.model_path)\n sys.stdout.flush()\n\n if not os.path.isdir(args.out_path):\n os.makedirs(args.out_path)\n\n for line in open(args.images_list):\n image_file = args.images_path + \"/\" + line.strip()\n filename = os.path.basename(image_file)\n image = paddle.dataset.image.load_image(\n image_file, is_color=True).astype(\"float32\")\n image -= IMG_MEAN\n img = paddle.dataset.image.to_chw(image)[np.newaxis, :]\n image_t = fluid.core.LoDTensor()\n image_t.set(img, place)\n result = exe.run(inference_program,\n feed={\"image\": image_t},\n fetch_list=[predict])\n cv2.imwrite(args.out_path + \"/\" + filename + \"_result.png\",\n color(result[0]))\n\n\ndef main():\n args = parser.parse_args()\n print_arguments(args)\n infer(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "PaddleCV/icnet/infer.py"}, {"content": "\"\"\"Trainer for ICNet model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom icnet import icnet\nimport cityscape\nimport argparse\nimport functools\nimport sys\nimport os\nimport time\nimport paddle.fluid as fluid\nimport numpy as np\nfrom utils import add_arguments, print_arguments, get_feeder_data\nfrom paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter\nfrom paddle.fluid.initializer import init_on_cpu\n\nif 'ce_mode' in os.environ:\n np.random.seed(10)\n fluid.default_startup_program().random_seed = 90\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('batch_size', int, 16, \"Minibatch size.\")\nadd_arg('checkpoint_path', str, None, \"Checkpoint svae path.\")\nadd_arg('init_model', str, None, \"Pretrain model path.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU to train.\")\nadd_arg('random_mirror', bool, True, \"Whether prepare by random mirror.\")\nadd_arg('random_scaling', bool, True, \"Whether prepare by random scaling.\")\n# yapf: enable\n\nLAMBDA1 = 0.16\nLAMBDA2 = 0.4\nLAMBDA3 = 1.0\nLEARNING_RATE = 0.003\nPOWER = 0.9\nLOG_PERIOD = 100\nCHECKPOINT_PERIOD = 100\nTOTAL_STEP = 100\n\nno_grad_set = []\n\n\ndef create_loss(predict, label, mask, num_classes):\n predict = 
fluid.layers.transpose(predict, perm=[0, 2, 3, 1])\n predict = fluid.layers.reshape(predict, shape=[-1, num_classes])\n label = fluid.layers.reshape(label, shape=[-1, 1])\n predict = fluid.layers.gather(predict, mask)\n label = fluid.layers.gather(label, mask)\n label = fluid.layers.cast(label, dtype=\"int64\")\n loss = fluid.layers.softmax_with_cross_entropy(predict, label)\n no_grad_set.append(label.name)\n return fluid.layers.reduce_mean(loss)\n\n\ndef poly_decay():\n global_step = _decay_step_counter()\n with init_on_cpu():\n decayed_lr = LEARNING_RATE * (fluid.layers.pow(\n (1 - global_step / TOTAL_STEP), POWER))\n return decayed_lr\n\n\ndef train(args):\n data_shape = cityscape.train_data_shape()\n num_classes = cityscape.num_classes()\n # define network\n images = fluid.layers.data(name='image', shape=data_shape, dtype='float32')\n label_sub1 = fluid.layers.data(name='label_sub1', shape=[1], dtype='int32')\n label_sub2 = fluid.layers.data(name='label_sub2', shape=[1], dtype='int32')\n label_sub4 = fluid.layers.data(name='label_sub4', shape=[1], dtype='int32')\n mask_sub1 = fluid.layers.data(name='mask_sub1', shape=[-1], dtype='int32')\n mask_sub2 = fluid.layers.data(name='mask_sub2', shape=[-1], dtype='int32')\n mask_sub4 = fluid.layers.data(name='mask_sub4', shape=[-1], dtype='int32')\n\n sub4_out, sub24_out, sub124_out = icnet(\n images, num_classes, np.array(data_shape[1:]).astype(\"float32\"))\n loss_sub4 = create_loss(sub4_out, label_sub4, mask_sub4, num_classes)\n loss_sub24 = create_loss(sub24_out, label_sub2, mask_sub2, num_classes)\n loss_sub124 = create_loss(sub124_out, label_sub1, mask_sub1, num_classes)\n reduced_loss = LAMBDA1 * loss_sub4 + LAMBDA2 * loss_sub24 + LAMBDA3 * loss_sub124\n\n regularizer = fluid.regularizer.L2Decay(0.0001)\n optimizer = fluid.optimizer.Momentum(\n learning_rate=poly_decay(), momentum=0.9, regularization=regularizer)\n _, params_grads = optimizer.minimize(reduced_loss, no_grad_set=no_grad_set)\n\n # prepare environment\n place = fluid.CPUPlace()\n if args.use_gpu:\n place = fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n\n exe.run(fluid.default_startup_program())\n\n if args.init_model is not None:\n print(\"load model from: %s\" % args.init_model)\n sys.stdout.flush()\n fluid.io.load_params(exe, args.init_model)\n\n iter_id = 0\n t_loss = 0.\n sub4_loss = 0.\n sub24_loss = 0.\n sub124_loss = 0.\n train_reader = cityscape.train(\n args.batch_size, flip=args.random_mirror, scaling=args.random_scaling)\n start_time = time.time()\n while True:\n # train a pass\n for data in train_reader():\n if iter_id > TOTAL_STEP:\n end_time = time.time()\n print(\"kpis\ttrain_duration\t%f\" % (end_time - start_time))\n return\n iter_id += 1\n results = exe.run(\n feed=get_feeder_data(data, place),\n fetch_list=[reduced_loss, loss_sub4, loss_sub24, loss_sub124])\n t_loss += results[0]\n sub4_loss += results[1]\n sub24_loss += results[2]\n sub124_loss += results[3]\n # training log\n if iter_id % LOG_PERIOD == 0:\n print(\n \"Iter[%d]; train loss: %.3f; sub4_loss: %.3f; sub24_loss: %.3f; sub124_loss: %.3f\"\n % (iter_id, t_loss / LOG_PERIOD, sub4_loss / LOG_PERIOD,\n sub24_loss / LOG_PERIOD, sub124_loss / LOG_PERIOD))\n print(\"kpis\ttrain_cost\t%f\" % (t_loss / LOG_PERIOD))\n\n t_loss = 0.\n sub4_loss = 0.\n sub24_loss = 0.\n sub124_loss = 0.\n sys.stdout.flush()\n\n if iter_id % CHECKPOINT_PERIOD == 0 and args.checkpoint_path is not None:\n dir_name = args.checkpoint_path + \"/\" + str(iter_id)\n fluid.io.save_persistables(exe, dirname=dir_name)\n 
print(\"Saved checkpoint: %s\" % (dir_name))\n\n\ndef main():\n args = parser.parse_args()\n print_arguments(args)\n train(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "PaddleCV/icnet/train.py"}], "after_files": [{"content": "\"\"\"Infer for ICNet model.\"\"\"\nfrom __future__ import print_function\nimport cityscape\nimport argparse\nimport functools\nimport sys\nimport os\nimport cv2\n\nimport paddle.fluid as fluid\nimport paddle\nfrom icnet import icnet\nfrom utils import add_arguments, print_arguments, get_feeder_data\nfrom paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter\nfrom paddle.fluid.initializer import init_on_cpu\nimport numpy as np\n\nIMG_MEAN = np.array((103.939, 116.779, 123.68), dtype=np.float32)\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('model_path', str, None, \"Model path.\")\nadd_arg('images_list', str, None, \"List file with images to be infered.\")\nadd_arg('images_path', str, None, \"The images path.\")\nadd_arg('out_path', str, \"./output\", \"Output path.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU to test.\")\n# yapf: enable\n\ndata_shape = [3, 1024, 2048]\nnum_classes = 19\n\nlabel_colours = [\n [128, 64, 128],\n [244, 35, 231],\n [69, 69, 69]\n # 0 = road, 1 = sidewalk, 2 = building\n ,\n [102, 102, 156],\n [190, 153, 153],\n [153, 153, 153]\n # 3 = wall, 4 = fence, 5 = pole\n ,\n [250, 170, 29],\n [219, 219, 0],\n [106, 142, 35]\n # 6 = traffic light, 7 = traffic sign, 8 = vegetation\n ,\n [152, 250, 152],\n [69, 129, 180],\n [219, 19, 60]\n # 9 = terrain, 10 = sky, 11 = person\n ,\n [255, 0, 0],\n [0, 0, 142],\n [0, 0, 69]\n # 12 = rider, 13 = car, 14 = truck\n ,\n [0, 60, 100],\n [0, 79, 100],\n [0, 0, 230]\n # 15 = bus, 16 = train, 17 = motocycle\n ,\n [119, 10, 32]\n]\n\n# 18 = bicycle\n\n\ndef color(input):\n \"\"\"\n Convert infered result to color image.\n \"\"\"\n result = []\n for i in input.flatten():\n result.append(\n [label_colours[i][2], label_colours[i][1], label_colours[i][0]])\n result = np.array(result).reshape([input.shape[0], input.shape[1], 3])\n return result\n\n\ndef infer(args):\n data_shape = cityscape.test_data_shape()\n num_classes = cityscape.num_classes()\n # define network\n images = fluid.layers.data(name='image', shape=data_shape, dtype='float32')\n _, _, sub124_out = icnet(images, num_classes,\n np.array(data_shape[1:]).astype(\"float32\"))\n predict = fluid.layers.resize_bilinear(\n sub124_out, out_shape=data_shape[1:3])\n predict = fluid.layers.transpose(predict, perm=[0, 2, 3, 1])\n predict = fluid.layers.reshape(predict, shape=[-1, num_classes])\n _, predict = fluid.layers.topk(predict, k=1)\n predict = fluid.layers.reshape(\n predict,\n shape=[data_shape[1], data_shape[2], -1]) # batch_size should be 1\n inference_program = fluid.default_main_program().clone(for_test=True)\n # prepare environment\n place = fluid.CPUPlace()\n if args.use_gpu:\n place = fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n assert os.path.exists(args.model_path)\n fluid.io.load_params(exe, args.model_path)\n print(\"loaded model from: %s\" % args.model_path)\n sys.stdout.flush()\n\n if not os.path.isdir(args.out_path):\n os.makedirs(args.out_path)\n\n for line in open(args.images_list):\n image_file = args.images_path + \"/\" + line.strip()\n filename = os.path.basename(image_file)\n image = paddle.dataset.image.load_image(\n image_file, 
is_color=True).astype(\"float32\")\n image -= IMG_MEAN\n img = paddle.dataset.image.to_chw(image)[np.newaxis, :]\n image_t = fluid.core.LoDTensor()\n image_t.set(img, place)\n result = exe.run(inference_program,\n feed={\"image\": image_t},\n fetch_list=[predict])\n cv2.imwrite(args.out_path + \"/\" + filename + \"_result.png\",\n color(result[0]))\n print(\"Saved images into: %s\" % args.out_path)\n\n\ndef main():\n args = parser.parse_args()\n print_arguments(args)\n infer(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "PaddleCV/icnet/infer.py"}, {"content": "\"\"\"Trainer for ICNet model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom icnet import icnet\nimport cityscape\nimport argparse\nimport functools\nimport sys\nimport os\nimport time\nimport paddle.fluid as fluid\nimport numpy as np\nfrom utils import add_arguments, print_arguments, get_feeder_data\nfrom paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter\nfrom paddle.fluid.initializer import init_on_cpu\n\nif 'ce_mode' in os.environ:\n np.random.seed(10)\n fluid.default_startup_program().random_seed = 90\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('batch_size', int, 16, \"Minibatch size.\")\nadd_arg('checkpoint_path', str, None, \"Checkpoint svae path.\")\nadd_arg('init_model', str, None, \"Pretrain model path.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU to train.\")\nadd_arg('random_mirror', bool, True, \"Whether prepare by random mirror.\")\nadd_arg('random_scaling', bool, True, \"Whether prepare by random scaling.\")\n# yapf: enable\n\nLAMBDA1 = 0.16\nLAMBDA2 = 0.4\nLAMBDA3 = 1.0\nLEARNING_RATE = 0.003\nPOWER = 0.9\nLOG_PERIOD = 100\nCHECKPOINT_PERIOD = 100\nTOTAL_STEP = 100\n\nno_grad_set = []\n\n\ndef create_loss(predict, label, mask, num_classes):\n predict = fluid.layers.transpose(predict, perm=[0, 2, 3, 1])\n predict = fluid.layers.reshape(predict, shape=[-1, num_classes])\n label = fluid.layers.reshape(label, shape=[-1, 1])\n predict = fluid.layers.gather(predict, mask)\n label = fluid.layers.gather(label, mask)\n label = fluid.layers.cast(label, dtype=\"int64\")\n loss = fluid.layers.softmax_with_cross_entropy(predict, label)\n no_grad_set.append(label.name)\n return fluid.layers.reduce_mean(loss)\n\n\ndef poly_decay():\n global_step = _decay_step_counter()\n with init_on_cpu():\n decayed_lr = LEARNING_RATE * (fluid.layers.pow(\n (1 - global_step / TOTAL_STEP), POWER))\n return decayed_lr\n\n\ndef train(args):\n data_shape = cityscape.train_data_shape()\n num_classes = cityscape.num_classes()\n # define network\n images = fluid.layers.data(name='image', shape=data_shape, dtype='float32')\n label_sub1 = fluid.layers.data(name='label_sub1', shape=[1], dtype='int32')\n label_sub2 = fluid.layers.data(name='label_sub2', shape=[1], dtype='int32')\n label_sub4 = fluid.layers.data(name='label_sub4', shape=[1], dtype='int32')\n mask_sub1 = fluid.layers.data(name='mask_sub1', shape=[-1], dtype='int32')\n mask_sub2 = fluid.layers.data(name='mask_sub2', shape=[-1], dtype='int32')\n mask_sub4 = fluid.layers.data(name='mask_sub4', shape=[-1], dtype='int32')\n\n sub4_out, sub24_out, sub124_out = icnet(\n images, num_classes, np.array(data_shape[1:]).astype(\"float32\"))\n loss_sub4 = create_loss(sub4_out, label_sub4, mask_sub4, num_classes)\n loss_sub24 = create_loss(sub24_out, label_sub2, mask_sub2, num_classes)\n 
loss_sub124 = create_loss(sub124_out, label_sub1, mask_sub1, num_classes)\n reduced_loss = LAMBDA1 * loss_sub4 + LAMBDA2 * loss_sub24 + LAMBDA3 * loss_sub124\n\n regularizer = fluid.regularizer.L2Decay(0.0001)\n optimizer = fluid.optimizer.Momentum(\n learning_rate=poly_decay(), momentum=0.9, regularization=regularizer)\n _, params_grads = optimizer.minimize(reduced_loss, no_grad_set=no_grad_set)\n\n # prepare environment\n place = fluid.CPUPlace()\n if args.use_gpu:\n place = fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n\n exe.run(fluid.default_startup_program())\n\n if args.init_model is not None:\n print(\"load model from: %s\" % args.init_model)\n\n def if_exist(var):\n return os.path.exists(os.path.join(args.init_model, var.name))\n\n fluid.io.load_vars(exe, args.init_model, predicate=if_exist)\n\n iter_id = 0\n t_loss = 0.\n sub4_loss = 0.\n sub24_loss = 0.\n sub124_loss = 0.\n train_reader = cityscape.train(\n args.batch_size, flip=args.random_mirror, scaling=args.random_scaling)\n start_time = time.time()\n while True:\n # train a pass\n for data in train_reader():\n if iter_id > TOTAL_STEP:\n end_time = time.time()\n print(\"kpis\ttrain_duration\t%f\" % (end_time - start_time))\n return\n iter_id += 1\n results = exe.run(\n feed=get_feeder_data(data, place),\n fetch_list=[reduced_loss, loss_sub4, loss_sub24, loss_sub124])\n t_loss += results[0]\n sub4_loss += results[1]\n sub24_loss += results[2]\n sub124_loss += results[3]\n # training log\n if iter_id % LOG_PERIOD == 0:\n print(\n \"Iter[%d]; train loss: %.3f; sub4_loss: %.3f; sub24_loss: %.3f; sub124_loss: %.3f\"\n % (iter_id, t_loss / LOG_PERIOD, sub4_loss / LOG_PERIOD,\n sub24_loss / LOG_PERIOD, sub124_loss / LOG_PERIOD))\n print(\"kpis\ttrain_cost\t%f\" % (t_loss / LOG_PERIOD))\n\n t_loss = 0.\n sub4_loss = 0.\n sub24_loss = 0.\n sub124_loss = 0.\n sys.stdout.flush()\n\n if iter_id % CHECKPOINT_PERIOD == 0 and args.checkpoint_path is not None:\n dir_name = args.checkpoint_path + \"/\" + str(iter_id)\n fluid.io.save_persistables(exe, dirname=dir_name)\n print(\"Saved checkpoint: %s\" % (dir_name))\n\n\ndef main():\n args = parser.parse_args()\n print_arguments(args)\n train(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "PaddleCV/icnet/train.py"}]}
4,050
261
gh_patches_debug_18682
rasdani/github-patches
git_diff
ycm-core__ycmd-482
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- YCM runs on large-ish non-source files, spews a backtrace when you enter insert mode STR: ``` # Create a file that's large, but not so large as to cause ycm to disable itself $ cd `mktemp -d` $ dd if=/dev/urandom of=test bs=500k count=1 $ vim test ``` Now enter insert mode. Result: YCM error spew, included below. I bisected the test file size and noticed that the error starts occurring somewhere between files of size 100k and 300k. ``` Traceback (most recent call last): File "<string>", line 1, in <module> File "/usr/lib/youcompleteme/autoload/../python/ycm/youcompleteme.py", line 255, in NativeFiletypeCompletionUsable self.NativeFiletypeCompletionAvailable() ) File "/usr/lib/youcompleteme/autoload/../python/ycm/youcompleteme.py", line 250, in NativeFiletypeCompletionAvailable vimsupport.CurrentFiletypes() ] ) File "/usr/lib/youcompleteme/autoload/../python/ycm/youcompleteme.py", line 240, in FiletypeCompleterExistsForFiletype exists_completer = SendCompleterAvailableRequest( filetype ) File "/usr/lib/youcompleteme/autoload/../python/ycm/client/completer_available_request.py", line 57, in SendCompleterAvailableRequest request.Start() File "/usr/lib/youcompleteme/autoload/../python/ycm/client/completer_available_request.py", line 45, in Start 'semantic_completion_available' ) File "/usr/lib/youcompleteme/autoload/../python/ycm/client/base_request.py", line 81, in PostDataToHandler timeout ) ) File "/usr/lib/youcompleteme/autoload/../python/ycm/client/base_request.py", line 177, in JsonFromFuture _ValidateResponseObject( response ) File "/usr/lib/youcompleteme/autoload/../python/ycm/client/base_request.py", line 206, in _ValidateResponseObject their_hmac = ToBytes( b64decode( response.headers[ _HMAC_HEADER ] ) ) File "/usr/lib/youcompleteme/third_party/ycmd/third_party/requests/requests/structures.py", line 54, in __getitem__ return self._store[key.lower()][1] KeyError: u'x-ycm-hmac' E858: Eval did not return a valid python object ``` +@r4nt --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ycmd/handlers.py` Content: ``` 1 # Copyright (C) 2013 Google Inc. 2 # 3 # This file is part of ycmd. 4 # 5 # ycmd is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # ycmd is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with ycmd. If not, see <http://www.gnu.org/licenses/>. 
17 18 from __future__ import absolute_import 19 from __future__ import unicode_literals 20 from __future__ import print_function 21 from __future__ import division 22 from future import standard_library 23 standard_library.install_aliases() 24 from builtins import * # noqa 25 26 import atexit 27 import bottle 28 import http.client 29 import json 30 import logging 31 import traceback 32 from bottle import request 33 34 import ycm_core 35 from ycmd import extra_conf_store, hmac_plugin, server_state, user_options_store 36 from ycmd.responses import BuildExceptionResponse, BuildCompletionResponse 37 from ycmd.request_wrap import RequestWrap 38 from ycmd.bottle_utils import SetResponseHeader 39 from ycmd.completers.completer_utils import FilterAndSortCandidatesWrap 40 41 42 # num bytes for the request body buffer; request.json only works if the request 43 # size is less than this 44 bottle.Request.MEMFILE_MAX = 1000 * 1024 45 46 _server_state = None 47 _hmac_secret = bytes() 48 _logger = logging.getLogger( __name__ ) 49 app = bottle.Bottle() 50 51 52 @app.post( '/event_notification' ) 53 def EventNotification(): 54 _logger.info( 'Received event notification' ) 55 request_data = RequestWrap( request.json ) 56 event_name = request_data[ 'event_name' ] 57 _logger.debug( 'Event name: %s', event_name ) 58 59 event_handler = 'On' + event_name 60 getattr( _server_state.GetGeneralCompleter(), event_handler )( request_data ) 61 62 filetypes = request_data[ 'filetypes' ] 63 response_data = None 64 if _server_state.FiletypeCompletionUsable( filetypes ): 65 response_data = getattr( _server_state.GetFiletypeCompleter( filetypes ), 66 event_handler )( request_data ) 67 68 if response_data: 69 return _JsonResponse( response_data ) 70 return _JsonResponse( {} ) 71 72 73 @app.post( '/run_completer_command' ) 74 def RunCompleterCommand(): 75 _logger.info( 'Received command request' ) 76 request_data = RequestWrap( request.json ) 77 completer = _GetCompleterForRequestData( request_data ) 78 79 return _JsonResponse( completer.OnUserCommand( 80 request_data[ 'command_arguments' ], 81 request_data ) ) 82 83 84 @app.post( '/completions' ) 85 def GetCompletions(): 86 _logger.info( 'Received completion request' ) 87 request_data = RequestWrap( request.json ) 88 ( do_filetype_completion, forced_filetype_completion ) = ( 89 _server_state.ShouldUseFiletypeCompleter( request_data ) ) 90 _logger.debug( 'Using filetype completion: %s', do_filetype_completion ) 91 92 errors = None 93 completions = None 94 95 if do_filetype_completion: 96 try: 97 completions = ( _server_state.GetFiletypeCompleter( 98 request_data[ 'filetypes' ] ) 99 .ComputeCandidates( request_data ) ) 100 101 except Exception as exception: 102 if forced_filetype_completion: 103 # user explicitly asked for semantic completion, so just pass the error 104 # back 105 raise 106 else: 107 # store the error to be returned with results from the identifier 108 # completer 109 stack = traceback.format_exc() 110 _logger.error( 'Exception from semantic completer (using general): ' + 111 "".join( stack ) ) 112 errors = [ BuildExceptionResponse( exception, stack ) ] 113 114 if not completions and not forced_filetype_completion: 115 completions = ( _server_state.GetGeneralCompleter() 116 .ComputeCandidates( request_data ) ) 117 118 return _JsonResponse( 119 BuildCompletionResponse( completions if completions else [], 120 request_data.CompletionStartColumn(), 121 errors = errors ) ) 122 123 124 @app.post( '/filter_and_sort_candidates' ) 125 def FilterAndSortCandidates(): 126 
_logger.info( 'Received filter & sort request' ) 127 # Not using RequestWrap because no need and the requests coming in aren't like 128 # the usual requests we handle. 129 request_data = request.json 130 131 return _JsonResponse( FilterAndSortCandidatesWrap( 132 request_data[ 'candidates'], 133 request_data[ 'sort_property' ], 134 request_data[ 'query' ] ) ) 135 136 137 @app.get( '/healthy' ) 138 def GetHealthy(): 139 _logger.info( 'Received health request' ) 140 if request.query.include_subservers: 141 cs_completer = _server_state.GetFiletypeCompleter( ['cs'] ) 142 return _JsonResponse( cs_completer.ServerIsHealthy() ) 143 return _JsonResponse( True ) 144 145 146 @app.get( '/ready' ) 147 def GetReady(): 148 _logger.info( 'Received ready request' ) 149 if request.query.subserver: 150 filetype = request.query.subserver 151 return _JsonResponse( _IsSubserverReady( filetype ) ) 152 if request.query.include_subservers: 153 return _JsonResponse( _IsSubserverReady( 'cs' ) ) 154 return _JsonResponse( True ) 155 156 157 def _IsSubserverReady( filetype ): 158 completer = _server_state.GetFiletypeCompleter( [filetype] ) 159 return completer.ServerIsReady() 160 161 162 @app.post( '/semantic_completion_available' ) 163 def FiletypeCompletionAvailable(): 164 _logger.info( 'Received filetype completion available request' ) 165 return _JsonResponse( _server_state.FiletypeCompletionAvailable( 166 RequestWrap( request.json )[ 'filetypes' ] ) ) 167 168 169 @app.post( '/defined_subcommands' ) 170 def DefinedSubcommands(): 171 _logger.info( 'Received defined subcommands request' ) 172 completer = _GetCompleterForRequestData( RequestWrap( request.json ) ) 173 174 return _JsonResponse( completer.DefinedSubcommands() ) 175 176 177 @app.post( '/detailed_diagnostic' ) 178 def GetDetailedDiagnostic(): 179 _logger.info( 'Received detailed diagnostic request' ) 180 request_data = RequestWrap( request.json ) 181 completer = _GetCompleterForRequestData( request_data ) 182 183 return _JsonResponse( completer.GetDetailedDiagnostic( request_data ) ) 184 185 186 @app.post( '/load_extra_conf_file' ) 187 def LoadExtraConfFile(): 188 _logger.info( 'Received extra conf load request' ) 189 request_data = RequestWrap( request.json, validate = False ) 190 extra_conf_store.Load( request_data[ 'filepath' ], force = True ) 191 192 193 @app.post( '/ignore_extra_conf_file' ) 194 def IgnoreExtraConfFile(): 195 _logger.info( 'Received extra conf ignore request' ) 196 request_data = RequestWrap( request.json, validate = False ) 197 extra_conf_store.Disable( request_data[ 'filepath' ] ) 198 199 200 @app.post( '/debug_info' ) 201 def DebugInfo(): 202 _logger.info( 'Received debug info request' ) 203 204 output = [] 205 has_clang_support = ycm_core.HasClangSupport() 206 output.append( 'Server has Clang support compiled in: {0}'.format( 207 has_clang_support ) ) 208 209 if has_clang_support: 210 output.append( 'Clang version: ' + ycm_core.ClangVersion() ) 211 212 request_data = RequestWrap( request.json ) 213 try: 214 output.append( 215 _GetCompleterForRequestData( request_data ).DebugInfo( request_data) ) 216 except Exception: 217 _logger.debug( 'Exception in debug info request: ' 218 + traceback.format_exc() ) 219 220 return _JsonResponse( '\n'.join( output ) ) 221 222 223 # The type of the param is Bottle.HTTPError 224 @app.error( http.client.INTERNAL_SERVER_ERROR ) 225 def ErrorHandler( httperror ): 226 body = _JsonResponse( BuildExceptionResponse( httperror.exception, 227 httperror.traceback ) ) 228 hmac_plugin.SetHmacHeader( body, 
_hmac_secret ) 229 return body 230 231 232 def _JsonResponse( data ): 233 SetResponseHeader( 'Content-Type', 'application/json' ) 234 return json.dumps( data, default = _UniversalSerialize ) 235 236 237 def _UniversalSerialize( obj ): 238 try: 239 serialized = obj.__dict__.copy() 240 serialized[ 'TYPE' ] = type( obj ).__name__ 241 return serialized 242 except AttributeError: 243 return str( obj ) 244 245 246 def _GetCompleterForRequestData( request_data ): 247 completer_target = request_data.get( 'completer_target', None ) 248 249 if completer_target == 'identifier': 250 return _server_state.GetGeneralCompleter().GetIdentifierCompleter() 251 elif completer_target == 'filetype_default' or not completer_target: 252 return _server_state.GetFiletypeCompleter( request_data[ 'filetypes' ] ) 253 else: 254 return _server_state.GetFiletypeCompleter( [ completer_target ] ) 255 256 257 @atexit.register 258 def ServerShutdown(): 259 _logger.info( 'Server shutting down' ) 260 if _server_state: 261 _server_state.Shutdown() 262 extra_conf_store.Shutdown() 263 264 265 def SetHmacSecret( hmac_secret ): 266 global _hmac_secret 267 _hmac_secret = hmac_secret 268 269 270 def UpdateUserOptions( options ): 271 global _server_state 272 273 if not options: 274 return 275 276 # This should never be passed in, but let's try to remove it just in case. 277 options.pop( 'hmac_secret', None ) 278 user_options_store.SetAll( options ) 279 _server_state = server_state.ServerState( options ) 280 281 282 def SetServerStateToDefaults(): 283 global _server_state, _logger 284 _logger = logging.getLogger( __name__ ) 285 user_options_store.LoadDefaults() 286 _server_state = server_state.ServerState( user_options_store.GetAll() ) 287 extra_conf_store.Reset() 288 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ycmd/handlers.py b/ycmd/handlers.py --- a/ycmd/handlers.py +++ b/ycmd/handlers.py @@ -25,7 +25,6 @@ import atexit import bottle -import http.client import json import logging import traceback @@ -221,13 +220,15 @@ # The type of the param is Bottle.HTTPError [email protected]( http.client.INTERNAL_SERVER_ERROR ) def ErrorHandler( httperror ): body = _JsonResponse( BuildExceptionResponse( httperror.exception, httperror.traceback ) ) hmac_plugin.SetHmacHeader( body, _hmac_secret ) return body +# For every error Bottle encounters it will use this as the default handler +app.default_error_handler = ErrorHandler + def _JsonResponse( data ): SetResponseHeader( 'Content-Type', 'application/json' )
{"golden_diff": "diff --git a/ycmd/handlers.py b/ycmd/handlers.py\n--- a/ycmd/handlers.py\n+++ b/ycmd/handlers.py\n@@ -25,7 +25,6 @@\n \n import atexit\n import bottle\n-import http.client\n import json\n import logging\n import traceback\n@@ -221,13 +220,15 @@\n \n \n # The type of the param is Bottle.HTTPError\[email protected]( http.client.INTERNAL_SERVER_ERROR )\n def ErrorHandler( httperror ):\n body = _JsonResponse( BuildExceptionResponse( httperror.exception,\n httperror.traceback ) )\n hmac_plugin.SetHmacHeader( body, _hmac_secret )\n return body\n \n+# For every error Bottle encounters it will use this as the default handler\n+app.default_error_handler = ErrorHandler\n+\n \n def _JsonResponse( data ):\n SetResponseHeader( 'Content-Type', 'application/json' )\n", "issue": "YCM runs on large-ish non-source files, spews a backtrace when you enter insert mode\nSTR:\n\n```\n# Create a file that's large, but not so large as to cause ycm to disable itself\n$ cd `mktemp -d`\n$ dd if=/dev/urandom of=test bs=500k count=1\n$ vim test\n```\n\nNow enter insert mode.\n\nResult: YCM error spew, included below.\n\nI bisected the test file size and noticed that the error starts occurring somewhere between files of size 100k and 300k.\n\n```\nTraceback (most recent call last):\n File \"<string>\", line 1, in <module>\n File \"/usr/lib/youcompleteme/autoload/../python/ycm/youcompleteme.py\",\nline 255, in NativeFiletypeCompletionUsable\n self.NativeFiletypeCompletionAvailable() )\n File \"/usr/lib/youcompleteme/autoload/../python/ycm/youcompleteme.py\",\nline 250, in NativeFiletypeCompletionAvailable\n vimsupport.CurrentFiletypes() ] )\n File \"/usr/lib/youcompleteme/autoload/../python/ycm/youcompleteme.py\",\nline 240, in FiletypeCompleterExistsForFiletype\n exists_completer = SendCompleterAvailableRequest( filetype )\n File \"/usr/lib/youcompleteme/autoload/../python/ycm/client/completer_available_request.py\",\nline 57, in SendCompleterAvailableRequest\n request.Start()\n File \"/usr/lib/youcompleteme/autoload/../python/ycm/client/completer_available_request.py\",\nline 45, in Start\n 'semantic_completion_available' )\n File \"/usr/lib/youcompleteme/autoload/../python/ycm/client/base_request.py\",\nline 81, in PostDataToHandler\n timeout ) )\n File \"/usr/lib/youcompleteme/autoload/../python/ycm/client/base_request.py\",\nline 177, in JsonFromFuture\n _ValidateResponseObject( response )\n File \"/usr/lib/youcompleteme/autoload/../python/ycm/client/base_request.py\",\nline 206, in _ValidateResponseObject\n their_hmac = ToBytes( b64decode( response.headers[ _HMAC_HEADER ] ) )\n File \"/usr/lib/youcompleteme/third_party/ycmd/third_party/requests/requests/structures.py\",\nline 54, in __getitem__\n return self._store[key.lower()][1]\nKeyError: u'x-ycm-hmac'\nE858: Eval did not return a valid python object\n\n```\n\n+@r4nt\n\n", "before_files": [{"content": "# Copyright (C) 2013 Google Inc.\n#\n# This file is part of ycmd.\n#\n# ycmd is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ycmd is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with ycmd. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import * # noqa\n\nimport atexit\nimport bottle\nimport http.client\nimport json\nimport logging\nimport traceback\nfrom bottle import request\n\nimport ycm_core\nfrom ycmd import extra_conf_store, hmac_plugin, server_state, user_options_store\nfrom ycmd.responses import BuildExceptionResponse, BuildCompletionResponse\nfrom ycmd.request_wrap import RequestWrap\nfrom ycmd.bottle_utils import SetResponseHeader\nfrom ycmd.completers.completer_utils import FilterAndSortCandidatesWrap\n\n\n# num bytes for the request body buffer; request.json only works if the request\n# size is less than this\nbottle.Request.MEMFILE_MAX = 1000 * 1024\n\n_server_state = None\n_hmac_secret = bytes()\n_logger = logging.getLogger( __name__ )\napp = bottle.Bottle()\n\n\[email protected]( '/event_notification' )\ndef EventNotification():\n _logger.info( 'Received event notification' )\n request_data = RequestWrap( request.json )\n event_name = request_data[ 'event_name' ]\n _logger.debug( 'Event name: %s', event_name )\n\n event_handler = 'On' + event_name\n getattr( _server_state.GetGeneralCompleter(), event_handler )( request_data )\n\n filetypes = request_data[ 'filetypes' ]\n response_data = None\n if _server_state.FiletypeCompletionUsable( filetypes ):\n response_data = getattr( _server_state.GetFiletypeCompleter( filetypes ),\n event_handler )( request_data )\n\n if response_data:\n return _JsonResponse( response_data )\n return _JsonResponse( {} )\n\n\[email protected]( '/run_completer_command' )\ndef RunCompleterCommand():\n _logger.info( 'Received command request' )\n request_data = RequestWrap( request.json )\n completer = _GetCompleterForRequestData( request_data )\n\n return _JsonResponse( completer.OnUserCommand(\n request_data[ 'command_arguments' ],\n request_data ) )\n\n\[email protected]( '/completions' )\ndef GetCompletions():\n _logger.info( 'Received completion request' )\n request_data = RequestWrap( request.json )\n ( do_filetype_completion, forced_filetype_completion ) = (\n _server_state.ShouldUseFiletypeCompleter( request_data ) )\n _logger.debug( 'Using filetype completion: %s', do_filetype_completion )\n\n errors = None\n completions = None\n\n if do_filetype_completion:\n try:\n completions = ( _server_state.GetFiletypeCompleter(\n request_data[ 'filetypes' ] )\n .ComputeCandidates( request_data ) )\n\n except Exception as exception:\n if forced_filetype_completion:\n # user explicitly asked for semantic completion, so just pass the error\n # back\n raise\n else:\n # store the error to be returned with results from the identifier\n # completer\n stack = traceback.format_exc()\n _logger.error( 'Exception from semantic completer (using general): ' +\n \"\".join( stack ) )\n errors = [ BuildExceptionResponse( exception, stack ) ]\n\n if not completions and not forced_filetype_completion:\n completions = ( _server_state.GetGeneralCompleter()\n .ComputeCandidates( request_data ) )\n\n return _JsonResponse(\n BuildCompletionResponse( completions if completions else [],\n request_data.CompletionStartColumn(),\n errors = errors ) )\n\n\[email protected]( '/filter_and_sort_candidates' )\ndef FilterAndSortCandidates():\n _logger.info( 'Received filter & sort request' )\n # Not using RequestWrap because no need and 
the requests coming in aren't like\n # the usual requests we handle.\n request_data = request.json\n\n return _JsonResponse( FilterAndSortCandidatesWrap(\n request_data[ 'candidates'],\n request_data[ 'sort_property' ],\n request_data[ 'query' ] ) )\n\n\[email protected]( '/healthy' )\ndef GetHealthy():\n _logger.info( 'Received health request' )\n if request.query.include_subservers:\n cs_completer = _server_state.GetFiletypeCompleter( ['cs'] )\n return _JsonResponse( cs_completer.ServerIsHealthy() )\n return _JsonResponse( True )\n\n\[email protected]( '/ready' )\ndef GetReady():\n _logger.info( 'Received ready request' )\n if request.query.subserver:\n filetype = request.query.subserver\n return _JsonResponse( _IsSubserverReady( filetype ) )\n if request.query.include_subservers:\n return _JsonResponse( _IsSubserverReady( 'cs' ) )\n return _JsonResponse( True )\n\n\ndef _IsSubserverReady( filetype ):\n completer = _server_state.GetFiletypeCompleter( [filetype] )\n return completer.ServerIsReady()\n\n\[email protected]( '/semantic_completion_available' )\ndef FiletypeCompletionAvailable():\n _logger.info( 'Received filetype completion available request' )\n return _JsonResponse( _server_state.FiletypeCompletionAvailable(\n RequestWrap( request.json )[ 'filetypes' ] ) )\n\n\[email protected]( '/defined_subcommands' )\ndef DefinedSubcommands():\n _logger.info( 'Received defined subcommands request' )\n completer = _GetCompleterForRequestData( RequestWrap( request.json ) )\n\n return _JsonResponse( completer.DefinedSubcommands() )\n\n\[email protected]( '/detailed_diagnostic' )\ndef GetDetailedDiagnostic():\n _logger.info( 'Received detailed diagnostic request' )\n request_data = RequestWrap( request.json )\n completer = _GetCompleterForRequestData( request_data )\n\n return _JsonResponse( completer.GetDetailedDiagnostic( request_data ) )\n\n\[email protected]( '/load_extra_conf_file' )\ndef LoadExtraConfFile():\n _logger.info( 'Received extra conf load request' )\n request_data = RequestWrap( request.json, validate = False )\n extra_conf_store.Load( request_data[ 'filepath' ], force = True )\n\n\[email protected]( '/ignore_extra_conf_file' )\ndef IgnoreExtraConfFile():\n _logger.info( 'Received extra conf ignore request' )\n request_data = RequestWrap( request.json, validate = False )\n extra_conf_store.Disable( request_data[ 'filepath' ] )\n\n\[email protected]( '/debug_info' )\ndef DebugInfo():\n _logger.info( 'Received debug info request' )\n\n output = []\n has_clang_support = ycm_core.HasClangSupport()\n output.append( 'Server has Clang support compiled in: {0}'.format(\n has_clang_support ) )\n\n if has_clang_support:\n output.append( 'Clang version: ' + ycm_core.ClangVersion() )\n\n request_data = RequestWrap( request.json )\n try:\n output.append(\n _GetCompleterForRequestData( request_data ).DebugInfo( request_data) )\n except Exception:\n _logger.debug( 'Exception in debug info request: '\n + traceback.format_exc() )\n\n return _JsonResponse( '\\n'.join( output ) )\n\n\n# The type of the param is Bottle.HTTPError\[email protected]( http.client.INTERNAL_SERVER_ERROR )\ndef ErrorHandler( httperror ):\n body = _JsonResponse( BuildExceptionResponse( httperror.exception,\n httperror.traceback ) )\n hmac_plugin.SetHmacHeader( body, _hmac_secret )\n return body\n\n\ndef _JsonResponse( data ):\n SetResponseHeader( 'Content-Type', 'application/json' )\n return json.dumps( data, default = _UniversalSerialize )\n\n\ndef _UniversalSerialize( obj ):\n try:\n serialized = obj.__dict__.copy()\n 
serialized[ 'TYPE' ] = type( obj ).__name__\n return serialized\n except AttributeError:\n return str( obj )\n\n\ndef _GetCompleterForRequestData( request_data ):\n completer_target = request_data.get( 'completer_target', None )\n\n if completer_target == 'identifier':\n return _server_state.GetGeneralCompleter().GetIdentifierCompleter()\n elif completer_target == 'filetype_default' or not completer_target:\n return _server_state.GetFiletypeCompleter( request_data[ 'filetypes' ] )\n else:\n return _server_state.GetFiletypeCompleter( [ completer_target ] )\n\n\[email protected]\ndef ServerShutdown():\n _logger.info( 'Server shutting down' )\n if _server_state:\n _server_state.Shutdown()\n extra_conf_store.Shutdown()\n\n\ndef SetHmacSecret( hmac_secret ):\n global _hmac_secret\n _hmac_secret = hmac_secret\n\n\ndef UpdateUserOptions( options ):\n global _server_state\n\n if not options:\n return\n\n # This should never be passed in, but let's try to remove it just in case.\n options.pop( 'hmac_secret', None )\n user_options_store.SetAll( options )\n _server_state = server_state.ServerState( options )\n\n\ndef SetServerStateToDefaults():\n global _server_state, _logger\n _logger = logging.getLogger( __name__ )\n user_options_store.LoadDefaults()\n _server_state = server_state.ServerState( user_options_store.GetAll() )\n extra_conf_store.Reset()\n", "path": "ycmd/handlers.py"}], "after_files": [{"content": "# Copyright (C) 2013 Google Inc.\n#\n# This file is part of ycmd.\n#\n# ycmd is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ycmd is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with ycmd. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import * # noqa\n\nimport atexit\nimport bottle\nimport json\nimport logging\nimport traceback\nfrom bottle import request\n\nimport ycm_core\nfrom ycmd import extra_conf_store, hmac_plugin, server_state, user_options_store\nfrom ycmd.responses import BuildExceptionResponse, BuildCompletionResponse\nfrom ycmd.request_wrap import RequestWrap\nfrom ycmd.bottle_utils import SetResponseHeader\nfrom ycmd.completers.completer_utils import FilterAndSortCandidatesWrap\n\n\n# num bytes for the request body buffer; request.json only works if the request\n# size is less than this\nbottle.Request.MEMFILE_MAX = 1000 * 1024\n\n_server_state = None\n_hmac_secret = bytes()\n_logger = logging.getLogger( __name__ )\napp = bottle.Bottle()\n\n\[email protected]( '/event_notification' )\ndef EventNotification():\n _logger.info( 'Received event notification' )\n request_data = RequestWrap( request.json )\n event_name = request_data[ 'event_name' ]\n _logger.debug( 'Event name: %s', event_name )\n\n event_handler = 'On' + event_name\n getattr( _server_state.GetGeneralCompleter(), event_handler )( request_data )\n\n filetypes = request_data[ 'filetypes' ]\n response_data = None\n if _server_state.FiletypeCompletionUsable( filetypes ):\n response_data = getattr( _server_state.GetFiletypeCompleter( filetypes ),\n event_handler )( request_data )\n\n if response_data:\n return _JsonResponse( response_data )\n return _JsonResponse( {} )\n\n\[email protected]( '/run_completer_command' )\ndef RunCompleterCommand():\n _logger.info( 'Received command request' )\n request_data = RequestWrap( request.json )\n completer = _GetCompleterForRequestData( request_data )\n\n return _JsonResponse( completer.OnUserCommand(\n request_data[ 'command_arguments' ],\n request_data ) )\n\n\[email protected]( '/completions' )\ndef GetCompletions():\n _logger.info( 'Received completion request' )\n request_data = RequestWrap( request.json )\n ( do_filetype_completion, forced_filetype_completion ) = (\n _server_state.ShouldUseFiletypeCompleter( request_data ) )\n _logger.debug( 'Using filetype completion: %s', do_filetype_completion )\n\n errors = None\n completions = None\n\n if do_filetype_completion:\n try:\n completions = ( _server_state.GetFiletypeCompleter(\n request_data[ 'filetypes' ] )\n .ComputeCandidates( request_data ) )\n\n except Exception as exception:\n if forced_filetype_completion:\n # user explicitly asked for semantic completion, so just pass the error\n # back\n raise\n else:\n # store the error to be returned with results from the identifier\n # completer\n stack = traceback.format_exc()\n _logger.error( 'Exception from semantic completer (using general): ' +\n \"\".join( stack ) )\n errors = [ BuildExceptionResponse( exception, stack ) ]\n\n if not completions and not forced_filetype_completion:\n completions = ( _server_state.GetGeneralCompleter()\n .ComputeCandidates( request_data ) )\n\n return _JsonResponse(\n BuildCompletionResponse( completions if completions else [],\n request_data.CompletionStartColumn(),\n errors = errors ) )\n\n\[email protected]( '/filter_and_sort_candidates' )\ndef FilterAndSortCandidates():\n _logger.info( 'Received filter & sort request' )\n # Not using RequestWrap because no need and the requests coming in 
aren't like\n # the usual requests we handle.\n request_data = request.json\n\n return _JsonResponse( FilterAndSortCandidatesWrap(\n request_data[ 'candidates'],\n request_data[ 'sort_property' ],\n request_data[ 'query' ] ) )\n\n\[email protected]( '/healthy' )\ndef GetHealthy():\n _logger.info( 'Received health request' )\n if request.query.include_subservers:\n cs_completer = _server_state.GetFiletypeCompleter( ['cs'] )\n return _JsonResponse( cs_completer.ServerIsHealthy() )\n return _JsonResponse( True )\n\n\[email protected]( '/ready' )\ndef GetReady():\n _logger.info( 'Received ready request' )\n if request.query.subserver:\n filetype = request.query.subserver\n return _JsonResponse( _IsSubserverReady( filetype ) )\n if request.query.include_subservers:\n return _JsonResponse( _IsSubserverReady( 'cs' ) )\n return _JsonResponse( True )\n\n\ndef _IsSubserverReady( filetype ):\n completer = _server_state.GetFiletypeCompleter( [filetype] )\n return completer.ServerIsReady()\n\n\[email protected]( '/semantic_completion_available' )\ndef FiletypeCompletionAvailable():\n _logger.info( 'Received filetype completion available request' )\n return _JsonResponse( _server_state.FiletypeCompletionAvailable(\n RequestWrap( request.json )[ 'filetypes' ] ) )\n\n\[email protected]( '/defined_subcommands' )\ndef DefinedSubcommands():\n _logger.info( 'Received defined subcommands request' )\n completer = _GetCompleterForRequestData( RequestWrap( request.json ) )\n\n return _JsonResponse( completer.DefinedSubcommands() )\n\n\[email protected]( '/detailed_diagnostic' )\ndef GetDetailedDiagnostic():\n _logger.info( 'Received detailed diagnostic request' )\n request_data = RequestWrap( request.json )\n completer = _GetCompleterForRequestData( request_data )\n\n return _JsonResponse( completer.GetDetailedDiagnostic( request_data ) )\n\n\[email protected]( '/load_extra_conf_file' )\ndef LoadExtraConfFile():\n _logger.info( 'Received extra conf load request' )\n request_data = RequestWrap( request.json, validate = False )\n extra_conf_store.Load( request_data[ 'filepath' ], force = True )\n\n\[email protected]( '/ignore_extra_conf_file' )\ndef IgnoreExtraConfFile():\n _logger.info( 'Received extra conf ignore request' )\n request_data = RequestWrap( request.json, validate = False )\n extra_conf_store.Disable( request_data[ 'filepath' ] )\n\n\[email protected]( '/debug_info' )\ndef DebugInfo():\n _logger.info( 'Received debug info request' )\n\n output = []\n has_clang_support = ycm_core.HasClangSupport()\n output.append( 'Server has Clang support compiled in: {0}'.format(\n has_clang_support ) )\n\n if has_clang_support:\n output.append( 'Clang version: ' + ycm_core.ClangVersion() )\n\n request_data = RequestWrap( request.json )\n try:\n output.append(\n _GetCompleterForRequestData( request_data ).DebugInfo( request_data) )\n except Exception:\n _logger.debug( 'Exception in debug info request: '\n + traceback.format_exc() )\n\n return _JsonResponse( '\\n'.join( output ) )\n\n\n# The type of the param is Bottle.HTTPError\ndef ErrorHandler( httperror ):\n body = _JsonResponse( BuildExceptionResponse( httperror.exception,\n httperror.traceback ) )\n hmac_plugin.SetHmacHeader( body, _hmac_secret )\n return body\n\n# For every error Bottle encounters it will use this as the default handler\napp.default_error_handler = ErrorHandler\n\n\ndef _JsonResponse( data ):\n SetResponseHeader( 'Content-Type', 'application/json' )\n return json.dumps( data, default = _UniversalSerialize )\n\n\ndef _UniversalSerialize( obj 
):\n try:\n serialized = obj.__dict__.copy()\n serialized[ 'TYPE' ] = type( obj ).__name__\n return serialized\n except AttributeError:\n return str( obj )\n\n\ndef _GetCompleterForRequestData( request_data ):\n completer_target = request_data.get( 'completer_target', None )\n\n if completer_target == 'identifier':\n return _server_state.GetGeneralCompleter().GetIdentifierCompleter()\n elif completer_target == 'filetype_default' or not completer_target:\n return _server_state.GetFiletypeCompleter( request_data[ 'filetypes' ] )\n else:\n return _server_state.GetFiletypeCompleter( [ completer_target ] )\n\n\[email protected]\ndef ServerShutdown():\n _logger.info( 'Server shutting down' )\n if _server_state:\n _server_state.Shutdown()\n extra_conf_store.Shutdown()\n\n\ndef SetHmacSecret( hmac_secret ):\n global _hmac_secret\n _hmac_secret = hmac_secret\n\n\ndef UpdateUserOptions( options ):\n global _server_state\n\n if not options:\n return\n\n # This should never be passed in, but let's try to remove it just in case.\n options.pop( 'hmac_secret', None )\n user_options_store.SetAll( options )\n _server_state = server_state.ServerState( options )\n\n\ndef SetServerStateToDefaults():\n global _server_state, _logger\n _logger = logging.getLogger( __name__ )\n user_options_store.LoadDefaults()\n _server_state = server_state.ServerState( user_options_store.GetAll() )\n extra_conf_store.Reset()\n", "path": "ycmd/handlers.py"}]}
3,843
205
gh_patches_debug_27776
rasdani/github-patches
git_diff
cisagov__manage.get.gov-1296
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Move first_login logic so that it occurs on all logins ### Issue description Currently, we have a function (first_login in User.py) that is triggered when it is the users first time logging into our system. In this function, we are checking 1.) are they part of a transition domain, if so make sure there is a domain inviation for them and a domain information object, 2) for all domain invitations they have, we add a user domain role to their account so they can access the domains. The problem is when it comes time for transition, if anyone logs into our system BEFORE we run the migration scripts then they will already have a User row and account on our system and they will not trigger the checks above. This is a bit of a race condition that is easily avoided by just having the first_login logic triggered on every login. ### Acceptance criteria - [ ] rename first_login and all uses of it (as it is no longer used just at first login - [ ] move the formerly named first_login to be called for all users. - [ ] add or update existing unit tests ### Additional context To accomplish the second AC above I suggest moving the highlight lint (user.first_login()) in the backend.py Authenticate function to right above the return on line 59 so it is called on each user. See picture below: ![image](https://github.com/cisagov/getgov/assets/109625347/39ca23f7-d547-4ce4-b32d-4f55e4aee4d9) ### Links to other issues _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/djangooidc/backends.py` Content: ``` 1 # coding: utf-8 2 from __future__ import unicode_literals 3 4 import logging 5 6 from django.conf import settings 7 from django.contrib.auth import get_user_model 8 from django.contrib.auth.backends import ModelBackend 9 from django.utils import timezone 10 11 logger = logging.getLogger(__name__) 12 13 14 class OpenIdConnectBackend(ModelBackend): 15 """ 16 This backend checks a previously performed OIDC authentication. 17 If it is OK and the user already exists in the database, it is returned. 18 If it is OK and user does not exist in the database, it is created and 19 returned unless setting OIDC_CREATE_UNKNOWN_USER is False. 20 In all other cases, None is returned. 21 """ 22 23 def authenticate(self, request, **kwargs): 24 logger.debug("kwargs %s" % kwargs) 25 user = None 26 if not kwargs or "sub" not in kwargs.keys(): 27 return user 28 29 UserModel = get_user_model() 30 username = self.clean_username(kwargs["sub"]) 31 32 # Some OP may actually choose to withhold some information, so we must 33 # test if it is present 34 openid_data = {"last_login": timezone.now()} 35 openid_data["first_name"] = kwargs.get("given_name", "") 36 openid_data["last_name"] = kwargs.get("family_name", "") 37 openid_data["email"] = kwargs.get("email", "") 38 openid_data["phone"] = kwargs.get("phone", "") 39 40 # Note that this could be accomplished in one try-except clause, but 41 # instead we use get_or_create when creating unknown users since it has 42 # built-in safeguards for multiple threads. 
43 if getattr(settings, "OIDC_CREATE_UNKNOWN_USER", True): 44 args = { 45 UserModel.USERNAME_FIELD: username, 46 # defaults _will_ be updated, these are not fallbacks 47 "defaults": openid_data, 48 } 49 user, created = UserModel.objects.update_or_create(**args) 50 if created: 51 user = self.configure_user(user, **kwargs) 52 # run a newly created user's callback for a first-time login 53 user.first_login() 54 else: 55 try: 56 user = UserModel.objects.get_by_natural_key(username) 57 except UserModel.DoesNotExist: 58 return None 59 return user 60 61 def clean_username(self, username): 62 """ 63 Performs any cleaning on the "username" prior to using it to get or 64 create the user object. Returns the cleaned username. 65 """ 66 return username 67 68 def configure_user(self, user, **kwargs): 69 """ 70 Configures a user after creation and returns the updated user. 71 """ 72 user.set_unusable_password() 73 return user 74 ``` Path: `src/registrar/models/user.py` Content: ``` 1 import logging 2 3 from django.contrib.auth.models import AbstractUser 4 from django.db import models 5 6 from .domain_invitation import DomainInvitation 7 from .transition_domain import TransitionDomain 8 from .domain_information import DomainInformation 9 from .domain import Domain 10 11 from phonenumber_field.modelfields import PhoneNumberField # type: ignore 12 13 14 logger = logging.getLogger(__name__) 15 16 17 class User(AbstractUser): 18 """ 19 A custom user model that performs identically to the default user model 20 but can be customized later. 21 """ 22 23 # #### Constants for choice fields #### 24 RESTRICTED = "restricted" 25 STATUS_CHOICES = ((RESTRICTED, RESTRICTED),) 26 27 status = models.CharField( 28 max_length=10, 29 choices=STATUS_CHOICES, 30 default=None, # Set the default value to None 31 null=True, # Allow the field to be null 32 blank=True, # Allow the field to be blank 33 ) 34 35 domains = models.ManyToManyField( 36 "registrar.Domain", 37 through="registrar.UserDomainRole", 38 related_name="users", 39 ) 40 41 phone = PhoneNumberField( 42 null=True, 43 blank=True, 44 help_text="Phone", 45 db_index=True, 46 ) 47 48 def __str__(self): 49 # this info is pulled from Login.gov 50 if self.first_name or self.last_name: 51 return f"{self.first_name or ''} {self.last_name or ''} {self.email or ''}" 52 elif self.email: 53 return self.email 54 else: 55 return self.username 56 57 def restrict_user(self): 58 self.status = self.RESTRICTED 59 self.save() 60 61 def unrestrict_user(self): 62 self.status = None 63 self.save() 64 65 def is_restricted(self): 66 return self.status == self.RESTRICTED 67 68 def check_domain_invitations_on_login(self): 69 """When a user first arrives on the site, we need to retrieve any domain 70 invitations that match their email address.""" 71 for invitation in DomainInvitation.objects.filter( 72 email=self.email, status=DomainInvitation.INVITED 73 ): 74 try: 75 invitation.retrieve() 76 invitation.save() 77 except RuntimeError: 78 # retrieving should not fail because of a missing user, but 79 # if it does fail, log the error so a new user can continue 80 # logging in 81 logger.warn( 82 "Failed to retrieve invitation %s", invitation, exc_info=True 83 ) 84 85 def create_domain_and_invite(self, transition_domain: TransitionDomain): 86 transition_domain_name = transition_domain.domain_name 87 transition_domain_status = transition_domain.status 88 transition_domain_email = transition_domain.username 89 90 # type safety check. 
name should never be none 91 if transition_domain_name is not None: 92 new_domain = Domain( 93 name=transition_domain_name, state=transition_domain_status 94 ) 95 new_domain.save() 96 # check that a domain invitation doesn't already 97 # exist for this e-mail / Domain pair 98 domain_email_already_in_domain_invites = DomainInvitation.objects.filter( 99 email=transition_domain_email.lower(), domain=new_domain 100 ).exists() 101 if not domain_email_already_in_domain_invites: 102 # Create new domain invitation 103 new_domain_invitation = DomainInvitation( 104 email=transition_domain_email.lower(), domain=new_domain 105 ) 106 new_domain_invitation.save() 107 108 def check_transition_domains_on_login(self): 109 """When a user first arrives on the site, we need to check 110 if they are logging in with the same e-mail as a 111 transition domain and update our database accordingly.""" 112 113 for transition_domain in TransitionDomain.objects.filter(username=self.email): 114 # Looks like the user logged in with the same e-mail as 115 # one or more corresponding transition domains. 116 # Create corresponding DomainInformation objects. 117 118 # NOTE: adding an ADMIN user role for this user 119 # for each domain should already be done 120 # in the invitation.retrieve() method. 121 # However, if the migration scripts for transition 122 # domain objects were not executed correctly, 123 # there could be transition domains without 124 # any corresponding Domain & DomainInvitation objects, 125 # which means the invitation.retrieve() method might 126 # not execute. 127 # Check that there is a corresponding domain object 128 # for this transition domain. If not, we have an error 129 # with our data and migrations need to be run again. 130 131 # Get the domain that corresponds with this transition domain 132 domain_exists = Domain.objects.filter( 133 name=transition_domain.domain_name 134 ).exists() 135 if not domain_exists: 136 logger.warn( 137 """There are transition domains without 138 corresponding domain objects! 139 Please run migration scripts for transition domains 140 (See data_migration.md)""" 141 ) 142 # No need to throw an exception...just create a domain 143 # and domain invite, then proceed as normal 144 self.create_domain_and_invite(transition_domain) 145 146 domain = Domain.objects.get(name=transition_domain.domain_name) 147 148 # Create a domain information object, if one doesn't 149 # already exist 150 domain_info_exists = DomainInformation.objects.filter( 151 domain=domain 152 ).exists() 153 if not domain_info_exists: 154 new_domain_info = DomainInformation(creator=self, domain=domain) 155 new_domain_info.save() 156 157 def first_login(self): 158 """Callback when the user is authenticated for the very first time. 159 160 When a user first arrives on the site, we need to retrieve any domain 161 invitations that match their email address. 162 163 We also need to check if they are logging in with the same e-mail 164 as a transition domain and update our domainInfo objects accordingly. 165 """ 166 167 # PART 1: TRANSITION DOMAINS 168 # 169 # NOTE: THIS MUST RUN FIRST 170 # (If we have an issue where transition domains were 171 # not fully converted into Domain and DomainInvitation 172 # objects, this method will fill in the gaps. 
173 # This will ensure the Domain Invitations method 174 # runs correctly (no missing invites)) 175 self.check_transition_domains_on_login() 176 177 # PART 2: DOMAIN INVITATIONS 178 self.check_domain_invitations_on_login() 179 180 class Meta: 181 permissions = [ 182 ("analyst_access_permission", "Analyst Access Permission"), 183 ("full_access_permission", "Full Access Permission"), 184 ] 185 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/djangooidc/backends.py b/src/djangooidc/backends.py --- a/src/djangooidc/backends.py +++ b/src/djangooidc/backends.py @@ -49,13 +49,13 @@ user, created = UserModel.objects.update_or_create(**args) if created: user = self.configure_user(user, **kwargs) - # run a newly created user's callback for a first-time login - user.first_login() else: try: user = UserModel.objects.get_by_natural_key(username) except UserModel.DoesNotExist: return None + # run this callback for a each login + user.on_each_login() return user def clean_username(self, username): diff --git a/src/registrar/models/user.py b/src/registrar/models/user.py --- a/src/registrar/models/user.py +++ b/src/registrar/models/user.py @@ -154,10 +154,10 @@ new_domain_info = DomainInformation(creator=self, domain=domain) new_domain_info.save() - def first_login(self): - """Callback when the user is authenticated for the very first time. + def on_each_login(self): + """Callback each time the user is authenticated. - When a user first arrives on the site, we need to retrieve any domain + When a user arrives on the site each time, we need to retrieve any domain invitations that match their email address. We also need to check if they are logging in with the same e-mail
{"golden_diff": "diff --git a/src/djangooidc/backends.py b/src/djangooidc/backends.py\n--- a/src/djangooidc/backends.py\n+++ b/src/djangooidc/backends.py\n@@ -49,13 +49,13 @@\n user, created = UserModel.objects.update_or_create(**args)\n if created:\n user = self.configure_user(user, **kwargs)\n- # run a newly created user's callback for a first-time login\n- user.first_login()\n else:\n try:\n user = UserModel.objects.get_by_natural_key(username)\n except UserModel.DoesNotExist:\n return None\n+ # run this callback for a each login\n+ user.on_each_login()\n return user\n \n def clean_username(self, username):\ndiff --git a/src/registrar/models/user.py b/src/registrar/models/user.py\n--- a/src/registrar/models/user.py\n+++ b/src/registrar/models/user.py\n@@ -154,10 +154,10 @@\n new_domain_info = DomainInformation(creator=self, domain=domain)\n new_domain_info.save()\n \n- def first_login(self):\n- \"\"\"Callback when the user is authenticated for the very first time.\n+ def on_each_login(self):\n+ \"\"\"Callback each time the user is authenticated.\n \n- When a user first arrives on the site, we need to retrieve any domain\n+ When a user arrives on the site each time, we need to retrieve any domain\n invitations that match their email address.\n \n We also need to check if they are logging in with the same e-mail\n", "issue": "Move first_login logic so that it occurs on all logins\n### Issue description\n\nCurrently, we have a function (first_login in User.py) that is triggered when it is the users first time logging into our system. In this function, we are checking 1.) are they part of a transition domain, if so make sure there is a domain inviation for them and a domain information object, 2) for all domain invitations they have, we add a user domain role to their account so they can access the domains.\r\n\r\nThe problem is when it comes time for transition, if anyone logs into our system BEFORE we run the migration scripts then they will already have a User row and account on our system and they will not trigger the checks above. This is a bit of a race condition that is easily avoided by just having the first_login logic triggered on every login.\n\n### Acceptance criteria\n\n- [ ] rename first_login and all uses of it (as it is no longer used just at first login\r\n- [ ] move the formerly named first_login to be called for all users. \r\n- [ ] add or update existing unit tests \n\n### Additional context\n\nTo accomplish the second AC above I suggest moving the highlight lint (user.first_login()) in the backend.py Authenticate function to right above the return on line 59 so it is called on each user. 
See picture below:\r\n![image](https://github.com/cisagov/getgov/assets/109625347/39ca23f7-d547-4ce4-b32d-4f55e4aee4d9)\r\n\n\n### Links to other issues\n\n_No response_\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.backends import ModelBackend\nfrom django.utils import timezone\n\nlogger = logging.getLogger(__name__)\n\n\nclass OpenIdConnectBackend(ModelBackend):\n \"\"\"\n This backend checks a previously performed OIDC authentication.\n If it is OK and the user already exists in the database, it is returned.\n If it is OK and user does not exist in the database, it is created and\n returned unless setting OIDC_CREATE_UNKNOWN_USER is False.\n In all other cases, None is returned.\n \"\"\"\n\n def authenticate(self, request, **kwargs):\n logger.debug(\"kwargs %s\" % kwargs)\n user = None\n if not kwargs or \"sub\" not in kwargs.keys():\n return user\n\n UserModel = get_user_model()\n username = self.clean_username(kwargs[\"sub\"])\n\n # Some OP may actually choose to withhold some information, so we must\n # test if it is present\n openid_data = {\"last_login\": timezone.now()}\n openid_data[\"first_name\"] = kwargs.get(\"given_name\", \"\")\n openid_data[\"last_name\"] = kwargs.get(\"family_name\", \"\")\n openid_data[\"email\"] = kwargs.get(\"email\", \"\")\n openid_data[\"phone\"] = kwargs.get(\"phone\", \"\")\n\n # Note that this could be accomplished in one try-except clause, but\n # instead we use get_or_create when creating unknown users since it has\n # built-in safeguards for multiple threads.\n if getattr(settings, \"OIDC_CREATE_UNKNOWN_USER\", True):\n args = {\n UserModel.USERNAME_FIELD: username,\n # defaults _will_ be updated, these are not fallbacks\n \"defaults\": openid_data,\n }\n user, created = UserModel.objects.update_or_create(**args)\n if created:\n user = self.configure_user(user, **kwargs)\n # run a newly created user's callback for a first-time login\n user.first_login()\n else:\n try:\n user = UserModel.objects.get_by_natural_key(username)\n except UserModel.DoesNotExist:\n return None\n return user\n\n def clean_username(self, username):\n \"\"\"\n Performs any cleaning on the \"username\" prior to using it to get or\n create the user object. 
Returns the cleaned username.\n \"\"\"\n return username\n\n def configure_user(self, user, **kwargs):\n \"\"\"\n Configures a user after creation and returns the updated user.\n \"\"\"\n user.set_unusable_password()\n return user\n", "path": "src/djangooidc/backends.py"}, {"content": "import logging\n\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\nfrom .domain_invitation import DomainInvitation\nfrom .transition_domain import TransitionDomain\nfrom .domain_information import DomainInformation\nfrom .domain import Domain\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass User(AbstractUser):\n \"\"\"\n A custom user model that performs identically to the default user model\n but can be customized later.\n \"\"\"\n\n # #### Constants for choice fields ####\n RESTRICTED = \"restricted\"\n STATUS_CHOICES = ((RESTRICTED, RESTRICTED),)\n\n status = models.CharField(\n max_length=10,\n choices=STATUS_CHOICES,\n default=None, # Set the default value to None\n null=True, # Allow the field to be null\n blank=True, # Allow the field to be blank\n )\n\n domains = models.ManyToManyField(\n \"registrar.Domain\",\n through=\"registrar.UserDomainRole\",\n related_name=\"users\",\n )\n\n phone = PhoneNumberField(\n null=True,\n blank=True,\n help_text=\"Phone\",\n db_index=True,\n )\n\n def __str__(self):\n # this info is pulled from Login.gov\n if self.first_name or self.last_name:\n return f\"{self.first_name or ''} {self.last_name or ''} {self.email or ''}\"\n elif self.email:\n return self.email\n else:\n return self.username\n\n def restrict_user(self):\n self.status = self.RESTRICTED\n self.save()\n\n def unrestrict_user(self):\n self.status = None\n self.save()\n\n def is_restricted(self):\n return self.status == self.RESTRICTED\n\n def check_domain_invitations_on_login(self):\n \"\"\"When a user first arrives on the site, we need to retrieve any domain\n invitations that match their email address.\"\"\"\n for invitation in DomainInvitation.objects.filter(\n email=self.email, status=DomainInvitation.INVITED\n ):\n try:\n invitation.retrieve()\n invitation.save()\n except RuntimeError:\n # retrieving should not fail because of a missing user, but\n # if it does fail, log the error so a new user can continue\n # logging in\n logger.warn(\n \"Failed to retrieve invitation %s\", invitation, exc_info=True\n )\n\n def create_domain_and_invite(self, transition_domain: TransitionDomain):\n transition_domain_name = transition_domain.domain_name\n transition_domain_status = transition_domain.status\n transition_domain_email = transition_domain.username\n\n # type safety check. 
name should never be none\n if transition_domain_name is not None:\n new_domain = Domain(\n name=transition_domain_name, state=transition_domain_status\n )\n new_domain.save()\n # check that a domain invitation doesn't already\n # exist for this e-mail / Domain pair\n domain_email_already_in_domain_invites = DomainInvitation.objects.filter(\n email=transition_domain_email.lower(), domain=new_domain\n ).exists()\n if not domain_email_already_in_domain_invites:\n # Create new domain invitation\n new_domain_invitation = DomainInvitation(\n email=transition_domain_email.lower(), domain=new_domain\n )\n new_domain_invitation.save()\n\n def check_transition_domains_on_login(self):\n \"\"\"When a user first arrives on the site, we need to check\n if they are logging in with the same e-mail as a\n transition domain and update our database accordingly.\"\"\"\n\n for transition_domain in TransitionDomain.objects.filter(username=self.email):\n # Looks like the user logged in with the same e-mail as\n # one or more corresponding transition domains.\n # Create corresponding DomainInformation objects.\n\n # NOTE: adding an ADMIN user role for this user\n # for each domain should already be done\n # in the invitation.retrieve() method.\n # However, if the migration scripts for transition\n # domain objects were not executed correctly,\n # there could be transition domains without\n # any corresponding Domain & DomainInvitation objects,\n # which means the invitation.retrieve() method might\n # not execute.\n # Check that there is a corresponding domain object\n # for this transition domain. If not, we have an error\n # with our data and migrations need to be run again.\n\n # Get the domain that corresponds with this transition domain\n domain_exists = Domain.objects.filter(\n name=transition_domain.domain_name\n ).exists()\n if not domain_exists:\n logger.warn(\n \"\"\"There are transition domains without\n corresponding domain objects!\n Please run migration scripts for transition domains\n (See data_migration.md)\"\"\"\n )\n # No need to throw an exception...just create a domain\n # and domain invite, then proceed as normal\n self.create_domain_and_invite(transition_domain)\n\n domain = Domain.objects.get(name=transition_domain.domain_name)\n\n # Create a domain information object, if one doesn't\n # already exist\n domain_info_exists = DomainInformation.objects.filter(\n domain=domain\n ).exists()\n if not domain_info_exists:\n new_domain_info = DomainInformation(creator=self, domain=domain)\n new_domain_info.save()\n\n def first_login(self):\n \"\"\"Callback when the user is authenticated for the very first time.\n\n When a user first arrives on the site, we need to retrieve any domain\n invitations that match their email address.\n\n We also need to check if they are logging in with the same e-mail\n as a transition domain and update our domainInfo objects accordingly.\n \"\"\"\n\n # PART 1: TRANSITION DOMAINS\n #\n # NOTE: THIS MUST RUN FIRST\n # (If we have an issue where transition domains were\n # not fully converted into Domain and DomainInvitation\n # objects, this method will fill in the gaps.\n # This will ensure the Domain Invitations method\n # runs correctly (no missing invites))\n self.check_transition_domains_on_login()\n\n # PART 2: DOMAIN INVITATIONS\n self.check_domain_invitations_on_login()\n\n class Meta:\n permissions = [\n (\"analyst_access_permission\", \"Analyst Access Permission\"),\n (\"full_access_permission\", \"Full Access Permission\"),\n ]\n", "path": 
"src/registrar/models/user.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.backends import ModelBackend\nfrom django.utils import timezone\n\nlogger = logging.getLogger(__name__)\n\n\nclass OpenIdConnectBackend(ModelBackend):\n \"\"\"\n This backend checks a previously performed OIDC authentication.\n If it is OK and the user already exists in the database, it is returned.\n If it is OK and user does not exist in the database, it is created and\n returned unless setting OIDC_CREATE_UNKNOWN_USER is False.\n In all other cases, None is returned.\n \"\"\"\n\n def authenticate(self, request, **kwargs):\n logger.debug(\"kwargs %s\" % kwargs)\n user = None\n if not kwargs or \"sub\" not in kwargs.keys():\n return user\n\n UserModel = get_user_model()\n username = self.clean_username(kwargs[\"sub\"])\n\n # Some OP may actually choose to withhold some information, so we must\n # test if it is present\n openid_data = {\"last_login\": timezone.now()}\n openid_data[\"first_name\"] = kwargs.get(\"given_name\", \"\")\n openid_data[\"last_name\"] = kwargs.get(\"family_name\", \"\")\n openid_data[\"email\"] = kwargs.get(\"email\", \"\")\n openid_data[\"phone\"] = kwargs.get(\"phone\", \"\")\n\n # Note that this could be accomplished in one try-except clause, but\n # instead we use get_or_create when creating unknown users since it has\n # built-in safeguards for multiple threads.\n if getattr(settings, \"OIDC_CREATE_UNKNOWN_USER\", True):\n args = {\n UserModel.USERNAME_FIELD: username,\n # defaults _will_ be updated, these are not fallbacks\n \"defaults\": openid_data,\n }\n user, created = UserModel.objects.update_or_create(**args)\n if created:\n user = self.configure_user(user, **kwargs)\n else:\n try:\n user = UserModel.objects.get_by_natural_key(username)\n except UserModel.DoesNotExist:\n return None\n # run this callback for a each login\n user.on_each_login()\n return user\n\n def clean_username(self, username):\n \"\"\"\n Performs any cleaning on the \"username\" prior to using it to get or\n create the user object. 
Returns the cleaned username.\n \"\"\"\n return username\n\n def configure_user(self, user, **kwargs):\n \"\"\"\n Configures a user after creation and returns the updated user.\n \"\"\"\n user.set_unusable_password()\n return user\n", "path": "src/djangooidc/backends.py"}, {"content": "import logging\n\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\nfrom .domain_invitation import DomainInvitation\nfrom .transition_domain import TransitionDomain\nfrom .domain_information import DomainInformation\nfrom .domain import Domain\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass User(AbstractUser):\n \"\"\"\n A custom user model that performs identically to the default user model\n but can be customized later.\n \"\"\"\n\n # #### Constants for choice fields ####\n RESTRICTED = \"restricted\"\n STATUS_CHOICES = ((RESTRICTED, RESTRICTED),)\n\n status = models.CharField(\n max_length=10,\n choices=STATUS_CHOICES,\n default=None, # Set the default value to None\n null=True, # Allow the field to be null\n blank=True, # Allow the field to be blank\n )\n\n domains = models.ManyToManyField(\n \"registrar.Domain\",\n through=\"registrar.UserDomainRole\",\n related_name=\"users\",\n )\n\n phone = PhoneNumberField(\n null=True,\n blank=True,\n help_text=\"Phone\",\n db_index=True,\n )\n\n def __str__(self):\n # this info is pulled from Login.gov\n if self.first_name or self.last_name:\n return f\"{self.first_name or ''} {self.last_name or ''} {self.email or ''}\"\n elif self.email:\n return self.email\n else:\n return self.username\n\n def restrict_user(self):\n self.status = self.RESTRICTED\n self.save()\n\n def unrestrict_user(self):\n self.status = None\n self.save()\n\n def is_restricted(self):\n return self.status == self.RESTRICTED\n\n def check_domain_invitations_on_login(self):\n \"\"\"When a user first arrives on the site, we need to retrieve any domain\n invitations that match their email address.\"\"\"\n for invitation in DomainInvitation.objects.filter(\n email=self.email, status=DomainInvitation.INVITED\n ):\n try:\n invitation.retrieve()\n invitation.save()\n except RuntimeError:\n # retrieving should not fail because of a missing user, but\n # if it does fail, log the error so a new user can continue\n # logging in\n logger.warn(\n \"Failed to retrieve invitation %s\", invitation, exc_info=True\n )\n\n def create_domain_and_invite(self, transition_domain: TransitionDomain):\n transition_domain_name = transition_domain.domain_name\n transition_domain_status = transition_domain.status\n transition_domain_email = transition_domain.username\n\n # type safety check. 
name should never be none\n if transition_domain_name is not None:\n new_domain = Domain(\n name=transition_domain_name, state=transition_domain_status\n )\n new_domain.save()\n # check that a domain invitation doesn't already\n # exist for this e-mail / Domain pair\n domain_email_already_in_domain_invites = DomainInvitation.objects.filter(\n email=transition_domain_email.lower(), domain=new_domain\n ).exists()\n if not domain_email_already_in_domain_invites:\n # Create new domain invitation\n new_domain_invitation = DomainInvitation(\n email=transition_domain_email.lower(), domain=new_domain\n )\n new_domain_invitation.save()\n\n def check_transition_domains_on_login(self):\n \"\"\"When a user first arrives on the site, we need to check\n if they are logging in with the same e-mail as a\n transition domain and update our database accordingly.\"\"\"\n\n for transition_domain in TransitionDomain.objects.filter(username=self.email):\n # Looks like the user logged in with the same e-mail as\n # one or more corresponding transition domains.\n # Create corresponding DomainInformation objects.\n\n # NOTE: adding an ADMIN user role for this user\n # for each domain should already be done\n # in the invitation.retrieve() method.\n # However, if the migration scripts for transition\n # domain objects were not executed correctly,\n # there could be transition domains without\n # any corresponding Domain & DomainInvitation objects,\n # which means the invitation.retrieve() method might\n # not execute.\n # Check that there is a corresponding domain object\n # for this transition domain. If not, we have an error\n # with our data and migrations need to be run again.\n\n # Get the domain that corresponds with this transition domain\n domain_exists = Domain.objects.filter(\n name=transition_domain.domain_name\n ).exists()\n if not domain_exists:\n logger.warn(\n \"\"\"There are transition domains without\n corresponding domain objects!\n Please run migration scripts for transition domains\n (See data_migration.md)\"\"\"\n )\n # No need to throw an exception...just create a domain\n # and domain invite, then proceed as normal\n self.create_domain_and_invite(transition_domain)\n\n domain = Domain.objects.get(name=transition_domain.domain_name)\n\n # Create a domain information object, if one doesn't\n # already exist\n domain_info_exists = DomainInformation.objects.filter(\n domain=domain\n ).exists()\n if not domain_info_exists:\n new_domain_info = DomainInformation(creator=self, domain=domain)\n new_domain_info.save()\n\n def on_each_login(self):\n \"\"\"Callback each time the user is authenticated.\n\n When a user arrives on the site each time, we need to retrieve any domain\n invitations that match their email address.\n\n We also need to check if they are logging in with the same e-mail\n as a transition domain and update our domainInfo objects accordingly.\n \"\"\"\n\n # PART 1: TRANSITION DOMAINS\n #\n # NOTE: THIS MUST RUN FIRST\n # (If we have an issue where transition domains were\n # not fully converted into Domain and DomainInvitation\n # objects, this method will fill in the gaps.\n # This will ensure the Domain Invitations method\n # runs correctly (no missing invites))\n self.check_transition_domains_on_login()\n\n # PART 2: DOMAIN INVITATIONS\n self.check_domain_invitations_on_login()\n\n class Meta:\n permissions = [\n (\"analyst_access_permission\", \"Analyst Access Permission\"),\n (\"full_access_permission\", \"Full Access Permission\"),\n ]\n", "path": "src/registrar/models/user.py"}]}
3,124
344
gh_patches_debug_37850
rasdani/github-patches
git_diff
vispy__vispy-2523
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Upgrade to Cython 3 for all builds Cython 3.0 is out now. There are a lot of changes including many changes to the defaults. I've learned quite a few gotchas from my other cython-based projects that hopefully upgrade will be easy for us since we only have a few Cython things in vispy. Just making an issue so if someone else wants to tackle it before me feel free. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Copyright (c) Vispy Development Team. All Rights Reserved. 3 # Distributed under the (new) BSD License. See LICENSE.txt for more info. 4 """Vispy setup script. 5 6 Steps to do a new release: 7 8 Preparations: 9 * Test on Windows, Linux, Mac 10 * Make release notes 11 * Update API documentation and other docs that need updating. 12 13 Define the version and release: 14 * tag the tip changeset as version x.x.x; `git tag -a 'vX.Y.Z' -m "Version X.Y.Z"` 15 * push tag to github 16 * verify that azure pipelines complete 17 * verify that `.tar.gz` sdist and binary wheels are available on PyPI 18 19 Announcing: 20 * It can be worth waiting a day for eager users to report critical bugs 21 * Announce in scipy-user, vispy mailing list, twitter (@vispyproject) 22 23 """ 24 25 import os 26 from os import path as op 27 from distutils import log 28 from setuptools import setup, find_packages, Extension 29 30 import numpy as np 31 from Cython.Build import cythonize 32 33 log.set_verbosity(log.DEBUG) 34 log.info('setup.py entered') 35 log.info('$PATH=%s' % os.environ['PATH']) 36 37 name = 'vispy' 38 description = 'Interactive visualization in Python' 39 40 # Special commands for building jupyter notebook extension 41 here = os.path.dirname(os.path.abspath(__file__)) 42 node_root = os.path.join(here, 'js') 43 is_repo = os.path.exists(os.path.join(here, '.git')) 44 45 npm_path = os.pathsep.join([ 46 os.path.join(node_root, 'node_modules', '.bin'), 47 os.environ.get('PATH', os.defpath), 48 ]) 49 50 51 def set_builtin(name, value): 52 if isinstance(__builtins__, dict): 53 __builtins__[name] = value 54 else: 55 setattr(__builtins__, name, value) 56 57 58 extensions = [Extension('vispy.visuals.text._sdf_cpu', 59 [op.join('vispy', 'visuals', 'text', '_sdf_cpu.pyx')], 60 include_dirs=[np.get_include()]), 61 ] 62 63 readme = open('README.rst', 'r').read() 64 setup( 65 name=name, 66 use_scm_version={ 67 'write_to': 'vispy/version.py', 68 # uses setuptools_scm.version.get_local_dirty_tag (+dirty or empty string) 69 'local_scheme': 'dirty-tag', 70 }, 71 author='Vispy contributors', 72 author_email='[email protected]', 73 license='(new) BSD', 74 url='http://vispy.org', 75 download_url='https://pypi.python.org/pypi/vispy', 76 keywords=[ 77 'visualization', 78 'OpenGl', 79 'ES', 80 'medical', 81 'imaging', 82 '3D', 83 'plotting', 84 'numpy', 85 'bigdata', 86 'ipython', 87 'jupyter', 88 'widgets', 89 ], 90 description=description, 91 long_description=readme, 92 long_description_content_type='text/x-rst', 93 platforms='any', 94 provides=['vispy'], 95 python_requires='>=3.6', 96 install_requires=['numpy', 'freetype-py', 'hsluv', 'kiwisolver', 'packaging'], 97 setup_requires=['numpy', 'cython', 'setuptools_scm', 'setuptools_scm_git_archive', 'packaging'], 98 extras_require={ 99 'ipython-static': ['ipython'], 100 'pyglet': ['pyglet>=1.2'], 101 'pyqt5': 
['pyqt5'], 102 'pyqt6': ['pyqt6'], 103 'pyside': ['PySide'], 104 'pyside2': ['PySide2'], 105 'pyside6': ['PySide6'], 106 'sdl2': ['PySDL2'], 107 'wx': ['wxPython'], 108 'tk': ['pyopengltk'], 109 'doc': ['pydata-sphinx-theme', 'numpydoc', 'sphinxcontrib-apidoc', 110 'sphinx-gallery', 'myst-parser', 'pillow', 'pytest', 111 'pyopengl'], 112 'io': ['meshio', 'Pillow'], 113 }, 114 packages=find_packages(exclude=['make']), 115 ext_modules=cythonize(extensions, language_level=3), 116 package_dir={'vispy': 'vispy'}, 117 data_files=[], 118 include_package_data=True, 119 package_data={ 120 'vispy': [op.join('io', '_data', '*'), 121 op.join('app', 'tests', 'qt-designer.ui'), 122 op.join('util', 'fonts', 'data', '*.ttf'), 123 ], 124 125 'vispy.glsl': ['*.vert', '*.frag', "*.glsl"], 126 'vispy.glsl.antialias': ['*.vert', '*.frag', "*.glsl"], 127 'vispy.glsl.arrowheads': ['*.vert', '*.frag', "*.glsl"], 128 'vispy.glsl.arrows': ['*.vert', '*.frag', "*.glsl"], 129 'vispy.glsl.collections': ['*.vert', '*.frag', "*.glsl"], 130 'vispy.glsl.colormaps': ['*.vert', '*.frag', "*.glsl"], 131 'vispy.glsl.lines': ['*.vert', '*.frag', "*.glsl"], 132 'vispy.glsl.markers': ['*.vert', '*.frag', "*.glsl"], 133 'vispy.glsl.math': ['*.vert', '*.frag', "*.glsl"], 134 'vispy.glsl.misc': ['*.vert', '*.frag', "*.glsl"], 135 'vispy.glsl.transforms': ['*.vert', '*.frag', "*.glsl"], 136 137 }, 138 zip_safe=False, 139 classifiers=[ 140 'Development Status :: 3 - Alpha', 141 'Intended Audience :: Science/Research', 142 'Intended Audience :: Education', 143 'Intended Audience :: Developers', 144 'Topic :: Scientific/Engineering :: Visualization', 145 'License :: OSI Approved :: BSD License', 146 'Operating System :: MacOS :: MacOS X', 147 'Operating System :: Microsoft :: Windows', 148 'Operating System :: POSIX', 149 'Programming Language :: Python', 150 'Programming Language :: Python :: 3.6', 151 'Programming Language :: Python :: 3.7', 152 'Programming Language :: Python :: 3.8', 153 'Framework :: IPython' 154 ], 155 ) 156 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,15 +24,11 @@
 
 import os
 from os import path as op
-from distutils import log
-from setuptools import setup, find_packages, Extension
+from setuptools import setup, find_packages
 
 import numpy as np
 from Cython.Build import cythonize
-
-log.set_verbosity(log.DEBUG)
-log.info('setup.py entered')
-log.info('$PATH=%s' % os.environ['PATH'])
+from Cython.Distutils import Extension
 
 name = 'vispy'
 description = 'Interactive visualization in Python'
@@ -56,8 +52,11 @@
 
 
 extensions = [Extension('vispy.visuals.text._sdf_cpu',
-                        [op.join('vispy', 'visuals', 'text', '_sdf_cpu.pyx')],
-                        include_dirs=[np.get_include()]),
+                        sources=[op.join('vispy', 'visuals', 'text', '_sdf_cpu.pyx')],
+                        include_dirs=[np.get_include()],
+                        cython_directives={"language_level": "3"},
+                        define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")],
+                        ),
 ]
 
 readme = open('README.rst', 'r').read()
@@ -70,7 +69,7 @@
     },
     author='Vispy contributors',
     author_email='[email protected]',
-    license='(new) BSD',
+    license='BSD-3-Clause',
     url='http://vispy.org',
     download_url='https://pypi.python.org/pypi/vispy',
     keywords=[
@@ -92,9 +91,8 @@
     long_description_content_type='text/x-rst',
     platforms='any',
     provides=['vispy'],
-    python_requires='>=3.6',
+    python_requires='>=3.8',
    install_requires=['numpy', 'freetype-py', 'hsluv', 'kiwisolver', 'packaging'],
-    setup_requires=['numpy', 'cython', 'setuptools_scm', 'setuptools_scm_git_archive', 'packaging'],
    extras_require={
        'ipython-static': ['ipython'],
        'pyglet': ['pyglet>=1.2'],
@@ -147,9 +145,10 @@
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python',
-        'Programming Language :: Python :: 3.6',
-        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
+        'Programming Language :: Python :: 3.9',
+        'Programming Language :: Python :: 3.10',
+        'Programming Language :: Python :: 3.11',
        'Framework :: IPython'
    ],
 )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,15 +24,11 @@\n \n import os\n from os import path as op\n-from distutils import log\n-from setuptools import setup, find_packages, Extension\n+from setuptools import setup, find_packages\n \n import numpy as np\n from Cython.Build import cythonize\n-\n-log.set_verbosity(log.DEBUG)\n-log.info('setup.py entered')\n-log.info('$PATH=%s' % os.environ['PATH'])\n+from Cython.Distutils import Extension\n \n name = 'vispy'\n description = 'Interactive visualization in Python'\n@@ -56,8 +52,11 @@\n \n \n extensions = [Extension('vispy.visuals.text._sdf_cpu',\n- [op.join('vispy', 'visuals', 'text', '_sdf_cpu.pyx')],\n- include_dirs=[np.get_include()]),\n+ sources=[op.join('vispy', 'visuals', 'text', '_sdf_cpu.pyx')],\n+ include_dirs=[np.get_include()],\n+ cython_directives={\"language_level\": \"3\"},\n+ define_macros=[(\"NPY_NO_DEPRECATED_API\", \"NPY_1_7_API_VERSION\")],\n+ ),\n ]\n \n readme = open('README.rst', 'r').read()\n@@ -70,7 +69,7 @@\n },\n author='Vispy contributors',\n author_email='[email protected]',\n- license='(new) BSD',\n+ license='BSD-3-Clause',\n url='http://vispy.org',\n download_url='https://pypi.python.org/pypi/vispy',\n keywords=[\n@@ -92,9 +91,8 @@\n long_description_content_type='text/x-rst',\n platforms='any',\n provides=['vispy'],\n- python_requires='>=3.6',\n+ python_requires='>=3.8',\n install_requires=['numpy', 'freetype-py', 'hsluv', 'kiwisolver', 'packaging'],\n- setup_requires=['numpy', 'cython', 'setuptools_scm', 'setuptools_scm_git_archive', 'packaging'],\n extras_require={\n 'ipython-static': ['ipython'],\n 'pyglet': ['pyglet>=1.2'],\n@@ -147,9 +145,10 @@\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n- 'Programming Language :: Python :: 3.6',\n- 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n+ 'Programming Language :: Python :: 3.9',\n+ 'Programming Language :: Python :: 3.10',\n+ 'Programming Language :: Python :: 3.11',\n 'Framework :: IPython'\n ],\n )\n", "issue": "Upgrade to Cython 3 for all builds\nCython 3.0 is out now. There are a lot of changes including many changes to the defaults. I've learned quite a few gotchas from my other cython-based projects that hopefully upgrade will be easy for us since we only have a few Cython things in vispy. Just making an issue so if someone else wants to tackle it before me feel free.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. 
See LICENSE.txt for more info.\n\"\"\"Vispy setup script.\n\nSteps to do a new release:\n\nPreparations:\n * Test on Windows, Linux, Mac\n * Make release notes\n * Update API documentation and other docs that need updating.\n\nDefine the version and release:\n * tag the tip changeset as version x.x.x; `git tag -a 'vX.Y.Z' -m \"Version X.Y.Z\"`\n * push tag to github\n * verify that azure pipelines complete\n * verify that `.tar.gz` sdist and binary wheels are available on PyPI\n\nAnnouncing:\n * It can be worth waiting a day for eager users to report critical bugs\n * Announce in scipy-user, vispy mailing list, twitter (@vispyproject)\n\n\"\"\"\n\nimport os\nfrom os import path as op\nfrom distutils import log\nfrom setuptools import setup, find_packages, Extension\n\nimport numpy as np\nfrom Cython.Build import cythonize\n\nlog.set_verbosity(log.DEBUG)\nlog.info('setup.py entered')\nlog.info('$PATH=%s' % os.environ['PATH'])\n\nname = 'vispy'\ndescription = 'Interactive visualization in Python'\n\n# Special commands for building jupyter notebook extension\nhere = os.path.dirname(os.path.abspath(__file__))\nnode_root = os.path.join(here, 'js')\nis_repo = os.path.exists(os.path.join(here, '.git'))\n\nnpm_path = os.pathsep.join([\n os.path.join(node_root, 'node_modules', '.bin'),\n os.environ.get('PATH', os.defpath),\n])\n\n\ndef set_builtin(name, value):\n if isinstance(__builtins__, dict):\n __builtins__[name] = value\n else:\n setattr(__builtins__, name, value)\n\n\nextensions = [Extension('vispy.visuals.text._sdf_cpu',\n [op.join('vispy', 'visuals', 'text', '_sdf_cpu.pyx')],\n include_dirs=[np.get_include()]),\n ]\n\nreadme = open('README.rst', 'r').read()\nsetup(\n name=name,\n use_scm_version={\n 'write_to': 'vispy/version.py',\n # uses setuptools_scm.version.get_local_dirty_tag (+dirty or empty string)\n 'local_scheme': 'dirty-tag',\n },\n author='Vispy contributors',\n author_email='[email protected]',\n license='(new) BSD',\n url='http://vispy.org',\n download_url='https://pypi.python.org/pypi/vispy',\n keywords=[\n 'visualization',\n 'OpenGl',\n 'ES',\n 'medical',\n 'imaging',\n '3D',\n 'plotting',\n 'numpy',\n 'bigdata',\n 'ipython',\n 'jupyter',\n 'widgets',\n ],\n description=description,\n long_description=readme,\n long_description_content_type='text/x-rst',\n platforms='any',\n provides=['vispy'],\n python_requires='>=3.6',\n install_requires=['numpy', 'freetype-py', 'hsluv', 'kiwisolver', 'packaging'],\n setup_requires=['numpy', 'cython', 'setuptools_scm', 'setuptools_scm_git_archive', 'packaging'],\n extras_require={\n 'ipython-static': ['ipython'],\n 'pyglet': ['pyglet>=1.2'],\n 'pyqt5': ['pyqt5'],\n 'pyqt6': ['pyqt6'],\n 'pyside': ['PySide'],\n 'pyside2': ['PySide2'],\n 'pyside6': ['PySide6'],\n 'sdl2': ['PySDL2'],\n 'wx': ['wxPython'],\n 'tk': ['pyopengltk'],\n 'doc': ['pydata-sphinx-theme', 'numpydoc', 'sphinxcontrib-apidoc',\n 'sphinx-gallery', 'myst-parser', 'pillow', 'pytest',\n 'pyopengl'],\n 'io': ['meshio', 'Pillow'],\n },\n packages=find_packages(exclude=['make']),\n ext_modules=cythonize(extensions, language_level=3),\n package_dir={'vispy': 'vispy'},\n data_files=[],\n include_package_data=True,\n package_data={\n 'vispy': [op.join('io', '_data', '*'),\n op.join('app', 'tests', 'qt-designer.ui'),\n op.join('util', 'fonts', 'data', '*.ttf'),\n ],\n\n 'vispy.glsl': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.antialias': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.arrowheads': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.arrows': ['*.vert', '*.frag', 
\"*.glsl\"],\n 'vispy.glsl.collections': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.colormaps': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.lines': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.markers': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.math': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.misc': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.transforms': ['*.vert', '*.frag', \"*.glsl\"],\n\n },\n zip_safe=False,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Education',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Framework :: IPython'\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\"\"\"Vispy setup script.\n\nSteps to do a new release:\n\nPreparations:\n * Test on Windows, Linux, Mac\n * Make release notes\n * Update API documentation and other docs that need updating.\n\nDefine the version and release:\n * tag the tip changeset as version x.x.x; `git tag -a 'vX.Y.Z' -m \"Version X.Y.Z\"`\n * push tag to github\n * verify that azure pipelines complete\n * verify that `.tar.gz` sdist and binary wheels are available on PyPI\n\nAnnouncing:\n * It can be worth waiting a day for eager users to report critical bugs\n * Announce in scipy-user, vispy mailing list, twitter (@vispyproject)\n\n\"\"\"\n\nimport os\nfrom os import path as op\nfrom setuptools import setup, find_packages\n\nimport numpy as np\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import Extension\n\nname = 'vispy'\ndescription = 'Interactive visualization in Python'\n\n# Special commands for building jupyter notebook extension\nhere = os.path.dirname(os.path.abspath(__file__))\nnode_root = os.path.join(here, 'js')\nis_repo = os.path.exists(os.path.join(here, '.git'))\n\nnpm_path = os.pathsep.join([\n os.path.join(node_root, 'node_modules', '.bin'),\n os.environ.get('PATH', os.defpath),\n])\n\n\ndef set_builtin(name, value):\n if isinstance(__builtins__, dict):\n __builtins__[name] = value\n else:\n setattr(__builtins__, name, value)\n\n\nextensions = [Extension('vispy.visuals.text._sdf_cpu',\n sources=[op.join('vispy', 'visuals', 'text', '_sdf_cpu.pyx')],\n include_dirs=[np.get_include()],\n cython_directives={\"language_level\": \"3\"},\n define_macros=[(\"NPY_NO_DEPRECATED_API\", \"NPY_1_7_API_VERSION\")],\n ),\n ]\n\nreadme = open('README.rst', 'r').read()\nsetup(\n name=name,\n use_scm_version={\n 'write_to': 'vispy/version.py',\n # uses setuptools_scm.version.get_local_dirty_tag (+dirty or empty string)\n 'local_scheme': 'dirty-tag',\n },\n author='Vispy contributors',\n author_email='[email protected]',\n license='BSD-3-Clause',\n url='http://vispy.org',\n download_url='https://pypi.python.org/pypi/vispy',\n keywords=[\n 'visualization',\n 'OpenGl',\n 'ES',\n 'medical',\n 'imaging',\n '3D',\n 'plotting',\n 'numpy',\n 'bigdata',\n 'ipython',\n 'jupyter',\n 'widgets',\n ],\n description=description,\n long_description=readme,\n 
long_description_content_type='text/x-rst',\n platforms='any',\n provides=['vispy'],\n python_requires='>=3.8',\n install_requires=['numpy', 'freetype-py', 'hsluv', 'kiwisolver', 'packaging'],\n extras_require={\n 'ipython-static': ['ipython'],\n 'pyglet': ['pyglet>=1.2'],\n 'pyqt5': ['pyqt5'],\n 'pyqt6': ['pyqt6'],\n 'pyside': ['PySide'],\n 'pyside2': ['PySide2'],\n 'pyside6': ['PySide6'],\n 'sdl2': ['PySDL2'],\n 'wx': ['wxPython'],\n 'tk': ['pyopengltk'],\n 'doc': ['pydata-sphinx-theme', 'numpydoc', 'sphinxcontrib-apidoc',\n 'sphinx-gallery', 'myst-parser', 'pillow', 'pytest',\n 'pyopengl'],\n 'io': ['meshio', 'Pillow'],\n },\n packages=find_packages(exclude=['make']),\n ext_modules=cythonize(extensions, language_level=3),\n package_dir={'vispy': 'vispy'},\n data_files=[],\n include_package_data=True,\n package_data={\n 'vispy': [op.join('io', '_data', '*'),\n op.join('app', 'tests', 'qt-designer.ui'),\n op.join('util', 'fonts', 'data', '*.ttf'),\n ],\n\n 'vispy.glsl': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.antialias': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.arrowheads': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.arrows': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.collections': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.colormaps': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.lines': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.markers': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.math': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.misc': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.transforms': ['*.vert', '*.frag', \"*.glsl\"],\n\n },\n zip_safe=False,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Education',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11',\n 'Framework :: IPython'\n ],\n)\n", "path": "setup.py"}]}
2,072
631
gh_patches_debug_12979
rasdani/github-patches
git_diff
beetbox__beets-4960
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Sidebar scrolling in docs not working and general adjustment ideas I was too lazy for a PR but I a quickfixed the sidebar in Alabaster. It did not stay "in place" like we had it before. More adjustments might come in proper PR. **_~Well, the fix does not work in master but it does work on my local docs build!~_** Update: Actually the sidebar scrolling did work, probably there was some caching issue with my local browser, so that is [x] DONE Anyway, we should take care of docs build and customization. Some ideas: - Explicitely state Alabaster as the theme to use. - Set the pic we have for the beetbox organization as the logo in the docs. (Or do we have a logo or does someone want to design one? - Default to "latest" version of the docs - ...I might add some more ideas here... Might be related: https://github.com/beetbox/beets/issues/4912 _Originally posted by @JOJ0 in https://github.com/beetbox/beets/issues/4644#issuecomment-1728881928_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 # This file is part of beets. 4 # Copyright 2016, Adrian Sampson. 5 # 6 # Permission is hereby granted, free of charge, to any person obtaining 7 # a copy of this software and associated documentation files (the 8 # "Software"), to deal in the Software without restriction, including 9 # without limitation the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the Software, and to 11 # permit persons to whom the Software is furnished to do so, subject to 12 # the following conditions: 13 # 14 # The above copyright notice and this permission notice shall be 15 # included in all copies or substantial portions of the Software. 16 17 18 import os 19 import shutil 20 import subprocess 21 import sys 22 23 from setuptools import setup 24 25 26 def _read(fn): 27 path = os.path.join(os.path.dirname(__file__), fn) 28 return open(path).read() 29 30 31 def build_manpages(): 32 # Go into the docs directory and build the manpage. 33 docdir = os.path.join(os.path.dirname(__file__), "docs") 34 curdir = os.getcwd() 35 os.chdir(docdir) 36 try: 37 subprocess.check_call(["make", "man"]) 38 except OSError: 39 print("Could not build manpages (make man failed)!", file=sys.stderr) 40 return 41 finally: 42 os.chdir(curdir) 43 44 # Copy resulting manpages. 45 mandir = os.path.join(os.path.dirname(__file__), "man") 46 if os.path.exists(mandir): 47 shutil.rmtree(mandir) 48 shutil.copytree(os.path.join(docdir, "_build", "man"), mandir) 49 50 51 # Build manpages if we're making a source distribution tarball. 52 if "sdist" in sys.argv: 53 build_manpages() 54 55 56 setup( 57 name="beets", 58 version="1.6.1", 59 description="music tagger and library organizer", 60 author="Adrian Sampson", 61 author_email="[email protected]", 62 url="https://beets.io/", 63 license="MIT", 64 platforms="ALL", 65 long_description=_read("README.rst"), 66 test_suite="test.testall.suite", 67 zip_safe=False, 68 include_package_data=True, # Install plugin resources. 
69 packages=[ 70 "beets", 71 "beets.ui", 72 "beets.autotag", 73 "beets.util", 74 "beets.dbcore", 75 "beetsplug", 76 "beetsplug.bpd", 77 "beetsplug.web", 78 "beetsplug.lastgenre", 79 "beetsplug.metasync", 80 ], 81 entry_points={ 82 "console_scripts": [ 83 "beet = beets.ui:main", 84 ], 85 }, 86 install_requires=[ 87 "unidecode>=1.3.6", 88 "musicbrainzngs>=0.4", 89 "pyyaml", 90 "mediafile>=0.12.0", 91 "confuse>=1.5.0", 92 "munkres>=1.0.0", 93 "jellyfish", 94 "typing_extensions", 95 ] 96 + ( 97 # Support for ANSI console colors on Windows. 98 ["colorama"] 99 if (sys.platform == "win32") 100 else [] 101 ), 102 extras_require={ 103 "test": [ 104 "beautifulsoup4", 105 "coverage", 106 "flask", 107 "mock", 108 "pylast", 109 "pytest", 110 "python-mpd2", 111 "pyxdg", 112 "responses>=0.3.0", 113 "requests_oauthlib", 114 "reflink", 115 "rarfile", 116 "python3-discogs-client>=2.3.15", 117 "py7zr", 118 ], 119 "lint": [ 120 "flake8", 121 "flake8-docstrings", 122 "pep8-naming", 123 ], 124 "mypy": [ 125 "mypy", 126 "types-Pillow", 127 "types-urllib3", 128 "types-beautifulsoup4", 129 "types-PyYAML", 130 "types-requests", 131 "types-Flask-Cors", 132 ], 133 "docs": [ 134 "sphinx", 135 "sphinx_rtd_theme", 136 ], 137 # Plugin (optional) dependencies: 138 "absubmit": ["requests"], 139 "fetchart": ["requests", "Pillow", "beautifulsoup4"], 140 "embedart": ["Pillow"], 141 "embyupdate": ["requests"], 142 "chroma": ["pyacoustid"], 143 "discogs": ["python3-discogs-client>=2.3.15"], 144 "beatport": ["requests-oauthlib>=0.6.1"], 145 "kodiupdate": ["requests"], 146 "lastgenre": ["pylast"], 147 "lastimport": ["pylast"], 148 "lyrics": ["requests", "beautifulsoup4", "langdetect"], 149 "mpdstats": ["python-mpd2>=0.4.2"], 150 "plexupdate": ["requests"], 151 "web": ["flask", "flask-cors"], 152 "import": ["rarfile", "py7zr"], 153 "thumbnails": ["pyxdg", "Pillow"], 154 "metasync": ["dbus-python"], 155 "sonosupdate": ["soco"], 156 "scrub": ["mutagen>=1.33"], 157 "bpd": ["PyGObject"], 158 "replaygain": ["PyGObject"], 159 "reflink": ["reflink"], 160 }, 161 # Non-Python/non-PyPI plugin dependencies: 162 # chroma: chromaprint or fpcalc 163 # convert: ffmpeg 164 # badfiles: mp3val and flac 165 # bpd: python-gi and GStreamer 1.0+ 166 # embedart: ImageMagick 167 # absubmit: extractor binary from https://acousticbrainz.org/download 168 # keyfinder: KeyFinder 169 # replaygain: python-gi and GStreamer 1.0+ 170 # or mp3gain/aacgain 171 # or Python Audio Tools 172 # or ffmpeg 173 # ipfs: go-ipfs 174 classifiers=[ 175 "Topic :: Multimedia :: Sound/Audio", 176 "Topic :: Multimedia :: Sound/Audio :: Players :: MP3", 177 "License :: OSI Approved :: MIT License", 178 "Environment :: Console", 179 "Environment :: Web Environment", 180 "Programming Language :: Python", 181 "Programming Language :: Python :: 3", 182 "Programming Language :: Python :: 3.7", 183 "Programming Language :: Python :: 3.8", 184 "Programming Language :: Python :: 3.9", 185 "Programming Language :: Python :: 3.10", 186 "Programming Language :: Python :: Implementation :: CPython", 187 ], 188 ) 189 ``` Path: `docs/conf.py` Content: ``` 1 AUTHOR = "Adrian Sampson" 2 3 # General configuration 4 5 extensions = ["sphinx.ext.autodoc", "sphinx.ext.extlinks"] 6 7 exclude_patterns = ["_build"] 8 source_suffix = ".rst" 9 master_doc = "index" 10 11 project = "beets" 12 copyright = "2016, Adrian Sampson" 13 14 version = "1.6" 15 release = "1.6.1" 16 17 pygments_style = "sphinx" 18 19 # External links to the bug tracker and other sites. 
20 extlinks = { 21 "bug": ("https://github.com/beetbox/beets/issues/%s", "#%s"), 22 "user": ("https://github.com/%s", "%s"), 23 "pypi": ("https://pypi.org/project/%s/", "%s"), 24 "stdlib": ("https://docs.python.org/3/library/%s.html", "%s"), 25 } 26 27 linkcheck_ignore = [ 28 r"https://github.com/beetbox/beets/issues/", 29 r"https://github.com/[^/]+$", # ignore user pages 30 r".*localhost.*", 31 r"https?://127\.0\.0\.1", 32 r"https://www.musixmatch.com/", # blocks requests 33 r"https://genius.com/", # blocks requests 34 ] 35 36 # Options for HTML output 37 htmlhelp_basename = "beetsdoc" 38 39 # Options for LaTeX output 40 latex_documents = [ 41 ("index", "beets.tex", "beets Documentation", AUTHOR, "manual"), 42 ] 43 44 # Options for manual page output 45 man_pages = [ 46 ( 47 "reference/cli", 48 "beet", 49 "music tagger and library organizer", 50 [AUTHOR], 51 1, 52 ), 53 ( 54 "reference/config", 55 "beetsconfig", 56 "beets configuration file", 57 [AUTHOR], 58 5, 59 ), 60 ] 61 62 # Options for Alabaster theme 63 html_theme_options = {"fixed_sidebar": True} 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -59,5 +59,16 @@
     ),
 ]
 
-# Options for Alabaster theme
-html_theme_options = {"fixed_sidebar": True}
+# Options for pydata theme
+html_theme = 'pydata_sphinx_theme'
+html_theme_options = {
+    'collapse_navigation': True,
+    "logo": {
+        "text": "beets",
+    },
+    "pygment_light_style": "bw",
+}
+html_title = "beets"
+html_logo = "_static/beets_logo.png"
+html_static_path = ['_static']
+html_css_files = ['beets.css']
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -132,7 +132,7 @@
         ],
         "docs": [
             "sphinx",
-            "sphinx_rtd_theme",
+            "pydata_sphinx_theme",
         ],
         # Plugin (optional) dependencies:
         "absubmit": ["requests"],
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -59,5 +59,16 @@\n ),\n ]\n \n-# Options for Alabaster theme\n-html_theme_options = {\"fixed_sidebar\": True}\n+# Options for pydata theme\n+html_theme = 'pydata_sphinx_theme'\n+html_theme_options = {\n+ 'collapse_navigation': True,\n+ \"logo\": {\n+ \"text\": \"beets\",\n+ },\n+ \"pygment_light_style\": \"bw\",\n+}\n+html_title = \"beets\"\n+html_logo = \"_static/beets_logo.png\"\n+html_static_path = ['_static']\n+html_css_files = ['beets.css']\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -132,7 +132,7 @@\n ],\n \"docs\": [\n \"sphinx\",\n- \"sphinx_rtd_theme\",\n+ \"pydata_sphinx_theme\",\n ],\n # Plugin (optional) dependencies:\n \"absubmit\": [\"requests\"],\n", "issue": "Sidebar scrolling in docs not working and general adjustment ideas\nI was too lazy for a PR but I a quickfixed the sidebar in Alabaster. It did not stay \"in place\" like we had it before. More adjustments might come in proper PR.\r\n\r\n**_~Well, the fix does not work in master but it does work on my local docs build!~_**\r\n\r\nUpdate: Actually the sidebar scrolling did work, probably there was some caching issue with my local browser, so that is [x] DONE\r\n\r\nAnyway, we should take care of docs build and customization. Some ideas:\r\n\r\n- Explicitely state Alabaster as the theme to use.\r\n- Set the pic we have for the beetbox organization as the logo in the docs. (Or do we have a logo or does someone want to design one?\r\n- Default to \"latest\" version of the docs\r\n- ...I might add some more ideas here...\r\n\r\nMight be related: https://github.com/beetbox/beets/issues/4912\r\n\r\n_Originally posted by @JOJ0 in https://github.com/beetbox/beets/issues/4644#issuecomment-1728881928_\r\n \n", "before_files": [{"content": "#!/usr/bin/env python\n\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\n\n\ndef _read(fn):\n path = os.path.join(os.path.dirname(__file__), fn)\n return open(path).read()\n\n\ndef build_manpages():\n # Go into the docs directory and build the manpage.\n docdir = os.path.join(os.path.dirname(__file__), \"docs\")\n curdir = os.getcwd()\n os.chdir(docdir)\n try:\n subprocess.check_call([\"make\", \"man\"])\n except OSError:\n print(\"Could not build manpages (make man failed)!\", file=sys.stderr)\n return\n finally:\n os.chdir(curdir)\n\n # Copy resulting manpages.\n mandir = os.path.join(os.path.dirname(__file__), \"man\")\n if os.path.exists(mandir):\n shutil.rmtree(mandir)\n shutil.copytree(os.path.join(docdir, \"_build\", \"man\"), mandir)\n\n\n# Build manpages if we're making a source distribution tarball.\nif \"sdist\" in sys.argv:\n build_manpages()\n\n\nsetup(\n name=\"beets\",\n version=\"1.6.1\",\n description=\"music tagger and library organizer\",\n author=\"Adrian 
Sampson\",\n author_email=\"[email protected]\",\n url=\"https://beets.io/\",\n license=\"MIT\",\n platforms=\"ALL\",\n long_description=_read(\"README.rst\"),\n test_suite=\"test.testall.suite\",\n zip_safe=False,\n include_package_data=True, # Install plugin resources.\n packages=[\n \"beets\",\n \"beets.ui\",\n \"beets.autotag\",\n \"beets.util\",\n \"beets.dbcore\",\n \"beetsplug\",\n \"beetsplug.bpd\",\n \"beetsplug.web\",\n \"beetsplug.lastgenre\",\n \"beetsplug.metasync\",\n ],\n entry_points={\n \"console_scripts\": [\n \"beet = beets.ui:main\",\n ],\n },\n install_requires=[\n \"unidecode>=1.3.6\",\n \"musicbrainzngs>=0.4\",\n \"pyyaml\",\n \"mediafile>=0.12.0\",\n \"confuse>=1.5.0\",\n \"munkres>=1.0.0\",\n \"jellyfish\",\n \"typing_extensions\",\n ]\n + (\n # Support for ANSI console colors on Windows.\n [\"colorama\"]\n if (sys.platform == \"win32\")\n else []\n ),\n extras_require={\n \"test\": [\n \"beautifulsoup4\",\n \"coverage\",\n \"flask\",\n \"mock\",\n \"pylast\",\n \"pytest\",\n \"python-mpd2\",\n \"pyxdg\",\n \"responses>=0.3.0\",\n \"requests_oauthlib\",\n \"reflink\",\n \"rarfile\",\n \"python3-discogs-client>=2.3.15\",\n \"py7zr\",\n ],\n \"lint\": [\n \"flake8\",\n \"flake8-docstrings\",\n \"pep8-naming\",\n ],\n \"mypy\": [\n \"mypy\",\n \"types-Pillow\",\n \"types-urllib3\",\n \"types-beautifulsoup4\",\n \"types-PyYAML\",\n \"types-requests\",\n \"types-Flask-Cors\",\n ],\n \"docs\": [\n \"sphinx\",\n \"sphinx_rtd_theme\",\n ],\n # Plugin (optional) dependencies:\n \"absubmit\": [\"requests\"],\n \"fetchart\": [\"requests\", \"Pillow\", \"beautifulsoup4\"],\n \"embedart\": [\"Pillow\"],\n \"embyupdate\": [\"requests\"],\n \"chroma\": [\"pyacoustid\"],\n \"discogs\": [\"python3-discogs-client>=2.3.15\"],\n \"beatport\": [\"requests-oauthlib>=0.6.1\"],\n \"kodiupdate\": [\"requests\"],\n \"lastgenre\": [\"pylast\"],\n \"lastimport\": [\"pylast\"],\n \"lyrics\": [\"requests\", \"beautifulsoup4\", \"langdetect\"],\n \"mpdstats\": [\"python-mpd2>=0.4.2\"],\n \"plexupdate\": [\"requests\"],\n \"web\": [\"flask\", \"flask-cors\"],\n \"import\": [\"rarfile\", \"py7zr\"],\n \"thumbnails\": [\"pyxdg\", \"Pillow\"],\n \"metasync\": [\"dbus-python\"],\n \"sonosupdate\": [\"soco\"],\n \"scrub\": [\"mutagen>=1.33\"],\n \"bpd\": [\"PyGObject\"],\n \"replaygain\": [\"PyGObject\"],\n \"reflink\": [\"reflink\"],\n },\n # Non-Python/non-PyPI plugin dependencies:\n # chroma: chromaprint or fpcalc\n # convert: ffmpeg\n # badfiles: mp3val and flac\n # bpd: python-gi and GStreamer 1.0+\n # embedart: ImageMagick\n # absubmit: extractor binary from https://acousticbrainz.org/download\n # keyfinder: KeyFinder\n # replaygain: python-gi and GStreamer 1.0+\n # or mp3gain/aacgain\n # or Python Audio Tools\n # or ffmpeg\n # ipfs: go-ipfs\n classifiers=[\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Sound/Audio :: Players :: MP3\",\n \"License :: OSI Approved :: MIT License\",\n \"Environment :: Console\",\n \"Environment :: Web Environment\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n ],\n)\n", "path": "setup.py"}, {"content": "AUTHOR = \"Adrian Sampson\"\n\n# General configuration\n\nextensions = [\"sphinx.ext.autodoc\", \"sphinx.ext.extlinks\"]\n\nexclude_patterns = 
[\"_build\"]\nsource_suffix = \".rst\"\nmaster_doc = \"index\"\n\nproject = \"beets\"\ncopyright = \"2016, Adrian Sampson\"\n\nversion = \"1.6\"\nrelease = \"1.6.1\"\n\npygments_style = \"sphinx\"\n\n# External links to the bug tracker and other sites.\nextlinks = {\n \"bug\": (\"https://github.com/beetbox/beets/issues/%s\", \"#%s\"),\n \"user\": (\"https://github.com/%s\", \"%s\"),\n \"pypi\": (\"https://pypi.org/project/%s/\", \"%s\"),\n \"stdlib\": (\"https://docs.python.org/3/library/%s.html\", \"%s\"),\n}\n\nlinkcheck_ignore = [\n r\"https://github.com/beetbox/beets/issues/\",\n r\"https://github.com/[^/]+$\", # ignore user pages\n r\".*localhost.*\",\n r\"https?://127\\.0\\.0\\.1\",\n r\"https://www.musixmatch.com/\", # blocks requests\n r\"https://genius.com/\", # blocks requests\n]\n\n# Options for HTML output\nhtmlhelp_basename = \"beetsdoc\"\n\n# Options for LaTeX output\nlatex_documents = [\n (\"index\", \"beets.tex\", \"beets Documentation\", AUTHOR, \"manual\"),\n]\n\n# Options for manual page output\nman_pages = [\n (\n \"reference/cli\",\n \"beet\",\n \"music tagger and library organizer\",\n [AUTHOR],\n 1,\n ),\n (\n \"reference/config\",\n \"beetsconfig\",\n \"beets configuration file\",\n [AUTHOR],\n 5,\n ),\n]\n\n# Options for Alabaster theme\nhtml_theme_options = {\"fixed_sidebar\": True}\n", "path": "docs/conf.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\n\n\ndef _read(fn):\n path = os.path.join(os.path.dirname(__file__), fn)\n return open(path).read()\n\n\ndef build_manpages():\n # Go into the docs directory and build the manpage.\n docdir = os.path.join(os.path.dirname(__file__), \"docs\")\n curdir = os.getcwd()\n os.chdir(docdir)\n try:\n subprocess.check_call([\"make\", \"man\"])\n except OSError:\n print(\"Could not build manpages (make man failed)!\", file=sys.stderr)\n return\n finally:\n os.chdir(curdir)\n\n # Copy resulting manpages.\n mandir = os.path.join(os.path.dirname(__file__), \"man\")\n if os.path.exists(mandir):\n shutil.rmtree(mandir)\n shutil.copytree(os.path.join(docdir, \"_build\", \"man\"), mandir)\n\n\n# Build manpages if we're making a source distribution tarball.\nif \"sdist\" in sys.argv:\n build_manpages()\n\n\nsetup(\n name=\"beets\",\n version=\"1.6.1\",\n description=\"music tagger and library organizer\",\n author=\"Adrian Sampson\",\n author_email=\"[email protected]\",\n url=\"https://beets.io/\",\n license=\"MIT\",\n platforms=\"ALL\",\n long_description=_read(\"README.rst\"),\n test_suite=\"test.testall.suite\",\n zip_safe=False,\n include_package_data=True, # Install plugin resources.\n packages=[\n \"beets\",\n \"beets.ui\",\n \"beets.autotag\",\n \"beets.util\",\n \"beets.dbcore\",\n \"beetsplug\",\n \"beetsplug.bpd\",\n \"beetsplug.web\",\n \"beetsplug.lastgenre\",\n 
\"beetsplug.metasync\",\n ],\n entry_points={\n \"console_scripts\": [\n \"beet = beets.ui:main\",\n ],\n },\n install_requires=[\n \"unidecode>=1.3.6\",\n \"musicbrainzngs>=0.4\",\n \"pyyaml\",\n \"mediafile>=0.12.0\",\n \"confuse>=1.5.0\",\n \"munkres>=1.0.0\",\n \"jellyfish\",\n \"typing_extensions\",\n ]\n + (\n # Support for ANSI console colors on Windows.\n [\"colorama\"]\n if (sys.platform == \"win32\")\n else []\n ),\n extras_require={\n \"test\": [\n \"beautifulsoup4\",\n \"coverage\",\n \"flask\",\n \"mock\",\n \"pylast\",\n \"pytest\",\n \"python-mpd2\",\n \"pyxdg\",\n \"responses>=0.3.0\",\n \"requests_oauthlib\",\n \"reflink\",\n \"rarfile\",\n \"python3-discogs-client>=2.3.15\",\n \"py7zr\",\n ],\n \"lint\": [\n \"flake8\",\n \"flake8-docstrings\",\n \"pep8-naming\",\n ],\n \"mypy\": [\n \"mypy\",\n \"types-Pillow\",\n \"types-urllib3\",\n \"types-beautifulsoup4\",\n \"types-PyYAML\",\n \"types-requests\",\n \"types-Flask-Cors\",\n ],\n \"docs\": [\n \"sphinx\",\n \"pydata_sphinx_theme\",\n ],\n # Plugin (optional) dependencies:\n \"absubmit\": [\"requests\"],\n \"fetchart\": [\"requests\", \"Pillow\", \"beautifulsoup4\"],\n \"embedart\": [\"Pillow\"],\n \"embyupdate\": [\"requests\"],\n \"chroma\": [\"pyacoustid\"],\n \"discogs\": [\"python3-discogs-client>=2.3.15\"],\n \"beatport\": [\"requests-oauthlib>=0.6.1\"],\n \"kodiupdate\": [\"requests\"],\n \"lastgenre\": [\"pylast\"],\n \"lastimport\": [\"pylast\"],\n \"lyrics\": [\"requests\", \"beautifulsoup4\", \"langdetect\"],\n \"mpdstats\": [\"python-mpd2>=0.4.2\"],\n \"plexupdate\": [\"requests\"],\n \"web\": [\"flask\", \"flask-cors\"],\n \"import\": [\"rarfile\", \"py7zr\"],\n \"thumbnails\": [\"pyxdg\", \"Pillow\"],\n \"metasync\": [\"dbus-python\"],\n \"sonosupdate\": [\"soco\"],\n \"scrub\": [\"mutagen>=1.33\"],\n \"bpd\": [\"PyGObject\"],\n \"replaygain\": [\"PyGObject\"],\n \"reflink\": [\"reflink\"],\n },\n # Non-Python/non-PyPI plugin dependencies:\n # chroma: chromaprint or fpcalc\n # convert: ffmpeg\n # badfiles: mp3val and flac\n # bpd: python-gi and GStreamer 1.0+\n # embedart: ImageMagick\n # absubmit: extractor binary from https://acousticbrainz.org/download\n # keyfinder: KeyFinder\n # replaygain: python-gi and GStreamer 1.0+\n # or mp3gain/aacgain\n # or Python Audio Tools\n # or ffmpeg\n # ipfs: go-ipfs\n classifiers=[\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Sound/Audio :: Players :: MP3\",\n \"License :: OSI Approved :: MIT License\",\n \"Environment :: Console\",\n \"Environment :: Web Environment\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n ],\n)\n", "path": "setup.py"}, {"content": "AUTHOR = \"Adrian Sampson\"\n\n# General configuration\n\nextensions = [\"sphinx.ext.autodoc\", \"sphinx.ext.extlinks\"]\n\nexclude_patterns = [\"_build\"]\nsource_suffix = \".rst\"\nmaster_doc = \"index\"\n\nproject = \"beets\"\ncopyright = \"2016, Adrian Sampson\"\n\nversion = \"1.6\"\nrelease = \"1.6.1\"\n\npygments_style = \"sphinx\"\n\n# External links to the bug tracker and other sites.\nextlinks = {\n \"bug\": (\"https://github.com/beetbox/beets/issues/%s\", \"#%s\"),\n \"user\": (\"https://github.com/%s\", \"%s\"),\n \"pypi\": (\"https://pypi.org/project/%s/\", \"%s\"),\n \"stdlib\": 
(\"https://docs.python.org/3/library/%s.html\", \"%s\"),\n}\n\nlinkcheck_ignore = [\n r\"https://github.com/beetbox/beets/issues/\",\n r\"https://github.com/[^/]+$\", # ignore user pages\n r\".*localhost.*\",\n r\"https?://127\\.0\\.0\\.1\",\n r\"https://www.musixmatch.com/\", # blocks requests\n r\"https://genius.com/\", # blocks requests\n]\n\n# Options for HTML output\nhtmlhelp_basename = \"beetsdoc\"\n\n# Options for LaTeX output\nlatex_documents = [\n (\"index\", \"beets.tex\", \"beets Documentation\", AUTHOR, \"manual\"),\n]\n\n# Options for manual page output\nman_pages = [\n (\n \"reference/cli\",\n \"beet\",\n \"music tagger and library organizer\",\n [AUTHOR],\n 1,\n ),\n (\n \"reference/config\",\n \"beetsconfig\",\n \"beets configuration file\",\n [AUTHOR],\n 5,\n ),\n]\n\n# Options for pydata theme\nhtml_theme = 'pydata_sphinx_theme'\nhtml_theme_options = {\n 'collapse_navigation': True,\n \"logo\": {\n \"text\": \"beets\",\n },\n \"pygment_light_style\": \"bw\",\n}\nhtml_title = \"beets\"\nhtml_logo = \"_static/beets_logo.png\"\nhtml_static_path = ['_static']\nhtml_css_files = ['beets.css']\n", "path": "docs/conf.py"}]}
2,982
243
gh_patches_debug_63371
rasdani/github-patches
git_diff
mkdocs__mkdocs-190
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make syntax highlighting optional It would be nice to have an option to disable the prettify class from being added to the pre-tag. Personally, I prefer using another highlighter that doesn't rely on extra classes. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mkdocs/build.py` Content: ``` 1 # coding: utf-8 2 from __future__ import print_function 3 4 from mkdocs import nav, toc, utils 5 from mkdocs.compat import urljoin, urlparse, urlunparse, PY2 6 import jinja2 7 import markdown 8 import os 9 import re 10 import json 11 12 13 class PathToURL(object): 14 def __init__(self, template, nav=None): 15 self.template = template 16 self.nav = nav 17 18 def __call__(self, match): 19 url = match.groups()[0] 20 scheme, netloc, path, query, query, fragment = urlparse(url) 21 22 if scheme or netloc: 23 # Ignore URLs unless they are a relative link to a markdown file. 24 return self.template % url 25 26 if self.nav and not utils.is_markdown_file(path): 27 path = utils.create_media_urls(self.nav, [path])[0] 28 elif self.nav: 29 # If the site navigation has been provided, then validate 30 # the internal hyperlink, making sure the target actually exists. 31 target_file = self.nav.file_context.make_absolute(path) 32 if target_file not in self.nav.source_files: 33 source_file = self.nav.file_context.current_file 34 msg = ( 35 'The page "%s" contained a hyperlink to "%s" which ' 36 'is not listed in the "pages" configuration.' 37 ) 38 assert False, msg % (source_file, target_file) 39 path = utils.get_url_path(target_file, self.nav.use_directory_urls) 40 path = self.nav.url_context.make_relative(path) 41 else: 42 path = utils.get_url_path(path).lstrip('/') 43 44 # Convert the .md hyperlink to a relative hyperlink to the HTML page. 45 url = urlunparse((scheme, netloc, path, query, query, fragment)) 46 return self.template % url 47 48 49 def convert_markdown(markdown_source, extensions=()): 50 """ 51 Convert the Markdown source file to HTML content, and additionally 52 return the parsed table of contents, and a dictionary of any metadata 53 that was specified in the Markdown file. 54 55 `extensions` is an optional sequence of Python Markdown extensions to add 56 to the default set. 
57 """ 58 59 # Prepend a table of contents marker for the TOC extension 60 markdown_source = toc.pre_process(markdown_source) 61 62 # Generate the HTML from the markdown source 63 md = markdown.Markdown( 64 extensions=['meta', 'toc', 'tables', 'fenced_code'] + list(extensions) 65 ) 66 html_content = md.convert(markdown_source) 67 meta = md.Meta 68 69 # Strip out the generated table of contents 70 (html_content, toc_html) = toc.post_process(html_content) 71 72 # Post process the generated table of contents into a data structure 73 table_of_contents = toc.TableOfContents(toc_html) 74 75 return (html_content, table_of_contents, meta) 76 77 78 def post_process_html(html_content, nav=None): 79 80 anchor_sub = PathToURL('a href="%s"', nav) 81 html_content = re.sub(r'a href="([^"]*)"', anchor_sub, html_content) 82 83 img_sub = PathToURL('src="%s"', nav) 84 html_content = re.sub(r'src="([^"]*)"', img_sub, html_content) 85 86 html_content = html_content.replace('<pre>', '<pre class="prettyprint well">') 87 88 return html_content 89 90 91 def get_context(page, content, nav, toc, meta, config): 92 site_name = config['site_name'] 93 94 if page.is_homepage or page.title is None: 95 page_title = site_name 96 else: 97 page_title = page.title + ' - ' + site_name 98 99 if page.is_homepage: 100 page_description = config['site_description'] 101 else: 102 page_description = None 103 104 if config['site_url']: 105 base = config['site_url'] 106 if not base.endswith('/'): 107 base += '/' 108 canonical_url = urljoin(base, page.abs_url.lstrip('/')) 109 else: 110 canonical_url = None 111 112 if config['site_favicon']: 113 site_favicon = nav.url_context.make_relative('/' + config['site_favicon']) 114 else: 115 site_favicon = None 116 117 extra_javascript = utils.create_media_urls(nav=nav, url_list=config['extra_javascript']) 118 119 extra_css = utils.create_media_urls(nav=nav, url_list=config['extra_css']) 120 121 return { 122 'site_name': site_name, 123 'site_author': config['site_author'], 124 'favicon': site_favicon, 125 126 'page_title': page_title, 127 'page_description': page_description, 128 129 'content': content, 130 'toc': toc, 131 'nav': nav, 132 'meta': meta, 133 134 'base_url': nav.url_context.make_relative('/'), 135 'homepage_url': nav.homepage.url, 136 'canonical_url': canonical_url, 137 138 'current_page': page, 139 'previous_page': page.previous_page, 140 'next_page': page.next_page, 141 142 # Note that there's intentionally repetition here. Rather than simply 143 # provide the config dictionary we instead pass everything explicitly. 144 # 145 # This helps ensure that we can throughly document the context that 146 # gets passed to themes. 147 'repo_url': config['repo_url'], 148 'repo_name': config['repo_name'], 149 150 'extra_css': extra_css, 151 'extra_javascript': extra_javascript, 152 153 'include_nav': config['include_nav'], 154 'include_next_prev': config['include_next_prev'], 155 'include_search': config['include_search'], 156 157 'copyright': config['copyright'], 158 'google-analytics': config['google-analytics'] 159 } 160 161 162 def build_pages(config, dump_json=False): 163 """ 164 Builds all the pages and writes them into the build directory. 
165 """ 166 site_navigation = nav.SiteNavigation(config['pages'], config['use_directory_urls']) 167 loader = jinja2.FileSystemLoader(config['theme_dir']) 168 env = jinja2.Environment(loader=loader) 169 170 for page in site_navigation.walk_pages(): 171 # Read the input file 172 input_path = os.path.join(config['docs_dir'], page.input_path) 173 input_content = open(input_path, 'r').read() 174 if PY2: 175 input_content = input_content.decode('utf-8') 176 177 # Process the markdown text 178 html_content, table_of_contents, meta = convert_markdown( 179 input_content, extensions=config['markdown_extensions'] 180 ) 181 html_content = post_process_html(html_content, site_navigation) 182 183 context = get_context( 184 page, html_content, site_navigation, 185 table_of_contents, meta, config 186 ) 187 188 # Allow 'template:' override in md source files. 189 if 'template' in meta: 190 template = env.get_template(meta['template'][0]) 191 else: 192 template = env.get_template('base.html') 193 194 # Render the template. 195 output_content = template.render(context) 196 197 # Write the output file. 198 output_path = os.path.join(config['site_dir'], page.output_path) 199 if dump_json: 200 json_context = { 201 'content': context['content'], 202 'title': context['current_page'].title, 203 'url': context['current_page'].abs_url, 204 'language': 'en', 205 } 206 utils.write_file(json.dumps(json_context, indent=4).encode('utf-8'), output_path.replace('.html', '.json')) 207 else: 208 utils.write_file(output_content.encode('utf-8'), output_path) 209 210 211 def build(config, live_server=False, dump_json=False, clean_site_dir=False): 212 """ 213 Perform a full site build. 214 """ 215 if clean_site_dir: 216 print("Cleaning site directory") 217 utils.clean_directory(config['site_dir']) 218 if not live_server: 219 print("Building documentation to directory: %s" % config['site_dir']) 220 if not clean_site_dir and site_directory_contains_stale_files(config['site_dir']): 221 print("Directory %s contains stale files. Use --clean to remove them." % config['site_dir']) 222 223 if dump_json: 224 build_pages(config, dump_json=True) 225 else: 226 # Reversed as we want to take the media files from the builtin theme 227 # and then from the custom theme_dir so the custom versions take take 228 # precedence. 229 for theme_dir in reversed(config['theme_dir']): 230 utils.copy_media_files(theme_dir, config['site_dir']) 231 utils.copy_media_files(config['docs_dir'], config['site_dir']) 232 build_pages(config) 233 234 235 def site_directory_contains_stale_files(site_directory): 236 """ 237 Check if the site directory contains stale files from a previous build. 238 Right now the check returns true if the directory is not empty. 239 A more sophisticated approach should be found to trigger only if there are 240 files that won't be overwritten anyway. 241 """ 242 if os.path.exists(site_directory): 243 if os.listdir(site_directory): 244 return True 245 return False 246 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mkdocs/build.py b/mkdocs/build.py --- a/mkdocs/build.py +++ b/mkdocs/build.py @@ -83,8 +83,6 @@ img_sub = PathToURL('src="%s"', nav) html_content = re.sub(r'src="([^"]*)"', img_sub, html_content) - html_content = html_content.replace('<pre>', '<pre class="prettyprint well">') - return html_content
{"golden_diff": "diff --git a/mkdocs/build.py b/mkdocs/build.py\n--- a/mkdocs/build.py\n+++ b/mkdocs/build.py\n@@ -83,8 +83,6 @@\n img_sub = PathToURL('src=\"%s\"', nav)\n html_content = re.sub(r'src=\"([^\"]*)\"', img_sub, html_content)\n \n- html_content = html_content.replace('<pre>', '<pre class=\"prettyprint well\">')\n-\n return html_content\n", "issue": "Make syntax highlighting optional\nIt would be nice to have an option to disable the prettify class from being added to the pre-tag. Personally, I prefer using another highlighter that doesn't rely on extra classes.\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import print_function\n\nfrom mkdocs import nav, toc, utils\nfrom mkdocs.compat import urljoin, urlparse, urlunparse, PY2\nimport jinja2\nimport markdown\nimport os\nimport re\nimport json\n\n\nclass PathToURL(object):\n def __init__(self, template, nav=None):\n self.template = template\n self.nav = nav\n\n def __call__(self, match):\n url = match.groups()[0]\n scheme, netloc, path, query, query, fragment = urlparse(url)\n\n if scheme or netloc:\n # Ignore URLs unless they are a relative link to a markdown file.\n return self.template % url\n\n if self.nav and not utils.is_markdown_file(path):\n path = utils.create_media_urls(self.nav, [path])[0]\n elif self.nav:\n # If the site navigation has been provided, then validate\n # the internal hyperlink, making sure the target actually exists.\n target_file = self.nav.file_context.make_absolute(path)\n if target_file not in self.nav.source_files:\n source_file = self.nav.file_context.current_file\n msg = (\n 'The page \"%s\" contained a hyperlink to \"%s\" which '\n 'is not listed in the \"pages\" configuration.'\n )\n assert False, msg % (source_file, target_file)\n path = utils.get_url_path(target_file, self.nav.use_directory_urls)\n path = self.nav.url_context.make_relative(path)\n else:\n path = utils.get_url_path(path).lstrip('/')\n\n # Convert the .md hyperlink to a relative hyperlink to the HTML page.\n url = urlunparse((scheme, netloc, path, query, query, fragment))\n return self.template % url\n\n\ndef convert_markdown(markdown_source, extensions=()):\n \"\"\"\n Convert the Markdown source file to HTML content, and additionally\n return the parsed table of contents, and a dictionary of any metadata\n that was specified in the Markdown file.\n\n `extensions` is an optional sequence of Python Markdown extensions to add\n to the default set.\n \"\"\"\n\n # Prepend a table of contents marker for the TOC extension\n markdown_source = toc.pre_process(markdown_source)\n\n # Generate the HTML from the markdown source\n md = markdown.Markdown(\n extensions=['meta', 'toc', 'tables', 'fenced_code'] + list(extensions)\n )\n html_content = md.convert(markdown_source)\n meta = md.Meta\n\n # Strip out the generated table of contents\n (html_content, toc_html) = toc.post_process(html_content)\n\n # Post process the generated table of contents into a data structure\n table_of_contents = toc.TableOfContents(toc_html)\n\n return (html_content, table_of_contents, meta)\n\n\ndef post_process_html(html_content, nav=None):\n\n anchor_sub = PathToURL('a href=\"%s\"', nav)\n html_content = re.sub(r'a href=\"([^\"]*)\"', anchor_sub, html_content)\n\n img_sub = PathToURL('src=\"%s\"', nav)\n html_content = re.sub(r'src=\"([^\"]*)\"', img_sub, html_content)\n\n html_content = html_content.replace('<pre>', '<pre class=\"prettyprint well\">')\n\n return html_content\n\n\ndef get_context(page, content, nav, toc, meta, config):\n 
site_name = config['site_name']\n\n if page.is_homepage or page.title is None:\n page_title = site_name\n else:\n page_title = page.title + ' - ' + site_name\n\n if page.is_homepage:\n page_description = config['site_description']\n else:\n page_description = None\n\n if config['site_url']:\n base = config['site_url']\n if not base.endswith('/'):\n base += '/'\n canonical_url = urljoin(base, page.abs_url.lstrip('/'))\n else:\n canonical_url = None\n\n if config['site_favicon']:\n site_favicon = nav.url_context.make_relative('/' + config['site_favicon'])\n else:\n site_favicon = None\n\n extra_javascript = utils.create_media_urls(nav=nav, url_list=config['extra_javascript'])\n\n extra_css = utils.create_media_urls(nav=nav, url_list=config['extra_css'])\n\n return {\n 'site_name': site_name,\n 'site_author': config['site_author'],\n 'favicon': site_favicon,\n\n 'page_title': page_title,\n 'page_description': page_description,\n\n 'content': content,\n 'toc': toc,\n 'nav': nav,\n 'meta': meta,\n\n 'base_url': nav.url_context.make_relative('/'),\n 'homepage_url': nav.homepage.url,\n 'canonical_url': canonical_url,\n\n 'current_page': page,\n 'previous_page': page.previous_page,\n 'next_page': page.next_page,\n\n # Note that there's intentionally repetition here. Rather than simply\n # provide the config dictionary we instead pass everything explicitly.\n #\n # This helps ensure that we can throughly document the context that\n # gets passed to themes.\n 'repo_url': config['repo_url'],\n 'repo_name': config['repo_name'],\n\n 'extra_css': extra_css,\n 'extra_javascript': extra_javascript,\n\n 'include_nav': config['include_nav'],\n 'include_next_prev': config['include_next_prev'],\n 'include_search': config['include_search'],\n\n 'copyright': config['copyright'],\n 'google-analytics': config['google-analytics']\n }\n\n\ndef build_pages(config, dump_json=False):\n \"\"\"\n Builds all the pages and writes them into the build directory.\n \"\"\"\n site_navigation = nav.SiteNavigation(config['pages'], config['use_directory_urls'])\n loader = jinja2.FileSystemLoader(config['theme_dir'])\n env = jinja2.Environment(loader=loader)\n\n for page in site_navigation.walk_pages():\n # Read the input file\n input_path = os.path.join(config['docs_dir'], page.input_path)\n input_content = open(input_path, 'r').read()\n if PY2:\n input_content = input_content.decode('utf-8')\n\n # Process the markdown text\n html_content, table_of_contents, meta = convert_markdown(\n input_content, extensions=config['markdown_extensions']\n )\n html_content = post_process_html(html_content, site_navigation)\n\n context = get_context(\n page, html_content, site_navigation,\n table_of_contents, meta, config\n )\n\n # Allow 'template:' override in md source files.\n if 'template' in meta:\n template = env.get_template(meta['template'][0])\n else:\n template = env.get_template('base.html')\n\n # Render the template.\n output_content = template.render(context)\n\n # Write the output file.\n output_path = os.path.join(config['site_dir'], page.output_path)\n if dump_json:\n json_context = {\n 'content': context['content'],\n 'title': context['current_page'].title,\n 'url': context['current_page'].abs_url,\n 'language': 'en',\n }\n utils.write_file(json.dumps(json_context, indent=4).encode('utf-8'), output_path.replace('.html', '.json'))\n else:\n utils.write_file(output_content.encode('utf-8'), output_path)\n\n\ndef build(config, live_server=False, dump_json=False, clean_site_dir=False):\n \"\"\"\n Perform a full site build.\n \"\"\"\n 
if clean_site_dir:\n print(\"Cleaning site directory\")\n utils.clean_directory(config['site_dir'])\n if not live_server:\n print(\"Building documentation to directory: %s\" % config['site_dir'])\n if not clean_site_dir and site_directory_contains_stale_files(config['site_dir']):\n print(\"Directory %s contains stale files. Use --clean to remove them.\" % config['site_dir'])\n\n if dump_json:\n build_pages(config, dump_json=True)\n else:\n # Reversed as we want to take the media files from the builtin theme\n # and then from the custom theme_dir so the custom versions take take\n # precedence.\n for theme_dir in reversed(config['theme_dir']):\n utils.copy_media_files(theme_dir, config['site_dir'])\n utils.copy_media_files(config['docs_dir'], config['site_dir'])\n build_pages(config)\n\n\ndef site_directory_contains_stale_files(site_directory):\n \"\"\"\n Check if the site directory contains stale files from a previous build.\n Right now the check returns true if the directory is not empty.\n A more sophisticated approach should be found to trigger only if there are\n files that won't be overwritten anyway.\n \"\"\"\n if os.path.exists(site_directory):\n if os.listdir(site_directory):\n return True\n return False\n", "path": "mkdocs/build.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import print_function\n\nfrom mkdocs import nav, toc, utils\nfrom mkdocs.compat import urljoin, urlparse, urlunparse, PY2\nimport jinja2\nimport markdown\nimport os\nimport re\nimport json\n\n\nclass PathToURL(object):\n def __init__(self, template, nav=None):\n self.template = template\n self.nav = nav\n\n def __call__(self, match):\n url = match.groups()[0]\n scheme, netloc, path, query, query, fragment = urlparse(url)\n\n if scheme or netloc:\n # Ignore URLs unless they are a relative link to a markdown file.\n return self.template % url\n\n if self.nav and not utils.is_markdown_file(path):\n path = utils.create_media_urls(self.nav, [path])[0]\n elif self.nav:\n # If the site navigation has been provided, then validate\n # the internal hyperlink, making sure the target actually exists.\n target_file = self.nav.file_context.make_absolute(path)\n if target_file not in self.nav.source_files:\n source_file = self.nav.file_context.current_file\n msg = (\n 'The page \"%s\" contained a hyperlink to \"%s\" which '\n 'is not listed in the \"pages\" configuration.'\n )\n assert False, msg % (source_file, target_file)\n path = utils.get_url_path(target_file, self.nav.use_directory_urls)\n path = self.nav.url_context.make_relative(path)\n else:\n path = utils.get_url_path(path).lstrip('/')\n\n # Convert the .md hyperlink to a relative hyperlink to the HTML page.\n url = urlunparse((scheme, netloc, path, query, query, fragment))\n return self.template % url\n\n\ndef convert_markdown(markdown_source, extensions=()):\n \"\"\"\n Convert the Markdown source file to HTML content, and additionally\n return the parsed table of contents, and a dictionary of any metadata\n that was specified in the Markdown file.\n\n `extensions` is an optional sequence of Python Markdown extensions to add\n to the default set.\n \"\"\"\n\n # Prepend a table of contents marker for the TOC extension\n markdown_source = toc.pre_process(markdown_source)\n\n # Generate the HTML from the markdown source\n md = markdown.Markdown(\n extensions=['meta', 'toc', 'tables', 'fenced_code'] + list(extensions)\n )\n html_content = md.convert(markdown_source)\n meta = md.Meta\n\n # Strip out the generated table of contents\n (html_content, 
toc_html) = toc.post_process(html_content)\n\n # Post process the generated table of contents into a data structure\n table_of_contents = toc.TableOfContents(toc_html)\n\n return (html_content, table_of_contents, meta)\n\n\ndef post_process_html(html_content, nav=None):\n\n anchor_sub = PathToURL('a href=\"%s\"', nav)\n html_content = re.sub(r'a href=\"([^\"]*)\"', anchor_sub, html_content)\n\n img_sub = PathToURL('src=\"%s\"', nav)\n html_content = re.sub(r'src=\"([^\"]*)\"', img_sub, html_content)\n\n return html_content\n\n\ndef get_context(page, content, nav, toc, meta, config):\n site_name = config['site_name']\n\n if page.is_homepage or page.title is None:\n page_title = site_name\n else:\n page_title = page.title + ' - ' + site_name\n\n if page.is_homepage:\n page_description = config['site_description']\n else:\n page_description = None\n\n if config['site_url']:\n base = config['site_url']\n if not base.endswith('/'):\n base += '/'\n canonical_url = urljoin(base, page.abs_url.lstrip('/'))\n else:\n canonical_url = None\n\n if config['site_favicon']:\n site_favicon = nav.url_context.make_relative('/' + config['site_favicon'])\n else:\n site_favicon = None\n\n extra_javascript = utils.create_media_urls(nav=nav, url_list=config['extra_javascript'])\n\n extra_css = utils.create_media_urls(nav=nav, url_list=config['extra_css'])\n\n return {\n 'site_name': site_name,\n 'site_author': config['site_author'],\n 'favicon': site_favicon,\n\n 'page_title': page_title,\n 'page_description': page_description,\n\n 'content': content,\n 'toc': toc,\n 'nav': nav,\n 'meta': meta,\n\n 'base_url': nav.url_context.make_relative('/'),\n 'homepage_url': nav.homepage.url,\n 'canonical_url': canonical_url,\n\n 'current_page': page,\n 'previous_page': page.previous_page,\n 'next_page': page.next_page,\n\n # Note that there's intentionally repetition here. 
Rather than simply\n # provide the config dictionary we instead pass everything explicitly.\n #\n # This helps ensure that we can throughly document the context that\n # gets passed to themes.\n 'repo_url': config['repo_url'],\n 'repo_name': config['repo_name'],\n\n 'extra_css': extra_css,\n 'extra_javascript': extra_javascript,\n\n 'include_nav': config['include_nav'],\n 'include_next_prev': config['include_next_prev'],\n 'include_search': config['include_search'],\n\n 'copyright': config['copyright'],\n 'google-analytics': config['google-analytics']\n }\n\n\ndef build_pages(config, dump_json=False):\n \"\"\"\n Builds all the pages and writes them into the build directory.\n \"\"\"\n site_navigation = nav.SiteNavigation(config['pages'], config['use_directory_urls'])\n loader = jinja2.FileSystemLoader(config['theme_dir'])\n env = jinja2.Environment(loader=loader)\n\n for page in site_navigation.walk_pages():\n # Read the input file\n input_path = os.path.join(config['docs_dir'], page.input_path)\n input_content = open(input_path, 'r').read()\n if PY2:\n input_content = input_content.decode('utf-8')\n\n # Process the markdown text\n html_content, table_of_contents, meta = convert_markdown(\n input_content, extensions=config['markdown_extensions']\n )\n html_content = post_process_html(html_content, site_navigation)\n\n context = get_context(\n page, html_content, site_navigation,\n table_of_contents, meta, config\n )\n\n # Allow 'template:' override in md source files.\n if 'template' in meta:\n template = env.get_template(meta['template'][0])\n else:\n template = env.get_template('base.html')\n\n # Render the template.\n output_content = template.render(context)\n\n # Write the output file.\n output_path = os.path.join(config['site_dir'], page.output_path)\n if dump_json:\n json_context = {\n 'content': context['content'],\n 'title': context['current_page'].title,\n 'url': context['current_page'].abs_url,\n 'language': 'en',\n }\n utils.write_file(json.dumps(json_context, indent=4).encode('utf-8'), output_path.replace('.html', '.json'))\n else:\n utils.write_file(output_content.encode('utf-8'), output_path)\n\n\ndef build(config, live_server=False, dump_json=False, clean_site_dir=False):\n \"\"\"\n Perform a full site build.\n \"\"\"\n if clean_site_dir:\n print(\"Cleaning site directory\")\n utils.clean_directory(config['site_dir'])\n if not live_server:\n print(\"Building documentation to directory: %s\" % config['site_dir'])\n if not clean_site_dir and site_directory_contains_stale_files(config['site_dir']):\n print(\"Directory %s contains stale files. Use --clean to remove them.\" % config['site_dir'])\n\n if dump_json:\n build_pages(config, dump_json=True)\n else:\n # Reversed as we want to take the media files from the builtin theme\n # and then from the custom theme_dir so the custom versions take take\n # precedence.\n for theme_dir in reversed(config['theme_dir']):\n utils.copy_media_files(theme_dir, config['site_dir'])\n utils.copy_media_files(config['docs_dir'], config['site_dir'])\n build_pages(config)\n\n\ndef site_directory_contains_stale_files(site_directory):\n \"\"\"\n Check if the site directory contains stale files from a previous build.\n Right now the check returns true if the directory is not empty.\n A more sophisticated approach should be found to trigger only if there are\n files that won't be overwritten anyway.\n \"\"\"\n if os.path.exists(site_directory):\n if os.listdir(site_directory):\n return True\n return False\n", "path": "mkdocs/build.py"}]}
2838
105
gh_patches_debug_1475
rasdani/github-patches
git_diff
graspologic-org__graspologic-654
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Possible issue with direct import ``` import graspologic dir(graspologic) ``` returns ``` ['__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__path__', '__spec__', '__version', '__version__', 'graspologic', 'layouts', 'models', 'partition', 'plot', 'preprocessing', 'subgraph', 'version'] ``` and is missing lots of modules (align, cluster, datasets, embed, inference, match, nominate, pipeline, utils). Is this intentional? [BUG] Possible issue with direct import ``` import graspologic dir(graspologic) ``` returns ``` ['__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__path__', '__spec__', '__version', '__version__', 'graspologic', 'layouts', 'models', 'partition', 'plot', 'preprocessing', 'subgraph', 'version'] ``` and is missing lots of modules (align, cluster, datasets, embed, inference, match, nominate, pipeline, utils). Is this intentional? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `graspologic/__init__.py` Content: ``` 1 # Copyright (c) Microsoft Corporation and contributors. 2 # Licensed under the MIT License. 3 4 import graspologic.align 5 import graspologic.cluster 6 import graspologic.datasets 7 import graspologic.embed 8 import graspologic.inference 9 import graspologic.layouts 10 import graspologic.models 11 import graspologic.partition 12 import graspologic.preprocessing 13 import graspologic.plot 14 import graspologic.simulations 15 import graspologic.subgraph 16 import graspologic.utils 17 18 from graspologic.version import __version 19 20 __version__ = __version() 21 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/graspologic/__init__.py b/graspologic/__init__.py --- a/graspologic/__init__.py +++ b/graspologic/__init__.py @@ -8,6 +8,7 @@ import graspologic.inference import graspologic.layouts import graspologic.models +import graspologic.nominate import graspologic.partition import graspologic.preprocessing import graspologic.plot
{"golden_diff": "diff --git a/graspologic/__init__.py b/graspologic/__init__.py\n--- a/graspologic/__init__.py\n+++ b/graspologic/__init__.py\n@@ -8,6 +8,7 @@\n import graspologic.inference\n import graspologic.layouts\n import graspologic.models\n+import graspologic.nominate\n import graspologic.partition\n import graspologic.preprocessing\n import graspologic.plot\n", "issue": "[BUG] Possible issue with direct import\n```\r\nimport graspologic\r\ndir(graspologic)\r\n```\r\nreturns \r\n\r\n```\r\n['__builtins__',\r\n '__cached__',\r\n '__doc__',\r\n '__file__',\r\n '__loader__',\r\n '__name__',\r\n '__package__',\r\n '__path__',\r\n '__spec__',\r\n '__version',\r\n '__version__',\r\n 'graspologic',\r\n 'layouts',\r\n 'models',\r\n 'partition',\r\n 'plot',\r\n 'preprocessing',\r\n 'subgraph',\r\n 'version']\r\n```\r\n\r\nand is missing lots of modules (align, cluster, datasets, embed, inference, match, nominate, pipeline, utils).\r\nIs this intentional?\n[BUG] Possible issue with direct import\n```\r\nimport graspologic\r\ndir(graspologic)\r\n```\r\nreturns \r\n\r\n```\r\n['__builtins__',\r\n '__cached__',\r\n '__doc__',\r\n '__file__',\r\n '__loader__',\r\n '__name__',\r\n '__package__',\r\n '__path__',\r\n '__spec__',\r\n '__version',\r\n '__version__',\r\n 'graspologic',\r\n 'layouts',\r\n 'models',\r\n 'partition',\r\n 'plot',\r\n 'preprocessing',\r\n 'subgraph',\r\n 'version']\r\n```\r\n\r\nand is missing lots of modules (align, cluster, datasets, embed, inference, match, nominate, pipeline, utils).\r\nIs this intentional?\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport graspologic.align\nimport graspologic.cluster\nimport graspologic.datasets\nimport graspologic.embed\nimport graspologic.inference\nimport graspologic.layouts\nimport graspologic.models\nimport graspologic.partition\nimport graspologic.preprocessing\nimport graspologic.plot\nimport graspologic.simulations\nimport graspologic.subgraph\nimport graspologic.utils\n\nfrom graspologic.version import __version\n\n__version__ = __version()\n", "path": "graspologic/__init__.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport graspologic.align\nimport graspologic.cluster\nimport graspologic.datasets\nimport graspologic.embed\nimport graspologic.inference\nimport graspologic.layouts\nimport graspologic.models\nimport graspologic.nominate\nimport graspologic.partition\nimport graspologic.preprocessing\nimport graspologic.plot\nimport graspologic.simulations\nimport graspologic.subgraph\nimport graspologic.utils\n\nfrom graspologic.version import __version\n\n__version__ = __version()\n", "path": "graspologic/__init__.py"}]}
655
88
gh_patches_debug_37333
rasdani/github-patches
git_diff
sublimelsp__LSP-2024
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Provide an optional response handler in LspExecuteCommand **Is your feature request related to a problem? Please describe.** I would like to extends `LspExecuteCommand` in order to send a `workspace/executeCommand` but `LspExecuteCommand` simply [logs](https://github.com/sublimelsp/LSP/blob/acfd6406ba4680a0e537dc87a72aa5b410a154e7/plugin/execute_command.py#L47) the response. In my case I have to open a file URI that is in the reponse. **Describe the solution you'd like** `LspExecuteCommand` should provide an optional response handler. When missing simply log the response as the case now. Otherwise delegate the response handling. **Describe alternatives you've considered** The current alternative is to copy `LspExecuteCommand` logic. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `boot.py` Content: ``` 1 import os 2 import sublime 3 import sublime_plugin 4 5 # Please keep this list sorted (Edit -> Sort Lines) 6 from .plugin.code_actions import LspCodeActionsCommand 7 from .plugin.code_lens import LspCodeLensCommand 8 from .plugin.completion import LspResolveDocsCommand 9 from .plugin.completion import LspSelectCompletionItemCommand 10 from .plugin.configuration import LspDisableLanguageServerGloballyCommand 11 from .plugin.configuration import LspDisableLanguageServerInProjectCommand 12 from .plugin.configuration import LspEnableLanguageServerGloballyCommand 13 from .plugin.configuration import LspEnableLanguageServerInProjectCommand 14 from .plugin.core.collections import DottedDict 15 from .plugin.core.css import load as load_css 16 from .plugin.core.logging import exception_log 17 from .plugin.core.open import opening_files 18 from .plugin.core.panels import destroy_output_panels 19 from .plugin.core.panels import LspClearPanelCommand 20 from .plugin.core.panels import LspUpdatePanelCommand 21 from .plugin.core.panels import LspUpdateServerPanelCommand 22 from .plugin.core.panels import WindowPanelListener 23 from .plugin.core.protocol import Location 24 from .plugin.core.registry import LspRecheckSessionsCommand 25 from .plugin.core.registry import LspRestartServerCommand 26 from .plugin.core.registry import windows 27 from .plugin.core.sessions import AbstractPlugin 28 from .plugin.core.sessions import register_plugin 29 from .plugin.core.settings import client_configs 30 from .plugin.core.settings import load_settings 31 from .plugin.core.settings import unload_settings 32 from .plugin.core.signature_help import LspSignatureHelpNavigateCommand 33 from .plugin.core.signature_help import LspSignatureHelpShowCommand 34 from .plugin.core.transports import kill_all_subprocesses 35 from .plugin.core.typing import Any, Optional, List, Type, Dict 36 from .plugin.core.views import get_uri_and_position_from_location 37 from .plugin.core.views import LspRunTextCommandHelperCommand 38 from .plugin.document_link import LspOpenLinkCommand 39 from .plugin.documents import DocumentSyncListener 40 from .plugin.documents import TextChangeListener 41 from .plugin.edit import LspApplyDocumentEditCommand 42 from .plugin.execute_command import LspExecuteCommand 43 from .plugin.formatting import LspFormatDocumentCommand 44 from .plugin.formatting import LspFormatDocumentRangeCommand 45 from .plugin.goto import LspSymbolDeclarationCommand 46 from .plugin.goto import LspSymbolDefinitionCommand 47 from 
.plugin.goto import LspSymbolImplementationCommand 48 from .plugin.goto import LspSymbolTypeDefinitionCommand 49 from .plugin.goto_diagnostic import LspGotoDiagnosticCommand 50 from .plugin.hover import LspHoverCommand 51 from .plugin.inlay_hint import LspInlayHintClickCommand 52 from .plugin.panels import LspShowDiagnosticsPanelCommand 53 from .plugin.panels import LspToggleServerPanelCommand 54 from .plugin.references import LspSymbolReferencesCommand 55 from .plugin.rename import LspSymbolRenameCommand 56 from .plugin.save_command import LspSaveAllCommand 57 from .plugin.save_command import LspSaveCommand 58 from .plugin.selection_range import LspExpandSelectionCommand 59 from .plugin.semantic_highlighting import LspShowScopeNameCommand 60 from .plugin.symbols import LspDocumentSymbolsCommand 61 from .plugin.symbols import LspSelectionAddCommand 62 from .plugin.symbols import LspSelectionClearCommand 63 from .plugin.symbols import LspSelectionSetCommand 64 from .plugin.symbols import LspWorkspaceSymbolsCommand 65 from .plugin.tooling import LspCopyToClipboardFromBase64Command 66 from .plugin.tooling import LspDumpBufferCapabilities 67 from .plugin.tooling import LspDumpWindowConfigs 68 from .plugin.tooling import LspParseVscodePackageJson 69 from .plugin.tooling import LspTroubleshootServerCommand 70 71 72 def _get_final_subclasses(derived: List[Type], results: List[Type]) -> None: 73 for d in derived: 74 d_subclasses = d.__subclasses__() 75 if len(d_subclasses) > 0: 76 _get_final_subclasses(d_subclasses, results) 77 else: 78 results.append(d) 79 80 81 def _register_all_plugins() -> None: 82 plugin_classes = [] # type: List[Type[AbstractPlugin]] 83 _get_final_subclasses(AbstractPlugin.__subclasses__(), plugin_classes) 84 for plugin_class in plugin_classes: 85 try: 86 if not plugin_class.name(): 87 continue 88 except NotImplementedError: 89 continue 90 register_plugin(plugin_class, notify_listener=False) 91 92 93 def _unregister_all_plugins() -> None: 94 from LSP.plugin.core.sessions import _plugins 95 _plugins.clear() 96 client_configs.external.clear() 97 client_configs.all.clear() 98 99 100 def plugin_loaded() -> None: 101 load_settings() 102 load_css() 103 _register_all_plugins() 104 client_configs.update_configs() 105 for window in sublime.windows(): 106 windows.lookup(window) 107 108 109 def plugin_unloaded() -> None: 110 _unregister_all_plugins() 111 for window in sublime.windows(): 112 destroy_output_panels(window) # references and diagnostics panels 113 try: 114 windows.lookup(window).plugin_unloaded() 115 windows.discard(window) 116 except Exception as ex: 117 exception_log("failed to unload window", ex) 118 unload_settings() 119 120 121 class Listener(sublime_plugin.EventListener): 122 123 def on_exit(self) -> None: 124 kill_all_subprocesses() 125 126 def on_load_project_async(self, w: sublime.Window) -> None: 127 windows.lookup(w).on_load_project_async() 128 129 def on_post_save_project_async(self, w: sublime.Window) -> None: 130 windows.lookup(w).on_post_save_project_async() 131 132 def on_new_window_async(self, w: sublime.Window) -> None: 133 sublime.set_timeout(lambda: windows.lookup(w)) 134 135 def on_pre_close_window(self, w: sublime.Window) -> None: 136 windows.discard(w) 137 138 # Note: EventListener.on_post_move_async does not fire when a tab is moved out of the current window in such a way 139 # that a new window is created: https://github.com/sublimehq/sublime_text/issues/4630 140 # Hence, as a workaround we use on_pre_move, which still works in that case. 
141 def on_pre_move(self, view: sublime.View) -> None: 142 listeners = sublime_plugin.view_event_listeners.get(view.id()) 143 if not isinstance(listeners, list): 144 return 145 for listener in listeners: 146 if isinstance(listener, DocumentSyncListener): 147 # we need a small delay here, so that the DocumentSyncListener will recognize a possible new window 148 sublime.set_timeout_async(listener.on_post_move_window_async, timeout_ms=1) 149 return 150 151 def on_load(self, view: sublime.View) -> None: 152 file_name = view.file_name() 153 if not file_name: 154 return 155 for fn in opening_files.keys(): 156 if fn == file_name or os.path.samefile(fn, file_name): 157 # Remove it from the pending opening files, and resolve the promise. 158 opening_files.pop(fn)[1](view) 159 break 160 161 def on_pre_close(self, view: sublime.View) -> None: 162 file_name = view.file_name() 163 if not file_name: 164 return 165 for fn in opening_files.keys(): 166 if fn == file_name or os.path.samefile(fn, file_name): 167 tup = opening_files.pop(fn, None) 168 if tup: 169 # The view got closed before it finished loading. This can happen. 170 tup[1](None) 171 break 172 173 def on_post_window_command(self, window: sublime.Window, command_name: str, args: Optional[Dict[str, Any]]) -> None: 174 if command_name in ("next_result", "prev_result"): 175 view = window.active_view() 176 if view: 177 view.run_command("lsp_hover", {"only_diagnostics": True}) 178 179 180 class LspOpenLocationCommand(sublime_plugin.TextCommand): 181 """ 182 A command to be used by third-party ST packages that need to open an URI with some abstract scheme. 183 """ 184 185 def run( 186 self, 187 _: sublime.Edit, 188 location: Location, 189 session_name: Optional[str] = None, 190 flags: int = 0, 191 group: int = -1 192 ) -> None: 193 sublime.set_timeout_async(lambda: self._run_async(location, session_name, flags, group)) 194 195 def _run_async(self, location: Location, session_name: Optional[str], flags: int = 0, group: int = -1) -> None: 196 window = self.view.window() 197 if not window: 198 return 199 windows.lookup(window).open_location_async(location, session_name, self.view, flags, group).then( 200 lambda success: self._handle_continuation(location, success)) 201 202 def _handle_continuation(self, location: Location, success: bool) -> None: 203 if not success: 204 uri, _ = get_uri_and_position_from_location(location) 205 message = "Failed to open {}".format(uri) 206 sublime.status_message(message) 207 ``` Path: `plugin/execute_command.py` Content: ``` 1 import sublime 2 from .core.protocol import Error 3 from .core.protocol import ExecuteCommandParams 4 from .core.registry import LspTextCommand 5 from .core.registry import windows 6 from .core.typing import List, Optional, Any 7 from .core.views import first_selection_region 8 from .core.views import uri_from_view, offset_to_point, region_to_range, text_document_identifier 9 10 11 class LspExecuteCommand(LspTextCommand): 12 13 def run(self, 14 edit: sublime.Edit, 15 command_name: Optional[str] = None, 16 command_args: Optional[List[Any]] = None, 17 session_name: Optional[str] = None, 18 event: Optional[dict] = None) -> None: 19 # Handle VSCode-specific command for triggering AC/sighelp 20 if command_name == "editor.action.triggerSuggest": 21 # Triggered from set_timeout as suggestions popup doesn't trigger otherwise. 
22 return sublime.set_timeout(lambda: self.view.run_command("auto_complete")) 23 if command_name == "editor.action.triggerParameterHints": 24 25 def run_async() -> None: 26 listener = windows.listener_for_view(self.view) 27 if listener: 28 listener.do_signature_help_async(manual=False) 29 30 return sublime.set_timeout_async(run_async) 31 session = self.session_by_name(session_name if session_name else self.session_name) 32 if session and command_name: 33 params = {"command": command_name} # type: ExecuteCommandParams 34 if command_args: 35 params["arguments"] = self._expand_variables(command_args) 36 37 def handle_response(response: Any) -> None: 38 assert command_name 39 if isinstance(response, Error): 40 sublime.message_dialog("command {} failed. Reason: {}".format(command_name, str(response))) 41 return 42 msg = "command {} completed".format(command_name) 43 if response: 44 msg += "with response: {}".format(response) 45 window = self.view.window() 46 if window: 47 window.status_message(msg) 48 49 session.execute_command(params, progress=True).then(handle_response) 50 51 def _expand_variables(self, command_args: List[Any]) -> List[Any]: 52 view = self.view # type: sublime.View 53 region = first_selection_region(view) 54 for i, arg in enumerate(command_args): 55 if arg in ["$document_id", "${document_id}"]: 56 command_args[i] = text_document_identifier(view) 57 elif arg in ["$file_uri", "${file_uri}"]: 58 command_args[i] = uri_from_view(view) 59 elif region is not None: 60 if arg in ["$selection", "${selection}"]: 61 command_args[i] = view.substr(region) 62 elif arg in ["$offset", "${offset}"]: 63 command_args[i] = region.b 64 elif arg in ["$selection_begin", "${selection_begin}"]: 65 command_args[i] = region.begin() 66 elif arg in ["$selection_end", "${selection_end}"]: 67 command_args[i] = region.end() 68 elif arg in ["$position", "${position}"]: 69 command_args[i] = offset_to_point(view, region.b).to_lsp() 70 elif arg in ["$range", "${range}"]: 71 command_args[i] = region_to_range(view, region).to_lsp() 72 window = view.window() 73 window_variables = window.extract_variables() if window else {} 74 return sublime.expand_variables(command_args, window_variables) 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/boot.py b/boot.py --- a/boot.py +++ b/boot.py @@ -20,6 +20,7 @@ from .plugin.core.panels import LspUpdatePanelCommand from .plugin.core.panels import LspUpdateServerPanelCommand from .plugin.core.panels import WindowPanelListener +from .plugin.core.protocol import Error from .plugin.core.protocol import Location from .plugin.core.registry import LspRecheckSessionsCommand from .plugin.core.registry import LspRestartServerCommand diff --git a/plugin/execute_command.py b/plugin/execute_command.py --- a/plugin/execute_command.py +++ b/plugin/execute_command.py @@ -9,6 +9,9 @@ class LspExecuteCommand(LspTextCommand): + """ + Helper command for triggering workspace/executeCommand requests. + """ def run(self, edit: sublime.Edit, @@ -37,17 +40,33 @@ def handle_response(response: Any) -> None: assert command_name if isinstance(response, Error): - sublime.message_dialog("command {} failed. Reason: {}".format(command_name, str(response))) + self.handle_error_async(response, command_name) return - msg = "command {} completed".format(command_name) - if response: - msg += "with response: {}".format(response) - window = self.view.window() - if window: - window.status_message(msg) + self.handle_success_async(response, command_name) session.execute_command(params, progress=True).then(handle_response) + def handle_success_async(self, result: Any, command_name: str) -> None: + """ + Override this method to handle successful response to workspace/executeCommand. + + :param result: The result returned from the server. + :param command_name: The name of the command that was executed. + """ + msg = "command {} completed".format(command_name) + window = self.view.window() + if window: + window.status_message(msg) + + def handle_error_async(self, error: Error, command_name: str) -> None: + """ + Override this method to handle failed response to workspace/executeCommand. + + :param error: The Error object. + :param command_name: The name of the command that was executed. + """ + sublime.message_dialog("command {} failed. Reason: {}".format(command_name, str(error))) + def _expand_variables(self, command_args: List[Any]) -> List[Any]: view = self.view # type: sublime.View region = first_selection_region(view)
{"golden_diff": "diff --git a/boot.py b/boot.py\n--- a/boot.py\n+++ b/boot.py\n@@ -20,6 +20,7 @@\n from .plugin.core.panels import LspUpdatePanelCommand\n from .plugin.core.panels import LspUpdateServerPanelCommand\n from .plugin.core.panels import WindowPanelListener\n+from .plugin.core.protocol import Error\n from .plugin.core.protocol import Location\n from .plugin.core.registry import LspRecheckSessionsCommand\n from .plugin.core.registry import LspRestartServerCommand\ndiff --git a/plugin/execute_command.py b/plugin/execute_command.py\n--- a/plugin/execute_command.py\n+++ b/plugin/execute_command.py\n@@ -9,6 +9,9 @@\n \n \n class LspExecuteCommand(LspTextCommand):\n+ \"\"\"\n+ Helper command for triggering workspace/executeCommand requests.\n+ \"\"\"\n \n def run(self,\n edit: sublime.Edit,\n@@ -37,17 +40,33 @@\n def handle_response(response: Any) -> None:\n assert command_name\n if isinstance(response, Error):\n- sublime.message_dialog(\"command {} failed. Reason: {}\".format(command_name, str(response)))\n+ self.handle_error_async(response, command_name)\n return\n- msg = \"command {} completed\".format(command_name)\n- if response:\n- msg += \"with response: {}\".format(response)\n- window = self.view.window()\n- if window:\n- window.status_message(msg)\n+ self.handle_success_async(response, command_name)\n \n session.execute_command(params, progress=True).then(handle_response)\n \n+ def handle_success_async(self, result: Any, command_name: str) -> None:\n+ \"\"\"\n+ Override this method to handle successful response to workspace/executeCommand.\n+\n+ :param result: The result returned from the server.\n+ :param command_name: The name of the command that was executed.\n+ \"\"\"\n+ msg = \"command {} completed\".format(command_name)\n+ window = self.view.window()\n+ if window:\n+ window.status_message(msg)\n+\n+ def handle_error_async(self, error: Error, command_name: str) -> None:\n+ \"\"\"\n+ Override this method to handle failed response to workspace/executeCommand.\n+\n+ :param error: The Error object.\n+ :param command_name: The name of the command that was executed.\n+ \"\"\"\n+ sublime.message_dialog(\"command {} failed. Reason: {}\".format(command_name, str(error)))\n+\n def _expand_variables(self, command_args: List[Any]) -> List[Any]:\n view = self.view # type: sublime.View\n region = first_selection_region(view)\n", "issue": "Provide an optional response handler in LspExecuteCommand\n**Is your feature request related to a problem? Please describe.**\r\nI would like to extends `LspExecuteCommand` in order to send a `workspace/executeCommand` but `LspExecuteCommand` simply [logs](https://github.com/sublimelsp/LSP/blob/acfd6406ba4680a0e537dc87a72aa5b410a154e7/plugin/execute_command.py#L47) the response. In my case I have to open a file URI that is in the reponse.\r\n\r\n**Describe the solution you'd like**\r\n`LspExecuteCommand` should provide an optional response handler. When missing simply log the response as the case now. Otherwise delegate the response handling. 
\r\n\r\n**Describe alternatives you've considered**\r\nThe current alternative is to copy `LspExecuteCommand` logic.\r\n\n", "before_files": [{"content": "import os\nimport sublime\nimport sublime_plugin\n\n# Please keep this list sorted (Edit -> Sort Lines)\nfrom .plugin.code_actions import LspCodeActionsCommand\nfrom .plugin.code_lens import LspCodeLensCommand\nfrom .plugin.completion import LspResolveDocsCommand\nfrom .plugin.completion import LspSelectCompletionItemCommand\nfrom .plugin.configuration import LspDisableLanguageServerGloballyCommand\nfrom .plugin.configuration import LspDisableLanguageServerInProjectCommand\nfrom .plugin.configuration import LspEnableLanguageServerGloballyCommand\nfrom .plugin.configuration import LspEnableLanguageServerInProjectCommand\nfrom .plugin.core.collections import DottedDict\nfrom .plugin.core.css import load as load_css\nfrom .plugin.core.logging import exception_log\nfrom .plugin.core.open import opening_files\nfrom .plugin.core.panels import destroy_output_panels\nfrom .plugin.core.panels import LspClearPanelCommand\nfrom .plugin.core.panels import LspUpdatePanelCommand\nfrom .plugin.core.panels import LspUpdateServerPanelCommand\nfrom .plugin.core.panels import WindowPanelListener\nfrom .plugin.core.protocol import Location\nfrom .plugin.core.registry import LspRecheckSessionsCommand\nfrom .plugin.core.registry import LspRestartServerCommand\nfrom .plugin.core.registry import windows\nfrom .plugin.core.sessions import AbstractPlugin\nfrom .plugin.core.sessions import register_plugin\nfrom .plugin.core.settings import client_configs\nfrom .plugin.core.settings import load_settings\nfrom .plugin.core.settings import unload_settings\nfrom .plugin.core.signature_help import LspSignatureHelpNavigateCommand\nfrom .plugin.core.signature_help import LspSignatureHelpShowCommand\nfrom .plugin.core.transports import kill_all_subprocesses\nfrom .plugin.core.typing import Any, Optional, List, Type, Dict\nfrom .plugin.core.views import get_uri_and_position_from_location\nfrom .plugin.core.views import LspRunTextCommandHelperCommand\nfrom .plugin.document_link import LspOpenLinkCommand\nfrom .plugin.documents import DocumentSyncListener\nfrom .plugin.documents import TextChangeListener\nfrom .plugin.edit import LspApplyDocumentEditCommand\nfrom .plugin.execute_command import LspExecuteCommand\nfrom .plugin.formatting import LspFormatDocumentCommand\nfrom .plugin.formatting import LspFormatDocumentRangeCommand\nfrom .plugin.goto import LspSymbolDeclarationCommand\nfrom .plugin.goto import LspSymbolDefinitionCommand\nfrom .plugin.goto import LspSymbolImplementationCommand\nfrom .plugin.goto import LspSymbolTypeDefinitionCommand\nfrom .plugin.goto_diagnostic import LspGotoDiagnosticCommand\nfrom .plugin.hover import LspHoverCommand\nfrom .plugin.inlay_hint import LspInlayHintClickCommand\nfrom .plugin.panels import LspShowDiagnosticsPanelCommand\nfrom .plugin.panels import LspToggleServerPanelCommand\nfrom .plugin.references import LspSymbolReferencesCommand\nfrom .plugin.rename import LspSymbolRenameCommand\nfrom .plugin.save_command import LspSaveAllCommand\nfrom .plugin.save_command import LspSaveCommand\nfrom .plugin.selection_range import LspExpandSelectionCommand\nfrom .plugin.semantic_highlighting import LspShowScopeNameCommand\nfrom .plugin.symbols import LspDocumentSymbolsCommand\nfrom .plugin.symbols import LspSelectionAddCommand\nfrom .plugin.symbols import LspSelectionClearCommand\nfrom .plugin.symbols import LspSelectionSetCommand\nfrom 
.plugin.symbols import LspWorkspaceSymbolsCommand\nfrom .plugin.tooling import LspCopyToClipboardFromBase64Command\nfrom .plugin.tooling import LspDumpBufferCapabilities\nfrom .plugin.tooling import LspDumpWindowConfigs\nfrom .plugin.tooling import LspParseVscodePackageJson\nfrom .plugin.tooling import LspTroubleshootServerCommand\n\n\ndef _get_final_subclasses(derived: List[Type], results: List[Type]) -> None:\n for d in derived:\n d_subclasses = d.__subclasses__()\n if len(d_subclasses) > 0:\n _get_final_subclasses(d_subclasses, results)\n else:\n results.append(d)\n\n\ndef _register_all_plugins() -> None:\n plugin_classes = [] # type: List[Type[AbstractPlugin]]\n _get_final_subclasses(AbstractPlugin.__subclasses__(), plugin_classes)\n for plugin_class in plugin_classes:\n try:\n if not plugin_class.name():\n continue\n except NotImplementedError:\n continue\n register_plugin(plugin_class, notify_listener=False)\n\n\ndef _unregister_all_plugins() -> None:\n from LSP.plugin.core.sessions import _plugins\n _plugins.clear()\n client_configs.external.clear()\n client_configs.all.clear()\n\n\ndef plugin_loaded() -> None:\n load_settings()\n load_css()\n _register_all_plugins()\n client_configs.update_configs()\n for window in sublime.windows():\n windows.lookup(window)\n\n\ndef plugin_unloaded() -> None:\n _unregister_all_plugins()\n for window in sublime.windows():\n destroy_output_panels(window) # references and diagnostics panels\n try:\n windows.lookup(window).plugin_unloaded()\n windows.discard(window)\n except Exception as ex:\n exception_log(\"failed to unload window\", ex)\n unload_settings()\n\n\nclass Listener(sublime_plugin.EventListener):\n\n def on_exit(self) -> None:\n kill_all_subprocesses()\n\n def on_load_project_async(self, w: sublime.Window) -> None:\n windows.lookup(w).on_load_project_async()\n\n def on_post_save_project_async(self, w: sublime.Window) -> None:\n windows.lookup(w).on_post_save_project_async()\n\n def on_new_window_async(self, w: sublime.Window) -> None:\n sublime.set_timeout(lambda: windows.lookup(w))\n\n def on_pre_close_window(self, w: sublime.Window) -> None:\n windows.discard(w)\n\n # Note: EventListener.on_post_move_async does not fire when a tab is moved out of the current window in such a way\n # that a new window is created: https://github.com/sublimehq/sublime_text/issues/4630\n # Hence, as a workaround we use on_pre_move, which still works in that case.\n def on_pre_move(self, view: sublime.View) -> None:\n listeners = sublime_plugin.view_event_listeners.get(view.id())\n if not isinstance(listeners, list):\n return\n for listener in listeners:\n if isinstance(listener, DocumentSyncListener):\n # we need a small delay here, so that the DocumentSyncListener will recognize a possible new window\n sublime.set_timeout_async(listener.on_post_move_window_async, timeout_ms=1)\n return\n\n def on_load(self, view: sublime.View) -> None:\n file_name = view.file_name()\n if not file_name:\n return\n for fn in opening_files.keys():\n if fn == file_name or os.path.samefile(fn, file_name):\n # Remove it from the pending opening files, and resolve the promise.\n opening_files.pop(fn)[1](view)\n break\n\n def on_pre_close(self, view: sublime.View) -> None:\n file_name = view.file_name()\n if not file_name:\n return\n for fn in opening_files.keys():\n if fn == file_name or os.path.samefile(fn, file_name):\n tup = opening_files.pop(fn, None)\n if tup:\n # The view got closed before it finished loading. 
This can happen.\n tup[1](None)\n break\n\n def on_post_window_command(self, window: sublime.Window, command_name: str, args: Optional[Dict[str, Any]]) -> None:\n if command_name in (\"next_result\", \"prev_result\"):\n view = window.active_view()\n if view:\n view.run_command(\"lsp_hover\", {\"only_diagnostics\": True})\n\n\nclass LspOpenLocationCommand(sublime_plugin.TextCommand):\n \"\"\"\n A command to be used by third-party ST packages that need to open an URI with some abstract scheme.\n \"\"\"\n\n def run(\n self,\n _: sublime.Edit,\n location: Location,\n session_name: Optional[str] = None,\n flags: int = 0,\n group: int = -1\n ) -> None:\n sublime.set_timeout_async(lambda: self._run_async(location, session_name, flags, group))\n\n def _run_async(self, location: Location, session_name: Optional[str], flags: int = 0, group: int = -1) -> None:\n window = self.view.window()\n if not window:\n return\n windows.lookup(window).open_location_async(location, session_name, self.view, flags, group).then(\n lambda success: self._handle_continuation(location, success))\n\n def _handle_continuation(self, location: Location, success: bool) -> None:\n if not success:\n uri, _ = get_uri_and_position_from_location(location)\n message = \"Failed to open {}\".format(uri)\n sublime.status_message(message)\n", "path": "boot.py"}, {"content": "import sublime\nfrom .core.protocol import Error\nfrom .core.protocol import ExecuteCommandParams\nfrom .core.registry import LspTextCommand\nfrom .core.registry import windows\nfrom .core.typing import List, Optional, Any\nfrom .core.views import first_selection_region\nfrom .core.views import uri_from_view, offset_to_point, region_to_range, text_document_identifier\n\n\nclass LspExecuteCommand(LspTextCommand):\n\n def run(self,\n edit: sublime.Edit,\n command_name: Optional[str] = None,\n command_args: Optional[List[Any]] = None,\n session_name: Optional[str] = None,\n event: Optional[dict] = None) -> None:\n # Handle VSCode-specific command for triggering AC/sighelp\n if command_name == \"editor.action.triggerSuggest\":\n # Triggered from set_timeout as suggestions popup doesn't trigger otherwise.\n return sublime.set_timeout(lambda: self.view.run_command(\"auto_complete\"))\n if command_name == \"editor.action.triggerParameterHints\":\n\n def run_async() -> None:\n listener = windows.listener_for_view(self.view)\n if listener:\n listener.do_signature_help_async(manual=False)\n\n return sublime.set_timeout_async(run_async)\n session = self.session_by_name(session_name if session_name else self.session_name)\n if session and command_name:\n params = {\"command\": command_name} # type: ExecuteCommandParams\n if command_args:\n params[\"arguments\"] = self._expand_variables(command_args)\n\n def handle_response(response: Any) -> None:\n assert command_name\n if isinstance(response, Error):\n sublime.message_dialog(\"command {} failed. 
Reason: {}\".format(command_name, str(response)))\n return\n msg = \"command {} completed\".format(command_name)\n if response:\n msg += \"with response: {}\".format(response)\n window = self.view.window()\n if window:\n window.status_message(msg)\n\n session.execute_command(params, progress=True).then(handle_response)\n\n def _expand_variables(self, command_args: List[Any]) -> List[Any]:\n view = self.view # type: sublime.View\n region = first_selection_region(view)\n for i, arg in enumerate(command_args):\n if arg in [\"$document_id\", \"${document_id}\"]:\n command_args[i] = text_document_identifier(view)\n elif arg in [\"$file_uri\", \"${file_uri}\"]:\n command_args[i] = uri_from_view(view)\n elif region is not None:\n if arg in [\"$selection\", \"${selection}\"]:\n command_args[i] = view.substr(region)\n elif arg in [\"$offset\", \"${offset}\"]:\n command_args[i] = region.b\n elif arg in [\"$selection_begin\", \"${selection_begin}\"]:\n command_args[i] = region.begin()\n elif arg in [\"$selection_end\", \"${selection_end}\"]:\n command_args[i] = region.end()\n elif arg in [\"$position\", \"${position}\"]:\n command_args[i] = offset_to_point(view, region.b).to_lsp()\n elif arg in [\"$range\", \"${range}\"]:\n command_args[i] = region_to_range(view, region).to_lsp()\n window = view.window()\n window_variables = window.extract_variables() if window else {}\n return sublime.expand_variables(command_args, window_variables)\n", "path": "plugin/execute_command.py"}], "after_files": [{"content": "import os\nimport sublime\nimport sublime_plugin\n\n# Please keep this list sorted (Edit -> Sort Lines)\nfrom .plugin.code_actions import LspCodeActionsCommand\nfrom .plugin.code_lens import LspCodeLensCommand\nfrom .plugin.completion import LspResolveDocsCommand\nfrom .plugin.completion import LspSelectCompletionItemCommand\nfrom .plugin.configuration import LspDisableLanguageServerGloballyCommand\nfrom .plugin.configuration import LspDisableLanguageServerInProjectCommand\nfrom .plugin.configuration import LspEnableLanguageServerGloballyCommand\nfrom .plugin.configuration import LspEnableLanguageServerInProjectCommand\nfrom .plugin.core.collections import DottedDict\nfrom .plugin.core.css import load as load_css\nfrom .plugin.core.logging import exception_log\nfrom .plugin.core.open import opening_files\nfrom .plugin.core.panels import destroy_output_panels\nfrom .plugin.core.panels import LspClearPanelCommand\nfrom .plugin.core.panels import LspUpdatePanelCommand\nfrom .plugin.core.panels import LspUpdateServerPanelCommand\nfrom .plugin.core.panels import WindowPanelListener\nfrom .plugin.core.protocol import Error\nfrom .plugin.core.protocol import Location\nfrom .plugin.core.registry import LspRecheckSessionsCommand\nfrom .plugin.core.registry import LspRestartServerCommand\nfrom .plugin.core.registry import windows\nfrom .plugin.core.sessions import AbstractPlugin\nfrom .plugin.core.sessions import register_plugin\nfrom .plugin.core.settings import client_configs\nfrom .plugin.core.settings import load_settings\nfrom .plugin.core.settings import unload_settings\nfrom .plugin.core.signature_help import LspSignatureHelpNavigateCommand\nfrom .plugin.core.signature_help import LspSignatureHelpShowCommand\nfrom .plugin.core.transports import kill_all_subprocesses\nfrom .plugin.core.typing import Any, Optional, List, Type, Dict\nfrom .plugin.core.views import get_uri_and_position_from_location\nfrom .plugin.core.views import LspRunTextCommandHelperCommand\nfrom .plugin.document_link import 
LspOpenLinkCommand\nfrom .plugin.documents import DocumentSyncListener\nfrom .plugin.documents import TextChangeListener\nfrom .plugin.edit import LspApplyDocumentEditCommand\nfrom .plugin.execute_command import LspExecuteCommand\nfrom .plugin.formatting import LspFormatDocumentCommand\nfrom .plugin.formatting import LspFormatDocumentRangeCommand\nfrom .plugin.goto import LspSymbolDeclarationCommand\nfrom .plugin.goto import LspSymbolDefinitionCommand\nfrom .plugin.goto import LspSymbolImplementationCommand\nfrom .plugin.goto import LspSymbolTypeDefinitionCommand\nfrom .plugin.goto_diagnostic import LspGotoDiagnosticCommand\nfrom .plugin.hover import LspHoverCommand\nfrom .plugin.inlay_hint import LspInlayHintClickCommand\nfrom .plugin.panels import LspShowDiagnosticsPanelCommand\nfrom .plugin.panels import LspToggleServerPanelCommand\nfrom .plugin.references import LspSymbolReferencesCommand\nfrom .plugin.rename import LspSymbolRenameCommand\nfrom .plugin.save_command import LspSaveAllCommand\nfrom .plugin.save_command import LspSaveCommand\nfrom .plugin.selection_range import LspExpandSelectionCommand\nfrom .plugin.semantic_highlighting import LspShowScopeNameCommand\nfrom .plugin.symbols import LspDocumentSymbolsCommand\nfrom .plugin.symbols import LspSelectionAddCommand\nfrom .plugin.symbols import LspSelectionClearCommand\nfrom .plugin.symbols import LspSelectionSetCommand\nfrom .plugin.symbols import LspWorkspaceSymbolsCommand\nfrom .plugin.tooling import LspCopyToClipboardFromBase64Command\nfrom .plugin.tooling import LspDumpBufferCapabilities\nfrom .plugin.tooling import LspDumpWindowConfigs\nfrom .plugin.tooling import LspParseVscodePackageJson\nfrom .plugin.tooling import LspTroubleshootServerCommand\n\n\ndef _get_final_subclasses(derived: List[Type], results: List[Type]) -> None:\n for d in derived:\n d_subclasses = d.__subclasses__()\n if len(d_subclasses) > 0:\n _get_final_subclasses(d_subclasses, results)\n else:\n results.append(d)\n\n\ndef _register_all_plugins() -> None:\n plugin_classes = [] # type: List[Type[AbstractPlugin]]\n _get_final_subclasses(AbstractPlugin.__subclasses__(), plugin_classes)\n for plugin_class in plugin_classes:\n try:\n if not plugin_class.name():\n continue\n except NotImplementedError:\n continue\n register_plugin(plugin_class, notify_listener=False)\n\n\ndef _unregister_all_plugins() -> None:\n from LSP.plugin.core.sessions import _plugins\n _plugins.clear()\n client_configs.external.clear()\n client_configs.all.clear()\n\n\ndef plugin_loaded() -> None:\n load_settings()\n load_css()\n _register_all_plugins()\n client_configs.update_configs()\n for window in sublime.windows():\n windows.lookup(window)\n\n\ndef plugin_unloaded() -> None:\n _unregister_all_plugins()\n for window in sublime.windows():\n destroy_output_panels(window) # references and diagnostics panels\n try:\n windows.lookup(window).plugin_unloaded()\n windows.discard(window)\n except Exception as ex:\n exception_log(\"failed to unload window\", ex)\n unload_settings()\n\n\nclass Listener(sublime_plugin.EventListener):\n\n def on_exit(self) -> None:\n kill_all_subprocesses()\n\n def on_load_project_async(self, w: sublime.Window) -> None:\n windows.lookup(w).on_load_project_async()\n\n def on_post_save_project_async(self, w: sublime.Window) -> None:\n windows.lookup(w).on_post_save_project_async()\n\n def on_new_window_async(self, w: sublime.Window) -> None:\n sublime.set_timeout(lambda: windows.lookup(w))\n\n def on_pre_close_window(self, w: sublime.Window) -> None:\n 
windows.discard(w)\n\n # Note: EventListener.on_post_move_async does not fire when a tab is moved out of the current window in such a way\n # that a new window is created: https://github.com/sublimehq/sublime_text/issues/4630\n # Hence, as a workaround we use on_pre_move, which still works in that case.\n def on_pre_move(self, view: sublime.View) -> None:\n listeners = sublime_plugin.view_event_listeners.get(view.id())\n if not isinstance(listeners, list):\n return\n for listener in listeners:\n if isinstance(listener, DocumentSyncListener):\n # we need a small delay here, so that the DocumentSyncListener will recognize a possible new window\n sublime.set_timeout_async(listener.on_post_move_window_async, timeout_ms=1)\n return\n\n def on_load(self, view: sublime.View) -> None:\n file_name = view.file_name()\n if not file_name:\n return\n for fn in opening_files.keys():\n if fn == file_name or os.path.samefile(fn, file_name):\n # Remove it from the pending opening files, and resolve the promise.\n opening_files.pop(fn)[1](view)\n break\n\n def on_pre_close(self, view: sublime.View) -> None:\n file_name = view.file_name()\n if not file_name:\n return\n for fn in opening_files.keys():\n if fn == file_name or os.path.samefile(fn, file_name):\n tup = opening_files.pop(fn, None)\n if tup:\n # The view got closed before it finished loading. This can happen.\n tup[1](None)\n break\n\n def on_post_window_command(self, window: sublime.Window, command_name: str, args: Optional[Dict[str, Any]]) -> None:\n if command_name in (\"next_result\", \"prev_result\"):\n view = window.active_view()\n if view:\n view.run_command(\"lsp_hover\", {\"only_diagnostics\": True})\n\n\nclass LspOpenLocationCommand(sublime_plugin.TextCommand):\n \"\"\"\n A command to be used by third-party ST packages that need to open an URI with some abstract scheme.\n \"\"\"\n\n def run(\n self,\n _: sublime.Edit,\n location: Location,\n session_name: Optional[str] = None,\n flags: int = 0,\n group: int = -1\n ) -> None:\n sublime.set_timeout_async(lambda: self._run_async(location, session_name, flags, group))\n\n def _run_async(self, location: Location, session_name: Optional[str], flags: int = 0, group: int = -1) -> None:\n window = self.view.window()\n if not window:\n return\n windows.lookup(window).open_location_async(location, session_name, self.view, flags, group).then(\n lambda success: self._handle_continuation(location, success))\n\n def _handle_continuation(self, location: Location, success: bool) -> None:\n if not success:\n uri, _ = get_uri_and_position_from_location(location)\n message = \"Failed to open {}\".format(uri)\n sublime.status_message(message)\n", "path": "boot.py"}, {"content": "import sublime\nfrom .core.protocol import Error\nfrom .core.protocol import ExecuteCommandParams\nfrom .core.registry import LspTextCommand\nfrom .core.registry import windows\nfrom .core.typing import List, Optional, Any\nfrom .core.views import first_selection_region\nfrom .core.views import uri_from_view, offset_to_point, region_to_range, text_document_identifier\n\n\nclass LspExecuteCommand(LspTextCommand):\n \"\"\"\n Helper command for triggering workspace/executeCommand requests.\n \"\"\"\n\n def run(self,\n edit: sublime.Edit,\n command_name: Optional[str] = None,\n command_args: Optional[List[Any]] = None,\n session_name: Optional[str] = None,\n event: Optional[dict] = None) -> None:\n # Handle VSCode-specific command for triggering AC/sighelp\n if command_name == \"editor.action.triggerSuggest\":\n # Triggered from 
set_timeout as suggestions popup doesn't trigger otherwise.\n return sublime.set_timeout(lambda: self.view.run_command(\"auto_complete\"))\n if command_name == \"editor.action.triggerParameterHints\":\n\n def run_async() -> None:\n listener = windows.listener_for_view(self.view)\n if listener:\n listener.do_signature_help_async(manual=False)\n\n return sublime.set_timeout_async(run_async)\n session = self.session_by_name(session_name if session_name else self.session_name)\n if session and command_name:\n params = {\"command\": command_name} # type: ExecuteCommandParams\n if command_args:\n params[\"arguments\"] = self._expand_variables(command_args)\n\n def handle_response(response: Any) -> None:\n assert command_name\n if isinstance(response, Error):\n self.handle_error_async(response, command_name)\n return\n self.handle_success_async(response, command_name)\n\n session.execute_command(params, progress=True).then(handle_response)\n\n def handle_success_async(self, result: Any, command_name: str) -> None:\n \"\"\"\n Override this method to handle successful response to workspace/executeCommand.\n\n :param result: The result returned from the server.\n :param command_name: The name of the command that was executed.\n \"\"\"\n msg = \"command {} completed\".format(command_name)\n window = self.view.window()\n if window:\n window.status_message(msg)\n\n def handle_error_async(self, error: Error, command_name: str) -> None:\n \"\"\"\n Override this method to handle failed response to workspace/executeCommand.\n\n :param error: The Error object.\n :param command_name: The name of the command that was executed.\n \"\"\"\n sublime.message_dialog(\"command {} failed. Reason: {}\".format(command_name, str(error)))\n\n def _expand_variables(self, command_args: List[Any]) -> List[Any]:\n view = self.view # type: sublime.View\n region = first_selection_region(view)\n for i, arg in enumerate(command_args):\n if arg in [\"$document_id\", \"${document_id}\"]:\n command_args[i] = text_document_identifier(view)\n elif arg in [\"$file_uri\", \"${file_uri}\"]:\n command_args[i] = uri_from_view(view)\n elif region is not None:\n if arg in [\"$selection\", \"${selection}\"]:\n command_args[i] = view.substr(region)\n elif arg in [\"$offset\", \"${offset}\"]:\n command_args[i] = region.b\n elif arg in [\"$selection_begin\", \"${selection_begin}\"]:\n command_args[i] = region.begin()\n elif arg in [\"$selection_end\", \"${selection_end}\"]:\n command_args[i] = region.end()\n elif arg in [\"$position\", \"${position}\"]:\n command_args[i] = offset_to_point(view, region.b).to_lsp()\n elif arg in [\"$range\", \"${range}\"]:\n command_args[i] = region_to_range(view, region).to_lsp()\n window = view.window()\n window_variables = window.extract_variables() if window else {}\n return sublime.expand_variables(command_args, window_variables)\n", "path": "plugin/execute_command.py"}]}
3,730
573
gh_patches_debug_30097
rasdani/github-patches
git_diff
facebookresearch__CompilerGym-547
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Documentation for compiler_gym.wrappers is incomplete ## 📚 Documentation The module docstring for `compiler_gym/wrappers/__init__.py` simply reads: > The `compiler_gym.wrappers` module provides. 👎 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `compiler_gym/wrappers/__init__.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. 2 # 3 # This source code is licensed under the MIT license found in the 4 # LICENSE file in the root directory of this source tree. 5 """The :code:`compiler_gym.wrappers` module provides. 6 """ 7 from compiler_gym.wrappers.commandline import ( 8 CommandlineWithTerminalAction, 9 ConstrainedCommandline, 10 ) 11 from compiler_gym.wrappers.core import ( 12 ActionWrapper, 13 CompilerEnvWrapper, 14 ObservationWrapper, 15 RewardWrapper, 16 ) 17 from compiler_gym.wrappers.datasets import ( 18 CycleOverBenchmarks, 19 CycleOverBenchmarksIterator, 20 IterateOverBenchmarks, 21 RandomOrderBenchmarks, 22 ) 23 from compiler_gym.wrappers.llvm import RuntimePointEstimateReward 24 from compiler_gym.wrappers.time_limit import TimeLimit 25 26 __all__ = [ 27 "ActionWrapper", 28 "CommandlineWithTerminalAction", 29 "CompilerEnvWrapper", 30 "ConstrainedCommandline", 31 "CycleOverBenchmarks", 32 "CycleOverBenchmarksIterator", 33 "IterateOverBenchmarks", 34 "ObservationWrapper", 35 "RandomOrderBenchmarks", 36 "RewardWrapper", 37 "RuntimePointEstimateReward", 38 "TimeLimit", 39 ] 40 ``` Path: `compiler_gym/wrappers/datasets.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. 2 # 3 # This source code is licensed under the MIT license found in the 4 # LICENSE file in the root directory of this source tree. 5 from itertools import cycle 6 from typing import Callable, Iterable, Optional, Union 7 8 import numpy as np 9 10 from compiler_gym.datasets import Benchmark 11 from compiler_gym.envs import CompilerEnv 12 from compiler_gym.util.parallelization import thread_safe_tee 13 from compiler_gym.wrappers.core import CompilerEnvWrapper 14 15 BenchmarkLike = Union[str, Benchmark] 16 17 18 class IterateOverBenchmarks(CompilerEnvWrapper): 19 """Iterate over a (possibly infinite) sequence of benchmarks on each call to 20 reset(). Will raise :code:`StopIteration` on :meth:`reset() 21 <compiler_gym.envs.CompilerEnv.reset>` once the iterator is exhausted. Use 22 :class:`CycleOverBenchmarks` or :class:`RandomOrderBenchmarks` for wrappers 23 which will loop over the benchmarks. 24 """ 25 26 def __init__( 27 self, 28 env: CompilerEnv, 29 benchmarks: Iterable[BenchmarkLike], 30 fork_shares_iterator: bool = False, 31 ): 32 """Constructor. 33 34 :param env: The environment to wrap. 35 36 :param benchmarks: An iterable sequence of benchmarks. 37 38 :param fork_shares_iterator: If :code:`True`, the :code:`benchmarks` 39 iterator will bet shared by a forked environment created by 40 :meth:`env.fork() <compiler_gym.envs.CompilerEnv.fork>`. This means 41 that calling :meth:`env.reset() 42 <compiler_gym.envs.CompilerEnv.reset>` with one environment will 43 advance the iterator in the other. If :code:`False`, forked 44 environments will use :code:`itertools.tee()` to create a copy of 45 the iterator so that each iterator may advance independently. 
46 However, this requires shared buffers between the environments which 47 can lead to memory overheads if :meth:`env.reset() 48 <compiler_gym.envs.CompilerEnv.reset>` is called many times more in 49 one environment than the other. 50 """ 51 super().__init__(env) 52 self.benchmarks = iter(benchmarks) 53 self.fork_shares_iterator = fork_shares_iterator 54 55 def reset(self, benchmark: Optional[BenchmarkLike] = None, **kwargs): 56 if benchmark is not None: 57 raise TypeError("Benchmark passed to IterateOverBenchmarks.reset()") 58 benchmark: BenchmarkLike = next(self.benchmarks) 59 return self.env.reset(benchmark=benchmark) 60 61 def fork(self) -> "IterateOverBenchmarks": 62 if self.fork_shares_iterator: 63 other_benchmarks_iterator = self.benchmarks 64 else: 65 self.benchmarks, other_benchmarks_iterator = thread_safe_tee( 66 self.benchmarks 67 ) 68 return IterateOverBenchmarks( 69 env=self.env.fork(), 70 benchmarks=other_benchmarks_iterator, 71 fork_shares_iterator=self.fork_shares_iterator, 72 ) 73 74 75 class CycleOverBenchmarks(IterateOverBenchmarks): 76 """Cycle through a list of benchmarks on each call to :meth:`reset() 77 <compiler_gym.envs.CompilerEnv.reset>`. Same as 78 :class:`IterateOverBenchmarks` except the list of benchmarks repeats once 79 exhausted. 80 """ 81 82 def __init__( 83 self, 84 env: CompilerEnv, 85 benchmarks: Iterable[BenchmarkLike], 86 fork_shares_iterator: bool = False, 87 ): 88 """Constructor. 89 90 :param env: The environment to wrap. 91 92 :param benchmarks: An iterable sequence of benchmarks. 93 94 :param fork_shares_iterator: If :code:`True`, the :code:`benchmarks` 95 iterator will be shared by a forked environment created by 96 :meth:`env.fork() <compiler_gym.envs.CompilerEnv.fork>`. This means 97 that calling :meth:`env.reset() 98 <compiler_gym.envs.CompilerEnv.reset>` with one environment will 99 advance the iterator in the other. If :code:`False`, forked 100 environments will use :code:`itertools.tee()` to create a copy of 101 the iterator so that each iterator may advance independently. 102 However, this requires shared buffers between the environments which 103 can lead to memory overheads if :meth:`env.reset() 104 <compiler_gym.envs.CompilerEnv.reset>` is called many times more in 105 one environment than the other. 106 """ 107 super().__init__( 108 env, benchmarks=cycle(benchmarks), fork_shares_iterator=fork_shares_iterator 109 ) 110 111 112 class CycleOverBenchmarksIterator(CompilerEnvWrapper): 113 """Same as :class:`CycleOverBenchmarks 114 <compiler_gym.wrappers.CycleOverBenchmarks>` except that the user generates 115 the iterator. 116 """ 117 118 def __init__( 119 self, 120 env: CompilerEnv, 121 make_benchmark_iterator: Callable[[], Iterable[BenchmarkLike]], 122 ): 123 """Constructor. 124 125 :param env: The environment to wrap. 126 127 :param make_benchmark_iterator: A callback that returns an iterator over 128 a sequence of benchmarks. Once the iterator is exhausted, this 129 callback is called to produce a new iterator. 
130 """ 131 super().__init__(env) 132 self.make_benchmark_iterator = make_benchmark_iterator 133 self.benchmarks = iter(self.make_benchmark_iterator()) 134 135 def reset(self, benchmark: Optional[BenchmarkLike] = None, **kwargs): 136 if benchmark is not None: 137 raise TypeError("Benchmark passed toIterateOverBenchmarks.reset()") 138 try: 139 benchmark: BenchmarkLike = next(self.benchmarks) 140 except StopIteration: 141 self.benchmarks = iter(self.make_benchmark_iterator()) 142 benchmark: BenchmarkLike = next(self.benchmarks) 143 144 return self.env.reset(benchmark=benchmark) 145 146 def fork(self) -> "CycleOverBenchmarksIterator": 147 return CycleOverBenchmarksIterator( 148 env=self.env.fork(), 149 make_benchmark_iterator=self.make_benchmark_iterator, 150 ) 151 152 153 class RandomOrderBenchmarks(IterateOverBenchmarks): 154 """Select randomly from a list of benchmarks on each call to :meth:`reset() 155 <compiler_gym.envs.CompilerEnv.reset>`. 156 157 .. note:: 158 159 Uniform random selection is provided by evaluating the input benchmarks 160 iterator into a list and sampling randomly from the list. This will not 161 work for random iteration over infinite or very large iterables of 162 benchmarks. 163 """ 164 165 def __init__( 166 self, 167 env: CompilerEnv, 168 benchmarks: Iterable[BenchmarkLike], 169 rng: Optional[np.random.Generator] = None, 170 ): 171 """Constructor. 172 173 :param env: The environment to wrap. 174 175 :param benchmarks: An iterable sequence of benchmarks. The entirety of 176 this input iterator is evaluated during construction. 177 178 :param rng: A random number generator to use for random benchmark 179 selection. 180 """ 181 self._all_benchmarks = list(benchmarks) 182 rng = rng or np.random.default_rng() 183 super().__init__( 184 env, 185 benchmarks=(rng.choice(self._all_benchmarks) for _ in iter(int, 1)), 186 fork_shares_iterator=True, 187 ) 188 189 def fork(self) -> "IterateOverBenchmarks": 190 """Fork the random order benchmark wrapper. 191 192 Note that RNG state is not copied to forked environments. 193 """ 194 return IterateOverBenchmarks( 195 env=self.env.fork(), benchmarks=self._all_benchmarks 196 ) 197 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/compiler_gym/wrappers/__init__.py b/compiler_gym/wrappers/__init__.py --- a/compiler_gym/wrappers/__init__.py +++ b/compiler_gym/wrappers/__init__.py @@ -2,7 +2,30 @@ # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -"""The :code:`compiler_gym.wrappers` module provides. +"""The :code:`compiler_gym.wrappers` module provides a set of classes that can +be used to transform an environment in a modular way. + +For example: + + >>> env = compiler_gym.make("llvm-v0") + >>> env = TimeLimit(env, n=10) + >>> env = CycleOverBenchmarks( + ... env, + ... benchmarks=[ + ... "benchmark://cbench-v1/crc32", + ... "benchmark://cbench-v1/qsort", + ... ], + ... ) + +.. warning:: + + CompilerGym environments are incompatible with the `OpenAI Gym wrappers + <https://github.com/openai/gym/tree/master/gym/wrappers>`_. This is because + CompilerGym extends the environment API with additional arguments and + methods. You must use the wrappers from this module when wrapping + CompilerGym environments. We provide a set of base wrappers that are + equivalent to those in OpenAI Gym that you can use to write your own + wrappers. """ from compiler_gym.wrappers.commandline import ( CommandlineWithTerminalAction, diff --git a/compiler_gym/wrappers/datasets.py b/compiler_gym/wrappers/datasets.py --- a/compiler_gym/wrappers/datasets.py +++ b/compiler_gym/wrappers/datasets.py @@ -157,9 +157,11 @@ .. note:: Uniform random selection is provided by evaluating the input benchmarks - iterator into a list and sampling randomly from the list. This will not - work for random iteration over infinite or very large iterables of - benchmarks. + iterator into a list and sampling randomly from the list. For very large + and infinite iterables of benchmarks you must use the + :class:`IterateOverBenchmarks + <compiler_gym.wrappers.IterateOverBenchmarks>` wrapper with your own + random sampling iterator. """ def __init__(
{"golden_diff": "diff --git a/compiler_gym/wrappers/__init__.py b/compiler_gym/wrappers/__init__.py\n--- a/compiler_gym/wrappers/__init__.py\n+++ b/compiler_gym/wrappers/__init__.py\n@@ -2,7 +2,30 @@\n #\n # This source code is licensed under the MIT license found in the\n # LICENSE file in the root directory of this source tree.\n-\"\"\"The :code:`compiler_gym.wrappers` module provides.\n+\"\"\"The :code:`compiler_gym.wrappers` module provides a set of classes that can\n+be used to transform an environment in a modular way.\n+\n+For example:\n+\n+ >>> env = compiler_gym.make(\"llvm-v0\")\n+ >>> env = TimeLimit(env, n=10)\n+ >>> env = CycleOverBenchmarks(\n+ ... env,\n+ ... benchmarks=[\n+ ... \"benchmark://cbench-v1/crc32\",\n+ ... \"benchmark://cbench-v1/qsort\",\n+ ... ],\n+ ... )\n+\n+.. warning::\n+\n+ CompilerGym environments are incompatible with the `OpenAI Gym wrappers\n+ <https://github.com/openai/gym/tree/master/gym/wrappers>`_. This is because\n+ CompilerGym extends the environment API with additional arguments and\n+ methods. You must use the wrappers from this module when wrapping\n+ CompilerGym environments. We provide a set of base wrappers that are\n+ equivalent to those in OpenAI Gym that you can use to write your own\n+ wrappers.\n \"\"\"\n from compiler_gym.wrappers.commandline import (\n CommandlineWithTerminalAction,\ndiff --git a/compiler_gym/wrappers/datasets.py b/compiler_gym/wrappers/datasets.py\n--- a/compiler_gym/wrappers/datasets.py\n+++ b/compiler_gym/wrappers/datasets.py\n@@ -157,9 +157,11 @@\n .. note::\n \n Uniform random selection is provided by evaluating the input benchmarks\n- iterator into a list and sampling randomly from the list. This will not\n- work for random iteration over infinite or very large iterables of\n- benchmarks.\n+ iterator into a list and sampling randomly from the list. For very large\n+ and infinite iterables of benchmarks you must use the\n+ :class:`IterateOverBenchmarks\n+ <compiler_gym.wrappers.IterateOverBenchmarks>` wrapper with your own\n+ random sampling iterator.\n \"\"\"\n \n def __init__(\n", "issue": "Documentation for compiler_gym.wrappers is incomplete\n## \ud83d\udcda Documentation\r\n\r\nThe module docstring for `compiler_gym/wrappers/__init__.py` simply reads:\r\n\r\n> The `compiler_gym.wrappers` module provides.\r\n\r\n\ud83d\udc4e \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"The :code:`compiler_gym.wrappers` module provides.\n\"\"\"\nfrom compiler_gym.wrappers.commandline import (\n CommandlineWithTerminalAction,\n ConstrainedCommandline,\n)\nfrom compiler_gym.wrappers.core import (\n ActionWrapper,\n CompilerEnvWrapper,\n ObservationWrapper,\n RewardWrapper,\n)\nfrom compiler_gym.wrappers.datasets import (\n CycleOverBenchmarks,\n CycleOverBenchmarksIterator,\n IterateOverBenchmarks,\n RandomOrderBenchmarks,\n)\nfrom compiler_gym.wrappers.llvm import RuntimePointEstimateReward\nfrom compiler_gym.wrappers.time_limit import TimeLimit\n\n__all__ = [\n \"ActionWrapper\",\n \"CommandlineWithTerminalAction\",\n \"CompilerEnvWrapper\",\n \"ConstrainedCommandline\",\n \"CycleOverBenchmarks\",\n \"CycleOverBenchmarksIterator\",\n \"IterateOverBenchmarks\",\n \"ObservationWrapper\",\n \"RandomOrderBenchmarks\",\n \"RewardWrapper\",\n \"RuntimePointEstimateReward\",\n \"TimeLimit\",\n]\n", "path": "compiler_gym/wrappers/__init__.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom itertools import cycle\nfrom typing import Callable, Iterable, Optional, Union\n\nimport numpy as np\n\nfrom compiler_gym.datasets import Benchmark\nfrom compiler_gym.envs import CompilerEnv\nfrom compiler_gym.util.parallelization import thread_safe_tee\nfrom compiler_gym.wrappers.core import CompilerEnvWrapper\n\nBenchmarkLike = Union[str, Benchmark]\n\n\nclass IterateOverBenchmarks(CompilerEnvWrapper):\n \"\"\"Iterate over a (possibly infinite) sequence of benchmarks on each call to\n reset(). Will raise :code:`StopIteration` on :meth:`reset()\n <compiler_gym.envs.CompilerEnv.reset>` once the iterator is exhausted. Use\n :class:`CycleOverBenchmarks` or :class:`RandomOrderBenchmarks` for wrappers\n which will loop over the benchmarks.\n \"\"\"\n\n def __init__(\n self,\n env: CompilerEnv,\n benchmarks: Iterable[BenchmarkLike],\n fork_shares_iterator: bool = False,\n ):\n \"\"\"Constructor.\n\n :param env: The environment to wrap.\n\n :param benchmarks: An iterable sequence of benchmarks.\n\n :param fork_shares_iterator: If :code:`True`, the :code:`benchmarks`\n iterator will bet shared by a forked environment created by\n :meth:`env.fork() <compiler_gym.envs.CompilerEnv.fork>`. This means\n that calling :meth:`env.reset()\n <compiler_gym.envs.CompilerEnv.reset>` with one environment will\n advance the iterator in the other. 
If :code:`False`, forked\n environments will use :code:`itertools.tee()` to create a copy of\n the iterator so that each iterator may advance independently.\n However, this requires shared buffers between the environments which\n can lead to memory overheads if :meth:`env.reset()\n <compiler_gym.envs.CompilerEnv.reset>` is called many times more in\n one environment than the other.\n \"\"\"\n super().__init__(env)\n self.benchmarks = iter(benchmarks)\n self.fork_shares_iterator = fork_shares_iterator\n\n def reset(self, benchmark: Optional[BenchmarkLike] = None, **kwargs):\n if benchmark is not None:\n raise TypeError(\"Benchmark passed to IterateOverBenchmarks.reset()\")\n benchmark: BenchmarkLike = next(self.benchmarks)\n return self.env.reset(benchmark=benchmark)\n\n def fork(self) -> \"IterateOverBenchmarks\":\n if self.fork_shares_iterator:\n other_benchmarks_iterator = self.benchmarks\n else:\n self.benchmarks, other_benchmarks_iterator = thread_safe_tee(\n self.benchmarks\n )\n return IterateOverBenchmarks(\n env=self.env.fork(),\n benchmarks=other_benchmarks_iterator,\n fork_shares_iterator=self.fork_shares_iterator,\n )\n\n\nclass CycleOverBenchmarks(IterateOverBenchmarks):\n \"\"\"Cycle through a list of benchmarks on each call to :meth:`reset()\n <compiler_gym.envs.CompilerEnv.reset>`. Same as\n :class:`IterateOverBenchmarks` except the list of benchmarks repeats once\n exhausted.\n \"\"\"\n\n def __init__(\n self,\n env: CompilerEnv,\n benchmarks: Iterable[BenchmarkLike],\n fork_shares_iterator: bool = False,\n ):\n \"\"\"Constructor.\n\n :param env: The environment to wrap.\n\n :param benchmarks: An iterable sequence of benchmarks.\n\n :param fork_shares_iterator: If :code:`True`, the :code:`benchmarks`\n iterator will be shared by a forked environment created by\n :meth:`env.fork() <compiler_gym.envs.CompilerEnv.fork>`. This means\n that calling :meth:`env.reset()\n <compiler_gym.envs.CompilerEnv.reset>` with one environment will\n advance the iterator in the other. If :code:`False`, forked\n environments will use :code:`itertools.tee()` to create a copy of\n the iterator so that each iterator may advance independently.\n However, this requires shared buffers between the environments which\n can lead to memory overheads if :meth:`env.reset()\n <compiler_gym.envs.CompilerEnv.reset>` is called many times more in\n one environment than the other.\n \"\"\"\n super().__init__(\n env, benchmarks=cycle(benchmarks), fork_shares_iterator=fork_shares_iterator\n )\n\n\nclass CycleOverBenchmarksIterator(CompilerEnvWrapper):\n \"\"\"Same as :class:`CycleOverBenchmarks\n <compiler_gym.wrappers.CycleOverBenchmarks>` except that the user generates\n the iterator.\n \"\"\"\n\n def __init__(\n self,\n env: CompilerEnv,\n make_benchmark_iterator: Callable[[], Iterable[BenchmarkLike]],\n ):\n \"\"\"Constructor.\n\n :param env: The environment to wrap.\n\n :param make_benchmark_iterator: A callback that returns an iterator over\n a sequence of benchmarks. 
Once the iterator is exhausted, this\n callback is called to produce a new iterator.\n \"\"\"\n super().__init__(env)\n self.make_benchmark_iterator = make_benchmark_iterator\n self.benchmarks = iter(self.make_benchmark_iterator())\n\n def reset(self, benchmark: Optional[BenchmarkLike] = None, **kwargs):\n if benchmark is not None:\n raise TypeError(\"Benchmark passed toIterateOverBenchmarks.reset()\")\n try:\n benchmark: BenchmarkLike = next(self.benchmarks)\n except StopIteration:\n self.benchmarks = iter(self.make_benchmark_iterator())\n benchmark: BenchmarkLike = next(self.benchmarks)\n\n return self.env.reset(benchmark=benchmark)\n\n def fork(self) -> \"CycleOverBenchmarksIterator\":\n return CycleOverBenchmarksIterator(\n env=self.env.fork(),\n make_benchmark_iterator=self.make_benchmark_iterator,\n )\n\n\nclass RandomOrderBenchmarks(IterateOverBenchmarks):\n \"\"\"Select randomly from a list of benchmarks on each call to :meth:`reset()\n <compiler_gym.envs.CompilerEnv.reset>`.\n\n .. note::\n\n Uniform random selection is provided by evaluating the input benchmarks\n iterator into a list and sampling randomly from the list. This will not\n work for random iteration over infinite or very large iterables of\n benchmarks.\n \"\"\"\n\n def __init__(\n self,\n env: CompilerEnv,\n benchmarks: Iterable[BenchmarkLike],\n rng: Optional[np.random.Generator] = None,\n ):\n \"\"\"Constructor.\n\n :param env: The environment to wrap.\n\n :param benchmarks: An iterable sequence of benchmarks. The entirety of\n this input iterator is evaluated during construction.\n\n :param rng: A random number generator to use for random benchmark\n selection.\n \"\"\"\n self._all_benchmarks = list(benchmarks)\n rng = rng or np.random.default_rng()\n super().__init__(\n env,\n benchmarks=(rng.choice(self._all_benchmarks) for _ in iter(int, 1)),\n fork_shares_iterator=True,\n )\n\n def fork(self) -> \"IterateOverBenchmarks\":\n \"\"\"Fork the random order benchmark wrapper.\n\n Note that RNG state is not copied to forked environments.\n \"\"\"\n return IterateOverBenchmarks(\n env=self.env.fork(), benchmarks=self._all_benchmarks\n )\n", "path": "compiler_gym/wrappers/datasets.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"The :code:`compiler_gym.wrappers` module provides a set of classes that can\nbe used to transform an environment in a modular way.\n\nFor example:\n\n >>> env = compiler_gym.make(\"llvm-v0\")\n >>> env = TimeLimit(env, n=10)\n >>> env = CycleOverBenchmarks(\n ... env,\n ... benchmarks=[\n ... \"benchmark://cbench-v1/crc32\",\n ... \"benchmark://cbench-v1/qsort\",\n ... ],\n ... )\n\n.. warning::\n\n CompilerGym environments are incompatible with the `OpenAI Gym wrappers\n <https://github.com/openai/gym/tree/master/gym/wrappers>`_. This is because\n CompilerGym extends the environment API with additional arguments and\n methods. You must use the wrappers from this module when wrapping\n CompilerGym environments. 
We provide a set of base wrappers that are\n equivalent to those in OpenAI Gym that you can use to write your own\n wrappers.\n\"\"\"\nfrom compiler_gym.wrappers.commandline import (\n CommandlineWithTerminalAction,\n ConstrainedCommandline,\n)\nfrom compiler_gym.wrappers.core import (\n ActionWrapper,\n CompilerEnvWrapper,\n ObservationWrapper,\n RewardWrapper,\n)\nfrom compiler_gym.wrappers.datasets import (\n CycleOverBenchmarks,\n CycleOverBenchmarksIterator,\n IterateOverBenchmarks,\n RandomOrderBenchmarks,\n)\nfrom compiler_gym.wrappers.llvm import RuntimePointEstimateReward\nfrom compiler_gym.wrappers.time_limit import TimeLimit\n\n__all__ = [\n \"ActionWrapper\",\n \"CommandlineWithTerminalAction\",\n \"CompilerEnvWrapper\",\n \"ConstrainedCommandline\",\n \"CycleOverBenchmarks\",\n \"CycleOverBenchmarksIterator\",\n \"IterateOverBenchmarks\",\n \"ObservationWrapper\",\n \"RandomOrderBenchmarks\",\n \"RewardWrapper\",\n \"RuntimePointEstimateReward\",\n \"TimeLimit\",\n]\n", "path": "compiler_gym/wrappers/__init__.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom itertools import cycle\nfrom typing import Callable, Iterable, Optional, Union\n\nimport numpy as np\n\nfrom compiler_gym.datasets import Benchmark\nfrom compiler_gym.envs import CompilerEnv\nfrom compiler_gym.util.parallelization import thread_safe_tee\nfrom compiler_gym.wrappers.core import CompilerEnvWrapper\n\nBenchmarkLike = Union[str, Benchmark]\n\n\nclass IterateOverBenchmarks(CompilerEnvWrapper):\n \"\"\"Iterate over a (possibly infinite) sequence of benchmarks on each call to\n reset(). Will raise :code:`StopIteration` on :meth:`reset()\n <compiler_gym.envs.CompilerEnv.reset>` once the iterator is exhausted. Use\n :class:`CycleOverBenchmarks` or :class:`RandomOrderBenchmarks` for wrappers\n which will loop over the benchmarks.\n \"\"\"\n\n def __init__(\n self,\n env: CompilerEnv,\n benchmarks: Iterable[BenchmarkLike],\n fork_shares_iterator: bool = False,\n ):\n \"\"\"Constructor.\n\n :param env: The environment to wrap.\n\n :param benchmarks: An iterable sequence of benchmarks.\n\n :param fork_shares_iterator: If :code:`True`, the :code:`benchmarks`\n iterator will bet shared by a forked environment created by\n :meth:`env.fork() <compiler_gym.envs.CompilerEnv.fork>`. This means\n that calling :meth:`env.reset()\n <compiler_gym.envs.CompilerEnv.reset>` with one environment will\n advance the iterator in the other. 
If :code:`False`, forked\n environments will use :code:`itertools.tee()` to create a copy of\n the iterator so that each iterator may advance independently.\n However, this requires shared buffers between the environments which\n can lead to memory overheads if :meth:`env.reset()\n <compiler_gym.envs.CompilerEnv.reset>` is called many times more in\n one environment than the other.\n \"\"\"\n super().__init__(env)\n self.benchmarks = iter(benchmarks)\n self.fork_shares_iterator = fork_shares_iterator\n\n def reset(self, benchmark: Optional[BenchmarkLike] = None, **kwargs):\n if benchmark is not None:\n raise TypeError(\"Benchmark passed to IterateOverBenchmarks.reset()\")\n benchmark: BenchmarkLike = next(self.benchmarks)\n return self.env.reset(benchmark=benchmark)\n\n def fork(self) -> \"IterateOverBenchmarks\":\n if self.fork_shares_iterator:\n other_benchmarks_iterator = self.benchmarks\n else:\n self.benchmarks, other_benchmarks_iterator = thread_safe_tee(\n self.benchmarks\n )\n return IterateOverBenchmarks(\n env=self.env.fork(),\n benchmarks=other_benchmarks_iterator,\n fork_shares_iterator=self.fork_shares_iterator,\n )\n\n\nclass CycleOverBenchmarks(IterateOverBenchmarks):\n \"\"\"Cycle through a list of benchmarks on each call to :meth:`reset()\n <compiler_gym.envs.CompilerEnv.reset>`. Same as\n :class:`IterateOverBenchmarks` except the list of benchmarks repeats once\n exhausted.\n \"\"\"\n\n def __init__(\n self,\n env: CompilerEnv,\n benchmarks: Iterable[BenchmarkLike],\n fork_shares_iterator: bool = False,\n ):\n \"\"\"Constructor.\n\n :param env: The environment to wrap.\n\n :param benchmarks: An iterable sequence of benchmarks.\n\n :param fork_shares_iterator: If :code:`True`, the :code:`benchmarks`\n iterator will be shared by a forked environment created by\n :meth:`env.fork() <compiler_gym.envs.CompilerEnv.fork>`. This means\n that calling :meth:`env.reset()\n <compiler_gym.envs.CompilerEnv.reset>` with one environment will\n advance the iterator in the other. If :code:`False`, forked\n environments will use :code:`itertools.tee()` to create a copy of\n the iterator so that each iterator may advance independently.\n However, this requires shared buffers between the environments which\n can lead to memory overheads if :meth:`env.reset()\n <compiler_gym.envs.CompilerEnv.reset>` is called many times more in\n one environment than the other.\n \"\"\"\n super().__init__(\n env, benchmarks=cycle(benchmarks), fork_shares_iterator=fork_shares_iterator\n )\n\n\nclass CycleOverBenchmarksIterator(CompilerEnvWrapper):\n \"\"\"Same as :class:`CycleOverBenchmarks\n <compiler_gym.wrappers.CycleOverBenchmarks>` except that the user generates\n the iterator.\n \"\"\"\n\n def __init__(\n self,\n env: CompilerEnv,\n make_benchmark_iterator: Callable[[], Iterable[BenchmarkLike]],\n ):\n \"\"\"Constructor.\n\n :param env: The environment to wrap.\n\n :param make_benchmark_iterator: A callback that returns an iterator over\n a sequence of benchmarks. 
Once the iterator is exhausted, this\n callback is called to produce a new iterator.\n \"\"\"\n super().__init__(env)\n self.make_benchmark_iterator = make_benchmark_iterator\n self.benchmarks = iter(self.make_benchmark_iterator())\n\n def reset(self, benchmark: Optional[BenchmarkLike] = None, **kwargs):\n if benchmark is not None:\n raise TypeError(\"Benchmark passed toIterateOverBenchmarks.reset()\")\n try:\n benchmark: BenchmarkLike = next(self.benchmarks)\n except StopIteration:\n self.benchmarks = iter(self.make_benchmark_iterator())\n benchmark: BenchmarkLike = next(self.benchmarks)\n\n return self.env.reset(benchmark=benchmark)\n\n def fork(self) -> \"CycleOverBenchmarksIterator\":\n return CycleOverBenchmarksIterator(\n env=self.env.fork(),\n make_benchmark_iterator=self.make_benchmark_iterator,\n )\n\n\nclass RandomOrderBenchmarks(IterateOverBenchmarks):\n \"\"\"Select randomly from a list of benchmarks on each call to :meth:`reset()\n <compiler_gym.envs.CompilerEnv.reset>`.\n\n .. note::\n\n Uniform random selection is provided by evaluating the input benchmarks\n iterator into a list and sampling randomly from the list. For very large\n and infinite iterables of benchmarks you must use the\n :class:`IterateOverBenchmarks\n <compiler_gym.wrappers.IterateOverBenchmarks>` wrapper with your own\n random sampling iterator.\n \"\"\"\n\n def __init__(\n self,\n env: CompilerEnv,\n benchmarks: Iterable[BenchmarkLike],\n rng: Optional[np.random.Generator] = None,\n ):\n \"\"\"Constructor.\n\n :param env: The environment to wrap.\n\n :param benchmarks: An iterable sequence of benchmarks. The entirety of\n this input iterator is evaluated during construction.\n\n :param rng: A random number generator to use for random benchmark\n selection.\n \"\"\"\n self._all_benchmarks = list(benchmarks)\n rng = rng or np.random.default_rng()\n super().__init__(\n env,\n benchmarks=(rng.choice(self._all_benchmarks) for _ in iter(int, 1)),\n fork_shares_iterator=True,\n )\n\n def fork(self) -> \"IterateOverBenchmarks\":\n \"\"\"Fork the random order benchmark wrapper.\n\n Note that RNG state is not copied to forked environments.\n \"\"\"\n return IterateOverBenchmarks(\n env=self.env.fork(), benchmarks=self._all_benchmarks\n )\n", "path": "compiler_gym/wrappers/datasets.py"}]}
2,787
558
gh_patches_debug_29334
rasdani/github-patches
git_diff
svthalia__concrexit-1676
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Sales order payments are not always saved ### Describe the bug When paying for a Thalia pay order (via the sales payment view, so the QR code flow), the payment is not always stored back to the order. The payment is created properly, but after payment, the foreign key to the payment in the order is not saved. ### How to reproduce I am not sure exactly when this happens, at least it happens for the current shift 2 on the current staging environment. It might be because the shift has already been ended. ### Expected behaviour Store the payment properly ### Additional context Might be related to https://github.com/svthalia/concrexit/blob/6d0866022afb7fdf3edab34709d4d99e28039d59/website/sales/models/order.py#L123 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/sales/models/order.py` Content: ``` 1 from decimal import Decimal 2 3 from django.conf import settings 4 from django.core.exceptions import ValidationError 5 from django.core.validators import MinValueValidator 6 from django.db import models 7 from django.db.models import ( 8 Sum, 9 Value, 10 F, 11 DecimalField, 12 Q, 13 IntegerField, 14 BooleanField, 15 Count, 16 ) 17 from django.db.models.functions import Coalesce 18 from django.urls import reverse 19 from django.utils import timezone 20 from django.utils.translation import gettext_lazy as _ 21 from queryable_properties.managers import QueryablePropertiesManager 22 from queryable_properties.properties import AnnotationProperty 23 24 from members.models import uuid, Member 25 from payments.models import Payable, Payment 26 from sales.models.product import ProductListItem 27 from sales.models.shift import Shift 28 29 30 def default_order_shift(): 31 return Shift.objects.filter(active=True).first() 32 33 34 class Order(models.Model, Payable): 35 36 objects = QueryablePropertiesManager() 37 38 class Meta: 39 verbose_name = _("order") 40 verbose_name_plural = _("orders") 41 permissions = [ 42 ("custom_prices", _("Can use custom prices and discounts in orders")), 43 ] 44 ordering = ["created_at"] 45 46 id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) 47 48 created_at = models.DateTimeField( 49 verbose_name=_("created at"), default=timezone.now 50 ) 51 52 shift = models.ForeignKey( 53 Shift, 54 verbose_name=_("shift"), 55 related_name="orders", 56 default=default_order_shift, 57 null=False, 58 blank=False, 59 on_delete=models.PROTECT, 60 ) 61 62 items = models.ManyToManyField( 63 ProductListItem, through="OrderItem", verbose_name=_("items"), 64 ) 65 66 payment = models.OneToOneField( 67 Payment, 68 verbose_name=_("payment"), 69 related_name="sales_order", 70 on_delete=models.SET_NULL, 71 blank=True, 72 null=True, 73 ) 74 75 discount = models.DecimalField( 76 verbose_name=_("discount"), 77 max_digits=6, 78 decimal_places=2, 79 null=True, 80 blank=True, 81 validators=[MinValueValidator(Decimal("0.00"))], 82 ) 83 84 payer = models.ForeignKey( 85 Member, 86 models.SET_NULL, 87 verbose_name=_("payer"), 88 related_name="sales_order", 89 blank=True, 90 null=True, 91 ) 92 93 age_restricted = AnnotationProperty( 94 Count( 95 "order_items__pk", 96 filter=Q(order_items__product__product__age_restricted=True), 97 output_field=BooleanField(), 98 ) 99 ) 100 101 subtotal = AnnotationProperty( 102 Coalesce(Sum("order_items__total"), Value(0.00), 
output_field=DecimalField()) 103 ) 104 105 total_amount = AnnotationProperty( 106 Coalesce(Sum("order_items__total"), Value(0.00), output_field=DecimalField()) 107 - Coalesce(F("discount"), Value(0.00), output_field=DecimalField()) 108 ) 109 110 num_items = AnnotationProperty( 111 Coalesce(Sum("order_items__amount"), Value(0), output_field=IntegerField()) 112 ) 113 114 def save( 115 self, force_insert=False, force_update=False, using=None, update_fields=None 116 ): 117 if self.shift.locked: 118 return 119 if self.shift.start > timezone.now(): 120 return 121 if ( 122 self.payment 123 and float(sum(self.order_items.values_list("total", flat=True))) 124 - (self.discount or 0) 125 != self.payment.amount 126 ): 127 return 128 if self.payment and not self.payer: 129 self.payer = self.payment.paid_by 130 131 return super(Order, self).save(force_insert, force_update, using, update_fields) 132 133 def clean(self): 134 super().clean() 135 errors = {} 136 137 if self.shift.start > timezone.now(): 138 errors.update({"shift": _("The shift hasn't started yet.")}) 139 140 if self.shift.locked: 141 errors.update({"shift": _("The shift this order belongs to is locked.")}) 142 143 if self.discount and self.discount > self.total_amount: 144 errors.update( 145 {"discount": _("Discount cannot be higher than total amount.")} 146 ) 147 148 if errors: 149 raise ValidationError(errors) 150 151 @property 152 def payment_amount(self): 153 return self.total_amount 154 155 @property 156 def payment_topic(self): 157 return f"Sales at {self.shift}" 158 159 @property 160 def order_description(self): 161 return ", ".join(str(x) for x in self.order_items.all()) 162 163 @property 164 def payment_notes(self): 165 return ( 166 f"{self.order_description}. Ordered at {self.created_at.time()} ({self.id})" 167 ) 168 169 @property 170 def payment_payer(self): 171 return self.payer 172 173 @property 174 def accept_payment_from_any_user(self): 175 return True 176 177 @property 178 def payment_url(self): 179 return ( 180 settings.BASE_URL + reverse("sales:order-pay", kwargs={"pk": self.pk}) 181 if not self.payment 182 and (self.payment_amount is not None and self.payment_amount != 0) 183 else None 184 ) 185 186 def __str__(self): 187 return f"Order {self.id} ({self.shift})" 188 189 190 class OrderItem(models.Model): 191 class Meta: 192 verbose_name = "item" 193 verbose_name_plural = "items" 194 ordering = ["pk"] 195 indexes = [ 196 models.Index(fields=["order"]), 197 ] 198 199 product = models.ForeignKey( 200 ProductListItem, 201 verbose_name=_("product"), 202 null=False, 203 blank=False, 204 on_delete=models.PROTECT, 205 ) 206 order = models.ForeignKey( 207 Order, 208 verbose_name=_("order"), 209 related_name="order_items", 210 null=False, 211 blank=False, 212 on_delete=models.CASCADE, 213 ) 214 total = models.DecimalField( 215 verbose_name=_("total"), 216 max_digits=6, 217 decimal_places=2, 218 null=False, 219 blank=True, 220 validators=[MinValueValidator(Decimal("0.00"))], 221 help_text="Only when overriding the default", 222 ) 223 amount = models.PositiveSmallIntegerField( 224 verbose_name=_("amount"), null=False, blank=False 225 ) 226 227 def save( 228 self, force_insert=False, force_update=False, using=None, update_fields=None 229 ): 230 if self.order.shift.locked: 231 return 232 if self.order.payment: 233 return 234 235 if not self.total: 236 self.total = self.product.price * self.amount 237 238 return super(OrderItem, self).save( 239 force_insert, force_update, using, update_fields 240 ) 241 242 def clean(self): 243 
super().clean() 244 errors = {} 245 246 if self.order.shift.locked: 247 errors.update({"order": _("The shift is locked.")}) 248 249 if self.product not in self.order.shift.product_list.product_items.all(): 250 errors.update({"product": _("This product is not available.")}) 251 252 if errors: 253 raise ValidationError(errors) 254 255 def __str__(self): 256 return f"{self.amount}x {self.product.product.name}" 257 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/sales/models/order.py b/website/sales/models/order.py --- a/website/sales/models/order.py +++ b/website/sales/models/order.py @@ -115,16 +115,17 @@ self, force_insert=False, force_update=False, using=None, update_fields=None ): if self.shift.locked: - return + raise ValueError("The shift this order belongs to is locked.") if self.shift.start > timezone.now(): - return + raise ValueError("The shift hasn't started yet.") if ( self.payment - and float(sum(self.order_items.values_list("total", flat=True))) - - (self.discount or 0) - != self.payment.amount + and self.subtotal - Decimal(self.discount or 0) != self.payment.amount ): - return + # We cannot use self.total_amount as it is a requires a database query and hence will not use any updated values + raise ValueError( + "The payment amount does not match the order total amount." + ) if self.payment and not self.payer: self.payer = self.payment.paid_by @@ -228,9 +229,9 @@ self, force_insert=False, force_update=False, using=None, update_fields=None ): if self.order.shift.locked: - return + raise ValueError("The shift this order belongs to is locked.") if self.order.payment: - return + raise ValueError("This order has already been paid for.") if not self.total: self.total = self.product.price * self.amount
{"golden_diff": "diff --git a/website/sales/models/order.py b/website/sales/models/order.py\n--- a/website/sales/models/order.py\n+++ b/website/sales/models/order.py\n@@ -115,16 +115,17 @@\n self, force_insert=False, force_update=False, using=None, update_fields=None\n ):\n if self.shift.locked:\n- return\n+ raise ValueError(\"The shift this order belongs to is locked.\")\n if self.shift.start > timezone.now():\n- return\n+ raise ValueError(\"The shift hasn't started yet.\")\n if (\n self.payment\n- and float(sum(self.order_items.values_list(\"total\", flat=True)))\n- - (self.discount or 0)\n- != self.payment.amount\n+ and self.subtotal - Decimal(self.discount or 0) != self.payment.amount\n ):\n- return\n+ # We cannot use self.total_amount as it is a requires a database query and hence will not use any updated values\n+ raise ValueError(\n+ \"The payment amount does not match the order total amount.\"\n+ )\n if self.payment and not self.payer:\n self.payer = self.payment.paid_by\n \n@@ -228,9 +229,9 @@\n self, force_insert=False, force_update=False, using=None, update_fields=None\n ):\n if self.order.shift.locked:\n- return\n+ raise ValueError(\"The shift this order belongs to is locked.\")\n if self.order.payment:\n- return\n+ raise ValueError(\"This order has already been paid for.\")\n \n if not self.total:\n self.total = self.product.price * self.amount\n", "issue": "Sales order payments are not always saved\n### Describe the bug\r\nWhen paying for a Thalia pay order (via the sales payment view, so the QR code flow), the payment is not always stored back to the order. The payment is created properly, but after payment, the foreign key to the payment in the order is not saved. \r\n\r\n### How to reproduce\r\nI am not sure exactly when this happens, at least it happens for the current shift 2 on the current staging environment. 
It might be because the shift has already been ended.\r\n\r\n### Expected behaviour\r\nStore the payment properly\r\n\r\n### Additional context\r\nMight be related to https://github.com/svthalia/concrexit/blob/6d0866022afb7fdf3edab34709d4d99e28039d59/website/sales/models/order.py#L123\n", "before_files": [{"content": "from decimal import Decimal\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import MinValueValidator\nfrom django.db import models\nfrom django.db.models import (\n Sum,\n Value,\n F,\n DecimalField,\n Q,\n IntegerField,\n BooleanField,\n Count,\n)\nfrom django.db.models.functions import Coalesce\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom queryable_properties.managers import QueryablePropertiesManager\nfrom queryable_properties.properties import AnnotationProperty\n\nfrom members.models import uuid, Member\nfrom payments.models import Payable, Payment\nfrom sales.models.product import ProductListItem\nfrom sales.models.shift import Shift\n\n\ndef default_order_shift():\n return Shift.objects.filter(active=True).first()\n\n\nclass Order(models.Model, Payable):\n\n objects = QueryablePropertiesManager()\n\n class Meta:\n verbose_name = _(\"order\")\n verbose_name_plural = _(\"orders\")\n permissions = [\n (\"custom_prices\", _(\"Can use custom prices and discounts in orders\")),\n ]\n ordering = [\"created_at\"]\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n\n created_at = models.DateTimeField(\n verbose_name=_(\"created at\"), default=timezone.now\n )\n\n shift = models.ForeignKey(\n Shift,\n verbose_name=_(\"shift\"),\n related_name=\"orders\",\n default=default_order_shift,\n null=False,\n blank=False,\n on_delete=models.PROTECT,\n )\n\n items = models.ManyToManyField(\n ProductListItem, through=\"OrderItem\", verbose_name=_(\"items\"),\n )\n\n payment = models.OneToOneField(\n Payment,\n verbose_name=_(\"payment\"),\n related_name=\"sales_order\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n )\n\n discount = models.DecimalField(\n verbose_name=_(\"discount\"),\n max_digits=6,\n decimal_places=2,\n null=True,\n blank=True,\n validators=[MinValueValidator(Decimal(\"0.00\"))],\n )\n\n payer = models.ForeignKey(\n Member,\n models.SET_NULL,\n verbose_name=_(\"payer\"),\n related_name=\"sales_order\",\n blank=True,\n null=True,\n )\n\n age_restricted = AnnotationProperty(\n Count(\n \"order_items__pk\",\n filter=Q(order_items__product__product__age_restricted=True),\n output_field=BooleanField(),\n )\n )\n\n subtotal = AnnotationProperty(\n Coalesce(Sum(\"order_items__total\"), Value(0.00), output_field=DecimalField())\n )\n\n total_amount = AnnotationProperty(\n Coalesce(Sum(\"order_items__total\"), Value(0.00), output_field=DecimalField())\n - Coalesce(F(\"discount\"), Value(0.00), output_field=DecimalField())\n )\n\n num_items = AnnotationProperty(\n Coalesce(Sum(\"order_items__amount\"), Value(0), output_field=IntegerField())\n )\n\n def save(\n self, force_insert=False, force_update=False, using=None, update_fields=None\n ):\n if self.shift.locked:\n return\n if self.shift.start > timezone.now():\n return\n if (\n self.payment\n and float(sum(self.order_items.values_list(\"total\", flat=True)))\n - (self.discount or 0)\n != self.payment.amount\n ):\n return\n if self.payment and not self.payer:\n self.payer = self.payment.paid_by\n\n return super(Order, 
self).save(force_insert, force_update, using, update_fields)\n\n def clean(self):\n super().clean()\n errors = {}\n\n if self.shift.start > timezone.now():\n errors.update({\"shift\": _(\"The shift hasn't started yet.\")})\n\n if self.shift.locked:\n errors.update({\"shift\": _(\"The shift this order belongs to is locked.\")})\n\n if self.discount and self.discount > self.total_amount:\n errors.update(\n {\"discount\": _(\"Discount cannot be higher than total amount.\")}\n )\n\n if errors:\n raise ValidationError(errors)\n\n @property\n def payment_amount(self):\n return self.total_amount\n\n @property\n def payment_topic(self):\n return f\"Sales at {self.shift}\"\n\n @property\n def order_description(self):\n return \", \".join(str(x) for x in self.order_items.all())\n\n @property\n def payment_notes(self):\n return (\n f\"{self.order_description}. Ordered at {self.created_at.time()} ({self.id})\"\n )\n\n @property\n def payment_payer(self):\n return self.payer\n\n @property\n def accept_payment_from_any_user(self):\n return True\n\n @property\n def payment_url(self):\n return (\n settings.BASE_URL + reverse(\"sales:order-pay\", kwargs={\"pk\": self.pk})\n if not self.payment\n and (self.payment_amount is not None and self.payment_amount != 0)\n else None\n )\n\n def __str__(self):\n return f\"Order {self.id} ({self.shift})\"\n\n\nclass OrderItem(models.Model):\n class Meta:\n verbose_name = \"item\"\n verbose_name_plural = \"items\"\n ordering = [\"pk\"]\n indexes = [\n models.Index(fields=[\"order\"]),\n ]\n\n product = models.ForeignKey(\n ProductListItem,\n verbose_name=_(\"product\"),\n null=False,\n blank=False,\n on_delete=models.PROTECT,\n )\n order = models.ForeignKey(\n Order,\n verbose_name=_(\"order\"),\n related_name=\"order_items\",\n null=False,\n blank=False,\n on_delete=models.CASCADE,\n )\n total = models.DecimalField(\n verbose_name=_(\"total\"),\n max_digits=6,\n decimal_places=2,\n null=False,\n blank=True,\n validators=[MinValueValidator(Decimal(\"0.00\"))],\n help_text=\"Only when overriding the default\",\n )\n amount = models.PositiveSmallIntegerField(\n verbose_name=_(\"amount\"), null=False, blank=False\n )\n\n def save(\n self, force_insert=False, force_update=False, using=None, update_fields=None\n ):\n if self.order.shift.locked:\n return\n if self.order.payment:\n return\n\n if not self.total:\n self.total = self.product.price * self.amount\n\n return super(OrderItem, self).save(\n force_insert, force_update, using, update_fields\n )\n\n def clean(self):\n super().clean()\n errors = {}\n\n if self.order.shift.locked:\n errors.update({\"order\": _(\"The shift is locked.\")})\n\n if self.product not in self.order.shift.product_list.product_items.all():\n errors.update({\"product\": _(\"This product is not available.\")})\n\n if errors:\n raise ValidationError(errors)\n\n def __str__(self):\n return f\"{self.amount}x {self.product.product.name}\"\n", "path": "website/sales/models/order.py"}], "after_files": [{"content": "from decimal import Decimal\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import MinValueValidator\nfrom django.db import models\nfrom django.db.models import (\n Sum,\n Value,\n F,\n DecimalField,\n Q,\n IntegerField,\n BooleanField,\n Count,\n)\nfrom django.db.models.functions import Coalesce\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom queryable_properties.managers import 
QueryablePropertiesManager\nfrom queryable_properties.properties import AnnotationProperty\n\nfrom members.models import uuid, Member\nfrom payments.models import Payable, Payment\nfrom sales.models.product import ProductListItem\nfrom sales.models.shift import Shift\n\n\ndef default_order_shift():\n return Shift.objects.filter(active=True).first()\n\n\nclass Order(models.Model, Payable):\n\n objects = QueryablePropertiesManager()\n\n class Meta:\n verbose_name = _(\"order\")\n verbose_name_plural = _(\"orders\")\n permissions = [\n (\"custom_prices\", _(\"Can use custom prices and discounts in orders\")),\n ]\n ordering = [\"created_at\"]\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n\n created_at = models.DateTimeField(\n verbose_name=_(\"created at\"), default=timezone.now\n )\n\n shift = models.ForeignKey(\n Shift,\n verbose_name=_(\"shift\"),\n related_name=\"orders\",\n default=default_order_shift,\n null=False,\n blank=False,\n on_delete=models.PROTECT,\n )\n\n items = models.ManyToManyField(\n ProductListItem, through=\"OrderItem\", verbose_name=_(\"items\"),\n )\n\n payment = models.OneToOneField(\n Payment,\n verbose_name=_(\"payment\"),\n related_name=\"sales_order\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n )\n\n discount = models.DecimalField(\n verbose_name=_(\"discount\"),\n max_digits=6,\n decimal_places=2,\n null=True,\n blank=True,\n validators=[MinValueValidator(Decimal(\"0.00\"))],\n )\n\n payer = models.ForeignKey(\n Member,\n models.SET_NULL,\n verbose_name=_(\"payer\"),\n related_name=\"sales_order\",\n blank=True,\n null=True,\n )\n\n age_restricted = AnnotationProperty(\n Count(\n \"order_items__pk\",\n filter=Q(order_items__product__product__age_restricted=True),\n output_field=BooleanField(),\n )\n )\n\n subtotal = AnnotationProperty(\n Coalesce(Sum(\"order_items__total\"), Value(0.00), output_field=DecimalField())\n )\n\n total_amount = AnnotationProperty(\n Coalesce(Sum(\"order_items__total\"), Value(0.00), output_field=DecimalField())\n - Coalesce(F(\"discount\"), Value(0.00), output_field=DecimalField())\n )\n\n num_items = AnnotationProperty(\n Coalesce(Sum(\"order_items__amount\"), Value(0), output_field=IntegerField())\n )\n\n def save(\n self, force_insert=False, force_update=False, using=None, update_fields=None\n ):\n if self.shift.locked:\n raise ValueError(\"The shift this order belongs to is locked.\")\n if self.shift.start > timezone.now():\n raise ValueError(\"The shift hasn't started yet.\")\n if (\n self.payment\n and self.subtotal - Decimal(self.discount or 0) != self.payment.amount\n ):\n # We cannot use self.total_amount as it is a requires a database query and hence will not use any updated values\n raise ValueError(\n \"The payment amount does not match the order total amount.\"\n )\n if self.payment and not self.payer:\n self.payer = self.payment.paid_by\n\n return super(Order, self).save(force_insert, force_update, using, update_fields)\n\n def clean(self):\n super().clean()\n errors = {}\n\n if self.shift.start > timezone.now():\n errors.update({\"shift\": _(\"The shift hasn't started yet.\")})\n\n if self.shift.locked:\n errors.update({\"shift\": _(\"The shift this order belongs to is locked.\")})\n\n if self.discount and self.discount > self.total_amount:\n errors.update(\n {\"discount\": _(\"Discount cannot be higher than total amount.\")}\n )\n\n if errors:\n raise ValidationError(errors)\n\n @property\n def payment_amount(self):\n return self.total_amount\n\n @property\n def 
payment_topic(self):\n return f\"Sales at {self.shift}\"\n\n @property\n def order_description(self):\n return \", \".join(str(x) for x in self.order_items.all())\n\n @property\n def payment_notes(self):\n return (\n f\"{self.order_description}. Ordered at {self.created_at.time()} ({self.id})\"\n )\n\n @property\n def payment_payer(self):\n return self.payer\n\n @property\n def accept_payment_from_any_user(self):\n return True\n\n @property\n def payment_url(self):\n return (\n settings.BASE_URL + reverse(\"sales:order-pay\", kwargs={\"pk\": self.pk})\n if not self.payment\n and (self.payment_amount is not None and self.payment_amount != 0)\n else None\n )\n\n def __str__(self):\n return f\"Order {self.id} ({self.shift})\"\n\n\nclass OrderItem(models.Model):\n class Meta:\n verbose_name = \"item\"\n verbose_name_plural = \"items\"\n ordering = [\"pk\"]\n indexes = [\n models.Index(fields=[\"order\"]),\n ]\n\n product = models.ForeignKey(\n ProductListItem,\n verbose_name=_(\"product\"),\n null=False,\n blank=False,\n on_delete=models.PROTECT,\n )\n order = models.ForeignKey(\n Order,\n verbose_name=_(\"order\"),\n related_name=\"order_items\",\n null=False,\n blank=False,\n on_delete=models.CASCADE,\n )\n total = models.DecimalField(\n verbose_name=_(\"total\"),\n max_digits=6,\n decimal_places=2,\n null=False,\n blank=True,\n validators=[MinValueValidator(Decimal(\"0.00\"))],\n help_text=\"Only when overriding the default\",\n )\n amount = models.PositiveSmallIntegerField(\n verbose_name=_(\"amount\"), null=False, blank=False\n )\n\n def save(\n self, force_insert=False, force_update=False, using=None, update_fields=None\n ):\n if self.order.shift.locked:\n raise ValueError(\"The shift this order belongs to is locked.\")\n if self.order.payment:\n raise ValueError(\"This order has already been paid for.\")\n\n if not self.total:\n self.total = self.product.price * self.amount\n\n return super(OrderItem, self).save(\n force_insert, force_update, using, update_fields\n )\n\n def clean(self):\n super().clean()\n errors = {}\n\n if self.order.shift.locked:\n errors.update({\"order\": _(\"The shift is locked.\")})\n\n if self.product not in self.order.shift.product_list.product_items.all():\n errors.update({\"product\": _(\"This product is not available.\")})\n\n if errors:\n raise ValidationError(errors)\n\n def __str__(self):\n return f\"{self.amount}x {self.product.product.name}\"\n", "path": "website/sales/models/order.py"}]}
2,593
361
gh_patches_debug_13790
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3620
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider wafflehouse is broken During the global build at 2021-06-02-14-42-40, spider **wafflehouse** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/wafflehouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/wafflehouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/wafflehouse.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/wafflehouse.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import json 3 4 import scrapy 5 6 from locations.hours import OpeningHours 7 from locations.items import GeojsonPointItem 8 9 10 class WaffleHouseSpider(scrapy.Spider): 11 name = "wafflehouse" 12 item_attributes = {"brand": "Waffle House", "brand_wikidata": "Q1701206"} 13 allowed_domains = ["wafflehouse.com"] 14 start_urls = [ 15 "https://wafflehouse.locally.com/stores/conversion_data?has_data=true&company_id=117995&store_mode=&style=&color=&upc=&category=&inline=1&show_links_in_list=&parent_domain=&map_center_lat=39.8&map_center_lng=-98.6&map_distance_diag=3000&sort_by=proximity&no_variants=0&only_retailer_id=&dealers_company_id=&only_store_id=false&uses_alt_coords=false&zoom_level=4&lang=en-us&forced_coords=1" 16 ] 17 18 def parse(self, response): 19 for row in response.json()["markers"]: 20 url = "https://locations.wafflehouse.com/" + row["slug"] 21 yield scrapy.Request(url, callback=self.parse_store) 22 23 def parse_store(self, response): 24 data = json.loads( 25 response.xpath('//head/script[@type="application/ld+json"]/text()').get() 26 ) 27 28 hours = OpeningHours() 29 specs = data.get("openingHoursSpecification", []) 30 if any({"validFrom", "validThrough"} <= spec.keys() for spec in specs): 31 # Giving opening hours for specific dates, abandon the whole proposal 32 pass 33 else: 34 for spec in specs: 35 for day in spec["dayOfWeek"]: 36 hours.add_range( 37 day[:2].capitalize(), spec["opens"], spec["closes"], "%I%p" 38 ) 39 40 properties = { 41 "ref": data["@id"], 42 "lat": data["geo"]["latitude"], 43 "lon": data["geo"]["longitude"], 44 "website": response.url, 45 "name": data["name"], 46 "phone": data["telephone"], 47 "addr_full": data["address"]["streetAddress"], 48 "city": data["address"]["addressLocality"], 49 "state": data["address"]["addressRegion"], 50 "postcode": data["address"]["postalCode"], 51 "opening_hours": hours.as_opening_hours(), 52 } 53 yield GeojsonPointItem(**properties) 54 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/wafflehouse.py b/locations/spiders/wafflehouse.py --- a/locations/spiders/wafflehouse.py +++ b/locations/spiders/wafflehouse.py @@ -44,10 +44,11 @@ "website": response.url, "name": data["name"], "phone": data["telephone"], - "addr_full": data["address"]["streetAddress"], + "street_address": data["address"]["streetAddress"], "city": data["address"]["addressLocality"], "state": data["address"]["addressRegion"], "postcode": data["address"]["postalCode"], "opening_hours": hours.as_opening_hours(), } + yield GeojsonPointItem(**properties)
{"golden_diff": "diff --git a/locations/spiders/wafflehouse.py b/locations/spiders/wafflehouse.py\n--- a/locations/spiders/wafflehouse.py\n+++ b/locations/spiders/wafflehouse.py\n@@ -44,10 +44,11 @@\n \"website\": response.url,\n \"name\": data[\"name\"],\n \"phone\": data[\"telephone\"],\n- \"addr_full\": data[\"address\"][\"streetAddress\"],\n+ \"street_address\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n \"opening_hours\": hours.as_opening_hours(),\n }\n+\n yield GeojsonPointItem(**properties)\n", "issue": "Spider wafflehouse is broken\nDuring the global build at 2021-06-02-14-42-40, spider **wafflehouse** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/wafflehouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/wafflehouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/wafflehouse.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\n\nimport scrapy\n\nfrom locations.hours import OpeningHours\nfrom locations.items import GeojsonPointItem\n\n\nclass WaffleHouseSpider(scrapy.Spider):\n name = \"wafflehouse\"\n item_attributes = {\"brand\": \"Waffle House\", \"brand_wikidata\": \"Q1701206\"}\n allowed_domains = [\"wafflehouse.com\"]\n start_urls = [\n \"https://wafflehouse.locally.com/stores/conversion_data?has_data=true&company_id=117995&store_mode=&style=&color=&upc=&category=&inline=1&show_links_in_list=&parent_domain=&map_center_lat=39.8&map_center_lng=-98.6&map_distance_diag=3000&sort_by=proximity&no_variants=0&only_retailer_id=&dealers_company_id=&only_store_id=false&uses_alt_coords=false&zoom_level=4&lang=en-us&forced_coords=1\"\n ]\n\n def parse(self, response):\n for row in response.json()[\"markers\"]:\n url = \"https://locations.wafflehouse.com/\" + row[\"slug\"]\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_store(self, response):\n data = json.loads(\n response.xpath('//head/script[@type=\"application/ld+json\"]/text()').get()\n )\n\n hours = OpeningHours()\n specs = data.get(\"openingHoursSpecification\", [])\n if any({\"validFrom\", \"validThrough\"} <= spec.keys() for spec in specs):\n # Giving opening hours for specific dates, abandon the whole proposal\n pass\n else:\n for spec in specs:\n for day in spec[\"dayOfWeek\"]:\n hours.add_range(\n day[:2].capitalize(), spec[\"opens\"], spec[\"closes\"], \"%I%p\"\n )\n\n properties = {\n \"ref\": data[\"@id\"],\n \"lat\": data[\"geo\"][\"latitude\"],\n \"lon\": data[\"geo\"][\"longitude\"],\n \"website\": response.url,\n \"name\": data[\"name\"],\n \"phone\": data[\"telephone\"],\n \"addr_full\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n \"opening_hours\": hours.as_opening_hours(),\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/wafflehouse.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\n\nimport scrapy\n\nfrom locations.hours import OpeningHours\nfrom locations.items import GeojsonPointItem\n\n\nclass WaffleHouseSpider(scrapy.Spider):\n name = \"wafflehouse\"\n item_attributes = {\"brand\": \"Waffle House\", \"brand_wikidata\": 
\"Q1701206\"}\n allowed_domains = [\"wafflehouse.com\"]\n start_urls = [\n \"https://wafflehouse.locally.com/stores/conversion_data?has_data=true&company_id=117995&store_mode=&style=&color=&upc=&category=&inline=1&show_links_in_list=&parent_domain=&map_center_lat=39.8&map_center_lng=-98.6&map_distance_diag=3000&sort_by=proximity&no_variants=0&only_retailer_id=&dealers_company_id=&only_store_id=false&uses_alt_coords=false&zoom_level=4&lang=en-us&forced_coords=1\"\n ]\n\n def parse(self, response):\n for row in response.json()[\"markers\"]:\n url = \"https://locations.wafflehouse.com/\" + row[\"slug\"]\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_store(self, response):\n data = json.loads(\n response.xpath('//head/script[@type=\"application/ld+json\"]/text()').get()\n )\n\n hours = OpeningHours()\n specs = data.get(\"openingHoursSpecification\", [])\n if any({\"validFrom\", \"validThrough\"} <= spec.keys() for spec in specs):\n # Giving opening hours for specific dates, abandon the whole proposal\n pass\n else:\n for spec in specs:\n for day in spec[\"dayOfWeek\"]:\n hours.add_range(\n day[:2].capitalize(), spec[\"opens\"], spec[\"closes\"], \"%I%p\"\n )\n\n properties = {\n \"ref\": data[\"@id\"],\n \"lat\": data[\"geo\"][\"latitude\"],\n \"lon\": data[\"geo\"][\"longitude\"],\n \"website\": response.url,\n \"name\": data[\"name\"],\n \"phone\": data[\"telephone\"],\n \"street_address\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n \"opening_hours\": hours.as_opening_hours(),\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/wafflehouse.py"}]}
1,076
164
gh_patches_debug_31352
rasdani/github-patches
git_diff
Theano__Theano-4512
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- d3viz doesn't work for some graphs Sometimes d3viz doesn't work for me - empty graph is displayed, and there is an error message in JS console. I tried to reduce it to a minimal example ``` py import os os.environ['THEANO_FLAGS'] = "device=gpu0,floatX=float32" #,optimizer=fast_compile" import numpy as np from lasagne.updates import adam from theano import tensor as T, shared, function import theano.d3viz as d3v def show_d3(g): d3v.d3viz(g, 'example.html') from IPython.display import IFrame return IFrame('example.html', width=800, height=500) x = T.fvector() W = shared(np.zeros((10, 5), dtype=np.float32)) b = shared(np.zeros((10,), dtype=np.float32)) y_true = T.fvector() y = T.nnet.sigmoid(T.dot(x, W) + b) cost = T.sqrt(((y - y_true)**2).sum()) updates = adam(cost, [W]) # no b! f_cost = function([x, y_true], cost, updates=updates) show_d3(f_cost) ``` (IPython notebook: https://gist.github.com/kmike/13b0fb747dccd4f2f1e44789a9cb832c). This is brittle: if `adam` is replaced with any other training method from Lasagne (or if a simple SGD is implemented manually) chart works; if `T.sqrt` is removed from `cost` expression chart works; if `T.nnet sigmoid` is removed from `y` expression chart works; if `b` is added to adam updates chart works; if `optimizer=fast_compile` is added to THEANO_FLAGS chart works. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `theano/d3viz/d3viz.py` Content: ``` 1 """Dynamic visualization of Theano graphs. 2 3 Author: Christof Angermueller <[email protected]> 4 """ 5 from __future__ import absolute_import, print_function, division 6 7 import os 8 import shutil 9 import re 10 import six 11 from six import iteritems 12 13 from theano.d3viz.formatting import PyDotFormatter 14 15 __path__ = os.path.dirname(os.path.realpath(__file__)) 16 17 18 def replace_patterns(x, replace): 19 """Replace `replace` in string `x`. 20 21 Parameters 22 ---------- 23 s : str 24 String on which function is applied 25 replace : dict 26 `key`, `value` pairs where key is a regular expression and `value` a 27 string by which `key` is replaced 28 """ 29 for from_, to in iteritems(replace): 30 x = x.replace(str(from_), str(to)) 31 return x 32 33 34 def escape_quotes(s): 35 """Escape quotes in string. 36 37 Parameters 38 ---------- 39 s : str 40 String on which function is applied 41 """ 42 s = re.sub(r'''(['"])''', r'\\\1', s) 43 return s 44 45 46 def d3viz(fct, outfile, copy_deps=True, *args, **kwargs): 47 """Create HTML file with dynamic visualizing of a Theano function graph. 48 49 In the HTML file, the whole graph or single nodes can be moved by drag and 50 drop. Zooming is possible via the mouse wheel. Detailed information about 51 nodes and edges are displayed via mouse-over events. Node labels can be 52 edited by selecting Edit from the context menu. 53 54 Input nodes are colored in green, output nodes in blue. Apply nodes are 55 ellipses, and colored depending on the type of operation they perform. Red 56 ellipses are transfers from/to the GPU (ops with names GpuFromHost, 57 HostFromGpu). 58 59 Edges are black by default. If a node returns a view of an 60 input, the input edge will be blue. If it returns a destroyed input, the 61 edge will be red. 
62 63 Parameters 64 ---------- 65 fct : theano.compile.function_module.Function 66 A compiled Theano function, variable, apply or a list of variables. 67 outfile : str 68 Path to output HTML file. 69 copy_deps : bool, optional 70 Copy javascript and CSS dependencies to output directory. 71 72 Notes 73 ----- 74 This function accepts extra parameters which will be forwarded to 75 :class:`theano.d3viz.formatting.PyDotFormatter`. 76 77 """ 78 79 # Create DOT graph 80 formatter = PyDotFormatter(*args, **kwargs) 81 graph = formatter(fct) 82 dot_graph_raw = graph.create_dot() 83 if not six.PY2: 84 dot_graph_raw = dot_graph_raw.decode('utf8') 85 dot_graph = escape_quotes(dot_graph_raw).replace('\n', '').replace('\r', '') 86 87 # Create output directory if not existing 88 outdir = os.path.dirname(outfile) 89 if not outdir == '' and not os.path.exists(outdir): 90 os.makedirs(outdir) 91 92 # Read template HTML file 93 template_file = os.path.join(__path__, 'html', 'template.html') 94 with open(template_file) as f: 95 template = f.read() 96 97 # Copy dependencies to output directory 98 src_deps = __path__ 99 if copy_deps: 100 dst_deps = 'd3viz' 101 for d in ['js', 'css']: 102 dep = os.path.join(outdir, dst_deps, d) 103 if not os.path.exists(dep): 104 shutil.copytree(os.path.join(src_deps, d), dep) 105 else: 106 dst_deps = src_deps 107 108 # Replace patterns in template 109 replace = { 110 '%% JS_DIR %%': os.path.join(dst_deps, 'js'), 111 '%% CSS_DIR %%': os.path.join(dst_deps, 'css'), 112 '%% DOT_GRAPH %%': dot_graph, 113 } 114 html = replace_patterns(template, replace) 115 116 # Write HTML file 117 with open(outfile, 'w') as f: 118 f.write(html) 119 120 121 def d3write(fct, path, *args, **kwargs): 122 """Convert Theano graph to pydot graph and write to dot file. 123 124 Parameters 125 ---------- 126 fct : theano.compile.function_module.Function 127 A compiled Theano function, variable, apply or a list of variables. 128 path: str 129 Path to output file 130 131 Notes 132 ----- 133 This function accepts extra parameters which will be forwarded to 134 :class:`theano.d3viz.formatting.PyDotFormatter`. 135 136 """ 137 138 formatter = PyDotFormatter(*args, **kwargs) 139 graph = formatter(fct) 140 graph.write_dot(path) 141 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/theano/d3viz/d3viz.py b/theano/d3viz/d3viz.py --- a/theano/d3viz/d3viz.py +++ b/theano/d3viz/d3viz.py @@ -5,8 +5,8 @@ from __future__ import absolute_import, print_function, division import os +import json import shutil -import re import six from six import iteritems @@ -31,16 +31,15 @@ return x -def escape_quotes(s): - """Escape quotes in string. +def safe_json(obj): + """Encode `obj` to JSON so that it can be embedded safely inside HTML. Parameters ---------- - s : str - String on which function is applied + obj : object + object to serialize """ - s = re.sub(r'''(['"])''', r'\\\1', s) - return s + return json.dumps(obj).replace('<', '\\u003c') def d3viz(fct, outfile, copy_deps=True, *args, **kwargs): @@ -79,10 +78,9 @@ # Create DOT graph formatter = PyDotFormatter(*args, **kwargs) graph = formatter(fct) - dot_graph_raw = graph.create_dot() + dot_graph = graph.create_dot() if not six.PY2: - dot_graph_raw = dot_graph_raw.decode('utf8') - dot_graph = escape_quotes(dot_graph_raw).replace('\n', '').replace('\r', '') + dot_graph = dot_graph.decode('utf8') # Create output directory if not existing outdir = os.path.dirname(outfile) @@ -109,7 +107,7 @@ replace = { '%% JS_DIR %%': os.path.join(dst_deps, 'js'), '%% CSS_DIR %%': os.path.join(dst_deps, 'css'), - '%% DOT_GRAPH %%': dot_graph, + '%% DOT_GRAPH %%': safe_json(dot_graph), } html = replace_patterns(template, replace)
{"golden_diff": "diff --git a/theano/d3viz/d3viz.py b/theano/d3viz/d3viz.py\n--- a/theano/d3viz/d3viz.py\n+++ b/theano/d3viz/d3viz.py\n@@ -5,8 +5,8 @@\n from __future__ import absolute_import, print_function, division\n \n import os\n+import json\n import shutil\n-import re\n import six\n from six import iteritems\n \n@@ -31,16 +31,15 @@\n return x\n \n \n-def escape_quotes(s):\n- \"\"\"Escape quotes in string.\n+def safe_json(obj):\n+ \"\"\"Encode `obj` to JSON so that it can be embedded safely inside HTML.\n \n Parameters\n ----------\n- s : str\n- String on which function is applied\n+ obj : object\n+ object to serialize\n \"\"\"\n- s = re.sub(r'''(['\"])''', r'\\\\\\1', s)\n- return s\n+ return json.dumps(obj).replace('<', '\\\\u003c')\n \n \n def d3viz(fct, outfile, copy_deps=True, *args, **kwargs):\n@@ -79,10 +78,9 @@\n # Create DOT graph\n formatter = PyDotFormatter(*args, **kwargs)\n graph = formatter(fct)\n- dot_graph_raw = graph.create_dot()\n+ dot_graph = graph.create_dot()\n if not six.PY2:\n- dot_graph_raw = dot_graph_raw.decode('utf8')\n- dot_graph = escape_quotes(dot_graph_raw).replace('\\n', '').replace('\\r', '')\n+ dot_graph = dot_graph.decode('utf8')\n \n # Create output directory if not existing\n outdir = os.path.dirname(outfile)\n@@ -109,7 +107,7 @@\n replace = {\n '%% JS_DIR %%': os.path.join(dst_deps, 'js'),\n '%% CSS_DIR %%': os.path.join(dst_deps, 'css'),\n- '%% DOT_GRAPH %%': dot_graph,\n+ '%% DOT_GRAPH %%': safe_json(dot_graph),\n }\n html = replace_patterns(template, replace)\n", "issue": "d3viz doesn't work for some graphs\nSometimes d3viz doesn't work for me - empty graph is displayed, and there is an error message in JS console. I tried to reduce it to a minimal example\n\n``` py\nimport os \nos.environ['THEANO_FLAGS'] = \"device=gpu0,floatX=float32\" #,optimizer=fast_compile\"\n\nimport numpy as np\nfrom lasagne.updates import adam\nfrom theano import tensor as T, shared, function\nimport theano.d3viz as d3v\n\ndef show_d3(g):\n d3v.d3viz(g, 'example.html')\n from IPython.display import IFrame\n return IFrame('example.html', width=800, height=500)\n\nx = T.fvector()\nW = shared(np.zeros((10, 5), dtype=np.float32))\nb = shared(np.zeros((10,), dtype=np.float32))\ny_true = T.fvector()\n\ny = T.nnet.sigmoid(T.dot(x, W) + b)\ncost = T.sqrt(((y - y_true)**2).sum())\n\nupdates = adam(cost, [W]) # no b!\nf_cost = function([x, y_true], cost, updates=updates)\nshow_d3(f_cost)\n```\n\n(IPython notebook: https://gist.github.com/kmike/13b0fb747dccd4f2f1e44789a9cb832c).\nThis is brittle: if `adam` is replaced with any other training method from Lasagne (or if a simple SGD is implemented manually) chart works; if `T.sqrt` is removed from `cost` expression chart works; if `T.nnet sigmoid` is removed from `y` expression chart works; if `b` is added to adam updates chart works; if `optimizer=fast_compile` is added to THEANO_FLAGS chart works.\n\n", "before_files": [{"content": "\"\"\"Dynamic visualization of Theano graphs.\n\nAuthor: Christof Angermueller <[email protected]>\n\"\"\"\nfrom __future__ import absolute_import, print_function, division\n\nimport os\nimport shutil\nimport re\nimport six\nfrom six import iteritems\n\nfrom theano.d3viz.formatting import PyDotFormatter\n\n__path__ = os.path.dirname(os.path.realpath(__file__))\n\n\ndef replace_patterns(x, replace):\n \"\"\"Replace `replace` in string `x`.\n\n Parameters\n ----------\n s : str\n String on which function is applied\n replace : dict\n `key`, `value` pairs where key is a regular expression and `value` a\n 
string by which `key` is replaced\n \"\"\"\n for from_, to in iteritems(replace):\n x = x.replace(str(from_), str(to))\n return x\n\n\ndef escape_quotes(s):\n \"\"\"Escape quotes in string.\n\n Parameters\n ----------\n s : str\n String on which function is applied\n \"\"\"\n s = re.sub(r'''(['\"])''', r'\\\\\\1', s)\n return s\n\n\ndef d3viz(fct, outfile, copy_deps=True, *args, **kwargs):\n \"\"\"Create HTML file with dynamic visualizing of a Theano function graph.\n\n In the HTML file, the whole graph or single nodes can be moved by drag and\n drop. Zooming is possible via the mouse wheel. Detailed information about\n nodes and edges are displayed via mouse-over events. Node labels can be\n edited by selecting Edit from the context menu.\n\n Input nodes are colored in green, output nodes in blue. Apply nodes are\n ellipses, and colored depending on the type of operation they perform. Red\n ellipses are transfers from/to the GPU (ops with names GpuFromHost,\n HostFromGpu).\n\n Edges are black by default. If a node returns a view of an\n input, the input edge will be blue. If it returns a destroyed input, the\n edge will be red.\n\n Parameters\n ----------\n fct : theano.compile.function_module.Function\n A compiled Theano function, variable, apply or a list of variables.\n outfile : str\n Path to output HTML file.\n copy_deps : bool, optional\n Copy javascript and CSS dependencies to output directory.\n\n Notes\n -----\n This function accepts extra parameters which will be forwarded to\n :class:`theano.d3viz.formatting.PyDotFormatter`.\n\n \"\"\"\n\n # Create DOT graph\n formatter = PyDotFormatter(*args, **kwargs)\n graph = formatter(fct)\n dot_graph_raw = graph.create_dot()\n if not six.PY2:\n dot_graph_raw = dot_graph_raw.decode('utf8')\n dot_graph = escape_quotes(dot_graph_raw).replace('\\n', '').replace('\\r', '')\n\n # Create output directory if not existing\n outdir = os.path.dirname(outfile)\n if not outdir == '' and not os.path.exists(outdir):\n os.makedirs(outdir)\n\n # Read template HTML file\n template_file = os.path.join(__path__, 'html', 'template.html')\n with open(template_file) as f:\n template = f.read()\n\n # Copy dependencies to output directory\n src_deps = __path__\n if copy_deps:\n dst_deps = 'd3viz'\n for d in ['js', 'css']:\n dep = os.path.join(outdir, dst_deps, d)\n if not os.path.exists(dep):\n shutil.copytree(os.path.join(src_deps, d), dep)\n else:\n dst_deps = src_deps\n\n # Replace patterns in template\n replace = {\n '%% JS_DIR %%': os.path.join(dst_deps, 'js'),\n '%% CSS_DIR %%': os.path.join(dst_deps, 'css'),\n '%% DOT_GRAPH %%': dot_graph,\n }\n html = replace_patterns(template, replace)\n\n # Write HTML file\n with open(outfile, 'w') as f:\n f.write(html)\n\n\ndef d3write(fct, path, *args, **kwargs):\n \"\"\"Convert Theano graph to pydot graph and write to dot file.\n\n Parameters\n ----------\n fct : theano.compile.function_module.Function\n A compiled Theano function, variable, apply or a list of variables.\n path: str\n Path to output file\n\n Notes\n -----\n This function accepts extra parameters which will be forwarded to\n :class:`theano.d3viz.formatting.PyDotFormatter`.\n\n \"\"\"\n\n formatter = PyDotFormatter(*args, **kwargs)\n graph = formatter(fct)\n graph.write_dot(path)\n", "path": "theano/d3viz/d3viz.py"}], "after_files": [{"content": "\"\"\"Dynamic visualization of Theano graphs.\n\nAuthor: Christof Angermueller <[email protected]>\n\"\"\"\nfrom __future__ import absolute_import, print_function, division\n\nimport os\nimport json\nimport 
shutil\nimport six\nfrom six import iteritems\n\nfrom theano.d3viz.formatting import PyDotFormatter\n\n__path__ = os.path.dirname(os.path.realpath(__file__))\n\n\ndef replace_patterns(x, replace):\n \"\"\"Replace `replace` in string `x`.\n\n Parameters\n ----------\n s : str\n String on which function is applied\n replace : dict\n `key`, `value` pairs where key is a regular expression and `value` a\n string by which `key` is replaced\n \"\"\"\n for from_, to in iteritems(replace):\n x = x.replace(str(from_), str(to))\n return x\n\n\ndef safe_json(obj):\n \"\"\"Encode `obj` to JSON so that it can be embedded safely inside HTML.\n\n Parameters\n ----------\n obj : object\n object to serialize\n \"\"\"\n return json.dumps(obj).replace('<', '\\\\u003c')\n\n\ndef d3viz(fct, outfile, copy_deps=True, *args, **kwargs):\n \"\"\"Create HTML file with dynamic visualizing of a Theano function graph.\n\n In the HTML file, the whole graph or single nodes can be moved by drag and\n drop. Zooming is possible via the mouse wheel. Detailed information about\n nodes and edges are displayed via mouse-over events. Node labels can be\n edited by selecting Edit from the context menu.\n\n Input nodes are colored in green, output nodes in blue. Apply nodes are\n ellipses, and colored depending on the type of operation they perform. Red\n ellipses are transfers from/to the GPU (ops with names GpuFromHost,\n HostFromGpu).\n\n Edges are black by default. If a node returns a view of an\n input, the input edge will be blue. If it returns a destroyed input, the\n edge will be red.\n\n Parameters\n ----------\n fct : theano.compile.function_module.Function\n A compiled Theano function, variable, apply or a list of variables.\n outfile : str\n Path to output HTML file.\n copy_deps : bool, optional\n Copy javascript and CSS dependencies to output directory.\n\n Notes\n -----\n This function accepts extra parameters which will be forwarded to\n :class:`theano.d3viz.formatting.PyDotFormatter`.\n\n \"\"\"\n\n # Create DOT graph\n formatter = PyDotFormatter(*args, **kwargs)\n graph = formatter(fct)\n dot_graph = graph.create_dot()\n if not six.PY2:\n dot_graph = dot_graph.decode('utf8')\n\n # Create output directory if not existing\n outdir = os.path.dirname(outfile)\n if not outdir == '' and not os.path.exists(outdir):\n os.makedirs(outdir)\n\n # Read template HTML file\n template_file = os.path.join(__path__, 'html', 'template.html')\n with open(template_file) as f:\n template = f.read()\n\n # Copy dependencies to output directory\n src_deps = __path__\n if copy_deps:\n dst_deps = 'd3viz'\n for d in ['js', 'css']:\n dep = os.path.join(outdir, dst_deps, d)\n if not os.path.exists(dep):\n shutil.copytree(os.path.join(src_deps, d), dep)\n else:\n dst_deps = src_deps\n\n # Replace patterns in template\n replace = {\n '%% JS_DIR %%': os.path.join(dst_deps, 'js'),\n '%% CSS_DIR %%': os.path.join(dst_deps, 'css'),\n '%% DOT_GRAPH %%': safe_json(dot_graph),\n }\n html = replace_patterns(template, replace)\n\n # Write HTML file\n with open(outfile, 'w') as f:\n f.write(html)\n\n\ndef d3write(fct, path, *args, **kwargs):\n \"\"\"Convert Theano graph to pydot graph and write to dot file.\n\n Parameters\n ----------\n fct : theano.compile.function_module.Function\n A compiled Theano function, variable, apply or a list of variables.\n path: str\n Path to output file\n\n Notes\n -----\n This function accepts extra parameters which will be forwarded to\n :class:`theano.d3viz.formatting.PyDotFormatter`.\n\n \"\"\"\n\n formatter = 
PyDotFormatter(*args, **kwargs)\n graph = formatter(fct)\n graph.write_dot(path)\n", "path": "theano/d3viz/d3viz.py"}]}
1,997
462
gh_patches_debug_9769
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-1168
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Choice options are redundant using cookiecutter with latest click package * Cookiecutter version: 1.6.0 -- installed via `pip install` * Template project url: n/a * Python version: 3.7 * Operating System: Windows 10 ### Description: If cookiecutter.json has the following: ```json { "my_choice": ["a","b"] } ``` Then running cookiecutter gives the following prompt: ``` Select my_choice: 1 - a 2 - b Choose from 1, 2 (1, 2) [1]: ``` Note how the choices are repeated twice in the last line. This is because the [Click API](https://click.palletsprojects.com/en/7.x/api/) has been updated to 7.0 and automatically shows the choices to the user in parentheses. This is redundant. ### Solution The text passed to the `click.prompt` function should be changed to set `show_choices = False` or it should be changed to not show the choices and let the Click API do so instead. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 import os 5 import io 6 import sys 7 8 from setuptools import setup 9 10 version = "1.6.0" 11 12 if sys.argv[-1] == 'publish': 13 os.system('python setup.py sdist upload') 14 os.system('python setup.py bdist_wheel upload') 15 sys.exit() 16 17 if sys.argv[-1] == 'tag': 18 os.system("git tag -a %s -m 'version %s'" % (version, version)) 19 os.system("git push --tags") 20 sys.exit() 21 22 with io.open('README.rst', 'r', encoding='utf-8') as readme_file: 23 readme = readme_file.read() 24 25 requirements = [ 26 'future>=0.15.2', 27 'binaryornot>=0.2.0', 28 'jinja2>=2.7', 29 'click>=5.0', 30 'whichcraft>=0.4.0', 31 'poyo>=0.1.0', 32 'jinja2-time>=0.1.0', 33 'requests>=2.18.0', 34 ] 35 36 if sys.argv[-1] == 'readme': 37 print(readme) 38 sys.exit() 39 40 41 setup( 42 name='cookiecutter', 43 version=version, 44 description=('A command-line utility that creates projects from project ' 45 'templates, e.g. 
creating a Python package project from a ' 46 'Python package project template.'), 47 long_description=readme, 48 author='Audrey Roy', 49 author_email='[email protected]', 50 url='https://github.com/cookiecutter/cookiecutter', 51 packages=[ 52 'cookiecutter', 53 ], 54 package_dir={'cookiecutter': 'cookiecutter'}, 55 entry_points={ 56 'console_scripts': [ 57 'cookiecutter = cookiecutter.__main__:main', 58 ] 59 }, 60 include_package_data=True, 61 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*', 62 install_requires=requirements, 63 license='BSD', 64 zip_safe=False, 65 classifiers=[ 66 'Development Status :: 5 - Production/Stable', 67 'Environment :: Console', 68 'Intended Audience :: Developers', 69 'Natural Language :: English', 70 'License :: OSI Approved :: BSD License', 71 'Programming Language :: Python', 72 'Programming Language :: Python :: 2', 73 'Programming Language :: Python :: 2.7', 74 'Programming Language :: Python :: 3', 75 'Programming Language :: Python :: 3.5', 76 'Programming Language :: Python :: 3.6', 77 'Programming Language :: Python :: 3.7', 78 'Programming Language :: Python :: Implementation :: CPython', 79 'Programming Language :: Python :: Implementation :: PyPy', 80 'Topic :: Software Development', 81 ], 82 keywords=( 83 'cookiecutter, Python, projects, project templates, Jinja2, ' 84 'skeleton, scaffolding, project directory, setup.py, package, ' 85 'packaging' 86 ), 87 ) 88 ``` Path: `cookiecutter/prompt.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 """ 4 cookiecutter.prompt 5 --------------------- 6 7 Functions for prompting the user for project info. 8 """ 9 10 from collections import OrderedDict 11 import json 12 13 import click 14 from past.builtins import basestring 15 16 from future.utils import iteritems 17 18 from jinja2.exceptions import UndefinedError 19 20 from .exceptions import UndefinedVariableInTemplate 21 from .environment import StrictEnvironment 22 23 24 def read_user_variable(var_name, default_value): 25 """Prompt the user for the given variable and return the entered value 26 or the given default. 27 28 :param str var_name: Variable of the context to query the user 29 :param default_value: Value that will be returned if no input happens 30 """ 31 # Please see http://click.pocoo.org/4/api/#click.prompt 32 return click.prompt(var_name, default=default_value) 33 34 35 def read_user_yes_no(question, default_value): 36 """Prompt the user to reply with 'yes' or 'no' (or equivalent values). 37 38 Note: 39 Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n' 40 41 :param str question: Question to the user 42 :param default_value: Value that will be returned if no input happens 43 """ 44 # Please see http://click.pocoo.org/4/api/#click.prompt 45 return click.prompt( 46 question, 47 default=default_value, 48 type=click.BOOL 49 ) 50 51 52 def read_repo_password(question): 53 """Prompt the user to enter a password 54 55 :param str question: Question to the user 56 """ 57 # Please see http://click.pocoo.org/4/api/#click.prompt 58 return click.prompt(question, hide_input=True) 59 60 61 def read_user_choice(var_name, options): 62 """Prompt the user to choose from several options for the given variable. 63 64 The first item will be returned if no input happens. 
65 66 :param str var_name: Variable as specified in the context 67 :param list options: Sequence of options that are available to select from 68 :return: Exactly one item of ``options`` that has been chosen by the user 69 """ 70 # Please see http://click.pocoo.org/4/api/#click.prompt 71 if not isinstance(options, list): 72 raise TypeError 73 74 if not options: 75 raise ValueError 76 77 choice_map = OrderedDict( 78 (u'{}'.format(i), value) for i, value in enumerate(options, 1) 79 ) 80 choices = choice_map.keys() 81 default = u'1' 82 83 choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()] 84 prompt = u'\n'.join(( 85 u'Select {}:'.format(var_name), 86 u'\n'.join(choice_lines), 87 u'Choose from {}'.format(u', '.join(choices)) 88 )) 89 90 user_choice = click.prompt( 91 prompt, type=click.Choice(choices), default=default 92 ) 93 return choice_map[user_choice] 94 95 96 def process_json(user_value): 97 try: 98 user_dict = json.loads( 99 user_value, 100 object_pairs_hook=OrderedDict, 101 ) 102 except Exception: 103 # Leave it up to click to ask the user again 104 raise click.UsageError('Unable to decode to JSON.') 105 106 if not isinstance(user_dict, dict): 107 # Leave it up to click to ask the user again 108 raise click.UsageError('Requires JSON dict.') 109 110 return user_dict 111 112 113 def read_user_dict(var_name, default_value): 114 """Prompt the user to provide a dictionary of data. 115 116 :param str var_name: Variable as specified in the context 117 :param default_value: Value that will be returned if no input is provided 118 :return: A Python dictionary to use in the context. 119 """ 120 # Please see http://click.pocoo.org/4/api/#click.prompt 121 if not isinstance(default_value, dict): 122 raise TypeError 123 124 default_display = 'default' 125 126 user_value = click.prompt( 127 var_name, 128 default=default_display, 129 type=click.STRING, 130 value_proc=process_json, 131 ) 132 133 if user_value == default_display: 134 # Return the given default w/o any processing 135 return default_value 136 return user_value 137 138 139 def render_variable(env, raw, cookiecutter_dict): 140 """Inside the prompting taken from the cookiecutter.json file, this renders 141 the next variable. For example, if a project_name is "Peanut Butter 142 Cookie", the repo_name could be be rendered with: 143 144 `{{ cookiecutter.project_name.replace(" ", "_") }}`. 145 146 This is then presented to the user as the default. 147 148 :param Environment env: A Jinja2 Environment object. 149 :param str raw: The next value to be prompted for by the user. 150 :param dict cookiecutter_dict: The current context as it's gradually 151 being populated with variables. 152 :return: The rendered value for the default variable. 153 """ 154 if raw is None: 155 return None 156 elif isinstance(raw, dict): 157 return { 158 render_variable(env, k, cookiecutter_dict): 159 render_variable(env, v, cookiecutter_dict) 160 for k, v in raw.items() 161 } 162 elif isinstance(raw, list): 163 return [ 164 render_variable(env, v, cookiecutter_dict) 165 for v in raw 166 ] 167 elif not isinstance(raw, basestring): 168 raw = str(raw) 169 170 template = env.from_string(raw) 171 172 rendered_template = template.render(cookiecutter=cookiecutter_dict) 173 return rendered_template 174 175 176 def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input): 177 """Prompt the user which option to choose from the given. Each of the 178 possible choices is rendered beforehand. 
179 """ 180 rendered_options = [ 181 render_variable(env, raw, cookiecutter_dict) for raw in options 182 ] 183 184 if no_input: 185 return rendered_options[0] 186 return read_user_choice(key, rendered_options) 187 188 189 def prompt_for_config(context, no_input=False): 190 """ 191 Prompts the user to enter new config, using context as a source for the 192 field names and sample values. 193 194 :param no_input: Prompt the user at command line for manual configuration? 195 """ 196 cookiecutter_dict = OrderedDict([]) 197 env = StrictEnvironment(context=context) 198 199 # First pass: Handle simple and raw variables, plus choices. 200 # These must be done first because the dictionaries keys and 201 # values might refer to them. 202 for key, raw in iteritems(context[u'cookiecutter']): 203 if key.startswith(u'_'): 204 cookiecutter_dict[key] = raw 205 continue 206 207 try: 208 if isinstance(raw, list): 209 # We are dealing with a choice variable 210 val = prompt_choice_for_config( 211 cookiecutter_dict, env, key, raw, no_input 212 ) 213 cookiecutter_dict[key] = val 214 elif not isinstance(raw, dict): 215 # We are dealing with a regular variable 216 val = render_variable(env, raw, cookiecutter_dict) 217 218 if not no_input: 219 val = read_user_variable(key, val) 220 221 cookiecutter_dict[key] = val 222 except UndefinedError as err: 223 msg = "Unable to render variable '{}'".format(key) 224 raise UndefinedVariableInTemplate(msg, err, context) 225 226 # Second pass; handle the dictionaries. 227 for key, raw in iteritems(context[u'cookiecutter']): 228 229 try: 230 if isinstance(raw, dict): 231 # We are dealing with a dict variable 232 val = render_variable(env, raw, cookiecutter_dict) 233 234 if not no_input: 235 val = read_user_dict(key, val) 236 237 cookiecutter_dict[key] = val 238 except UndefinedError as err: 239 msg = "Unable to render variable '{}'".format(key) 240 raise UndefinedVariableInTemplate(msg, err, context) 241 242 return cookiecutter_dict 243 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py --- a/cookiecutter/prompt.py +++ b/cookiecutter/prompt.py @@ -88,7 +88,7 @@ )) user_choice = click.prompt( - prompt, type=click.Choice(choices), default=default + prompt, type=click.Choice(choices), default=default, show_choices=False ) return choice_map[user_choice] diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -26,7 +26,7 @@ 'future>=0.15.2', 'binaryornot>=0.2.0', 'jinja2>=2.7', - 'click>=5.0', + 'click>=7.0', 'whichcraft>=0.4.0', 'poyo>=0.1.0', 'jinja2-time>=0.1.0',
{"golden_diff": "diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py\n--- a/cookiecutter/prompt.py\n+++ b/cookiecutter/prompt.py\n@@ -88,7 +88,7 @@\n ))\n \n user_choice = click.prompt(\n- prompt, type=click.Choice(choices), default=default\n+ prompt, type=click.Choice(choices), default=default, show_choices=False\n )\n return choice_map[user_choice]\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,7 @@\n 'future>=0.15.2',\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n- 'click>=5.0',\n+ 'click>=7.0',\n 'whichcraft>=0.4.0',\n 'poyo>=0.1.0',\n 'jinja2-time>=0.1.0',\n", "issue": "Choice options are redundant using cookiecutter with latest click package\n* Cookiecutter version: 1.6.0 -- installed via `pip install`\r\n* Template project url: n/a\r\n* Python version: 3.7\r\n* Operating System: Windows 10\r\n\r\n### Description:\r\n\r\nIf cookiecutter.json has the following:\r\n```json\r\n{\r\n \"my_choice\": [\"a\",\"b\"]\r\n}\r\n```\r\nThen running cookiecutter gives the following prompt:\r\n```\r\nSelect my_choice:\r\n1 - a\r\n2 - b\r\nChoose from 1, 2 (1, 2) [1]:\r\n```\r\n\r\nNote how the choices are repeated twice in the last line. This is because the [Click API](https://click.palletsprojects.com/en/7.x/api/) has been updated to 7.0 and automatically shows the choices to the user in parentheses. This is redundant. \r\n\r\n### Solution\r\nThe text passed to the `click.prompt` function should be changed to set `show_choices = False` or it should be changed to not show the choices and let the Click API do so instead.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.6.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.rst', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'future>=0.15.2',\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=5.0',\n 'whichcraft>=0.4.0',\n 'poyo>=0.1.0',\n 'jinja2-time>=0.1.0',\n 'requests>=2.18.0',\n]\n\nif sys.argv[-1] == 'readme':\n print(readme)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'),\n long_description=readme,\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.__main__:main',\n ]\n },\n include_package_data=True,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom collections import OrderedDict\nimport json\n\nimport click\nfrom past.builtins import basestring\n\nfrom future.utils import iteritems\n\nfrom jinja2.exceptions import UndefinedError\n\nfrom .exceptions import UndefinedVariableInTemplate\nfrom .environment import StrictEnvironment\n\n\ndef read_user_variable(var_name, default_value):\n \"\"\"Prompt the user for the given variable and return the entered value\n or the given default.\n\n :param str var_name: Variable of the context to query the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(var_name, default=default_value)\n\n\ndef read_user_yes_no(question, default_value):\n \"\"\"Prompt the user to reply with 'yes' or 'no' (or equivalent values).\n\n Note:\n Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'\n\n :param str question: Question to the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(\n question,\n default=default_value,\n type=click.BOOL\n )\n\n\ndef read_repo_password(question):\n \"\"\"Prompt the user to enter a password\n\n :param str question: Question to the user\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(question, hide_input=True)\n\n\ndef read_user_choice(var_name, options):\n \"\"\"Prompt the user to choose from several options for the given variable.\n\n The first item will be returned if no input happens.\n\n :param str var_name: Variable as specified in the context\n :param list options: Sequence of options that are available to select from\n :return: Exactly one item of ``options`` that has been chosen by the user\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(options, list):\n raise TypeError\n\n if not options:\n raise 
ValueError\n\n choice_map = OrderedDict(\n (u'{}'.format(i), value) for i, value in enumerate(options, 1)\n )\n choices = choice_map.keys()\n default = u'1'\n\n choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]\n prompt = u'\\n'.join((\n u'Select {}:'.format(var_name),\n u'\\n'.join(choice_lines),\n u'Choose from {}'.format(u', '.join(choices))\n ))\n\n user_choice = click.prompt(\n prompt, type=click.Choice(choices), default=default\n )\n return choice_map[user_choice]\n\n\ndef process_json(user_value):\n try:\n user_dict = json.loads(\n user_value,\n object_pairs_hook=OrderedDict,\n )\n except Exception:\n # Leave it up to click to ask the user again\n raise click.UsageError('Unable to decode to JSON.')\n\n if not isinstance(user_dict, dict):\n # Leave it up to click to ask the user again\n raise click.UsageError('Requires JSON dict.')\n\n return user_dict\n\n\ndef read_user_dict(var_name, default_value):\n \"\"\"Prompt the user to provide a dictionary of data.\n\n :param str var_name: Variable as specified in the context\n :param default_value: Value that will be returned if no input is provided\n :return: A Python dictionary to use in the context.\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(default_value, dict):\n raise TypeError\n\n default_display = 'default'\n\n user_value = click.prompt(\n var_name,\n default=default_display,\n type=click.STRING,\n value_proc=process_json,\n )\n\n if user_value == default_display:\n # Return the given default w/o any processing\n return default_value\n return user_value\n\n\ndef render_variable(env, raw, cookiecutter_dict):\n \"\"\"Inside the prompting taken from the cookiecutter.json file, this renders\n the next variable. For example, if a project_name is \"Peanut Butter\n Cookie\", the repo_name could be be rendered with:\n\n `{{ cookiecutter.project_name.replace(\" \", \"_\") }}`.\n\n This is then presented to the user as the default.\n\n :param Environment env: A Jinja2 Environment object.\n :param str raw: The next value to be prompted for by the user.\n :param dict cookiecutter_dict: The current context as it's gradually\n being populated with variables.\n :return: The rendered value for the default variable.\n \"\"\"\n if raw is None:\n return None\n elif isinstance(raw, dict):\n return {\n render_variable(env, k, cookiecutter_dict):\n render_variable(env, v, cookiecutter_dict)\n for k, v in raw.items()\n }\n elif isinstance(raw, list):\n return [\n render_variable(env, v, cookiecutter_dict)\n for v in raw\n ]\n elif not isinstance(raw, basestring):\n raw = str(raw)\n\n template = env.from_string(raw)\n\n rendered_template = template.render(cookiecutter=cookiecutter_dict)\n return rendered_template\n\n\ndef prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):\n \"\"\"Prompt the user which option to choose from the given. 
Each of the\n possible choices is rendered beforehand.\n \"\"\"\n rendered_options = [\n render_variable(env, raw, cookiecutter_dict) for raw in options\n ]\n\n if no_input:\n return rendered_options[0]\n return read_user_choice(key, rendered_options)\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = OrderedDict([])\n env = StrictEnvironment(context=context)\n\n # First pass: Handle simple and raw variables, plus choices.\n # These must be done first because the dictionaries keys and\n # values might refer to them.\n for key, raw in iteritems(context[u'cookiecutter']):\n if key.startswith(u'_'):\n cookiecutter_dict[key] = raw\n continue\n\n try:\n if isinstance(raw, list):\n # We are dealing with a choice variable\n val = prompt_choice_for_config(\n cookiecutter_dict, env, key, raw, no_input\n )\n cookiecutter_dict[key] = val\n elif not isinstance(raw, dict):\n # We are dealing with a regular variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_variable(key, val)\n\n cookiecutter_dict[key] = val\n except UndefinedError as err:\n msg = \"Unable to render variable '{}'\".format(key)\n raise UndefinedVariableInTemplate(msg, err, context)\n\n # Second pass; handle the dictionaries.\n for key, raw in iteritems(context[u'cookiecutter']):\n\n try:\n if isinstance(raw, dict):\n # We are dealing with a dict variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_dict(key, val)\n\n cookiecutter_dict[key] = val\n except UndefinedError as err:\n msg = \"Unable to render variable '{}'\".format(key)\n raise UndefinedVariableInTemplate(msg, err, context)\n\n return cookiecutter_dict\n", "path": "cookiecutter/prompt.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.6.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.rst', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'future>=0.15.2',\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=7.0',\n 'whichcraft>=0.4.0',\n 'poyo>=0.1.0',\n 'jinja2-time>=0.1.0',\n 'requests>=2.18.0',\n]\n\nif sys.argv[-1] == 'readme':\n print(readme)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'),\n long_description=readme,\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.__main__:main',\n ]\n },\n include_package_data=True,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom collections import OrderedDict\nimport json\n\nimport click\nfrom past.builtins import basestring\n\nfrom future.utils import iteritems\n\nfrom jinja2.exceptions import UndefinedError\n\nfrom .exceptions import UndefinedVariableInTemplate\nfrom .environment import StrictEnvironment\n\n\ndef read_user_variable(var_name, default_value):\n \"\"\"Prompt the user for the given variable and return the entered value\n or the given default.\n\n :param str var_name: Variable of the context to query the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(var_name, default=default_value)\n\n\ndef read_user_yes_no(question, default_value):\n \"\"\"Prompt the user to reply with 'yes' or 'no' (or equivalent values).\n\n Note:\n Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'\n\n :param str question: Question to the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(\n question,\n default=default_value,\n type=click.BOOL\n )\n\n\ndef read_repo_password(question):\n \"\"\"Prompt the user to enter a password\n\n :param str question: Question to the user\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(question, hide_input=True)\n\n\ndef read_user_choice(var_name, options):\n \"\"\"Prompt the user to choose from several options for the given variable.\n\n The first item will be returned if no input happens.\n\n :param str var_name: Variable as specified in the context\n :param list options: Sequence of options that are available to select from\n :return: Exactly one item of ``options`` that has been chosen by the user\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(options, list):\n raise TypeError\n\n if not options:\n raise 
ValueError\n\n choice_map = OrderedDict(\n (u'{}'.format(i), value) for i, value in enumerate(options, 1)\n )\n choices = choice_map.keys()\n default = u'1'\n\n choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]\n prompt = u'\\n'.join((\n u'Select {}:'.format(var_name),\n u'\\n'.join(choice_lines),\n u'Choose from {}'.format(u', '.join(choices))\n ))\n\n user_choice = click.prompt(\n prompt, type=click.Choice(choices), default=default, show_choices=False\n )\n return choice_map[user_choice]\n\n\ndef process_json(user_value):\n try:\n user_dict = json.loads(\n user_value,\n object_pairs_hook=OrderedDict,\n )\n except Exception:\n # Leave it up to click to ask the user again\n raise click.UsageError('Unable to decode to JSON.')\n\n if not isinstance(user_dict, dict):\n # Leave it up to click to ask the user again\n raise click.UsageError('Requires JSON dict.')\n\n return user_dict\n\n\ndef read_user_dict(var_name, default_value):\n \"\"\"Prompt the user to provide a dictionary of data.\n\n :param str var_name: Variable as specified in the context\n :param default_value: Value that will be returned if no input is provided\n :return: A Python dictionary to use in the context.\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(default_value, dict):\n raise TypeError\n\n default_display = 'default'\n\n user_value = click.prompt(\n var_name,\n default=default_display,\n type=click.STRING,\n value_proc=process_json,\n )\n\n if user_value == default_display:\n # Return the given default w/o any processing\n return default_value\n return user_value\n\n\ndef render_variable(env, raw, cookiecutter_dict):\n \"\"\"Inside the prompting taken from the cookiecutter.json file, this renders\n the next variable. For example, if a project_name is \"Peanut Butter\n Cookie\", the repo_name could be be rendered with:\n\n `{{ cookiecutter.project_name.replace(\" \", \"_\") }}`.\n\n This is then presented to the user as the default.\n\n :param Environment env: A Jinja2 Environment object.\n :param str raw: The next value to be prompted for by the user.\n :param dict cookiecutter_dict: The current context as it's gradually\n being populated with variables.\n :return: The rendered value for the default variable.\n \"\"\"\n if raw is None:\n return None\n elif isinstance(raw, dict):\n return {\n render_variable(env, k, cookiecutter_dict):\n render_variable(env, v, cookiecutter_dict)\n for k, v in raw.items()\n }\n elif isinstance(raw, list):\n return [\n render_variable(env, v, cookiecutter_dict)\n for v in raw\n ]\n elif not isinstance(raw, basestring):\n raw = str(raw)\n\n template = env.from_string(raw)\n\n rendered_template = template.render(cookiecutter=cookiecutter_dict)\n return rendered_template\n\n\ndef prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):\n \"\"\"Prompt the user which option to choose from the given. 
Each of the\n possible choices is rendered beforehand.\n \"\"\"\n rendered_options = [\n render_variable(env, raw, cookiecutter_dict) for raw in options\n ]\n\n if no_input:\n return rendered_options[0]\n return read_user_choice(key, rendered_options)\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = OrderedDict([])\n env = StrictEnvironment(context=context)\n\n # First pass: Handle simple and raw variables, plus choices.\n # These must be done first because the dictionaries keys and\n # values might refer to them.\n for key, raw in iteritems(context[u'cookiecutter']):\n if key.startswith(u'_'):\n cookiecutter_dict[key] = raw\n continue\n\n try:\n if isinstance(raw, list):\n # We are dealing with a choice variable\n val = prompt_choice_for_config(\n cookiecutter_dict, env, key, raw, no_input\n )\n cookiecutter_dict[key] = val\n elif not isinstance(raw, dict):\n # We are dealing with a regular variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_variable(key, val)\n\n cookiecutter_dict[key] = val\n except UndefinedError as err:\n msg = \"Unable to render variable '{}'\".format(key)\n raise UndefinedVariableInTemplate(msg, err, context)\n\n # Second pass; handle the dictionaries.\n for key, raw in iteritems(context[u'cookiecutter']):\n\n try:\n if isinstance(raw, dict):\n # We are dealing with a dict variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_dict(key, val)\n\n cookiecutter_dict[key] = val\n except UndefinedError as err:\n msg = \"Unable to render variable '{}'\".format(key)\n raise UndefinedVariableInTemplate(msg, err, context)\n\n return cookiecutter_dict\n", "path": "cookiecutter/prompt.py"}]}
3,683
222
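One visible change in the cookiecutter record above (see the `after_files` entry) adds `show_choices=False` to the `click.prompt` call in `read_user_choice`, so the numbered menu is printed only once. A minimal, self-contained sketch of that prompt pattern — not the cookiecutter code itself, and assuming click >= 7.0, where the `show_choices` keyword exists:

```python
# Hypothetical standalone example of the patched prompt pattern: the code
# renders its own numbered menu, so click's automatic choice listing is
# suppressed with show_choices=False.
from collections import OrderedDict

import click


def ask_choice(var_name, options):
    # Map "1", "2", ... onto the options so the user answers with a number.
    choice_map = OrderedDict((str(i), v) for i, v in enumerate(options, 1))
    prompt = "\n".join(
        ["Select {}:".format(var_name)]
        + ["{} - {}".format(k, v) for k, v in choice_map.items()]
        + ["Choose from {}".format(", ".join(choice_map))]
    )
    # show_choices=False keeps click from appending "(1, 2, ...)" to the
    # prompt a second time.
    answer = click.prompt(
        prompt, type=click.Choice(list(choice_map)), default="1", show_choices=False
    )
    return choice_map[answer]


if __name__ == "__main__":
    print(ask_choice("license", ["MIT", "BSD-3", "GPLv3"]))
```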
gh_patches_debug_3837
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-4338
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests [BUG]: wrong in KL_approx , the order of two distribution is handled incorrectly compute_approx_kl for `NaiveExperienceMaker` maybe incorrect. As motion in [Approximating KL Divergence](http://joschu.net/blog/kl-approx.html) $$ KL[q,p] = \mathbb{E}_{x\sim q}[\log\frac{q(x)}{p(x)}] $$ let $$ r = \frac{p(x)}{q(x)} $$ note that, x is sample from distribution q. Then $$ KL_{approx}[q,p] = \mathbb{E}_{x\sim q}[-\log(r) + (r-1) ] $$ --- In paper [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155), object for actor , (e.i. reward of experience , ignore loss_ptx) <img width="756" alt="image" src="https://user-images.githubusercontent.com/22851737/236610916-2f068c34-1508-438f-bd16-6fe6ed491e8c.png"> <img width="795" alt="image" src="https://user-images.githubusercontent.com/22851737/236611424-17600f6e-7aca-4bdf-95bd-2ce4035bcd3a.png"> So for computing KL, samples are sampled from actor model e.i $\pi^{RL}_\phi$, instead of $\pi^{SFT}$ KL in the object should be $KL[\pi^{RL}, \pi^{SFT}] =KL[q,p]$ , and $r$ of KL_approx should be $\frac{\pi^{SFT}(x)}{\pi^{RL}_\phi(x)}$ --- While on the `coati.models.utils.compute_approx_kl` ``` python log_ratio = log_probs - log_probs_base ``` and log_probs and log_probs_base correspond to actor_model and sft_model respectively. This should be modify to ```python log_ratio = log_probs_base - log_probs ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `applications/Chat/coati/models/utils.py` Content: ``` 1 from typing import Optional, Union 2 3 import loralib as lora 4 import torch 5 import torch.nn as nn 6 import torch.nn.functional as F 7 8 9 def compute_approx_kl(log_probs: torch.Tensor, 10 log_probs_base: torch.Tensor, 11 action_mask: Optional[torch.Tensor] = None) -> torch.Tensor: 12 """ 13 Compute the approximate KL divergence between two distributions. 14 Schulman blog: http://joschu.net/blog/kl-approx.html 15 16 Args: 17 log_probs: Log probabilities of the new distribution. 18 log_probs_base: Log probabilities of the base distribution. 19 action_mask: Mask for actions. 20 """ 21 22 log_ratio = log_probs - log_probs_base 23 approx_kl = (log_ratio.exp() - 1) - log_ratio 24 if action_mask is not None: 25 approx_kl = masked_mean(approx_kl, action_mask, dim=1) 26 return approx_kl 27 approx_kl = approx_kl.mean(dim=1) 28 return approx_kl 29 30 31 def compute_reward(r: Union[torch.Tensor, float], 32 kl_coef: float, 33 log_probs: torch.Tensor, 34 log_probs_base: torch.Tensor, 35 action_mask: Optional[torch.Tensor] = None) -> torch.Tensor: 36 if kl_coef <= 0.0: 37 return r 38 kl = compute_approx_kl(log_probs, log_probs_base, action_mask=action_mask) 39 reward = r - kl_coef * kl 40 return reward 41 42 43 def log_probs_from_logits(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor: 44 log_probs = F.log_softmax(logits, dim=-1) 45 log_probs_labels = log_probs.gather(dim=-1, index=labels.unsqueeze(-1)) 46 return log_probs_labels.squeeze(-1) 47 48 49 def calc_action_log_probs(output: torch.Tensor, sequences: torch.LongTensor, num_actions: int) -> torch.Tensor: 50 """Calculate action log probs. 51 52 Args: 53 output (torch.Tensor): Output tensor of Actor.forward. 54 sequences (torch.LongTensor): Input sequences. 
55 num_actions (int): Number of actions. 56 57 Returns: 58 torch.Tensor: Action log probs. 59 """ 60 logits = output['logits'] 61 log_probs = log_probs_from_logits(logits[:, :-1, :], sequences[:, 1:]) 62 return log_probs[:, -num_actions:] 63 64 65 def masked_mean(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1) -> torch.Tensor: 66 tensor = tensor * mask 67 tensor = tensor.sum(dim=dim) 68 mask_sum = mask.sum(dim=dim) 69 mean = tensor / (mask_sum + 1e-8) 70 return mean 71 72 73 def masked_normalize(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1, eps: float = 1e-8) -> torch.Tensor: 74 tensor = tensor * mask 75 mean = masked_mean(tensor, mask, dim=dim) 76 mean_centered = tensor - mean 77 var = masked_mean(mean_centered**2, mask, dim=dim) 78 return mean_centered * var.clamp(min=eps).rsqrt() 79 80 81 def normalize(tensor: torch.Tensor, dim: int = 0, eps: float = 1e-8) -> torch.Tensor: 82 mean = tensor.mean(dim) 83 mean_centered = tensor - mean 84 var = (mean_centered**2).mean(dim) 85 norm = mean_centered * var.clamp(min=eps).rsqrt() 86 return norm 87 88 89 def convert_to_lora(model: nn.Module, 90 input_size: int, 91 output_size: int, 92 lora_rank: int = 16, 93 lora_alpha: int = 1, 94 lora_dropout: float = 0., 95 fan_in_fan_out: bool = False, 96 merge_weights: bool = True): 97 if lora_rank > min(input_size, output_size): 98 raise ValueError(f"LoRA rank {lora_rank} must be less or equal than {min(input_size, output_size)}") 99 100 for name, module in model.named_modules(): 101 if isinstance(module, nn.Linear): 102 module._modules[name] = lora.Linear(input_size, 103 output_size, 104 r=lora_rank, 105 lora_alpha=lora_alpha, 106 lora_dropout=lora_dropout, 107 fan_in_fan_out=fan_in_fan_out, 108 merge_weights=merge_weights) 109 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/applications/Chat/coati/models/utils.py b/applications/Chat/coati/models/utils.py --- a/applications/Chat/coati/models/utils.py +++ b/applications/Chat/coati/models/utils.py @@ -19,7 +19,7 @@ action_mask: Mask for actions. """ - log_ratio = log_probs - log_probs_base + log_ratio = log_probs_base - log_probs approx_kl = (log_ratio.exp() - 1) - log_ratio if action_mask is not None: approx_kl = masked_mean(approx_kl, action_mask, dim=1)
{"golden_diff": "diff --git a/applications/Chat/coati/models/utils.py b/applications/Chat/coati/models/utils.py\n--- a/applications/Chat/coati/models/utils.py\n+++ b/applications/Chat/coati/models/utils.py\n@@ -19,7 +19,7 @@\n action_mask: Mask for actions.\n \"\"\"\n \n- log_ratio = log_probs - log_probs_base\n+ log_ratio = log_probs_base - log_probs\n approx_kl = (log_ratio.exp() - 1) - log_ratio\n if action_mask is not None:\n approx_kl = masked_mean(approx_kl, action_mask, dim=1)\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[BUG]: wrong in KL_approx , the order of two distribution is handled incorrectly\ncompute_approx_kl for `NaiveExperienceMaker` maybe incorrect. \r\n\r\nAs motion in [Approximating KL Divergence](http://joschu.net/blog/kl-approx.html)\r\n\r\n \r\n$$ KL[q,p] = \\mathbb{E}_{x\\sim q}[\\log\\frac{q(x)}{p(x)}] $$\r\n\r\nlet \r\n\r\n$$ r = \\frac{p(x)}{q(x)} $$\r\n\r\nnote that, x is sample from distribution q. \r\n\r\nThen \r\n\r\n$$ KL_{approx}[q,p] = \\mathbb{E}_{x\\sim q}[-\\log(r) + (r-1) ] $$\r\n\r\n---\r\n \r\nIn paper [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155), object for actor , (e.i. reward of experience , ignore loss_ptx) \r\n\r\n<img width=\"756\" alt=\"image\" src=\"https://user-images.githubusercontent.com/22851737/236610916-2f068c34-1508-438f-bd16-6fe6ed491e8c.png\">\r\n\r\n\r\n<img width=\"795\" alt=\"image\" src=\"https://user-images.githubusercontent.com/22851737/236611424-17600f6e-7aca-4bdf-95bd-2ce4035bcd3a.png\">\r\n\r\nSo for computing KL, samples are sampled from actor model e.i $\\pi^{RL}_\\phi$, instead of $\\pi^{SFT}$\r\n\r\n KL in the object should be $KL[\\pi^{RL}, \\pi^{SFT}] =KL[q,p]$ , and $r$ of KL_approx should be $\\frac{\\pi^{SFT}(x)}{\\pi^{RL}_\\phi(x)}$\r\n\r\n--- \r\n\r\nWhile on the `coati.models.utils.compute_approx_kl`\r\n\r\n``` python \r\n log_ratio = log_probs - log_probs_base\r\n```\r\n\r\nand log_probs and log_probs_base correspond to actor_model and sft_model respectively.\r\nThis should be modify to \r\n\r\n```python \r\n log_ratio = log_probs_base - log_probs \r\n```\r\n\r\n\n", "before_files": [{"content": "from typing import Optional, Union\n\nimport loralib as lora\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef compute_approx_kl(log_probs: torch.Tensor,\n log_probs_base: torch.Tensor,\n action_mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n \"\"\"\n Compute the approximate KL divergence between two distributions.\n Schulman blog: http://joschu.net/blog/kl-approx.html\n\n Args:\n log_probs: Log probabilities of the new distribution.\n log_probs_base: Log probabilities of the base distribution.\n action_mask: Mask for actions.\n \"\"\"\n\n log_ratio = log_probs - log_probs_base\n approx_kl = (log_ratio.exp() - 1) - log_ratio\n if action_mask is not None:\n approx_kl = masked_mean(approx_kl, action_mask, dim=1)\n return approx_kl\n approx_kl = approx_kl.mean(dim=1)\n return approx_kl\n\n\ndef compute_reward(r: Union[torch.Tensor, float],\n kl_coef: float,\n log_probs: torch.Tensor,\n log_probs_base: torch.Tensor,\n action_mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n if kl_coef <= 0.0:\n return r\n kl = compute_approx_kl(log_probs, log_probs_base, action_mask=action_mask)\n reward = r - kl_coef * kl\n return reward\n\n\ndef log_probs_from_logits(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:\n log_probs = F.log_softmax(logits, dim=-1)\n 
log_probs_labels = log_probs.gather(dim=-1, index=labels.unsqueeze(-1))\n return log_probs_labels.squeeze(-1)\n\n\ndef calc_action_log_probs(output: torch.Tensor, sequences: torch.LongTensor, num_actions: int) -> torch.Tensor:\n \"\"\"Calculate action log probs.\n\n Args:\n output (torch.Tensor): Output tensor of Actor.forward.\n sequences (torch.LongTensor): Input sequences.\n num_actions (int): Number of actions.\n\n Returns:\n torch.Tensor: Action log probs.\n \"\"\"\n logits = output['logits']\n log_probs = log_probs_from_logits(logits[:, :-1, :], sequences[:, 1:])\n return log_probs[:, -num_actions:]\n\n\ndef masked_mean(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1) -> torch.Tensor:\n tensor = tensor * mask\n tensor = tensor.sum(dim=dim)\n mask_sum = mask.sum(dim=dim)\n mean = tensor / (mask_sum + 1e-8)\n return mean\n\n\ndef masked_normalize(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1, eps: float = 1e-8) -> torch.Tensor:\n tensor = tensor * mask\n mean = masked_mean(tensor, mask, dim=dim)\n mean_centered = tensor - mean\n var = masked_mean(mean_centered**2, mask, dim=dim)\n return mean_centered * var.clamp(min=eps).rsqrt()\n\n\ndef normalize(tensor: torch.Tensor, dim: int = 0, eps: float = 1e-8) -> torch.Tensor:\n mean = tensor.mean(dim)\n mean_centered = tensor - mean\n var = (mean_centered**2).mean(dim)\n norm = mean_centered * var.clamp(min=eps).rsqrt()\n return norm\n\n\ndef convert_to_lora(model: nn.Module,\n input_size: int,\n output_size: int,\n lora_rank: int = 16,\n lora_alpha: int = 1,\n lora_dropout: float = 0.,\n fan_in_fan_out: bool = False,\n merge_weights: bool = True):\n if lora_rank > min(input_size, output_size):\n raise ValueError(f\"LoRA rank {lora_rank} must be less or equal than {min(input_size, output_size)}\")\n\n for name, module in model.named_modules():\n if isinstance(module, nn.Linear):\n module._modules[name] = lora.Linear(input_size,\n output_size,\n r=lora_rank,\n lora_alpha=lora_alpha,\n lora_dropout=lora_dropout,\n fan_in_fan_out=fan_in_fan_out,\n merge_weights=merge_weights)\n", "path": "applications/Chat/coati/models/utils.py"}], "after_files": [{"content": "from typing import Optional, Union\n\nimport loralib as lora\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef compute_approx_kl(log_probs: torch.Tensor,\n log_probs_base: torch.Tensor,\n action_mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n \"\"\"\n Compute the approximate KL divergence between two distributions.\n Schulman blog: http://joschu.net/blog/kl-approx.html\n\n Args:\n log_probs: Log probabilities of the new distribution.\n log_probs_base: Log probabilities of the base distribution.\n action_mask: Mask for actions.\n \"\"\"\n\n log_ratio = log_probs_base - log_probs\n approx_kl = (log_ratio.exp() - 1) - log_ratio\n if action_mask is not None:\n approx_kl = masked_mean(approx_kl, action_mask, dim=1)\n return approx_kl\n approx_kl = approx_kl.mean(dim=1)\n return approx_kl\n\n\ndef compute_reward(r: Union[torch.Tensor, float],\n kl_coef: float,\n log_probs: torch.Tensor,\n log_probs_base: torch.Tensor,\n action_mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n if kl_coef <= 0.0:\n return r\n kl = compute_approx_kl(log_probs, log_probs_base, action_mask=action_mask)\n reward = r - kl_coef * kl\n return reward\n\n\ndef log_probs_from_logits(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:\n log_probs = F.log_softmax(logits, dim=-1)\n log_probs_labels = log_probs.gather(dim=-1, index=labels.unsqueeze(-1))\n 
return log_probs_labels.squeeze(-1)\n\n\ndef calc_action_log_probs(output: torch.Tensor, sequences: torch.LongTensor, num_actions: int) -> torch.Tensor:\n \"\"\"Calculate action log probs.\n\n Args:\n output (torch.Tensor): Output tensor of Actor.forward.\n sequences (torch.LongTensor): Input sequences.\n num_actions (int): Number of actions.\n\n Returns:\n torch.Tensor: Action log probs.\n \"\"\"\n logits = output['logits']\n log_probs = log_probs_from_logits(logits[:, :-1, :], sequences[:, 1:])\n return log_probs[:, -num_actions:]\n\n\ndef masked_mean(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1) -> torch.Tensor:\n tensor = tensor * mask\n tensor = tensor.sum(dim=dim)\n mask_sum = mask.sum(dim=dim)\n mean = tensor / (mask_sum + 1e-8)\n return mean\n\n\ndef masked_normalize(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1, eps: float = 1e-8) -> torch.Tensor:\n tensor = tensor * mask\n mean = masked_mean(tensor, mask, dim=dim)\n mean_centered = tensor - mean\n var = masked_mean(mean_centered**2, mask, dim=dim)\n return mean_centered * var.clamp(min=eps).rsqrt()\n\n\ndef normalize(tensor: torch.Tensor, dim: int = 0, eps: float = 1e-8) -> torch.Tensor:\n mean = tensor.mean(dim)\n mean_centered = tensor - mean\n var = (mean_centered**2).mean(dim)\n norm = mean_centered * var.clamp(min=eps).rsqrt()\n return norm\n\n\ndef convert_to_lora(model: nn.Module,\n input_size: int,\n output_size: int,\n lora_rank: int = 16,\n lora_alpha: int = 1,\n lora_dropout: float = 0.,\n fan_in_fan_out: bool = False,\n merge_weights: bool = True):\n if lora_rank > min(input_size, output_size):\n raise ValueError(f\"LoRA rank {lora_rank} must be less or equal than {min(input_size, output_size)}\")\n\n for name, module in model.named_modules():\n if isinstance(module, nn.Linear):\n module._modules[name] = lora.Linear(input_size,\n output_size,\n r=lora_rank,\n lora_alpha=lora_alpha,\n lora_dropout=lora_dropout,\n fan_in_fan_out=fan_in_fan_out,\n merge_weights=merge_weights)\n", "path": "applications/Chat/coati/models/utils.py"}]}
1,954
140
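The ColossalAI record above hinges on the direction of the ratio in Schulman's k3 estimator: with samples drawn from the actor policy q, KL[q, p] ≈ E_q[(r − 1) − log r] with r = p_sft(x) / q_actor(x), which is why the patch flips the log-ratio to `log_probs_base - log_probs`. A minimal PyTorch sketch of just that estimator (illustrative only, not the coati implementation):

```python
# Sketch of the corrected k3 KL estimator. Samples come from the actor
# distribution q, so r = p_sft(x) / q_actor(x) and the per-sample estimate
# (r - 1) - log r is always non-negative.
import torch


def approx_kl(actor_log_probs: torch.Tensor, sft_log_probs: torch.Tensor) -> torch.Tensor:
    log_ratio = sft_log_probs - actor_log_probs  # log p_sft - log q_actor
    return (log_ratio.exp() - 1) - log_ratio


if __name__ == "__main__":
    actor = torch.log(torch.tensor([0.5, 0.3, 0.2]))
    sft = torch.log(torch.tensor([0.4, 0.4, 0.2]))
    print(approx_kl(actor, sft))  # elementwise estimates, all >= 0
```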
gh_patches_debug_27561
rasdani/github-patches
git_diff
huggingface__transformers-11746
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ValueError: could not broadcast input array from shape (2816,384) into shape (2698,384) the command to reproduce: cd huggingface-transformers/examples/pytorch/question-answering python -m torch.distributed.launch --nproc_per_node=8 ./run_qa.py \ --model_name_or_path roberta-large \ --dataset_name squad \ --do_train --do_eval \ --per_device_train_batch_size 16 \ --per_device_eval_batch_size 256 \ --learning_rate 3e-5 \ --num_train_epochs 2 \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir test_result2/$trials --overwrite_output_dir \ --logging_dir test_result2/$trials/tensorboard --logging_first_step --logging_steps 50 \ --fp16 i tried add "--max_eval_samples 10240", this will fix the error, while the AUC result is quite low(exact_match = 4.9414, f1 = 8.9784). and when i ran with 1gpu, the above command can succeed(exact_match = 88.5336, f1 = 94.3266) the full error is "File "./transformers/src/transformers/trainer_pt_utils.py", line 410, in _nested_set_tensors i * slice_len : (i + 1) * slice_len i * slice_len : (i + 1) * slice_len ValueError: could not broadcast input array from shape (2816,384) into shape (2698,384)" --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/pytorch/question-answering/trainer_qa.py` Content: ``` 1 # coding=utf-8 2 # Copyright 2020 The HuggingFace Team All rights reserved. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 """ 16 A subclass of `Trainer` specific to Question-Answering tasks 17 """ 18 19 from transformers import Trainer, is_torch_tpu_available 20 from transformers.trainer_utils import PredictionOutput 21 22 23 if is_torch_tpu_available(): 24 import torch_xla.core.xla_model as xm 25 import torch_xla.debug.metrics as met 26 27 28 class QuestionAnsweringTrainer(Trainer): 29 def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs): 30 super().__init__(*args, **kwargs) 31 self.eval_examples = eval_examples 32 self.post_process_function = post_process_function 33 34 def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None): 35 eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset 36 eval_dataloader = self.get_eval_dataloader(eval_dataset) 37 eval_examples = self.eval_examples if eval_examples is None else eval_examples 38 39 # Temporarily disable metric computation, we will do it in the loop here. 
40 compute_metrics = self.compute_metrics 41 self.compute_metrics = None 42 try: 43 output = self.prediction_loop( 44 eval_dataloader, 45 description="Evaluation", 46 # No point gathering the predictions if there are no metrics, otherwise we defer to 47 # self.args.prediction_loss_only 48 prediction_loss_only=True if compute_metrics is None else None, 49 ignore_keys=ignore_keys, 50 ) 51 finally: 52 self.compute_metrics = compute_metrics 53 54 if self.post_process_function is not None and self.compute_metrics is not None: 55 eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions) 56 metrics = self.compute_metrics(eval_preds) 57 58 self.log(metrics) 59 else: 60 metrics = {} 61 62 if self.args.tpu_metrics_debug or self.args.debug: 63 # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 64 xm.master_print(met.metrics_report()) 65 66 self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics) 67 return metrics 68 69 def predict(self, predict_dataset, predict_examples, ignore_keys=None): 70 predict_dataloader = self.get_test_dataloader(predict_dataset) 71 72 # Temporarily disable metric computation, we will do it in the loop here. 73 compute_metrics = self.compute_metrics 74 self.compute_metrics = None 75 try: 76 output = self.prediction_loop( 77 predict_dataloader, 78 description="Prediction", 79 # No point gathering the predictions if there are no metrics, otherwise we defer to 80 # self.args.prediction_loss_only 81 prediction_loss_only=True if compute_metrics is None else None, 82 ignore_keys=ignore_keys, 83 ) 84 finally: 85 self.compute_metrics = compute_metrics 86 87 if self.post_process_function is None or self.compute_metrics is None: 88 return output 89 90 predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict") 91 metrics = self.compute_metrics(predictions) 92 93 return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics) 94 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/pytorch/question-answering/trainer_qa.py b/examples/pytorch/question-answering/trainer_qa.py --- a/examples/pytorch/question-answering/trainer_qa.py +++ b/examples/pytorch/question-answering/trainer_qa.py @@ -39,8 +39,9 @@ # Temporarily disable metric computation, we will do it in the loop here. compute_metrics = self.compute_metrics self.compute_metrics = None + eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: - output = self.prediction_loop( + output = eval_loop( eval_dataloader, description="Evaluation", # No point gathering the predictions if there are no metrics, otherwise we defer to @@ -72,8 +73,9 @@ # Temporarily disable metric computation, we will do it in the loop here. compute_metrics = self.compute_metrics self.compute_metrics = None + eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: - output = self.prediction_loop( + output = eval_loop( predict_dataloader, description="Prediction", # No point gathering the predictions if there are no metrics, otherwise we defer to
{"golden_diff": "diff --git a/examples/pytorch/question-answering/trainer_qa.py b/examples/pytorch/question-answering/trainer_qa.py\n--- a/examples/pytorch/question-answering/trainer_qa.py\n+++ b/examples/pytorch/question-answering/trainer_qa.py\n@@ -39,8 +39,9 @@\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n+ eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop\n try:\n- output = self.prediction_loop(\n+ output = eval_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n@@ -72,8 +73,9 @@\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n+ eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop\n try:\n- output = self.prediction_loop(\n+ output = eval_loop(\n predict_dataloader,\n description=\"Prediction\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n", "issue": "ValueError: could not broadcast input array from shape (2816,384) into shape (2698,384)\nthe command to reproduce:\r\ncd huggingface-transformers/examples/pytorch/question-answering\r\npython -m torch.distributed.launch --nproc_per_node=8 ./run_qa.py \\\r\n\t\t --model_name_or_path roberta-large \\\r\n\t\t --dataset_name squad \\\r\n\t\t --do_train --do_eval \\\r\n\t\t --per_device_train_batch_size 16 \\\r\n\t\t --per_device_eval_batch_size 256 \\\r\n\t\t --learning_rate 3e-5 \\\r\n\t\t --num_train_epochs 2 \\\r\n\t\t --max_seq_length 384 \\\r\n\t\t --doc_stride 128 \\\r\n\t\t --output_dir test_result2/$trials --overwrite_output_dir \\\r\n\t\t --logging_dir test_result2/$trials/tensorboard --logging_first_step --logging_steps 50 \\\r\n --fp16\r\n\r\n\r\n\r\ni tried add \"--max_eval_samples 10240\", this will fix the error, while the AUC result is quite low(exact_match = 4.9414, f1 = 8.9784). 
and when i ran with 1gpu, the above command can succeed(exact_match = 88.5336, f1 = 94.3266)\r\n\r\n\r\nthe full error is \"File \"./transformers/src/transformers/trainer_pt_utils.py\", line 410, in _nested_set_tensors\r\n i * slice_len : (i + 1) * slice_len\r\n i * slice_len : (i + 1) * slice_len\r\nValueError: could not broadcast input array from shape (2816,384) into shape (2698,384)\"\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2020 The HuggingFace Team All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA subclass of `Trainer` specific to Question-Answering tasks\n\"\"\"\n\nfrom transformers import Trainer, is_torch_tpu_available\nfrom transformers.trainer_utils import PredictionOutput\n\n\nif is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n import torch_xla.debug.metrics as met\n\n\nclass QuestionAnsweringTrainer(Trainer):\n def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.eval_examples = eval_examples\n self.post_process_function = post_process_function\n\n def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None):\n eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset\n eval_dataloader = self.get_eval_dataloader(eval_dataset)\n eval_examples = self.eval_examples if eval_examples is None else eval_examples\n\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n try:\n output = self.prediction_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if compute_metrics is None else None,\n ignore_keys=ignore_keys,\n )\n finally:\n self.compute_metrics = compute_metrics\n\n if self.post_process_function is not None and self.compute_metrics is not None:\n eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)\n metrics = self.compute_metrics(eval_preds)\n\n self.log(metrics)\n else:\n metrics = {}\n\n if self.args.tpu_metrics_debug or self.args.debug:\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n\n self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)\n return metrics\n\n def predict(self, predict_dataset, predict_examples, ignore_keys=None):\n predict_dataloader = self.get_test_dataloader(predict_dataset)\n\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n try:\n output = self.prediction_loop(\n predict_dataloader,\n description=\"Prediction\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if compute_metrics is 
None else None,\n ignore_keys=ignore_keys,\n )\n finally:\n self.compute_metrics = compute_metrics\n\n if self.post_process_function is None or self.compute_metrics is None:\n return output\n\n predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, \"predict\")\n metrics = self.compute_metrics(predictions)\n\n return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)\n", "path": "examples/pytorch/question-answering/trainer_qa.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2020 The HuggingFace Team All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA subclass of `Trainer` specific to Question-Answering tasks\n\"\"\"\n\nfrom transformers import Trainer, is_torch_tpu_available\nfrom transformers.trainer_utils import PredictionOutput\n\n\nif is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n import torch_xla.debug.metrics as met\n\n\nclass QuestionAnsweringTrainer(Trainer):\n def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.eval_examples = eval_examples\n self.post_process_function = post_process_function\n\n def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None):\n eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset\n eval_dataloader = self.get_eval_dataloader(eval_dataset)\n eval_examples = self.eval_examples if eval_examples is None else eval_examples\n\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop\n try:\n output = eval_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if compute_metrics is None else None,\n ignore_keys=ignore_keys,\n )\n finally:\n self.compute_metrics = compute_metrics\n\n if self.post_process_function is not None and self.compute_metrics is not None:\n eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)\n metrics = self.compute_metrics(eval_preds)\n\n self.log(metrics)\n else:\n metrics = {}\n\n if self.args.tpu_metrics_debug or self.args.debug:\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n\n self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)\n return metrics\n\n def predict(self, predict_dataset, predict_examples, ignore_keys=None):\n predict_dataloader = self.get_test_dataloader(predict_dataset)\n\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n eval_loop = self.prediction_loop if 
self.args.use_legacy_prediction_loop else self.evaluation_loop\n try:\n output = eval_loop(\n predict_dataloader,\n description=\"Prediction\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if compute_metrics is None else None,\n ignore_keys=ignore_keys,\n )\n finally:\n self.compute_metrics = compute_metrics\n\n if self.post_process_function is None or self.compute_metrics is None:\n return output\n\n predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, \"predict\")\n metrics = self.compute_metrics(predictions)\n\n return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)\n", "path": "examples/pytorch/question-answering/trainer_qa.py"}]}
1,635
290
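The transformers record above replaces hard-coded calls to `prediction_loop` with a runtime choice between the newer `evaluation_loop` and the legacy loop. A hedged sketch of that selection pattern in a `Trainer` subclass — the attribute and method names (`use_legacy_prediction_loop`, `evaluation_loop`, `prediction_loop`) are taken from the golden diff itself, i.e. transformers ≥ 4.6; everything else is illustrative and not the question-answering trainer:

```python
# Illustrative subclass showing only the loop-selection change.
from transformers import Trainer


class LoopSelectingTrainer(Trainer):
    def run_eval_loop(self, eval_dataset=None, ignore_keys=None):
        dataloader = self.get_eval_dataloader(eval_dataset)
        # Pick the new evaluation_loop unless the user opted back into the
        # legacy implementation via --use_legacy_prediction_loop.
        eval_loop = (
            self.prediction_loop
            if self.args.use_legacy_prediction_loop
            else self.evaluation_loop
        )
        return eval_loop(dataloader, description="Evaluation", ignore_keys=ignore_keys)
```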
gh_patches_debug_16666
rasdani/github-patches
git_diff
SeldonIO__MLServer-613
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Compress requests / responses Leverage `gzip` to compress requests / responses. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mlserver/rest/app.py` Content: ``` 1 from typing import Callable 2 from fastapi import FastAPI 3 from fastapi.responses import Response as FastAPIResponse 4 from fastapi.routing import APIRoute as FastAPIRoute 5 from fastapi.middleware.cors import CORSMiddleware 6 from starlette_exporter import PrometheusMiddleware 7 8 from .endpoints import Endpoints, ModelRepositoryEndpoints 9 from .requests import Request 10 from .responses import Response 11 from .errors import _EXCEPTION_HANDLERS 12 13 from ..settings import Settings 14 from ..handlers import DataPlane, ModelRepositoryHandlers 15 16 17 class APIRoute(FastAPIRoute): 18 """ 19 Custom route to use our own Request handler. 20 """ 21 22 def get_route_handler(self) -> Callable: 23 original_route_handler = super().get_route_handler() 24 25 async def custom_route_handler(request: Request) -> FastAPIResponse: 26 request = Request(request.scope, request.receive) 27 return await original_route_handler(request) 28 29 return custom_route_handler 30 31 32 def create_app( 33 settings: Settings, 34 data_plane: DataPlane, 35 model_repository_handlers: ModelRepositoryHandlers, 36 ) -> FastAPI: 37 endpoints = Endpoints(data_plane) 38 model_repository_endpoints = ModelRepositoryEndpoints(model_repository_handlers) 39 40 routes = [ 41 # Model ready 42 APIRoute( 43 "/v2/models/{model_name}/ready", 44 endpoints.model_ready, 45 ), 46 APIRoute( 47 "/v2/models/{model_name}/versions/{model_version}/ready", 48 endpoints.model_ready, 49 ), 50 # Model infer 51 APIRoute( 52 "/v2/models/{model_name}/infer", 53 endpoints.infer, 54 methods=["POST"], 55 ), 56 APIRoute( 57 "/v2/models/{model_name}/versions/{model_version}/infer", 58 endpoints.infer, 59 methods=["POST"], 60 ), 61 # Model metadata 62 APIRoute( 63 "/v2/models/{model_name}", 64 endpoints.model_metadata, 65 ), 66 APIRoute( 67 "/v2/models/{model_name}/versions/{model_version}", 68 endpoints.model_metadata, 69 ), 70 # Liveness and readiness 71 APIRoute("/v2/health/live", endpoints.live), 72 APIRoute("/v2/health/ready", endpoints.ready), 73 # Server metadata 74 APIRoute( 75 "/v2", 76 endpoints.metadata, 77 ), 78 ] 79 80 routes += [ 81 # Model Repository API 82 APIRoute( 83 "/v2/repository/index", 84 model_repository_endpoints.index, 85 methods=["POST"], 86 ), 87 APIRoute( 88 "/v2/repository/models/{model_name}/load", 89 model_repository_endpoints.load, 90 methods=["POST"], 91 ), 92 APIRoute( 93 "/v2/repository/models/{model_name}/unload", 94 model_repository_endpoints.unload, 95 methods=["POST"], 96 ), 97 ] 98 99 app = FastAPI( 100 debug=settings.debug, 101 routes=routes, # type: ignore 102 default_response_class=Response, 103 exception_handlers=_EXCEPTION_HANDLERS, # type: ignore 104 ) 105 106 if settings.cors_settings is not None: 107 app.add_middleware( 108 CORSMiddleware, 109 allow_origins=settings.cors_settings.allow_origins, 110 allow_origin_regex=settings.cors_settings.allow_origin_regex, 111 allow_credentials=settings.cors_settings.allow_credentials, 112 allow_methods=settings.cors_settings.allow_methods, 113 allow_headers=settings.cors_settings.allow_headers, 114 max_age=settings.cors_settings.max_age, 115 ) 116 117 if settings.metrics_endpoint: 118 app.add_middleware( 
119 PrometheusMiddleware, 120 app_name="mlserver", 121 prefix="rest_server", 122 # TODO: Should we also exclude model's health endpoints? 123 skip_paths=[ 124 settings.metrics_endpoint, 125 "/v2/health/live", 126 "/v2/health/ready", 127 ], 128 ) 129 130 return app 131 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mlserver/rest/app.py b/mlserver/rest/app.py --- a/mlserver/rest/app.py +++ b/mlserver/rest/app.py @@ -3,6 +3,7 @@ from fastapi.responses import Response as FastAPIResponse from fastapi.routing import APIRoute as FastAPIRoute from fastapi.middleware.cors import CORSMiddleware +from fastapi.middleware.gzip import GZipMiddleware from starlette_exporter import PrometheusMiddleware from .endpoints import Endpoints, ModelRepositoryEndpoints @@ -103,6 +104,7 @@ exception_handlers=_EXCEPTION_HANDLERS, # type: ignore ) + app.add_middleware(GZipMiddleware) if settings.cors_settings is not None: app.add_middleware( CORSMiddleware,
{"golden_diff": "diff --git a/mlserver/rest/app.py b/mlserver/rest/app.py\n--- a/mlserver/rest/app.py\n+++ b/mlserver/rest/app.py\n@@ -3,6 +3,7 @@\n from fastapi.responses import Response as FastAPIResponse\n from fastapi.routing import APIRoute as FastAPIRoute\n from fastapi.middleware.cors import CORSMiddleware\n+from fastapi.middleware.gzip import GZipMiddleware\n from starlette_exporter import PrometheusMiddleware\n \n from .endpoints import Endpoints, ModelRepositoryEndpoints\n@@ -103,6 +104,7 @@\n exception_handlers=_EXCEPTION_HANDLERS, # type: ignore\n )\n \n+ app.add_middleware(GZipMiddleware)\n if settings.cors_settings is not None:\n app.add_middleware(\n CORSMiddleware,\n", "issue": "Compress requests / responses\nLeverage `gzip` to compress requests / responses.\n", "before_files": [{"content": "from typing import Callable\nfrom fastapi import FastAPI\nfrom fastapi.responses import Response as FastAPIResponse\nfrom fastapi.routing import APIRoute as FastAPIRoute\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom starlette_exporter import PrometheusMiddleware\n\nfrom .endpoints import Endpoints, ModelRepositoryEndpoints\nfrom .requests import Request\nfrom .responses import Response\nfrom .errors import _EXCEPTION_HANDLERS\n\nfrom ..settings import Settings\nfrom ..handlers import DataPlane, ModelRepositoryHandlers\n\n\nclass APIRoute(FastAPIRoute):\n \"\"\"\n Custom route to use our own Request handler.\n \"\"\"\n\n def get_route_handler(self) -> Callable:\n original_route_handler = super().get_route_handler()\n\n async def custom_route_handler(request: Request) -> FastAPIResponse:\n request = Request(request.scope, request.receive)\n return await original_route_handler(request)\n\n return custom_route_handler\n\n\ndef create_app(\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n) -> FastAPI:\n endpoints = Endpoints(data_plane)\n model_repository_endpoints = ModelRepositoryEndpoints(model_repository_handlers)\n\n routes = [\n # Model ready\n APIRoute(\n \"/v2/models/{model_name}/ready\",\n endpoints.model_ready,\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}/ready\",\n endpoints.model_ready,\n ),\n # Model infer\n APIRoute(\n \"/v2/models/{model_name}/infer\",\n endpoints.infer,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}/infer\",\n endpoints.infer,\n methods=[\"POST\"],\n ),\n # Model metadata\n APIRoute(\n \"/v2/models/{model_name}\",\n endpoints.model_metadata,\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}\",\n endpoints.model_metadata,\n ),\n # Liveness and readiness\n APIRoute(\"/v2/health/live\", endpoints.live),\n APIRoute(\"/v2/health/ready\", endpoints.ready),\n # Server metadata\n APIRoute(\n \"/v2\",\n endpoints.metadata,\n ),\n ]\n\n routes += [\n # Model Repository API\n APIRoute(\n \"/v2/repository/index\",\n model_repository_endpoints.index,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/repository/models/{model_name}/load\",\n model_repository_endpoints.load,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/repository/models/{model_name}/unload\",\n model_repository_endpoints.unload,\n methods=[\"POST\"],\n ),\n ]\n\n app = FastAPI(\n debug=settings.debug,\n routes=routes, # type: ignore\n default_response_class=Response,\n exception_handlers=_EXCEPTION_HANDLERS, # type: ignore\n )\n\n if settings.cors_settings is not None:\n app.add_middleware(\n CORSMiddleware,\n 
allow_origins=settings.cors_settings.allow_origins,\n allow_origin_regex=settings.cors_settings.allow_origin_regex,\n allow_credentials=settings.cors_settings.allow_credentials,\n allow_methods=settings.cors_settings.allow_methods,\n allow_headers=settings.cors_settings.allow_headers,\n max_age=settings.cors_settings.max_age,\n )\n\n if settings.metrics_endpoint:\n app.add_middleware(\n PrometheusMiddleware,\n app_name=\"mlserver\",\n prefix=\"rest_server\",\n # TODO: Should we also exclude model's health endpoints?\n skip_paths=[\n settings.metrics_endpoint,\n \"/v2/health/live\",\n \"/v2/health/ready\",\n ],\n )\n\n return app\n", "path": "mlserver/rest/app.py"}], "after_files": [{"content": "from typing import Callable\nfrom fastapi import FastAPI\nfrom fastapi.responses import Response as FastAPIResponse\nfrom fastapi.routing import APIRoute as FastAPIRoute\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.middleware.gzip import GZipMiddleware\nfrom starlette_exporter import PrometheusMiddleware\n\nfrom .endpoints import Endpoints, ModelRepositoryEndpoints\nfrom .requests import Request\nfrom .responses import Response\nfrom .errors import _EXCEPTION_HANDLERS\n\nfrom ..settings import Settings\nfrom ..handlers import DataPlane, ModelRepositoryHandlers\n\n\nclass APIRoute(FastAPIRoute):\n \"\"\"\n Custom route to use our own Request handler.\n \"\"\"\n\n def get_route_handler(self) -> Callable:\n original_route_handler = super().get_route_handler()\n\n async def custom_route_handler(request: Request) -> FastAPIResponse:\n request = Request(request.scope, request.receive)\n return await original_route_handler(request)\n\n return custom_route_handler\n\n\ndef create_app(\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n) -> FastAPI:\n endpoints = Endpoints(data_plane)\n model_repository_endpoints = ModelRepositoryEndpoints(model_repository_handlers)\n\n routes = [\n # Model ready\n APIRoute(\n \"/v2/models/{model_name}/ready\",\n endpoints.model_ready,\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}/ready\",\n endpoints.model_ready,\n ),\n # Model infer\n APIRoute(\n \"/v2/models/{model_name}/infer\",\n endpoints.infer,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}/infer\",\n endpoints.infer,\n methods=[\"POST\"],\n ),\n # Model metadata\n APIRoute(\n \"/v2/models/{model_name}\",\n endpoints.model_metadata,\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}\",\n endpoints.model_metadata,\n ),\n # Liveness and readiness\n APIRoute(\"/v2/health/live\", endpoints.live),\n APIRoute(\"/v2/health/ready\", endpoints.ready),\n # Server metadata\n APIRoute(\n \"/v2\",\n endpoints.metadata,\n ),\n ]\n\n routes += [\n # Model Repository API\n APIRoute(\n \"/v2/repository/index\",\n model_repository_endpoints.index,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/repository/models/{model_name}/load\",\n model_repository_endpoints.load,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/repository/models/{model_name}/unload\",\n model_repository_endpoints.unload,\n methods=[\"POST\"],\n ),\n ]\n\n app = FastAPI(\n debug=settings.debug,\n routes=routes, # type: ignore\n default_response_class=Response,\n exception_handlers=_EXCEPTION_HANDLERS, # type: ignore\n )\n\n app.add_middleware(GZipMiddleware)\n if settings.cors_settings is not None:\n app.add_middleware(\n CORSMiddleware,\n allow_origins=settings.cors_settings.allow_origins,\n 
allow_origin_regex=settings.cors_settings.allow_origin_regex,\n allow_credentials=settings.cors_settings.allow_credentials,\n allow_methods=settings.cors_settings.allow_methods,\n allow_headers=settings.cors_settings.allow_headers,\n max_age=settings.cors_settings.max_age,\n )\n\n if settings.metrics_endpoint:\n app.add_middleware(\n PrometheusMiddleware,\n app_name=\"mlserver\",\n prefix=\"rest_server\",\n # TODO: Should we also exclude model's health endpoints?\n skip_paths=[\n settings.metrics_endpoint,\n \"/v2/health/live\",\n \"/v2/health/ready\",\n ],\n )\n\n return app\n", "path": "mlserver/rest/app.py"}]}
1,353
173
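The MLServer record above resolves the compression request with a one-line `GZipMiddleware` registration. A minimal standalone FastAPI sketch of that middleware — not MLServer's `create_app`; the `minimum_size` value is only an example of the knob the middleware exposes:

```python
# Responses larger than minimum_size bytes are gzip-compressed whenever the
# client sends "Accept-Encoding: gzip"; smaller responses pass through as-is.
from fastapi import FastAPI
from fastapi.middleware.gzip import GZipMiddleware

app = FastAPI()
app.add_middleware(GZipMiddleware, minimum_size=500)


@app.get("/v2/health/live")
def live():
    return {"status": "ok"}
```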
gh_patches_debug_58116
rasdani/github-patches
git_diff
mindee__doctr-929
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix encode_string function ### Bug description Currently there is no check if the single characters are also available in the given vocabulary. We need a check for this :) TODO's: - [ ] check that in the function and throw a meaningful exception - [ ] improve the corresponding test discussion: #926 ### Code snippet to reproduce the bug ```python from doctr.datasets.utils import encode_string from doctr.datasets import VOCABS x = encode_string(input_string='abcDÄÜ', vocab=VOCABS['english']) # Ä and Ü does not exist in vocab # raises ValueError: substring not found ``` ### Error traceback ``` Traceback (most recent call last): File "/home/felix/Desktop/doctr/test.py", line 7, in <module> x = encode_string(input_string='abcDÄÜ', vocab=VOCABS['english']) # Ä and Ü does not exist in vocab File "/home/felix/Desktop/doctr/doctr/datasets/utils.py", line 75, in encode_string return list(map(vocab.index, input_string)) # type: ignore[arg-type] ValueError: substring not found ``` ### Environment not need :) ### Deep Learning backend same --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `doctr/datasets/utils.py` Content: ``` 1 # Copyright (C) 2021-2022, Mindee. 2 3 # This program is licensed under the Apache License version 2. 4 # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details. 5 6 import string 7 import unicodedata 8 from collections.abc import Sequence 9 from functools import partial 10 from pathlib import Path 11 from typing import Any, Dict, List, Optional 12 from typing import Sequence as SequenceType 13 from typing import Tuple, TypeVar, Union 14 15 import numpy as np 16 from PIL import Image 17 18 from doctr.io.image import get_img_shape 19 from doctr.utils.geometry import convert_to_relative_coords, extract_crops, extract_rcrops 20 21 from .vocabs import VOCABS 22 23 __all__ = ['translate', 'encode_string', 'decode_sequence', 'encode_sequences'] 24 25 ImageTensor = TypeVar('ImageTensor') 26 27 28 def translate( 29 input_string: str, 30 vocab_name: str, 31 unknown_char: str = '■', 32 ) -> str: 33 """Translate a string input in a given vocabulary 34 35 Args: 36 input_string: input string to translate 37 vocab_name: vocabulary to use (french, latin, ...) 
38 unknown_char: unknown character for non-translatable characters 39 40 Returns: 41 A string translated in a given vocab""" 42 43 if VOCABS.get(vocab_name) is None: 44 raise KeyError("output vocabulary must be in vocabs dictionnary") 45 46 translated = '' 47 for char in input_string: 48 if char not in VOCABS[vocab_name]: 49 # we need to translate char into a vocab char 50 if char in string.whitespace: 51 # remove whitespaces 52 continue 53 # normalize character if it is not in vocab 54 char = unicodedata.normalize('NFD', char).encode('ascii', 'ignore').decode('ascii') 55 if char == '' or char not in VOCABS[vocab_name]: 56 # if normalization fails or char still not in vocab, return unknown character) 57 char = unknown_char 58 translated += char 59 return translated 60 61 62 def encode_string( 63 input_string: str, 64 vocab: str, 65 ) -> List[int]: 66 """Given a predefined mapping, encode the string to a sequence of numbers 67 68 Args: 69 input_string: string to encode 70 vocab: vocabulary (string), the encoding is given by the indexing of the character sequence 71 72 Returns: 73 A list encoding the input_string""" 74 75 return list(map(vocab.index, input_string)) # type: ignore[arg-type] 76 77 78 def decode_sequence( 79 input_seq: Union[np.array, SequenceType[int]], 80 mapping: str, 81 ) -> str: 82 """Given a predefined mapping, decode the sequence of numbers to a string 83 84 Args: 85 input_seq: array to decode 86 mapping: vocabulary (string), the encoding is given by the indexing of the character sequence 87 88 Returns: 89 A string, decoded from input_seq 90 """ 91 92 if not isinstance(input_seq, (Sequence, np.ndarray)): 93 raise TypeError("Invalid sequence type") 94 if isinstance(input_seq, np.ndarray) and (input_seq.dtype != np.int_ or input_seq.max() >= len(mapping)): 95 raise AssertionError("Input must be an array of int, with max less than mapping size") 96 97 return ''.join(map(mapping.__getitem__, input_seq)) 98 99 100 def encode_sequences( 101 sequences: List[str], 102 vocab: str, 103 target_size: Optional[int] = None, 104 eos: int = -1, 105 sos: Optional[int] = None, 106 pad: Optional[int] = None, 107 dynamic_seq_length: bool = False, 108 **kwargs: Any, 109 ) -> np.ndarray: 110 """Encode character sequences using a given vocab as mapping 111 112 Args: 113 sequences: the list of character sequences of size N 114 vocab: the ordered vocab to use for encoding 115 target_size: maximum length of the encoded data 116 eos: encoding of End Of String 117 sos: optional encoding of Start Of String 118 pad: optional encoding for padding. 
In case of padding, all sequences are followed by 1 EOS then PAD 119 dynamic_seq_length: if `target_size` is specified, uses it as upper bound and enables dynamic sequence size 120 121 Returns: 122 the padded encoded data as a tensor 123 """ 124 125 if 0 <= eos < len(vocab): 126 raise ValueError("argument 'eos' needs to be outside of vocab possible indices") 127 128 if not isinstance(target_size, int) or dynamic_seq_length: 129 # Maximum string length + EOS 130 max_length = max(len(w) for w in sequences) + 1 131 if isinstance(sos, int): 132 max_length += 1 133 if isinstance(pad, int): 134 max_length += 1 135 target_size = max_length if not isinstance(target_size, int) else min(max_length, target_size) 136 137 # Pad all sequences 138 if isinstance(pad, int): # pad with padding symbol 139 if 0 <= pad < len(vocab): 140 raise ValueError("argument 'pad' needs to be outside of vocab possible indices") 141 # In that case, add EOS at the end of the word before padding 142 default_symbol = pad 143 else: # pad with eos symbol 144 default_symbol = eos 145 encoded_data = np.full([len(sequences), target_size], default_symbol, dtype=np.int32) 146 147 # Encode the strings 148 for idx, seq in enumerate(map(partial(encode_string, vocab=vocab), sequences)): 149 if isinstance(pad, int): # add eos at the end of the sequence 150 seq.append(eos) 151 encoded_data[idx, :min(len(seq), target_size)] = seq[:min(len(seq), target_size)] 152 153 if isinstance(sos, int): # place sos symbol at the beginning of each sequence 154 if 0 <= sos < len(vocab): 155 raise ValueError("argument 'sos' needs to be outside of vocab possible indices") 156 encoded_data = np.roll(encoded_data, 1) 157 encoded_data[:, 0] = sos 158 159 return encoded_data 160 161 162 def convert_target_to_relative(img: ImageTensor, target: Dict[str, Any]) -> Tuple[ImageTensor, Dict[str, Any]]: 163 164 target['boxes'] = convert_to_relative_coords(target['boxes'], get_img_shape(img)) 165 return img, target 166 167 168 def crop_bboxes_from_image(img_path: Union[str, Path], geoms: np.ndarray) -> List[np.ndarray]: 169 """Crop a set of bounding boxes from an image 170 Args: 171 img_path: path to the image 172 geoms: a array of polygons of shape (N, 4, 2) or of straight boxes of shape (N, 4) 173 Returns: 174 a list of cropped images 175 """ 176 img = np.array(Image.open(img_path)) 177 # Polygon 178 if geoms.ndim == 3 and geoms.shape[1:] == (4, 2): 179 return extract_rcrops(img, geoms.astype(dtype=int)) 180 if geoms.ndim == 2 and geoms.shape[1] == 4: 181 return extract_crops(img, geoms.astype(dtype=int)) 182 raise ValueError("Invalid geometry format") 183 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/doctr/datasets/utils.py b/doctr/datasets/utils.py --- a/doctr/datasets/utils.py +++ b/doctr/datasets/utils.py @@ -72,7 +72,10 @@ Returns: A list encoding the input_string""" - return list(map(vocab.index, input_string)) # type: ignore[arg-type] + try: + return list(map(vocab.index, input_string)) # type: ignore[arg-type] + except ValueError: + raise ValueError("some characters cannot be found in 'vocab'") def decode_sequence(
{"golden_diff": "diff --git a/doctr/datasets/utils.py b/doctr/datasets/utils.py\n--- a/doctr/datasets/utils.py\n+++ b/doctr/datasets/utils.py\n@@ -72,7 +72,10 @@\n Returns:\n A list encoding the input_string\"\"\"\n \n- return list(map(vocab.index, input_string)) # type: ignore[arg-type]\n+ try:\n+ return list(map(vocab.index, input_string)) # type: ignore[arg-type]\n+ except ValueError:\n+ raise ValueError(\"some characters cannot be found in 'vocab'\")\n \n \n def decode_sequence(\n", "issue": "Fix encode_string function\n### Bug description\n\nCurrently there is no check if the single characters are also available in the given vocabulary.\r\nWe need a check for this :) \r\n\r\nTODO's:\r\n\r\n- [ ] check that in the function and throw a meaningful exception\r\n- [ ] improve the corresponding test \r\n\r\ndiscussion:\r\n#926 \n\n### Code snippet to reproduce the bug\n\n```python\r\nfrom doctr.datasets.utils import encode_string\r\nfrom doctr.datasets import VOCABS\r\n\r\nx = encode_string(input_string='abcD\u00c4\u00dc', vocab=VOCABS['english']) # \u00c4 and \u00dc does not exist in vocab\r\n# raises ValueError: substring not found\r\n```\n\n### Error traceback\n\n```\r\nTraceback (most recent call last):\r\n File \"/home/felix/Desktop/doctr/test.py\", line 7, in <module>\r\n x = encode_string(input_string='abcD\u00c4\u00dc', vocab=VOCABS['english']) # \u00c4 and \u00dc does not exist in vocab\r\n File \"/home/felix/Desktop/doctr/doctr/datasets/utils.py\", line 75, in encode_string\r\n return list(map(vocab.index, input_string)) # type: ignore[arg-type]\r\nValueError: substring not found\r\n```\n\n### Environment\n\nnot need :)\n\n### Deep Learning backend\n\nsame\n", "before_files": [{"content": "# Copyright (C) 2021-2022, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport string\nimport unicodedata\nfrom collections.abc import Sequence\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional\nfrom typing import Sequence as SequenceType\nfrom typing import Tuple, TypeVar, Union\n\nimport numpy as np\nfrom PIL import Image\n\nfrom doctr.io.image import get_img_shape\nfrom doctr.utils.geometry import convert_to_relative_coords, extract_crops, extract_rcrops\n\nfrom .vocabs import VOCABS\n\n__all__ = ['translate', 'encode_string', 'decode_sequence', 'encode_sequences']\n\nImageTensor = TypeVar('ImageTensor')\n\n\ndef translate(\n input_string: str,\n vocab_name: str,\n unknown_char: str = '\u25a0',\n) -> str:\n \"\"\"Translate a string input in a given vocabulary\n\n Args:\n input_string: input string to translate\n vocab_name: vocabulary to use (french, latin, ...)\n unknown_char: unknown character for non-translatable characters\n\n Returns:\n A string translated in a given vocab\"\"\"\n\n if VOCABS.get(vocab_name) is None:\n raise KeyError(\"output vocabulary must be in vocabs dictionnary\")\n\n translated = ''\n for char in input_string:\n if char not in VOCABS[vocab_name]:\n # we need to translate char into a vocab char\n if char in string.whitespace:\n # remove whitespaces\n continue\n # normalize character if it is not in vocab\n char = unicodedata.normalize('NFD', char).encode('ascii', 'ignore').decode('ascii')\n if char == '' or char not in VOCABS[vocab_name]:\n # if normalization fails or char still not in vocab, return unknown character)\n char = unknown_char\n translated += char\n return 
translated\n\n\ndef encode_string(\n input_string: str,\n vocab: str,\n) -> List[int]:\n \"\"\"Given a predefined mapping, encode the string to a sequence of numbers\n\n Args:\n input_string: string to encode\n vocab: vocabulary (string), the encoding is given by the indexing of the character sequence\n\n Returns:\n A list encoding the input_string\"\"\"\n\n return list(map(vocab.index, input_string)) # type: ignore[arg-type]\n\n\ndef decode_sequence(\n input_seq: Union[np.array, SequenceType[int]],\n mapping: str,\n) -> str:\n \"\"\"Given a predefined mapping, decode the sequence of numbers to a string\n\n Args:\n input_seq: array to decode\n mapping: vocabulary (string), the encoding is given by the indexing of the character sequence\n\n Returns:\n A string, decoded from input_seq\n \"\"\"\n\n if not isinstance(input_seq, (Sequence, np.ndarray)):\n raise TypeError(\"Invalid sequence type\")\n if isinstance(input_seq, np.ndarray) and (input_seq.dtype != np.int_ or input_seq.max() >= len(mapping)):\n raise AssertionError(\"Input must be an array of int, with max less than mapping size\")\n\n return ''.join(map(mapping.__getitem__, input_seq))\n\n\ndef encode_sequences(\n sequences: List[str],\n vocab: str,\n target_size: Optional[int] = None,\n eos: int = -1,\n sos: Optional[int] = None,\n pad: Optional[int] = None,\n dynamic_seq_length: bool = False,\n **kwargs: Any,\n) -> np.ndarray:\n \"\"\"Encode character sequences using a given vocab as mapping\n\n Args:\n sequences: the list of character sequences of size N\n vocab: the ordered vocab to use for encoding\n target_size: maximum length of the encoded data\n eos: encoding of End Of String\n sos: optional encoding of Start Of String\n pad: optional encoding for padding. In case of padding, all sequences are followed by 1 EOS then PAD\n dynamic_seq_length: if `target_size` is specified, uses it as upper bound and enables dynamic sequence size\n\n Returns:\n the padded encoded data as a tensor\n \"\"\"\n\n if 0 <= eos < len(vocab):\n raise ValueError(\"argument 'eos' needs to be outside of vocab possible indices\")\n\n if not isinstance(target_size, int) or dynamic_seq_length:\n # Maximum string length + EOS\n max_length = max(len(w) for w in sequences) + 1\n if isinstance(sos, int):\n max_length += 1\n if isinstance(pad, int):\n max_length += 1\n target_size = max_length if not isinstance(target_size, int) else min(max_length, target_size)\n\n # Pad all sequences\n if isinstance(pad, int): # pad with padding symbol\n if 0 <= pad < len(vocab):\n raise ValueError(\"argument 'pad' needs to be outside of vocab possible indices\")\n # In that case, add EOS at the end of the word before padding\n default_symbol = pad\n else: # pad with eos symbol\n default_symbol = eos\n encoded_data = np.full([len(sequences), target_size], default_symbol, dtype=np.int32)\n\n # Encode the strings\n for idx, seq in enumerate(map(partial(encode_string, vocab=vocab), sequences)):\n if isinstance(pad, int): # add eos at the end of the sequence\n seq.append(eos)\n encoded_data[idx, :min(len(seq), target_size)] = seq[:min(len(seq), target_size)]\n\n if isinstance(sos, int): # place sos symbol at the beginning of each sequence\n if 0 <= sos < len(vocab):\n raise ValueError(\"argument 'sos' needs to be outside of vocab possible indices\")\n encoded_data = np.roll(encoded_data, 1)\n encoded_data[:, 0] = sos\n\n return encoded_data\n\n\ndef convert_target_to_relative(img: ImageTensor, target: Dict[str, Any]) -> Tuple[ImageTensor, Dict[str, Any]]:\n\n target['boxes'] = 
convert_to_relative_coords(target['boxes'], get_img_shape(img))\n return img, target\n\n\ndef crop_bboxes_from_image(img_path: Union[str, Path], geoms: np.ndarray) -> List[np.ndarray]:\n \"\"\"Crop a set of bounding boxes from an image\n Args:\n img_path: path to the image\n geoms: a array of polygons of shape (N, 4, 2) or of straight boxes of shape (N, 4)\n Returns:\n a list of cropped images\n \"\"\"\n img = np.array(Image.open(img_path))\n # Polygon\n if geoms.ndim == 3 and geoms.shape[1:] == (4, 2):\n return extract_rcrops(img, geoms.astype(dtype=int))\n if geoms.ndim == 2 and geoms.shape[1] == 4:\n return extract_crops(img, geoms.astype(dtype=int))\n raise ValueError(\"Invalid geometry format\")\n", "path": "doctr/datasets/utils.py"}], "after_files": [{"content": "# Copyright (C) 2021-2022, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport string\nimport unicodedata\nfrom collections.abc import Sequence\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional\nfrom typing import Sequence as SequenceType\nfrom typing import Tuple, TypeVar, Union\n\nimport numpy as np\nfrom PIL import Image\n\nfrom doctr.io.image import get_img_shape\nfrom doctr.utils.geometry import convert_to_relative_coords, extract_crops, extract_rcrops\n\nfrom .vocabs import VOCABS\n\n__all__ = ['translate', 'encode_string', 'decode_sequence', 'encode_sequences']\n\nImageTensor = TypeVar('ImageTensor')\n\n\ndef translate(\n input_string: str,\n vocab_name: str,\n unknown_char: str = '\u25a0',\n) -> str:\n \"\"\"Translate a string input in a given vocabulary\n\n Args:\n input_string: input string to translate\n vocab_name: vocabulary to use (french, latin, ...)\n unknown_char: unknown character for non-translatable characters\n\n Returns:\n A string translated in a given vocab\"\"\"\n\n if VOCABS.get(vocab_name) is None:\n raise KeyError(\"output vocabulary must be in vocabs dictionnary\")\n\n translated = ''\n for char in input_string:\n if char not in VOCABS[vocab_name]:\n # we need to translate char into a vocab char\n if char in string.whitespace:\n # remove whitespaces\n continue\n # normalize character if it is not in vocab\n char = unicodedata.normalize('NFD', char).encode('ascii', 'ignore').decode('ascii')\n if char == '' or char not in VOCABS[vocab_name]:\n # if normalization fails or char still not in vocab, return unknown character)\n char = unknown_char\n translated += char\n return translated\n\n\ndef encode_string(\n input_string: str,\n vocab: str,\n) -> List[int]:\n \"\"\"Given a predefined mapping, encode the string to a sequence of numbers\n\n Args:\n input_string: string to encode\n vocab: vocabulary (string), the encoding is given by the indexing of the character sequence\n\n Returns:\n A list encoding the input_string\"\"\"\n\n try:\n return list(map(vocab.index, input_string)) # type: ignore[arg-type]\n except ValueError:\n raise ValueError(\"some characters cannot be found in 'vocab'\")\n\n\ndef decode_sequence(\n input_seq: Union[np.array, SequenceType[int]],\n mapping: str,\n) -> str:\n \"\"\"Given a predefined mapping, decode the sequence of numbers to a string\n\n Args:\n input_seq: array to decode\n mapping: vocabulary (string), the encoding is given by the indexing of the character sequence\n\n Returns:\n A string, decoded from input_seq\n \"\"\"\n\n if not isinstance(input_seq, (Sequence, np.ndarray)):\n raise 
TypeError(\"Invalid sequence type\")\n if isinstance(input_seq, np.ndarray) and (input_seq.dtype != np.int_ or input_seq.max() >= len(mapping)):\n raise AssertionError(\"Input must be an array of int, with max less than mapping size\")\n\n return ''.join(map(mapping.__getitem__, input_seq))\n\n\ndef encode_sequences(\n sequences: List[str],\n vocab: str,\n target_size: Optional[int] = None,\n eos: int = -1,\n sos: Optional[int] = None,\n pad: Optional[int] = None,\n dynamic_seq_length: bool = False,\n **kwargs: Any,\n) -> np.ndarray:\n \"\"\"Encode character sequences using a given vocab as mapping\n\n Args:\n sequences: the list of character sequences of size N\n vocab: the ordered vocab to use for encoding\n target_size: maximum length of the encoded data\n eos: encoding of End Of String\n sos: optional encoding of Start Of String\n pad: optional encoding for padding. In case of padding, all sequences are followed by 1 EOS then PAD\n dynamic_seq_length: if `target_size` is specified, uses it as upper bound and enables dynamic sequence size\n\n Returns:\n the padded encoded data as a tensor\n \"\"\"\n\n if 0 <= eos < len(vocab):\n raise ValueError(\"argument 'eos' needs to be outside of vocab possible indices\")\n\n if not isinstance(target_size, int) or dynamic_seq_length:\n # Maximum string length + EOS\n max_length = max(len(w) for w in sequences) + 1\n if isinstance(sos, int):\n max_length += 1\n if isinstance(pad, int):\n max_length += 1\n target_size = max_length if not isinstance(target_size, int) else min(max_length, target_size)\n\n # Pad all sequences\n if isinstance(pad, int): # pad with padding symbol\n if 0 <= pad < len(vocab):\n raise ValueError(\"argument 'pad' needs to be outside of vocab possible indices\")\n # In that case, add EOS at the end of the word before padding\n default_symbol = pad\n else: # pad with eos symbol\n default_symbol = eos\n encoded_data = np.full([len(sequences), target_size], default_symbol, dtype=np.int32)\n\n # Encode the strings\n for idx, seq in enumerate(map(partial(encode_string, vocab=vocab), sequences)):\n if isinstance(pad, int): # add eos at the end of the sequence\n seq.append(eos)\n encoded_data[idx, :min(len(seq), target_size)] = seq[:min(len(seq), target_size)]\n\n if isinstance(sos, int): # place sos symbol at the beginning of each sequence\n if 0 <= sos < len(vocab):\n raise ValueError(\"argument 'sos' needs to be outside of vocab possible indices\")\n encoded_data = np.roll(encoded_data, 1)\n encoded_data[:, 0] = sos\n\n return encoded_data\n\n\ndef convert_target_to_relative(img: ImageTensor, target: Dict[str, Any]) -> Tuple[ImageTensor, Dict[str, Any]]:\n\n target['boxes'] = convert_to_relative_coords(target['boxes'], get_img_shape(img))\n return img, target\n\n\ndef crop_bboxes_from_image(img_path: Union[str, Path], geoms: np.ndarray) -> List[np.ndarray]:\n \"\"\"Crop a set of bounding boxes from an image\n Args:\n img_path: path to the image\n geoms: a array of polygons of shape (N, 4, 2) or of straight boxes of shape (N, 4)\n Returns:\n a list of cropped images\n \"\"\"\n img = np.array(Image.open(img_path))\n # Polygon\n if geoms.ndim == 3 and geoms.shape[1:] == (4, 2):\n return extract_rcrops(img, geoms.astype(dtype=int))\n if geoms.ndim == 2 and geoms.shape[1] == 4:\n return extract_crops(img, geoms.astype(dtype=int))\n raise ValueError(\"Invalid geometry format\")\n", "path": "doctr/datasets/utils.py"}]}
2,511
129
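A minimal standalone sketch of the patched `encode_string` behaviour from the record above: the golden diff wraps `vocab.index` in a try/except so unknown characters raise a readable error instead of a bare "substring not found". The vocabulary string below is a toy stand-in, not doctr's real English vocab, and no doctr import is required.

```python
from typing import List


def encode_string(input_string: str, vocab: str) -> List[int]:
    """Map each character to its index in vocab, mirroring the patched doctr helper."""
    try:
        return list(map(vocab.index, input_string))
    except ValueError:
        raise ValueError("some characters cannot be found in 'vocab'")


vocab = "abcdefghijklmnopqrstuvwxyzABCD"  # toy stand-in vocabulary
print(encode_string("abcD", vocab))       # [0, 1, 2, 29]
try:
    encode_string("abcDÄÜ", vocab)        # Ä and Ü are missing from the toy vocab
except ValueError as exc:
    print(exc)                            # some characters cannot be found in 'vocab'
```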
gh_patches_debug_16254
rasdani/github-patches
git_diff
pyodide__pyodide-123
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Full build path is included in package `.js` files As @rth pointed out in #121, the full build path to the `.data` file is included in the `.js` file for each package. This is *really* a problem, since it doesn't prevent the packages from being deployed anywhere, but it is leaking information we probably don't want to and makes the builds less reproducible. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/buildpkg.py` Content: ``` 1 #!/usr/bin/env python3 2 3 """ 4 Builds a Pyodide package. 5 """ 6 7 import argparse 8 import hashlib 9 import os 10 from pathlib import Path 11 import shutil 12 import subprocess 13 14 15 import common 16 17 18 ROOTDIR = Path(__file__).parent.resolve() 19 20 21 def check_checksum(path, pkg): 22 """ 23 Checks that a tarball matches the checksum in the package metadata. 24 """ 25 checksum_keys = {'md5', 'sha256'}.intersection(pkg['source']) 26 if not checksum_keys: 27 return 28 elif len(checksum_keys) != 1: 29 raise ValueError('Only one checksum should be included in a package ' 30 'setup; found {}.'.format(checksum_keys)) 31 checksum_algorithm = checksum_keys.pop() 32 checksum = pkg['source'][checksum_algorithm] 33 CHUNK_SIZE = 1 << 16 34 h = getattr(hashlib, checksum_algorithm)() 35 with open(path, 'rb') as fd: 36 while True: 37 chunk = fd.read(CHUNK_SIZE) 38 h.update(chunk) 39 if len(chunk) < CHUNK_SIZE: 40 break 41 if h.hexdigest() != checksum: 42 raise ValueError("Invalid {} checksum".format(checksum_algorithm)) 43 44 45 def download_and_extract(buildpath, packagedir, pkg, args): 46 tarballpath = buildpath / Path(pkg['source']['url']).name 47 if not tarballpath.is_file(): 48 subprocess.run([ 49 'wget', '-q', '-O', str(tarballpath), pkg['source']['url'] 50 ], check=True) 51 check_checksum(tarballpath, pkg) 52 srcpath = buildpath / packagedir 53 if not srcpath.is_dir(): 54 shutil.unpack_archive(str(tarballpath), str(buildpath)) 55 return srcpath 56 57 58 def patch(path, srcpath, pkg, args): 59 if (srcpath / '.patched').is_file(): 60 return 61 62 # Apply all of the patches 63 orig_dir = Path.cwd() 64 pkgdir = path.parent.resolve() 65 os.chdir(srcpath) 66 try: 67 for patch in pkg['source'].get('patches', []): 68 subprocess.run([ 69 'patch', '-p1', '--binary', '-i', pkgdir / patch 70 ], check=True) 71 finally: 72 os.chdir(orig_dir) 73 74 # Add any extra files 75 for src, dst in pkg['source'].get('extras', []): 76 shutil.copyfile(pkgdir / src, srcpath / dst) 77 78 with open(srcpath / '.patched', 'wb') as fd: 79 fd.write(b'\n') 80 81 82 def get_libdir(srcpath, args): 83 # Get the name of the build/lib.XXX directory that distutils wrote its 84 # output to 85 slug = subprocess.check_output([ 86 str(Path(args.host) / 'bin' / 'python3'), 87 '-c', 88 'import sysconfig, sys; ' 89 'print("{}-{}.{}".format(' 90 'sysconfig.get_platform(), ' 91 'sys.version_info[0], ' 92 'sys.version_info[1]))']).decode('ascii').strip() 93 purelib = srcpath / 'build' / 'lib' 94 if purelib.is_dir(): 95 libdir = purelib 96 else: 97 libdir = srcpath / 'build' / ('lib.' 
+ slug) 98 return libdir 99 100 101 def compile(path, srcpath, pkg, args): 102 if (srcpath / '.built').is_file(): 103 return 104 105 orig_dir = Path.cwd() 106 os.chdir(srcpath) 107 try: 108 subprocess.run([ 109 str(Path(args.host) / 'bin' / 'python3'), 110 str(ROOTDIR / 'pywasmcross'), 111 '--cflags', 112 args.cflags + ' ' + 113 pkg.get('build', {}).get('cflags', ''), 114 '--ldflags', 115 args.ldflags + ' ' + 116 pkg.get('build', {}).get('ldflags', ''), 117 '--host', args.host, 118 '--target', args.target], check=True) 119 finally: 120 os.chdir(orig_dir) 121 122 post = pkg.get('build', {}).get('post') 123 if post is not None: 124 libdir = get_libdir(srcpath, args) 125 pkgdir = path.parent.resolve() 126 env = { 127 'BUILD': libdir, 128 'PKGDIR': pkgdir 129 } 130 subprocess.run([ 131 'bash', '-c', post], env=env, check=True) 132 133 with open(srcpath / '.built', 'wb') as fd: 134 fd.write(b'\n') 135 136 137 def package_files(buildpath, srcpath, pkg, args): 138 if (buildpath / '.pacakaged').is_file(): 139 return 140 141 name = pkg['package']['name'] 142 libdir = get_libdir(srcpath, args) 143 subprocess.run([ 144 'python', 145 Path(os.environ['EMSCRIPTEN']) / 'tools' / 'file_packager.py', 146 buildpath / (name + '.data'), 147 '--preload', 148 '{}@/lib/python3.6/site-packages'.format(libdir), 149 '--js-output={}'.format(buildpath / (name + '.js')), 150 '--export-name=pyodide', 151 '--exclude', '*.wasm.pre', 152 '--exclude', '__pycache__', 153 '--use-preload-plugins'], check=True) 154 subprocess.run([ 155 'uglifyjs', 156 buildpath / (name + '.js'), 157 '-o', 158 buildpath / (name + '.js')], check=True) 159 160 with open(buildpath / '.packaged', 'wb') as fd: 161 fd.write(b'\n') 162 163 164 def build_package(path, args): 165 pkg = common.parse_package(path) 166 packagedir = pkg['package']['name'] + '-' + pkg['package']['version'] 167 dirpath = path.parent 168 orig_path = Path.cwd() 169 os.chdir(dirpath) 170 try: 171 buildpath = dirpath / 'build' 172 if not buildpath.is_dir(): 173 os.makedirs(buildpath) 174 srcpath = download_and_extract(buildpath, packagedir, pkg, args) 175 patch(path, srcpath, pkg, args) 176 compile(path, srcpath, pkg, args) 177 package_files(buildpath, srcpath, pkg, args) 178 finally: 179 os.chdir(orig_path) 180 181 182 def parse_args(): 183 parser = argparse.ArgumentParser('Build a pyodide package.') 184 parser.add_argument( 185 'package', type=str, nargs=1, 186 help="Path to meta.yaml package description") 187 parser.add_argument( 188 '--cflags', type=str, nargs='?', default=common.DEFAULTCFLAGS, 189 help='Extra compiling flags') 190 parser.add_argument( 191 '--ldflags', type=str, nargs='?', default=common.DEFAULTLDFLAGS, 192 help='Extra linking flags') 193 parser.add_argument( 194 '--host', type=str, nargs='?', default=common.HOSTPYTHON, 195 help='The path to the host Python installation') 196 parser.add_argument( 197 '--target', type=str, nargs='?', default=common.TARGETPYTHON, 198 help='The path to the target Python installation') 199 return parser.parse_args() 200 201 202 def main(args): 203 path = Path(args.package[0]).resolve() 204 build_package(path, args) 205 206 207 if __name__ == '__main__': 208 args = parse_args() 209 main(args) 210 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/buildpkg.py b/tools/buildpkg.py --- a/tools/buildpkg.py +++ b/tools/buildpkg.py @@ -143,14 +143,15 @@ subprocess.run([ 'python', Path(os.environ['EMSCRIPTEN']) / 'tools' / 'file_packager.py', - buildpath / (name + '.data'), + name + '.data', '--preload', '{}@/lib/python3.6/site-packages'.format(libdir), - '--js-output={}'.format(buildpath / (name + '.js')), + '--js-output={}'.format(name + '.js'), '--export-name=pyodide', '--exclude', '*.wasm.pre', '--exclude', '__pycache__', - '--use-preload-plugins'], check=True) + '--use-preload-plugins'], + cwd=buildpath, check=True) subprocess.run([ 'uglifyjs', buildpath / (name + '.js'),
{"golden_diff": "diff --git a/tools/buildpkg.py b/tools/buildpkg.py\n--- a/tools/buildpkg.py\n+++ b/tools/buildpkg.py\n@@ -143,14 +143,15 @@\n subprocess.run([\n 'python',\n Path(os.environ['EMSCRIPTEN']) / 'tools' / 'file_packager.py',\n- buildpath / (name + '.data'),\n+ name + '.data',\n '--preload',\n '{}@/lib/python3.6/site-packages'.format(libdir),\n- '--js-output={}'.format(buildpath / (name + '.js')),\n+ '--js-output={}'.format(name + '.js'),\n '--export-name=pyodide',\n '--exclude', '*.wasm.pre',\n '--exclude', '__pycache__',\n- '--use-preload-plugins'], check=True)\n+ '--use-preload-plugins'],\n+ cwd=buildpath, check=True)\n subprocess.run([\n 'uglifyjs',\n buildpath / (name + '.js'),\n", "issue": "Full build path is included in package `.js` files\nAs @rth pointed out in #121, the full build path to the `.data` file is included in the `.js` file for each package. This is *really* a problem, since it doesn't prevent the packages from being deployed anywhere, but it is leaking information we probably don't want to and makes the builds less reproducible.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nBuilds a Pyodide package.\n\"\"\"\n\nimport argparse\nimport hashlib\nimport os\nfrom pathlib import Path\nimport shutil\nimport subprocess\n\n\nimport common\n\n\nROOTDIR = Path(__file__).parent.resolve()\n\n\ndef check_checksum(path, pkg):\n \"\"\"\n Checks that a tarball matches the checksum in the package metadata.\n \"\"\"\n checksum_keys = {'md5', 'sha256'}.intersection(pkg['source'])\n if not checksum_keys:\n return\n elif len(checksum_keys) != 1:\n raise ValueError('Only one checksum should be included in a package '\n 'setup; found {}.'.format(checksum_keys))\n checksum_algorithm = checksum_keys.pop()\n checksum = pkg['source'][checksum_algorithm]\n CHUNK_SIZE = 1 << 16\n h = getattr(hashlib, checksum_algorithm)()\n with open(path, 'rb') as fd:\n while True:\n chunk = fd.read(CHUNK_SIZE)\n h.update(chunk)\n if len(chunk) < CHUNK_SIZE:\n break\n if h.hexdigest() != checksum:\n raise ValueError(\"Invalid {} checksum\".format(checksum_algorithm))\n\n\ndef download_and_extract(buildpath, packagedir, pkg, args):\n tarballpath = buildpath / Path(pkg['source']['url']).name\n if not tarballpath.is_file():\n subprocess.run([\n 'wget', '-q', '-O', str(tarballpath), pkg['source']['url']\n ], check=True)\n check_checksum(tarballpath, pkg)\n srcpath = buildpath / packagedir\n if not srcpath.is_dir():\n shutil.unpack_archive(str(tarballpath), str(buildpath))\n return srcpath\n\n\ndef patch(path, srcpath, pkg, args):\n if (srcpath / '.patched').is_file():\n return\n\n # Apply all of the patches\n orig_dir = Path.cwd()\n pkgdir = path.parent.resolve()\n os.chdir(srcpath)\n try:\n for patch in pkg['source'].get('patches', []):\n subprocess.run([\n 'patch', '-p1', '--binary', '-i', pkgdir / patch\n ], check=True)\n finally:\n os.chdir(orig_dir)\n\n # Add any extra files\n for src, dst in pkg['source'].get('extras', []):\n shutil.copyfile(pkgdir / src, srcpath / dst)\n\n with open(srcpath / '.patched', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef get_libdir(srcpath, args):\n # Get the name of the build/lib.XXX directory that distutils wrote its\n # output to\n slug = subprocess.check_output([\n str(Path(args.host) / 'bin' / 'python3'),\n '-c',\n 'import sysconfig, sys; '\n 'print(\"{}-{}.{}\".format('\n 'sysconfig.get_platform(), '\n 'sys.version_info[0], '\n 'sys.version_info[1]))']).decode('ascii').strip()\n purelib = srcpath / 'build' / 'lib'\n if purelib.is_dir():\n libdir = 
purelib\n else:\n libdir = srcpath / 'build' / ('lib.' + slug)\n return libdir\n\n\ndef compile(path, srcpath, pkg, args):\n if (srcpath / '.built').is_file():\n return\n\n orig_dir = Path.cwd()\n os.chdir(srcpath)\n try:\n subprocess.run([\n str(Path(args.host) / 'bin' / 'python3'),\n str(ROOTDIR / 'pywasmcross'),\n '--cflags',\n args.cflags + ' ' +\n pkg.get('build', {}).get('cflags', ''),\n '--ldflags',\n args.ldflags + ' ' +\n pkg.get('build', {}).get('ldflags', ''),\n '--host', args.host,\n '--target', args.target], check=True)\n finally:\n os.chdir(orig_dir)\n\n post = pkg.get('build', {}).get('post')\n if post is not None:\n libdir = get_libdir(srcpath, args)\n pkgdir = path.parent.resolve()\n env = {\n 'BUILD': libdir,\n 'PKGDIR': pkgdir\n }\n subprocess.run([\n 'bash', '-c', post], env=env, check=True)\n\n with open(srcpath / '.built', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef package_files(buildpath, srcpath, pkg, args):\n if (buildpath / '.pacakaged').is_file():\n return\n\n name = pkg['package']['name']\n libdir = get_libdir(srcpath, args)\n subprocess.run([\n 'python',\n Path(os.environ['EMSCRIPTEN']) / 'tools' / 'file_packager.py',\n buildpath / (name + '.data'),\n '--preload',\n '{}@/lib/python3.6/site-packages'.format(libdir),\n '--js-output={}'.format(buildpath / (name + '.js')),\n '--export-name=pyodide',\n '--exclude', '*.wasm.pre',\n '--exclude', '__pycache__',\n '--use-preload-plugins'], check=True)\n subprocess.run([\n 'uglifyjs',\n buildpath / (name + '.js'),\n '-o',\n buildpath / (name + '.js')], check=True)\n\n with open(buildpath / '.packaged', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef build_package(path, args):\n pkg = common.parse_package(path)\n packagedir = pkg['package']['name'] + '-' + pkg['package']['version']\n dirpath = path.parent\n orig_path = Path.cwd()\n os.chdir(dirpath)\n try:\n buildpath = dirpath / 'build'\n if not buildpath.is_dir():\n os.makedirs(buildpath)\n srcpath = download_and_extract(buildpath, packagedir, pkg, args)\n patch(path, srcpath, pkg, args)\n compile(path, srcpath, pkg, args)\n package_files(buildpath, srcpath, pkg, args)\n finally:\n os.chdir(orig_path)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser('Build a pyodide package.')\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"Path to meta.yaml package description\")\n parser.add_argument(\n '--cflags', type=str, nargs='?', default=common.DEFAULTCFLAGS,\n help='Extra compiling flags')\n parser.add_argument(\n '--ldflags', type=str, nargs='?', default=common.DEFAULTLDFLAGS,\n help='Extra linking flags')\n parser.add_argument(\n '--host', type=str, nargs='?', default=common.HOSTPYTHON,\n help='The path to the host Python installation')\n parser.add_argument(\n '--target', type=str, nargs='?', default=common.TARGETPYTHON,\n help='The path to the target Python installation')\n return parser.parse_args()\n\n\ndef main(args):\n path = Path(args.package[0]).resolve()\n build_package(path, args)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "path": "tools/buildpkg.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nBuilds a Pyodide package.\n\"\"\"\n\nimport argparse\nimport hashlib\nimport os\nfrom pathlib import Path\nimport shutil\nimport subprocess\n\n\nimport common\n\n\nROOTDIR = Path(__file__).parent.resolve()\n\n\ndef check_checksum(path, pkg):\n \"\"\"\n Checks that a tarball matches the checksum in the package metadata.\n \"\"\"\n checksum_keys = {'md5', 'sha256'}.intersection(pkg['source'])\n if not 
checksum_keys:\n return\n elif len(checksum_keys) != 1:\n raise ValueError('Only one checksum should be included in a package '\n 'setup; found {}.'.format(checksum_keys))\n checksum_algorithm = checksum_keys.pop()\n checksum = pkg['source'][checksum_algorithm]\n CHUNK_SIZE = 1 << 16\n h = getattr(hashlib, checksum_algorithm)()\n with open(path, 'rb') as fd:\n while True:\n chunk = fd.read(CHUNK_SIZE)\n h.update(chunk)\n if len(chunk) < CHUNK_SIZE:\n break\n if h.hexdigest() != checksum:\n raise ValueError(\"Invalid {} checksum\".format(checksum_algorithm))\n\n\ndef download_and_extract(buildpath, packagedir, pkg, args):\n tarballpath = buildpath / Path(pkg['source']['url']).name\n if not tarballpath.is_file():\n subprocess.run([\n 'wget', '-q', '-O', str(tarballpath), pkg['source']['url']\n ], check=True)\n check_checksum(tarballpath, pkg)\n srcpath = buildpath / packagedir\n if not srcpath.is_dir():\n shutil.unpack_archive(str(tarballpath), str(buildpath))\n return srcpath\n\n\ndef patch(path, srcpath, pkg, args):\n if (srcpath / '.patched').is_file():\n return\n\n # Apply all of the patches\n orig_dir = Path.cwd()\n pkgdir = path.parent.resolve()\n os.chdir(srcpath)\n try:\n for patch in pkg['source'].get('patches', []):\n subprocess.run([\n 'patch', '-p1', '--binary', '-i', pkgdir / patch\n ], check=True)\n finally:\n os.chdir(orig_dir)\n\n # Add any extra files\n for src, dst in pkg['source'].get('extras', []):\n shutil.copyfile(pkgdir / src, srcpath / dst)\n\n with open(srcpath / '.patched', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef get_libdir(srcpath, args):\n # Get the name of the build/lib.XXX directory that distutils wrote its\n # output to\n slug = subprocess.check_output([\n str(Path(args.host) / 'bin' / 'python3'),\n '-c',\n 'import sysconfig, sys; '\n 'print(\"{}-{}.{}\".format('\n 'sysconfig.get_platform(), '\n 'sys.version_info[0], '\n 'sys.version_info[1]))']).decode('ascii').strip()\n purelib = srcpath / 'build' / 'lib'\n if purelib.is_dir():\n libdir = purelib\n else:\n libdir = srcpath / 'build' / ('lib.' 
+ slug)\n return libdir\n\n\ndef compile(path, srcpath, pkg, args):\n if (srcpath / '.built').is_file():\n return\n\n orig_dir = Path.cwd()\n os.chdir(srcpath)\n try:\n subprocess.run([\n str(Path(args.host) / 'bin' / 'python3'),\n str(ROOTDIR / 'pywasmcross'),\n '--cflags',\n args.cflags + ' ' +\n pkg.get('build', {}).get('cflags', ''),\n '--ldflags',\n args.ldflags + ' ' +\n pkg.get('build', {}).get('ldflags', ''),\n '--host', args.host,\n '--target', args.target], check=True)\n finally:\n os.chdir(orig_dir)\n\n post = pkg.get('build', {}).get('post')\n if post is not None:\n libdir = get_libdir(srcpath, args)\n pkgdir = path.parent.resolve()\n env = {\n 'BUILD': libdir,\n 'PKGDIR': pkgdir\n }\n subprocess.run([\n 'bash', '-c', post], env=env, check=True)\n\n with open(srcpath / '.built', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef package_files(buildpath, srcpath, pkg, args):\n if (buildpath / '.pacakaged').is_file():\n return\n\n name = pkg['package']['name']\n libdir = get_libdir(srcpath, args)\n subprocess.run([\n 'python',\n Path(os.environ['EMSCRIPTEN']) / 'tools' / 'file_packager.py',\n name + '.data',\n '--preload',\n '{}@/lib/python3.6/site-packages'.format(libdir),\n '--js-output={}'.format(name + '.js'),\n '--export-name=pyodide',\n '--exclude', '*.wasm.pre',\n '--exclude', '__pycache__',\n '--use-preload-plugins'],\n cwd=buildpath, check=True)\n subprocess.run([\n 'uglifyjs',\n buildpath / (name + '.js'),\n '-o',\n buildpath / (name + '.js')], check=True)\n\n with open(buildpath / '.packaged', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef build_package(path, args):\n pkg = common.parse_package(path)\n packagedir = pkg['package']['name'] + '-' + pkg['package']['version']\n dirpath = path.parent\n orig_path = Path.cwd()\n os.chdir(dirpath)\n try:\n buildpath = dirpath / 'build'\n if not buildpath.is_dir():\n os.makedirs(buildpath)\n srcpath = download_and_extract(buildpath, packagedir, pkg, args)\n patch(path, srcpath, pkg, args)\n compile(path, srcpath, pkg, args)\n package_files(buildpath, srcpath, pkg, args)\n finally:\n os.chdir(orig_path)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser('Build a pyodide package.')\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"Path to meta.yaml package description\")\n parser.add_argument(\n '--cflags', type=str, nargs='?', default=common.DEFAULTCFLAGS,\n help='Extra compiling flags')\n parser.add_argument(\n '--ldflags', type=str, nargs='?', default=common.DEFAULTLDFLAGS,\n help='Extra linking flags')\n parser.add_argument(\n '--host', type=str, nargs='?', default=common.HOSTPYTHON,\n help='The path to the host Python installation')\n parser.add_argument(\n '--target', type=str, nargs='?', default=common.TARGETPYTHON,\n help='The path to the target Python installation')\n return parser.parse_args()\n\n\ndef main(args):\n path = Path(args.package[0]).resolve()\n build_package(path, args)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "path": "tools/buildpkg.py"}]}
2,428
215
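A minimal sketch of the pattern applied in the record above: pass the packager only names relative to the build directory and set `cwd=`, so the absolute build path is never baked into the generated `.js`. The command below is a placeholder standing in for Emscripten's `file_packager.py`; it simply echoes the arguments it receives.

```python
import subprocess
import sys
from pathlib import Path


def package(buildpath: Path, name: str) -> None:
    # Only relative names appear on the command line; cwd= tells the tool where
    # to write them, so no absolute paths leak into the produced artifacts.
    subprocess.run(
        [sys.executable, "-c", "import sys; print(sys.argv[1:])",  # placeholder tool
         name + ".data", "--js-output=" + name + ".js"],
        cwd=buildpath,
        check=True,
    )


package(Path("."), "demo")  # prints ['demo.data', '--js-output=demo.js']
```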
gh_patches_debug_4204
rasdani/github-patches
git_diff
statsmodels__statsmodels-9082
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Typo in CanCorr documentation and docstring CanCorr's documentation and docstring say that CanCorr has attributes x_cancoeff and y_cancoeff. However, they should say x_cancoef and y_cancoef. Should I submit a PR? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `statsmodels/multivariate/cancorr.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 """Canonical correlation analysis 4 5 author: Yichuan Liu 6 """ 7 import numpy as np 8 from numpy.linalg import svd 9 import scipy 10 import pandas as pd 11 12 from statsmodels.base.model import Model 13 from statsmodels.iolib import summary2 14 from .multivariate_ols import multivariate_stats 15 16 17 class CanCorr(Model): 18 """ 19 Canonical correlation analysis using singular value decomposition 20 21 For matrices exog=x and endog=y, find projections x_cancoef and y_cancoef 22 such that: 23 24 x1 = x * x_cancoef, x1' * x1 is identity matrix 25 y1 = y * y_cancoef, y1' * y1 is identity matrix 26 27 and the correlation between x1 and y1 is maximized. 28 29 Attributes 30 ---------- 31 endog : ndarray 32 See Parameters. 33 exog : ndarray 34 See Parameters. 35 cancorr : ndarray 36 The canonical correlation values 37 y_cancoeff : ndarray 38 The canonical coefficients for endog 39 x_cancoeff : ndarray 40 The canonical coefficients for exog 41 42 References 43 ---------- 44 .. [*] http://numerical.recipes/whp/notes/CanonCorrBySVD.pdf 45 .. [*] http://www.csun.edu/~ata20315/psy524/docs/Psy524%20Lecture%208%20CC.pdf 46 .. [*] http://www.mathematica-journal.com/2014/06/canonical-correlation-analysis/ 47 """ # noqa:E501 48 def __init__(self, endog, exog, tolerance=1e-8, missing='none', hasconst=None, **kwargs): 49 super(CanCorr, self).__init__(endog, exog, missing=missing, 50 hasconst=hasconst, **kwargs) 51 self._fit(tolerance) 52 53 def _fit(self, tolerance=1e-8): 54 """Fit the model 55 56 A ValueError is raised if there are singular values smaller than the 57 tolerance. The treatment of singular arrays might change in future. 58 59 Parameters 60 ---------- 61 tolerance : float 62 eigenvalue tolerance, values smaller than which is considered 0 63 """ 64 nobs, k_yvar = self.endog.shape 65 nobs, k_xvar = self.exog.shape 66 k = np.min([k_yvar, k_xvar]) 67 68 x = np.array(self.exog) 69 x = x - x.mean(0) 70 y = np.array(self.endog) 71 y = y - y.mean(0) 72 73 ux, sx, vx = svd(x, 0) 74 # vx_ds = vx.T divided by sx 75 vx_ds = vx.T 76 mask = sx > tolerance 77 if mask.sum() < len(mask): 78 raise ValueError('exog is collinear.') 79 vx_ds[:, mask] /= sx[mask] 80 uy, sy, vy = svd(y, 0) 81 # vy_ds = vy.T divided by sy 82 vy_ds = vy.T 83 mask = sy > tolerance 84 if mask.sum() < len(mask): 85 raise ValueError('endog is collinear.') 86 vy_ds[:, mask] /= sy[mask] 87 u, s, v = svd(ux.T.dot(uy), 0) 88 89 # Correct any roundoff 90 self.cancorr = np.array([max(0, min(s[i], 1)) for i in range(len(s))]) 91 92 self.x_cancoef = vx_ds.dot(u[:, :k]) 93 self.y_cancoef = vy_ds.dot(v.T[:, :k]) 94 95 def corr_test(self): 96 """Approximate F test 97 Perform multivariate statistical tests of the hypothesis that 98 there is no canonical correlation between endog and exog. 99 For each canonical correlation, testing its significance based on 100 Wilks' lambda. 
101 102 Returns 103 ------- 104 CanCorrTestResults instance 105 """ 106 nobs, k_yvar = self.endog.shape 107 nobs, k_xvar = self.exog.shape 108 eigenvals = np.power(self.cancorr, 2) 109 stats = pd.DataFrame(columns=['Canonical Correlation', "Wilks' lambda", 110 'Num DF','Den DF', 'F Value','Pr > F'], 111 index=list(range(len(eigenvals) - 1, -1, -1))) 112 prod = 1 113 for i in range(len(eigenvals) - 1, -1, -1): 114 prod *= 1 - eigenvals[i] 115 p = k_yvar - i 116 q = k_xvar - i 117 r = (nobs - k_yvar - 1) - (p - q + 1) / 2 118 u = (p * q - 2) / 4 119 df1 = p * q 120 if p ** 2 + q ** 2 - 5 > 0: 121 t = np.sqrt(((p * q) ** 2 - 4) / (p ** 2 + q ** 2 - 5)) 122 else: 123 t = 1 124 df2 = r * t - 2 * u 125 lmd = np.power(prod, 1 / t) 126 F = (1 - lmd) / lmd * df2 / df1 127 stats.loc[i, 'Canonical Correlation'] = self.cancorr[i] 128 stats.loc[i, "Wilks' lambda"] = prod 129 stats.loc[i, 'Num DF'] = df1 130 stats.loc[i, 'Den DF'] = df2 131 stats.loc[i, 'F Value'] = F 132 pval = scipy.stats.f.sf(F, df1, df2) 133 stats.loc[i, 'Pr > F'] = pval 134 ''' 135 # Wilk's Chi square test of each canonical correlation 136 df = (p - i + 1) * (q - i + 1) 137 chi2 = a * np.log(prod) 138 pval = stats.chi2.sf(chi2, df) 139 stats.loc[i, 'Canonical correlation'] = self.cancorr[i] 140 stats.loc[i, 'Chi-square'] = chi2 141 stats.loc[i, 'DF'] = df 142 stats.loc[i, 'Pr > ChiSq'] = pval 143 ''' 144 ind = stats.index.values[::-1] 145 stats = stats.loc[ind, :] 146 147 # Multivariate tests (remember x has mean removed) 148 stats_mv = multivariate_stats(eigenvals, 149 k_yvar, k_xvar, nobs - k_xvar - 1) 150 return CanCorrTestResults(stats, stats_mv) 151 152 153 class CanCorrTestResults: 154 """ 155 Canonical correlation results class 156 157 Attributes 158 ---------- 159 stats : DataFrame 160 Contain statistical tests results for each canonical correlation 161 stats_mv : DataFrame 162 Contain the multivariate statistical tests results 163 """ 164 def __init__(self, stats, stats_mv): 165 self.stats = stats 166 self.stats_mv = stats_mv 167 168 def __str__(self): 169 return self.summary().__str__() 170 171 def summary(self): 172 summ = summary2.Summary() 173 summ.add_title('Cancorr results') 174 summ.add_df(self.stats) 175 summ.add_dict({'': ''}) 176 summ.add_dict({'Multivariate Statistics and F Approximations': ''}) 177 summ.add_df(self.stats_mv) 178 return summ 179 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/statsmodels/multivariate/cancorr.py b/statsmodels/multivariate/cancorr.py --- a/statsmodels/multivariate/cancorr.py +++ b/statsmodels/multivariate/cancorr.py @@ -34,9 +34,9 @@ See Parameters. cancorr : ndarray The canonical correlation values - y_cancoeff : ndarray + y_cancoef : ndarray The canonical coefficients for endog - x_cancoeff : ndarray + x_cancoef : ndarray The canonical coefficients for exog References
{"golden_diff": "diff --git a/statsmodels/multivariate/cancorr.py b/statsmodels/multivariate/cancorr.py\n--- a/statsmodels/multivariate/cancorr.py\n+++ b/statsmodels/multivariate/cancorr.py\n@@ -34,9 +34,9 @@\n See Parameters.\n cancorr : ndarray\n The canonical correlation values\n- y_cancoeff : ndarray\n+ y_cancoef : ndarray\n The canonical coefficients for endog\n- x_cancoeff : ndarray\n+ x_cancoef : ndarray\n The canonical coefficients for exog\n \n References\n", "issue": "Typo in CanCorr documentation and docstring\nCanCorr's documentation and docstring say that CanCorr has attributes x_cancoeff and y_cancoeff. However, they should say x_cancoef and y_cancoef. Should I submit a PR?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Canonical correlation analysis\n\nauthor: Yichuan Liu\n\"\"\"\nimport numpy as np\nfrom numpy.linalg import svd\nimport scipy\nimport pandas as pd\n\nfrom statsmodels.base.model import Model\nfrom statsmodels.iolib import summary2\nfrom .multivariate_ols import multivariate_stats\n\n\nclass CanCorr(Model):\n \"\"\"\n Canonical correlation analysis using singular value decomposition\n\n For matrices exog=x and endog=y, find projections x_cancoef and y_cancoef\n such that:\n\n x1 = x * x_cancoef, x1' * x1 is identity matrix\n y1 = y * y_cancoef, y1' * y1 is identity matrix\n\n and the correlation between x1 and y1 is maximized.\n\n Attributes\n ----------\n endog : ndarray\n See Parameters.\n exog : ndarray\n See Parameters.\n cancorr : ndarray\n The canonical correlation values\n y_cancoeff : ndarray\n The canonical coefficients for endog\n x_cancoeff : ndarray\n The canonical coefficients for exog\n\n References\n ----------\n .. [*] http://numerical.recipes/whp/notes/CanonCorrBySVD.pdf\n .. [*] http://www.csun.edu/~ata20315/psy524/docs/Psy524%20Lecture%208%20CC.pdf\n .. [*] http://www.mathematica-journal.com/2014/06/canonical-correlation-analysis/\n \"\"\" # noqa:E501\n def __init__(self, endog, exog, tolerance=1e-8, missing='none', hasconst=None, **kwargs):\n super(CanCorr, self).__init__(endog, exog, missing=missing,\n hasconst=hasconst, **kwargs)\n self._fit(tolerance)\n\n def _fit(self, tolerance=1e-8):\n \"\"\"Fit the model\n\n A ValueError is raised if there are singular values smaller than the\n tolerance. 
The treatment of singular arrays might change in future.\n\n Parameters\n ----------\n tolerance : float\n eigenvalue tolerance, values smaller than which is considered 0\n \"\"\"\n nobs, k_yvar = self.endog.shape\n nobs, k_xvar = self.exog.shape\n k = np.min([k_yvar, k_xvar])\n\n x = np.array(self.exog)\n x = x - x.mean(0)\n y = np.array(self.endog)\n y = y - y.mean(0)\n\n ux, sx, vx = svd(x, 0)\n # vx_ds = vx.T divided by sx\n vx_ds = vx.T\n mask = sx > tolerance\n if mask.sum() < len(mask):\n raise ValueError('exog is collinear.')\n vx_ds[:, mask] /= sx[mask]\n uy, sy, vy = svd(y, 0)\n # vy_ds = vy.T divided by sy\n vy_ds = vy.T\n mask = sy > tolerance\n if mask.sum() < len(mask):\n raise ValueError('endog is collinear.')\n vy_ds[:, mask] /= sy[mask]\n u, s, v = svd(ux.T.dot(uy), 0)\n\n # Correct any roundoff\n self.cancorr = np.array([max(0, min(s[i], 1)) for i in range(len(s))])\n\n self.x_cancoef = vx_ds.dot(u[:, :k])\n self.y_cancoef = vy_ds.dot(v.T[:, :k])\n\n def corr_test(self):\n \"\"\"Approximate F test\n Perform multivariate statistical tests of the hypothesis that\n there is no canonical correlation between endog and exog.\n For each canonical correlation, testing its significance based on\n Wilks' lambda.\n\n Returns\n -------\n CanCorrTestResults instance\n \"\"\"\n nobs, k_yvar = self.endog.shape\n nobs, k_xvar = self.exog.shape\n eigenvals = np.power(self.cancorr, 2)\n stats = pd.DataFrame(columns=['Canonical Correlation', \"Wilks' lambda\",\n 'Num DF','Den DF', 'F Value','Pr > F'],\n index=list(range(len(eigenvals) - 1, -1, -1)))\n prod = 1\n for i in range(len(eigenvals) - 1, -1, -1):\n prod *= 1 - eigenvals[i]\n p = k_yvar - i\n q = k_xvar - i\n r = (nobs - k_yvar - 1) - (p - q + 1) / 2\n u = (p * q - 2) / 4\n df1 = p * q\n if p ** 2 + q ** 2 - 5 > 0:\n t = np.sqrt(((p * q) ** 2 - 4) / (p ** 2 + q ** 2 - 5))\n else:\n t = 1\n df2 = r * t - 2 * u\n lmd = np.power(prod, 1 / t)\n F = (1 - lmd) / lmd * df2 / df1\n stats.loc[i, 'Canonical Correlation'] = self.cancorr[i]\n stats.loc[i, \"Wilks' lambda\"] = prod\n stats.loc[i, 'Num DF'] = df1\n stats.loc[i, 'Den DF'] = df2\n stats.loc[i, 'F Value'] = F\n pval = scipy.stats.f.sf(F, df1, df2)\n stats.loc[i, 'Pr > F'] = pval\n '''\n # Wilk's Chi square test of each canonical correlation\n df = (p - i + 1) * (q - i + 1)\n chi2 = a * np.log(prod)\n pval = stats.chi2.sf(chi2, df)\n stats.loc[i, 'Canonical correlation'] = self.cancorr[i]\n stats.loc[i, 'Chi-square'] = chi2\n stats.loc[i, 'DF'] = df\n stats.loc[i, 'Pr > ChiSq'] = pval\n '''\n ind = stats.index.values[::-1]\n stats = stats.loc[ind, :]\n\n # Multivariate tests (remember x has mean removed)\n stats_mv = multivariate_stats(eigenvals,\n k_yvar, k_xvar, nobs - k_xvar - 1)\n return CanCorrTestResults(stats, stats_mv)\n\n\nclass CanCorrTestResults:\n \"\"\"\n Canonical correlation results class\n\n Attributes\n ----------\n stats : DataFrame\n Contain statistical tests results for each canonical correlation\n stats_mv : DataFrame\n Contain the multivariate statistical tests results\n \"\"\"\n def __init__(self, stats, stats_mv):\n self.stats = stats\n self.stats_mv = stats_mv\n\n def __str__(self):\n return self.summary().__str__()\n\n def summary(self):\n summ = summary2.Summary()\n summ.add_title('Cancorr results')\n summ.add_df(self.stats)\n summ.add_dict({'': ''})\n summ.add_dict({'Multivariate Statistics and F Approximations': ''})\n summ.add_df(self.stats_mv)\n return summ\n", "path": "statsmodels/multivariate/cancorr.py"}], "after_files": [{"content": "# -*- coding: 
utf-8 -*-\n\n\"\"\"Canonical correlation analysis\n\nauthor: Yichuan Liu\n\"\"\"\nimport numpy as np\nfrom numpy.linalg import svd\nimport scipy\nimport pandas as pd\n\nfrom statsmodels.base.model import Model\nfrom statsmodels.iolib import summary2\nfrom .multivariate_ols import multivariate_stats\n\n\nclass CanCorr(Model):\n \"\"\"\n Canonical correlation analysis using singular value decomposition\n\n For matrices exog=x and endog=y, find projections x_cancoef and y_cancoef\n such that:\n\n x1 = x * x_cancoef, x1' * x1 is identity matrix\n y1 = y * y_cancoef, y1' * y1 is identity matrix\n\n and the correlation between x1 and y1 is maximized.\n\n Attributes\n ----------\n endog : ndarray\n See Parameters.\n exog : ndarray\n See Parameters.\n cancorr : ndarray\n The canonical correlation values\n y_cancoef : ndarray\n The canonical coefficients for endog\n x_cancoef : ndarray\n The canonical coefficients for exog\n\n References\n ----------\n .. [*] http://numerical.recipes/whp/notes/CanonCorrBySVD.pdf\n .. [*] http://www.csun.edu/~ata20315/psy524/docs/Psy524%20Lecture%208%20CC.pdf\n .. [*] http://www.mathematica-journal.com/2014/06/canonical-correlation-analysis/\n \"\"\" # noqa:E501\n def __init__(self, endog, exog, tolerance=1e-8, missing='none', hasconst=None, **kwargs):\n super(CanCorr, self).__init__(endog, exog, missing=missing,\n hasconst=hasconst, **kwargs)\n self._fit(tolerance)\n\n def _fit(self, tolerance=1e-8):\n \"\"\"Fit the model\n\n A ValueError is raised if there are singular values smaller than the\n tolerance. The treatment of singular arrays might change in future.\n\n Parameters\n ----------\n tolerance : float\n eigenvalue tolerance, values smaller than which is considered 0\n \"\"\"\n nobs, k_yvar = self.endog.shape\n nobs, k_xvar = self.exog.shape\n k = np.min([k_yvar, k_xvar])\n\n x = np.array(self.exog)\n x = x - x.mean(0)\n y = np.array(self.endog)\n y = y - y.mean(0)\n\n ux, sx, vx = svd(x, 0)\n # vx_ds = vx.T divided by sx\n vx_ds = vx.T\n mask = sx > tolerance\n if mask.sum() < len(mask):\n raise ValueError('exog is collinear.')\n vx_ds[:, mask] /= sx[mask]\n uy, sy, vy = svd(y, 0)\n # vy_ds = vy.T divided by sy\n vy_ds = vy.T\n mask = sy > tolerance\n if mask.sum() < len(mask):\n raise ValueError('endog is collinear.')\n vy_ds[:, mask] /= sy[mask]\n u, s, v = svd(ux.T.dot(uy), 0)\n\n # Correct any roundoff\n self.cancorr = np.array([max(0, min(s[i], 1)) for i in range(len(s))])\n\n self.x_cancoef = vx_ds.dot(u[:, :k])\n self.y_cancoef = vy_ds.dot(v.T[:, :k])\n\n def corr_test(self):\n \"\"\"Approximate F test\n Perform multivariate statistical tests of the hypothesis that\n there is no canonical correlation between endog and exog.\n For each canonical correlation, testing its significance based on\n Wilks' lambda.\n\n Returns\n -------\n CanCorrTestResults instance\n \"\"\"\n nobs, k_yvar = self.endog.shape\n nobs, k_xvar = self.exog.shape\n eigenvals = np.power(self.cancorr, 2)\n stats = pd.DataFrame(columns=['Canonical Correlation', \"Wilks' lambda\",\n 'Num DF','Den DF', 'F Value','Pr > F'],\n index=list(range(len(eigenvals) - 1, -1, -1)))\n prod = 1\n for i in range(len(eigenvals) - 1, -1, -1):\n prod *= 1 - eigenvals[i]\n p = k_yvar - i\n q = k_xvar - i\n r = (nobs - k_yvar - 1) - (p - q + 1) / 2\n u = (p * q - 2) / 4\n df1 = p * q\n if p ** 2 + q ** 2 - 5 > 0:\n t = np.sqrt(((p * q) ** 2 - 4) / (p ** 2 + q ** 2 - 5))\n else:\n t = 1\n df2 = r * t - 2 * u\n lmd = np.power(prod, 1 / t)\n F = (1 - lmd) / lmd * df2 / df1\n stats.loc[i, 'Canonical 
Correlation'] = self.cancorr[i]\n stats.loc[i, \"Wilks' lambda\"] = prod\n stats.loc[i, 'Num DF'] = df1\n stats.loc[i, 'Den DF'] = df2\n stats.loc[i, 'F Value'] = F\n pval = scipy.stats.f.sf(F, df1, df2)\n stats.loc[i, 'Pr > F'] = pval\n '''\n # Wilk's Chi square test of each canonical correlation\n df = (p - i + 1) * (q - i + 1)\n chi2 = a * np.log(prod)\n pval = stats.chi2.sf(chi2, df)\n stats.loc[i, 'Canonical correlation'] = self.cancorr[i]\n stats.loc[i, 'Chi-square'] = chi2\n stats.loc[i, 'DF'] = df\n stats.loc[i, 'Pr > ChiSq'] = pval\n '''\n ind = stats.index.values[::-1]\n stats = stats.loc[ind, :]\n\n # Multivariate tests (remember x has mean removed)\n stats_mv = multivariate_stats(eigenvals,\n k_yvar, k_xvar, nobs - k_xvar - 1)\n return CanCorrTestResults(stats, stats_mv)\n\n\nclass CanCorrTestResults:\n \"\"\"\n Canonical correlation results class\n\n Attributes\n ----------\n stats : DataFrame\n Contain statistical tests results for each canonical correlation\n stats_mv : DataFrame\n Contain the multivariate statistical tests results\n \"\"\"\n def __init__(self, stats, stats_mv):\n self.stats = stats\n self.stats_mv = stats_mv\n\n def __str__(self):\n return self.summary().__str__()\n\n def summary(self):\n summ = summary2.Summary()\n summ.add_title('Cancorr results')\n summ.add_df(self.stats)\n summ.add_dict({'': ''})\n summ.add_dict({'Multivariate Statistics and F Approximations': ''})\n summ.add_df(self.stats_mv)\n return summ\n", "path": "statsmodels/multivariate/cancorr.py"}]}
2,377
131
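A quick illustrative check of the attribute names that this record's docstring fix documents (`x_cancoef` / `y_cancoef`, not `x_cancoeff` / `y_cancoeff`). It assumes `statsmodels` and `numpy` are installed and uses random, correlated data purely for demonstration.

```python
import numpy as np
from statsmodels.multivariate.cancorr import CanCorr

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 3))                                       # exog
y = x @ rng.normal(size=(3, 2)) + 0.1 * rng.normal(size=(100, 2))   # endog, correlated with x

cc = CanCorr(y, x)
print(cc.cancorr)                              # canonical correlation values
print(cc.x_cancoef.shape, cc.y_cancoef.shape)  # (3, 2) and (2, 2): the correctly named attributes
```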
gh_patches_debug_26969
rasdani/github-patches
git_diff
conda__conda-707
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add ability to keep retrying with a lock error The yum installer (IIRC) has a nice feature that it will keep trying every 10 seconds or so if there is a lock error. This could be useful for conda. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conda/lock.py` Content: ``` 1 # (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io 2 # All Rights Reserved 3 # 4 # conda is distributed under the terms of the BSD 3-clause license. 5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause. 6 7 """ 8 Tools for working with locks 9 10 A lock is just an empty directory. We use directories because this lets us use 11 the race condition-proof os.makedirs. 12 13 For now, there is one global lock for all of conda, because some things happen 14 globally (such as downloading packages). 15 16 We don't raise an error if the lock is named with the current PID 17 """ 18 19 import os 20 from os.path import join 21 import glob 22 23 24 LOCKFN = '.conda_lock' 25 26 27 class Locked(object): 28 """ 29 Context manager to handle locks. 30 """ 31 def __init__(self, path): 32 self.path = path 33 self.end = "-" + str(os.getpid()) 34 self.lock_path = join(self.path, LOCKFN + self.end) 35 self.pattern = join(self.path, LOCKFN + '-*') 36 self.remove = True 37 38 def __enter__(self): 39 files = glob.glob(self.pattern) 40 if files and not files[0].endswith(self.end): 41 # Keep the string "LOCKERROR" in this string so that external 42 # programs can look for it. 43 raise RuntimeError("""\ 44 LOCKERROR: It looks like conda is already doing something. 45 The lock %s was found. Wait for it to finish before continuing. 46 If you are sure that conda is not running, remove it and try again. 47 You can also use: $ conda clean --lock""" % self.lock_path) 48 49 if not files: 50 try: 51 os.makedirs(self.lock_path) 52 except OSError: 53 pass 54 else: # PID lock already here --- someone else will remove it. 55 self.remove = False 56 57 def __exit__(self, exc_type, exc_value, traceback): 58 if self.remove: 59 for path in self.lock_path, self.path: 60 try: 61 os.rmdir(path) 62 except OSError: 63 pass 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conda/lock.py b/conda/lock.py --- a/conda/lock.py +++ b/conda/lock.py @@ -19,7 +19,7 @@ import os from os.path import join import glob - +from time import sleep LOCKFN = '.conda_lock' @@ -36,15 +36,28 @@ self.remove = True def __enter__(self): - files = glob.glob(self.pattern) - if files and not files[0].endswith(self.end): - # Keep the string "LOCKERROR" in this string so that external - # programs can look for it. - raise RuntimeError("""\ -LOCKERROR: It looks like conda is already doing something. -The lock %s was found. Wait for it to finish before continuing. -If you are sure that conda is not running, remove it and try again. -You can also use: $ conda clean --lock""" % self.lock_path) + retries = 10 + # Keep the string "LOCKERROR" in this string so that external + # programs can look for it. + lockstr = ("""\ + LOCKERROR: It looks like conda is already doing something. + The lock %s was found. Wait for it to finish before continuing. + If you are sure that conda is not running, remove it and try again. + You can also use: $ conda clean --lock""" % self.lock_path) + sleeptime = 1 + while retries: + files = glob.glob(self.pattern) + if files and not files[0].endswith(self.end): + print(lockstr) + print("Sleeping for %s seconds" % sleeptime) + sleep(sleeptime) + sleeptime *= 2 + retries -= 1 + else: + break + else: + print("Exceeded max retries, giving up") + raise RuntimeError(lockstr) if not files: try:
{"golden_diff": "diff --git a/conda/lock.py b/conda/lock.py\n--- a/conda/lock.py\n+++ b/conda/lock.py\n@@ -19,7 +19,7 @@\n import os\n from os.path import join\n import glob\n-\n+from time import sleep\n \n LOCKFN = '.conda_lock'\n \n@@ -36,15 +36,28 @@\n self.remove = True\n \n def __enter__(self):\n- files = glob.glob(self.pattern)\n- if files and not files[0].endswith(self.end):\n- # Keep the string \"LOCKERROR\" in this string so that external\n- # programs can look for it.\n- raise RuntimeError(\"\"\"\\\n-LOCKERROR: It looks like conda is already doing something.\n-The lock %s was found. Wait for it to finish before continuing.\n-If you are sure that conda is not running, remove it and try again.\n-You can also use: $ conda clean --lock\"\"\" % self.lock_path)\n+ retries = 10\n+ # Keep the string \"LOCKERROR\" in this string so that external\n+ # programs can look for it.\n+ lockstr = (\"\"\"\\\n+ LOCKERROR: It looks like conda is already doing something.\n+ The lock %s was found. Wait for it to finish before continuing.\n+ If you are sure that conda is not running, remove it and try again.\n+ You can also use: $ conda clean --lock\"\"\" % self.lock_path)\n+ sleeptime = 1\n+ while retries:\n+ files = glob.glob(self.pattern)\n+ if files and not files[0].endswith(self.end):\n+ print(lockstr)\n+ print(\"Sleeping for %s seconds\" % sleeptime)\n+ sleep(sleeptime)\n+ sleeptime *= 2\n+ retries -= 1\n+ else:\n+ break\n+ else:\n+ print(\"Exceeded max retries, giving up\")\n+ raise RuntimeError(lockstr)\n \n if not files:\n try:\n", "issue": "Add ability to keep retrying with a lock error\nThe yum installer (IIRC) has a nice feature that it will keep trying every 10 seconds or so if there is a lock error. This could be useful for conda. \n\n", "before_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\n\"\"\"\nTools for working with locks\n\nA lock is just an empty directory. We use directories because this lets us use\nthe race condition-proof os.makedirs.\n\nFor now, there is one global lock for all of conda, because some things happen\nglobally (such as downloading packages).\n\nWe don't raise an error if the lock is named with the current PID\n\"\"\"\n\nimport os\nfrom os.path import join\nimport glob\n\n\nLOCKFN = '.conda_lock'\n\n\nclass Locked(object):\n \"\"\"\n Context manager to handle locks.\n \"\"\"\n def __init__(self, path):\n self.path = path\n self.end = \"-\" + str(os.getpid())\n self.lock_path = join(self.path, LOCKFN + self.end)\n self.pattern = join(self.path, LOCKFN + '-*')\n self.remove = True\n\n def __enter__(self):\n files = glob.glob(self.pattern)\n if files and not files[0].endswith(self.end):\n # Keep the string \"LOCKERROR\" in this string so that external\n # programs can look for it.\n raise RuntimeError(\"\"\"\\\nLOCKERROR: It looks like conda is already doing something.\nThe lock %s was found. 
Wait for it to finish before continuing.\nIf you are sure that conda is not running, remove it and try again.\nYou can also use: $ conda clean --lock\"\"\" % self.lock_path)\n\n if not files:\n try:\n os.makedirs(self.lock_path)\n except OSError:\n pass\n else: # PID lock already here --- someone else will remove it.\n self.remove = False\n\n def __exit__(self, exc_type, exc_value, traceback):\n if self.remove:\n for path in self.lock_path, self.path:\n try:\n os.rmdir(path)\n except OSError:\n pass\n", "path": "conda/lock.py"}], "after_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\n\"\"\"\nTools for working with locks\n\nA lock is just an empty directory. We use directories because this lets us use\nthe race condition-proof os.makedirs.\n\nFor now, there is one global lock for all of conda, because some things happen\nglobally (such as downloading packages).\n\nWe don't raise an error if the lock is named with the current PID\n\"\"\"\n\nimport os\nfrom os.path import join\nimport glob\nfrom time import sleep\n\nLOCKFN = '.conda_lock'\n\n\nclass Locked(object):\n \"\"\"\n Context manager to handle locks.\n \"\"\"\n def __init__(self, path):\n self.path = path\n self.end = \"-\" + str(os.getpid())\n self.lock_path = join(self.path, LOCKFN + self.end)\n self.pattern = join(self.path, LOCKFN + '-*')\n self.remove = True\n\n def __enter__(self):\n retries = 10\n # Keep the string \"LOCKERROR\" in this string so that external\n # programs can look for it.\n lockstr = (\"\"\"\\\n LOCKERROR: It looks like conda is already doing something.\n The lock %s was found. Wait for it to finish before continuing.\n If you are sure that conda is not running, remove it and try again.\n You can also use: $ conda clean --lock\"\"\" % self.lock_path)\n sleeptime = 1\n while retries:\n files = glob.glob(self.pattern)\n if files and not files[0].endswith(self.end):\n print(lockstr)\n print(\"Sleeping for %s seconds\" % sleeptime)\n sleep(sleeptime)\n sleeptime *= 2\n retries -= 1\n else:\n break\n else:\n print(\"Exceeded max retries, giving up\")\n raise RuntimeError(lockstr)\n\n if not files:\n try:\n os.makedirs(self.lock_path)\n except OSError:\n pass\n else: # PID lock already here --- someone else will remove it.\n self.remove = False\n\n def __exit__(self, exc_type, exc_value, traceback):\n if self.remove:\n for path in self.lock_path, self.path:\n try:\n os.rmdir(path)\n except OSError:\n pass\n", "path": "conda/lock.py"}]}
884
451