Column              Type     Range
problem_id          string   lengths 18-22
source              string   1 distinct value
task_type           string   1 distinct value
in_source_id        string   lengths 13-58
prompt              string   lengths 1.1k-25.4k
golden_diff         string   lengths 145-5.13k
verification_info   string   lengths 582-39.1k
num_tokens          int64    values 271-4.1k
num_tokens_diff     int64    values 47-1.02k
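Each row pairs a GitHub issue prompt with a reference (golden) diff and the file contents used to verify a candidate patch. A minimal sketch for loading and inspecting rows with the `datasets` library; the Hub id is taken from the `source` column shown in the records below, and the `train` split name is an assumption:

```python
from datasets import load_dataset

# Hub id taken from the `source` column; the split name is assumed.
ds = load_dataset("rasdani/github-patches", split="train")

print(ds.column_names)            # should match the schema listed above
row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens"])
print(row["golden_diff"][:300])   # start of the reference patch
```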
problem_id: gh_patches_debug_29298
source: rasdani/github-patches
task_type: git_diff
in_source_id: qutebrowser__qutebrowser-5502
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Have option to disable Mouse 4 and Mouse 5 from jumping forward and back through tab history. When using Mouse 4 or Mouse 5 as a global hotkey for another application, for example a voice chat program such as Discord or Mumble using either button as push to talk, qutebrowser still receives the button press and goes forward and backwards through history while focused. Some way to disable Mouse 4 and Mouse 5 from being used by qutebrowser would be cool. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `qutebrowser/browser/eventfilter.py` Content: ``` 1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: 2 3 # Copyright 2016-2020 Florian Bruhin (The Compiler) <[email protected]> 4 # 5 # This file is part of qutebrowser. 6 # 7 # qutebrowser is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU General Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # qutebrowser is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU General Public License for more details. 16 # 17 # You should have received a copy of the GNU General Public License 18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. 19 20 """Event handling for a browser tab.""" 21 22 from PyQt5.QtCore import QObject, QEvent, Qt, QTimer 23 24 from qutebrowser.config import config 25 from qutebrowser.utils import message, log, usertypes, qtutils, objreg 26 from qutebrowser.misc import objects 27 from qutebrowser.keyinput import modeman 28 29 30 class ChildEventFilter(QObject): 31 32 """An event filter re-adding TabEventFilter on ChildEvent. 33 34 This is needed because QtWebEngine likes to randomly change its 35 focusProxy... 36 37 FIXME:qtwebengine Add a test for this happening 38 39 Attributes: 40 _filter: The event filter to install. 41 _widget: The widget expected to send out childEvents. 
42 """ 43 44 def __init__(self, eventfilter, widget, win_id, parent=None): 45 super().__init__(parent) 46 self._filter = eventfilter 47 assert widget is not None 48 self._widget = widget 49 self._win_id = win_id 50 51 def eventFilter(self, obj, event): 52 """Act on ChildAdded events.""" 53 if event.type() == QEvent.ChildAdded: 54 child = event.child() 55 log.misc.debug("{} got new child {}, installing filter".format( 56 obj, child)) 57 assert obj is self._widget 58 child.installEventFilter(self._filter) 59 60 if qtutils.version_check('5.11', compiled=False, exact=True): 61 # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-68076 62 pass_modes = [usertypes.KeyMode.command, 63 usertypes.KeyMode.prompt, 64 usertypes.KeyMode.yesno] 65 if modeman.instance(self._win_id).mode not in pass_modes: 66 tabbed_browser = objreg.get('tabbed-browser', 67 scope='window', 68 window=self._win_id) 69 current_index = tabbed_browser.widget.currentIndex() 70 try: 71 widget_index = tabbed_browser.widget.indexOf( 72 self._widget.parent()) 73 except RuntimeError: 74 widget_index = -1 75 if current_index == widget_index: 76 QTimer.singleShot(0, self._widget.setFocus) 77 78 elif event.type() == QEvent.ChildRemoved: 79 child = event.child() 80 log.misc.debug("{}: removed child {}".format(obj, child)) 81 82 return False 83 84 85 class TabEventFilter(QObject): 86 87 """Handle mouse/keyboard events on a tab. 88 89 Attributes: 90 _tab: The browsertab object this filter is installed on. 91 _handlers: A dict of handler functions for the handled events. 92 _ignore_wheel_event: Whether to ignore the next wheelEvent. 93 _check_insertmode_on_release: Whether an insertmode check should be 94 done when the mouse is released. 95 """ 96 97 def __init__(self, tab, *, parent=None): 98 super().__init__(parent) 99 self._tab = tab 100 self._handlers = { 101 QEvent.MouseButtonPress: self._handle_mouse_press, 102 QEvent.MouseButtonRelease: self._handle_mouse_release, 103 QEvent.Wheel: self._handle_wheel, 104 QEvent.ContextMenu: self._handle_context_menu, 105 QEvent.KeyRelease: self._handle_key_release, 106 } 107 self._ignore_wheel_event = False 108 self._check_insertmode_on_release = False 109 110 def _handle_mouse_press(self, e): 111 """Handle pressing of a mouse button. 112 113 Args: 114 e: The QMouseEvent. 115 116 Return: 117 True if the event should be filtered, False otherwise. 118 """ 119 is_rocker_gesture = (config.val.input.rocker_gestures and 120 e.buttons() == Qt.LeftButton | Qt.RightButton) 121 122 if e.button() in [Qt.XButton1, Qt.XButton2] or is_rocker_gesture: 123 self._mousepress_backforward(e) 124 return True 125 126 self._ignore_wheel_event = True 127 128 pos = e.pos() 129 if pos.x() < 0 or pos.y() < 0: 130 log.mouse.warning("Ignoring invalid click at {}".format(pos)) 131 return False 132 133 if e.button() != Qt.NoButton: 134 self._tab.elements.find_at_pos(pos, self._mousepress_insertmode_cb) 135 136 return False 137 138 def _handle_mouse_release(self, _e): 139 """Handle releasing of a mouse button. 140 141 Args: 142 e: The QMouseEvent. 143 144 Return: 145 True if the event should be filtered, False otherwise. 146 """ 147 # We want to make sure we check the focus element after the WebView is 148 # updated completely. 149 QTimer.singleShot(0, self._mouserelease_insertmode) 150 return False 151 152 def _handle_wheel(self, e): 153 """Zoom on Ctrl-Mousewheel. 154 155 Args: 156 e: The QWheelEvent. 157 158 Return: 159 True if the event should be filtered, False otherwise. 
160 """ 161 if self._ignore_wheel_event: 162 # See https://github.com/qutebrowser/qutebrowser/issues/395 163 self._ignore_wheel_event = False 164 return True 165 166 # Don't allow scrolling while hinting 167 mode = modeman.instance(self._tab.win_id).mode 168 if mode == usertypes.KeyMode.hint: 169 return True 170 171 elif e.modifiers() & Qt.ControlModifier: 172 if mode == usertypes.KeyMode.passthrough: 173 return False 174 175 divider = config.val.zoom.mouse_divider 176 if divider == 0: 177 # Disable mouse zooming 178 return True 179 180 factor = self._tab.zoom.factor() + (e.angleDelta().y() / divider) 181 if factor < 0: 182 return True 183 184 perc = int(100 * factor) 185 message.info("Zoom level: {}%".format(perc), replace=True) 186 self._tab.zoom.set_factor(factor) 187 return True 188 elif (e.modifiers() & Qt.ShiftModifier and 189 not qtutils.version_check('5.9', compiled=False)): 190 if e.angleDelta().y() > 0: 191 self._tab.scroller.left() 192 else: 193 self._tab.scroller.right() 194 return True 195 196 return False 197 198 def _handle_context_menu(self, _e): 199 """Suppress context menus if rocker gestures are turned on. 200 201 Args: 202 e: The QContextMenuEvent. 203 204 Return: 205 True if the event should be filtered, False otherwise. 206 """ 207 return config.val.input.rocker_gestures 208 209 def _handle_key_release(self, e): 210 """Ignore repeated key release events going to the website. 211 212 WORKAROUND for https://bugreports.qt.io/browse/QTBUG-77208 213 214 Args: 215 e: The QKeyEvent. 216 217 Return: 218 True if the event should be filtered, False otherwise. 219 """ 220 return (e.isAutoRepeat() and 221 qtutils.version_check('5.10', compiled=False) and 222 not qtutils.version_check('5.14', compiled=False) and 223 objects.backend == usertypes.Backend.QtWebEngine) 224 225 def _mousepress_insertmode_cb(self, elem): 226 """Check if the clicked element is editable.""" 227 if elem is None: 228 # Something didn't work out, let's find the focus element after 229 # a mouse release. 230 log.mouse.debug("Got None element, scheduling check on " 231 "mouse release") 232 self._check_insertmode_on_release = True 233 return 234 235 if elem.is_editable(): 236 log.mouse.debug("Clicked editable element!") 237 if config.val.input.insert_mode.auto_enter: 238 modeman.enter(self._tab.win_id, usertypes.KeyMode.insert, 239 'click', only_if_normal=True) 240 else: 241 log.mouse.debug("Clicked non-editable element!") 242 if config.val.input.insert_mode.auto_leave: 243 modeman.leave(self._tab.win_id, usertypes.KeyMode.insert, 244 'click', maybe=True) 245 246 def _mouserelease_insertmode(self): 247 """If we have an insertmode check scheduled, handle it.""" 248 if not self._check_insertmode_on_release: 249 return 250 self._check_insertmode_on_release = False 251 252 def mouserelease_insertmode_cb(elem): 253 """Callback which gets called from JS.""" 254 if elem is None: 255 log.mouse.debug("Element vanished!") 256 return 257 258 if elem.is_editable(): 259 log.mouse.debug("Clicked editable element (delayed)!") 260 modeman.enter(self._tab.win_id, usertypes.KeyMode.insert, 261 'click-delayed', only_if_normal=True) 262 else: 263 log.mouse.debug("Clicked non-editable element (delayed)!") 264 if config.val.input.insert_mode.auto_leave: 265 modeman.leave(self._tab.win_id, usertypes.KeyMode.insert, 266 'click-delayed', maybe=True) 267 268 self._tab.elements.find_focused(mouserelease_insertmode_cb) 269 270 def _mousepress_backforward(self, e): 271 """Handle back/forward mouse button presses. 
272 273 Args: 274 e: The QMouseEvent. 275 276 Return: 277 True if the event should be filtered, False otherwise. 278 """ 279 if e.button() in [Qt.XButton1, Qt.LeftButton]: 280 # Back button on mice which have it, or rocker gesture 281 if self._tab.history.can_go_back(): 282 self._tab.history.back() 283 else: 284 message.error("At beginning of history.") 285 elif e.button() in [Qt.XButton2, Qt.RightButton]: 286 # Forward button on mice which have it, or rocker gesture 287 if self._tab.history.can_go_forward(): 288 self._tab.history.forward() 289 else: 290 message.error("At end of history.") 291 292 def eventFilter(self, obj, event): 293 """Filter events going to a QWeb(Engine)View. 294 295 Return: 296 True if the event should be filtered, False otherwise. 297 """ 298 evtype = event.type() 299 if evtype not in self._handlers: 300 return False 301 if obj is not self._tab.private_api.event_target(): 302 log.mouse.debug("Ignoring {} to {}".format( 303 event.__class__.__name__, obj)) 304 return False 305 return self._handlers[evtype](event) 306 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/qutebrowser/browser/eventfilter.py b/qutebrowser/browser/eventfilter.py --- a/qutebrowser/browser/eventfilter.py +++ b/qutebrowser/browser/eventfilter.py @@ -116,7 +116,7 @@ Return: True if the event should be filtered, False otherwise. """ - is_rocker_gesture = (config.val.input.rocker_gestures and + is_rocker_gesture = (config.val.input.mouse.rocker_gestures and e.buttons() == Qt.LeftButton | Qt.RightButton) if e.button() in [Qt.XButton1, Qt.XButton2] or is_rocker_gesture: @@ -204,7 +204,7 @@ Return: True if the event should be filtered, False otherwise. """ - return config.val.input.rocker_gestures + return config.val.input.mouse.rocker_gestures def _handle_key_release(self, e): """Ignore repeated key release events going to the website. @@ -276,6 +276,11 @@ Return: True if the event should be filtered, False otherwise. """ + if (not config.val.input.mouse.back_forward_buttons and + e.button() in [Qt.XButton1, Qt.XButton2]): + # Back and forward on mice are disabled + return + if e.button() in [Qt.XButton1, Qt.LeftButton]: # Back button on mice which have it, or rocker gesture if self._tab.history.can_go_back():
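The golden diff above introduces an `input.mouse.back_forward_buttons` setting and reads `rocker_gestures` from the same `input.mouse` group; the JSON blob that follows is the row's `verification_info`, bundling the issue text with the before/after file contents. As a usage illustration only, assuming qutebrowser's usual `config.py` conventions (where `c` is the injected config object), an affected user could opt out of the mouse history buttons like this:

```python
# Hypothetical snippet for ~/.config/qutebrowser/config.py.
# The option name comes from the patch above; setting it to False stops
# Mouse 4 / Mouse 5 (Qt.XButton1 / Qt.XButton2) from navigating tab history.
c.input.mouse.back_forward_buttons = False
```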
{"golden_diff": "diff --git a/qutebrowser/browser/eventfilter.py b/qutebrowser/browser/eventfilter.py\n--- a/qutebrowser/browser/eventfilter.py\n+++ b/qutebrowser/browser/eventfilter.py\n@@ -116,7 +116,7 @@\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n- is_rocker_gesture = (config.val.input.rocker_gestures and\n+ is_rocker_gesture = (config.val.input.mouse.rocker_gestures and\n e.buttons() == Qt.LeftButton | Qt.RightButton)\n \n if e.button() in [Qt.XButton1, Qt.XButton2] or is_rocker_gesture:\n@@ -204,7 +204,7 @@\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n- return config.val.input.rocker_gestures\n+ return config.val.input.mouse.rocker_gestures\n \n def _handle_key_release(self, e):\n \"\"\"Ignore repeated key release events going to the website.\n@@ -276,6 +276,11 @@\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n+ if (not config.val.input.mouse.back_forward_buttons and\n+ e.button() in [Qt.XButton1, Qt.XButton2]):\n+ # Back and forward on mice are disabled\n+ return\n+\n if e.button() in [Qt.XButton1, Qt.LeftButton]:\n # Back button on mice which have it, or rocker gesture\n if self._tab.history.can_go_back():\n", "issue": "Have option to disable Mouse 4 and Mouse 5 from jumping forward and back through tab history.\nWhen using Mouse 4 or Mouse 5 as a global hotkey for another application, for example a voice chat program such as Discord or Mumble using either button as push to talk, qutebrowser still receives the button press and goes forward and backwards through history while focused. Some way to disable Mouse 4 and Mouse 5 from being used by qutebrowser would be cool.\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2016-2020 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Event handling for a browser tab.\"\"\"\n\nfrom PyQt5.QtCore import QObject, QEvent, Qt, QTimer\n\nfrom qutebrowser.config import config\nfrom qutebrowser.utils import message, log, usertypes, qtutils, objreg\nfrom qutebrowser.misc import objects\nfrom qutebrowser.keyinput import modeman\n\n\nclass ChildEventFilter(QObject):\n\n \"\"\"An event filter re-adding TabEventFilter on ChildEvent.\n\n This is needed because QtWebEngine likes to randomly change its\n focusProxy...\n\n FIXME:qtwebengine Add a test for this happening\n\n Attributes:\n _filter: The event filter to install.\n _widget: The widget expected to send out childEvents.\n \"\"\"\n\n def __init__(self, eventfilter, widget, win_id, parent=None):\n super().__init__(parent)\n self._filter = eventfilter\n assert widget is not None\n self._widget = widget\n self._win_id = win_id\n\n def eventFilter(self, obj, event):\n \"\"\"Act on ChildAdded events.\"\"\"\n if event.type() == QEvent.ChildAdded:\n child = event.child()\n log.misc.debug(\"{} got new child {}, installing filter\".format(\n obj, child))\n assert obj is self._widget\n child.installEventFilter(self._filter)\n\n if qtutils.version_check('5.11', compiled=False, exact=True):\n # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-68076\n pass_modes = [usertypes.KeyMode.command,\n usertypes.KeyMode.prompt,\n usertypes.KeyMode.yesno]\n if modeman.instance(self._win_id).mode not in pass_modes:\n tabbed_browser = objreg.get('tabbed-browser',\n scope='window',\n window=self._win_id)\n current_index = tabbed_browser.widget.currentIndex()\n try:\n widget_index = tabbed_browser.widget.indexOf(\n self._widget.parent())\n except RuntimeError:\n widget_index = -1\n if current_index == widget_index:\n QTimer.singleShot(0, self._widget.setFocus)\n\n elif event.type() == QEvent.ChildRemoved:\n child = event.child()\n log.misc.debug(\"{}: removed child {}\".format(obj, child))\n\n return False\n\n\nclass TabEventFilter(QObject):\n\n \"\"\"Handle mouse/keyboard events on a tab.\n\n Attributes:\n _tab: The browsertab object this filter is installed on.\n _handlers: A dict of handler functions for the handled events.\n _ignore_wheel_event: Whether to ignore the next wheelEvent.\n _check_insertmode_on_release: Whether an insertmode check should be\n done when the mouse is released.\n \"\"\"\n\n def __init__(self, tab, *, parent=None):\n super().__init__(parent)\n self._tab = tab\n self._handlers = {\n QEvent.MouseButtonPress: self._handle_mouse_press,\n QEvent.MouseButtonRelease: self._handle_mouse_release,\n QEvent.Wheel: self._handle_wheel,\n QEvent.ContextMenu: self._handle_context_menu,\n QEvent.KeyRelease: self._handle_key_release,\n }\n self._ignore_wheel_event = False\n self._check_insertmode_on_release = False\n\n def _handle_mouse_press(self, e):\n \"\"\"Handle pressing of a mouse button.\n\n Args:\n e: The QMouseEvent.\n\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n is_rocker_gesture = (config.val.input.rocker_gestures and\n e.buttons() == Qt.LeftButton | Qt.RightButton)\n\n if e.button() in [Qt.XButton1, Qt.XButton2] or is_rocker_gesture:\n self._mousepress_backforward(e)\n return True\n\n self._ignore_wheel_event = True\n\n pos = e.pos()\n if pos.x() < 0 or pos.y() < 0:\n log.mouse.warning(\"Ignoring invalid click at {}\".format(pos))\n return False\n\n if e.button() != Qt.NoButton:\n self._tab.elements.find_at_pos(pos, self._mousepress_insertmode_cb)\n\n return False\n\n def 
_handle_mouse_release(self, _e):\n \"\"\"Handle releasing of a mouse button.\n\n Args:\n e: The QMouseEvent.\n\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n # We want to make sure we check the focus element after the WebView is\n # updated completely.\n QTimer.singleShot(0, self._mouserelease_insertmode)\n return False\n\n def _handle_wheel(self, e):\n \"\"\"Zoom on Ctrl-Mousewheel.\n\n Args:\n e: The QWheelEvent.\n\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n if self._ignore_wheel_event:\n # See https://github.com/qutebrowser/qutebrowser/issues/395\n self._ignore_wheel_event = False\n return True\n\n # Don't allow scrolling while hinting\n mode = modeman.instance(self._tab.win_id).mode\n if mode == usertypes.KeyMode.hint:\n return True\n\n elif e.modifiers() & Qt.ControlModifier:\n if mode == usertypes.KeyMode.passthrough:\n return False\n\n divider = config.val.zoom.mouse_divider\n if divider == 0:\n # Disable mouse zooming\n return True\n\n factor = self._tab.zoom.factor() + (e.angleDelta().y() / divider)\n if factor < 0:\n return True\n\n perc = int(100 * factor)\n message.info(\"Zoom level: {}%\".format(perc), replace=True)\n self._tab.zoom.set_factor(factor)\n return True\n elif (e.modifiers() & Qt.ShiftModifier and\n not qtutils.version_check('5.9', compiled=False)):\n if e.angleDelta().y() > 0:\n self._tab.scroller.left()\n else:\n self._tab.scroller.right()\n return True\n\n return False\n\n def _handle_context_menu(self, _e):\n \"\"\"Suppress context menus if rocker gestures are turned on.\n\n Args:\n e: The QContextMenuEvent.\n\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n return config.val.input.rocker_gestures\n\n def _handle_key_release(self, e):\n \"\"\"Ignore repeated key release events going to the website.\n\n WORKAROUND for https://bugreports.qt.io/browse/QTBUG-77208\n\n Args:\n e: The QKeyEvent.\n\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n return (e.isAutoRepeat() and\n qtutils.version_check('5.10', compiled=False) and\n not qtutils.version_check('5.14', compiled=False) and\n objects.backend == usertypes.Backend.QtWebEngine)\n\n def _mousepress_insertmode_cb(self, elem):\n \"\"\"Check if the clicked element is editable.\"\"\"\n if elem is None:\n # Something didn't work out, let's find the focus element after\n # a mouse release.\n log.mouse.debug(\"Got None element, scheduling check on \"\n \"mouse release\")\n self._check_insertmode_on_release = True\n return\n\n if elem.is_editable():\n log.mouse.debug(\"Clicked editable element!\")\n if config.val.input.insert_mode.auto_enter:\n modeman.enter(self._tab.win_id, usertypes.KeyMode.insert,\n 'click', only_if_normal=True)\n else:\n log.mouse.debug(\"Clicked non-editable element!\")\n if config.val.input.insert_mode.auto_leave:\n modeman.leave(self._tab.win_id, usertypes.KeyMode.insert,\n 'click', maybe=True)\n\n def _mouserelease_insertmode(self):\n \"\"\"If we have an insertmode check scheduled, handle it.\"\"\"\n if not self._check_insertmode_on_release:\n return\n self._check_insertmode_on_release = False\n\n def mouserelease_insertmode_cb(elem):\n \"\"\"Callback which gets called from JS.\"\"\"\n if elem is None:\n log.mouse.debug(\"Element vanished!\")\n return\n\n if elem.is_editable():\n log.mouse.debug(\"Clicked editable element (delayed)!\")\n modeman.enter(self._tab.win_id, usertypes.KeyMode.insert,\n 'click-delayed', only_if_normal=True)\n else:\n log.mouse.debug(\"Clicked 
non-editable element (delayed)!\")\n if config.val.input.insert_mode.auto_leave:\n modeman.leave(self._tab.win_id, usertypes.KeyMode.insert,\n 'click-delayed', maybe=True)\n\n self._tab.elements.find_focused(mouserelease_insertmode_cb)\n\n def _mousepress_backforward(self, e):\n \"\"\"Handle back/forward mouse button presses.\n\n Args:\n e: The QMouseEvent.\n\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n if e.button() in [Qt.XButton1, Qt.LeftButton]:\n # Back button on mice which have it, or rocker gesture\n if self._tab.history.can_go_back():\n self._tab.history.back()\n else:\n message.error(\"At beginning of history.\")\n elif e.button() in [Qt.XButton2, Qt.RightButton]:\n # Forward button on mice which have it, or rocker gesture\n if self._tab.history.can_go_forward():\n self._tab.history.forward()\n else:\n message.error(\"At end of history.\")\n\n def eventFilter(self, obj, event):\n \"\"\"Filter events going to a QWeb(Engine)View.\n\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n evtype = event.type()\n if evtype not in self._handlers:\n return False\n if obj is not self._tab.private_api.event_target():\n log.mouse.debug(\"Ignoring {} to {}\".format(\n event.__class__.__name__, obj))\n return False\n return self._handlers[evtype](event)\n", "path": "qutebrowser/browser/eventfilter.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2016-2020 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Event handling for a browser tab.\"\"\"\n\nfrom PyQt5.QtCore import QObject, QEvent, Qt, QTimer\n\nfrom qutebrowser.config import config\nfrom qutebrowser.utils import message, log, usertypes, qtutils, objreg\nfrom qutebrowser.misc import objects\nfrom qutebrowser.keyinput import modeman\n\n\nclass ChildEventFilter(QObject):\n\n \"\"\"An event filter re-adding TabEventFilter on ChildEvent.\n\n This is needed because QtWebEngine likes to randomly change its\n focusProxy...\n\n FIXME:qtwebengine Add a test for this happening\n\n Attributes:\n _filter: The event filter to install.\n _widget: The widget expected to send out childEvents.\n \"\"\"\n\n def __init__(self, eventfilter, widget, win_id, parent=None):\n super().__init__(parent)\n self._filter = eventfilter\n assert widget is not None\n self._widget = widget\n self._win_id = win_id\n\n def eventFilter(self, obj, event):\n \"\"\"Act on ChildAdded events.\"\"\"\n if event.type() == QEvent.ChildAdded:\n child = event.child()\n log.misc.debug(\"{} got new child {}, installing filter\".format(\n obj, child))\n assert obj is self._widget\n child.installEventFilter(self._filter)\n\n if qtutils.version_check('5.11', compiled=False, exact=True):\n # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-68076\n pass_modes = [usertypes.KeyMode.command,\n usertypes.KeyMode.prompt,\n usertypes.KeyMode.yesno]\n if modeman.instance(self._win_id).mode not in pass_modes:\n tabbed_browser = objreg.get('tabbed-browser',\n scope='window',\n window=self._win_id)\n current_index = tabbed_browser.widget.currentIndex()\n try:\n widget_index = tabbed_browser.widget.indexOf(\n self._widget.parent())\n except RuntimeError:\n widget_index = -1\n if current_index == widget_index:\n QTimer.singleShot(0, self._widget.setFocus)\n\n elif event.type() == QEvent.ChildRemoved:\n child = event.child()\n log.misc.debug(\"{}: removed child {}\".format(obj, child))\n\n return False\n\n\nclass TabEventFilter(QObject):\n\n \"\"\"Handle mouse/keyboard events on a tab.\n\n Attributes:\n _tab: The browsertab object this filter is installed on.\n _handlers: A dict of handler functions for the handled events.\n _ignore_wheel_event: Whether to ignore the next wheelEvent.\n _check_insertmode_on_release: Whether an insertmode check should be\n done when the mouse is released.\n \"\"\"\n\n def __init__(self, tab, *, parent=None):\n super().__init__(parent)\n self._tab = tab\n self._handlers = {\n QEvent.MouseButtonPress: self._handle_mouse_press,\n QEvent.MouseButtonRelease: self._handle_mouse_release,\n QEvent.Wheel: self._handle_wheel,\n QEvent.ContextMenu: self._handle_context_menu,\n QEvent.KeyRelease: self._handle_key_release,\n }\n self._ignore_wheel_event = False\n self._check_insertmode_on_release = False\n\n def _handle_mouse_press(self, e):\n \"\"\"Handle pressing of a mouse button.\n\n Args:\n e: The QMouseEvent.\n\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n is_rocker_gesture = (config.val.input.mouse.rocker_gestures and\n e.buttons() == Qt.LeftButton | Qt.RightButton)\n\n if e.button() in [Qt.XButton1, Qt.XButton2] or is_rocker_gesture:\n self._mousepress_backforward(e)\n return True\n\n self._ignore_wheel_event = True\n\n pos = e.pos()\n if pos.x() < 0 or pos.y() < 0:\n log.mouse.warning(\"Ignoring invalid click at {}\".format(pos))\n return False\n\n if e.button() != Qt.NoButton:\n self._tab.elements.find_at_pos(pos, self._mousepress_insertmode_cb)\n\n return False\n\n def 
_handle_mouse_release(self, _e):\n \"\"\"Handle releasing of a mouse button.\n\n Args:\n e: The QMouseEvent.\n\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n # We want to make sure we check the focus element after the WebView is\n # updated completely.\n QTimer.singleShot(0, self._mouserelease_insertmode)\n return False\n\n def _handle_wheel(self, e):\n \"\"\"Zoom on Ctrl-Mousewheel.\n\n Args:\n e: The QWheelEvent.\n\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n if self._ignore_wheel_event:\n # See https://github.com/qutebrowser/qutebrowser/issues/395\n self._ignore_wheel_event = False\n return True\n\n # Don't allow scrolling while hinting\n mode = modeman.instance(self._tab.win_id).mode\n if mode == usertypes.KeyMode.hint:\n return True\n\n elif e.modifiers() & Qt.ControlModifier:\n if mode == usertypes.KeyMode.passthrough:\n return False\n\n divider = config.val.zoom.mouse_divider\n if divider == 0:\n # Disable mouse zooming\n return True\n\n factor = self._tab.zoom.factor() + (e.angleDelta().y() / divider)\n if factor < 0:\n return True\n\n perc = int(100 * factor)\n message.info(\"Zoom level: {}%\".format(perc), replace=True)\n self._tab.zoom.set_factor(factor)\n return True\n elif (e.modifiers() & Qt.ShiftModifier and\n not qtutils.version_check('5.9', compiled=False)):\n if e.angleDelta().y() > 0:\n self._tab.scroller.left()\n else:\n self._tab.scroller.right()\n return True\n\n return False\n\n def _handle_context_menu(self, _e):\n \"\"\"Suppress context menus if rocker gestures are turned on.\n\n Args:\n e: The QContextMenuEvent.\n\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n return config.val.input.mouse.rocker_gestures\n\n def _handle_key_release(self, e):\n \"\"\"Ignore repeated key release events going to the website.\n\n WORKAROUND for https://bugreports.qt.io/browse/QTBUG-77208\n\n Args:\n e: The QKeyEvent.\n\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n return (e.isAutoRepeat() and\n qtutils.version_check('5.10', compiled=False) and\n not qtutils.version_check('5.14', compiled=False) and\n objects.backend == usertypes.Backend.QtWebEngine)\n\n def _mousepress_insertmode_cb(self, elem):\n \"\"\"Check if the clicked element is editable.\"\"\"\n if elem is None:\n # Something didn't work out, let's find the focus element after\n # a mouse release.\n log.mouse.debug(\"Got None element, scheduling check on \"\n \"mouse release\")\n self._check_insertmode_on_release = True\n return\n\n if elem.is_editable():\n log.mouse.debug(\"Clicked editable element!\")\n if config.val.input.insert_mode.auto_enter:\n modeman.enter(self._tab.win_id, usertypes.KeyMode.insert,\n 'click', only_if_normal=True)\n else:\n log.mouse.debug(\"Clicked non-editable element!\")\n if config.val.input.insert_mode.auto_leave:\n modeman.leave(self._tab.win_id, usertypes.KeyMode.insert,\n 'click', maybe=True)\n\n def _mouserelease_insertmode(self):\n \"\"\"If we have an insertmode check scheduled, handle it.\"\"\"\n if not self._check_insertmode_on_release:\n return\n self._check_insertmode_on_release = False\n\n def mouserelease_insertmode_cb(elem):\n \"\"\"Callback which gets called from JS.\"\"\"\n if elem is None:\n log.mouse.debug(\"Element vanished!\")\n return\n\n if elem.is_editable():\n log.mouse.debug(\"Clicked editable element (delayed)!\")\n modeman.enter(self._tab.win_id, usertypes.KeyMode.insert,\n 'click-delayed', only_if_normal=True)\n else:\n log.mouse.debug(\"Clicked 
non-editable element (delayed)!\")\n if config.val.input.insert_mode.auto_leave:\n modeman.leave(self._tab.win_id, usertypes.KeyMode.insert,\n 'click-delayed', maybe=True)\n\n self._tab.elements.find_focused(mouserelease_insertmode_cb)\n\n def _mousepress_backforward(self, e):\n \"\"\"Handle back/forward mouse button presses.\n\n Args:\n e: The QMouseEvent.\n\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n if (not config.val.input.mouse.back_forward_buttons and\n e.button() in [Qt.XButton1, Qt.XButton2]):\n # Back and forward on mice are disabled\n return\n\n if e.button() in [Qt.XButton1, Qt.LeftButton]:\n # Back button on mice which have it, or rocker gesture\n if self._tab.history.can_go_back():\n self._tab.history.back()\n else:\n message.error(\"At beginning of history.\")\n elif e.button() in [Qt.XButton2, Qt.RightButton]:\n # Forward button on mice which have it, or rocker gesture\n if self._tab.history.can_go_forward():\n self._tab.history.forward()\n else:\n message.error(\"At end of history.\")\n\n def eventFilter(self, obj, event):\n \"\"\"Filter events going to a QWeb(Engine)View.\n\n Return:\n True if the event should be filtered, False otherwise.\n \"\"\"\n evtype = event.type()\n if evtype not in self._handlers:\n return False\n if obj is not self._tab.private_api.event_target():\n log.mouse.debug(\"Ignoring {} to {}\".format(\n event.__class__.__name__, obj))\n return False\n return self._handlers[evtype](event)\n", "path": "qutebrowser/browser/eventfilter.py"}]}
num_tokens: 3,572
num_tokens_diff: 345
problem_id: gh_patches_debug_25513
source: rasdani/github-patches
task_type: git_diff
in_source_id: pypa__setuptools-3805
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [FR] Cache supported tags in Wheel.is_compatible ### What's the problem this feature will solve? Calling `is_compatible` on an instance of the `Wheel` class takes mere milliseconds. But when calling this on thousands of wheels, this quickly adds up. I think this method is called when setuptools or pip reads an index page, for example https://pypi.org/simple/setuptools/, and for each link checks if the wheel is compatible with the current interpreter and platform. (Not completely sure if this is how it works.) My own use case is with Buildout. If this downloads a distribution, it saves it in a directory. Buildout uses this directory as an extra find-link. So the next time you call buildout, these distributions are available. This can help for the case where you have no internet, or someone has removed a distribution from PyPI (which happens a lot less these days, I am glad to say.) With thousands of wheels in there, and Buildout/setuptools calling `is_compatible` on each wheel, this takes too much time. I created an [issue in Buildout](https://github.com/buildout/buildout/issues/626) to track this, so some more details are there. There it seems it is worse with the combination of the very latest setuptools (67.0.0) and pip (23.0.0), and extra worse on Python 3.8 compared to 3.11. But this is a bit unclear. ### Describe the solution you'd like This is fixable in the `_is_compatible` method in [`setuptools/wheel.py`](https://github.com/pypa/setuptools/blob/v67.0.0/setuptools/wheel.py#L85-L89) by calculating the supported tags once, outside of the class. When I checked on my system, this gives a set of 1700 supported tags. With 1000 wheels, we would calculate 1.7 million tags. A tad much. ;-) The assumption is that calling `sys_tags` from the vendored packaging returns the same result each time. I am preparing a PR. ### Alternative Solutions I suppose Buildout could add another to its existing [patches](https://github.com/buildout/buildout/blob/master/src/zc/buildout/patches.py), which already includes `setuptools.package_index.PackageIndex` which is involved here. But I think pip would benefit from a faster method as well. ### Additional context Here is a test file to get some timings on your own system. Prerequisite: a directory with some wheel, the more the better. ``` from setuptools.wheel import Wheel from time import time import os DIR = "/Users/maurits/cached-downloads/dist" print(f"Looking for compatible wheels in {DIR}...") wheels = 0 compatible = 0 start = time() for filename in os.listdir(DIR): if not filename.endswith(".whl"): continue wheel = Wheel(os.path.join(DIR, filename)) wheels += 1 if wheel.is_compatible(): compatible += 1 stop = time() print(f""" Processed {wheels} wheels. There were {compatible} compatible wheels. Time taken: {stop - start} seconds. """) ``` Save this as `test.py`. With a clone of the pip repo and using the main branch, this is the result: ``` $ .tox/python/bin/python test.py Looking for compatible wheels in /Users/maurits/cached-downloads/dist... Processed 2284 wheels. There were 1776 compatible wheels. Time taken: 7.127894639968872 seconds. ``` With my branch I get the same numbers, except the time: ``` Time taken: 0.04627823829650879 seconds. ``` That is about 150 times faster. ### Code of Conduct - [X] I agree to follow the PSF Code of Conduct --- END ISSUE --- Below are some code segments, each from a relevant file. 
One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setuptools/wheel.py` Content: ``` 1 """Wheels support.""" 2 3 import email 4 import itertools 5 import os 6 import posixpath 7 import re 8 import zipfile 9 import contextlib 10 11 from distutils.util import get_platform 12 13 import setuptools 14 from setuptools.extern.packaging.version import Version as parse_version 15 from setuptools.extern.packaging.tags import sys_tags 16 from setuptools.extern.packaging.utils import canonicalize_name 17 from setuptools.command.egg_info import write_requirements, _egg_basename 18 from setuptools.archive_util import _unpack_zipfile_obj 19 20 21 WHEEL_NAME = re.compile( 22 r"""^(?P<project_name>.+?)-(?P<version>\d.*?) 23 ((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?) 24 )\.whl$""", 25 re.VERBOSE).match 26 27 NAMESPACE_PACKAGE_INIT = \ 28 "__import__('pkg_resources').declare_namespace(__name__)\n" 29 30 31 def unpack(src_dir, dst_dir): 32 '''Move everything under `src_dir` to `dst_dir`, and delete the former.''' 33 for dirpath, dirnames, filenames in os.walk(src_dir): 34 subdir = os.path.relpath(dirpath, src_dir) 35 for f in filenames: 36 src = os.path.join(dirpath, f) 37 dst = os.path.join(dst_dir, subdir, f) 38 os.renames(src, dst) 39 for n, d in reversed(list(enumerate(dirnames))): 40 src = os.path.join(dirpath, d) 41 dst = os.path.join(dst_dir, subdir, d) 42 if not os.path.exists(dst): 43 # Directory does not exist in destination, 44 # rename it and prune it from os.walk list. 45 os.renames(src, dst) 46 del dirnames[n] 47 # Cleanup. 48 for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True): 49 assert not filenames 50 os.rmdir(dirpath) 51 52 53 @contextlib.contextmanager 54 def disable_info_traces(): 55 """ 56 Temporarily disable info traces. 57 """ 58 from distutils import log 59 saved = log.set_threshold(log.WARN) 60 try: 61 yield 62 finally: 63 log.set_threshold(saved) 64 65 66 class Wheel: 67 68 def __init__(self, filename): 69 match = WHEEL_NAME(os.path.basename(filename)) 70 if match is None: 71 raise ValueError('invalid wheel name: %r' % filename) 72 self.filename = filename 73 for k, v in match.groupdict().items(): 74 setattr(self, k, v) 75 76 def tags(self): 77 '''List tags (py_version, abi, platform) supported by this wheel.''' 78 return itertools.product( 79 self.py_version.split('.'), 80 self.abi.split('.'), 81 self.platform.split('.'), 82 ) 83 84 def is_compatible(self): 85 '''Is the wheel is compatible with the current platform?''' 86 supported_tags = set( 87 (t.interpreter, t.abi, t.platform) for t in sys_tags()) 88 return next((True for t in self.tags() if t in supported_tags), False) 89 90 def egg_name(self): 91 return _egg_basename( 92 self.project_name, 93 self.version, 94 platform=(None if self.platform == 'any' else get_platform()), 95 ) + ".egg" 96 97 def get_dist_info(self, zf): 98 # find the correct name of the .dist-info dir in the wheel file 99 for member in zf.namelist(): 100 dirname = posixpath.dirname(member) 101 if (dirname.endswith('.dist-info') and 102 canonicalize_name(dirname).startswith( 103 canonicalize_name(self.project_name))): 104 return dirname 105 raise ValueError("unsupported wheel format. 
.dist-info not found") 106 107 def install_as_egg(self, destination_eggdir): 108 '''Install wheel as an egg directory.''' 109 with zipfile.ZipFile(self.filename) as zf: 110 self._install_as_egg(destination_eggdir, zf) 111 112 def _install_as_egg(self, destination_eggdir, zf): 113 dist_basename = '%s-%s' % (self.project_name, self.version) 114 dist_info = self.get_dist_info(zf) 115 dist_data = '%s.data' % dist_basename 116 egg_info = os.path.join(destination_eggdir, 'EGG-INFO') 117 118 self._convert_metadata(zf, destination_eggdir, dist_info, egg_info) 119 self._move_data_entries(destination_eggdir, dist_data) 120 self._fix_namespace_packages(egg_info, destination_eggdir) 121 122 @staticmethod 123 def _convert_metadata(zf, destination_eggdir, dist_info, egg_info): 124 import pkg_resources 125 126 def get_metadata(name): 127 with zf.open(posixpath.join(dist_info, name)) as fp: 128 value = fp.read().decode('utf-8') 129 return email.parser.Parser().parsestr(value) 130 131 wheel_metadata = get_metadata('WHEEL') 132 # Check wheel format version is supported. 133 wheel_version = parse_version(wheel_metadata.get('Wheel-Version')) 134 wheel_v1 = ( 135 parse_version('1.0') <= wheel_version < parse_version('2.0dev0') 136 ) 137 if not wheel_v1: 138 raise ValueError( 139 'unsupported wheel format version: %s' % wheel_version) 140 # Extract to target directory. 141 _unpack_zipfile_obj(zf, destination_eggdir) 142 # Convert metadata. 143 dist_info = os.path.join(destination_eggdir, dist_info) 144 dist = pkg_resources.Distribution.from_location( 145 destination_eggdir, dist_info, 146 metadata=pkg_resources.PathMetadata(destination_eggdir, dist_info), 147 ) 148 149 # Note: Evaluate and strip markers now, 150 # as it's difficult to convert back from the syntax: 151 # foobar; "linux" in sys_platform and extra == 'test' 152 def raw_req(req): 153 req.marker = None 154 return str(req) 155 install_requires = list(map(raw_req, dist.requires())) 156 extras_require = { 157 extra: [ 158 req 159 for req in map(raw_req, dist.requires((extra,))) 160 if req not in install_requires 161 ] 162 for extra in dist.extras 163 } 164 os.rename(dist_info, egg_info) 165 os.rename( 166 os.path.join(egg_info, 'METADATA'), 167 os.path.join(egg_info, 'PKG-INFO'), 168 ) 169 setup_dist = setuptools.Distribution( 170 attrs=dict( 171 install_requires=install_requires, 172 extras_require=extras_require, 173 ), 174 ) 175 with disable_info_traces(): 176 write_requirements( 177 setup_dist.get_command_obj('egg_info'), 178 None, 179 os.path.join(egg_info, 'requires.txt'), 180 ) 181 182 @staticmethod 183 def _move_data_entries(destination_eggdir, dist_data): 184 """Move data entries to their correct location.""" 185 dist_data = os.path.join(destination_eggdir, dist_data) 186 dist_data_scripts = os.path.join(dist_data, 'scripts') 187 if os.path.exists(dist_data_scripts): 188 egg_info_scripts = os.path.join( 189 destination_eggdir, 'EGG-INFO', 'scripts') 190 os.mkdir(egg_info_scripts) 191 for entry in os.listdir(dist_data_scripts): 192 # Remove bytecode, as it's not properly handled 193 # during easy_install scripts install phase. 
194 if entry.endswith('.pyc'): 195 os.unlink(os.path.join(dist_data_scripts, entry)) 196 else: 197 os.rename( 198 os.path.join(dist_data_scripts, entry), 199 os.path.join(egg_info_scripts, entry), 200 ) 201 os.rmdir(dist_data_scripts) 202 for subdir in filter(os.path.exists, ( 203 os.path.join(dist_data, d) 204 for d in ('data', 'headers', 'purelib', 'platlib') 205 )): 206 unpack(subdir, destination_eggdir) 207 if os.path.exists(dist_data): 208 os.rmdir(dist_data) 209 210 @staticmethod 211 def _fix_namespace_packages(egg_info, destination_eggdir): 212 namespace_packages = os.path.join( 213 egg_info, 'namespace_packages.txt') 214 if os.path.exists(namespace_packages): 215 with open(namespace_packages) as fp: 216 namespace_packages = fp.read().split() 217 for mod in namespace_packages: 218 mod_dir = os.path.join(destination_eggdir, *mod.split('.')) 219 mod_init = os.path.join(mod_dir, '__init__.py') 220 if not os.path.exists(mod_dir): 221 os.mkdir(mod_dir) 222 if not os.path.exists(mod_init): 223 with open(mod_init, 'w') as fp: 224 fp.write(NAMESPACE_PACKAGE_INIT) 225 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setuptools/wheel.py b/setuptools/wheel.py --- a/setuptools/wheel.py +++ b/setuptools/wheel.py @@ -2,6 +2,7 @@ import email import itertools +import functools import os import posixpath import re @@ -28,6 +29,14 @@ "__import__('pkg_resources').declare_namespace(__name__)\n" [email protected]_cache(maxsize=None) +def _get_supported_tags(): + # We calculate the supported tags only once, otherwise calling + # this method on thousands of wheels takes seconds instead of + # milliseconds. + return {(t.interpreter, t.abi, t.platform) for t in sys_tags()} + + def unpack(src_dir, dst_dir): '''Move everything under `src_dir` to `dst_dir`, and delete the former.''' for dirpath, dirnames, filenames in os.walk(src_dir): @@ -82,10 +91,8 @@ ) def is_compatible(self): - '''Is the wheel is compatible with the current platform?''' - supported_tags = set( - (t.interpreter, t.abi, t.platform) for t in sys_tags()) - return next((True for t in self.tags() if t in supported_tags), False) + '''Is the wheel compatible with the current platform?''' + return next((True for t in self.tags() if t in _get_supported_tags()), False) def egg_name(self): return _egg_basename(
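The golden diff above memoizes the supported-tag set with `functools.lru_cache`, so `sys_tags()` is evaluated once per process instead of once per wheel; the JSON that follows is again the row's `verification_info`. A standalone sketch of the same caching idea, using the `packaging` library directly rather than setuptools' vendored copy (an assumption about what is installed) and a simplified wheel-filename parse:

```python
import functools

from packaging.tags import parse_tag, sys_tags


@functools.lru_cache(maxsize=None)
def supported_tags():
    # Computed once and reused; this walk is the expensive part the issue measures.
    return frozenset((t.interpreter, t.abi, t.platform) for t in sys_tags())


def wheel_is_compatible(filename):
    # Simplified parse: the last three dash-separated fields form the tag triple,
    # e.g. "example_pkg-1.0-py3-none-any.whl" -> "py3-none-any".
    tag_part = "-".join(filename[: -len(".whl")].split("-")[-3:])
    return any(
        (t.interpreter, t.abi, t.platform) in supported_tags()
        for t in parse_tag(tag_part)
    )


print(wheel_is_compatible("example_pkg-1.0-py3-none-any.whl"))      # True on CPython 3
print(wheel_is_compatible("example_pkg-1.0-cp27-cp27m-win32.whl"))  # False on CPython 3
```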
{"golden_diff": "diff --git a/setuptools/wheel.py b/setuptools/wheel.py\n--- a/setuptools/wheel.py\n+++ b/setuptools/wheel.py\n@@ -2,6 +2,7 @@\n \n import email\n import itertools\n+import functools\n import os\n import posixpath\n import re\n@@ -28,6 +29,14 @@\n \"__import__('pkg_resources').declare_namespace(__name__)\\n\"\n \n \[email protected]_cache(maxsize=None)\n+def _get_supported_tags():\n+ # We calculate the supported tags only once, otherwise calling\n+ # this method on thousands of wheels takes seconds instead of\n+ # milliseconds.\n+ return {(t.interpreter, t.abi, t.platform) for t in sys_tags()}\n+\n+\n def unpack(src_dir, dst_dir):\n '''Move everything under `src_dir` to `dst_dir`, and delete the former.'''\n for dirpath, dirnames, filenames in os.walk(src_dir):\n@@ -82,10 +91,8 @@\n )\n \n def is_compatible(self):\n- '''Is the wheel is compatible with the current platform?'''\n- supported_tags = set(\n- (t.interpreter, t.abi, t.platform) for t in sys_tags())\n- return next((True for t in self.tags() if t in supported_tags), False)\n+ '''Is the wheel compatible with the current platform?'''\n+ return next((True for t in self.tags() if t in _get_supported_tags()), False)\n \n def egg_name(self):\n return _egg_basename(\n", "issue": "[FR] Cache supported tags in Wheel.is_compatible\n### What's the problem this feature will solve?\n\nCalling `is_compatible` on an instance of the `Wheel` class takes mere milliseconds. But when calling this on thousands of wheels, this quickly adds up.\r\n\r\nI think this method is called when setuptools or pip reads an index page, for example https://pypi.org/simple/setuptools/, and for each link checks if the wheel is compatible with the current interpreter and platform. (Not completely sure if this is how it works.)\r\n\r\nMy own use case is with Buildout. If this downloads a distribution, it saves it in a directory. Buildout uses this directory as an extra find-link. So the next time you call buildout, these distributions are available. This can help for the case where you have no internet, or someone has removed a distribution from PyPI (which happens a lot less these days, I am glad to say.) With thousands of wheels in there, and Buildout/setuptools calling `is_compatible` on each wheel, this takes too much time.\r\n\r\nI created an [issue in Buildout](https://github.com/buildout/buildout/issues/626) to track this, so some more details are there. There it seems it is worse with the combination of the very latest setuptools (67.0.0) and pip (23.0.0), and extra worse on Python 3.8 compared to 3.11. But this is a bit unclear.\n\n### Describe the solution you'd like\n\nThis is fixable in the `_is_compatible` method in [`setuptools/wheel.py`](https://github.com/pypa/setuptools/blob/v67.0.0/setuptools/wheel.py#L85-L89) by calculating the supported tags once, outside of the class.\r\nWhen I checked on my system, this gives a set of 1700 supported tags. With 1000 wheels, we would calculate 1.7 million tags. A tad much. 
;-)\r\n\r\nThe assumption is that calling `sys_tags` from the vendored packaging returns the same result each time.\r\n\r\nI am preparing a PR.\r\n\n\n### Alternative Solutions\n\nI suppose Buildout could add another to its existing [patches](https://github.com/buildout/buildout/blob/master/src/zc/buildout/patches.py), which already includes `setuptools.package_index.PackageIndex` which is involved here.\r\nBut I think pip would benefit from a faster method as well.\n\n### Additional context\n\nHere is a test file to get some timings on your own system. Prerequisite: a directory with some wheel, the more the better.\r\n\r\n```\r\nfrom setuptools.wheel import Wheel\r\nfrom time import time\r\n\r\nimport os\r\n\r\nDIR = \"/Users/maurits/cached-downloads/dist\"\r\nprint(f\"Looking for compatible wheels in {DIR}...\")\r\nwheels = 0\r\ncompatible = 0\r\nstart = time()\r\nfor filename in os.listdir(DIR):\r\n if not filename.endswith(\".whl\"):\r\n continue\r\n wheel = Wheel(os.path.join(DIR, filename))\r\n wheels += 1\r\n if wheel.is_compatible():\r\n compatible += 1\r\nstop = time()\r\nprint(f\"\"\"\r\nProcessed {wheels} wheels.\r\nThere were {compatible} compatible wheels.\r\nTime taken: {stop - start} seconds.\r\n\"\"\")\r\n```\r\n\r\nSave this as `test.py`.\r\nWith a clone of the pip repo and using the main branch, this is the result:\r\n\r\n```\r\n$ .tox/python/bin/python test.py \r\nLooking for compatible wheels in /Users/maurits/cached-downloads/dist...\r\n\r\nProcessed 2284 wheels.\r\nThere were 1776 compatible wheels.\r\nTime taken: 7.127894639968872 seconds.\r\n```\r\n\r\nWith my branch I get the same numbers, except the time:\r\n\r\n```\r\nTime taken: 0.04627823829650879 seconds.\r\n```\r\n\r\nThat is about 150 times faster.\n\n### Code of Conduct\n\n- [X] I agree to follow the PSF Code of Conduct\n", "before_files": [{"content": "\"\"\"Wheels support.\"\"\"\n\nimport email\nimport itertools\nimport os\nimport posixpath\nimport re\nimport zipfile\nimport contextlib\n\nfrom distutils.util import get_platform\n\nimport setuptools\nfrom setuptools.extern.packaging.version import Version as parse_version\nfrom setuptools.extern.packaging.tags import sys_tags\nfrom setuptools.extern.packaging.utils import canonicalize_name\nfrom setuptools.command.egg_info import write_requirements, _egg_basename\nfrom setuptools.archive_util import _unpack_zipfile_obj\n\n\nWHEEL_NAME = re.compile(\n r\"\"\"^(?P<project_name>.+?)-(?P<version>\\d.*?)\n ((-(?P<build>\\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)\n )\\.whl$\"\"\",\n re.VERBOSE).match\n\nNAMESPACE_PACKAGE_INIT = \\\n \"__import__('pkg_resources').declare_namespace(__name__)\\n\"\n\n\ndef unpack(src_dir, dst_dir):\n '''Move everything under `src_dir` to `dst_dir`, and delete the former.'''\n for dirpath, dirnames, filenames in os.walk(src_dir):\n subdir = os.path.relpath(dirpath, src_dir)\n for f in filenames:\n src = os.path.join(dirpath, f)\n dst = os.path.join(dst_dir, subdir, f)\n os.renames(src, dst)\n for n, d in reversed(list(enumerate(dirnames))):\n src = os.path.join(dirpath, d)\n dst = os.path.join(dst_dir, subdir, d)\n if not os.path.exists(dst):\n # Directory does not exist in destination,\n # rename it and prune it from os.walk list.\n os.renames(src, dst)\n del dirnames[n]\n # Cleanup.\n for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):\n assert not filenames\n os.rmdir(dirpath)\n\n\[email protected]\ndef disable_info_traces():\n \"\"\"\n Temporarily disable info traces.\n \"\"\"\n from distutils 
import log\n saved = log.set_threshold(log.WARN)\n try:\n yield\n finally:\n log.set_threshold(saved)\n\n\nclass Wheel:\n\n def __init__(self, filename):\n match = WHEEL_NAME(os.path.basename(filename))\n if match is None:\n raise ValueError('invalid wheel name: %r' % filename)\n self.filename = filename\n for k, v in match.groupdict().items():\n setattr(self, k, v)\n\n def tags(self):\n '''List tags (py_version, abi, platform) supported by this wheel.'''\n return itertools.product(\n self.py_version.split('.'),\n self.abi.split('.'),\n self.platform.split('.'),\n )\n\n def is_compatible(self):\n '''Is the wheel is compatible with the current platform?'''\n supported_tags = set(\n (t.interpreter, t.abi, t.platform) for t in sys_tags())\n return next((True for t in self.tags() if t in supported_tags), False)\n\n def egg_name(self):\n return _egg_basename(\n self.project_name,\n self.version,\n platform=(None if self.platform == 'any' else get_platform()),\n ) + \".egg\"\n\n def get_dist_info(self, zf):\n # find the correct name of the .dist-info dir in the wheel file\n for member in zf.namelist():\n dirname = posixpath.dirname(member)\n if (dirname.endswith('.dist-info') and\n canonicalize_name(dirname).startswith(\n canonicalize_name(self.project_name))):\n return dirname\n raise ValueError(\"unsupported wheel format. .dist-info not found\")\n\n def install_as_egg(self, destination_eggdir):\n '''Install wheel as an egg directory.'''\n with zipfile.ZipFile(self.filename) as zf:\n self._install_as_egg(destination_eggdir, zf)\n\n def _install_as_egg(self, destination_eggdir, zf):\n dist_basename = '%s-%s' % (self.project_name, self.version)\n dist_info = self.get_dist_info(zf)\n dist_data = '%s.data' % dist_basename\n egg_info = os.path.join(destination_eggdir, 'EGG-INFO')\n\n self._convert_metadata(zf, destination_eggdir, dist_info, egg_info)\n self._move_data_entries(destination_eggdir, dist_data)\n self._fix_namespace_packages(egg_info, destination_eggdir)\n\n @staticmethod\n def _convert_metadata(zf, destination_eggdir, dist_info, egg_info):\n import pkg_resources\n\n def get_metadata(name):\n with zf.open(posixpath.join(dist_info, name)) as fp:\n value = fp.read().decode('utf-8')\n return email.parser.Parser().parsestr(value)\n\n wheel_metadata = get_metadata('WHEEL')\n # Check wheel format version is supported.\n wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))\n wheel_v1 = (\n parse_version('1.0') <= wheel_version < parse_version('2.0dev0')\n )\n if not wheel_v1:\n raise ValueError(\n 'unsupported wheel format version: %s' % wheel_version)\n # Extract to target directory.\n _unpack_zipfile_obj(zf, destination_eggdir)\n # Convert metadata.\n dist_info = os.path.join(destination_eggdir, dist_info)\n dist = pkg_resources.Distribution.from_location(\n destination_eggdir, dist_info,\n metadata=pkg_resources.PathMetadata(destination_eggdir, dist_info),\n )\n\n # Note: Evaluate and strip markers now,\n # as it's difficult to convert back from the syntax:\n # foobar; \"linux\" in sys_platform and extra == 'test'\n def raw_req(req):\n req.marker = None\n return str(req)\n install_requires = list(map(raw_req, dist.requires()))\n extras_require = {\n extra: [\n req\n for req in map(raw_req, dist.requires((extra,)))\n if req not in install_requires\n ]\n for extra in dist.extras\n }\n os.rename(dist_info, egg_info)\n os.rename(\n os.path.join(egg_info, 'METADATA'),\n os.path.join(egg_info, 'PKG-INFO'),\n )\n setup_dist = setuptools.Distribution(\n attrs=dict(\n 
install_requires=install_requires,\n extras_require=extras_require,\n ),\n )\n with disable_info_traces():\n write_requirements(\n setup_dist.get_command_obj('egg_info'),\n None,\n os.path.join(egg_info, 'requires.txt'),\n )\n\n @staticmethod\n def _move_data_entries(destination_eggdir, dist_data):\n \"\"\"Move data entries to their correct location.\"\"\"\n dist_data = os.path.join(destination_eggdir, dist_data)\n dist_data_scripts = os.path.join(dist_data, 'scripts')\n if os.path.exists(dist_data_scripts):\n egg_info_scripts = os.path.join(\n destination_eggdir, 'EGG-INFO', 'scripts')\n os.mkdir(egg_info_scripts)\n for entry in os.listdir(dist_data_scripts):\n # Remove bytecode, as it's not properly handled\n # during easy_install scripts install phase.\n if entry.endswith('.pyc'):\n os.unlink(os.path.join(dist_data_scripts, entry))\n else:\n os.rename(\n os.path.join(dist_data_scripts, entry),\n os.path.join(egg_info_scripts, entry),\n )\n os.rmdir(dist_data_scripts)\n for subdir in filter(os.path.exists, (\n os.path.join(dist_data, d)\n for d in ('data', 'headers', 'purelib', 'platlib')\n )):\n unpack(subdir, destination_eggdir)\n if os.path.exists(dist_data):\n os.rmdir(dist_data)\n\n @staticmethod\n def _fix_namespace_packages(egg_info, destination_eggdir):\n namespace_packages = os.path.join(\n egg_info, 'namespace_packages.txt')\n if os.path.exists(namespace_packages):\n with open(namespace_packages) as fp:\n namespace_packages = fp.read().split()\n for mod in namespace_packages:\n mod_dir = os.path.join(destination_eggdir, *mod.split('.'))\n mod_init = os.path.join(mod_dir, '__init__.py')\n if not os.path.exists(mod_dir):\n os.mkdir(mod_dir)\n if not os.path.exists(mod_init):\n with open(mod_init, 'w') as fp:\n fp.write(NAMESPACE_PACKAGE_INIT)\n", "path": "setuptools/wheel.py"}], "after_files": [{"content": "\"\"\"Wheels support.\"\"\"\n\nimport email\nimport itertools\nimport functools\nimport os\nimport posixpath\nimport re\nimport zipfile\nimport contextlib\n\nfrom distutils.util import get_platform\n\nimport setuptools\nfrom setuptools.extern.packaging.version import Version as parse_version\nfrom setuptools.extern.packaging.tags import sys_tags\nfrom setuptools.extern.packaging.utils import canonicalize_name\nfrom setuptools.command.egg_info import write_requirements, _egg_basename\nfrom setuptools.archive_util import _unpack_zipfile_obj\n\n\nWHEEL_NAME = re.compile(\n r\"\"\"^(?P<project_name>.+?)-(?P<version>\\d.*?)\n ((-(?P<build>\\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)\n )\\.whl$\"\"\",\n re.VERBOSE).match\n\nNAMESPACE_PACKAGE_INIT = \\\n \"__import__('pkg_resources').declare_namespace(__name__)\\n\"\n\n\[email protected]_cache(maxsize=None)\ndef _get_supported_tags():\n # We calculate the supported tags only once, otherwise calling\n # this method on thousands of wheels takes seconds instead of\n # milliseconds.\n return {(t.interpreter, t.abi, t.platform) for t in sys_tags()}\n\n\ndef unpack(src_dir, dst_dir):\n '''Move everything under `src_dir` to `dst_dir`, and delete the former.'''\n for dirpath, dirnames, filenames in os.walk(src_dir):\n subdir = os.path.relpath(dirpath, src_dir)\n for f in filenames:\n src = os.path.join(dirpath, f)\n dst = os.path.join(dst_dir, subdir, f)\n os.renames(src, dst)\n for n, d in reversed(list(enumerate(dirnames))):\n src = os.path.join(dirpath, d)\n dst = os.path.join(dst_dir, subdir, d)\n if not os.path.exists(dst):\n # Directory does not exist in destination,\n # rename it and prune it from os.walk list.\n 
os.renames(src, dst)\n del dirnames[n]\n # Cleanup.\n for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):\n assert not filenames\n os.rmdir(dirpath)\n\n\[email protected]\ndef disable_info_traces():\n \"\"\"\n Temporarily disable info traces.\n \"\"\"\n from distutils import log\n saved = log.set_threshold(log.WARN)\n try:\n yield\n finally:\n log.set_threshold(saved)\n\n\nclass Wheel:\n\n def __init__(self, filename):\n match = WHEEL_NAME(os.path.basename(filename))\n if match is None:\n raise ValueError('invalid wheel name: %r' % filename)\n self.filename = filename\n for k, v in match.groupdict().items():\n setattr(self, k, v)\n\n def tags(self):\n '''List tags (py_version, abi, platform) supported by this wheel.'''\n return itertools.product(\n self.py_version.split('.'),\n self.abi.split('.'),\n self.platform.split('.'),\n )\n\n def is_compatible(self):\n '''Is the wheel compatible with the current platform?'''\n return next((True for t in self.tags() if t in _get_supported_tags()), False)\n\n def egg_name(self):\n return _egg_basename(\n self.project_name,\n self.version,\n platform=(None if self.platform == 'any' else get_platform()),\n ) + \".egg\"\n\n def get_dist_info(self, zf):\n # find the correct name of the .dist-info dir in the wheel file\n for member in zf.namelist():\n dirname = posixpath.dirname(member)\n if (dirname.endswith('.dist-info') and\n canonicalize_name(dirname).startswith(\n canonicalize_name(self.project_name))):\n return dirname\n raise ValueError(\"unsupported wheel format. .dist-info not found\")\n\n def install_as_egg(self, destination_eggdir):\n '''Install wheel as an egg directory.'''\n with zipfile.ZipFile(self.filename) as zf:\n self._install_as_egg(destination_eggdir, zf)\n\n def _install_as_egg(self, destination_eggdir, zf):\n dist_basename = '%s-%s' % (self.project_name, self.version)\n dist_info = self.get_dist_info(zf)\n dist_data = '%s.data' % dist_basename\n egg_info = os.path.join(destination_eggdir, 'EGG-INFO')\n\n self._convert_metadata(zf, destination_eggdir, dist_info, egg_info)\n self._move_data_entries(destination_eggdir, dist_data)\n self._fix_namespace_packages(egg_info, destination_eggdir)\n\n @staticmethod\n def _convert_metadata(zf, destination_eggdir, dist_info, egg_info):\n import pkg_resources\n\n def get_metadata(name):\n with zf.open(posixpath.join(dist_info, name)) as fp:\n value = fp.read().decode('utf-8')\n return email.parser.Parser().parsestr(value)\n\n wheel_metadata = get_metadata('WHEEL')\n # Check wheel format version is supported.\n wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))\n wheel_v1 = (\n parse_version('1.0') <= wheel_version < parse_version('2.0dev0')\n )\n if not wheel_v1:\n raise ValueError(\n 'unsupported wheel format version: %s' % wheel_version)\n # Extract to target directory.\n _unpack_zipfile_obj(zf, destination_eggdir)\n # Convert metadata.\n dist_info = os.path.join(destination_eggdir, dist_info)\n dist = pkg_resources.Distribution.from_location(\n destination_eggdir, dist_info,\n metadata=pkg_resources.PathMetadata(destination_eggdir, dist_info),\n )\n\n # Note: Evaluate and strip markers now,\n # as it's difficult to convert back from the syntax:\n # foobar; \"linux\" in sys_platform and extra == 'test'\n def raw_req(req):\n req.marker = None\n return str(req)\n install_requires = list(map(raw_req, dist.requires()))\n extras_require = {\n extra: [\n req\n for req in map(raw_req, dist.requires((extra,)))\n if req not in install_requires\n ]\n for extra in 
dist.extras\n }\n os.rename(dist_info, egg_info)\n os.rename(\n os.path.join(egg_info, 'METADATA'),\n os.path.join(egg_info, 'PKG-INFO'),\n )\n setup_dist = setuptools.Distribution(\n attrs=dict(\n install_requires=install_requires,\n extras_require=extras_require,\n ),\n )\n with disable_info_traces():\n write_requirements(\n setup_dist.get_command_obj('egg_info'),\n None,\n os.path.join(egg_info, 'requires.txt'),\n )\n\n @staticmethod\n def _move_data_entries(destination_eggdir, dist_data):\n \"\"\"Move data entries to their correct location.\"\"\"\n dist_data = os.path.join(destination_eggdir, dist_data)\n dist_data_scripts = os.path.join(dist_data, 'scripts')\n if os.path.exists(dist_data_scripts):\n egg_info_scripts = os.path.join(\n destination_eggdir, 'EGG-INFO', 'scripts')\n os.mkdir(egg_info_scripts)\n for entry in os.listdir(dist_data_scripts):\n # Remove bytecode, as it's not properly handled\n # during easy_install scripts install phase.\n if entry.endswith('.pyc'):\n os.unlink(os.path.join(dist_data_scripts, entry))\n else:\n os.rename(\n os.path.join(dist_data_scripts, entry),\n os.path.join(egg_info_scripts, entry),\n )\n os.rmdir(dist_data_scripts)\n for subdir in filter(os.path.exists, (\n os.path.join(dist_data, d)\n for d in ('data', 'headers', 'purelib', 'platlib')\n )):\n unpack(subdir, destination_eggdir)\n if os.path.exists(dist_data):\n os.rmdir(dist_data)\n\n @staticmethod\n def _fix_namespace_packages(egg_info, destination_eggdir):\n namespace_packages = os.path.join(\n egg_info, 'namespace_packages.txt')\n if os.path.exists(namespace_packages):\n with open(namespace_packages) as fp:\n namespace_packages = fp.read().split()\n for mod in namespace_packages:\n mod_dir = os.path.join(destination_eggdir, *mod.split('.'))\n mod_init = os.path.join(mod_dir, '__init__.py')\n if not os.path.exists(mod_dir):\n os.mkdir(mod_dir)\n if not os.path.exists(mod_init):\n with open(mod_init, 'w') as fp:\n fp.write(NAMESPACE_PACKAGE_INIT)\n", "path": "setuptools/wheel.py"}]}
3,516
341
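The golden diff in the record above replaces the per-wheel `sys_tags()` scan with a module-level set memoized by `functools.lru_cache`, so the supported-tag computation runs once per process instead of once per `is_compatible()` call. A minimal standalone sketch of that caching pattern follows; it assumes the `packaging` library is importable directly (setuptools vendors its own copy), and `expand_wheel_tags()` is a simplified stand-in for `Wheel.tags()`, not setuptools code.

```python
# Sketch of the memoization pattern from the golden diff above; helper names
# here are illustrative, not the real setuptools API.
import functools
import itertools

from packaging.tags import sys_tags


@functools.lru_cache(maxsize=None)
def get_supported_tags():
    # Computed once per process; the patch does the same inside setuptools
    # so that checking thousands of wheels no longer re-enumerates sys_tags().
    return {(t.interpreter, t.abi, t.platform) for t in sys_tags()}


def expand_wheel_tags(py_version, abi, platform):
    # Simplified stand-in for Wheel.tags(): expand compressed tag sets such as
    # py2.py3-none-any into individual (interpreter, abi, platform) triples.
    return itertools.product(
        py_version.split("."), abi.split("."), platform.split(".")
    )


def is_compatible(py_version, abi, platform):
    supported = get_supported_tags()
    return any(t in supported for t in expand_wheel_tags(py_version, abi, platform))


if __name__ == "__main__":
    print(is_compatible("py2.py3", "none", "any"))               # pure-Python wheel: True
    print(is_compatible("cp27", "cp27mu", "manylinux1_x86_64"))  # Python 2 wheel: False on py3
```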
gh_patches_debug_40222
rasdani/github-patches
git_diff
yt-dlp__yt-dlp-2589
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- apt doesn't keep track of latest version ### Checklist - [X] I'm asking a question and **not** reporting a bug/feature request - [X] I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme) - [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue) - [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar questions including closed ones ### Question Yesterday I came online and the first thing I did was to run `sudo apt upgrade yt-dlp`. At the time I had a version 2021-12-27 or something similar. Apt then told me I had the latest version and didn't need to upgrade, I run Linux Mint 20.3 and as it happened the gui Update Manager ran at the same time, and it told me I could upgrade to version 2022.01.21, which I did using the Upgrade Manager. I have never before experienced the Upgrade Manager telling me about an available upgrade for yt-dlp. Perhaps this is a new interaction. In any case I appreciate it since that's the most dead-sure way of catching new upgrades and being up-to-date. ### Verbose log _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `yt_dlp/extractor/globo.py` Content: ``` 1 # coding: utf-8 2 from __future__ import unicode_literals 3 4 import base64 5 import hashlib 6 import json 7 import random 8 import re 9 10 from .common import InfoExtractor 11 from ..compat import ( 12 compat_str, 13 ) 14 from ..utils import ( 15 ExtractorError, 16 float_or_none, 17 orderedSet, 18 str_or_none, 19 try_get, 20 ) 21 22 23 class GloboIE(InfoExtractor): 24 _VALID_URL = r'(?:globo:|https?://.+?\.globo\.com/(?:[^/]+/)*(?:v/(?:[^/]+/)?|videos/))(?P<id>\d{7,})' 25 _NETRC_MACHINE = 'globo' 26 _TESTS = [{ 27 'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/', 28 'info_dict': { 29 'id': '3607726', 30 'ext': 'mp4', 31 'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa', 32 'duration': 103.204, 33 'uploader': 'G1', 34 'uploader_id': '2015', 35 }, 36 'params': { 37 'skip_download': True, 38 }, 39 }, { 40 'url': 'http://globoplay.globo.com/v/4581987/', 41 'info_dict': { 42 'id': '4581987', 43 'ext': 'mp4', 44 'title': 'Acidentes de trânsito estão entre as maiores causas de queda de energia em SP', 45 'duration': 137.973, 46 'uploader': 'Rede Globo', 47 'uploader_id': '196', 48 }, 49 'params': { 50 'skip_download': True, 51 }, 52 }, { 53 'url': 'http://canalbrasil.globo.com/programas/sangue-latino/videos/3928201.html', 54 'only_matching': True, 55 }, { 56 'url': 'http://globosatplay.globo.com/globonews/v/4472924/', 57 'only_matching': True, 58 }, { 59 'url': 'http://globotv.globo.com/t/programa/v/clipe-sexo-e-as-negas-adeus/3836166/', 60 'only_matching': True, 61 }, { 62 'url': 'http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/', 63 'only_matching': True, 64 }, { 65 'url': 'http://canaloff.globo.com/programas/desejar-profundo/videos/4518560.html', 66 'only_matching': True, 67 }, { 68 'url': 'globo:3607726', 69 'only_matching': True, 70 }] 71 72 def _real_extract(self, url): 73 video_id = self._match_id(url) 74 75 video = self._download_json( 76 
'http://api.globovideos.com/videos/%s/playlist' % video_id, 77 video_id)['videos'][0] 78 if not self.get_param('allow_unplayable_formats') and video.get('encrypted') is True: 79 self.report_drm(video_id) 80 81 title = video['title'] 82 83 formats = [] 84 security = self._download_json( 85 'https://playback.video.globo.com/v1/video-session', video_id, 'Downloading security hash for %s' % video_id, 86 headers={'content-type': 'application/json'}, data=json.dumps({ 87 "player_type": "desktop", 88 "video_id": video_id, 89 "quality": "max", 90 "content_protection": "widevine", 91 "vsid": "581b986b-4c40-71f0-5a58-803e579d5fa2", 92 "tz": "-3.0:00" 93 }).encode()) 94 95 security_hash = security['source']['token'] 96 if not security_hash: 97 message = security.get('message') 98 if message: 99 raise ExtractorError( 100 '%s returned error: %s' % (self.IE_NAME, message), expected=True) 101 102 hash_code = security_hash[:2] 103 padding = '%010d' % random.randint(1, 10000000000) 104 if hash_code in ('04', '14'): 105 received_time = security_hash[3:13] 106 received_md5 = security_hash[24:] 107 hash_prefix = security_hash[:23] 108 elif hash_code in ('02', '12', '03', '13'): 109 received_time = security_hash[2:12] 110 received_md5 = security_hash[22:] 111 padding += '1' 112 hash_prefix = '05' + security_hash[:22] 113 114 padded_sign_time = compat_str(int(received_time) + 86400) + padding 115 md5_data = (received_md5 + padded_sign_time + '0xAC10FD').encode() 116 signed_md5 = base64.urlsafe_b64encode(hashlib.md5(md5_data).digest()).decode().strip('=') 117 signed_hash = hash_prefix + padded_sign_time + signed_md5 118 source = security['source']['url_parts'] 119 resource_url = source['scheme'] + '://' + source['domain'] + source['path'] 120 signed_url = '%s?h=%s&k=html5&a=%s' % (resource_url, signed_hash, 'F' if video.get('subscriber_only') else 'A') 121 122 formats.extend(self._extract_m3u8_formats( 123 signed_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) 124 self._sort_formats(formats) 125 126 subtitles = {} 127 for resource in video['resources']: 128 if resource.get('type') == 'subtitle': 129 subtitles.setdefault(resource.get('language') or 'por', []).append({ 130 'url': resource.get('url'), 131 }) 132 subs = try_get(security, lambda x: x['source']['subtitles'], expected_type=dict) or {} 133 for sub_lang, sub_url in subs.items(): 134 if sub_url: 135 subtitles.setdefault(sub_lang or 'por', []).append({ 136 'url': sub_url, 137 }) 138 subs = try_get(security, lambda x: x['source']['subtitles_webvtt'], expected_type=dict) or {} 139 for sub_lang, sub_url in subs.items(): 140 if sub_url: 141 subtitles.setdefault(sub_lang or 'por', []).append({ 142 'url': sub_url, 143 }) 144 145 duration = float_or_none(video.get('duration'), 1000) 146 uploader = video.get('channel') 147 uploader_id = str_or_none(video.get('channel_id')) 148 149 return { 150 'id': video_id, 151 'title': title, 152 'duration': duration, 153 'uploader': uploader, 154 'uploader_id': uploader_id, 155 'formats': formats, 156 'subtitles': subtitles, 157 } 158 159 160 class GloboArticleIE(InfoExtractor): 161 _VALID_URL = r'https?://.+?\.globo\.com/(?:[^/]+/)*(?P<id>[^/.]+)(?:\.html)?' 
162 163 _VIDEOID_REGEXES = [ 164 r'\bdata-video-id=["\'](\d{7,})', 165 r'\bdata-player-videosids=["\'](\d{7,})', 166 r'\bvideosIDs\s*:\s*["\']?(\d{7,})', 167 r'\bdata-id=["\'](\d{7,})', 168 r'<div[^>]+\bid=["\'](\d{7,})', 169 ] 170 171 _TESTS = [{ 172 'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html', 173 'info_dict': { 174 'id': 'novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes', 175 'title': 'Novidade na fiscalização de bagagem pela Receita provoca discussões', 176 'description': 'md5:c3c4b4d4c30c32fce460040b1ac46b12', 177 }, 178 'playlist_count': 1, 179 }, { 180 'url': 'http://g1.globo.com/pr/parana/noticia/2016/09/mpf-denuncia-lula-marisa-e-mais-seis-na-operacao-lava-jato.html', 181 'info_dict': { 182 'id': 'mpf-denuncia-lula-marisa-e-mais-seis-na-operacao-lava-jato', 183 'title': "Lula era o 'comandante máximo' do esquema da Lava Jato, diz MPF", 184 'description': 'md5:8aa7cc8beda4dc71cc8553e00b77c54c', 185 }, 186 'playlist_count': 6, 187 }, { 188 'url': 'http://gq.globo.com/Prazeres/Poder/noticia/2015/10/all-o-desafio-assista-ao-segundo-capitulo-da-serie.html', 189 'only_matching': True, 190 }, { 191 'url': 'http://gshow.globo.com/programas/tv-xuxa/O-Programa/noticia/2014/01/xuxa-e-junno-namoram-muuuito-em-luau-de-zeze-di-camargo-e-luciano.html', 192 'only_matching': True, 193 }, { 194 'url': 'http://oglobo.globo.com/rio/a-amizade-entre-um-entregador-de-farmacia-um-piano-19946271', 195 'only_matching': True, 196 }] 197 198 @classmethod 199 def suitable(cls, url): 200 return False if GloboIE.suitable(url) else super(GloboArticleIE, cls).suitable(url) 201 202 def _real_extract(self, url): 203 display_id = self._match_id(url) 204 webpage = self._download_webpage(url, display_id) 205 video_ids = [] 206 for video_regex in self._VIDEOID_REGEXES: 207 video_ids.extend(re.findall(video_regex, webpage)) 208 entries = [ 209 self.url_result('globo:%s' % video_id, GloboIE.ie_key()) 210 for video_id in orderedSet(video_ids)] 211 title = self._og_search_title(webpage, fatal=False) 212 description = self._html_search_meta('description', webpage) 213 return self.playlist_result(entries, display_id, title, description) 214 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/yt_dlp/extractor/globo.py b/yt_dlp/extractor/globo.py --- a/yt_dlp/extractor/globo.py +++ b/yt_dlp/extractor/globo.py @@ -12,6 +12,7 @@ compat_str, ) from ..utils import ( + HEADRequest, ExtractorError, float_or_none, orderedSet, @@ -67,11 +68,28 @@ }, { 'url': 'globo:3607726', 'only_matching': True, + }, { + 'url': 'https://globoplay.globo.com/v/10248083/', + 'info_dict': { + 'id': '10248083', + 'ext': 'mp4', + 'title': 'Melhores momentos: Equador 1 x 1 Brasil pelas Eliminatórias da Copa do Mundo 2022', + 'duration': 530.964, + 'uploader': 'SporTV', + 'uploader_id': '698', + }, + 'params': { + 'skip_download': True, + }, }] def _real_extract(self, url): video_id = self._match_id(url) + self._request_webpage( + HEADRequest('https://globo-ab.globo.com/v2/selected-alternatives?experiments=player-isolated-experiment-02&skipImpressions=true'), + video_id, 'Getting cookies') + video = self._download_json( 'http://api.globovideos.com/videos/%s/playlist' % video_id, video_id)['videos'][0] @@ -82,7 +100,7 @@ formats = [] security = self._download_json( - 'https://playback.video.globo.com/v1/video-session', video_id, 'Downloading security hash for %s' % video_id, + 'https://playback.video.globo.com/v2/video-session', video_id, 'Downloading security hash for %s' % video_id, headers={'content-type': 'application/json'}, data=json.dumps({ "player_type": "desktop", "video_id": video_id, @@ -92,7 +110,9 @@ "tz": "-3.0:00" }).encode()) - security_hash = security['source']['token'] + self._request_webpage(HEADRequest(security['sources'][0]['url_template']), video_id, 'Getting locksession cookie') + + security_hash = security['sources'][0]['token'] if not security_hash: message = security.get('message') if message: @@ -115,7 +135,7 @@ md5_data = (received_md5 + padded_sign_time + '0xAC10FD').encode() signed_md5 = base64.urlsafe_b64encode(hashlib.md5(md5_data).digest()).decode().strip('=') signed_hash = hash_prefix + padded_sign_time + signed_md5 - source = security['source']['url_parts'] + source = security['sources'][0]['url_parts'] resource_url = source['scheme'] + '://' + source['domain'] + source['path'] signed_url = '%s?h=%s&k=html5&a=%s' % (resource_url, signed_hash, 'F' if video.get('subscriber_only') else 'A')
{"golden_diff": "diff --git a/yt_dlp/extractor/globo.py b/yt_dlp/extractor/globo.py\n--- a/yt_dlp/extractor/globo.py\n+++ b/yt_dlp/extractor/globo.py\n@@ -12,6 +12,7 @@\n compat_str,\n )\n from ..utils import (\n+ HEADRequest,\n ExtractorError,\n float_or_none,\n orderedSet,\n@@ -67,11 +68,28 @@\n }, {\n 'url': 'globo:3607726',\n 'only_matching': True,\n+ }, {\n+ 'url': 'https://globoplay.globo.com/v/10248083/',\n+ 'info_dict': {\n+ 'id': '10248083',\n+ 'ext': 'mp4',\n+ 'title': 'Melhores momentos: Equador 1 x 1 Brasil pelas Eliminat\u00f3rias da Copa do Mundo 2022',\n+ 'duration': 530.964,\n+ 'uploader': 'SporTV',\n+ 'uploader_id': '698',\n+ },\n+ 'params': {\n+ 'skip_download': True,\n+ },\n }]\n \n def _real_extract(self, url):\n video_id = self._match_id(url)\n \n+ self._request_webpage(\n+ HEADRequest('https://globo-ab.globo.com/v2/selected-alternatives?experiments=player-isolated-experiment-02&skipImpressions=true'),\n+ video_id, 'Getting cookies')\n+\n video = self._download_json(\n 'http://api.globovideos.com/videos/%s/playlist' % video_id,\n video_id)['videos'][0]\n@@ -82,7 +100,7 @@\n \n formats = []\n security = self._download_json(\n- 'https://playback.video.globo.com/v1/video-session', video_id, 'Downloading security hash for %s' % video_id,\n+ 'https://playback.video.globo.com/v2/video-session', video_id, 'Downloading security hash for %s' % video_id,\n headers={'content-type': 'application/json'}, data=json.dumps({\n \"player_type\": \"desktop\",\n \"video_id\": video_id,\n@@ -92,7 +110,9 @@\n \"tz\": \"-3.0:00\"\n }).encode())\n \n- security_hash = security['source']['token']\n+ self._request_webpage(HEADRequest(security['sources'][0]['url_template']), video_id, 'Getting locksession cookie')\n+\n+ security_hash = security['sources'][0]['token']\n if not security_hash:\n message = security.get('message')\n if message:\n@@ -115,7 +135,7 @@\n md5_data = (received_md5 + padded_sign_time + '0xAC10FD').encode()\n signed_md5 = base64.urlsafe_b64encode(hashlib.md5(md5_data).digest()).decode().strip('=')\n signed_hash = hash_prefix + padded_sign_time + signed_md5\n- source = security['source']['url_parts']\n+ source = security['sources'][0]['url_parts']\n resource_url = source['scheme'] + '://' + source['domain'] + source['path']\n signed_url = '%s?h=%s&k=html5&a=%s' % (resource_url, signed_hash, 'F' if video.get('subscriber_only') else 'A')\n", "issue": "apt doesn't keep track of latest version\n### Checklist\r\n\r\n- [X] I'm asking a question and **not** reporting a bug/feature request\r\n- [X] I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)\r\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\r\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar questions including closed ones\r\n\r\n### Question\r\n\r\nYesterday I came online and the first thing I did was to run `sudo apt upgrade yt-dlp`. At the time I had a version 2021-12-27 or something similar. Apt then told me I had the latest version and didn't need to upgrade,\r\n\r\nI run Linux Mint 20.3 and as it happened the gui Update Manager ran at the same time, and it told me I could upgrade to version 2022.01.21, which I did using the Upgrade Manager.\r\n\r\nI have never before experienced the Upgrade Manager telling me about an available upgrade for yt-dlp. Perhaps this is a new interaction. 
In any case I appreciate it since that's the most dead-sure way of catching new upgrades and being up-to-date.\r\n\r\n### Verbose log\r\n\r\n_No response_\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport base64\nimport hashlib\nimport json\nimport random\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import (\n compat_str,\n)\nfrom ..utils import (\n ExtractorError,\n float_or_none,\n orderedSet,\n str_or_none,\n try_get,\n)\n\n\nclass GloboIE(InfoExtractor):\n _VALID_URL = r'(?:globo:|https?://.+?\\.globo\\.com/(?:[^/]+/)*(?:v/(?:[^/]+/)?|videos/))(?P<id>\\d{7,})'\n _NETRC_MACHINE = 'globo'\n _TESTS = [{\n 'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',\n 'info_dict': {\n 'id': '3607726',\n 'ext': 'mp4',\n 'title': 'Mercedes-Benz GLA passa por teste de colis\u00e3o na Europa',\n 'duration': 103.204,\n 'uploader': 'G1',\n 'uploader_id': '2015',\n },\n 'params': {\n 'skip_download': True,\n },\n }, {\n 'url': 'http://globoplay.globo.com/v/4581987/',\n 'info_dict': {\n 'id': '4581987',\n 'ext': 'mp4',\n 'title': 'Acidentes de tr\u00e2nsito est\u00e3o entre as maiores causas de queda de energia em SP',\n 'duration': 137.973,\n 'uploader': 'Rede Globo',\n 'uploader_id': '196',\n },\n 'params': {\n 'skip_download': True,\n },\n }, {\n 'url': 'http://canalbrasil.globo.com/programas/sangue-latino/videos/3928201.html',\n 'only_matching': True,\n }, {\n 'url': 'http://globosatplay.globo.com/globonews/v/4472924/',\n 'only_matching': True,\n }, {\n 'url': 'http://globotv.globo.com/t/programa/v/clipe-sexo-e-as-negas-adeus/3836166/',\n 'only_matching': True,\n }, {\n 'url': 'http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/',\n 'only_matching': True,\n }, {\n 'url': 'http://canaloff.globo.com/programas/desejar-profundo/videos/4518560.html',\n 'only_matching': True,\n }, {\n 'url': 'globo:3607726',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n video = self._download_json(\n 'http://api.globovideos.com/videos/%s/playlist' % video_id,\n video_id)['videos'][0]\n if not self.get_param('allow_unplayable_formats') and video.get('encrypted') is True:\n self.report_drm(video_id)\n\n title = video['title']\n\n formats = []\n security = self._download_json(\n 'https://playback.video.globo.com/v1/video-session', video_id, 'Downloading security hash for %s' % video_id,\n headers={'content-type': 'application/json'}, data=json.dumps({\n \"player_type\": \"desktop\",\n \"video_id\": video_id,\n \"quality\": \"max\",\n \"content_protection\": \"widevine\",\n \"vsid\": \"581b986b-4c40-71f0-5a58-803e579d5fa2\",\n \"tz\": \"-3.0:00\"\n }).encode())\n\n security_hash = security['source']['token']\n if not security_hash:\n message = security.get('message')\n if message:\n raise ExtractorError(\n '%s returned error: %s' % (self.IE_NAME, message), expected=True)\n\n hash_code = security_hash[:2]\n padding = '%010d' % random.randint(1, 10000000000)\n if hash_code in ('04', '14'):\n received_time = security_hash[3:13]\n received_md5 = security_hash[24:]\n hash_prefix = security_hash[:23]\n elif hash_code in ('02', '12', '03', '13'):\n received_time = security_hash[2:12]\n received_md5 = security_hash[22:]\n padding += '1'\n hash_prefix = '05' + security_hash[:22]\n\n padded_sign_time = compat_str(int(received_time) 
+ 86400) + padding\n md5_data = (received_md5 + padded_sign_time + '0xAC10FD').encode()\n signed_md5 = base64.urlsafe_b64encode(hashlib.md5(md5_data).digest()).decode().strip('=')\n signed_hash = hash_prefix + padded_sign_time + signed_md5\n source = security['source']['url_parts']\n resource_url = source['scheme'] + '://' + source['domain'] + source['path']\n signed_url = '%s?h=%s&k=html5&a=%s' % (resource_url, signed_hash, 'F' if video.get('subscriber_only') else 'A')\n\n formats.extend(self._extract_m3u8_formats(\n signed_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))\n self._sort_formats(formats)\n\n subtitles = {}\n for resource in video['resources']:\n if resource.get('type') == 'subtitle':\n subtitles.setdefault(resource.get('language') or 'por', []).append({\n 'url': resource.get('url'),\n })\n subs = try_get(security, lambda x: x['source']['subtitles'], expected_type=dict) or {}\n for sub_lang, sub_url in subs.items():\n if sub_url:\n subtitles.setdefault(sub_lang or 'por', []).append({\n 'url': sub_url,\n })\n subs = try_get(security, lambda x: x['source']['subtitles_webvtt'], expected_type=dict) or {}\n for sub_lang, sub_url in subs.items():\n if sub_url:\n subtitles.setdefault(sub_lang or 'por', []).append({\n 'url': sub_url,\n })\n\n duration = float_or_none(video.get('duration'), 1000)\n uploader = video.get('channel')\n uploader_id = str_or_none(video.get('channel_id'))\n\n return {\n 'id': video_id,\n 'title': title,\n 'duration': duration,\n 'uploader': uploader,\n 'uploader_id': uploader_id,\n 'formats': formats,\n 'subtitles': subtitles,\n }\n\n\nclass GloboArticleIE(InfoExtractor):\n _VALID_URL = r'https?://.+?\\.globo\\.com/(?:[^/]+/)*(?P<id>[^/.]+)(?:\\.html)?'\n\n _VIDEOID_REGEXES = [\n r'\\bdata-video-id=[\"\\'](\\d{7,})',\n r'\\bdata-player-videosids=[\"\\'](\\d{7,})',\n r'\\bvideosIDs\\s*:\\s*[\"\\']?(\\d{7,})',\n r'\\bdata-id=[\"\\'](\\d{7,})',\n r'<div[^>]+\\bid=[\"\\'](\\d{7,})',\n ]\n\n _TESTS = [{\n 'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html',\n 'info_dict': {\n 'id': 'novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes',\n 'title': 'Novidade na fiscaliza\u00e7\u00e3o de bagagem pela Receita provoca discuss\u00f5es',\n 'description': 'md5:c3c4b4d4c30c32fce460040b1ac46b12',\n },\n 'playlist_count': 1,\n }, {\n 'url': 'http://g1.globo.com/pr/parana/noticia/2016/09/mpf-denuncia-lula-marisa-e-mais-seis-na-operacao-lava-jato.html',\n 'info_dict': {\n 'id': 'mpf-denuncia-lula-marisa-e-mais-seis-na-operacao-lava-jato',\n 'title': \"Lula era o 'comandante m\u00e1ximo' do esquema da Lava Jato, diz MPF\",\n 'description': 'md5:8aa7cc8beda4dc71cc8553e00b77c54c',\n },\n 'playlist_count': 6,\n }, {\n 'url': 'http://gq.globo.com/Prazeres/Poder/noticia/2015/10/all-o-desafio-assista-ao-segundo-capitulo-da-serie.html',\n 'only_matching': True,\n }, {\n 'url': 'http://gshow.globo.com/programas/tv-xuxa/O-Programa/noticia/2014/01/xuxa-e-junno-namoram-muuuito-em-luau-de-zeze-di-camargo-e-luciano.html',\n 'only_matching': True,\n }, {\n 'url': 'http://oglobo.globo.com/rio/a-amizade-entre-um-entregador-de-farmacia-um-piano-19946271',\n 'only_matching': True,\n }]\n\n @classmethod\n def suitable(cls, url):\n return False if GloboIE.suitable(url) else super(GloboArticleIE, cls).suitable(url)\n\n def _real_extract(self, url):\n display_id = self._match_id(url)\n webpage = self._download_webpage(url, display_id)\n video_ids = []\n for video_regex 
in self._VIDEOID_REGEXES:\n video_ids.extend(re.findall(video_regex, webpage))\n entries = [\n self.url_result('globo:%s' % video_id, GloboIE.ie_key())\n for video_id in orderedSet(video_ids)]\n title = self._og_search_title(webpage, fatal=False)\n description = self._html_search_meta('description', webpage)\n return self.playlist_result(entries, display_id, title, description)\n", "path": "yt_dlp/extractor/globo.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport base64\nimport hashlib\nimport json\nimport random\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import (\n compat_str,\n)\nfrom ..utils import (\n HEADRequest,\n ExtractorError,\n float_or_none,\n orderedSet,\n str_or_none,\n try_get,\n)\n\n\nclass GloboIE(InfoExtractor):\n _VALID_URL = r'(?:globo:|https?://.+?\\.globo\\.com/(?:[^/]+/)*(?:v/(?:[^/]+/)?|videos/))(?P<id>\\d{7,})'\n _NETRC_MACHINE = 'globo'\n _TESTS = [{\n 'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',\n 'info_dict': {\n 'id': '3607726',\n 'ext': 'mp4',\n 'title': 'Mercedes-Benz GLA passa por teste de colis\u00e3o na Europa',\n 'duration': 103.204,\n 'uploader': 'G1',\n 'uploader_id': '2015',\n },\n 'params': {\n 'skip_download': True,\n },\n }, {\n 'url': 'http://globoplay.globo.com/v/4581987/',\n 'info_dict': {\n 'id': '4581987',\n 'ext': 'mp4',\n 'title': 'Acidentes de tr\u00e2nsito est\u00e3o entre as maiores causas de queda de energia em SP',\n 'duration': 137.973,\n 'uploader': 'Rede Globo',\n 'uploader_id': '196',\n },\n 'params': {\n 'skip_download': True,\n },\n }, {\n 'url': 'http://canalbrasil.globo.com/programas/sangue-latino/videos/3928201.html',\n 'only_matching': True,\n }, {\n 'url': 'http://globosatplay.globo.com/globonews/v/4472924/',\n 'only_matching': True,\n }, {\n 'url': 'http://globotv.globo.com/t/programa/v/clipe-sexo-e-as-negas-adeus/3836166/',\n 'only_matching': True,\n }, {\n 'url': 'http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/',\n 'only_matching': True,\n }, {\n 'url': 'http://canaloff.globo.com/programas/desejar-profundo/videos/4518560.html',\n 'only_matching': True,\n }, {\n 'url': 'globo:3607726',\n 'only_matching': True,\n }, {\n 'url': 'https://globoplay.globo.com/v/10248083/',\n 'info_dict': {\n 'id': '10248083',\n 'ext': 'mp4',\n 'title': 'Melhores momentos: Equador 1 x 1 Brasil pelas Eliminat\u00f3rias da Copa do Mundo 2022',\n 'duration': 530.964,\n 'uploader': 'SporTV',\n 'uploader_id': '698',\n },\n 'params': {\n 'skip_download': True,\n },\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n self._request_webpage(\n HEADRequest('https://globo-ab.globo.com/v2/selected-alternatives?experiments=player-isolated-experiment-02&skipImpressions=true'),\n video_id, 'Getting cookies')\n\n video = self._download_json(\n 'http://api.globovideos.com/videos/%s/playlist' % video_id,\n video_id)['videos'][0]\n if not self.get_param('allow_unplayable_formats') and video.get('encrypted') is True:\n self.report_drm(video_id)\n\n title = video['title']\n\n formats = []\n security = self._download_json(\n 'https://playback.video.globo.com/v2/video-session', video_id, 'Downloading security hash for %s' % video_id,\n headers={'content-type': 'application/json'}, data=json.dumps({\n \"player_type\": \"desktop\",\n \"video_id\": video_id,\n \"quality\": \"max\",\n 
\"content_protection\": \"widevine\",\n \"vsid\": \"581b986b-4c40-71f0-5a58-803e579d5fa2\",\n \"tz\": \"-3.0:00\"\n }).encode())\n\n self._request_webpage(HEADRequest(security['sources'][0]['url_template']), video_id, 'Getting locksession cookie')\n\n security_hash = security['sources'][0]['token']\n if not security_hash:\n message = security.get('message')\n if message:\n raise ExtractorError(\n '%s returned error: %s' % (self.IE_NAME, message), expected=True)\n\n hash_code = security_hash[:2]\n padding = '%010d' % random.randint(1, 10000000000)\n if hash_code in ('04', '14'):\n received_time = security_hash[3:13]\n received_md5 = security_hash[24:]\n hash_prefix = security_hash[:23]\n elif hash_code in ('02', '12', '03', '13'):\n received_time = security_hash[2:12]\n received_md5 = security_hash[22:]\n padding += '1'\n hash_prefix = '05' + security_hash[:22]\n\n padded_sign_time = compat_str(int(received_time) + 86400) + padding\n md5_data = (received_md5 + padded_sign_time + '0xAC10FD').encode()\n signed_md5 = base64.urlsafe_b64encode(hashlib.md5(md5_data).digest()).decode().strip('=')\n signed_hash = hash_prefix + padded_sign_time + signed_md5\n source = security['sources'][0]['url_parts']\n resource_url = source['scheme'] + '://' + source['domain'] + source['path']\n signed_url = '%s?h=%s&k=html5&a=%s' % (resource_url, signed_hash, 'F' if video.get('subscriber_only') else 'A')\n\n formats.extend(self._extract_m3u8_formats(\n signed_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))\n self._sort_formats(formats)\n\n subtitles = {}\n for resource in video['resources']:\n if resource.get('type') == 'subtitle':\n subtitles.setdefault(resource.get('language') or 'por', []).append({\n 'url': resource.get('url'),\n })\n subs = try_get(security, lambda x: x['source']['subtitles'], expected_type=dict) or {}\n for sub_lang, sub_url in subs.items():\n if sub_url:\n subtitles.setdefault(sub_lang or 'por', []).append({\n 'url': sub_url,\n })\n subs = try_get(security, lambda x: x['source']['subtitles_webvtt'], expected_type=dict) or {}\n for sub_lang, sub_url in subs.items():\n if sub_url:\n subtitles.setdefault(sub_lang or 'por', []).append({\n 'url': sub_url,\n })\n\n duration = float_or_none(video.get('duration'), 1000)\n uploader = video.get('channel')\n uploader_id = str_or_none(video.get('channel_id'))\n\n return {\n 'id': video_id,\n 'title': title,\n 'duration': duration,\n 'uploader': uploader,\n 'uploader_id': uploader_id,\n 'formats': formats,\n 'subtitles': subtitles,\n }\n\n\nclass GloboArticleIE(InfoExtractor):\n _VALID_URL = r'https?://.+?\\.globo\\.com/(?:[^/]+/)*(?P<id>[^/.]+)(?:\\.html)?'\n\n _VIDEOID_REGEXES = [\n r'\\bdata-video-id=[\"\\'](\\d{7,})',\n r'\\bdata-player-videosids=[\"\\'](\\d{7,})',\n r'\\bvideosIDs\\s*:\\s*[\"\\']?(\\d{7,})',\n r'\\bdata-id=[\"\\'](\\d{7,})',\n r'<div[^>]+\\bid=[\"\\'](\\d{7,})',\n ]\n\n _TESTS = [{\n 'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html',\n 'info_dict': {\n 'id': 'novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes',\n 'title': 'Novidade na fiscaliza\u00e7\u00e3o de bagagem pela Receita provoca discuss\u00f5es',\n 'description': 'md5:c3c4b4d4c30c32fce460040b1ac46b12',\n },\n 'playlist_count': 1,\n }, {\n 'url': 'http://g1.globo.com/pr/parana/noticia/2016/09/mpf-denuncia-lula-marisa-e-mais-seis-na-operacao-lava-jato.html',\n 'info_dict': {\n 'id': 
'mpf-denuncia-lula-marisa-e-mais-seis-na-operacao-lava-jato',\n 'title': \"Lula era o 'comandante m\u00e1ximo' do esquema da Lava Jato, diz MPF\",\n 'description': 'md5:8aa7cc8beda4dc71cc8553e00b77c54c',\n },\n 'playlist_count': 6,\n }, {\n 'url': 'http://gq.globo.com/Prazeres/Poder/noticia/2015/10/all-o-desafio-assista-ao-segundo-capitulo-da-serie.html',\n 'only_matching': True,\n }, {\n 'url': 'http://gshow.globo.com/programas/tv-xuxa/O-Programa/noticia/2014/01/xuxa-e-junno-namoram-muuuito-em-luau-de-zeze-di-camargo-e-luciano.html',\n 'only_matching': True,\n }, {\n 'url': 'http://oglobo.globo.com/rio/a-amizade-entre-um-entregador-de-farmacia-um-piano-19946271',\n 'only_matching': True,\n }]\n\n @classmethod\n def suitable(cls, url):\n return False if GloboIE.suitable(url) else super(GloboArticleIE, cls).suitable(url)\n\n def _real_extract(self, url):\n display_id = self._match_id(url)\n webpage = self._download_webpage(url, display_id)\n video_ids = []\n for video_regex in self._VIDEOID_REGEXES:\n video_ids.extend(re.findall(video_regex, webpage))\n entries = [\n self.url_result('globo:%s' % video_id, GloboIE.ie_key())\n for video_id in orderedSet(video_ids)]\n title = self._og_search_title(webpage, fatal=False)\n description = self._html_search_meta('description', webpage)\n return self.playlist_result(entries, display_id, title, description)\n", "path": "yt_dlp/extractor/globo.py"}]}
3,600
794
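For the Globo record above, the substance of the patch is an API migration: the extractor posts the same payload to the `/v2/video-session` endpoint, primes cookies with HEAD requests first, and reads `sources[0]` instead of the old singular `source` key. The sketch below restates just the v2 exchange outside of yt-dlp (the cookie-priming HEAD requests and the token signing are left out), using only the standard library. The endpoint, payload keys, and response shape are copied from the record and should be treated as a snapshot of Globo's service rather than a stable contract; the helper names are mine.

```python
# Illustrative sketch of the v2 video-session exchange the diff switches to.
import json
import urllib.request


def fetch_security(video_id):
    payload = json.dumps({
        "player_type": "desktop",
        "video_id": video_id,
        "quality": "max",
        "content_protection": "widevine",
        "vsid": "581b986b-4c40-71f0-5a58-803e579d5fa2",
        "tz": "-3.0:00",
    }).encode()
    req = urllib.request.Request(
        "https://playback.video.globo.com/v2/video-session",
        data=payload,
        headers={"content-type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)


def first_source(security):
    # v1 exposed a single `source` object; v2 returns a `sources` list, which
    # is why the patch switches to security["sources"][0] for the token,
    # url_template and url_parts lookups.
    return security["sources"][0]
```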
gh_patches_debug_113
rasdani/github-patches
git_diff
elastic__apm-agent-python-1494
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [META 576] Sanitize `*auth*` instead of `authorization` [![issue details](https://img.shields.io/endpoint?label=meta-issue:&url=https%3A%2F%2Fgiss.app.elstc.co%2Fapi%2Fstatus%2Felastic%2Fapm%2F576)](https://github.com/elastic/apm/issues/576) [![issue details](https://img.shields.io/endpoint?label=spec-issue:&url=https%3A%2F%2Fgiss.app.elstc.co%2Fapi%2Fstatus%2Felastic%2Fapm%2F577)](https://github.com/elastic/apm/issues/577) Sanitize `*auth*` instead of `authorization` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `elasticapm/conf/constants.py` Content: ``` 1 # BSD 3-Clause License 2 # 3 # Copyright (c) 2019, Elasticsearch BV 4 # All rights reserved. 5 # 6 # Redistribution and use in source and binary forms, with or without 7 # modification, are permitted provided that the following conditions are met: 8 # 9 # * Redistributions of source code must retain the above copyright notice, this 10 # list of conditions and the following disclaimer. 11 # 12 # * Redistributions in binary form must reproduce the above copyright notice, 13 # this list of conditions and the following disclaimer in the documentation 14 # and/or other materials provided with the distribution. 15 # 16 # * Neither the name of the copyright holder nor the names of its 17 # contributors may be used to endorse or promote products derived from 18 # this software without specific prior written permission. 19 # 20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 31 import decimal 32 import re 33 from collections import namedtuple 34 35 36 def _starmatch_to_regex(pattern): 37 """ 38 This is a duplicate of starmatch_to_regex() in utils/__init__.py 39 40 Duplication to avoid circular imports 41 """ 42 options = re.DOTALL 43 # check if we are case sensitive 44 if pattern.startswith("(?-i)"): 45 pattern = pattern[5:] 46 else: 47 options |= re.IGNORECASE 48 i, n = 0, len(pattern) 49 res = [] 50 while i < n: 51 c = pattern[i] 52 i = i + 1 53 if c == "*": 54 res.append(".*") 55 else: 56 res.append(re.escape(c)) 57 return re.compile(r"(?:%s)\Z" % "".join(res), options) 58 59 60 EVENTS_API_PATH = "intake/v2/events" 61 AGENT_CONFIG_PATH = "config/v1/agents" 62 SERVER_INFO_PATH = "" 63 64 TRACE_CONTEXT_VERSION = 0 65 TRACEPARENT_HEADER_NAME = "traceparent" 66 TRACEPARENT_LEGACY_HEADER_NAME = "elastic-apm-traceparent" 67 TRACESTATE_HEADER_NAME = "tracestate" 68 69 TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" 70 71 KEYWORD_MAX_LENGTH = 1024 72 73 HTTP_WITH_BODY = {"POST", "PUT", "PATCH", "DELETE"} 74 75 MASK = "[REDACTED]" 76 77 EXCEPTION_CHAIN_MAX_DEPTH = 50 78 79 ERROR = "error" 80 TRANSACTION = "transaction" 81 SPAN = "span" 82 METRICSET = "metricset" 83 84 LABEL_RE = re.compile('[.*"]') 85 86 HARDCODED_PROCESSORS = ["elasticapm.processors.add_context_lines_to_frames"] 87 88 BASE_SANITIZE_FIELD_NAMES_UNPROCESSED = [ 89 "password", 90 "passwd", 91 "pwd", 92 "secret", 93 "*key", 94 "*token*", 95 "*session*", 96 "*credit*", 97 "*card*", 98 "authorization", 99 "set-cookie", 100 ] 101 102 BASE_SANITIZE_FIELD_NAMES = [_starmatch_to_regex(x) for x in BASE_SANITIZE_FIELD_NAMES_UNPROCESSED] 103 104 OUTCOME = namedtuple("OUTCOME", ["SUCCESS", "FAILURE", "UNKNOWN"])( 105 SUCCESS="success", FAILURE="failure", UNKNOWN="unknown" 106 ) 107 108 try: 109 # Python 2 110 LABEL_TYPES = (bool, int, long, float, decimal.Decimal) 111 except NameError: 112 # Python 3 113 LABEL_TYPES = (bool, int, float, decimal.Decimal) 114 115 TRACESTATE = namedtuple("TRACESTATE", ["SAMPLE_RATE"])(SAMPLE_RATE="s") 116 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/elasticapm/conf/constants.py b/elasticapm/conf/constants.py --- a/elasticapm/conf/constants.py +++ b/elasticapm/conf/constants.py @@ -95,7 +95,7 @@ "*session*", "*credit*", "*card*", - "authorization", + "*auth*", "set-cookie", ]
{"golden_diff": "diff --git a/elasticapm/conf/constants.py b/elasticapm/conf/constants.py\n--- a/elasticapm/conf/constants.py\n+++ b/elasticapm/conf/constants.py\n@@ -95,7 +95,7 @@\n \"*session*\",\n \"*credit*\",\n \"*card*\",\n- \"authorization\",\n+ \"*auth*\",\n \"set-cookie\",\n ]\n", "issue": "[META 576] Sanitize `*auth*` instead of `authorization`\n[![issue details](https://img.shields.io/endpoint?label=meta-issue:&url=https%3A%2F%2Fgiss.app.elstc.co%2Fapi%2Fstatus%2Felastic%2Fapm%2F576)](https://github.com/elastic/apm/issues/576)\n\n[![issue details](https://img.shields.io/endpoint?label=spec-issue:&url=https%3A%2F%2Fgiss.app.elstc.co%2Fapi%2Fstatus%2Felastic%2Fapm%2F577)](https://github.com/elastic/apm/issues/577)\n\nSanitize `*auth*` instead of `authorization`\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport decimal\nimport re\nfrom collections import namedtuple\n\n\ndef _starmatch_to_regex(pattern):\n \"\"\"\n This is a duplicate of starmatch_to_regex() in utils/__init__.py\n\n Duplication to avoid circular imports\n \"\"\"\n options = re.DOTALL\n # check if we are case sensitive\n if pattern.startswith(\"(?-i)\"):\n pattern = pattern[5:]\n else:\n options |= re.IGNORECASE\n i, n = 0, len(pattern)\n res = []\n while i < n:\n c = pattern[i]\n i = i + 1\n if c == \"*\":\n res.append(\".*\")\n else:\n res.append(re.escape(c))\n return re.compile(r\"(?:%s)\\Z\" % \"\".join(res), options)\n\n\nEVENTS_API_PATH = \"intake/v2/events\"\nAGENT_CONFIG_PATH = \"config/v1/agents\"\nSERVER_INFO_PATH = \"\"\n\nTRACE_CONTEXT_VERSION = 0\nTRACEPARENT_HEADER_NAME = \"traceparent\"\nTRACEPARENT_LEGACY_HEADER_NAME = \"elastic-apm-traceparent\"\nTRACESTATE_HEADER_NAME = \"tracestate\"\n\nTIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nKEYWORD_MAX_LENGTH = 1024\n\nHTTP_WITH_BODY = {\"POST\", \"PUT\", \"PATCH\", \"DELETE\"}\n\nMASK = \"[REDACTED]\"\n\nEXCEPTION_CHAIN_MAX_DEPTH = 50\n\nERROR = \"error\"\nTRANSACTION = \"transaction\"\nSPAN = \"span\"\nMETRICSET = \"metricset\"\n\nLABEL_RE = re.compile('[.*\"]')\n\nHARDCODED_PROCESSORS = [\"elasticapm.processors.add_context_lines_to_frames\"]\n\nBASE_SANITIZE_FIELD_NAMES_UNPROCESSED = [\n \"password\",\n \"passwd\",\n \"pwd\",\n \"secret\",\n \"*key\",\n \"*token*\",\n \"*session*\",\n \"*credit*\",\n \"*card*\",\n \"authorization\",\n \"set-cookie\",\n]\n\nBASE_SANITIZE_FIELD_NAMES = [_starmatch_to_regex(x) for x in BASE_SANITIZE_FIELD_NAMES_UNPROCESSED]\n\nOUTCOME = namedtuple(\"OUTCOME\", [\"SUCCESS\", \"FAILURE\", \"UNKNOWN\"])(\n SUCCESS=\"success\", FAILURE=\"failure\", UNKNOWN=\"unknown\"\n)\n\ntry:\n # Python 2\n LABEL_TYPES = (bool, int, long, float, decimal.Decimal)\nexcept NameError:\n # Python 3\n LABEL_TYPES = (bool, int, float, decimal.Decimal)\n\nTRACESTATE = namedtuple(\"TRACESTATE\", [\"SAMPLE_RATE\"])(SAMPLE_RATE=\"s\")\n", "path": "elasticapm/conf/constants.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport decimal\nimport re\nfrom collections import namedtuple\n\n\ndef _starmatch_to_regex(pattern):\n \"\"\"\n This is a duplicate of starmatch_to_regex() in utils/__init__.py\n\n Duplication to avoid circular imports\n \"\"\"\n options = re.DOTALL\n # check if we are case sensitive\n if pattern.startswith(\"(?-i)\"):\n pattern = pattern[5:]\n else:\n options |= re.IGNORECASE\n i, n = 0, len(pattern)\n res = []\n while i < n:\n c = pattern[i]\n i = i + 1\n if c == \"*\":\n res.append(\".*\")\n else:\n res.append(re.escape(c))\n return re.compile(r\"(?:%s)\\Z\" % \"\".join(res), options)\n\n\nEVENTS_API_PATH = \"intake/v2/events\"\nAGENT_CONFIG_PATH = \"config/v1/agents\"\nSERVER_INFO_PATH = \"\"\n\nTRACE_CONTEXT_VERSION = 0\nTRACEPARENT_HEADER_NAME = \"traceparent\"\nTRACEPARENT_LEGACY_HEADER_NAME = \"elastic-apm-traceparent\"\nTRACESTATE_HEADER_NAME = \"tracestate\"\n\nTIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nKEYWORD_MAX_LENGTH = 1024\n\nHTTP_WITH_BODY = {\"POST\", \"PUT\", \"PATCH\", \"DELETE\"}\n\nMASK = \"[REDACTED]\"\n\nEXCEPTION_CHAIN_MAX_DEPTH = 50\n\nERROR = \"error\"\nTRANSACTION = \"transaction\"\nSPAN = \"span\"\nMETRICSET = \"metricset\"\n\nLABEL_RE = re.compile('[.*\"]')\n\nHARDCODED_PROCESSORS = [\"elasticapm.processors.add_context_lines_to_frames\"]\n\nBASE_SANITIZE_FIELD_NAMES_UNPROCESSED = [\n \"password\",\n \"passwd\",\n \"pwd\",\n \"secret\",\n \"*key\",\n \"*token*\",\n \"*session*\",\n \"*credit*\",\n \"*card*\",\n \"*auth*\",\n \"set-cookie\",\n]\n\nBASE_SANITIZE_FIELD_NAMES = [_starmatch_to_regex(x) for x in BASE_SANITIZE_FIELD_NAMES_UNPROCESSED]\n\nOUTCOME = namedtuple(\"OUTCOME\", [\"SUCCESS\", \"FAILURE\", \"UNKNOWN\"])(\n SUCCESS=\"success\", FAILURE=\"failure\", UNKNOWN=\"unknown\"\n)\n\ntry:\n # Python 2\n LABEL_TYPES = (bool, int, long, float, decimal.Decimal)\nexcept NameError:\n # Python 3\n LABEL_TYPES = (bool, int, float, decimal.Decimal)\n\nTRACESTATE = namedtuple(\"TRACESTATE\", [\"SAMPLE_RATE\"])(SAMPLE_RATE=\"s\")\n", "path": "elasticapm/conf/constants.py"}]}
1,584
84
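The elasticapm change above is a one-line swap, but its effect is easiest to see by running both sanitization entries through the same wildcard-to-regex compiler defined in `constants.py`. The snippet below uses a condensed copy of `_starmatch_to_regex()` plus a few made-up header names to show what the old literal `authorization` entry missed and what `*auth*` now catches.

```python
# Demo of the sanitization pattern change; header names are examples only.
import re


def starmatch_to_regex(pattern):
    # Condensed copy of _starmatch_to_regex() from constants.py above.
    options = re.DOTALL
    if pattern.startswith("(?-i)"):
        pattern = pattern[5:]
    else:
        options |= re.IGNORECASE
    parts = [".*" if c == "*" else re.escape(c) for c in pattern]
    return re.compile(r"(?:%s)\Z" % "".join(parts), options)


old = starmatch_to_regex("authorization")
new = starmatch_to_regex("*auth*")

for header in ("Authorization", "proxy-authorization", "X-Auth-Token", "author"):
    print(header, bool(old.match(header)), bool(new.match(header)))
# Only "Authorization" matches the old literal entry; "*auth*" also redacts
# proxy-authorization and X-Auth-Token (and, more broadly, anything
# containing "auth").
```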
gh_patches_debug_39602
rasdani/github-patches
git_diff
microsoft__playwright-python-222
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Running playwright raises PermissionError on Linux ### Reproducing the error - Created a virtual environment - Installed playwright with `python -m pip install playwright` Tried to run some code ```python from playwright import sync_playwright with sync_playwright() as p: for browser_type in [p.chromium, p.firefox, p.webkit]: browser = browser_type.launch() page = browser.newPage() page.goto('http://whatsmyuseragent.org/') page.screenshot(path=f'example-{browser_type.name}.png') browser.close() ``` Then it raised this error ```python PermissionError: [Errno 13] Permission denied: '/home/leno/Desktop/open-source/pwright/env/lib/python3.8/site-packages/playwright/drivers/driver-linux ``` I think this is not a normal behavior since running Python under sudo is a terrible idea. **OS**: Ubuntu 20.04 **Python Version**: 3.8.2 **Playwright Version**: 0.142.3 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `playwright/path_utils.py` Content: ``` 1 import inspect 2 from pathlib import Path 3 4 5 def get_file_dirname() -> Path: 6 """Returns the callee (`__file__`) directory name""" 7 frame = inspect.stack()[1] 8 module = inspect.getmodule(frame[0]) 9 assert module 10 return Path(module.__file__).parent.absolute() 11 ``` Path: `playwright/main.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import asyncio 16 import io 17 import os 18 import stat 19 import subprocess 20 import sys 21 from pathlib import Path 22 from typing import Any 23 24 from greenlet import greenlet 25 26 from playwright.async_api import Playwright as AsyncPlaywright 27 from playwright.connection import Connection 28 from playwright.helper import Error 29 from playwright.object_factory import create_remote_object 30 from playwright.path_utils import get_file_dirname 31 from playwright.playwright import Playwright 32 from playwright.sync_api import Playwright as SyncPlaywright 33 from playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber 34 35 36 def compute_driver_executable() -> Path: 37 package_path = get_file_dirname() 38 platform = sys.platform 39 if platform == "darwin": 40 return package_path / "drivers" / "driver-darwin" 41 elif platform == "linux": 42 return package_path / "drivers" / "driver-linux" 43 elif platform == "win32": 44 result = package_path / "drivers" / "driver-win32-amd64.exe" 45 if result.exists(): 46 return result 47 return package_path / "drivers" / "driver-win32.exe" 48 return package_path / "drivers" / "driver-linux" 49 50 51 async def run_driver_async() -> Connection: 52 driver_executable = compute_driver_executable() 53 54 # Sourced from: https://github.com/pytest-dev/pytest/blob/49827adcb9256c9c9c06a25729421dcc3c385edc/src/_pytest/faulthandler.py#L73-L80 55 def _get_stderr_fileno() -> int: 56 try: 57 return sys.stderr.fileno() 58 except io.UnsupportedOperation: 59 # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file. 60 # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors 61 # This is potentially dangerous, but the best we can do. 62 return sys.__stderr__.fileno() 63 64 proc = await asyncio.create_subprocess_exec( 65 str(driver_executable), 66 stdin=asyncio.subprocess.PIPE, 67 stdout=asyncio.subprocess.PIPE, 68 stderr=_get_stderr_fileno(), 69 limit=32768, 70 ) 71 assert proc.stdout 72 assert proc.stdin 73 connection = Connection( 74 proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop() 75 ) 76 return connection 77 78 79 def run_driver() -> Connection: 80 loop = asyncio.get_event_loop() 81 if loop.is_running(): 82 raise Error("Can only run one Playwright at a time.") 83 return loop.run_until_complete(run_driver_async()) 84 85 86 class SyncPlaywrightContextManager: 87 def __init__(self) -> None: 88 self._connection = run_driver() 89 self._playwright: SyncPlaywright 90 91 def __enter__(self) -> SyncPlaywright: 92 g_self = greenlet.getcurrent() 93 94 def callback_wrapper(playwright_impl: Playwright) -> None: 95 self._playwright = SyncPlaywright(playwright_impl) 96 g_self.switch() 97 98 self._connection.call_on_object_with_known_name("Playwright", callback_wrapper) 99 set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync())) 100 dispatcher_fiber().switch() 101 playwright = self._playwright 102 playwright.stop = self.__exit__ # type: ignore 103 return playwright 104 105 def start(self) -> SyncPlaywright: 106 return self.__enter__() 107 108 def __exit__(self, *args: Any) -> None: 109 self._connection.stop_sync() 110 111 112 class AsyncPlaywrightContextManager: 113 def __init__(self) -> None: 114 self._connection: Connection 115 116 async def __aenter__(self) -> AsyncPlaywright: 117 self._connection = await run_driver_async() 118 self._connection.run_async() 119 playwright = AsyncPlaywright( 120 await self._connection.wait_for_object_with_known_name("Playwright") 121 ) 122 
playwright.stop = self.__aexit__ # type: ignore 123 return playwright 124 125 async def start(self) -> AsyncPlaywright: 126 return await self.__aenter__() 127 128 async def __aexit__(self, *args: Any) -> None: 129 self._connection.stop_async() 130 131 132 if sys.platform == "win32": 133 # Use ProactorEventLoop in 3.7, which is default in 3.8 134 loop = asyncio.ProactorEventLoop() 135 asyncio.set_event_loop(loop) 136 137 138 def main() -> None: 139 if "install" not in sys.argv: 140 print('Run "python -m playwright install" to complete installation') 141 return 142 driver_executable = compute_driver_executable() 143 # Fix the executable bit during the installation. 144 if not sys.platform == "win32": 145 st = os.stat(driver_executable) 146 if st.st_mode & stat.S_IEXEC == 0: 147 os.chmod(driver_executable, st.st_mode | stat.S_IEXEC) 148 print("Installing the browsers...") 149 subprocess.check_call([str(driver_executable), "install"]) 150 151 print("Playwright is now ready for use") 152 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/playwright/main.py b/playwright/main.py --- a/playwright/main.py +++ b/playwright/main.py @@ -14,8 +14,6 @@ import asyncio import io -import os -import stat import subprocess import sys from pathlib import Path @@ -27,7 +25,7 @@ from playwright.connection import Connection from playwright.helper import Error from playwright.object_factory import create_remote_object -from playwright.path_utils import get_file_dirname +from playwright.path_utils import get_file_dirname, make_file_executable from playwright.playwright import Playwright from playwright.sync_api import Playwright as SyncPlaywright from playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber @@ -37,15 +35,19 @@ package_path = get_file_dirname() platform = sys.platform if platform == "darwin": - return package_path / "drivers" / "driver-darwin" + path = package_path / "drivers" / "driver-darwin" + return make_file_executable(path) elif platform == "linux": - return package_path / "drivers" / "driver-linux" + path = package_path / "drivers" / "driver-linux" + return make_file_executable(path) elif platform == "win32": result = package_path / "drivers" / "driver-win32-amd64.exe" if result.exists(): return result return package_path / "drivers" / "driver-win32.exe" - return package_path / "drivers" / "driver-linux" + + path = package_path / "drivers" / "driver-linux" + return make_file_executable(path) async def run_driver_async() -> Connection: @@ -140,11 +142,7 @@ print('Run "python -m playwright install" to complete installation') return driver_executable = compute_driver_executable() - # Fix the executable bit during the installation. - if not sys.platform == "win32": - st = os.stat(driver_executable) - if st.st_mode & stat.S_IEXEC == 0: - os.chmod(driver_executable, st.st_mode | stat.S_IEXEC) + print("Installing the browsers...") subprocess.check_call([str(driver_executable), "install"]) diff --git a/playwright/path_utils.py b/playwright/path_utils.py --- a/playwright/path_utils.py +++ b/playwright/path_utils.py @@ -1,4 +1,5 @@ import inspect +import stat from pathlib import Path @@ -8,3 +9,9 @@ module = inspect.getmodule(frame[0]) assert module return Path(module.__file__).parent.absolute() + + +def make_file_executable(file_path: Path) -> Path: + """Makes a file executable.""" + file_path.chmod(file_path.stat().st_mode | stat.S_IEXEC) + return file_path
{"golden_diff": "diff --git a/playwright/main.py b/playwright/main.py\n--- a/playwright/main.py\n+++ b/playwright/main.py\n@@ -14,8 +14,6 @@\n \n import asyncio\n import io\n-import os\n-import stat\n import subprocess\n import sys\n from pathlib import Path\n@@ -27,7 +25,7 @@\n from playwright.connection import Connection\n from playwright.helper import Error\n from playwright.object_factory import create_remote_object\n-from playwright.path_utils import get_file_dirname\n+from playwright.path_utils import get_file_dirname, make_file_executable\n from playwright.playwright import Playwright\n from playwright.sync_api import Playwright as SyncPlaywright\n from playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber\n@@ -37,15 +35,19 @@\n package_path = get_file_dirname()\n platform = sys.platform\n if platform == \"darwin\":\n- return package_path / \"drivers\" / \"driver-darwin\"\n+ path = package_path / \"drivers\" / \"driver-darwin\"\n+ return make_file_executable(path)\n elif platform == \"linux\":\n- return package_path / \"drivers\" / \"driver-linux\"\n+ path = package_path / \"drivers\" / \"driver-linux\"\n+ return make_file_executable(path)\n elif platform == \"win32\":\n result = package_path / \"drivers\" / \"driver-win32-amd64.exe\"\n if result.exists():\n return result\n return package_path / \"drivers\" / \"driver-win32.exe\"\n- return package_path / \"drivers\" / \"driver-linux\"\n+\n+ path = package_path / \"drivers\" / \"driver-linux\"\n+ return make_file_executable(path)\n \n \n async def run_driver_async() -> Connection:\n@@ -140,11 +142,7 @@\n print('Run \"python -m playwright install\" to complete installation')\n return\n driver_executable = compute_driver_executable()\n- # Fix the executable bit during the installation.\n- if not sys.platform == \"win32\":\n- st = os.stat(driver_executable)\n- if st.st_mode & stat.S_IEXEC == 0:\n- os.chmod(driver_executable, st.st_mode | stat.S_IEXEC)\n+\n print(\"Installing the browsers...\")\n subprocess.check_call([str(driver_executable), \"install\"])\n \ndiff --git a/playwright/path_utils.py b/playwright/path_utils.py\n--- a/playwright/path_utils.py\n+++ b/playwright/path_utils.py\n@@ -1,4 +1,5 @@\n import inspect\n+import stat\n from pathlib import Path\n \n \n@@ -8,3 +9,9 @@\n module = inspect.getmodule(frame[0])\n assert module\n return Path(module.__file__).parent.absolute()\n+\n+\n+def make_file_executable(file_path: Path) -> Path:\n+ \"\"\"Makes a file executable.\"\"\"\n+ file_path.chmod(file_path.stat().st_mode | stat.S_IEXEC)\n+ return file_path\n", "issue": "Running playwright raises PermissionError on Linux\n### Reproducing the error\r\n\r\n- Created a virtual environment \r\n- Installed playwright with `python -m pip install playwright`\r\n\r\nTried to run some code\r\n\r\n```python\r\nfrom playwright import sync_playwright\r\n\r\nwith sync_playwright() as p:\r\n for browser_type in [p.chromium, p.firefox, p.webkit]:\r\n browser = browser_type.launch()\r\n page = browser.newPage()\r\n page.goto('http://whatsmyuseragent.org/')\r\n page.screenshot(path=f'example-{browser_type.name}.png')\r\n browser.close()\r\n```\r\n\r\nThen it raised this error\r\n\r\n```python\r\nPermissionError: [Errno 13] Permission denied: '/home/leno/Desktop/open-source/pwright/env/lib/python3.8/site-packages/playwright/drivers/driver-linux\r\n```\r\n\r\nI think this is not a normal behavior since running Python under sudo is a terrible idea.\r\n\r\n**OS**: Ubuntu 20.04\r\n**Python Version**: 3.8.2\r\n**Playwright Version**: 
0.142.3\r\n\r\n\n", "before_files": [{"content": "import inspect\nfrom pathlib import Path\n\n\ndef get_file_dirname() -> Path:\n \"\"\"Returns the callee (`__file__`) directory name\"\"\"\n frame = inspect.stack()[1]\n module = inspect.getmodule(frame[0])\n assert module\n return Path(module.__file__).parent.absolute()\n", "path": "playwright/path_utils.py"}, {"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport io\nimport os\nimport stat\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any\n\nfrom greenlet import greenlet\n\nfrom playwright.async_api import Playwright as AsyncPlaywright\nfrom playwright.connection import Connection\nfrom playwright.helper import Error\nfrom playwright.object_factory import create_remote_object\nfrom playwright.path_utils import get_file_dirname\nfrom playwright.playwright import Playwright\nfrom playwright.sync_api import Playwright as SyncPlaywright\nfrom playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber\n\n\ndef compute_driver_executable() -> Path:\n package_path = get_file_dirname()\n platform = sys.platform\n if platform == \"darwin\":\n return package_path / \"drivers\" / \"driver-darwin\"\n elif platform == \"linux\":\n return package_path / \"drivers\" / \"driver-linux\"\n elif platform == \"win32\":\n result = package_path / \"drivers\" / \"driver-win32-amd64.exe\"\n if result.exists():\n return result\n return package_path / \"drivers\" / \"driver-win32.exe\"\n return package_path / \"drivers\" / \"driver-linux\"\n\n\nasync def run_driver_async() -> Connection:\n driver_executable = compute_driver_executable()\n\n # Sourced from: https://github.com/pytest-dev/pytest/blob/49827adcb9256c9c9c06a25729421dcc3c385edc/src/_pytest/faulthandler.py#L73-L80\n def _get_stderr_fileno() -> int:\n try:\n return sys.stderr.fileno()\n except io.UnsupportedOperation:\n # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\n # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\n # This is potentially dangerous, but the best we can do.\n return sys.__stderr__.fileno()\n\n proc = await asyncio.create_subprocess_exec(\n str(driver_executable),\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=_get_stderr_fileno(),\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n connection = Connection(\n proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()\n )\n return connection\n\n\ndef run_driver() -> Connection:\n loop = asyncio.get_event_loop()\n if loop.is_running():\n raise Error(\"Can only run one Playwright at a time.\")\n return loop.run_until_complete(run_driver_async())\n\n\nclass SyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection = run_driver()\n self._playwright: SyncPlaywright\n\n def __enter__(self) -> SyncPlaywright:\n g_self = greenlet.getcurrent()\n\n def callback_wrapper(playwright_impl: Playwright) -> 
None:\n self._playwright = SyncPlaywright(playwright_impl)\n g_self.switch()\n\n self._connection.call_on_object_with_known_name(\"Playwright\", callback_wrapper)\n set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))\n dispatcher_fiber().switch()\n playwright = self._playwright\n playwright.stop = self.__exit__ # type: ignore\n return playwright\n\n def start(self) -> SyncPlaywright:\n return self.__enter__()\n\n def __exit__(self, *args: Any) -> None:\n self._connection.stop_sync()\n\n\nclass AsyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection: Connection\n\n async def __aenter__(self) -> AsyncPlaywright:\n self._connection = await run_driver_async()\n self._connection.run_async()\n playwright = AsyncPlaywright(\n await self._connection.wait_for_object_with_known_name(\"Playwright\")\n )\n playwright.stop = self.__aexit__ # type: ignore\n return playwright\n\n async def start(self) -> AsyncPlaywright:\n return await self.__aenter__()\n\n async def __aexit__(self, *args: Any) -> None:\n self._connection.stop_async()\n\n\nif sys.platform == \"win32\":\n # Use ProactorEventLoop in 3.7, which is default in 3.8\n loop = asyncio.ProactorEventLoop()\n asyncio.set_event_loop(loop)\n\n\ndef main() -> None:\n if \"install\" not in sys.argv:\n print('Run \"python -m playwright install\" to complete installation')\n return\n driver_executable = compute_driver_executable()\n # Fix the executable bit during the installation.\n if not sys.platform == \"win32\":\n st = os.stat(driver_executable)\n if st.st_mode & stat.S_IEXEC == 0:\n os.chmod(driver_executable, st.st_mode | stat.S_IEXEC)\n print(\"Installing the browsers...\")\n subprocess.check_call([str(driver_executable), \"install\"])\n\n print(\"Playwright is now ready for use\")\n", "path": "playwright/main.py"}], "after_files": [{"content": "import inspect\nimport stat\nfrom pathlib import Path\n\n\ndef get_file_dirname() -> Path:\n \"\"\"Returns the callee (`__file__`) directory name\"\"\"\n frame = inspect.stack()[1]\n module = inspect.getmodule(frame[0])\n assert module\n return Path(module.__file__).parent.absolute()\n\n\ndef make_file_executable(file_path: Path) -> Path:\n \"\"\"Makes a file executable.\"\"\"\n file_path.chmod(file_path.stat().st_mode | stat.S_IEXEC)\n return file_path\n", "path": "playwright/path_utils.py"}, {"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport io\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any\n\nfrom greenlet import greenlet\n\nfrom playwright.async_api import Playwright as AsyncPlaywright\nfrom playwright.connection import Connection\nfrom playwright.helper import Error\nfrom playwright.object_factory import create_remote_object\nfrom playwright.path_utils import get_file_dirname, make_file_executable\nfrom playwright.playwright import Playwright\nfrom playwright.sync_api import Playwright as SyncPlaywright\nfrom playwright.sync_base import dispatcher_fiber, 
set_dispatcher_fiber\n\n\ndef compute_driver_executable() -> Path:\n package_path = get_file_dirname()\n platform = sys.platform\n if platform == \"darwin\":\n path = package_path / \"drivers\" / \"driver-darwin\"\n return make_file_executable(path)\n elif platform == \"linux\":\n path = package_path / \"drivers\" / \"driver-linux\"\n return make_file_executable(path)\n elif platform == \"win32\":\n result = package_path / \"drivers\" / \"driver-win32-amd64.exe\"\n if result.exists():\n return result\n return package_path / \"drivers\" / \"driver-win32.exe\"\n\n path = package_path / \"drivers\" / \"driver-linux\"\n return make_file_executable(path)\n\n\nasync def run_driver_async() -> Connection:\n driver_executable = compute_driver_executable()\n\n # Sourced from: https://github.com/pytest-dev/pytest/blob/49827adcb9256c9c9c06a25729421dcc3c385edc/src/_pytest/faulthandler.py#L73-L80\n def _get_stderr_fileno() -> int:\n try:\n return sys.stderr.fileno()\n except io.UnsupportedOperation:\n # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\n # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\n # This is potentially dangerous, but the best we can do.\n return sys.__stderr__.fileno()\n\n proc = await asyncio.create_subprocess_exec(\n str(driver_executable),\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=_get_stderr_fileno(),\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n connection = Connection(\n proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()\n )\n return connection\n\n\ndef run_driver() -> Connection:\n loop = asyncio.get_event_loop()\n if loop.is_running():\n raise Error(\"Can only run one Playwright at a time.\")\n return loop.run_until_complete(run_driver_async())\n\n\nclass SyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection = run_driver()\n self._playwright: SyncPlaywright\n\n def __enter__(self) -> SyncPlaywright:\n g_self = greenlet.getcurrent()\n\n def callback_wrapper(playwright_impl: Playwright) -> None:\n self._playwright = SyncPlaywright(playwright_impl)\n g_self.switch()\n\n self._connection.call_on_object_with_known_name(\"Playwright\", callback_wrapper)\n set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))\n dispatcher_fiber().switch()\n playwright = self._playwright\n playwright.stop = self.__exit__ # type: ignore\n return playwright\n\n def start(self) -> SyncPlaywright:\n return self.__enter__()\n\n def __exit__(self, *args: Any) -> None:\n self._connection.stop_sync()\n\n\nclass AsyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection: Connection\n\n async def __aenter__(self) -> AsyncPlaywright:\n self._connection = await run_driver_async()\n self._connection.run_async()\n playwright = AsyncPlaywright(\n await self._connection.wait_for_object_with_known_name(\"Playwright\")\n )\n playwright.stop = self.__aexit__ # type: ignore\n return playwright\n\n async def start(self) -> AsyncPlaywright:\n return await self.__aenter__()\n\n async def __aexit__(self, *args: Any) -> None:\n self._connection.stop_async()\n\n\nif sys.platform == \"win32\":\n # Use ProactorEventLoop in 3.7, which is default in 3.8\n loop = asyncio.ProactorEventLoop()\n asyncio.set_event_loop(loop)\n\n\ndef main() -> None:\n if \"install\" not in sys.argv:\n print('Run \"python -m playwright install\" to complete installation')\n return\n driver_executable = compute_driver_executable()\n\n print(\"Installing the 
browsers...\")\n subprocess.check_call([str(driver_executable), \"install\"])\n\n print(\"Playwright is now ready for use\")\n", "path": "playwright/main.py"}]}
2,159
652
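Illustrative note on the row above: the golden_diff replaces the install-time `os.chmod` in `main()` with a `make_file_executable` helper in `path_utils.py`, so the executable bit is restored whenever `compute_driver_executable()` resolves the driver path, not only during `python -m playwright install`. The snippet below is a minimal, self-contained sketch of that chmod pattern, assuming nothing beyond the standard library; the `drivers/driver-linux` path in the demo is a hypothetical stand-in, not part of the patch.

```python
import stat
from pathlib import Path


def make_file_executable(file_path: Path) -> Path:
    """Add the owner-execute bit while keeping the file's existing mode bits."""
    file_path.chmod(file_path.stat().st_mode | stat.S_IEXEC)
    return file_path


if __name__ == "__main__":
    # Hypothetical driver location, used only to exercise the helper.
    driver = Path("drivers/driver-linux")
    if driver.exists():
        make_file_executable(driver)
        # A 0o644 file becomes 0o744 once stat.S_IEXEC (0o100) is OR-ed in.
        print(oct(driver.stat().st_mode & 0o777))
```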
gh_patches_debug_12070
rasdani/github-patches
git_diff
Kinto__kinto-2011
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DELETE /v1/accounts raises a 500 ``` $ http DELETE https://natim.alwaysdata.net/v1/accounts --auth admin:admin HTTP/1.1 500 Internal Server Error Access-Control-Expose-Headers: Retry-After, Alert, Backoff, Content-Length Content-Length: 177 Content-Type: application/json Date: Mon, 28 Jan 2019 20:45:56 GMT Via: 1.1 alproxy X-Content-Type-Options: nosniff ``` ``` File "/home/natim/kinto/kinto/kinto/plugins/accounts/views.py", line 221, in on_account_changed username = request.matchdict["id"] KeyError: 'id' ``` DELETE /v1/accounts raises a 500 ``` $ http DELETE https://natim.alwaysdata.net/v1/accounts --auth admin:admin HTTP/1.1 500 Internal Server Error Access-Control-Expose-Headers: Retry-After, Alert, Backoff, Content-Length Content-Length: 177 Content-Type: application/json Date: Mon, 28 Jan 2019 20:45:56 GMT Via: 1.1 alproxy X-Content-Type-Options: nosniff ``` ``` File "/home/natim/kinto/kinto/kinto/plugins/accounts/views.py", line 221, in on_account_changed username = request.matchdict["id"] KeyError: 'id' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kinto/plugins/accounts/views.py` Content: ``` 1 import colander 2 from pyramid import httpexceptions 3 from pyramid.decorator import reify 4 from pyramid.security import Authenticated, Everyone 5 from pyramid.settings import aslist 6 from pyramid.events import subscriber 7 8 from kinto.views import NameGenerator 9 from kinto.core import resource, utils 10 from kinto.core.errors import raise_invalid, http_error 11 from kinto.core.events import ResourceChanged, ACTIONS 12 13 from .utils import hash_password, ACCOUNT_CACHE_KEY, ACCOUNT_POLICY_NAME 14 15 16 def _extract_posted_body_id(request): 17 try: 18 # Anonymous creation with POST. 19 return request.json["data"]["id"] 20 except (ValueError, KeyError): 21 # Bad POST data. 22 if request.method.lower() == "post": 23 error_details = {"name": "data.id", "description": "data.id in body: Required"} 24 raise_invalid(request, **error_details) 25 # Anonymous GET 26 error_msg = "Cannot read accounts." 27 raise http_error(httpexceptions.HTTPUnauthorized(), error=error_msg) 28 29 30 class AccountIdGenerator(NameGenerator): 31 """Allow @ signs in account IDs.""" 32 33 regexp = r"^[a-zA-Z0-9][.@a-zA-Z0-9_-]*$" 34 35 36 class AccountSchema(resource.ResourceSchema): 37 password = colander.SchemaNode(colander.String()) 38 39 40 @resource.register() 41 class Account(resource.Resource): 42 43 schema = AccountSchema 44 45 def __init__(self, request, context): 46 # Store if current user is administrator (before accessing get_parent_id()) 47 allowed_from_settings = request.registry.settings.get("account_write_principals", []) 48 context.is_administrator = ( 49 len(set(aslist(allowed_from_settings)) & set(request.prefixed_principals)) > 0 50 ) 51 # Shortcut to check if current is anonymous (before get_parent_id()). 52 context.is_anonymous = Authenticated not in request.effective_principals 53 54 super().__init__(request, context) 55 56 # Overwrite the current principal set by Resource. 57 if self.model.current_principal == Everyone or context.is_administrator: 58 # Creation is anonymous, but author with write perm is this: 59 self.model.current_principal = f"{ACCOUNT_POLICY_NAME}:{self.model.parent_id}" 60 61 @reify 62 def id_generator(self): 63 # This generator is used for ID validation. 
64 return AccountIdGenerator() 65 66 def get_parent_id(self, request): 67 # The whole challenge here is that we want to isolate what 68 # authenticated users can list, but give access to everything to 69 # administrators. 70 # Plus when anonymous create accounts, we have to set their parent id 71 # to the same value they would obtain when authenticated. 72 if self.context.is_administrator: 73 if self.context.on_plural_endpoint: 74 # Accounts created by admin should have userid as parent. 75 if request.method.lower() == "post": 76 return _extract_posted_body_id(request) 77 else: 78 # Admin see all accounts. 79 return "*" 80 else: 81 # No pattern matching for admin on single record. 82 return request.matchdict["id"] 83 84 if not self.context.is_anonymous: 85 # Authenticated users see their own account only. 86 return request.selected_userid 87 88 # Anonymous creation with PUT. 89 if "id" in request.matchdict: 90 return request.matchdict["id"] 91 92 return _extract_posted_body_id(request) 93 94 def plural_post(self): 95 result = super(Account, self).plural_post() 96 if self.context.is_anonymous and self.request.response.status_code == 200: 97 error_details = {"message": "Account ID %r already exists" % result["data"]["id"]} 98 raise http_error(httpexceptions.HTTPForbidden(), **error_details) 99 return result 100 101 def process_object(self, new, old=None): 102 new = super(Account, self).process_object(new, old) 103 104 new["password"] = hash_password(new["password"]) 105 106 # Administrators can reach other accounts and anonymous have no 107 # selected_userid. So do not try to enforce. 108 if self.context.is_administrator or self.context.is_anonymous: 109 return new 110 111 # Do not let accounts be created without usernames. 112 if self.model.id_field not in new: 113 error_details = {"name": "data.id", "description": "Accounts must have an ID."} 114 raise_invalid(self.request, **error_details) 115 116 # Otherwise, we force the id to match the authenticated username. 117 if new[self.model.id_field] != self.request.selected_userid: 118 error_details = { 119 "name": "data.id", 120 "description": "Username and account ID do not match.", 121 } 122 raise_invalid(self.request, **error_details) 123 124 return new 125 126 127 # Clear cache on account change 128 @subscriber( 129 ResourceChanged, for_resources=("account",), for_actions=(ACTIONS.UPDATE, ACTIONS.DELETE) 130 ) 131 def on_account_changed(event): 132 request = event.request 133 cache = request.registry.cache 134 settings = request.registry.settings 135 # Extract username and password from current user 136 username = request.matchdict["id"] 137 hmac_secret = settings["userid_hmac_secret"] 138 cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username)) 139 # Delete cache 140 cache.delete(cache_key) 141 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/plugins/accounts/views.py b/kinto/plugins/accounts/views.py --- a/kinto/plugins/accounts/views.py +++ b/kinto/plugins/accounts/views.py @@ -132,9 +132,11 @@ request = event.request cache = request.registry.cache settings = request.registry.settings - # Extract username and password from current user - username = request.matchdict["id"] hmac_secret = settings["userid_hmac_secret"] - cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username)) - # Delete cache - cache.delete(cache_key) + + for obj in event.impacted_objects: + # Extract username and password from current user + username = obj["old"]["id"] + cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username)) + # Delete cache + cache.delete(cache_key)
{"golden_diff": "diff --git a/kinto/plugins/accounts/views.py b/kinto/plugins/accounts/views.py\n--- a/kinto/plugins/accounts/views.py\n+++ b/kinto/plugins/accounts/views.py\n@@ -132,9 +132,11 @@\n request = event.request\n cache = request.registry.cache\n settings = request.registry.settings\n- # Extract username and password from current user\n- username = request.matchdict[\"id\"]\n hmac_secret = settings[\"userid_hmac_secret\"]\n- cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username))\n- # Delete cache\n- cache.delete(cache_key)\n+\n+ for obj in event.impacted_objects:\n+ # Extract username and password from current user\n+ username = obj[\"old\"][\"id\"]\n+ cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username))\n+ # Delete cache\n+ cache.delete(cache_key)\n", "issue": "DELETE /v1/accounts raises a 500\n```\r\n$ http DELETE https://natim.alwaysdata.net/v1/accounts --auth admin:admin\r\nHTTP/1.1 500 Internal Server Error\r\nAccess-Control-Expose-Headers: Retry-After, Alert, Backoff, Content-Length\r\nContent-Length: 177\r\nContent-Type: application/json\r\nDate: Mon, 28 Jan 2019 20:45:56 GMT\r\nVia: 1.1 alproxy\r\nX-Content-Type-Options: nosniff\r\n```\r\n\r\n```\r\n File \"/home/natim/kinto/kinto/kinto/plugins/accounts/views.py\", line 221, in on_account_changed\r\n username = request.matchdict[\"id\"]\r\nKeyError: 'id'\r\n```\nDELETE /v1/accounts raises a 500\n```\r\n$ http DELETE https://natim.alwaysdata.net/v1/accounts --auth admin:admin\r\nHTTP/1.1 500 Internal Server Error\r\nAccess-Control-Expose-Headers: Retry-After, Alert, Backoff, Content-Length\r\nContent-Length: 177\r\nContent-Type: application/json\r\nDate: Mon, 28 Jan 2019 20:45:56 GMT\r\nVia: 1.1 alproxy\r\nX-Content-Type-Options: nosniff\r\n```\r\n\r\n```\r\n File \"/home/natim/kinto/kinto/kinto/plugins/accounts/views.py\", line 221, in on_account_changed\r\n username = request.matchdict[\"id\"]\r\nKeyError: 'id'\r\n```\n", "before_files": [{"content": "import colander\nfrom pyramid import httpexceptions\nfrom pyramid.decorator import reify\nfrom pyramid.security import Authenticated, Everyone\nfrom pyramid.settings import aslist\nfrom pyramid.events import subscriber\n\nfrom kinto.views import NameGenerator\nfrom kinto.core import resource, utils\nfrom kinto.core.errors import raise_invalid, http_error\nfrom kinto.core.events import ResourceChanged, ACTIONS\n\nfrom .utils import hash_password, ACCOUNT_CACHE_KEY, ACCOUNT_POLICY_NAME\n\n\ndef _extract_posted_body_id(request):\n try:\n # Anonymous creation with POST.\n return request.json[\"data\"][\"id\"]\n except (ValueError, KeyError):\n # Bad POST data.\n if request.method.lower() == \"post\":\n error_details = {\"name\": \"data.id\", \"description\": \"data.id in body: Required\"}\n raise_invalid(request, **error_details)\n # Anonymous GET\n error_msg = \"Cannot read accounts.\"\n raise http_error(httpexceptions.HTTPUnauthorized(), error=error_msg)\n\n\nclass AccountIdGenerator(NameGenerator):\n \"\"\"Allow @ signs in account IDs.\"\"\"\n\n regexp = r\"^[a-zA-Z0-9][.@a-zA-Z0-9_-]*$\"\n\n\nclass AccountSchema(resource.ResourceSchema):\n password = colander.SchemaNode(colander.String())\n\n\[email protected]()\nclass Account(resource.Resource):\n\n schema = AccountSchema\n\n def __init__(self, request, context):\n # Store if current user is administrator (before accessing get_parent_id())\n allowed_from_settings = request.registry.settings.get(\"account_write_principals\", [])\n context.is_administrator = (\n 
len(set(aslist(allowed_from_settings)) & set(request.prefixed_principals)) > 0\n )\n # Shortcut to check if current is anonymous (before get_parent_id()).\n context.is_anonymous = Authenticated not in request.effective_principals\n\n super().__init__(request, context)\n\n # Overwrite the current principal set by Resource.\n if self.model.current_principal == Everyone or context.is_administrator:\n # Creation is anonymous, but author with write perm is this:\n self.model.current_principal = f\"{ACCOUNT_POLICY_NAME}:{self.model.parent_id}\"\n\n @reify\n def id_generator(self):\n # This generator is used for ID validation.\n return AccountIdGenerator()\n\n def get_parent_id(self, request):\n # The whole challenge here is that we want to isolate what\n # authenticated users can list, but give access to everything to\n # administrators.\n # Plus when anonymous create accounts, we have to set their parent id\n # to the same value they would obtain when authenticated.\n if self.context.is_administrator:\n if self.context.on_plural_endpoint:\n # Accounts created by admin should have userid as parent.\n if request.method.lower() == \"post\":\n return _extract_posted_body_id(request)\n else:\n # Admin see all accounts.\n return \"*\"\n else:\n # No pattern matching for admin on single record.\n return request.matchdict[\"id\"]\n\n if not self.context.is_anonymous:\n # Authenticated users see their own account only.\n return request.selected_userid\n\n # Anonymous creation with PUT.\n if \"id\" in request.matchdict:\n return request.matchdict[\"id\"]\n\n return _extract_posted_body_id(request)\n\n def plural_post(self):\n result = super(Account, self).plural_post()\n if self.context.is_anonymous and self.request.response.status_code == 200:\n error_details = {\"message\": \"Account ID %r already exists\" % result[\"data\"][\"id\"]}\n raise http_error(httpexceptions.HTTPForbidden(), **error_details)\n return result\n\n def process_object(self, new, old=None):\n new = super(Account, self).process_object(new, old)\n\n new[\"password\"] = hash_password(new[\"password\"])\n\n # Administrators can reach other accounts and anonymous have no\n # selected_userid. 
So do not try to enforce.\n if self.context.is_administrator or self.context.is_anonymous:\n return new\n\n # Do not let accounts be created without usernames.\n if self.model.id_field not in new:\n error_details = {\"name\": \"data.id\", \"description\": \"Accounts must have an ID.\"}\n raise_invalid(self.request, **error_details)\n\n # Otherwise, we force the id to match the authenticated username.\n if new[self.model.id_field] != self.request.selected_userid:\n error_details = {\n \"name\": \"data.id\",\n \"description\": \"Username and account ID do not match.\",\n }\n raise_invalid(self.request, **error_details)\n\n return new\n\n\n# Clear cache on account change\n@subscriber(\n ResourceChanged, for_resources=(\"account\",), for_actions=(ACTIONS.UPDATE, ACTIONS.DELETE)\n)\ndef on_account_changed(event):\n request = event.request\n cache = request.registry.cache\n settings = request.registry.settings\n # Extract username and password from current user\n username = request.matchdict[\"id\"]\n hmac_secret = settings[\"userid_hmac_secret\"]\n cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username))\n # Delete cache\n cache.delete(cache_key)\n", "path": "kinto/plugins/accounts/views.py"}], "after_files": [{"content": "import colander\nfrom pyramid import httpexceptions\nfrom pyramid.decorator import reify\nfrom pyramid.security import Authenticated, Everyone\nfrom pyramid.settings import aslist\nfrom pyramid.events import subscriber\n\nfrom kinto.views import NameGenerator\nfrom kinto.core import resource, utils\nfrom kinto.core.errors import raise_invalid, http_error\nfrom kinto.core.events import ResourceChanged, ACTIONS\n\nfrom .utils import hash_password, ACCOUNT_CACHE_KEY, ACCOUNT_POLICY_NAME\n\n\ndef _extract_posted_body_id(request):\n try:\n # Anonymous creation with POST.\n return request.json[\"data\"][\"id\"]\n except (ValueError, KeyError):\n # Bad POST data.\n if request.method.lower() == \"post\":\n error_details = {\"name\": \"data.id\", \"description\": \"data.id in body: Required\"}\n raise_invalid(request, **error_details)\n # Anonymous GET\n error_msg = \"Cannot read accounts.\"\n raise http_error(httpexceptions.HTTPUnauthorized(), error=error_msg)\n\n\nclass AccountIdGenerator(NameGenerator):\n \"\"\"Allow @ signs in account IDs.\"\"\"\n\n regexp = r\"^[a-zA-Z0-9][.@a-zA-Z0-9_-]*$\"\n\n\nclass AccountSchema(resource.ResourceSchema):\n password = colander.SchemaNode(colander.String())\n\n\[email protected]()\nclass Account(resource.Resource):\n\n schema = AccountSchema\n\n def __init__(self, request, context):\n # Store if current user is administrator (before accessing get_parent_id())\n allowed_from_settings = request.registry.settings.get(\"account_write_principals\", [])\n context.is_administrator = (\n len(set(aslist(allowed_from_settings)) & set(request.prefixed_principals)) > 0\n )\n # Shortcut to check if current is anonymous (before get_parent_id()).\n context.is_anonymous = Authenticated not in request.effective_principals\n\n super().__init__(request, context)\n\n # Overwrite the current principal set by Resource.\n if self.model.current_principal == Everyone or context.is_administrator:\n # Creation is anonymous, but author with write perm is this:\n self.model.current_principal = f\"{ACCOUNT_POLICY_NAME}:{self.model.parent_id}\"\n\n @reify\n def id_generator(self):\n # This generator is used for ID validation.\n return AccountIdGenerator()\n\n def get_parent_id(self, request):\n # The whole challenge here is that we want to isolate 
what\n # authenticated users can list, but give access to everything to\n # administrators.\n # Plus when anonymous create accounts, we have to set their parent id\n # to the same value they would obtain when authenticated.\n if self.context.is_administrator:\n if self.context.on_plural_endpoint:\n # Accounts created by admin should have userid as parent.\n if request.method.lower() == \"post\":\n return _extract_posted_body_id(request)\n else:\n # Admin see all accounts.\n return \"*\"\n else:\n # No pattern matching for admin on single record.\n return request.matchdict[\"id\"]\n\n if not self.context.is_anonymous:\n # Authenticated users see their own account only.\n return request.selected_userid\n\n # Anonymous creation with PUT.\n if \"id\" in request.matchdict:\n return request.matchdict[\"id\"]\n\n return _extract_posted_body_id(request)\n\n def plural_post(self):\n result = super(Account, self).plural_post()\n if self.context.is_anonymous and self.request.response.status_code == 200:\n error_details = {\"message\": \"Account ID %r already exists\" % result[\"data\"][\"id\"]}\n raise http_error(httpexceptions.HTTPForbidden(), **error_details)\n return result\n\n def process_object(self, new, old=None):\n new = super(Account, self).process_object(new, old)\n\n new[\"password\"] = hash_password(new[\"password\"])\n\n # Administrators can reach other accounts and anonymous have no\n # selected_userid. So do not try to enforce.\n if self.context.is_administrator or self.context.is_anonymous:\n return new\n\n # Do not let accounts be created without usernames.\n if self.model.id_field not in new:\n error_details = {\"name\": \"data.id\", \"description\": \"Accounts must have an ID.\"}\n raise_invalid(self.request, **error_details)\n\n # Otherwise, we force the id to match the authenticated username.\n if new[self.model.id_field] != self.request.selected_userid:\n error_details = {\n \"name\": \"data.id\",\n \"description\": \"Username and account ID do not match.\",\n }\n raise_invalid(self.request, **error_details)\n\n return new\n\n\n# Clear cache on account change\n@subscriber(\n ResourceChanged, for_resources=(\"account\",), for_actions=(ACTIONS.UPDATE, ACTIONS.DELETE)\n)\ndef on_account_changed(event):\n request = event.request\n cache = request.registry.cache\n settings = request.registry.settings\n hmac_secret = settings[\"userid_hmac_secret\"]\n\n for obj in event.impacted_objects:\n # Extract username and password from current user\n username = obj[\"old\"][\"id\"]\n cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username))\n # Delete cache\n cache.delete(cache_key)\n", "path": "kinto/plugins/accounts/views.py"}]}
2,037
199
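Illustrative note on the row above: the 500 comes from the subscriber reading `request.matchdict["id"]`, which is absent on the plural `DELETE /v1/accounts` endpoint, and the golden_diff switches to looping over `event.impacted_objects` and taking each object's old `id`. The snippet below is a minimal, framework-free sketch of that iteration pattern; `FakeEvent`, the alice/bob records and the `account:{username}` key template are invented stand-ins for illustration, not Kinto's real event class, data, or `ACCOUNT_CACHE_KEY` format (the real subscriber also hashes the key with `utils.hmac_digest`, which is omitted here).

```python
from dataclasses import dataclass, field
from typing import Any, Dict, List


@dataclass
class FakeEvent:
    """Stand-in for a Kinto ResourceChanged event carrying its impacted objects."""

    impacted_objects: List[Dict[str, Any]] = field(default_factory=list)


def cache_keys_to_clear(event: FakeEvent) -> List[str]:
    # Derive one key per impacted account instead of reading request.matchdict["id"],
    # which does not exist on plural endpoints such as DELETE /v1/accounts.
    return [f"account:{obj['old']['id']}" for obj in event.impacted_objects]


if __name__ == "__main__":
    event = FakeEvent(impacted_objects=[{"old": {"id": "alice"}}, {"old": {"id": "bob"}}])
    print(cache_keys_to_clear(event))  # ['account:alice', 'account:bob']
```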
gh_patches_debug_39351
rasdani/github-patches
git_diff
pyodide__pyodide-1457
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Version selection for packages availble both on PyPi and in Pyodide For packages not built in pyodide, version selection works as expected. For instance, ```py >>> import micropip >>> micropip.install('idna==2.9') # version before last on PyPi, package not in pyodide Installed idna >>> import idna >>> idna.__version__ 2.9 ``` However, when one specifies the version for a package available in the pyodide distribution, it is ignored and the version from pyodide is installed regardless if PyPi includes the requested version, ```py >>> import micropip >>> micropip.install('pytz==2020.1') Installed pytz >>> import pytz >>> pytz.__version__ 2019.3 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `packages/micropip/micropip/micropip.py` Content: ``` 1 import asyncio 2 import hashlib 3 import importlib 4 import io 5 import json 6 from pathlib import Path 7 import zipfile 8 from typing import Dict, Any, Union, List, Tuple 9 10 from distlib import markers, util, version 11 12 # Provide stubs for testing in native python 13 try: 14 import pyodide_js 15 16 IN_BROWSER = True 17 except ImportError: 18 IN_BROWSER = False 19 20 if IN_BROWSER: 21 # In practice, this is the `site-packages` directory. 22 WHEEL_BASE = Path(__file__).parent 23 else: 24 WHEEL_BASE = Path(".") / "wheels" 25 26 if IN_BROWSER: 27 from js import fetch 28 29 async def _get_url(url): 30 resp = await fetch(url) 31 if not resp.ok: 32 raise OSError( 33 f"Request for {url} failed with status {resp.status}: {resp.statusText}" 34 ) 35 return io.BytesIO(await resp.arrayBuffer()) 36 37 38 else: 39 from urllib.request import urlopen 40 41 async def _get_url(url): 42 with urlopen(url) as fd: 43 content = fd.read() 44 return io.BytesIO(content) 45 46 47 if IN_BROWSER: 48 from asyncio import gather 49 else: 50 # asyncio.gather will schedule any coroutines to run on the event loop but 51 # we want to avoid using the event loop at all. Instead just run the 52 # coroutines in sequence. 53 async def gather(*coroutines): # type: ignore 54 result = [] 55 for coroutine in coroutines: 56 result.append(await coroutine) 57 return result 58 59 60 async def _get_pypi_json(pkgname): 61 url = f"https://pypi.org/pypi/{pkgname}/json" 62 fd = await _get_url(url) 63 return json.load(fd) 64 65 66 def _parse_wheel_url(url: str) -> Tuple[str, Dict[str, Any], str]: 67 """Parse wheels URL and extract available metadata 68 69 See https://www.python.org/dev/peps/pep-0427/#file-name-convention 70 """ 71 file_name = Path(url).name 72 # also strip '.whl' extension. 73 wheel_name = Path(url).stem 74 tokens = wheel_name.split("-") 75 # TODO: support optional build tags in the filename (cf PEP 427) 76 if len(tokens) < 5: 77 raise ValueError(f"{file_name} is not a valid wheel file name.") 78 version, python_tag, abi_tag, platform = tokens[-4:] 79 name = "-".join(tokens[:-4]) 80 wheel = { 81 "digests": None, # checksums not available 82 "filename": file_name, 83 "packagetype": "bdist_wheel", 84 "python_version": python_tag, 85 "abi_tag": abi_tag, 86 "platform": platform, 87 "url": url, 88 } 89 90 return name, wheel, version 91 92 93 def _extract_wheel(fd): 94 with zipfile.ZipFile(fd) as zf: 95 zf.extractall(WHEEL_BASE) 96 97 98 def _validate_wheel(data, fileinfo): 99 if fileinfo.get("digests") is None: 100 # No checksums available, e.g. 
because installing 101 # from a different location than PyPi. 102 return 103 sha256 = fileinfo["digests"]["sha256"] 104 m = hashlib.sha256() 105 m.update(data.getvalue()) 106 if m.hexdigest() != sha256: 107 raise ValueError("Contents don't match hash") 108 109 110 async def _install_wheel(name, fileinfo): 111 url = fileinfo["url"] 112 wheel = await _get_url(url) 113 _validate_wheel(wheel, fileinfo) 114 _extract_wheel(wheel) 115 116 117 class _PackageManager: 118 version_scheme = version.get_scheme("normalized") 119 120 def __init__(self): 121 if IN_BROWSER: 122 self.builtin_packages = pyodide_js._module.packages.dependencies.to_py() 123 else: 124 self.builtin_packages = {} 125 self.installed_packages = {} 126 127 async def install(self, requirements: Union[str, List[str]], ctx=None): 128 if ctx is None: 129 ctx = {"extra": None} 130 131 complete_ctx = dict(markers.DEFAULT_CONTEXT) 132 complete_ctx.update(ctx) 133 134 if isinstance(requirements, str): 135 requirements = [requirements] 136 137 transaction: Dict[str, Any] = { 138 "wheels": [], 139 "pyodide_packages": set(), 140 "locked": dict(self.installed_packages), 141 } 142 requirement_promises = [] 143 for requirement in requirements: 144 requirement_promises.append( 145 self.add_requirement(requirement, complete_ctx, transaction) 146 ) 147 148 await gather(*requirement_promises) 149 150 wheel_promises = [] 151 152 # Install built-in packages 153 pyodide_packages = transaction["pyodide_packages"] 154 if len(pyodide_packages): 155 # Note: branch never happens in out-of-browser testing because we 156 # report that all dependencies are empty. 157 self.installed_packages.update(dict((k, None) for k in pyodide_packages)) 158 wheel_promises.append(pyodide_js.loadPackage(list(pyodide_packages))) 159 160 # Now install PyPI packages 161 for name, wheel, ver in transaction["wheels"]: 162 wheel_promises.append(_install_wheel(name, wheel)) 163 self.installed_packages[name] = ver 164 await gather(*wheel_promises) 165 return f'Installed {", ".join(self.installed_packages.keys())}' 166 167 async def add_requirement(self, requirement: str, ctx, transaction): 168 if requirement.endswith(".whl"): 169 # custom download location 170 name, wheel, version = _parse_wheel_url(requirement) 171 transaction["wheels"].append((name, wheel, version)) 172 return 173 174 req = util.parse_requirement(requirement) 175 176 # If it's a Pyodide package, use that instead of the one on PyPI 177 if req.name in self.builtin_packages: 178 transaction["pyodide_packages"].add(req.name) 179 return 180 181 if req.marker: 182 if not markers.evaluator.evaluate(req.marker, ctx): 183 return 184 185 matcher = self.version_scheme.matcher(req.requirement) 186 187 # If we already have something that will work, don't 188 # fetch again 189 for name, ver in transaction["locked"].items(): 190 if name == req.name: 191 if matcher.match(ver): 192 break 193 else: 194 raise ValueError( 195 f"Requested '{requirement}', " 196 f"but {name}=={ver} is already installed" 197 ) 198 else: 199 metadata = await _get_pypi_json(req.name) 200 wheel, ver = self.find_wheel(metadata, req) 201 transaction["locked"][req.name] = ver 202 203 recurs_reqs = metadata.get("info", {}).get("requires_dist") or [] 204 for recurs_req in recurs_reqs: 205 await self.add_requirement(recurs_req, ctx, transaction) 206 207 transaction["wheels"].append((req.name, wheel, ver)) 208 209 def find_wheel(self, metadata, req): 210 releases = [] 211 for ver, files in metadata.get("releases", {}).items(): 212 ver = 
self.version_scheme.suggest(ver) 213 if ver is not None: 214 releases.append((ver, files)) 215 216 def version_number(release): 217 return version.NormalizedVersion(release[0]) 218 219 releases = sorted(releases, key=version_number, reverse=True) 220 matcher = self.version_scheme.matcher(req.requirement) 221 for ver, meta in releases: 222 if matcher.match(ver): 223 for fileinfo in meta: 224 if fileinfo["filename"].endswith("py3-none-any.whl"): 225 return fileinfo, ver 226 227 raise ValueError(f"Couldn't find a pure Python 3 wheel for '{req.requirement}'") 228 229 230 # Make PACKAGE_MANAGER singleton 231 PACKAGE_MANAGER = _PackageManager() 232 del _PackageManager 233 234 235 def install(requirements: Union[str, List[str]]): 236 """Install the given package and all of its dependencies. 237 238 See :ref:`loading packages <loading_packages>` for more information. 239 240 This only works for packages that are either pure Python or for packages 241 with C extensions that are built in Pyodide. If a pure Python package is not 242 found in the Pyodide repository it will be loaded from PyPi. 243 244 Parameters 245 ---------- 246 requirements : ``str | List[str]`` 247 248 A requirement or list of requirements to install. Each requirement is a string, which should be either 249 a package name or URL to a wheel: 250 251 - If the requirement ends in ``.whl`` it will be interpreted as a URL. 252 The file must be a wheel named in compliance with the 253 `PEP 427 naming convention <https://www.python.org/dev/peps/pep-0427/#file-format>`_. 254 255 - If the requirement does not end in ``.whl``, it will interpreted as the 256 name of a package. A package by this name must either be present in the 257 Pyodide repository at `indexURL <globalThis.loadPyodide>` or on PyPi 258 259 Returns 260 ------- 261 ``Future`` 262 263 A ``Future`` that resolves to ``None`` when all packages have 264 been downloaded and installed. 265 """ 266 importlib.invalidate_caches() 267 return asyncio.ensure_future(PACKAGE_MANAGER.install(requirements)) 268 269 270 __all__ = ["install"] 271 272 273 if __name__ == "__main__": 274 install("snowballstemmer") 275 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/packages/micropip/micropip/micropip.py b/packages/micropip/micropip/micropip.py --- a/packages/micropip/micropip/micropip.py +++ b/packages/micropip/micropip/micropip.py @@ -57,6 +57,14 @@ return result +if IN_BROWSER: + from pyodide_js import loadedPackages +else: + + class loadedPackages: # type: ignore + pass + + async def _get_pypi_json(pkgname): url = f"https://pypi.org/pypi/{pkgname}/json" fd = await _get_url(url) @@ -112,6 +120,7 @@ wheel = await _get_url(url) _validate_wheel(wheel, fileinfo) _extract_wheel(wheel) + setattr(loadedPackages, name, url) class _PackageManager: @@ -155,14 +164,15 @@ # Note: branch never happens in out-of-browser testing because we # report that all dependencies are empty. self.installed_packages.update(dict((k, None) for k in pyodide_packages)) - wheel_promises.append(pyodide_js.loadPackage(list(pyodide_packages))) + wheel_promises.append( + asyncio.ensure_future(pyodide_js.loadPackage(list(pyodide_packages))) + ) # Now install PyPI packages for name, wheel, ver in transaction["wheels"]: wheel_promises.append(_install_wheel(name, wheel)) self.installed_packages[name] = ver await gather(*wheel_promises) - return f'Installed {", ".join(self.installed_packages.keys())}' async def add_requirement(self, requirement: str, ctx, transaction): if requirement.endswith(".whl"): @@ -245,8 +255,8 @@ ---------- requirements : ``str | List[str]`` - A requirement or list of requirements to install. Each requirement is a string, which should be either - a package name or URL to a wheel: + A requirement or list of requirements to install. Each requirement is a + string, which should be either a package name or URL to a wheel: - If the requirement ends in ``.whl`` it will be interpreted as a URL. The file must be a wheel named in compliance with the @@ -260,8 +270,8 @@ ------- ``Future`` - A ``Future`` that resolves to ``None`` when all packages have - been downloaded and installed. + A ``Future`` that resolves to ``None`` when all packages have been + downloaded and installed. """ importlib.invalidate_caches() return asyncio.ensure_future(PACKAGE_MANAGER.install(requirements))
{"golden_diff": "diff --git a/packages/micropip/micropip/micropip.py b/packages/micropip/micropip/micropip.py\n--- a/packages/micropip/micropip/micropip.py\n+++ b/packages/micropip/micropip/micropip.py\n@@ -57,6 +57,14 @@\n return result\n \n \n+if IN_BROWSER:\n+ from pyodide_js import loadedPackages\n+else:\n+\n+ class loadedPackages: # type: ignore\n+ pass\n+\n+\n async def _get_pypi_json(pkgname):\n url = f\"https://pypi.org/pypi/{pkgname}/json\"\n fd = await _get_url(url)\n@@ -112,6 +120,7 @@\n wheel = await _get_url(url)\n _validate_wheel(wheel, fileinfo)\n _extract_wheel(wheel)\n+ setattr(loadedPackages, name, url)\n \n \n class _PackageManager:\n@@ -155,14 +164,15 @@\n # Note: branch never happens in out-of-browser testing because we\n # report that all dependencies are empty.\n self.installed_packages.update(dict((k, None) for k in pyodide_packages))\n- wheel_promises.append(pyodide_js.loadPackage(list(pyodide_packages)))\n+ wheel_promises.append(\n+ asyncio.ensure_future(pyodide_js.loadPackage(list(pyodide_packages)))\n+ )\n \n # Now install PyPI packages\n for name, wheel, ver in transaction[\"wheels\"]:\n wheel_promises.append(_install_wheel(name, wheel))\n self.installed_packages[name] = ver\n await gather(*wheel_promises)\n- return f'Installed {\", \".join(self.installed_packages.keys())}'\n \n async def add_requirement(self, requirement: str, ctx, transaction):\n if requirement.endswith(\".whl\"):\n@@ -245,8 +255,8 @@\n ----------\n requirements : ``str | List[str]``\n \n- A requirement or list of requirements to install. Each requirement is a string, which should be either\n- a package name or URL to a wheel:\n+ A requirement or list of requirements to install. Each requirement is a\n+ string, which should be either a package name or URL to a wheel:\n \n - If the requirement ends in ``.whl`` it will be interpreted as a URL.\n The file must be a wheel named in compliance with the\n@@ -260,8 +270,8 @@\n -------\n ``Future``\n \n- A ``Future`` that resolves to ``None`` when all packages have\n- been downloaded and installed.\n+ A ``Future`` that resolves to ``None`` when all packages have been\n+ downloaded and installed.\n \"\"\"\n importlib.invalidate_caches()\n return asyncio.ensure_future(PACKAGE_MANAGER.install(requirements))\n", "issue": "Version selection for packages availble both on PyPi and in Pyodide\nFor packages not built in pyodide, version selection works as expected. 
For instance,\r\n```py\r\n>>> import micropip\r\n>>> micropip.install('idna==2.9') # version before last on PyPi, package not in pyodide\r\nInstalled idna\r\n>>> import idna\r\n>>> idna.__version__\r\n2.9\r\n```\r\n\r\nHowever, when one specifies the version for a package available in the pyodide distribution, it is ignored and the version from pyodide is installed regardless if PyPi includes the requested version,\r\n```py\r\n>>> import micropip\r\n>>> micropip.install('pytz==2020.1')\r\nInstalled pytz\r\n>>> import pytz\r\n>>> pytz.__version__\r\n2019.3\r\n```\n", "before_files": [{"content": "import asyncio\nimport hashlib\nimport importlib\nimport io\nimport json\nfrom pathlib import Path\nimport zipfile\nfrom typing import Dict, Any, Union, List, Tuple\n\nfrom distlib import markers, util, version\n\n# Provide stubs for testing in native python\ntry:\n import pyodide_js\n\n IN_BROWSER = True\nexcept ImportError:\n IN_BROWSER = False\n\nif IN_BROWSER:\n # In practice, this is the `site-packages` directory.\n WHEEL_BASE = Path(__file__).parent\nelse:\n WHEEL_BASE = Path(\".\") / \"wheels\"\n\nif IN_BROWSER:\n from js import fetch\n\n async def _get_url(url):\n resp = await fetch(url)\n if not resp.ok:\n raise OSError(\n f\"Request for {url} failed with status {resp.status}: {resp.statusText}\"\n )\n return io.BytesIO(await resp.arrayBuffer())\n\n\nelse:\n from urllib.request import urlopen\n\n async def _get_url(url):\n with urlopen(url) as fd:\n content = fd.read()\n return io.BytesIO(content)\n\n\nif IN_BROWSER:\n from asyncio import gather\nelse:\n # asyncio.gather will schedule any coroutines to run on the event loop but\n # we want to avoid using the event loop at all. Instead just run the\n # coroutines in sequence.\n async def gather(*coroutines): # type: ignore\n result = []\n for coroutine in coroutines:\n result.append(await coroutine)\n return result\n\n\nasync def _get_pypi_json(pkgname):\n url = f\"https://pypi.org/pypi/{pkgname}/json\"\n fd = await _get_url(url)\n return json.load(fd)\n\n\ndef _parse_wheel_url(url: str) -> Tuple[str, Dict[str, Any], str]:\n \"\"\"Parse wheels URL and extract available metadata\n\n See https://www.python.org/dev/peps/pep-0427/#file-name-convention\n \"\"\"\n file_name = Path(url).name\n # also strip '.whl' extension.\n wheel_name = Path(url).stem\n tokens = wheel_name.split(\"-\")\n # TODO: support optional build tags in the filename (cf PEP 427)\n if len(tokens) < 5:\n raise ValueError(f\"{file_name} is not a valid wheel file name.\")\n version, python_tag, abi_tag, platform = tokens[-4:]\n name = \"-\".join(tokens[:-4])\n wheel = {\n \"digests\": None, # checksums not available\n \"filename\": file_name,\n \"packagetype\": \"bdist_wheel\",\n \"python_version\": python_tag,\n \"abi_tag\": abi_tag,\n \"platform\": platform,\n \"url\": url,\n }\n\n return name, wheel, version\n\n\ndef _extract_wheel(fd):\n with zipfile.ZipFile(fd) as zf:\n zf.extractall(WHEEL_BASE)\n\n\ndef _validate_wheel(data, fileinfo):\n if fileinfo.get(\"digests\") is None:\n # No checksums available, e.g. 
because installing\n # from a different location than PyPi.\n return\n sha256 = fileinfo[\"digests\"][\"sha256\"]\n m = hashlib.sha256()\n m.update(data.getvalue())\n if m.hexdigest() != sha256:\n raise ValueError(\"Contents don't match hash\")\n\n\nasync def _install_wheel(name, fileinfo):\n url = fileinfo[\"url\"]\n wheel = await _get_url(url)\n _validate_wheel(wheel, fileinfo)\n _extract_wheel(wheel)\n\n\nclass _PackageManager:\n version_scheme = version.get_scheme(\"normalized\")\n\n def __init__(self):\n if IN_BROWSER:\n self.builtin_packages = pyodide_js._module.packages.dependencies.to_py()\n else:\n self.builtin_packages = {}\n self.installed_packages = {}\n\n async def install(self, requirements: Union[str, List[str]], ctx=None):\n if ctx is None:\n ctx = {\"extra\": None}\n\n complete_ctx = dict(markers.DEFAULT_CONTEXT)\n complete_ctx.update(ctx)\n\n if isinstance(requirements, str):\n requirements = [requirements]\n\n transaction: Dict[str, Any] = {\n \"wheels\": [],\n \"pyodide_packages\": set(),\n \"locked\": dict(self.installed_packages),\n }\n requirement_promises = []\n for requirement in requirements:\n requirement_promises.append(\n self.add_requirement(requirement, complete_ctx, transaction)\n )\n\n await gather(*requirement_promises)\n\n wheel_promises = []\n\n # Install built-in packages\n pyodide_packages = transaction[\"pyodide_packages\"]\n if len(pyodide_packages):\n # Note: branch never happens in out-of-browser testing because we\n # report that all dependencies are empty.\n self.installed_packages.update(dict((k, None) for k in pyodide_packages))\n wheel_promises.append(pyodide_js.loadPackage(list(pyodide_packages)))\n\n # Now install PyPI packages\n for name, wheel, ver in transaction[\"wheels\"]:\n wheel_promises.append(_install_wheel(name, wheel))\n self.installed_packages[name] = ver\n await gather(*wheel_promises)\n return f'Installed {\", \".join(self.installed_packages.keys())}'\n\n async def add_requirement(self, requirement: str, ctx, transaction):\n if requirement.endswith(\".whl\"):\n # custom download location\n name, wheel, version = _parse_wheel_url(requirement)\n transaction[\"wheels\"].append((name, wheel, version))\n return\n\n req = util.parse_requirement(requirement)\n\n # If it's a Pyodide package, use that instead of the one on PyPI\n if req.name in self.builtin_packages:\n transaction[\"pyodide_packages\"].add(req.name)\n return\n\n if req.marker:\n if not markers.evaluator.evaluate(req.marker, ctx):\n return\n\n matcher = self.version_scheme.matcher(req.requirement)\n\n # If we already have something that will work, don't\n # fetch again\n for name, ver in transaction[\"locked\"].items():\n if name == req.name:\n if matcher.match(ver):\n break\n else:\n raise ValueError(\n f\"Requested '{requirement}', \"\n f\"but {name}=={ver} is already installed\"\n )\n else:\n metadata = await _get_pypi_json(req.name)\n wheel, ver = self.find_wheel(metadata, req)\n transaction[\"locked\"][req.name] = ver\n\n recurs_reqs = metadata.get(\"info\", {}).get(\"requires_dist\") or []\n for recurs_req in recurs_reqs:\n await self.add_requirement(recurs_req, ctx, transaction)\n\n transaction[\"wheels\"].append((req.name, wheel, ver))\n\n def find_wheel(self, metadata, req):\n releases = []\n for ver, files in metadata.get(\"releases\", {}).items():\n ver = self.version_scheme.suggest(ver)\n if ver is not None:\n releases.append((ver, files))\n\n def version_number(release):\n return version.NormalizedVersion(release[0])\n\n releases = sorted(releases, 
key=version_number, reverse=True)\n matcher = self.version_scheme.matcher(req.requirement)\n for ver, meta in releases:\n if matcher.match(ver):\n for fileinfo in meta:\n if fileinfo[\"filename\"].endswith(\"py3-none-any.whl\"):\n return fileinfo, ver\n\n raise ValueError(f\"Couldn't find a pure Python 3 wheel for '{req.requirement}'\")\n\n\n# Make PACKAGE_MANAGER singleton\nPACKAGE_MANAGER = _PackageManager()\ndel _PackageManager\n\n\ndef install(requirements: Union[str, List[str]]):\n \"\"\"Install the given package and all of its dependencies.\n\n See :ref:`loading packages <loading_packages>` for more information.\n\n This only works for packages that are either pure Python or for packages\n with C extensions that are built in Pyodide. If a pure Python package is not\n found in the Pyodide repository it will be loaded from PyPi.\n\n Parameters\n ----------\n requirements : ``str | List[str]``\n\n A requirement or list of requirements to install. Each requirement is a string, which should be either\n a package name or URL to a wheel:\n\n - If the requirement ends in ``.whl`` it will be interpreted as a URL.\n The file must be a wheel named in compliance with the\n `PEP 427 naming convention <https://www.python.org/dev/peps/pep-0427/#file-format>`_.\n\n - If the requirement does not end in ``.whl``, it will interpreted as the\n name of a package. A package by this name must either be present in the\n Pyodide repository at `indexURL <globalThis.loadPyodide>` or on PyPi\n\n Returns\n -------\n ``Future``\n\n A ``Future`` that resolves to ``None`` when all packages have\n been downloaded and installed.\n \"\"\"\n importlib.invalidate_caches()\n return asyncio.ensure_future(PACKAGE_MANAGER.install(requirements))\n\n\n__all__ = [\"install\"]\n\n\nif __name__ == \"__main__\":\n install(\"snowballstemmer\")\n", "path": "packages/micropip/micropip/micropip.py"}], "after_files": [{"content": "import asyncio\nimport hashlib\nimport importlib\nimport io\nimport json\nfrom pathlib import Path\nimport zipfile\nfrom typing import Dict, Any, Union, List, Tuple\n\nfrom distlib import markers, util, version\n\n# Provide stubs for testing in native python\ntry:\n import pyodide_js\n\n IN_BROWSER = True\nexcept ImportError:\n IN_BROWSER = False\n\nif IN_BROWSER:\n # In practice, this is the `site-packages` directory.\n WHEEL_BASE = Path(__file__).parent\nelse:\n WHEEL_BASE = Path(\".\") / \"wheels\"\n\nif IN_BROWSER:\n from js import fetch\n\n async def _get_url(url):\n resp = await fetch(url)\n if not resp.ok:\n raise OSError(\n f\"Request for {url} failed with status {resp.status}: {resp.statusText}\"\n )\n return io.BytesIO(await resp.arrayBuffer())\n\n\nelse:\n from urllib.request import urlopen\n\n async def _get_url(url):\n with urlopen(url) as fd:\n content = fd.read()\n return io.BytesIO(content)\n\n\nif IN_BROWSER:\n from asyncio import gather\nelse:\n # asyncio.gather will schedule any coroutines to run on the event loop but\n # we want to avoid using the event loop at all. 
Instead just run the\n # coroutines in sequence.\n async def gather(*coroutines): # type: ignore\n result = []\n for coroutine in coroutines:\n result.append(await coroutine)\n return result\n\n\nif IN_BROWSER:\n from pyodide_js import loadedPackages\nelse:\n\n class loadedPackages: # type: ignore\n pass\n\n\nasync def _get_pypi_json(pkgname):\n url = f\"https://pypi.org/pypi/{pkgname}/json\"\n fd = await _get_url(url)\n return json.load(fd)\n\n\ndef _parse_wheel_url(url: str) -> Tuple[str, Dict[str, Any], str]:\n \"\"\"Parse wheels URL and extract available metadata\n\n See https://www.python.org/dev/peps/pep-0427/#file-name-convention\n \"\"\"\n file_name = Path(url).name\n # also strip '.whl' extension.\n wheel_name = Path(url).stem\n tokens = wheel_name.split(\"-\")\n # TODO: support optional build tags in the filename (cf PEP 427)\n if len(tokens) < 5:\n raise ValueError(f\"{file_name} is not a valid wheel file name.\")\n version, python_tag, abi_tag, platform = tokens[-4:]\n name = \"-\".join(tokens[:-4])\n wheel = {\n \"digests\": None, # checksums not available\n \"filename\": file_name,\n \"packagetype\": \"bdist_wheel\",\n \"python_version\": python_tag,\n \"abi_tag\": abi_tag,\n \"platform\": platform,\n \"url\": url,\n }\n\n return name, wheel, version\n\n\ndef _extract_wheel(fd):\n with zipfile.ZipFile(fd) as zf:\n zf.extractall(WHEEL_BASE)\n\n\ndef _validate_wheel(data, fileinfo):\n if fileinfo.get(\"digests\") is None:\n # No checksums available, e.g. because installing\n # from a different location than PyPi.\n return\n sha256 = fileinfo[\"digests\"][\"sha256\"]\n m = hashlib.sha256()\n m.update(data.getvalue())\n if m.hexdigest() != sha256:\n raise ValueError(\"Contents don't match hash\")\n\n\nasync def _install_wheel(name, fileinfo):\n url = fileinfo[\"url\"]\n wheel = await _get_url(url)\n _validate_wheel(wheel, fileinfo)\n _extract_wheel(wheel)\n setattr(loadedPackages, name, url)\n\n\nclass _PackageManager:\n version_scheme = version.get_scheme(\"normalized\")\n\n def __init__(self):\n if IN_BROWSER:\n self.builtin_packages = pyodide_js._module.packages.dependencies.to_py()\n else:\n self.builtin_packages = {}\n self.installed_packages = {}\n\n async def install(self, requirements: Union[str, List[str]], ctx=None):\n if ctx is None:\n ctx = {\"extra\": None}\n\n complete_ctx = dict(markers.DEFAULT_CONTEXT)\n complete_ctx.update(ctx)\n\n if isinstance(requirements, str):\n requirements = [requirements]\n\n transaction: Dict[str, Any] = {\n \"wheels\": [],\n \"pyodide_packages\": set(),\n \"locked\": dict(self.installed_packages),\n }\n requirement_promises = []\n for requirement in requirements:\n requirement_promises.append(\n self.add_requirement(requirement, complete_ctx, transaction)\n )\n\n await gather(*requirement_promises)\n\n wheel_promises = []\n\n # Install built-in packages\n pyodide_packages = transaction[\"pyodide_packages\"]\n if len(pyodide_packages):\n # Note: branch never happens in out-of-browser testing because we\n # report that all dependencies are empty.\n self.installed_packages.update(dict((k, None) for k in pyodide_packages))\n wheel_promises.append(\n asyncio.ensure_future(pyodide_js.loadPackage(list(pyodide_packages)))\n )\n\n # Now install PyPI packages\n for name, wheel, ver in transaction[\"wheels\"]:\n wheel_promises.append(_install_wheel(name, wheel))\n self.installed_packages[name] = ver\n await gather(*wheel_promises)\n\n async def add_requirement(self, requirement: str, ctx, transaction):\n if requirement.endswith(\".whl\"):\n # 
custom download location\n name, wheel, version = _parse_wheel_url(requirement)\n transaction[\"wheels\"].append((name, wheel, version))\n return\n\n req = util.parse_requirement(requirement)\n\n # If it's a Pyodide package, use that instead of the one on PyPI\n if req.name in self.builtin_packages:\n transaction[\"pyodide_packages\"].add(req.name)\n return\n\n if req.marker:\n if not markers.evaluator.evaluate(req.marker, ctx):\n return\n\n matcher = self.version_scheme.matcher(req.requirement)\n\n # If we already have something that will work, don't\n # fetch again\n for name, ver in transaction[\"locked\"].items():\n if name == req.name:\n if matcher.match(ver):\n break\n else:\n raise ValueError(\n f\"Requested '{requirement}', \"\n f\"but {name}=={ver} is already installed\"\n )\n else:\n metadata = await _get_pypi_json(req.name)\n wheel, ver = self.find_wheel(metadata, req)\n transaction[\"locked\"][req.name] = ver\n\n recurs_reqs = metadata.get(\"info\", {}).get(\"requires_dist\") or []\n for recurs_req in recurs_reqs:\n await self.add_requirement(recurs_req, ctx, transaction)\n\n transaction[\"wheels\"].append((req.name, wheel, ver))\n\n def find_wheel(self, metadata, req):\n releases = []\n for ver, files in metadata.get(\"releases\", {}).items():\n ver = self.version_scheme.suggest(ver)\n if ver is not None:\n releases.append((ver, files))\n\n def version_number(release):\n return version.NormalizedVersion(release[0])\n\n releases = sorted(releases, key=version_number, reverse=True)\n matcher = self.version_scheme.matcher(req.requirement)\n for ver, meta in releases:\n if matcher.match(ver):\n for fileinfo in meta:\n if fileinfo[\"filename\"].endswith(\"py3-none-any.whl\"):\n return fileinfo, ver\n\n raise ValueError(f\"Couldn't find a pure Python 3 wheel for '{req.requirement}'\")\n\n\n# Make PACKAGE_MANAGER singleton\nPACKAGE_MANAGER = _PackageManager()\ndel _PackageManager\n\n\ndef install(requirements: Union[str, List[str]]):\n \"\"\"Install the given package and all of its dependencies.\n\n See :ref:`loading packages <loading_packages>` for more information.\n\n This only works for packages that are either pure Python or for packages\n with C extensions that are built in Pyodide. If a pure Python package is not\n found in the Pyodide repository it will be loaded from PyPi.\n\n Parameters\n ----------\n requirements : ``str | List[str]``\n\n A requirement or list of requirements to install. Each requirement is a\n string, which should be either a package name or URL to a wheel:\n\n - If the requirement ends in ``.whl`` it will be interpreted as a URL.\n The file must be a wheel named in compliance with the\n `PEP 427 naming convention <https://www.python.org/dev/peps/pep-0427/#file-format>`_.\n\n - If the requirement does not end in ``.whl``, it will interpreted as the\n name of a package. A package by this name must either be present in the\n Pyodide repository at `indexURL <globalThis.loadPyodide>` or on PyPi\n\n Returns\n -------\n ``Future``\n\n A ``Future`` that resolves to ``None`` when all packages have been\n downloaded and installed.\n \"\"\"\n importlib.invalidate_caches()\n return asyncio.ensure_future(PACKAGE_MANAGER.install(requirements))\n\n\n__all__ = [\"install\"]\n\n\nif __name__ == \"__main__\":\n install(\"snowballstemmer\")\n", "path": "packages/micropip/micropip/micropip.py"}]}
3,200
633
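As a quick illustration of the behaviour described in the micropip record above: in `add_requirement`, the `req.name in self.builtin_packages` check returns before the version matcher is ever consulted, so a pin on a bundled package is silently dropped. The sketch below reproduces just that control flow with made-up names (`BUILTIN_PACKAGES`, `resolve`); it is not the actual micropip code.

```python
# Sketch of the short-circuit in _PackageManager.add_requirement; names are
# illustrative, not the real micropip implementation.
BUILTIN_PACKAGES = {"pytz": "2019.3"}  # versions bundled with the distribution

def resolve(requirement: str) -> str:
    name = requirement.partition("==")[0]
    if name in BUILTIN_PACKAGES:
        # Reached before the version specifier is ever compared, so the pin
        # is silently ignored and the bundled build is loaded instead.
        return f"load bundled {name} {BUILTIN_PACKAGES[name]}"
    return f"fetch {requirement} from PyPI"

print(resolve("pytz==2020.1"))  # -> load bundled pytz 2019.3
print(resolve("idna==2.9"))     # -> fetch idna==2.9 from PyPI
```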
gh_patches_debug_8411
rasdani/github-patches
git_diff
pyqtgraph__pyqtgraph-2964
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- SyntaxWarnings While running an `apt upgrade` I noticed: ``` /usr/lib/python3/dist-packages/pyqtgraph/examples/SpinBox.py:38: SyntaxWarning: invalid escape sequence '\$' regex='\$?(?P<number>(-?\d+(\.\d+)?)|(-?\.\d+))$')), ``` The `\$` should be written `\\$` or `r'\$'` since a few Python releases (same for all backslash escape that have no meanings). I don't have the time to search for other occurrences of this fact, but running the tests with `PYTHONDEVMODE=1` should help spotting them :) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyqtgraph/examples/SpinBox.py` Content: ``` 1 """ 2 This example demonstrates the SpinBox widget, which is an extension of 3 QDoubleSpinBox providing some advanced features: 4 5 * SI-prefixed units 6 * Non-linear stepping modes 7 * Bounded/unbounded values 8 9 """ 10 11 import ast 12 13 import pyqtgraph as pg 14 from pyqtgraph.Qt import QtWidgets 15 16 app = pg.mkQApp("SpinBox Example") 17 18 19 spins = [ 20 ("Floating-point spin box, min=0, no maximum.<br>Non-finite values (nan, inf) are permitted.", 21 pg.SpinBox(value=5.0, bounds=[0, None], finite=False)), 22 ("Integer spin box, dec stepping<br>(1-9, 10-90, 100-900, etc), decimals=4", 23 pg.SpinBox(value=10, int=True, dec=True, minStep=1, step=1, decimals=4)), 24 ("Float with SI-prefixed units<br>(n, u, m, k, M, etc)", 25 pg.SpinBox(value=0.9, suffix='V', siPrefix=True)), 26 ("Float with SI-prefixed units,<br>dec step=0.1, minStep=0.1", 27 pg.SpinBox(value=1.0, suffix='PSI', siPrefix=True, dec=True, step=0.1, minStep=0.1)), 28 ("Float with SI-prefixed units,<br>dec step=0.5, minStep=0.01", 29 pg.SpinBox(value=1.0, suffix='V', siPrefix=True, dec=True, step=0.5, minStep=0.01)), 30 ("Float with SI-prefixed units,<br>dec step=1.0, minStep=0.001", 31 pg.SpinBox(value=1.0, suffix='V', siPrefix=True, dec=True, step=1.0, minStep=0.001)), 32 ("Float with SI-prefixed units,<br>scaleAtZero=1e-6, step=1e-9", 33 pg.SpinBox(value=0, suffix='V', siPrefix=True, scaleAtZero=1e-6, step=1e-9)), 34 ("Float with SI prefix but no suffix", 35 pg.SpinBox(value=1e9, siPrefix=True)), 36 ("Float with custom formatting", 37 pg.SpinBox(value=23.07, format='${value:0.02f}', 38 regex='\$?(?P<number>(-?\d+(\.\d+)?)|(-?\.\d+))$')), 39 ("Int with suffix", 40 pg.SpinBox(value=999, step=1, int=True, suffix="V")), 41 ("Int with custom formatting", 42 pg.SpinBox(value=4567, step=1, int=True, bounds=[0,None], format='0x{value:X}', 43 regex='(0x)?(?P<number>[0-9a-fA-F]+)$', 44 evalFunc=lambda s: ast.literal_eval('0x'+s))), 45 ("Integer with bounds=[10, 20] and wrapping", 46 pg.SpinBox(value=10, bounds=[10, 20], int=True, minStep=1, step=1, wrapping=True)), 47 ] 48 49 50 win = QtWidgets.QMainWindow() 51 win.setWindowTitle('pyqtgraph example: SpinBox') 52 cw = QtWidgets.QWidget() 53 layout = QtWidgets.QGridLayout() 54 cw.setLayout(layout) 55 win.setCentralWidget(cw) 56 win.show() 57 #win.resize(300, 600) 58 changingLabel = QtWidgets.QLabel() ## updated immediately 59 changedLabel = QtWidgets.QLabel() ## updated only when editing is finished or mouse wheel has stopped for 0.3sec 60 changingLabel.setMinimumWidth(200) 61 font = changingLabel.font() 62 font.setBold(True) 63 font.setPointSize(14) 64 changingLabel.setFont(font) 65 changedLabel.setFont(font) 66 labels = [] 67 68 69 def valueChanged(sb): 70 changedLabel.setText("Final value: %s" 
% str(sb.value())) 71 72 def valueChanging(sb, value): 73 changingLabel.setText("Value changing: %s" % str(sb.value())) 74 75 76 for text, spin in spins: 77 label = QtWidgets.QLabel(text) 78 labels.append(label) 79 layout.addWidget(label) 80 layout.addWidget(spin) 81 spin.sigValueChanged.connect(valueChanged) 82 spin.sigValueChanging.connect(valueChanging) 83 84 layout.addWidget(changingLabel, 0, 1) 85 layout.addWidget(changedLabel, 2, 1) 86 87 88 #def mkWin(): 89 #win = QtWidgets.QMainWindow() 90 #g = QtWidgets.QFormLayout() 91 #w = QtWidgets.QWidget() 92 #w.setLayout(g) 93 #win.setCentralWidget(w) 94 #s1 = SpinBox(value=5, step=0.1, bounds=[-1.5, None], suffix='units') 95 #t1 = QtWidgets.QLineEdit() 96 #g.addRow(s1, t1) 97 #s2 = SpinBox(value=10e-6, dec=True, step=0.1, minStep=1e-6, suffix='A', siPrefix=True) 98 #t2 = QtWidgets.QLineEdit() 99 #g.addRow(s2, t2) 100 #s3 = SpinBox(value=1000, dec=True, step=0.5, minStep=1e-6, bounds=[1, 1e9], suffix='Hz', siPrefix=True) 101 #t3 = QtWidgets.QLineEdit() 102 #g.addRow(s3, t3) 103 #s4 = SpinBox(int=True, dec=True, step=1, minStep=1, bounds=[-10, 1000]) 104 #t4 = QtWidgets.QLineEdit() 105 #g.addRow(s4, t4) 106 107 #win.show() 108 109 #import sys 110 #for sb in [s1, s2, s3,s4]: 111 112 ##QtCore.QObject.connect(sb, QtCore.SIGNAL('valueChanged(double)'), lambda v: sys.stdout.write(str(sb) + " valueChanged\n")) 113 ##QtCore.QObject.connect(sb, QtCore.SIGNAL('editingFinished()'), lambda: sys.stdout.write(str(sb) + " editingFinished\n")) 114 #sb.sigValueChanged.connect(valueChanged) 115 #sb.sigValueChanging.connect(valueChanging) 116 #sb.editingFinished.connect(lambda: sys.stdout.write(str(sb) + " editingFinished\n")) 117 #return win, w, [s1, s2, s3, s4] 118 #a = mkWin() 119 120 121 #def test(n=100): 122 #for i in range(n): 123 #win, w, sb = mkWin() 124 #for s in sb: 125 #w.setParent(None) 126 #s.setParent(None) 127 #s.valueChanged.disconnect() 128 #s.editingFinished.disconnect() 129 130 131 if __name__ == '__main__': 132 pg.exec() 133 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pyqtgraph/examples/SpinBox.py b/pyqtgraph/examples/SpinBox.py --- a/pyqtgraph/examples/SpinBox.py +++ b/pyqtgraph/examples/SpinBox.py @@ -35,7 +35,7 @@ pg.SpinBox(value=1e9, siPrefix=True)), ("Float with custom formatting", pg.SpinBox(value=23.07, format='${value:0.02f}', - regex='\$?(?P<number>(-?\d+(\.\d+)?)|(-?\.\d+))$')), + regex = r'\$?(?P<number>(-?\d+(\.\d+)?)|(-?\.\d+))$')), ("Int with suffix", pg.SpinBox(value=999, step=1, int=True, suffix="V")), ("Int with custom formatting",
{"golden_diff": "diff --git a/pyqtgraph/examples/SpinBox.py b/pyqtgraph/examples/SpinBox.py\n--- a/pyqtgraph/examples/SpinBox.py\n+++ b/pyqtgraph/examples/SpinBox.py\n@@ -35,7 +35,7 @@\n pg.SpinBox(value=1e9, siPrefix=True)),\n (\"Float with custom formatting\", \n pg.SpinBox(value=23.07, format='${value:0.02f}',\n- regex='\\$?(?P<number>(-?\\d+(\\.\\d+)?)|(-?\\.\\d+))$')),\n+ regex = r'\\$?(?P<number>(-?\\d+(\\.\\d+)?)|(-?\\.\\d+))$')),\n (\"Int with suffix\",\n pg.SpinBox(value=999, step=1, int=True, suffix=\"V\")),\n (\"Int with custom formatting\",\n", "issue": "SyntaxWarnings\nWhile running an `apt upgrade` I noticed:\r\n\r\n```\r\n/usr/lib/python3/dist-packages/pyqtgraph/examples/SpinBox.py:38: SyntaxWarning: invalid escape sequence '\\$'\r\n regex='\\$?(?P<number>(-?\\d+(\\.\\d+)?)|(-?\\.\\d+))$')),\r\n```\r\n\r\nThe `\\$` should be written `\\\\$` or `r'\\$'` since a few Python releases (same for all backslash escape that have no meanings). I don't have the time to search for other occurrences of this fact, but running the tests with `PYTHONDEVMODE=1` should help spotting them :)\n", "before_files": [{"content": "\"\"\"\nThis example demonstrates the SpinBox widget, which is an extension of \nQDoubleSpinBox providing some advanced features:\n\n * SI-prefixed units\n * Non-linear stepping modes\n * Bounded/unbounded values\n\n\"\"\"\n\nimport ast\n\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtWidgets\n\napp = pg.mkQApp(\"SpinBox Example\")\n\n\nspins = [\n (\"Floating-point spin box, min=0, no maximum.<br>Non-finite values (nan, inf) are permitted.\",\n pg.SpinBox(value=5.0, bounds=[0, None], finite=False)),\n (\"Integer spin box, dec stepping<br>(1-9, 10-90, 100-900, etc), decimals=4\", \n pg.SpinBox(value=10, int=True, dec=True, minStep=1, step=1, decimals=4)),\n (\"Float with SI-prefixed units<br>(n, u, m, k, M, etc)\", \n pg.SpinBox(value=0.9, suffix='V', siPrefix=True)),\n (\"Float with SI-prefixed units,<br>dec step=0.1, minStep=0.1\", \n pg.SpinBox(value=1.0, suffix='PSI', siPrefix=True, dec=True, step=0.1, minStep=0.1)),\n (\"Float with SI-prefixed units,<br>dec step=0.5, minStep=0.01\", \n pg.SpinBox(value=1.0, suffix='V', siPrefix=True, dec=True, step=0.5, minStep=0.01)),\n (\"Float with SI-prefixed units,<br>dec step=1.0, minStep=0.001\", \n pg.SpinBox(value=1.0, suffix='V', siPrefix=True, dec=True, step=1.0, minStep=0.001)),\n (\"Float with SI-prefixed units,<br>scaleAtZero=1e-6, step=1e-9\",\n pg.SpinBox(value=0, suffix='V', siPrefix=True, scaleAtZero=1e-6, step=1e-9)),\n (\"Float with SI prefix but no suffix\",\n pg.SpinBox(value=1e9, siPrefix=True)),\n (\"Float with custom formatting\", \n pg.SpinBox(value=23.07, format='${value:0.02f}',\n regex='\\$?(?P<number>(-?\\d+(\\.\\d+)?)|(-?\\.\\d+))$')),\n (\"Int with suffix\",\n pg.SpinBox(value=999, step=1, int=True, suffix=\"V\")),\n (\"Int with custom formatting\", \n pg.SpinBox(value=4567, step=1, int=True, bounds=[0,None], format='0x{value:X}', \n regex='(0x)?(?P<number>[0-9a-fA-F]+)$',\n evalFunc=lambda s: ast.literal_eval('0x'+s))),\n (\"Integer with bounds=[10, 20] and wrapping\",\n pg.SpinBox(value=10, bounds=[10, 20], int=True, minStep=1, step=1, wrapping=True)),\n]\n\n\nwin = QtWidgets.QMainWindow()\nwin.setWindowTitle('pyqtgraph example: SpinBox')\ncw = QtWidgets.QWidget()\nlayout = QtWidgets.QGridLayout()\ncw.setLayout(layout)\nwin.setCentralWidget(cw)\nwin.show()\n#win.resize(300, 600)\nchangingLabel = QtWidgets.QLabel() ## updated immediately\nchangedLabel = QtWidgets.QLabel() ## updated only when 
editing is finished or mouse wheel has stopped for 0.3sec\nchangingLabel.setMinimumWidth(200)\nfont = changingLabel.font()\nfont.setBold(True)\nfont.setPointSize(14)\nchangingLabel.setFont(font)\nchangedLabel.setFont(font)\nlabels = []\n\n\ndef valueChanged(sb):\n changedLabel.setText(\"Final value: %s\" % str(sb.value()))\n\ndef valueChanging(sb, value):\n changingLabel.setText(\"Value changing: %s\" % str(sb.value()))\n\n \nfor text, spin in spins:\n label = QtWidgets.QLabel(text)\n labels.append(label)\n layout.addWidget(label)\n layout.addWidget(spin)\n spin.sigValueChanged.connect(valueChanged)\n spin.sigValueChanging.connect(valueChanging)\n\nlayout.addWidget(changingLabel, 0, 1)\nlayout.addWidget(changedLabel, 2, 1)\n\n\n#def mkWin():\n #win = QtWidgets.QMainWindow()\n #g = QtWidgets.QFormLayout()\n #w = QtWidgets.QWidget()\n #w.setLayout(g)\n #win.setCentralWidget(w)\n #s1 = SpinBox(value=5, step=0.1, bounds=[-1.5, None], suffix='units')\n #t1 = QtWidgets.QLineEdit()\n #g.addRow(s1, t1)\n #s2 = SpinBox(value=10e-6, dec=True, step=0.1, minStep=1e-6, suffix='A', siPrefix=True)\n #t2 = QtWidgets.QLineEdit()\n #g.addRow(s2, t2)\n #s3 = SpinBox(value=1000, dec=True, step=0.5, minStep=1e-6, bounds=[1, 1e9], suffix='Hz', siPrefix=True)\n #t3 = QtWidgets.QLineEdit()\n #g.addRow(s3, t3)\n #s4 = SpinBox(int=True, dec=True, step=1, minStep=1, bounds=[-10, 1000])\n #t4 = QtWidgets.QLineEdit()\n #g.addRow(s4, t4)\n\n #win.show()\n\n #import sys\n #for sb in [s1, s2, s3,s4]:\n\n ##QtCore.QObject.connect(sb, QtCore.SIGNAL('valueChanged(double)'), lambda v: sys.stdout.write(str(sb) + \" valueChanged\\n\"))\n ##QtCore.QObject.connect(sb, QtCore.SIGNAL('editingFinished()'), lambda: sys.stdout.write(str(sb) + \" editingFinished\\n\"))\n #sb.sigValueChanged.connect(valueChanged)\n #sb.sigValueChanging.connect(valueChanging)\n #sb.editingFinished.connect(lambda: sys.stdout.write(str(sb) + \" editingFinished\\n\"))\n #return win, w, [s1, s2, s3, s4]\n#a = mkWin()\n\n\n#def test(n=100):\n #for i in range(n):\n #win, w, sb = mkWin()\n #for s in sb:\n #w.setParent(None)\n #s.setParent(None)\n #s.valueChanged.disconnect()\n #s.editingFinished.disconnect()\n\n\nif __name__ == '__main__':\n pg.exec()\n", "path": "pyqtgraph/examples/SpinBox.py"}], "after_files": [{"content": "\"\"\"\nThis example demonstrates the SpinBox widget, which is an extension of \nQDoubleSpinBox providing some advanced features:\n\n * SI-prefixed units\n * Non-linear stepping modes\n * Bounded/unbounded values\n\n\"\"\"\n\nimport ast\n\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtWidgets\n\napp = pg.mkQApp(\"SpinBox Example\")\n\n\nspins = [\n (\"Floating-point spin box, min=0, no maximum.<br>Non-finite values (nan, inf) are permitted.\",\n pg.SpinBox(value=5.0, bounds=[0, None], finite=False)),\n (\"Integer spin box, dec stepping<br>(1-9, 10-90, 100-900, etc), decimals=4\", \n pg.SpinBox(value=10, int=True, dec=True, minStep=1, step=1, decimals=4)),\n (\"Float with SI-prefixed units<br>(n, u, m, k, M, etc)\", \n pg.SpinBox(value=0.9, suffix='V', siPrefix=True)),\n (\"Float with SI-prefixed units,<br>dec step=0.1, minStep=0.1\", \n pg.SpinBox(value=1.0, suffix='PSI', siPrefix=True, dec=True, step=0.1, minStep=0.1)),\n (\"Float with SI-prefixed units,<br>dec step=0.5, minStep=0.01\", \n pg.SpinBox(value=1.0, suffix='V', siPrefix=True, dec=True, step=0.5, minStep=0.01)),\n (\"Float with SI-prefixed units,<br>dec step=1.0, minStep=0.001\", \n pg.SpinBox(value=1.0, suffix='V', siPrefix=True, dec=True, step=1.0, minStep=0.001)),\n 
(\"Float with SI-prefixed units,<br>scaleAtZero=1e-6, step=1e-9\",\n pg.SpinBox(value=0, suffix='V', siPrefix=True, scaleAtZero=1e-6, step=1e-9)),\n (\"Float with SI prefix but no suffix\",\n pg.SpinBox(value=1e9, siPrefix=True)),\n (\"Float with custom formatting\", \n pg.SpinBox(value=23.07, format='${value:0.02f}',\n regex = r'\\$?(?P<number>(-?\\d+(\\.\\d+)?)|(-?\\.\\d+))$')),\n (\"Int with suffix\",\n pg.SpinBox(value=999, step=1, int=True, suffix=\"V\")),\n (\"Int with custom formatting\", \n pg.SpinBox(value=4567, step=1, int=True, bounds=[0,None], format='0x{value:X}', \n regex='(0x)?(?P<number>[0-9a-fA-F]+)$',\n evalFunc=lambda s: ast.literal_eval('0x'+s))),\n (\"Integer with bounds=[10, 20] and wrapping\",\n pg.SpinBox(value=10, bounds=[10, 20], int=True, minStep=1, step=1, wrapping=True)),\n]\n\n\nwin = QtWidgets.QMainWindow()\nwin.setWindowTitle('pyqtgraph example: SpinBox')\ncw = QtWidgets.QWidget()\nlayout = QtWidgets.QGridLayout()\ncw.setLayout(layout)\nwin.setCentralWidget(cw)\nwin.show()\n#win.resize(300, 600)\nchangingLabel = QtWidgets.QLabel() ## updated immediately\nchangedLabel = QtWidgets.QLabel() ## updated only when editing is finished or mouse wheel has stopped for 0.3sec\nchangingLabel.setMinimumWidth(200)\nfont = changingLabel.font()\nfont.setBold(True)\nfont.setPointSize(14)\nchangingLabel.setFont(font)\nchangedLabel.setFont(font)\nlabels = []\n\n\ndef valueChanged(sb):\n changedLabel.setText(\"Final value: %s\" % str(sb.value()))\n\ndef valueChanging(sb, value):\n changingLabel.setText(\"Value changing: %s\" % str(sb.value()))\n\n \nfor text, spin in spins:\n label = QtWidgets.QLabel(text)\n labels.append(label)\n layout.addWidget(label)\n layout.addWidget(spin)\n spin.sigValueChanged.connect(valueChanged)\n spin.sigValueChanging.connect(valueChanging)\n\nlayout.addWidget(changingLabel, 0, 1)\nlayout.addWidget(changedLabel, 2, 1)\n\n\n#def mkWin():\n #win = QtWidgets.QMainWindow()\n #g = QtWidgets.QFormLayout()\n #w = QtWidgets.QWidget()\n #w.setLayout(g)\n #win.setCentralWidget(w)\n #s1 = SpinBox(value=5, step=0.1, bounds=[-1.5, None], suffix='units')\n #t1 = QtWidgets.QLineEdit()\n #g.addRow(s1, t1)\n #s2 = SpinBox(value=10e-6, dec=True, step=0.1, minStep=1e-6, suffix='A', siPrefix=True)\n #t2 = QtWidgets.QLineEdit()\n #g.addRow(s2, t2)\n #s3 = SpinBox(value=1000, dec=True, step=0.5, minStep=1e-6, bounds=[1, 1e9], suffix='Hz', siPrefix=True)\n #t3 = QtWidgets.QLineEdit()\n #g.addRow(s3, t3)\n #s4 = SpinBox(int=True, dec=True, step=1, minStep=1, bounds=[-10, 1000])\n #t4 = QtWidgets.QLineEdit()\n #g.addRow(s4, t4)\n\n #win.show()\n\n #import sys\n #for sb in [s1, s2, s3,s4]:\n\n ##QtCore.QObject.connect(sb, QtCore.SIGNAL('valueChanged(double)'), lambda v: sys.stdout.write(str(sb) + \" valueChanged\\n\"))\n ##QtCore.QObject.connect(sb, QtCore.SIGNAL('editingFinished()'), lambda: sys.stdout.write(str(sb) + \" editingFinished\\n\"))\n #sb.sigValueChanged.connect(valueChanged)\n #sb.sigValueChanging.connect(valueChanging)\n #sb.editingFinished.connect(lambda: sys.stdout.write(str(sb) + \" editingFinished\\n\"))\n #return win, w, [s1, s2, s3, s4]\n#a = mkWin()\n\n\n#def test(n=100):\n #for i in range(n):\n #win, w, sb = mkWin()\n #for s in sb:\n #w.setParent(None)\n #s.setParent(None)\n #s.valueChanged.disconnect()\n #s.editingFinished.disconnect()\n\n\nif __name__ == '__main__':\n pg.exec()\n", "path": "pyqtgraph/examples/SpinBox.py"}]}
2,189
189
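A minimal, self-contained check of the raw-string fix in the SpinBox diff above; the sample input `'$23.07'` mirrors the spin box's default value from the example script.

```python
import re

# Raw-string spelling from the patch: backslashes reach the regex engine
# untouched, so no "invalid escape sequence" warning is emitted.
regex = r'\$?(?P<number>(-?\d+(\.\d+)?)|(-?\.\d+))$'

m = re.match(regex, '$23.07')        # mirrors the SpinBox default value 23.07
print(m.group('number'))             # -> 23.07

# The old plain-string spelling '\$?...' yields the same characters (unknown
# escapes are kept as-is), but compiling it warns: DeprecationWarning on older
# Pythons, SyntaxWarning on newer ones. PYTHONDEVMODE=1, as suggested in the
# issue, makes those warnings visible.
```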
gh_patches_debug_27192
rasdani/github-patches
git_diff
nipy__nipype-3154
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Test on Python 3.8 ### Summary Python 3.8 has been out for a month or so, and upstream libraries are mostly providing wheels. We should start 3.8 tests when it's feasible. Necessary steps: * [ ] Update `.travis.yml` to use Python 3.8 * [ ] Note any failures, identify whether they exist on our end or in a dependency * [ ] Find blocking upstream issues and link to them so we can track * [ ] When tests pass and urgent `FutureWarning`/`DeprecationWarning`s are dealt with, update the classifiers in `nipype/info.py` to indicate we support Python 3.8. I marked this [![good-first-issue](https://img.shields.io/github/labels/nipy/nipype/good-first-issue)](https://github.com/nipy/nipype/labels/good-first-issue) but it's less for a new developer than someone looking to get more involved in project maintenance. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nipype/info.py` Content: ``` 1 """ This file contains defines parameters for nipy that we use to fill 2 settings in setup.py, the nipy top-level docstring, and for building the 3 docs. In setup.py in particular, we exec this file, so it cannot import nipy 4 """ 5 6 # nipype version information 7 # Remove -dev for release 8 __version__ = "1.5.0-rc1.post-dev" 9 10 11 def get_nipype_gitversion(): 12 """Nipype version as reported by the last commit in git 13 14 Returns 15 ------- 16 None or str 17 Version of Nipype according to git. 18 """ 19 import os 20 import subprocess 21 22 try: 23 import nipype 24 25 gitpath = os.path.realpath( 26 os.path.join(os.path.dirname(nipype.__file__), os.path.pardir) 27 ) 28 except: 29 gitpath = os.getcwd() 30 gitpathgit = os.path.join(gitpath, ".git") 31 if not os.path.exists(gitpathgit): 32 return None 33 ver = None 34 try: 35 o, _ = subprocess.Popen( 36 "git describe", shell=True, cwd=gitpath, stdout=subprocess.PIPE 37 ).communicate() 38 except Exception: 39 pass 40 else: 41 ver = o.decode().strip().split("-")[-1] 42 return ver 43 44 45 if __version__.endswith("-dev"): 46 gitversion = get_nipype_gitversion() 47 if gitversion: 48 __version__ = "{}+{}".format(__version__, gitversion) 49 50 CLASSIFIERS = [ 51 "Development Status :: 5 - Production/Stable", 52 "Environment :: Console", 53 "Intended Audience :: Science/Research", 54 "License :: OSI Approved :: Apache Software License", 55 "Operating System :: MacOS :: MacOS X", 56 "Operating System :: POSIX :: Linux", 57 "Programming Language :: Python :: 3.6", 58 "Programming Language :: Python :: 3.7", 59 "Topic :: Scientific/Engineering", 60 ] 61 PYTHON_REQUIRES = ">= 3.6" 62 63 description = "Neuroimaging in Python: Pipelines and Interfaces" 64 65 # Note: this long_description is actually a copy/paste from the top-level 66 # README.txt, so that it shows up nicely on PyPI. So please remember to edit 67 # it only in one place and sync it correctly. 68 long_description = """======================================================== 69 NIPYPE: Neuroimaging in Python: Pipelines and Interfaces 70 ======================================================== 71 72 Current neuroimaging software offer users an incredible opportunity to 73 analyze data using a variety of different algorithms. However, this has 74 resulted in a heterogeneous collection of specialized applications 75 without transparent interoperability or a uniform operating interface. 
76 77 *Nipype*, an open-source, community-developed initiative under the 78 umbrella of `NiPy <http://nipy.org>`_, is a Python project that provides a 79 uniform interface to existing neuroimaging software and facilitates interaction 80 between these packages within a single workflow. Nipype provides an environment 81 that encourages interactive exploration of algorithms from different 82 packages (e.g., AFNI, ANTS, BRAINS, BrainSuite, Camino, FreeSurfer, FSL, MNE, 83 MRtrix, MNE, Nipy, Slicer, SPM), eases the design of workflows within and 84 between packages, and reduces the learning curve necessary to use different \ 85 packages. Nipype is creating a collaborative platform for neuroimaging \ 86 software development in a high-level language and addressing limitations of \ 87 existing pipeline systems. 88 89 *Nipype* allows you to: 90 91 * easily interact with tools from different software packages 92 * combine processing steps from different software packages 93 * develop new workflows faster by reusing common steps from old ones 94 * process data faster by running it in parallel on many cores/machines 95 * make your research easily reproducible 96 * share your processing workflows with the community 97 """ 98 99 # versions 100 NIBABEL_MIN_VERSION = "2.1.0" 101 NETWORKX_MIN_VERSION = "1.9" 102 NUMPY_MIN_VERSION = "1.13" 103 # Numpy bug in python 3.7: 104 # https://www.opensourceanswers.com/blog/you-shouldnt-use-python-37-for-data-science-right-now.html 105 NUMPY_MIN_VERSION_37 = "1.15.3" 106 SCIPY_MIN_VERSION = "0.14" 107 TRAITS_MIN_VERSION = "4.6" 108 DATEUTIL_MIN_VERSION = "2.2" 109 FUTURE_MIN_VERSION = "0.16.0" 110 SIMPLEJSON_MIN_VERSION = "3.8.0" 111 PROV_VERSION = "1.5.2" 112 CLICK_MIN_VERSION = "6.6.0" 113 PYDOT_MIN_VERSION = "1.2.3" 114 115 NAME = "nipype" 116 MAINTAINER = "nipype developers" 117 MAINTAINER_EMAIL = "[email protected]" 118 DESCRIPTION = description 119 LONG_DESCRIPTION = long_description 120 URL = "http://nipy.org/nipype" 121 DOWNLOAD_URL = "http://github.com/nipy/nipype/archives/master" 122 LICENSE = "Apache License, 2.0" 123 AUTHOR = "nipype developers" 124 AUTHOR_EMAIL = "[email protected]" 125 PLATFORMS = "OS Independent" 126 MAJOR = __version__.split(".")[0] 127 MINOR = __version__.split(".")[1] 128 MICRO = __version__.replace("-", ".").split(".")[2] 129 ISRELEASE = ( 130 len(__version__.replace("-", ".").split(".")) == 3 131 or "post" in __version__.replace("-", ".").split(".")[-1] 132 ) 133 VERSION = __version__ 134 PROVIDES = ["nipype"] 135 REQUIRES = [ 136 "click>=%s" % CLICK_MIN_VERSION, 137 "networkx>=%s" % NETWORKX_MIN_VERSION, 138 "nibabel>=%s" % NIBABEL_MIN_VERSION, 139 'numpy>=%s ; python_version < "3.7"' % NUMPY_MIN_VERSION, 140 'numpy>=%s ; python_version >= "3.7"' % NUMPY_MIN_VERSION_37, 141 "packaging", 142 "prov>=%s" % PROV_VERSION, 143 "pydot>=%s" % PYDOT_MIN_VERSION, 144 "pydotplus", 145 "python-dateutil>=%s" % DATEUTIL_MIN_VERSION, 146 "scipy>=%s" % SCIPY_MIN_VERSION, 147 "simplejson>=%s" % SIMPLEJSON_MIN_VERSION, 148 "traits>=%s,!=5.0" % TRAITS_MIN_VERSION, 149 "filelock>=3.0.0", 150 "etelemetry>=0.2.0", 151 ] 152 153 # neurdflib has to come after prov 154 # https://github.com/nipy/nipype/pull/2961#issuecomment-512035484 155 REQUIRES += ["neurdflib"] 156 157 TESTS_REQUIRES = [ 158 "codecov", 159 "coverage<5", 160 "pytest", 161 "pytest-cov", 162 "pytest-env", 163 "pytest-timeout", 164 ] 165 166 EXTRA_REQUIRES = { 167 "data": ["datalad"], 168 "doc": [ 169 "dipy", 170 "ipython", 171 "matplotlib", 172 "nbsphinx", 173 "sphinx-argparse", 174 
"sphinx>=2.1.2", 175 "sphinxcontrib-apidoc", 176 "sphinxcontrib-napoleon", 177 ], 178 "duecredit": ["duecredit"], 179 "nipy": ["nitime", "nilearn<0.5.0", "dipy", "nipy", "matplotlib"], 180 "profiler": ["psutil>=5.0"], 181 "pybids": ["pybids>=0.7.0"], 182 "specs": ["black"], 183 "ssh": ["paramiko"], 184 "tests": TESTS_REQUIRES, 185 "xvfbwrapper": ["xvfbwrapper"], 186 # 'mesh': ['mayavi'] # Enable when it works 187 } 188 189 190 def _list_union(iterable): 191 return list(set(sum(iterable, []))) 192 193 194 # Enable a handle to install all extra dependencies at once 195 EXTRA_REQUIRES["all"] = _list_union(EXTRA_REQUIRES.values()) 196 # dev = doc + tests + specs 197 EXTRA_REQUIRES["dev"] = _list_union( 198 val for key, val in EXTRA_REQUIRES.items() if key in ("doc", "tests", "specs") 199 ) 200 201 STATUS = "stable" 202 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nipype/info.py b/nipype/info.py --- a/nipype/info.py +++ b/nipype/info.py @@ -56,6 +56,7 @@ "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Topic :: Scientific/Engineering", ] PYTHON_REQUIRES = ">= 3.6" @@ -109,6 +110,7 @@ FUTURE_MIN_VERSION = "0.16.0" SIMPLEJSON_MIN_VERSION = "3.8.0" PROV_VERSION = "1.5.2" +RDFLIB_MIN_VERSION = "5.0.0" CLICK_MIN_VERSION = "6.6.0" PYDOT_MIN_VERSION = "1.2.3" @@ -143,6 +145,7 @@ "pydot>=%s" % PYDOT_MIN_VERSION, "pydotplus", "python-dateutil>=%s" % DATEUTIL_MIN_VERSION, + "rdflib>=%s" % RDFLIB_MIN_VERSION, "scipy>=%s" % SCIPY_MIN_VERSION, "simplejson>=%s" % SIMPLEJSON_MIN_VERSION, "traits>=%s,!=5.0" % TRAITS_MIN_VERSION, @@ -150,10 +153,6 @@ "etelemetry>=0.2.0", ] -# neurdflib has to come after prov -# https://github.com/nipy/nipype/pull/2961#issuecomment-512035484 -REQUIRES += ["neurdflib"] - TESTS_REQUIRES = [ "codecov", "coverage<5",
{"golden_diff": "diff --git a/nipype/info.py b/nipype/info.py\n--- a/nipype/info.py\n+++ b/nipype/info.py\n@@ -56,6 +56,7 @@\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering\",\n ]\n PYTHON_REQUIRES = \">= 3.6\"\n@@ -109,6 +110,7 @@\n FUTURE_MIN_VERSION = \"0.16.0\"\n SIMPLEJSON_MIN_VERSION = \"3.8.0\"\n PROV_VERSION = \"1.5.2\"\n+RDFLIB_MIN_VERSION = \"5.0.0\"\n CLICK_MIN_VERSION = \"6.6.0\"\n PYDOT_MIN_VERSION = \"1.2.3\"\n \n@@ -143,6 +145,7 @@\n \"pydot>=%s\" % PYDOT_MIN_VERSION,\n \"pydotplus\",\n \"python-dateutil>=%s\" % DATEUTIL_MIN_VERSION,\n+ \"rdflib>=%s\" % RDFLIB_MIN_VERSION,\n \"scipy>=%s\" % SCIPY_MIN_VERSION,\n \"simplejson>=%s\" % SIMPLEJSON_MIN_VERSION,\n \"traits>=%s,!=5.0\" % TRAITS_MIN_VERSION,\n@@ -150,10 +153,6 @@\n \"etelemetry>=0.2.0\",\n ]\n \n-# neurdflib has to come after prov\n-# https://github.com/nipy/nipype/pull/2961#issuecomment-512035484\n-REQUIRES += [\"neurdflib\"]\n-\n TESTS_REQUIRES = [\n \"codecov\",\n \"coverage<5\",\n", "issue": "Test on Python 3.8\n### Summary\r\n\r\nPython 3.8 has been out for a month or so, and upstream libraries are mostly providing wheels. We should start 3.8 tests when it's feasible.\r\n\r\nNecessary steps:\r\n\r\n* [ ] Update `.travis.yml` to use Python 3.8\r\n* [ ] Note any failures, identify whether they exist on our end or in a dependency\r\n* [ ] Find blocking upstream issues and link to them so we can track\r\n* [ ] When tests pass and urgent `FutureWarning`/`DeprecationWarning`s are dealt with, update the classifiers in `nipype/info.py` to indicate we support Python 3.8.\r\n\r\nI marked this [![good-first-issue](https://img.shields.io/github/labels/nipy/nipype/good-first-issue)](https://github.com/nipy/nipype/labels/good-first-issue) but it's less for a new developer than someone looking to get more involved in project maintenance.\n", "before_files": [{"content": "\"\"\" This file contains defines parameters for nipy that we use to fill\nsettings in setup.py, the nipy top-level docstring, and for building the\ndocs. 
In setup.py in particular, we exec this file, so it cannot import nipy\n\"\"\"\n\n# nipype version information\n# Remove -dev for release\n__version__ = \"1.5.0-rc1.post-dev\"\n\n\ndef get_nipype_gitversion():\n \"\"\"Nipype version as reported by the last commit in git\n\n Returns\n -------\n None or str\n Version of Nipype according to git.\n \"\"\"\n import os\n import subprocess\n\n try:\n import nipype\n\n gitpath = os.path.realpath(\n os.path.join(os.path.dirname(nipype.__file__), os.path.pardir)\n )\n except:\n gitpath = os.getcwd()\n gitpathgit = os.path.join(gitpath, \".git\")\n if not os.path.exists(gitpathgit):\n return None\n ver = None\n try:\n o, _ = subprocess.Popen(\n \"git describe\", shell=True, cwd=gitpath, stdout=subprocess.PIPE\n ).communicate()\n except Exception:\n pass\n else:\n ver = o.decode().strip().split(\"-\")[-1]\n return ver\n\n\nif __version__.endswith(\"-dev\"):\n gitversion = get_nipype_gitversion()\n if gitversion:\n __version__ = \"{}+{}\".format(__version__, gitversion)\n\nCLASSIFIERS = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering\",\n]\nPYTHON_REQUIRES = \">= 3.6\"\n\ndescription = \"Neuroimaging in Python: Pipelines and Interfaces\"\n\n# Note: this long_description is actually a copy/paste from the top-level\n# README.txt, so that it shows up nicely on PyPI. So please remember to edit\n# it only in one place and sync it correctly.\nlong_description = \"\"\"========================================================\nNIPYPE: Neuroimaging in Python: Pipelines and Interfaces\n========================================================\n\nCurrent neuroimaging software offer users an incredible opportunity to\nanalyze data using a variety of different algorithms. However, this has\nresulted in a heterogeneous collection of specialized applications\nwithout transparent interoperability or a uniform operating interface.\n\n*Nipype*, an open-source, community-developed initiative under the\numbrella of `NiPy <http://nipy.org>`_, is a Python project that provides a\nuniform interface to existing neuroimaging software and facilitates interaction\nbetween these packages within a single workflow. Nipype provides an environment\nthat encourages interactive exploration of algorithms from different\npackages (e.g., AFNI, ANTS, BRAINS, BrainSuite, Camino, FreeSurfer, FSL, MNE,\nMRtrix, MNE, Nipy, Slicer, SPM), eases the design of workflows within and\nbetween packages, and reduces the learning curve necessary to use different \\\npackages. 
Nipype is creating a collaborative platform for neuroimaging \\\nsoftware development in a high-level language and addressing limitations of \\\nexisting pipeline systems.\n\n*Nipype* allows you to:\n\n* easily interact with tools from different software packages\n* combine processing steps from different software packages\n* develop new workflows faster by reusing common steps from old ones\n* process data faster by running it in parallel on many cores/machines\n* make your research easily reproducible\n* share your processing workflows with the community\n\"\"\"\n\n# versions\nNIBABEL_MIN_VERSION = \"2.1.0\"\nNETWORKX_MIN_VERSION = \"1.9\"\nNUMPY_MIN_VERSION = \"1.13\"\n# Numpy bug in python 3.7:\n# https://www.opensourceanswers.com/blog/you-shouldnt-use-python-37-for-data-science-right-now.html\nNUMPY_MIN_VERSION_37 = \"1.15.3\"\nSCIPY_MIN_VERSION = \"0.14\"\nTRAITS_MIN_VERSION = \"4.6\"\nDATEUTIL_MIN_VERSION = \"2.2\"\nFUTURE_MIN_VERSION = \"0.16.0\"\nSIMPLEJSON_MIN_VERSION = \"3.8.0\"\nPROV_VERSION = \"1.5.2\"\nCLICK_MIN_VERSION = \"6.6.0\"\nPYDOT_MIN_VERSION = \"1.2.3\"\n\nNAME = \"nipype\"\nMAINTAINER = \"nipype developers\"\nMAINTAINER_EMAIL = \"[email protected]\"\nDESCRIPTION = description\nLONG_DESCRIPTION = long_description\nURL = \"http://nipy.org/nipype\"\nDOWNLOAD_URL = \"http://github.com/nipy/nipype/archives/master\"\nLICENSE = \"Apache License, 2.0\"\nAUTHOR = \"nipype developers\"\nAUTHOR_EMAIL = \"[email protected]\"\nPLATFORMS = \"OS Independent\"\nMAJOR = __version__.split(\".\")[0]\nMINOR = __version__.split(\".\")[1]\nMICRO = __version__.replace(\"-\", \".\").split(\".\")[2]\nISRELEASE = (\n len(__version__.replace(\"-\", \".\").split(\".\")) == 3\n or \"post\" in __version__.replace(\"-\", \".\").split(\".\")[-1]\n)\nVERSION = __version__\nPROVIDES = [\"nipype\"]\nREQUIRES = [\n \"click>=%s\" % CLICK_MIN_VERSION,\n \"networkx>=%s\" % NETWORKX_MIN_VERSION,\n \"nibabel>=%s\" % NIBABEL_MIN_VERSION,\n 'numpy>=%s ; python_version < \"3.7\"' % NUMPY_MIN_VERSION,\n 'numpy>=%s ; python_version >= \"3.7\"' % NUMPY_MIN_VERSION_37,\n \"packaging\",\n \"prov>=%s\" % PROV_VERSION,\n \"pydot>=%s\" % PYDOT_MIN_VERSION,\n \"pydotplus\",\n \"python-dateutil>=%s\" % DATEUTIL_MIN_VERSION,\n \"scipy>=%s\" % SCIPY_MIN_VERSION,\n \"simplejson>=%s\" % SIMPLEJSON_MIN_VERSION,\n \"traits>=%s,!=5.0\" % TRAITS_MIN_VERSION,\n \"filelock>=3.0.0\",\n \"etelemetry>=0.2.0\",\n]\n\n# neurdflib has to come after prov\n# https://github.com/nipy/nipype/pull/2961#issuecomment-512035484\nREQUIRES += [\"neurdflib\"]\n\nTESTS_REQUIRES = [\n \"codecov\",\n \"coverage<5\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-env\",\n \"pytest-timeout\",\n]\n\nEXTRA_REQUIRES = {\n \"data\": [\"datalad\"],\n \"doc\": [\n \"dipy\",\n \"ipython\",\n \"matplotlib\",\n \"nbsphinx\",\n \"sphinx-argparse\",\n \"sphinx>=2.1.2\",\n \"sphinxcontrib-apidoc\",\n \"sphinxcontrib-napoleon\",\n ],\n \"duecredit\": [\"duecredit\"],\n \"nipy\": [\"nitime\", \"nilearn<0.5.0\", \"dipy\", \"nipy\", \"matplotlib\"],\n \"profiler\": [\"psutil>=5.0\"],\n \"pybids\": [\"pybids>=0.7.0\"],\n \"specs\": [\"black\"],\n \"ssh\": [\"paramiko\"],\n \"tests\": TESTS_REQUIRES,\n \"xvfbwrapper\": [\"xvfbwrapper\"],\n # 'mesh': ['mayavi'] # Enable when it works\n}\n\n\ndef _list_union(iterable):\n return list(set(sum(iterable, [])))\n\n\n# Enable a handle to install all extra dependencies at once\nEXTRA_REQUIRES[\"all\"] = _list_union(EXTRA_REQUIRES.values())\n# dev = doc + tests + specs\nEXTRA_REQUIRES[\"dev\"] = _list_union(\n val for key, val in 
EXTRA_REQUIRES.items() if key in (\"doc\", \"tests\", \"specs\")\n)\n\nSTATUS = \"stable\"\n", "path": "nipype/info.py"}], "after_files": [{"content": "\"\"\" This file contains defines parameters for nipy that we use to fill\nsettings in setup.py, the nipy top-level docstring, and for building the\ndocs. In setup.py in particular, we exec this file, so it cannot import nipy\n\"\"\"\n\n# nipype version information\n# Remove -dev for release\n__version__ = \"1.5.0-rc1.post-dev\"\n\n\ndef get_nipype_gitversion():\n \"\"\"Nipype version as reported by the last commit in git\n\n Returns\n -------\n None or str\n Version of Nipype according to git.\n \"\"\"\n import os\n import subprocess\n\n try:\n import nipype\n\n gitpath = os.path.realpath(\n os.path.join(os.path.dirname(nipype.__file__), os.path.pardir)\n )\n except:\n gitpath = os.getcwd()\n gitpathgit = os.path.join(gitpath, \".git\")\n if not os.path.exists(gitpathgit):\n return None\n ver = None\n try:\n o, _ = subprocess.Popen(\n \"git describe\", shell=True, cwd=gitpath, stdout=subprocess.PIPE\n ).communicate()\n except Exception:\n pass\n else:\n ver = o.decode().strip().split(\"-\")[-1]\n return ver\n\n\nif __version__.endswith(\"-dev\"):\n gitversion = get_nipype_gitversion()\n if gitversion:\n __version__ = \"{}+{}\".format(__version__, gitversion)\n\nCLASSIFIERS = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering\",\n]\nPYTHON_REQUIRES = \">= 3.6\"\n\ndescription = \"Neuroimaging in Python: Pipelines and Interfaces\"\n\n# Note: this long_description is actually a copy/paste from the top-level\n# README.txt, so that it shows up nicely on PyPI. So please remember to edit\n# it only in one place and sync it correctly.\nlong_description = \"\"\"========================================================\nNIPYPE: Neuroimaging in Python: Pipelines and Interfaces\n========================================================\n\nCurrent neuroimaging software offer users an incredible opportunity to\nanalyze data using a variety of different algorithms. However, this has\nresulted in a heterogeneous collection of specialized applications\nwithout transparent interoperability or a uniform operating interface.\n\n*Nipype*, an open-source, community-developed initiative under the\numbrella of `NiPy <http://nipy.org>`_, is a Python project that provides a\nuniform interface to existing neuroimaging software and facilitates interaction\nbetween these packages within a single workflow. Nipype provides an environment\nthat encourages interactive exploration of algorithms from different\npackages (e.g., AFNI, ANTS, BRAINS, BrainSuite, Camino, FreeSurfer, FSL, MNE,\nMRtrix, MNE, Nipy, Slicer, SPM), eases the design of workflows within and\nbetween packages, and reduces the learning curve necessary to use different \\\npackages. 
Nipype is creating a collaborative platform for neuroimaging \\\nsoftware development in a high-level language and addressing limitations of \\\nexisting pipeline systems.\n\n*Nipype* allows you to:\n\n* easily interact with tools from different software packages\n* combine processing steps from different software packages\n* develop new workflows faster by reusing common steps from old ones\n* process data faster by running it in parallel on many cores/machines\n* make your research easily reproducible\n* share your processing workflows with the community\n\"\"\"\n\n# versions\nNIBABEL_MIN_VERSION = \"2.1.0\"\nNETWORKX_MIN_VERSION = \"1.9\"\nNUMPY_MIN_VERSION = \"1.13\"\n# Numpy bug in python 3.7:\n# https://www.opensourceanswers.com/blog/you-shouldnt-use-python-37-for-data-science-right-now.html\nNUMPY_MIN_VERSION_37 = \"1.15.3\"\nSCIPY_MIN_VERSION = \"0.14\"\nTRAITS_MIN_VERSION = \"4.6\"\nDATEUTIL_MIN_VERSION = \"2.2\"\nFUTURE_MIN_VERSION = \"0.16.0\"\nSIMPLEJSON_MIN_VERSION = \"3.8.0\"\nPROV_VERSION = \"1.5.2\"\nRDFLIB_MIN_VERSION = \"5.0.0\"\nCLICK_MIN_VERSION = \"6.6.0\"\nPYDOT_MIN_VERSION = \"1.2.3\"\n\nNAME = \"nipype\"\nMAINTAINER = \"nipype developers\"\nMAINTAINER_EMAIL = \"[email protected]\"\nDESCRIPTION = description\nLONG_DESCRIPTION = long_description\nURL = \"http://nipy.org/nipype\"\nDOWNLOAD_URL = \"http://github.com/nipy/nipype/archives/master\"\nLICENSE = \"Apache License, 2.0\"\nAUTHOR = \"nipype developers\"\nAUTHOR_EMAIL = \"[email protected]\"\nPLATFORMS = \"OS Independent\"\nMAJOR = __version__.split(\".\")[0]\nMINOR = __version__.split(\".\")[1]\nMICRO = __version__.replace(\"-\", \".\").split(\".\")[2]\nISRELEASE = (\n len(__version__.replace(\"-\", \".\").split(\".\")) == 3\n or \"post\" in __version__.replace(\"-\", \".\").split(\".\")[-1]\n)\nVERSION = __version__\nPROVIDES = [\"nipype\"]\nREQUIRES = [\n \"click>=%s\" % CLICK_MIN_VERSION,\n \"networkx>=%s\" % NETWORKX_MIN_VERSION,\n \"nibabel>=%s\" % NIBABEL_MIN_VERSION,\n 'numpy>=%s ; python_version < \"3.7\"' % NUMPY_MIN_VERSION,\n 'numpy>=%s ; python_version >= \"3.7\"' % NUMPY_MIN_VERSION_37,\n \"packaging\",\n \"prov>=%s\" % PROV_VERSION,\n \"pydot>=%s\" % PYDOT_MIN_VERSION,\n \"pydotplus\",\n \"python-dateutil>=%s\" % DATEUTIL_MIN_VERSION,\n \"rdflib>=%s\" % RDFLIB_MIN_VERSION,\n \"scipy>=%s\" % SCIPY_MIN_VERSION,\n \"simplejson>=%s\" % SIMPLEJSON_MIN_VERSION,\n \"traits>=%s,!=5.0\" % TRAITS_MIN_VERSION,\n \"filelock>=3.0.0\",\n \"etelemetry>=0.2.0\",\n]\n\nTESTS_REQUIRES = [\n \"codecov\",\n \"coverage<5\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-env\",\n \"pytest-timeout\",\n]\n\nEXTRA_REQUIRES = {\n \"data\": [\"datalad\"],\n \"doc\": [\n \"dipy\",\n \"ipython\",\n \"matplotlib\",\n \"nbsphinx\",\n \"sphinx-argparse\",\n \"sphinx>=2.1.2\",\n \"sphinxcontrib-apidoc\",\n \"sphinxcontrib-napoleon\",\n ],\n \"duecredit\": [\"duecredit\"],\n \"nipy\": [\"nitime\", \"nilearn<0.5.0\", \"dipy\", \"nipy\", \"matplotlib\"],\n \"profiler\": [\"psutil>=5.0\"],\n \"pybids\": [\"pybids>=0.7.0\"],\n \"specs\": [\"black\"],\n \"ssh\": [\"paramiko\"],\n \"tests\": TESTS_REQUIRES,\n \"xvfbwrapper\": [\"xvfbwrapper\"],\n # 'mesh': ['mayavi'] # Enable when it works\n}\n\n\ndef _list_union(iterable):\n return list(set(sum(iterable, [])))\n\n\n# Enable a handle to install all extra dependencies at once\nEXTRA_REQUIRES[\"all\"] = _list_union(EXTRA_REQUIRES.values())\n# dev = doc + tests + specs\nEXTRA_REQUIRES[\"dev\"] = _list_union(\n val for key, val in EXTRA_REQUIRES.items() if key in (\"doc\", \"tests\", 
\"specs\")\n)\n\nSTATUS = \"stable\"\n", "path": "nipype/info.py"}]}
2,768
410
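For illustration, a small sketch of the two checks implied by the issue checklist: that the classifier list advertises Python 3.8 and that the PEP 508 markers in `REQUIRES` pick the right numpy floor per interpreter. The local `CLASSIFIERS` list is a trimmed copy for the example only, and `packaging` is assumed to be importable (it is already listed in `REQUIRES`).

```python
from packaging.requirements import Requirement

# Trimmed copy of the classifier list, for the example only.
CLASSIFIERS = [
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",  # entry added by the patch
]

def advertises(py: str) -> bool:
    return f"Programming Language :: Python :: {py}" in CLASSIFIERS

assert advertises("3.8")

# The numpy pins in REQUIRES rely on PEP 508 environment markers, which
# `packaging` evaluates against the running interpreter:
req = Requirement('numpy>=1.15.3 ; python_version >= "3.7"')
print(req.name, str(req.specifier))  # numpy >=1.15.3
print(req.marker.evaluate())         # True on Python 3.7 and 3.8
```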
gh_patches_debug_26194
rasdani/github-patches
git_diff
streamlink__streamlink-95
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Connectcast stream fails with "invalid url" Attempting to load an active connectcast stream via `streamlink connectcast.tv/streamname` results in an error: `error: Unable to open URL: (Invalid URL '': No schema supplied. Perhaps you mean http://?)` Similarly, using `http://connectcast.tv/streamname` for the url also fails. Running on Windows, built with python 3.5.0rc2 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/streamlink/plugins/connectcast.py` Content: ``` 1 import re 2 import json 3 4 from streamlink.plugin import Plugin 5 from streamlink.plugin.api import http, validate 6 from streamlink.stream import HDSStream 7 8 SWF_URL = "https://www.connectcast.tv/jwplayer/jwplayer.flash.swf" 9 10 _url_re = re.compile("http(s)?://(\w+\.)?connectcast.tv/") 11 _manifest_re = re.compile(".*data-playback=\"([^\"]*)\".*") 12 13 14 class ConnectCast(Plugin): 15 @classmethod 16 def can_handle_url(self, url): 17 return _url_re.match(url) 18 19 def _get_streams(self): 20 res = http.get(self.url) 21 match = _manifest_re.search(res.text) 22 manifest = match.group(1) 23 streams = {} 24 streams.update( 25 HDSStream.parse_manifest(self.session, manifest, pvswf=SWF_URL) 26 ) 27 28 return streams 29 30 __plugin__ = ConnectCast 31 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/streamlink/plugins/connectcast.py b/src/streamlink/plugins/connectcast.py --- a/src/streamlink/plugins/connectcast.py +++ b/src/streamlink/plugins/connectcast.py @@ -3,13 +3,11 @@ from streamlink.plugin import Plugin from streamlink.plugin.api import http, validate -from streamlink.stream import HDSStream - -SWF_URL = "https://www.connectcast.tv/jwplayer/jwplayer.flash.swf" - -_url_re = re.compile("http(s)?://(\w+\.)?connectcast.tv/") -_manifest_re = re.compile(".*data-playback=\"([^\"]*)\".*") +from streamlink.stream import RTMPStream +_url_re = re.compile(r"http(?:s)?://connectcast.tv/(\w+)?") +_stream_re = re.compile(r'<video src="mp4:(.*?)"') +_stream_url = "http://connectcast.tv/channel/stream/{channel}" class ConnectCast(Plugin): @classmethod @@ -17,14 +15,15 @@ return _url_re.match(url) def _get_streams(self): - res = http.get(self.url) - match = _manifest_re.search(res.text) - manifest = match.group(1) - streams = {} - streams.update( - HDSStream.parse_manifest(self.session, manifest, pvswf=SWF_URL) - ) - - return streams + url_match = _url_re.match(self.url) + stream_url = _stream_url.format(channel=url_match.group(1)) + res = self.session.http.get(stream_url) + match = _stream_re.search(res.content) + if match: + params = dict(rtmp="rtmp://stream.connectcast.tv/live", + playpath=match.group(1), + live=True) + + return dict(live=RTMPStream(self.session, params)) __plugin__ = ConnectCast
{"golden_diff": "diff --git a/src/streamlink/plugins/connectcast.py b/src/streamlink/plugins/connectcast.py\n--- a/src/streamlink/plugins/connectcast.py\n+++ b/src/streamlink/plugins/connectcast.py\n@@ -3,13 +3,11 @@\n \n from streamlink.plugin import Plugin\n from streamlink.plugin.api import http, validate\n-from streamlink.stream import HDSStream\n-\n-SWF_URL = \"https://www.connectcast.tv/jwplayer/jwplayer.flash.swf\"\n-\n-_url_re = re.compile(\"http(s)?://(\\w+\\.)?connectcast.tv/\")\n-_manifest_re = re.compile(\".*data-playback=\\\"([^\\\"]*)\\\".*\")\n+from streamlink.stream import RTMPStream\n \n+_url_re = re.compile(r\"http(?:s)?://connectcast.tv/(\\w+)?\")\n+_stream_re = re.compile(r'<video src=\"mp4:(.*?)\"')\n+_stream_url = \"http://connectcast.tv/channel/stream/{channel}\"\n \n class ConnectCast(Plugin):\n @classmethod\n@@ -17,14 +15,15 @@\n return _url_re.match(url)\n \n def _get_streams(self):\n- res = http.get(self.url)\n- match = _manifest_re.search(res.text)\n- manifest = match.group(1)\n- streams = {}\n- streams.update(\n- HDSStream.parse_manifest(self.session, manifest, pvswf=SWF_URL)\n- )\n- \n- return streams\n+ url_match = _url_re.match(self.url)\n+ stream_url = _stream_url.format(channel=url_match.group(1))\n+ res = self.session.http.get(stream_url)\n+ match = _stream_re.search(res.content)\n+ if match:\n+ params = dict(rtmp=\"rtmp://stream.connectcast.tv/live\",\n+ playpath=match.group(1),\n+ live=True)\n+\n+ return dict(live=RTMPStream(self.session, params))\n \n __plugin__ = ConnectCast\n", "issue": "Connectcast stream fails with \"invalid url\"\nAttempting to load an active connectcast stream via `streamlink connectcast.tv/streamname` results in an error:\n`error: Unable to open URL: (Invalid URL '': No schema supplied. Perhaps you mean http://?)`\n\nSimilarly, using `http://connectcast.tv/streamname` for the url also fails.\n\nRunning on Windows, built with python 3.5.0rc2\n\n", "before_files": [{"content": "import re\nimport json\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate\nfrom streamlink.stream import HDSStream\n\nSWF_URL = \"https://www.connectcast.tv/jwplayer/jwplayer.flash.swf\"\n\n_url_re = re.compile(\"http(s)?://(\\w+\\.)?connectcast.tv/\")\n_manifest_re = re.compile(\".*data-playback=\\\"([^\\\"]*)\\\".*\")\n\n\nclass ConnectCast(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n res = http.get(self.url)\n match = _manifest_re.search(res.text)\n manifest = match.group(1)\n streams = {}\n streams.update(\n HDSStream.parse_manifest(self.session, manifest, pvswf=SWF_URL)\n )\n \n return streams\n\n__plugin__ = ConnectCast\n", "path": "src/streamlink/plugins/connectcast.py"}], "after_files": [{"content": "import re\nimport json\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate\nfrom streamlink.stream import RTMPStream\n\n_url_re = re.compile(r\"http(?:s)?://connectcast.tv/(\\w+)?\")\n_stream_re = re.compile(r'<video src=\"mp4:(.*?)\"')\n_stream_url = \"http://connectcast.tv/channel/stream/{channel}\"\n\nclass ConnectCast(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n url_match = _url_re.match(self.url)\n stream_url = _stream_url.format(channel=url_match.group(1))\n res = self.session.http.get(stream_url)\n match = _stream_re.search(res.content)\n if match:\n params = dict(rtmp=\"rtmp://stream.connectcast.tv/live\",\n 
playpath=match.group(1),\n live=True)\n\n return dict(live=RTMPStream(self.session, params))\n\n__plugin__ = ConnectCast\n", "path": "src/streamlink/plugins/connectcast.py"}]}
605
426
gh_patches_debug_25428
rasdani/github-patches
git_diff
scikit-image__scikit-image-3210
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `watershed_demo` fails on mouse click ## Description ``` [egor@host scikit-image]$ python viewer_examples/plugins/watershed_demo.py Watershed plugin ---------------- Use mouse to paint each region with a different label. Press OK to display segmented image. Traceback (most recent call last): File "/home/egor/.local/lib/python3.6/site-packages/matplotlib/cbook/__init__.py", line 388, in process proxy(*args, **kwargs) File "/home/egor/.local/lib/python3.6/site-packages/matplotlib/cbook/__init__.py", line 228, in __call__ return mtd(*args, **kwargs) File "/home/egor/Workspace/_contrib/scikit-image/skimage/viewer/utils/canvas.py", line 75, in on_mouse_press self.active_tool.on_mouse_press(event) File "/home/egor/Workspace/_contrib/scikit-image/skimage/viewer/canvastools/painttool.py", line 149, in on_mouse_press self.update_overlay(event.xdata, event.ydata) File "/home/egor/Workspace/_contrib/scikit-image/skimage/viewer/canvastools/painttool.py", line 172, in update_overlay overlay[self.window.at(y, x)] = self.label TypeError: slice indices must be integers or None or have an __index__ method ``` ## Way to reproduce [If reporting a bug, please include the following important information:] - [x] Code example - [x] Relevant images (if any) - [x] Operating system and version: `Linux 4.14.49-1-lts #1 SMP Tue Jun 12 16:32:50 CEST 2018 x86_64 GNU/Linux` - [x] Python version: 3.6.5 - [x] scikit-image version (run `skimage.__version__`): https://github.com/scikit-image/scikit-image/commit/18f97d864a9468555851aac08c731b6813db2091 - [x] matplotlib: 2.2.2 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `viewer_examples/plugins/watershed_demo.py` Content: ``` 1 import matplotlib.pyplot as plt 2 3 from skimage import data 4 from skimage import filters 5 from skimage import morphology 6 from skimage.viewer import ImageViewer 7 from skimage.viewer.widgets import history 8 from skimage.viewer.plugins.labelplugin import LabelPainter 9 10 11 class OKCancelButtons(history.OKCancelButtons): 12 13 def update_original_image(self): 14 # OKCancelButtons updates the original image with the filtered image 15 # by default. Override this method to update the overlay. 16 self.plugin._show_watershed() 17 self.plugin.close() 18 19 20 class WatershedPlugin(LabelPainter): 21 22 def help(self): 23 helpstr = ("Watershed plugin", 24 "----------------", 25 "Use mouse to paint each region with a different label.", 26 "Press OK to display segmented image.") 27 return '\n'.join(helpstr) 28 29 def _show_watershed(self): 30 viewer = self.image_viewer 31 edge_image = filter.sobel(viewer.image) 32 labels = morphology.watershed(edge_image, self.paint_tool.overlay) 33 viewer.ax.imshow(labels, cmap=plt.cm.jet, alpha=0.5) 34 viewer.redraw() 35 36 37 image = data.coins() 38 plugin = WatershedPlugin() 39 plugin += OKCancelButtons() 40 41 viewer = ImageViewer(image) 42 viewer += plugin 43 viewer.show() 44 ``` Path: `skimage/viewer/canvastools/painttool.py` Content: ``` 1 import numpy as np 2 import matplotlib.pyplot as plt 3 import matplotlib.colors as mcolors 4 LABELS_CMAP = mcolors.ListedColormap(['white', 'red', 'dodgerblue', 'gold', 5 'greenyellow', 'blueviolet']) 6 from ...viewer.canvastools.base import CanvasToolBase 7 8 9 __all__ = ['PaintTool'] 10 11 12 class PaintTool(CanvasToolBase): 13 """Widget for painting on top of a plot. 
14 15 Parameters 16 ---------- 17 manager : Viewer or PlotPlugin. 18 Skimage viewer or plot plugin object. 19 overlay_shape : shape tuple 20 2D shape tuple used to initialize overlay image. 21 alpha : float (between [0, 1]) 22 Opacity of overlay 23 on_move : function 24 Function called whenever a control handle is moved. 25 This function must accept the end points of line as the only argument. 26 on_release : function 27 Function called whenever the control handle is released. 28 on_enter : function 29 Function called whenever the "enter" key is pressed. 30 rect_props : dict 31 Properties for :class:`matplotlib.patches.Rectangle`. This class 32 redefines defaults in :class:`matplotlib.widgets.RectangleSelector`. 33 34 Attributes 35 ---------- 36 overlay : array 37 Overlay of painted labels displayed on top of image. 38 label : int 39 Current paint color. 40 41 Examples 42 ---------- 43 >>> from skimage.data import camera 44 >>> import matplotlib.pyplot as plt 45 >>> from skimage.viewer.canvastools import PaintTool 46 >>> import numpy as np 47 48 >>> img = camera() #doctest: +SKIP 49 50 >>> ax = plt.subplot(111) #doctest: +SKIP 51 >>> plt.imshow(img, cmap=plt.cm.gray) #doctest: +SKIP 52 >>> p = PaintTool(ax,np.shape(img[:-1]),10,0.2) #doctest: +SKIP 53 >>> plt.show() #doctest: +SKIP 54 55 >>> mask = p.overlay #doctest: +SKIP 56 >>> plt.imshow(mask,cmap=plt.cm.gray) #doctest: +SKIP 57 >>> plt.show() #doctest: +SKIP 58 """ 59 def __init__(self, manager, overlay_shape, radius=5, alpha=0.3, 60 on_move=None, on_release=None, on_enter=None, 61 rect_props=None): 62 super(PaintTool, self).__init__(manager, on_move=on_move, 63 on_enter=on_enter, 64 on_release=on_release) 65 66 props = dict(edgecolor='r', facecolor='0.7', alpha=0.5, animated=True) 67 props.update(rect_props if rect_props is not None else {}) 68 69 self.alpha = alpha 70 self.cmap = LABELS_CMAP 71 self._overlay_plot = None 72 self.shape = overlay_shape 73 74 self._cursor = plt.Rectangle((0, 0), 0, 0, **props) 75 self._cursor.set_visible(False) 76 self.ax.add_patch(self._cursor) 77 78 # `label` and `radius` can only be set after initializing `_cursor` 79 self.label = 1 80 self.radius = radius 81 82 # Note that the order is important: Redraw cursor *after* overlay 83 self.artists = [self._overlay_plot, self._cursor] 84 self.manager.add_tool(self) 85 86 @property 87 def label(self): 88 return self._label 89 90 @label.setter 91 def label(self, value): 92 if value >= self.cmap.N: 93 raise ValueError('Maximum label value = %s' % len(self.cmap - 1)) 94 self._label = value 95 self._cursor.set_edgecolor(self.cmap(value)) 96 97 @property 98 def radius(self): 99 return self._radius 100 101 @radius.setter 102 def radius(self, r): 103 self._radius = r 104 self._width = 2 * r + 1 105 self._cursor.set_width(self._width) 106 self._cursor.set_height(self._width) 107 self.window = CenteredWindow(r, self._shape) 108 109 @property 110 def overlay(self): 111 return self._overlay 112 113 @overlay.setter 114 def overlay(self, image): 115 self._overlay = image 116 if image is None: 117 self.ax.images.remove(self._overlay_plot) 118 self._overlay_plot = None 119 elif self._overlay_plot is None: 120 props = dict(cmap=self.cmap, alpha=self.alpha, 121 norm=mcolors.NoNorm(), animated=True) 122 self._overlay_plot = self.ax.imshow(image, **props) 123 else: 124 self._overlay_plot.set_data(image) 125 self.redraw() 126 127 @property 128 def shape(self): 129 return self._shape 130 131 @shape.setter 132 def shape(self, shape): 133 self._shape = shape 134 if not 
self._overlay_plot is None: 135 self._overlay_plot.set_extent((-0.5, shape[1] + 0.5, 136 shape[0] + 0.5, -0.5)) 137 self.radius = self._radius 138 self.overlay = np.zeros(shape, dtype='uint8') 139 140 def on_key_press(self, event): 141 if event.key == 'enter': 142 self.callback_on_enter(self.geometry) 143 self.redraw() 144 145 def on_mouse_press(self, event): 146 if event.button != 1 or not self.ax.in_axes(event): 147 return 148 self.update_cursor(event.xdata, event.ydata) 149 self.update_overlay(event.xdata, event.ydata) 150 151 def on_mouse_release(self, event): 152 if event.button != 1: 153 return 154 self.callback_on_release(self.geometry) 155 156 def on_move(self, event): 157 if not self.ax.in_axes(event): 158 self._cursor.set_visible(False) 159 self.redraw() # make sure cursor is not visible 160 return 161 self._cursor.set_visible(True) 162 163 self.update_cursor(event.xdata, event.ydata) 164 if event.button != 1: 165 self.redraw() # update cursor position 166 return 167 self.update_overlay(event.xdata, event.ydata) 168 self.callback_on_move(self.geometry) 169 170 def update_overlay(self, x, y): 171 overlay = self.overlay 172 overlay[self.window.at(y, x)] = self.label 173 # Note that overlay calls `redraw` 174 self.overlay = overlay 175 176 def update_cursor(self, x, y): 177 x = x - self.radius - 1 178 y = y - self.radius - 1 179 self._cursor.set_xy((x, y)) 180 181 @property 182 def geometry(self): 183 return self.overlay 184 185 186 class CenteredWindow(object): 187 """Window that create slices numpy arrays over 2D windows. 188 189 Examples 190 -------- 191 >>> a = np.arange(16).reshape(4, 4) 192 >>> w = CenteredWindow(1, a.shape) 193 >>> a[w.at(1, 1)] 194 array([[ 0, 1, 2], 195 [ 4, 5, 6], 196 [ 8, 9, 10]]) 197 >>> a[w.at(0, 0)] 198 array([[0, 1], 199 [4, 5]]) 200 >>> a[w.at(4, 3)] 201 array([[14, 15]]) 202 """ 203 def __init__(self, radius, array_shape): 204 self.radius = radius 205 self.array_shape = array_shape 206 207 def at(self, row, col): 208 h, w = self.array_shape 209 r = self.radius 210 xmin = max(0, col - r) 211 xmax = min(w, col + r + 1) 212 ymin = max(0, row - r) 213 ymax = min(h, row + r + 1) 214 return [slice(ymin, ymax), slice(xmin, xmax)] 215 216 217 if __name__ == '__main__': # pragma: no cover 218 np.testing.rundocs() 219 from ... import data 220 from ...viewer import ImageViewer 221 222 image = data.camera() 223 224 viewer = ImageViewer(image) 225 paint_tool = PaintTool(viewer, image.shape) 226 viewer.show() 227 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/skimage/viewer/canvastools/painttool.py b/skimage/viewer/canvastools/painttool.py --- a/skimage/viewer/canvastools/painttool.py +++ b/skimage/viewer/canvastools/painttool.py @@ -206,12 +206,21 @@ def at(self, row, col): h, w = self.array_shape - r = self.radius + r = round(self.radius) + # Note: the int() cast is necessary because row and col are np.float64, + # which does not get cast by round(), unlike a normal Python float: + # >>> round(4.5) + # 4 + # >>> round(np.float64(4.5)) + # 4.0 + # >>> int(round(np.float64(4.5))) + # 4 + row, col = int(round(row)), int(round(col)) xmin = max(0, col - r) xmax = min(w, col + r + 1) ymin = max(0, row - r) ymax = min(h, row + r + 1) - return [slice(ymin, ymax), slice(xmin, xmax)] + return (slice(ymin, ymax), slice(xmin, xmax)) if __name__ == '__main__': # pragma: no cover diff --git a/viewer_examples/plugins/watershed_demo.py b/viewer_examples/plugins/watershed_demo.py --- a/viewer_examples/plugins/watershed_demo.py +++ b/viewer_examples/plugins/watershed_demo.py @@ -28,7 +28,7 @@ def _show_watershed(self): viewer = self.image_viewer - edge_image = filter.sobel(viewer.image) + edge_image = filters.sobel(viewer.image) labels = morphology.watershed(edge_image, self.paint_tool.overlay) viewer.ax.imshow(labels, cmap=plt.cm.jet, alpha=0.5) viewer.redraw()
{"golden_diff": "diff --git a/skimage/viewer/canvastools/painttool.py b/skimage/viewer/canvastools/painttool.py\n--- a/skimage/viewer/canvastools/painttool.py\n+++ b/skimage/viewer/canvastools/painttool.py\n@@ -206,12 +206,21 @@\n \n def at(self, row, col):\n h, w = self.array_shape\n- r = self.radius\n+ r = round(self.radius)\n+ # Note: the int() cast is necessary because row and col are np.float64,\n+ # which does not get cast by round(), unlike a normal Python float:\n+ # >>> round(4.5)\n+ # 4\n+ # >>> round(np.float64(4.5))\n+ # 4.0\n+ # >>> int(round(np.float64(4.5)))\n+ # 4\n+ row, col = int(round(row)), int(round(col))\n xmin = max(0, col - r)\n xmax = min(w, col + r + 1)\n ymin = max(0, row - r)\n ymax = min(h, row + r + 1)\n- return [slice(ymin, ymax), slice(xmin, xmax)]\n+ return (slice(ymin, ymax), slice(xmin, xmax))\n \n \n if __name__ == '__main__': # pragma: no cover\ndiff --git a/viewer_examples/plugins/watershed_demo.py b/viewer_examples/plugins/watershed_demo.py\n--- a/viewer_examples/plugins/watershed_demo.py\n+++ b/viewer_examples/plugins/watershed_demo.py\n@@ -28,7 +28,7 @@\n \n def _show_watershed(self):\n viewer = self.image_viewer\n- edge_image = filter.sobel(viewer.image)\n+ edge_image = filters.sobel(viewer.image)\n labels = morphology.watershed(edge_image, self.paint_tool.overlay)\n viewer.ax.imshow(labels, cmap=plt.cm.jet, alpha=0.5)\n viewer.redraw()\n", "issue": "`watershed_demo` fails on mouse click\n## Description\r\n```\r\n[egor@host scikit-image]$ python viewer_examples/plugins/watershed_demo.py \r\nWatershed plugin\r\n----------------\r\nUse mouse to paint each region with a different label.\r\nPress OK to display segmented image.\r\nTraceback (most recent call last):\r\n File \"/home/egor/.local/lib/python3.6/site-packages/matplotlib/cbook/__init__.py\", line 388, in process\r\n proxy(*args, **kwargs)\r\n File \"/home/egor/.local/lib/python3.6/site-packages/matplotlib/cbook/__init__.py\", line 228, in __call__\r\n return mtd(*args, **kwargs)\r\n File \"/home/egor/Workspace/_contrib/scikit-image/skimage/viewer/utils/canvas.py\", line 75, in on_mouse_press\r\n self.active_tool.on_mouse_press(event)\r\n File \"/home/egor/Workspace/_contrib/scikit-image/skimage/viewer/canvastools/painttool.py\", line 149, in on_mouse_press\r\n self.update_overlay(event.xdata, event.ydata)\r\n File \"/home/egor/Workspace/_contrib/scikit-image/skimage/viewer/canvastools/painttool.py\", line 172, in update_overlay\r\n overlay[self.window.at(y, x)] = self.label\r\nTypeError: slice indices must be integers or None or have an __index__ method\r\n```\r\n\r\n## Way to reproduce\r\n[If reporting a bug, please include the following important information:]\r\n- [x] Code example\r\n- [x] Relevant images (if any)\r\n- [x] Operating system and version: `Linux 4.14.49-1-lts #1 SMP Tue Jun 12 16:32:50 CEST 2018 x86_64 GNU/Linux`\r\n- [x] Python version: 3.6.5\r\n- [x] scikit-image version (run `skimage.__version__`): https://github.com/scikit-image/scikit-image/commit/18f97d864a9468555851aac08c731b6813db2091\r\n- [x] matplotlib: 2.2.2\r\n\r\n\n", "before_files": [{"content": "import matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage import filters\nfrom skimage import morphology\nfrom skimage.viewer import ImageViewer\nfrom skimage.viewer.widgets import history\nfrom skimage.viewer.plugins.labelplugin import LabelPainter\n\n\nclass OKCancelButtons(history.OKCancelButtons):\n\n def update_original_image(self):\n # OKCancelButtons updates the original image with the filtered 
image\n # by default. Override this method to update the overlay.\n self.plugin._show_watershed()\n self.plugin.close()\n\n\nclass WatershedPlugin(LabelPainter):\n\n def help(self):\n helpstr = (\"Watershed plugin\",\n \"----------------\",\n \"Use mouse to paint each region with a different label.\",\n \"Press OK to display segmented image.\")\n return '\\n'.join(helpstr)\n\n def _show_watershed(self):\n viewer = self.image_viewer\n edge_image = filter.sobel(viewer.image)\n labels = morphology.watershed(edge_image, self.paint_tool.overlay)\n viewer.ax.imshow(labels, cmap=plt.cm.jet, alpha=0.5)\n viewer.redraw()\n\n\nimage = data.coins()\nplugin = WatershedPlugin()\nplugin += OKCancelButtons()\n\nviewer = ImageViewer(image)\nviewer += plugin\nviewer.show()\n", "path": "viewer_examples/plugins/watershed_demo.py"}, {"content": "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nLABELS_CMAP = mcolors.ListedColormap(['white', 'red', 'dodgerblue', 'gold',\n 'greenyellow', 'blueviolet'])\nfrom ...viewer.canvastools.base import CanvasToolBase\n\n\n__all__ = ['PaintTool']\n\n\nclass PaintTool(CanvasToolBase):\n \"\"\"Widget for painting on top of a plot.\n\n Parameters\n ----------\n manager : Viewer or PlotPlugin.\n Skimage viewer or plot plugin object.\n overlay_shape : shape tuple\n 2D shape tuple used to initialize overlay image.\n alpha : float (between [0, 1])\n Opacity of overlay\n on_move : function\n Function called whenever a control handle is moved.\n This function must accept the end points of line as the only argument.\n on_release : function\n Function called whenever the control handle is released.\n on_enter : function\n Function called whenever the \"enter\" key is pressed.\n rect_props : dict\n Properties for :class:`matplotlib.patches.Rectangle`. 
This class\n redefines defaults in :class:`matplotlib.widgets.RectangleSelector`.\n\n Attributes\n ----------\n overlay : array\n Overlay of painted labels displayed on top of image.\n label : int\n Current paint color.\n\n Examples\n ----------\n >>> from skimage.data import camera\n >>> import matplotlib.pyplot as plt\n >>> from skimage.viewer.canvastools import PaintTool\n >>> import numpy as np\n\n >>> img = camera() #doctest: +SKIP\n\n >>> ax = plt.subplot(111) #doctest: +SKIP \n >>> plt.imshow(img, cmap=plt.cm.gray) #doctest: +SKIP\n >>> p = PaintTool(ax,np.shape(img[:-1]),10,0.2) #doctest: +SKIP\n >>> plt.show() #doctest: +SKIP\n\n >>> mask = p.overlay #doctest: +SKIP\n >>> plt.imshow(mask,cmap=plt.cm.gray) #doctest: +SKIP\n >>> plt.show() #doctest: +SKIP\n \"\"\"\n def __init__(self, manager, overlay_shape, radius=5, alpha=0.3,\n on_move=None, on_release=None, on_enter=None,\n rect_props=None):\n super(PaintTool, self).__init__(manager, on_move=on_move,\n on_enter=on_enter,\n on_release=on_release)\n\n props = dict(edgecolor='r', facecolor='0.7', alpha=0.5, animated=True)\n props.update(rect_props if rect_props is not None else {})\n\n self.alpha = alpha\n self.cmap = LABELS_CMAP\n self._overlay_plot = None\n self.shape = overlay_shape\n\n self._cursor = plt.Rectangle((0, 0), 0, 0, **props)\n self._cursor.set_visible(False)\n self.ax.add_patch(self._cursor)\n\n # `label` and `radius` can only be set after initializing `_cursor`\n self.label = 1\n self.radius = radius\n\n # Note that the order is important: Redraw cursor *after* overlay\n self.artists = [self._overlay_plot, self._cursor]\n self.manager.add_tool(self)\n\n @property\n def label(self):\n return self._label\n\n @label.setter\n def label(self, value):\n if value >= self.cmap.N:\n raise ValueError('Maximum label value = %s' % len(self.cmap - 1))\n self._label = value\n self._cursor.set_edgecolor(self.cmap(value))\n\n @property\n def radius(self):\n return self._radius\n\n @radius.setter\n def radius(self, r):\n self._radius = r\n self._width = 2 * r + 1\n self._cursor.set_width(self._width)\n self._cursor.set_height(self._width)\n self.window = CenteredWindow(r, self._shape)\n\n @property\n def overlay(self):\n return self._overlay\n\n @overlay.setter\n def overlay(self, image):\n self._overlay = image\n if image is None:\n self.ax.images.remove(self._overlay_plot)\n self._overlay_plot = None\n elif self._overlay_plot is None:\n props = dict(cmap=self.cmap, alpha=self.alpha,\n norm=mcolors.NoNorm(), animated=True)\n self._overlay_plot = self.ax.imshow(image, **props)\n else:\n self._overlay_plot.set_data(image)\n self.redraw()\n\n @property\n def shape(self):\n return self._shape\n\n @shape.setter\n def shape(self, shape):\n self._shape = shape\n if not self._overlay_plot is None:\n self._overlay_plot.set_extent((-0.5, shape[1] + 0.5,\n shape[0] + 0.5, -0.5))\n self.radius = self._radius\n self.overlay = np.zeros(shape, dtype='uint8')\n\n def on_key_press(self, event):\n if event.key == 'enter':\n self.callback_on_enter(self.geometry)\n self.redraw()\n\n def on_mouse_press(self, event):\n if event.button != 1 or not self.ax.in_axes(event):\n return\n self.update_cursor(event.xdata, event.ydata)\n self.update_overlay(event.xdata, event.ydata)\n\n def on_mouse_release(self, event):\n if event.button != 1:\n return\n self.callback_on_release(self.geometry)\n\n def on_move(self, event):\n if not self.ax.in_axes(event):\n self._cursor.set_visible(False)\n self.redraw() # make sure cursor is not visible\n return\n 
self._cursor.set_visible(True)\n\n self.update_cursor(event.xdata, event.ydata)\n if event.button != 1:\n self.redraw() # update cursor position\n return\n self.update_overlay(event.xdata, event.ydata)\n self.callback_on_move(self.geometry)\n\n def update_overlay(self, x, y):\n overlay = self.overlay\n overlay[self.window.at(y, x)] = self.label\n # Note that overlay calls `redraw`\n self.overlay = overlay\n\n def update_cursor(self, x, y):\n x = x - self.radius - 1\n y = y - self.radius - 1\n self._cursor.set_xy((x, y))\n\n @property\n def geometry(self):\n return self.overlay\n\n\nclass CenteredWindow(object):\n \"\"\"Window that create slices numpy arrays over 2D windows.\n\n Examples\n --------\n >>> a = np.arange(16).reshape(4, 4)\n >>> w = CenteredWindow(1, a.shape)\n >>> a[w.at(1, 1)]\n array([[ 0, 1, 2],\n [ 4, 5, 6],\n [ 8, 9, 10]])\n >>> a[w.at(0, 0)]\n array([[0, 1],\n [4, 5]])\n >>> a[w.at(4, 3)]\n array([[14, 15]])\n \"\"\"\n def __init__(self, radius, array_shape):\n self.radius = radius\n self.array_shape = array_shape\n\n def at(self, row, col):\n h, w = self.array_shape\n r = self.radius\n xmin = max(0, col - r)\n xmax = min(w, col + r + 1)\n ymin = max(0, row - r)\n ymax = min(h, row + r + 1)\n return [slice(ymin, ymax), slice(xmin, xmax)]\n\n\nif __name__ == '__main__': # pragma: no cover\n np.testing.rundocs()\n from ... import data\n from ...viewer import ImageViewer\n\n image = data.camera()\n\n viewer = ImageViewer(image)\n paint_tool = PaintTool(viewer, image.shape)\n viewer.show()\n", "path": "skimage/viewer/canvastools/painttool.py"}], "after_files": [{"content": "import matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage import filters\nfrom skimage import morphology\nfrom skimage.viewer import ImageViewer\nfrom skimage.viewer.widgets import history\nfrom skimage.viewer.plugins.labelplugin import LabelPainter\n\n\nclass OKCancelButtons(history.OKCancelButtons):\n\n def update_original_image(self):\n # OKCancelButtons updates the original image with the filtered image\n # by default. 
Override this method to update the overlay.\n self.plugin._show_watershed()\n self.plugin.close()\n\n\nclass WatershedPlugin(LabelPainter):\n\n def help(self):\n helpstr = (\"Watershed plugin\",\n \"----------------\",\n \"Use mouse to paint each region with a different label.\",\n \"Press OK to display segmented image.\")\n return '\\n'.join(helpstr)\n\n def _show_watershed(self):\n viewer = self.image_viewer\n edge_image = filters.sobel(viewer.image)\n labels = morphology.watershed(edge_image, self.paint_tool.overlay)\n viewer.ax.imshow(labels, cmap=plt.cm.jet, alpha=0.5)\n viewer.redraw()\n\n\nimage = data.coins()\nplugin = WatershedPlugin()\nplugin += OKCancelButtons()\n\nviewer = ImageViewer(image)\nviewer += plugin\nviewer.show()\n", "path": "viewer_examples/plugins/watershed_demo.py"}, {"content": "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nLABELS_CMAP = mcolors.ListedColormap(['white', 'red', 'dodgerblue', 'gold',\n 'greenyellow', 'blueviolet'])\nfrom ...viewer.canvastools.base import CanvasToolBase\n\n\n__all__ = ['PaintTool']\n\n\nclass PaintTool(CanvasToolBase):\n \"\"\"Widget for painting on top of a plot.\n\n Parameters\n ----------\n manager : Viewer or PlotPlugin.\n Skimage viewer or plot plugin object.\n overlay_shape : shape tuple\n 2D shape tuple used to initialize overlay image.\n alpha : float (between [0, 1])\n Opacity of overlay\n on_move : function\n Function called whenever a control handle is moved.\n This function must accept the end points of line as the only argument.\n on_release : function\n Function called whenever the control handle is released.\n on_enter : function\n Function called whenever the \"enter\" key is pressed.\n rect_props : dict\n Properties for :class:`matplotlib.patches.Rectangle`. 
This class\n redefines defaults in :class:`matplotlib.widgets.RectangleSelector`.\n\n Attributes\n ----------\n overlay : array\n Overlay of painted labels displayed on top of image.\n label : int\n Current paint color.\n\n Examples\n ----------\n >>> from skimage.data import camera\n >>> import matplotlib.pyplot as plt\n >>> from skimage.viewer.canvastools import PaintTool\n >>> import numpy as np\n\n >>> img = camera() #doctest: +SKIP\n\n >>> ax = plt.subplot(111) #doctest: +SKIP \n >>> plt.imshow(img, cmap=plt.cm.gray) #doctest: +SKIP\n >>> p = PaintTool(ax,np.shape(img[:-1]),10,0.2) #doctest: +SKIP\n >>> plt.show() #doctest: +SKIP\n\n >>> mask = p.overlay #doctest: +SKIP\n >>> plt.imshow(mask,cmap=plt.cm.gray) #doctest: +SKIP\n >>> plt.show() #doctest: +SKIP\n \"\"\"\n def __init__(self, manager, overlay_shape, radius=5, alpha=0.3,\n on_move=None, on_release=None, on_enter=None,\n rect_props=None):\n super(PaintTool, self).__init__(manager, on_move=on_move,\n on_enter=on_enter,\n on_release=on_release)\n\n props = dict(edgecolor='r', facecolor='0.7', alpha=0.5, animated=True)\n props.update(rect_props if rect_props is not None else {})\n\n self.alpha = alpha\n self.cmap = LABELS_CMAP\n self._overlay_plot = None\n self.shape = overlay_shape\n\n self._cursor = plt.Rectangle((0, 0), 0, 0, **props)\n self._cursor.set_visible(False)\n self.ax.add_patch(self._cursor)\n\n # `label` and `radius` can only be set after initializing `_cursor`\n self.label = 1\n self.radius = radius\n\n # Note that the order is important: Redraw cursor *after* overlay\n self.artists = [self._overlay_plot, self._cursor]\n self.manager.add_tool(self)\n\n @property\n def label(self):\n return self._label\n\n @label.setter\n def label(self, value):\n if value >= self.cmap.N:\n raise ValueError('Maximum label value = %s' % len(self.cmap - 1))\n self._label = value\n self._cursor.set_edgecolor(self.cmap(value))\n\n @property\n def radius(self):\n return self._radius\n\n @radius.setter\n def radius(self, r):\n self._radius = r\n self._width = 2 * r + 1\n self._cursor.set_width(self._width)\n self._cursor.set_height(self._width)\n self.window = CenteredWindow(r, self._shape)\n\n @property\n def overlay(self):\n return self._overlay\n\n @overlay.setter\n def overlay(self, image):\n self._overlay = image\n if image is None:\n self.ax.images.remove(self._overlay_plot)\n self._overlay_plot = None\n elif self._overlay_plot is None:\n props = dict(cmap=self.cmap, alpha=self.alpha,\n norm=mcolors.NoNorm(), animated=True)\n self._overlay_plot = self.ax.imshow(image, **props)\n else:\n self._overlay_plot.set_data(image)\n self.redraw()\n\n @property\n def shape(self):\n return self._shape\n\n @shape.setter\n def shape(self, shape):\n self._shape = shape\n if not self._overlay_plot is None:\n self._overlay_plot.set_extent((-0.5, shape[1] + 0.5,\n shape[0] + 0.5, -0.5))\n self.radius = self._radius\n self.overlay = np.zeros(shape, dtype='uint8')\n\n def on_key_press(self, event):\n if event.key == 'enter':\n self.callback_on_enter(self.geometry)\n self.redraw()\n\n def on_mouse_press(self, event):\n if event.button != 1 or not self.ax.in_axes(event):\n return\n self.update_cursor(event.xdata, event.ydata)\n self.update_overlay(event.xdata, event.ydata)\n\n def on_mouse_release(self, event):\n if event.button != 1:\n return\n self.callback_on_release(self.geometry)\n\n def on_move(self, event):\n if not self.ax.in_axes(event):\n self._cursor.set_visible(False)\n self.redraw() # make sure cursor is not visible\n return\n 
self._cursor.set_visible(True)\n\n self.update_cursor(event.xdata, event.ydata)\n if event.button != 1:\n self.redraw() # update cursor position\n return\n self.update_overlay(event.xdata, event.ydata)\n self.callback_on_move(self.geometry)\n\n def update_overlay(self, x, y):\n overlay = self.overlay\n overlay[self.window.at(y, x)] = self.label\n # Note that overlay calls `redraw`\n self.overlay = overlay\n\n def update_cursor(self, x, y):\n x = x - self.radius - 1\n y = y - self.radius - 1\n self._cursor.set_xy((x, y))\n\n @property\n def geometry(self):\n return self.overlay\n\n\nclass CenteredWindow(object):\n \"\"\"Window that create slices numpy arrays over 2D windows.\n\n Examples\n --------\n >>> a = np.arange(16).reshape(4, 4)\n >>> w = CenteredWindow(1, a.shape)\n >>> a[w.at(1, 1)]\n array([[ 0, 1, 2],\n [ 4, 5, 6],\n [ 8, 9, 10]])\n >>> a[w.at(0, 0)]\n array([[0, 1],\n [4, 5]])\n >>> a[w.at(4, 3)]\n array([[14, 15]])\n \"\"\"\n def __init__(self, radius, array_shape):\n self.radius = radius\n self.array_shape = array_shape\n\n def at(self, row, col):\n h, w = self.array_shape\n r = round(self.radius)\n # Note: the int() cast is necessary because row and col are np.float64,\n # which does not get cast by round(), unlike a normal Python float:\n # >>> round(4.5)\n # 4\n # >>> round(np.float64(4.5))\n # 4.0\n # >>> int(round(np.float64(4.5)))\n # 4\n row, col = int(round(row)), int(round(col))\n xmin = max(0, col - r)\n xmax = min(w, col + r + 1)\n ymin = max(0, row - r)\n ymax = min(h, row + r + 1)\n return (slice(ymin, ymax), slice(xmin, xmax))\n\n\nif __name__ == '__main__': # pragma: no cover\n np.testing.rundocs()\n from ... import data\n from ...viewer import ImageViewer\n\n image = data.camera()\n\n viewer = ImageViewer(image)\n paint_tool = PaintTool(viewer, image.shape)\n viewer.show()\n", "path": "skimage/viewer/canvastools/painttool.py"}]}
3483
464
gh_patches_debug_24662
rasdani/github-patches
git_diff
hi-primus__optimus-1012
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pip install not working **Describe the bug** I am unable to install the optimuspyspark using pip for version 2.2.29 **To Reproduce** Steps to reproduce the behavior: pip install error with message " No such file or directory requirement.txt" **Expected behavior** pip install should not fail --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `optimus/version.py` Content: ``` 1 def _safe_int(string): 2 try: 3 return int(string) 4 except ValueError: 5 return string 6 7 8 __version__ = '2.2.31' 9 VERSION = tuple(_safe_int(x) for x in __version__.split('.')) 10 ``` Path: `setup.py` Content: ``` 1 import os 2 import re 3 import sys 4 5 from setuptools import setup, find_packages 6 7 8 # from optimus.version import __version__ 9 10 # Get version without importing, which avoids dependency issues 11 def get_version(): 12 with open('optimus/version.py') as version_file: 13 return re.search(r"""__version__\s+=\s+(['"])(?P<version>.+?)\1""", 14 version_file.read()).group('version') 15 16 17 # Requirements 18 try: 19 import google.colab 20 21 IN_COLAB = True 22 except ImportError: 23 IN_COLAB = False 24 25 if "DATABRICKS_RUNTIME_VERSION" in os.environ: 26 with open('requirements-databricks.txt') as f: 27 required = f.read().splitlines() 28 elif IN_COLAB: 29 with open('requirements-google-colab.txt') as f: 30 required = f.read().splitlines() 31 else: 32 with open('requirements.txt') as f: 33 required = f.read().splitlines() 34 35 if sys.version_info < (3, 6): 36 raise RuntimeError('This version requires Python 3.6+') # pragma: no cover 37 38 39 def readme(): 40 with open('README.md') as f: 41 return f.read() 42 43 44 lint_requires = [ 45 'pep8', 46 'pyflakes' 47 ] 48 49 tests_require = ['pytest', 'mock', 'nose'] 50 51 dependency_links = [] 52 setup_requires = ['pytest-runner'] 53 if 'nosetests' in sys.argv[1:]: 54 setup_requires.append('nose') 55 56 setup( 57 name='optimuspyspark', 58 version=get_version(), 59 author='Favio Vazquez and Argenis Leon', 60 author_email='[email protected]', 61 url='https://github.com/ironmussa/Optimus/', 62 download_url='https://github.com/ironmussa/Optimus/archive/2.2.31.tar.gz', 63 description=('Optimus is the missing framework for cleaning and pre-processing data in a distributed fashion with ' 64 'pyspark.'), 65 long_description=readme(), 66 long_description_content_type='text/markdown', 67 license='APACHE', 68 packages=find_packages(), 69 install_requires=required, 70 tests_require=tests_require, 71 setup_requires=setup_requires, 72 extras_require={ 73 'test': tests_require, 74 'all': required + tests_require, 75 'docs': ['sphinx'] + tests_require, 76 'lint': lint_requires 77 }, 78 dependency_links=dependency_links, 79 test_suite='nose.collector', 80 include_package_data=True, 81 classifiers=[ 82 'Development Status :: 5 - Production/Stable', 83 'Intended Audience :: Developers', 84 'Topic :: Scientific/Engineering :: Artificial Intelligence', 85 'License :: OSI Approved :: Apache Software License', 86 'Programming Language :: Python :: 3.5', 87 'Programming Language :: Python :: 3.6', 88 ], 89 keywords=['datacleaner', 'apachespark', 'spark', 'pyspark', 'data-wrangling', 'data-cleansing', 'data-profiling'], 90 ) 91 ``` Path: `docs/source/conf.py` Content: ``` 1 #!/usr/bin/env python3 2 # -*- coding: utf-8 -*- 3 # 4 # Optimus documentation build configuration file, 
created by 5 # sphinx-quickstart on Wed Oct 11 19:21:00 2017. 6 # 7 # This file is execfile()d with the current directory set to its 8 # containing dir. 9 # 10 # Note that not all possible configuration values are present in this 11 # autogenerated file. 12 # 13 # All configuration values have a default; values that are commented out 14 # serve to show the default. 15 16 # If extensions (or modules to document with autodoc) are in another directory, 17 # add these directories to sys.path here. If the directory is relative to the 18 # documentation root, use os.path.abspath to make it absolute, like shown here. 19 # 20 # import os 21 # import sys 22 # sys.path.insert(0, os.path.abspath('.')) 23 24 25 # -- General configuration ------------------------------------------------ 26 27 # If your documentation needs a minimal Sphinx version, state it here. 28 # 29 # needs_sphinx = '1.0' 30 31 # Add any Sphinx extension module names here, as strings. They can be 32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 # ones. 34 extensions = ['sphinx.ext.autodoc', 35 'sphinx.ext.doctest', 36 'sphinx.ext.intersphinx', 37 'sphinx.ext.mathjax'] 38 39 # Add any paths that contain templates here, relative to this directory. 40 templates_path = ['_templates'] 41 42 # The suffix(es) of source filenames. 43 # You can specify multiple suffix as a list of string: 44 # 45 # source_suffix = ['.rst', '.md'] 46 source_suffix = '.rst' 47 48 # The master toctree document. 49 master_doc = 'index' 50 51 # General information about the project. 52 project = 'Optimus' 53 copyright = '2017, Iron Mussa' 54 author = 'Argenis León and Favio Vázquez' 55 56 # The version info for the project you're documenting, acts as replacement for 57 # |version| and |release|, also used in various other places throughout the 58 # built documents. 59 # 60 # The short X.Y version. 61 version = '2.2' 62 # The full version, including alpha/beta/rc tags. 63 release = "2.2.31" 64 65 # The language for content autogenerated by Sphinx. Refer to documentation 66 # for a list of supported languages. 67 # 68 # This is also used if you do content translation via gettext catalogs. 69 # Usually you set "language" from the command line for these cases. 70 language = None 71 72 # List of patterns, relative to source directory, that match files and 73 # directories to ignore when looking for source files. 74 # This patterns also effect to html_static_path and html_extra_path 75 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 76 77 # The name of the Pygments (syntax highlighting) style to use. 78 pygments_style = 'sphinx' 79 80 # If true, `todo` and `todoList` produce output, else they produce nothing. 81 todo_include_todos = False 82 83 84 # -- Options for HTML output ---------------------------------------------- 85 86 # The theme to use for HTML and HTML Help pages. See the documentation for 87 # a list of builtin themes. 88 # 89 html_theme = "sphinx_rtd_theme" 90 91 # Theme options are theme-specific and customize the look and feel of a theme 92 # further. For a list of options available for each theme, see the 93 # documentation. 94 # 95 # html_theme_options = {} 96 97 # Add any paths that contain custom static files (such as style sheets) here, 98 # relative to this directory. They are copied after the builtin static files, 99 # so a file named "default.css" will overwrite the builtin "default.css". 
100 html_static_path = ['_static'] 101 102 103 # -- Options for HTMLHelp output ------------------------------------------ 104 105 # Output file base name for HTML help builder. 106 htmlhelp_basename = 'Optimusdoc' 107 108 109 # -- Options for LaTeX output --------------------------------------------- 110 111 latex_elements = { 112 # The paper size ('letterpaper' or 'a4paper'). 113 # 114 # 'papersize': 'letterpaper', 115 116 # The font size ('10pt', '11pt' or '12pt'). 117 # 118 # 'pointsize': '10pt', 119 120 # Additional stuff for the LaTeX preamble. 121 # 122 # 'preamble': '', 123 124 # Latex figure (float) alignment 125 # 126 # 'figure_align': 'htbp', 127 } 128 129 # Grouping the document tree into LaTeX files. List of tuples 130 # (source start file, target name, title, 131 # author, documentclass [howto, manual, or own class]). 132 latex_documents = [ 133 (master_doc, 'Optimus.tex', 'Optimus Documentation', 134 'Favio Vazquez', 'manual'), 135 ] 136 137 138 # -- Options for manual page output --------------------------------------- 139 140 # One entry per manual page. List of tuples 141 # (source start file, name, description, authors, manual section). 142 man_pages = [ 143 (master_doc, 'optimus', 'Optimus Documentation', 144 [author], 1) 145 ] 146 147 148 # -- Options for Texinfo output ------------------------------------------- 149 150 # Grouping the document tree into Texinfo files. List of tuples 151 # (source start file, target name, title, author, 152 # dir menu entry, description, category) 153 texinfo_documents = [ 154 (master_doc, 'Optimus', 'Optimus Documentation', 155 author, 'Optimus', 'One line description of project.', 156 'Miscellaneous'), 157 ] 158 159 # Example configuration for intersphinx: refer to the Python standard library. 160 intersphinx_mapping = {'https://docs.python.org/': None} 161 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -60,7 +60,7 @@ # The short X.Y version. version = '2.2' # The full version, including alpha/beta/rc tags. -release = "2.2.31" +release = "2.2.32" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/optimus/version.py b/optimus/version.py --- a/optimus/version.py +++ b/optimus/version.py @@ -5,5 +5,5 @@ return string -__version__ = '2.2.31' +__version__ = '2.2.32' VERSION = tuple(_safe_int(x) for x in __version__.split('.')) diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -59,7 +59,7 @@ author='Favio Vazquez and Argenis Leon', author_email='[email protected]', url='https://github.com/ironmussa/Optimus/', - download_url='https://github.com/ironmussa/Optimus/archive/2.2.31.tar.gz', + download_url='https://github.com/ironmussa/Optimus/archive/2.2.32.tar.gz', description=('Optimus is the missing framework for cleaning and pre-processing data in a distributed fashion with ' 'pyspark.'), long_description=readme(),
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -60,7 +60,7 @@\n # The short X.Y version.\n version = '2.2'\n # The full version, including alpha/beta/rc tags.\n-release = \"2.2.31\"\n+release = \"2.2.32\"\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\ndiff --git a/optimus/version.py b/optimus/version.py\n--- a/optimus/version.py\n+++ b/optimus/version.py\n@@ -5,5 +5,5 @@\n return string\n \n \n-__version__ = '2.2.31'\n+__version__ = '2.2.32'\n VERSION = tuple(_safe_int(x) for x in __version__.split('.'))\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,7 +59,7 @@\n author='Favio Vazquez and Argenis Leon',\n author_email='[email protected]',\n url='https://github.com/ironmussa/Optimus/',\n- download_url='https://github.com/ironmussa/Optimus/archive/2.2.31.tar.gz',\n+ download_url='https://github.com/ironmussa/Optimus/archive/2.2.32.tar.gz',\n description=('Optimus is the missing framework for cleaning and pre-processing data in a distributed fashion with '\n 'pyspark.'),\n long_description=readme(),\n", "issue": "pip install not working\n**Describe the bug**\r\nI am unable to install the optimuspyspark using pip for version 2.2.29\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\npip install error with message \" No such file or directory requirement.txt\"\r\n\r\n**Expected behavior**\r\npip install should not fail\n", "before_files": [{"content": "def _safe_int(string):\n try:\n return int(string)\n except ValueError:\n return string\n\n\n__version__ = '2.2.31'\nVERSION = tuple(_safe_int(x) for x in __version__.split('.'))\n", "path": "optimus/version.py"}, {"content": "import os\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\n# from optimus.version import __version__\n\n# Get version without importing, which avoids dependency issues\ndef get_version():\n with open('optimus/version.py') as version_file:\n return re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\",\n version_file.read()).group('version')\n\n\n# Requirements\ntry:\n import google.colab\n\n IN_COLAB = True\nexcept ImportError:\n IN_COLAB = False\n\nif \"DATABRICKS_RUNTIME_VERSION\" in os.environ:\n with open('requirements-databricks.txt') as f:\n required = f.read().splitlines()\nelif IN_COLAB:\n with open('requirements-google-colab.txt') as f:\n required = f.read().splitlines()\nelse:\n with open('requirements.txt') as f:\n required = f.read().splitlines()\n\nif sys.version_info < (3, 6):\n raise RuntimeError('This version requires Python 3.6+') # pragma: no cover\n\n\ndef readme():\n with open('README.md') as f:\n return f.read()\n\n\nlint_requires = [\n 'pep8',\n 'pyflakes'\n]\n\ntests_require = ['pytest', 'mock', 'nose']\n\ndependency_links = []\nsetup_requires = ['pytest-runner']\nif 'nosetests' in sys.argv[1:]:\n setup_requires.append('nose')\n\nsetup(\n name='optimuspyspark',\n version=get_version(),\n author='Favio Vazquez and Argenis Leon',\n author_email='[email protected]',\n url='https://github.com/ironmussa/Optimus/',\n download_url='https://github.com/ironmussa/Optimus/archive/2.2.31.tar.gz',\n description=('Optimus is the missing framework for cleaning and pre-processing data in a distributed fashion with '\n 'pyspark.'),\n long_description=readme(),\n long_description_content_type='text/markdown',\n license='APACHE',\n packages=find_packages(),\n install_requires=required,\n 
tests_require=tests_require,\n setup_requires=setup_requires,\n extras_require={\n 'test': tests_require,\n 'all': required + tests_require,\n 'docs': ['sphinx'] + tests_require,\n 'lint': lint_requires\n },\n dependency_links=dependency_links,\n test_suite='nose.collector',\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['datacleaner', 'apachespark', 'spark', 'pyspark', 'data-wrangling', 'data-cleansing', 'data-profiling'],\n)\n", "path": "setup.py"}, {"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Optimus documentation build configuration file, created by\n# sphinx-quickstart on Wed Oct 11 19:21:00 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Optimus'\ncopyright = '2017, Iron Mussa'\nauthor = 'Argenis Le\u00f3n and Favio V\u00e1zquez'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '2.2'\n# The full version, including alpha/beta/rc tags.\nrelease = \"2.2.31\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Optimusdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Optimus.tex', 'Optimus Documentation',\n 'Favio Vazquez', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'optimus', 'Optimus Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Optimus', 'Optimus Documentation',\n author, 'Optimus', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {'https://docs.python.org/': None}\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "def _safe_int(string):\n try:\n return int(string)\n except ValueError:\n return string\n\n\n__version__ = '2.2.32'\nVERSION = tuple(_safe_int(x) for x in __version__.split('.'))\n", "path": "optimus/version.py"}, {"content": "import os\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\n# from optimus.version import __version__\n\n# Get version without importing, which avoids dependency issues\ndef get_version():\n with open('optimus/version.py') as version_file:\n return re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\",\n version_file.read()).group('version')\n\n\n# Requirements\ntry:\n import google.colab\n\n IN_COLAB = True\nexcept ImportError:\n IN_COLAB = False\n\nif \"DATABRICKS_RUNTIME_VERSION\" in os.environ:\n with open('requirements-databricks.txt') as f:\n required = f.read().splitlines()\nelif IN_COLAB:\n with open('requirements-google-colab.txt') as f:\n required = f.read().splitlines()\nelse:\n with open('requirements.txt') as f:\n required = f.read().splitlines()\n\nif sys.version_info < (3, 6):\n raise RuntimeError('This version requires Python 3.6+') # pragma: no cover\n\n\ndef readme():\n with open('README.md') as f:\n return f.read()\n\n\nlint_requires = [\n 'pep8',\n 'pyflakes'\n]\n\ntests_require = ['pytest', 'mock', 'nose']\n\ndependency_links = []\nsetup_requires = ['pytest-runner']\nif 'nosetests' in sys.argv[1:]:\n setup_requires.append('nose')\n\nsetup(\n name='optimuspyspark',\n version=get_version(),\n author='Favio Vazquez and Argenis Leon',\n author_email='[email protected]',\n url='https://github.com/ironmussa/Optimus/',\n download_url='https://github.com/ironmussa/Optimus/archive/2.2.32.tar.gz',\n description=('Optimus is the missing framework for cleaning and pre-processing data in a distributed fashion with '\n 'pyspark.'),\n long_description=readme(),\n long_description_content_type='text/markdown',\n license='APACHE',\n packages=find_packages(),\n install_requires=required,\n tests_require=tests_require,\n setup_requires=setup_requires,\n extras_require={\n 'test': tests_require,\n 'all': required + tests_require,\n 'docs': ['sphinx'] + tests_require,\n 'lint': lint_requires\n },\n dependency_links=dependency_links,\n test_suite='nose.collector',\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['datacleaner', 'apachespark', 'spark', 'pyspark', 'data-wrangling', 'data-cleansing', 'data-profiling'],\n)\n", "path": "setup.py"}, {"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Optimus documentation build configuration file, created by\n# sphinx-quickstart on Wed Oct 11 19:21:00 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in 
this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Optimus'\ncopyright = '2017, Iron Mussa'\nauthor = 'Argenis Le\u00f3n and Favio V\u00e1zquez'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '2.2'\n# The full version, including alpha/beta/rc tags.\nrelease = \"2.2.32\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Optimusdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Optimus.tex', 'Optimus Documentation',\n 'Favio Vazquez', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'optimus', 'Optimus Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Optimus', 'Optimus Documentation',\n author, 'Optimus', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {'https://docs.python.org/': None}\n", "path": "docs/source/conf.py"}]}
2,832
361
gh_patches_debug_6398
rasdani/github-patches
git_diff
yt-dlp__yt-dlp-1858
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Jamando: 'JamendoIE' object has no attribute '_VALID_URL_RE' ### Checklist - [X] I'm reporting a broken site - [X] I've verified that I'm running yt-dlp version **2021.12.01**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) - [X] I've checked that all provided URLs are alive and playable in a browser - [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command) - [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates - [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue) - [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required ### Region wordwide ### Description ``` yt-dlp https://www.jamendo.com/track/1885651/to-aurora ERROR: 'JamendoIE' object has no attribute '_VALID_URL_RE' yt-dlp https://www.jamendo.com/track/1848421/badly ERROR: 'JamendoIE' object has no attribute '_VALID_URL_RE' ``` ## Relevant lines https://github.com/yt-dlp/yt-dlp/blob/1117579b9457f8fbf7a4d7433a92b67ac802bdea/yt_dlp/extractor/jamendo.py#L17 https://github.com/yt-dlp/yt-dlp/blob/1117579b9457f8fbf7a4d7433a92b67ac802bdea/yt_dlp/extractor/jamendo.py#L62 https://github.com/yt-dlp/yt-dlp/blob/ee8dd27a7351841e1de8cebf8311b69fbef09eab/yt_dlp/extractor/common.py#L463-L470 ### Verbose log ```shell yt-dlp -v https://www.jamendo.com/track/1848421/badly[debug] Command-line config: ['-v', 'https://www.jamendo.com/track/1848421/badly'] [debug] Encodings: locale UTF-8, fs utf-8, out utf-8, err utf-8, pref UTF-8 [debug] yt-dlp version 2021.12.01 [91f071af6] [debug] Python version 3.9.7 (CPython 64bit) - Linux-5.10.79-1-MANJARO-x86_64-with-glibc2.33 [debug] exe versions: ffmpeg 4.4.1 (setts), ffprobe 4.4.1, rtmpdump 2.4 [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets [debug] Proxy map: {} [debug] [Jamendo] Extracting URL: https://www.jamendo.com/track/1848421/badly ERROR: 'JamendoIE' object has no attribute '_VALID_URL_RE' Traceback (most recent call last): File "/home/jaller94/.local/lib/python3.9/site-packages/yt_dlp/YoutubeDL.py", line 1329, in wrapper return func(self, *args, **kwargs) File "/home/jaller94/.local/lib/python3.9/site-packages/yt_dlp/YoutubeDL.py", line 1398, in __extract_info ie_result = ie.extract(url) File "/home/jaller94/.local/lib/python3.9/site-packages/yt_dlp/extractor/common.py", line 597, in extract ie_result = self._real_extract(url) File "/home/jaller94/.local/lib/python3.9/site-packages/yt_dlp/extractor/jamendo.py", line 62, in _real_extract track_id, display_id = self._VALID_URL_RE.match(url).groups() AttributeError: 'JamendoIE' object has no attribute '_VALID_URL_RE' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `yt_dlp/extractor/jamendo.py` Content: ``` 1 # coding: utf-8 2 from __future__ import unicode_literals 3 4 import hashlib 5 import random 6 7 from ..compat import compat_str 8 from .common import InfoExtractor 9 from ..utils import ( 10 clean_html, 11 int_or_none, 12 try_get, 13 ) 14 15 16 class JamendoIE(InfoExtractor): 17 _VALID_URL = r'''(?x) 18 https?:// 19 (?: 20 licensing\.jamendo\.com/[^/]+| 21 (?:www\.)?jamendo\.com 22 ) 23 /track/(?P<id>[0-9]+)(?:/(?P<display_id>[^/?#&]+))? 24 ''' 25 _TESTS = [{ 26 'url': 'https://www.jamendo.com/track/196219/stories-from-emona-i', 27 'md5': '6e9e82ed6db98678f171c25a8ed09ffd', 28 'info_dict': { 29 'id': '196219', 30 'display_id': 'stories-from-emona-i', 31 'ext': 'flac', 32 # 'title': 'Maya Filipič - Stories from Emona I', 33 'title': 'Stories from Emona I', 34 # 'artist': 'Maya Filipič', 35 'track': 'Stories from Emona I', 36 'duration': 210, 37 'thumbnail': r're:^https?://.*\.jpg', 38 'timestamp': 1217438117, 39 'upload_date': '20080730', 40 'license': 'by-nc-nd', 41 'view_count': int, 42 'like_count': int, 43 'average_rating': int, 44 'tags': ['piano', 'peaceful', 'newage', 'strings', 'upbeat'], 45 } 46 }, { 47 'url': 'https://licensing.jamendo.com/en/track/1496667/energetic-rock', 48 'only_matching': True, 49 }] 50 51 def _call_api(self, resource, resource_id): 52 path = '/api/%ss' % resource 53 rand = compat_str(random.random()) 54 return self._download_json( 55 'https://www.jamendo.com' + path, resource_id, query={ 56 'id[]': resource_id, 57 }, headers={ 58 'X-Jam-Call': '$%s*%s~' % (hashlib.sha1((path + rand).encode()).hexdigest(), rand) 59 })[0] 60 61 def _real_extract(self, url): 62 track_id, display_id = self._VALID_URL_RE.match(url).groups() 63 # webpage = self._download_webpage( 64 # 'https://www.jamendo.com/track/' + track_id, track_id) 65 # models = self._parse_json(self._html_search_regex( 66 # r"data-bundled-models='([^']+)", 67 # webpage, 'bundled models'), track_id) 68 # track = models['track']['models'][0] 69 track = self._call_api('track', track_id) 70 title = track_name = track['name'] 71 # get_model = lambda x: try_get(models, lambda y: y[x]['models'][0], dict) or {} 72 # artist = get_model('artist') 73 # artist_name = artist.get('name') 74 # if artist_name: 75 # title = '%s - %s' % (artist_name, title) 76 # album = get_model('album') 77 78 formats = [{ 79 'url': 'https://%s.jamendo.com/?trackid=%s&format=%s&from=app-97dab294' 80 % (sub_domain, track_id, format_id), 81 'format_id': format_id, 82 'ext': ext, 83 'quality': quality, 84 } for quality, (format_id, sub_domain, ext) in enumerate(( 85 ('mp31', 'mp3l', 'mp3'), 86 ('mp32', 'mp3d', 'mp3'), 87 ('ogg1', 'ogg', 'ogg'), 88 ('flac', 'flac', 'flac'), 89 ))] 90 self._sort_formats(formats) 91 92 urls = [] 93 thumbnails = [] 94 for covers in (track.get('cover') or {}).values(): 95 for cover_id, cover_url in covers.items(): 96 if not cover_url or cover_url in urls: 97 continue 98 urls.append(cover_url) 99 size = int_or_none(cover_id.lstrip('size')) 100 thumbnails.append({ 101 'id': cover_id, 102 'url': cover_url, 103 'width': size, 104 'height': size, 105 }) 106 107 tags = [] 108 for tag in (track.get('tags') or []): 109 tag_name = tag.get('name') 110 if not tag_name: 111 continue 112 tags.append(tag_name) 113 114 stats = track.get('stats') or {} 115 license = track.get('licenseCC') or [] 116 117 return { 118 'id': track_id, 119 'display_id': display_id, 120 'thumbnails': thumbnails, 121 'title': title, 122 'description': track.get('description'), 123 
'duration': int_or_none(track.get('duration')), 124 # 'artist': artist_name, 125 'track': track_name, 126 # 'album': album.get('name'), 127 'formats': formats, 128 'license': '-'.join(license) if license else None, 129 'timestamp': int_or_none(track.get('dateCreated')), 130 'view_count': int_or_none(stats.get('listenedAll')), 131 'like_count': int_or_none(stats.get('favorited')), 132 'average_rating': int_or_none(stats.get('averageNote')), 133 'tags': tags, 134 } 135 136 137 class JamendoAlbumIE(JamendoIE): 138 _VALID_URL = r'https?://(?:www\.)?jamendo\.com/album/(?P<id>[0-9]+)' 139 _TESTS = [{ 140 'url': 'https://www.jamendo.com/album/121486/duck-on-cover', 141 'info_dict': { 142 'id': '121486', 143 'title': 'Duck On Cover', 144 'description': 'md5:c2920eaeef07d7af5b96d7c64daf1239', 145 }, 146 'playlist': [{ 147 'md5': 'e1a2fcb42bda30dfac990212924149a8', 148 'info_dict': { 149 'id': '1032333', 150 'ext': 'flac', 151 'title': 'Shearer - Warmachine', 152 'artist': 'Shearer', 153 'track': 'Warmachine', 154 'timestamp': 1368089771, 155 'upload_date': '20130509', 156 } 157 }, { 158 'md5': '1f358d7b2f98edfe90fd55dac0799d50', 159 'info_dict': { 160 'id': '1032330', 161 'ext': 'flac', 162 'title': 'Shearer - Without Your Ghost', 163 'artist': 'Shearer', 164 'track': 'Without Your Ghost', 165 'timestamp': 1368089771, 166 'upload_date': '20130509', 167 } 168 }], 169 'params': { 170 'playlistend': 2 171 } 172 }] 173 174 def _real_extract(self, url): 175 album_id = self._match_id(url) 176 album = self._call_api('album', album_id) 177 album_name = album.get('name') 178 179 entries = [] 180 for track in (album.get('tracks') or []): 181 track_id = track.get('id') 182 if not track_id: 183 continue 184 track_id = compat_str(track_id) 185 entries.append({ 186 '_type': 'url_transparent', 187 'url': 'https://www.jamendo.com/track/' + track_id, 188 'ie_key': JamendoIE.ie_key(), 189 'id': track_id, 190 'album': album_name, 191 }) 192 193 return self.playlist_result( 194 entries, album_id, album_name, 195 clean_html(try_get(album, lambda x: x['description']['en'], compat_str))) 196 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/yt_dlp/extractor/jamendo.py b/yt_dlp/extractor/jamendo.py --- a/yt_dlp/extractor/jamendo.py +++ b/yt_dlp/extractor/jamendo.py @@ -59,7 +59,7 @@ })[0] def _real_extract(self, url): - track_id, display_id = self._VALID_URL_RE.match(url).groups() + track_id, display_id = self._match_valid_url(url).groups() # webpage = self._download_webpage( # 'https://www.jamendo.com/track/' + track_id, track_id) # models = self._parse_json(self._html_search_regex(
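The golden diff above drops the direct use of the removed `_VALID_URL_RE` attribute and calls the extractor's `_match_valid_url()` helper instead, which matches `_VALID_URL` on demand. The sketch below illustrates only that calling pattern; `InfoExtractorSketch` and its simplified regex are hypothetical stand-ins, not yt-dlp's real `InfoExtractor` class or the full Jamendo pattern.

```python
import re


class InfoExtractorSketch:
    # Hypothetical stand-in for the real InfoExtractor; the regex is a
    # simplified version of JamendoIE._VALID_URL, for illustration only.
    _VALID_URL = r'https?://(?:www\.)?jamendo\.com/track/(?P<id>[0-9]+)(?:/(?P<display_id>[^/?#&]+))?'

    @classmethod
    def _match_valid_url(cls, url):
        # Match the class-level _VALID_URL on demand instead of relying on a
        # precompiled _VALID_URL_RE attribute, roughly what the real helper does.
        return re.match(cls._VALID_URL, url)

    def _real_extract(self, url):
        # The patched calling pattern from the diff above.
        track_id, display_id = self._match_valid_url(url).groups()
        return track_id, display_id


print(InfoExtractorSketch()._real_extract('https://www.jamendo.com/track/1848421/badly'))
# -> ('1848421', 'badly')
```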
{"golden_diff": "diff --git a/yt_dlp/extractor/jamendo.py b/yt_dlp/extractor/jamendo.py\n--- a/yt_dlp/extractor/jamendo.py\n+++ b/yt_dlp/extractor/jamendo.py\n@@ -59,7 +59,7 @@\n })[0]\n \n def _real_extract(self, url):\n- track_id, display_id = self._VALID_URL_RE.match(url).groups()\n+ track_id, display_id = self._match_valid_url(url).groups()\n # webpage = self._download_webpage(\n # 'https://www.jamendo.com/track/' + track_id, track_id)\n # models = self._parse_json(self._html_search_regex(\n", "issue": "Jamando: 'JamendoIE' object has no attribute '_VALID_URL_RE'\n### Checklist\n\n- [X] I'm reporting a broken site\n- [X] I've verified that I'm running yt-dlp version **2021.12.01**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))\n- [X] I've checked that all provided URLs are alive and playable in a browser\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nwordwide\n\n### Description\n\n```\r\nyt-dlp https://www.jamendo.com/track/1885651/to-aurora\r\nERROR: 'JamendoIE' object has no attribute '_VALID_URL_RE'\r\n\r\nyt-dlp https://www.jamendo.com/track/1848421/badly\r\nERROR: 'JamendoIE' object has no attribute '_VALID_URL_RE'\r\n```\r\n\r\n## Relevant lines\r\nhttps://github.com/yt-dlp/yt-dlp/blob/1117579b9457f8fbf7a4d7433a92b67ac802bdea/yt_dlp/extractor/jamendo.py#L17\r\n\r\nhttps://github.com/yt-dlp/yt-dlp/blob/1117579b9457f8fbf7a4d7433a92b67ac802bdea/yt_dlp/extractor/jamendo.py#L62\r\n\r\nhttps://github.com/yt-dlp/yt-dlp/blob/ee8dd27a7351841e1de8cebf8311b69fbef09eab/yt_dlp/extractor/common.py#L463-L470\n\n### Verbose log\n\n```shell\nyt-dlp -v https://www.jamendo.com/track/1848421/badly[debug] Command-line config: ['-v', 'https://www.jamendo.com/track/1848421/badly']\r\n[debug] Encodings: locale UTF-8, fs utf-8, out utf-8, err utf-8, pref UTF-8\r\n[debug] yt-dlp version 2021.12.01 [91f071af6]\r\n[debug] Python version 3.9.7 (CPython 64bit) - Linux-5.10.79-1-MANJARO-x86_64-with-glibc2.33\r\n[debug] exe versions: ffmpeg 4.4.1 (setts), ffprobe 4.4.1, rtmpdump 2.4\r\n[debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets\r\n[debug] Proxy map: {}\r\n[debug] [Jamendo] Extracting URL: https://www.jamendo.com/track/1848421/badly\r\nERROR: 'JamendoIE' object has no attribute '_VALID_URL_RE'\r\nTraceback (most recent call last):\r\n File \"/home/jaller94/.local/lib/python3.9/site-packages/yt_dlp/YoutubeDL.py\", line 1329, in wrapper\r\n return func(self, *args, **kwargs)\r\n File \"/home/jaller94/.local/lib/python3.9/site-packages/yt_dlp/YoutubeDL.py\", line 1398, in __extract_info\r\n ie_result = ie.extract(url)\r\n File \"/home/jaller94/.local/lib/python3.9/site-packages/yt_dlp/extractor/common.py\", line 597, in extract\r\n ie_result = self._real_extract(url)\r\n File 
\"/home/jaller94/.local/lib/python3.9/site-packages/yt_dlp/extractor/jamendo.py\", line 62, in _real_extract\r\n track_id, display_id = self._VALID_URL_RE.match(url).groups()\r\nAttributeError: 'JamendoIE' object has no attribute '_VALID_URL_RE'\n```\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport hashlib\nimport random\n\nfrom ..compat import compat_str\nfrom .common import InfoExtractor\nfrom ..utils import (\n clean_html,\n int_or_none,\n try_get,\n)\n\n\nclass JamendoIE(InfoExtractor):\n _VALID_URL = r'''(?x)\n https?://\n (?:\n licensing\\.jamendo\\.com/[^/]+|\n (?:www\\.)?jamendo\\.com\n )\n /track/(?P<id>[0-9]+)(?:/(?P<display_id>[^/?#&]+))?\n '''\n _TESTS = [{\n 'url': 'https://www.jamendo.com/track/196219/stories-from-emona-i',\n 'md5': '6e9e82ed6db98678f171c25a8ed09ffd',\n 'info_dict': {\n 'id': '196219',\n 'display_id': 'stories-from-emona-i',\n 'ext': 'flac',\n # 'title': 'Maya Filipi\u010d - Stories from Emona I',\n 'title': 'Stories from Emona I',\n # 'artist': 'Maya Filipi\u010d',\n 'track': 'Stories from Emona I',\n 'duration': 210,\n 'thumbnail': r're:^https?://.*\\.jpg',\n 'timestamp': 1217438117,\n 'upload_date': '20080730',\n 'license': 'by-nc-nd',\n 'view_count': int,\n 'like_count': int,\n 'average_rating': int,\n 'tags': ['piano', 'peaceful', 'newage', 'strings', 'upbeat'],\n }\n }, {\n 'url': 'https://licensing.jamendo.com/en/track/1496667/energetic-rock',\n 'only_matching': True,\n }]\n\n def _call_api(self, resource, resource_id):\n path = '/api/%ss' % resource\n rand = compat_str(random.random())\n return self._download_json(\n 'https://www.jamendo.com' + path, resource_id, query={\n 'id[]': resource_id,\n }, headers={\n 'X-Jam-Call': '$%s*%s~' % (hashlib.sha1((path + rand).encode()).hexdigest(), rand)\n })[0]\n\n def _real_extract(self, url):\n track_id, display_id = self._VALID_URL_RE.match(url).groups()\n # webpage = self._download_webpage(\n # 'https://www.jamendo.com/track/' + track_id, track_id)\n # models = self._parse_json(self._html_search_regex(\n # r\"data-bundled-models='([^']+)\",\n # webpage, 'bundled models'), track_id)\n # track = models['track']['models'][0]\n track = self._call_api('track', track_id)\n title = track_name = track['name']\n # get_model = lambda x: try_get(models, lambda y: y[x]['models'][0], dict) or {}\n # artist = get_model('artist')\n # artist_name = artist.get('name')\n # if artist_name:\n # title = '%s - %s' % (artist_name, title)\n # album = get_model('album')\n\n formats = [{\n 'url': 'https://%s.jamendo.com/?trackid=%s&format=%s&from=app-97dab294'\n % (sub_domain, track_id, format_id),\n 'format_id': format_id,\n 'ext': ext,\n 'quality': quality,\n } for quality, (format_id, sub_domain, ext) in enumerate((\n ('mp31', 'mp3l', 'mp3'),\n ('mp32', 'mp3d', 'mp3'),\n ('ogg1', 'ogg', 'ogg'),\n ('flac', 'flac', 'flac'),\n ))]\n self._sort_formats(formats)\n\n urls = []\n thumbnails = []\n for covers in (track.get('cover') or {}).values():\n for cover_id, cover_url in covers.items():\n if not cover_url or cover_url in urls:\n continue\n urls.append(cover_url)\n size = int_or_none(cover_id.lstrip('size'))\n thumbnails.append({\n 'id': cover_id,\n 'url': cover_url,\n 'width': size,\n 'height': size,\n })\n\n tags = []\n for tag in (track.get('tags') or []):\n tag_name = tag.get('name')\n if not tag_name:\n continue\n tags.append(tag_name)\n\n stats = track.get('stats') or {}\n license = track.get('licenseCC') or []\n\n return {\n 'id': track_id,\n 'display_id': display_id,\n 
'thumbnails': thumbnails,\n 'title': title,\n 'description': track.get('description'),\n 'duration': int_or_none(track.get('duration')),\n # 'artist': artist_name,\n 'track': track_name,\n # 'album': album.get('name'),\n 'formats': formats,\n 'license': '-'.join(license) if license else None,\n 'timestamp': int_or_none(track.get('dateCreated')),\n 'view_count': int_or_none(stats.get('listenedAll')),\n 'like_count': int_or_none(stats.get('favorited')),\n 'average_rating': int_or_none(stats.get('averageNote')),\n 'tags': tags,\n }\n\n\nclass JamendoAlbumIE(JamendoIE):\n _VALID_URL = r'https?://(?:www\\.)?jamendo\\.com/album/(?P<id>[0-9]+)'\n _TESTS = [{\n 'url': 'https://www.jamendo.com/album/121486/duck-on-cover',\n 'info_dict': {\n 'id': '121486',\n 'title': 'Duck On Cover',\n 'description': 'md5:c2920eaeef07d7af5b96d7c64daf1239',\n },\n 'playlist': [{\n 'md5': 'e1a2fcb42bda30dfac990212924149a8',\n 'info_dict': {\n 'id': '1032333',\n 'ext': 'flac',\n 'title': 'Shearer - Warmachine',\n 'artist': 'Shearer',\n 'track': 'Warmachine',\n 'timestamp': 1368089771,\n 'upload_date': '20130509',\n }\n }, {\n 'md5': '1f358d7b2f98edfe90fd55dac0799d50',\n 'info_dict': {\n 'id': '1032330',\n 'ext': 'flac',\n 'title': 'Shearer - Without Your Ghost',\n 'artist': 'Shearer',\n 'track': 'Without Your Ghost',\n 'timestamp': 1368089771,\n 'upload_date': '20130509',\n }\n }],\n 'params': {\n 'playlistend': 2\n }\n }]\n\n def _real_extract(self, url):\n album_id = self._match_id(url)\n album = self._call_api('album', album_id)\n album_name = album.get('name')\n\n entries = []\n for track in (album.get('tracks') or []):\n track_id = track.get('id')\n if not track_id:\n continue\n track_id = compat_str(track_id)\n entries.append({\n '_type': 'url_transparent',\n 'url': 'https://www.jamendo.com/track/' + track_id,\n 'ie_key': JamendoIE.ie_key(),\n 'id': track_id,\n 'album': album_name,\n })\n\n return self.playlist_result(\n entries, album_id, album_name,\n clean_html(try_get(album, lambda x: x['description']['en'], compat_str)))\n", "path": "yt_dlp/extractor/jamendo.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport hashlib\nimport random\n\nfrom ..compat import compat_str\nfrom .common import InfoExtractor\nfrom ..utils import (\n clean_html,\n int_or_none,\n try_get,\n)\n\n\nclass JamendoIE(InfoExtractor):\n _VALID_URL = r'''(?x)\n https?://\n (?:\n licensing\\.jamendo\\.com/[^/]+|\n (?:www\\.)?jamendo\\.com\n )\n /track/(?P<id>[0-9]+)(?:/(?P<display_id>[^/?#&]+))?\n '''\n _TESTS = [{\n 'url': 'https://www.jamendo.com/track/196219/stories-from-emona-i',\n 'md5': '6e9e82ed6db98678f171c25a8ed09ffd',\n 'info_dict': {\n 'id': '196219',\n 'display_id': 'stories-from-emona-i',\n 'ext': 'flac',\n # 'title': 'Maya Filipi\u010d - Stories from Emona I',\n 'title': 'Stories from Emona I',\n # 'artist': 'Maya Filipi\u010d',\n 'track': 'Stories from Emona I',\n 'duration': 210,\n 'thumbnail': r're:^https?://.*\\.jpg',\n 'timestamp': 1217438117,\n 'upload_date': '20080730',\n 'license': 'by-nc-nd',\n 'view_count': int,\n 'like_count': int,\n 'average_rating': int,\n 'tags': ['piano', 'peaceful', 'newage', 'strings', 'upbeat'],\n }\n }, {\n 'url': 'https://licensing.jamendo.com/en/track/1496667/energetic-rock',\n 'only_matching': True,\n }]\n\n def _call_api(self, resource, resource_id):\n path = '/api/%ss' % resource\n rand = compat_str(random.random())\n return self._download_json(\n 'https://www.jamendo.com' + path, resource_id, query={\n 'id[]': resource_id,\n }, 
headers={\n 'X-Jam-Call': '$%s*%s~' % (hashlib.sha1((path + rand).encode()).hexdigest(), rand)\n })[0]\n\n def _real_extract(self, url):\n track_id, display_id = self._match_valid_url(url).groups()\n # webpage = self._download_webpage(\n # 'https://www.jamendo.com/track/' + track_id, track_id)\n # models = self._parse_json(self._html_search_regex(\n # r\"data-bundled-models='([^']+)\",\n # webpage, 'bundled models'), track_id)\n # track = models['track']['models'][0]\n track = self._call_api('track', track_id)\n title = track_name = track['name']\n # get_model = lambda x: try_get(models, lambda y: y[x]['models'][0], dict) or {}\n # artist = get_model('artist')\n # artist_name = artist.get('name')\n # if artist_name:\n # title = '%s - %s' % (artist_name, title)\n # album = get_model('album')\n\n formats = [{\n 'url': 'https://%s.jamendo.com/?trackid=%s&format=%s&from=app-97dab294'\n % (sub_domain, track_id, format_id),\n 'format_id': format_id,\n 'ext': ext,\n 'quality': quality,\n } for quality, (format_id, sub_domain, ext) in enumerate((\n ('mp31', 'mp3l', 'mp3'),\n ('mp32', 'mp3d', 'mp3'),\n ('ogg1', 'ogg', 'ogg'),\n ('flac', 'flac', 'flac'),\n ))]\n self._sort_formats(formats)\n\n urls = []\n thumbnails = []\n for covers in (track.get('cover') or {}).values():\n for cover_id, cover_url in covers.items():\n if not cover_url or cover_url in urls:\n continue\n urls.append(cover_url)\n size = int_or_none(cover_id.lstrip('size'))\n thumbnails.append({\n 'id': cover_id,\n 'url': cover_url,\n 'width': size,\n 'height': size,\n })\n\n tags = []\n for tag in (track.get('tags') or []):\n tag_name = tag.get('name')\n if not tag_name:\n continue\n tags.append(tag_name)\n\n stats = track.get('stats') or {}\n license = track.get('licenseCC') or []\n\n return {\n 'id': track_id,\n 'display_id': display_id,\n 'thumbnails': thumbnails,\n 'title': title,\n 'description': track.get('description'),\n 'duration': int_or_none(track.get('duration')),\n # 'artist': artist_name,\n 'track': track_name,\n # 'album': album.get('name'),\n 'formats': formats,\n 'license': '-'.join(license) if license else None,\n 'timestamp': int_or_none(track.get('dateCreated')),\n 'view_count': int_or_none(stats.get('listenedAll')),\n 'like_count': int_or_none(stats.get('favorited')),\n 'average_rating': int_or_none(stats.get('averageNote')),\n 'tags': tags,\n }\n\n\nclass JamendoAlbumIE(JamendoIE):\n _VALID_URL = r'https?://(?:www\\.)?jamendo\\.com/album/(?P<id>[0-9]+)'\n _TESTS = [{\n 'url': 'https://www.jamendo.com/album/121486/duck-on-cover',\n 'info_dict': {\n 'id': '121486',\n 'title': 'Duck On Cover',\n 'description': 'md5:c2920eaeef07d7af5b96d7c64daf1239',\n },\n 'playlist': [{\n 'md5': 'e1a2fcb42bda30dfac990212924149a8',\n 'info_dict': {\n 'id': '1032333',\n 'ext': 'flac',\n 'title': 'Shearer - Warmachine',\n 'artist': 'Shearer',\n 'track': 'Warmachine',\n 'timestamp': 1368089771,\n 'upload_date': '20130509',\n }\n }, {\n 'md5': '1f358d7b2f98edfe90fd55dac0799d50',\n 'info_dict': {\n 'id': '1032330',\n 'ext': 'flac',\n 'title': 'Shearer - Without Your Ghost',\n 'artist': 'Shearer',\n 'track': 'Without Your Ghost',\n 'timestamp': 1368089771,\n 'upload_date': '20130509',\n }\n }],\n 'params': {\n 'playlistend': 2\n }\n }]\n\n def _real_extract(self, url):\n album_id = self._match_id(url)\n album = self._call_api('album', album_id)\n album_name = album.get('name')\n\n entries = []\n for track in (album.get('tracks') or []):\n track_id = track.get('id')\n if not track_id:\n continue\n track_id = compat_str(track_id)\n 
entries.append({\n '_type': 'url_transparent',\n 'url': 'https://www.jamendo.com/track/' + track_id,\n 'ie_key': JamendoIE.ie_key(),\n 'id': track_id,\n 'album': album_name,\n })\n\n return self.playlist_result(\n entries, album_id, album_name,\n clean_html(try_get(album, lambda x: x['description']['en'], compat_str)))\n", "path": "yt_dlp/extractor/jamendo.py"}]}
3,697
163
gh_patches_debug_33701
rasdani/github-patches
git_diff
piskvorky__gensim-1217
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Wrong calculation for max_iter_dump In https://github.com/RaRe-Technologies/gensim/blob/develop/gensim/models/wrappers/wordrank.py#L144 Shouldnt this line `max_iter_dump = iter / dump_period * dump_period - 1` just be `max_iter_dump = iter - dump_period` ? To reproduce try these parameters: `model = Wordrank.train(wr_path, data, out_dir, iter=100, dump_period=5)` It will error out with - ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/ajkale/anaconda2/envs/wordrank/lib/python2.7/site-packages/gensim/models/wrappers/wordrank.py", line 146, in train copyfile('model_word_%d.txt' % max_iter_dump, 'wordrank.words') File "/home/ajkale/anaconda2/envs/wordrank/lib/python2.7/shutil.py", line 82, in copyfile with open(src, 'rb') as fsrc: IOError: [Errno 2] No such file or directory: 'model_word_99.txt' ``` Mainly because `max_iter_dump = iter / dump_period * dump_period - 1` calculates max_iter_dump=99 instead of 95. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gensim/models/wrappers/wordrank.py` Content: ``` 1 # Copyright (C) 2017 Parul Sethi <[email protected]> 2 # Copyright (C) 2017 Radim Rehurek <[email protected]> 3 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html 4 5 """ 6 Python wrapper around word representation learning from Wordrank. 7 The wrapped model can NOT be updated with new documents for online training -- use gensim's 8 `Word2Vec` for that. 9 10 Example: 11 >>> model = gensim.models.wrappers.Wordrank('/Users/dummy/wordrank', corpus_file='text8', out_path='wr_model') 12 >>> print model[word] # prints vector for given words 13 14 .. [1] https://bitbucket.org/shihaoji/wordrank/ 15 .. [2] https://arxiv.org/pdf/1506.02761v3.pdf 16 """ 17 18 from __future__ import division 19 20 import logging 21 import os 22 import sys 23 import copy 24 import multiprocessing 25 26 import numpy as np 27 28 from gensim import utils 29 from gensim.models.keyedvectors import KeyedVectors 30 from gensim.scripts.glove2word2vec import glove2word2vec 31 32 from six import string_types 33 from smart_open import smart_open 34 from shutil import copyfile, rmtree 35 36 37 logger = logging.getLogger(__name__) 38 39 40 class Wordrank(KeyedVectors): 41 """ 42 Class for word vector training using Wordrank. Communication between Wordrank and Python 43 takes place by working with data files on disk and calling the Wordrank binary and glove's 44 helper binaries (for preparing training data) with subprocess module. 45 """ 46 47 @classmethod 48 def train(cls, wr_path, corpus_file, out_path, size=100, window=15, symmetric=1, min_count=5, max_vocab_size=0, 49 sgd_num=100, lrate=0.001, period=10, iter=91, epsilon=0.75, dump_period=10, reg=0, alpha=100, 50 beta=99, loss='hinge', memory=4.0, cleanup_files=True, sorted_vocab=1, ensemble=0): 51 """ 52 `wr_path` is the path to the Wordrank directory. 53 `corpus_file` is the filename of the text file to be used for training the Wordrank model. 54 Expects file to contain space-separated tokens in a single line 55 `out_path` is the path to directory which will be created to save embeddings and training data. 56 `size` is the dimensionality of the feature vectors. 57 `window` is the number of context words to the left (and to the right, if symmetric = 1). 
58 `symmetric` if 0, only use left context words, else use left and right both. 59 `min_count` = ignore all words with total frequency lower than this. 60 `max_vocab_size` upper bound on vocabulary size, i.e. keep the <int> most frequent words. Default is 0 for no limit. 61 `sgd_num` number of SGD taken for each data point. 62 `lrate` is the learning rate (too high diverges, give Nan). 63 `period` is the period of xi variable updates 64 `iter` = number of iterations (epochs) over the corpus. 65 `epsilon` is the power scaling value for weighting function. 66 `dump_period` is the period after which embeddings should be dumped. 67 `reg` is the value of regularization parameter. 68 `alpha` is the alpha parameter of gamma distribution. 69 `beta` is the beta parameter of gamma distribution. 70 `loss` = name of the loss (logistic, hinge). 71 `memory` = soft limit for memory consumption, in GB. 72 `cleanup_files` if True, delete directory and files used by this wrapper, setting to False can be useful for debugging 73 `sorted_vocab` = if 1 (default), sort the vocabulary by descending frequency before assigning word indexes. 74 `ensemble` = 0 (default), use ensemble of word and context vectors 75 """ 76 77 meta_data_path = 'matrix.meta' 78 vocab_file = 'vocab.txt' 79 temp_vocab_file = 'tempvocab.txt' 80 cooccurrence_file = 'cooccurrence' 81 cooccurrence_shuf_file = 'wiki.toy' 82 meta_file = 'meta' 83 84 # prepare training data (cooccurrence matrix and vocab) 85 model_dir = os.path.join(wr_path, out_path) 86 meta_dir = os.path.join(model_dir, 'meta') 87 os.makedirs(meta_dir) 88 logger.info("Dumped data will be stored in '%s'", model_dir) 89 copyfile(corpus_file, os.path.join(meta_dir, corpus_file.split('/')[-1])) 90 os.chdir(meta_dir) 91 92 cmd_vocab_count = ['../../glove/vocab_count', '-min-count', str(min_count), '-max-vocab', str(max_vocab_size)] 93 cmd_cooccurence_count = ['../../glove/cooccur', '-memory', str(memory), '-vocab-file', temp_vocab_file, '-window-size', str(window), '-symmetric', str(symmetric)] 94 cmd_shuffle_cooccurences = ['../../glove/shuffle', '-memory', str(memory)] 95 cmd_del_vocab_freq = ['cut', '-d', " ", '-f', '1', temp_vocab_file] 96 97 commands = [cmd_vocab_count, cmd_cooccurence_count, cmd_shuffle_cooccurences] 98 logger.info("Prepare training data using glove code '%s'", commands) 99 input_fnames = [corpus_file.split('/')[-1], corpus_file.split('/')[-1], cooccurrence_file] 100 output_fnames = [temp_vocab_file, cooccurrence_file, cooccurrence_shuf_file] 101 102 for command, input_fname, output_fname in zip(commands, input_fnames, output_fnames): 103 with smart_open(input_fname, 'rb') as r: 104 with smart_open(output_fname, 'wb') as w: 105 utils.check_output(w, args=command, stdin=r) 106 with smart_open(vocab_file, 'wb') as w: 107 utils.check_output(w, args=cmd_del_vocab_freq) 108 109 with smart_open(vocab_file, 'rb') as f: 110 numwords = sum(1 for line in f) 111 with smart_open(cooccurrence_shuf_file, 'rb') as f: 112 numlines = sum(1 for line in f) 113 with smart_open(meta_file, 'wb') as f: 114 meta_info = "{0} {1}\n{2} {3}\n{4} {5}".format(numwords, numwords, numlines, cooccurrence_shuf_file, numwords, vocab_file) 115 f.write(meta_info.encode('utf-8')) 116 117 wr_args = { 118 'path': 'meta', 119 'nthread': multiprocessing.cpu_count(), 120 'sgd_num': sgd_num, 121 'lrate': lrate, 122 'period': period, 123 'iter': iter, 124 'epsilon': epsilon, 125 'dump_prefix': 'model', 126 'dump_period': dump_period, 127 'dim': size, 128 'reg': reg, 129 'alpha': alpha, 130 'beta': beta, 
131 'loss': loss 132 } 133 134 os.chdir('..') 135 # run wordrank executable with wr_args 136 cmd = ['mpirun', '-np', '1', '../wordrank'] 137 for option, value in wr_args.items(): 138 cmd.append("--%s" % option) 139 cmd.append(str(value)) 140 logger.info("Running wordrank binary '%s'", cmd) 141 output = utils.check_output(args=cmd) 142 143 # use embeddings from max. iteration's dump 144 max_iter_dump = iter / dump_period * dump_period - 1 145 copyfile('model_word_%d.txt' % max_iter_dump, 'wordrank.words') 146 copyfile('model_context_%d.txt' % max_iter_dump, 'wordrank.contexts') 147 model = cls.load_wordrank_model('wordrank.words', os.path.join('meta', vocab_file), 'wordrank.contexts', sorted_vocab, ensemble) 148 os.chdir('../..') 149 150 if cleanup_files: 151 rmtree(model_dir) 152 return model 153 154 @classmethod 155 def load_wordrank_model(cls, model_file, vocab_file=None, context_file=None, sorted_vocab=1, ensemble=1): 156 glove2word2vec(model_file, model_file+'.w2vformat') 157 model = cls.load_word2vec_format('%s.w2vformat' % model_file) 158 if ensemble and context_file: 159 model.ensemble_embedding(model_file, context_file) 160 if sorted_vocab and vocab_file: 161 model.sort_embeddings(vocab_file) 162 return model 163 164 def sort_embeddings(self, vocab_file): 165 """Sort embeddings according to word frequency.""" 166 counts = {} 167 vocab_size = len(self.vocab) 168 prev_syn0 = copy.deepcopy(self.syn0) 169 prev_vocab = copy.deepcopy(self.vocab) 170 self.index2word = [] 171 172 # sort embeddings using frequency sorted vocab file in wordrank 173 with utils.smart_open(vocab_file) as fin: 174 for index, line in enumerate(fin): 175 word, count = utils.to_unicode(line).strip(), vocab_size - index 176 # store word with it's count in a dict 177 counts[word] = int(count) 178 # build new index2word with frequency sorted words 179 self.index2word.append(word) 180 assert len(self.index2word) == vocab_size, 'mismatch between vocab sizes' 181 182 for word_id, word in enumerate(self.index2word): 183 self.syn0[word_id] = prev_syn0[prev_vocab[word].index] 184 self.vocab[word].index = word_id 185 self.vocab[word].count = counts[word] 186 187 def ensemble_embedding(self, word_embedding, context_embedding): 188 """Replace syn0 with the sum of context and word embeddings.""" 189 glove2word2vec(context_embedding, context_embedding+'.w2vformat') 190 w_emb = KeyedVectors.load_word2vec_format('%s.w2vformat' % word_embedding) 191 c_emb = KeyedVectors.load_word2vec_format('%s.w2vformat' % context_embedding) 192 # compare vocab words using keys of dict vocab 193 assert set(w_emb.vocab) == set(c_emb.vocab), 'Vocabs are not same for both embeddings' 194 195 # sort context embedding to have words in same order as word embedding 196 prev_c_emb = copy.deepcopy(c_emb.syn0) 197 for word_id, word in enumerate(w_emb.index2word): 198 c_emb.syn0[word_id] = prev_c_emb[c_emb.vocab[word].index] 199 # add vectors of the two embeddings 200 new_emb = w_emb.syn0 + c_emb.syn0 201 self.syn0 = new_emb 202 return new_emb 203 204 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gensim/models/wrappers/wordrank.py b/gensim/models/wrappers/wordrank.py --- a/gensim/models/wrappers/wordrank.py +++ b/gensim/models/wrappers/wordrank.py @@ -46,7 +46,7 @@ @classmethod def train(cls, wr_path, corpus_file, out_path, size=100, window=15, symmetric=1, min_count=5, max_vocab_size=0, - sgd_num=100, lrate=0.001, period=10, iter=91, epsilon=0.75, dump_period=10, reg=0, alpha=100, + sgd_num=100, lrate=0.001, period=10, iter=90, epsilon=0.75, dump_period=10, reg=0, alpha=100, beta=99, loss='hinge', memory=4.0, cleanup_files=True, sorted_vocab=1, ensemble=0): """ `wr_path` is the path to the Wordrank directory. @@ -113,6 +113,11 @@ with smart_open(meta_file, 'wb') as f: meta_info = "{0} {1}\n{2} {3}\n{4} {5}".format(numwords, numwords, numlines, cooccurrence_shuf_file, numwords, vocab_file) f.write(meta_info.encode('utf-8')) + + if iter % dump_period == 0: + iter += 1 + else: + logger.warning('Resultant embedding would be from %d iteration', iter - iter % dump_period) wr_args = { 'path': 'meta', @@ -141,7 +146,7 @@ output = utils.check_output(args=cmd) # use embeddings from max. iteration's dump - max_iter_dump = iter / dump_period * dump_period - 1 + max_iter_dump = iter - iter % dump_period copyfile('model_word_%d.txt' % max_iter_dump, 'wordrank.words') copyfile('model_context_%d.txt' % max_iter_dump, 'wordrank.contexts') model = cls.load_wordrank_model('wordrank.words', os.path.join('meta', vocab_file), 'wordrank.contexts', sorted_vocab, ensemble)
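The patch above changes two things: it nudges `iter` forward by one when it lands exactly on a dump boundary, and it computes `max_iter_dump` as `iter - iter % dump_period`. Below is a minimal sketch of the arithmetic for the parameters reported in the issue (`iter=100`, `dump_period=5`); it only reproduces the expressions from the issue and the diff and makes no claim about which dump files the wordrank binary itself writes.

```python
from __future__ import division  # wordrank.py imports this, so / is true division

iter, dump_period = 100, 5  # parameter names mirror the wrapper; values are from the issue

# Original expression quoted in the issue: evaluates to 99, so the wrapper
# looked for model_word_99.txt, the missing file in the reported traceback.
old_max_iter_dump = iter / dump_period * dump_period - 1   # 99.0

# Expressions from the patch above: bump iter past the dump boundary, then
# round down to the last multiple of dump_period.
if iter % dump_period == 0:
    iter += 1                                  # 100 -> 101
new_max_iter_dump = iter - iter % dump_period  # 100

print(old_max_iter_dump, new_max_iter_dump)    # 99.0 100
```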
{"golden_diff": "diff --git a/gensim/models/wrappers/wordrank.py b/gensim/models/wrappers/wordrank.py\n--- a/gensim/models/wrappers/wordrank.py\n+++ b/gensim/models/wrappers/wordrank.py\n@@ -46,7 +46,7 @@\n \n @classmethod\n def train(cls, wr_path, corpus_file, out_path, size=100, window=15, symmetric=1, min_count=5, max_vocab_size=0,\n- sgd_num=100, lrate=0.001, period=10, iter=91, epsilon=0.75, dump_period=10, reg=0, alpha=100,\n+ sgd_num=100, lrate=0.001, period=10, iter=90, epsilon=0.75, dump_period=10, reg=0, alpha=100,\n beta=99, loss='hinge', memory=4.0, cleanup_files=True, sorted_vocab=1, ensemble=0):\n \"\"\"\n `wr_path` is the path to the Wordrank directory.\n@@ -113,6 +113,11 @@\n with smart_open(meta_file, 'wb') as f:\n meta_info = \"{0} {1}\\n{2} {3}\\n{4} {5}\".format(numwords, numwords, numlines, cooccurrence_shuf_file, numwords, vocab_file)\n f.write(meta_info.encode('utf-8'))\n+ \n+ if iter % dump_period == 0:\n+ iter += 1\n+ else:\n+ logger.warning('Resultant embedding would be from %d iteration', iter - iter % dump_period)\n \n wr_args = {\n 'path': 'meta',\n@@ -141,7 +146,7 @@\n output = utils.check_output(args=cmd)\n \n # use embeddings from max. iteration's dump\n- max_iter_dump = iter / dump_period * dump_period - 1\n+ max_iter_dump = iter - iter % dump_period\n copyfile('model_word_%d.txt' % max_iter_dump, 'wordrank.words')\n copyfile('model_context_%d.txt' % max_iter_dump, 'wordrank.contexts')\n model = cls.load_wordrank_model('wordrank.words', os.path.join('meta', vocab_file), 'wordrank.contexts', sorted_vocab, ensemble)\n", "issue": "Wrong calculation for max_iter_dump\nIn\r\nhttps://github.com/RaRe-Technologies/gensim/blob/develop/gensim/models/wrappers/wordrank.py#L144\r\nShouldnt this line `max_iter_dump = iter / dump_period * dump_period - 1` just be \r\n`max_iter_dump = iter - dump_period` ?\r\n\r\nTo reproduce try these parameters:\r\n`model = Wordrank.train(wr_path, data, out_dir, iter=100, dump_period=5)`\r\nIt will error out with -\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/ajkale/anaconda2/envs/wordrank/lib/python2.7/site-packages/gensim/models/wrappers/wordrank.py\", line 146, in train\r\n copyfile('model_word_%d.txt' % max_iter_dump, 'wordrank.words')\r\n File \"/home/ajkale/anaconda2/envs/wordrank/lib/python2.7/shutil.py\", line 82, in copyfile\r\n with open(src, 'rb') as fsrc:\r\nIOError: [Errno 2] No such file or directory: 'model_word_99.txt'\r\n```\r\n\r\nMainly because `max_iter_dump = iter / dump_period * dump_period - 1` calculates max_iter_dump=99 instead of 95.\n", "before_files": [{"content": "# Copyright (C) 2017 Parul Sethi <[email protected]>\n# Copyright (C) 2017 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nPython wrapper around word representation learning from Wordrank.\nThe wrapped model can NOT be updated with new documents for online training -- use gensim's\n`Word2Vec` for that.\n\nExample:\n>>> model = gensim.models.wrappers.Wordrank('/Users/dummy/wordrank', corpus_file='text8', out_path='wr_model')\n>>> print model[word] # prints vector for given words\n\n.. [1] https://bitbucket.org/shihaoji/wordrank/\n.. 
[2] https://arxiv.org/pdf/1506.02761v3.pdf\n\"\"\"\n\nfrom __future__ import division\n\nimport logging\nimport os\nimport sys\nimport copy\nimport multiprocessing\n\nimport numpy as np\n\nfrom gensim import utils\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom gensim.scripts.glove2word2vec import glove2word2vec\n\nfrom six import string_types\nfrom smart_open import smart_open\nfrom shutil import copyfile, rmtree\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Wordrank(KeyedVectors):\n \"\"\"\n Class for word vector training using Wordrank. Communication between Wordrank and Python\n takes place by working with data files on disk and calling the Wordrank binary and glove's\n helper binaries (for preparing training data) with subprocess module.\n \"\"\"\n \n @classmethod\n def train(cls, wr_path, corpus_file, out_path, size=100, window=15, symmetric=1, min_count=5, max_vocab_size=0,\n sgd_num=100, lrate=0.001, period=10, iter=91, epsilon=0.75, dump_period=10, reg=0, alpha=100,\n beta=99, loss='hinge', memory=4.0, cleanup_files=True, sorted_vocab=1, ensemble=0):\n \"\"\"\n `wr_path` is the path to the Wordrank directory.\n `corpus_file` is the filename of the text file to be used for training the Wordrank model.\n Expects file to contain space-separated tokens in a single line\n `out_path` is the path to directory which will be created to save embeddings and training data.\n `size` is the dimensionality of the feature vectors.\n `window` is the number of context words to the left (and to the right, if symmetric = 1).\n `symmetric` if 0, only use left context words, else use left and right both.\n `min_count` = ignore all words with total frequency lower than this.\n `max_vocab_size` upper bound on vocabulary size, i.e. keep the <int> most frequent words. 
Default is 0 for no limit.\n `sgd_num` number of SGD taken for each data point.\n `lrate` is the learning rate (too high diverges, give Nan).\n `period` is the period of xi variable updates\n `iter` = number of iterations (epochs) over the corpus.\n `epsilon` is the power scaling value for weighting function.\n `dump_period` is the period after which embeddings should be dumped.\n `reg` is the value of regularization parameter.\n `alpha` is the alpha parameter of gamma distribution.\n `beta` is the beta parameter of gamma distribution.\n `loss` = name of the loss (logistic, hinge).\n `memory` = soft limit for memory consumption, in GB.\n `cleanup_files` if True, delete directory and files used by this wrapper, setting to False can be useful for debugging\n `sorted_vocab` = if 1 (default), sort the vocabulary by descending frequency before assigning word indexes.\n `ensemble` = 0 (default), use ensemble of word and context vectors\n \"\"\"\n\n meta_data_path = 'matrix.meta'\n vocab_file = 'vocab.txt'\n temp_vocab_file = 'tempvocab.txt'\n cooccurrence_file = 'cooccurrence'\n cooccurrence_shuf_file = 'wiki.toy'\n meta_file = 'meta'\n\n # prepare training data (cooccurrence matrix and vocab)\n model_dir = os.path.join(wr_path, out_path)\n meta_dir = os.path.join(model_dir, 'meta')\n os.makedirs(meta_dir)\n logger.info(\"Dumped data will be stored in '%s'\", model_dir)\n copyfile(corpus_file, os.path.join(meta_dir, corpus_file.split('/')[-1]))\n os.chdir(meta_dir)\n\n cmd_vocab_count = ['../../glove/vocab_count', '-min-count', str(min_count), '-max-vocab', str(max_vocab_size)]\n cmd_cooccurence_count = ['../../glove/cooccur', '-memory', str(memory), '-vocab-file', temp_vocab_file, '-window-size', str(window), '-symmetric', str(symmetric)]\n cmd_shuffle_cooccurences = ['../../glove/shuffle', '-memory', str(memory)]\n cmd_del_vocab_freq = ['cut', '-d', \" \", '-f', '1', temp_vocab_file]\n\n commands = [cmd_vocab_count, cmd_cooccurence_count, cmd_shuffle_cooccurences]\n logger.info(\"Prepare training data using glove code '%s'\", commands)\n input_fnames = [corpus_file.split('/')[-1], corpus_file.split('/')[-1], cooccurrence_file]\n output_fnames = [temp_vocab_file, cooccurrence_file, cooccurrence_shuf_file]\n\n for command, input_fname, output_fname in zip(commands, input_fnames, output_fnames):\n with smart_open(input_fname, 'rb') as r:\n with smart_open(output_fname, 'wb') as w:\n utils.check_output(w, args=command, stdin=r)\n with smart_open(vocab_file, 'wb') as w:\n utils.check_output(w, args=cmd_del_vocab_freq)\n\n with smart_open(vocab_file, 'rb') as f:\n numwords = sum(1 for line in f)\n with smart_open(cooccurrence_shuf_file, 'rb') as f:\n numlines = sum(1 for line in f)\n with smart_open(meta_file, 'wb') as f:\n meta_info = \"{0} {1}\\n{2} {3}\\n{4} {5}\".format(numwords, numwords, numlines, cooccurrence_shuf_file, numwords, vocab_file)\n f.write(meta_info.encode('utf-8'))\n\n wr_args = {\n 'path': 'meta',\n 'nthread': multiprocessing.cpu_count(),\n 'sgd_num': sgd_num,\n 'lrate': lrate,\n 'period': period,\n 'iter': iter,\n 'epsilon': epsilon,\n 'dump_prefix': 'model',\n 'dump_period': dump_period,\n 'dim': size,\n 'reg': reg,\n 'alpha': alpha,\n 'beta': beta,\n 'loss': loss\n }\n\n os.chdir('..')\n # run wordrank executable with wr_args\n cmd = ['mpirun', '-np', '1', '../wordrank']\n for option, value in wr_args.items():\n cmd.append(\"--%s\" % option)\n cmd.append(str(value))\n logger.info(\"Running wordrank binary '%s'\", cmd)\n output = utils.check_output(args=cmd)\n\n # use 
embeddings from max. iteration's dump\n max_iter_dump = iter / dump_period * dump_period - 1\n copyfile('model_word_%d.txt' % max_iter_dump, 'wordrank.words')\n copyfile('model_context_%d.txt' % max_iter_dump, 'wordrank.contexts')\n model = cls.load_wordrank_model('wordrank.words', os.path.join('meta', vocab_file), 'wordrank.contexts', sorted_vocab, ensemble)\n os.chdir('../..')\n\n if cleanup_files:\n rmtree(model_dir)\n return model\n\n @classmethod\n def load_wordrank_model(cls, model_file, vocab_file=None, context_file=None, sorted_vocab=1, ensemble=1):\n glove2word2vec(model_file, model_file+'.w2vformat')\n model = cls.load_word2vec_format('%s.w2vformat' % model_file)\n if ensemble and context_file:\n model.ensemble_embedding(model_file, context_file)\n if sorted_vocab and vocab_file:\n model.sort_embeddings(vocab_file)\n return model\n\n def sort_embeddings(self, vocab_file):\n \"\"\"Sort embeddings according to word frequency.\"\"\"\n counts = {}\n vocab_size = len(self.vocab)\n prev_syn0 = copy.deepcopy(self.syn0)\n prev_vocab = copy.deepcopy(self.vocab)\n self.index2word = []\n\n # sort embeddings using frequency sorted vocab file in wordrank\n with utils.smart_open(vocab_file) as fin:\n for index, line in enumerate(fin):\n word, count = utils.to_unicode(line).strip(), vocab_size - index\n # store word with it's count in a dict\n counts[word] = int(count)\n # build new index2word with frequency sorted words\n self.index2word.append(word)\n assert len(self.index2word) == vocab_size, 'mismatch between vocab sizes'\n\n for word_id, word in enumerate(self.index2word):\n self.syn0[word_id] = prev_syn0[prev_vocab[word].index]\n self.vocab[word].index = word_id\n self.vocab[word].count = counts[word]\n\n def ensemble_embedding(self, word_embedding, context_embedding):\n \"\"\"Replace syn0 with the sum of context and word embeddings.\"\"\"\n glove2word2vec(context_embedding, context_embedding+'.w2vformat')\n w_emb = KeyedVectors.load_word2vec_format('%s.w2vformat' % word_embedding)\n c_emb = KeyedVectors.load_word2vec_format('%s.w2vformat' % context_embedding)\n # compare vocab words using keys of dict vocab\n assert set(w_emb.vocab) == set(c_emb.vocab), 'Vocabs are not same for both embeddings'\n\n # sort context embedding to have words in same order as word embedding\n prev_c_emb = copy.deepcopy(c_emb.syn0)\n for word_id, word in enumerate(w_emb.index2word):\n c_emb.syn0[word_id] = prev_c_emb[c_emb.vocab[word].index]\n # add vectors of the two embeddings\n new_emb = w_emb.syn0 + c_emb.syn0\n self.syn0 = new_emb\n return new_emb\n\n", "path": "gensim/models/wrappers/wordrank.py"}], "after_files": [{"content": "# Copyright (C) 2017 Parul Sethi <[email protected]>\n# Copyright (C) 2017 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nPython wrapper around word representation learning from Wordrank.\nThe wrapped model can NOT be updated with new documents for online training -- use gensim's\n`Word2Vec` for that.\n\nExample:\n>>> model = gensim.models.wrappers.Wordrank('/Users/dummy/wordrank', corpus_file='text8', out_path='wr_model')\n>>> print model[word] # prints vector for given words\n\n.. [1] https://bitbucket.org/shihaoji/wordrank/\n.. 
[2] https://arxiv.org/pdf/1506.02761v3.pdf\n\"\"\"\n\nfrom __future__ import division\n\nimport logging\nimport os\nimport sys\nimport copy\nimport multiprocessing\n\nimport numpy as np\n\nfrom gensim import utils\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom gensim.scripts.glove2word2vec import glove2word2vec\n\nfrom six import string_types\nfrom smart_open import smart_open\nfrom shutil import copyfile, rmtree\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Wordrank(KeyedVectors):\n \"\"\"\n Class for word vector training using Wordrank. Communication between Wordrank and Python\n takes place by working with data files on disk and calling the Wordrank binary and glove's\n helper binaries (for preparing training data) with subprocess module.\n \"\"\"\n \n @classmethod\n def train(cls, wr_path, corpus_file, out_path, size=100, window=15, symmetric=1, min_count=5, max_vocab_size=0,\n sgd_num=100, lrate=0.001, period=10, iter=90, epsilon=0.75, dump_period=10, reg=0, alpha=100,\n beta=99, loss='hinge', memory=4.0, cleanup_files=True, sorted_vocab=1, ensemble=0):\n \"\"\"\n `wr_path` is the path to the Wordrank directory.\n `corpus_file` is the filename of the text file to be used for training the Wordrank model.\n Expects file to contain space-separated tokens in a single line\n `out_path` is the path to directory which will be created to save embeddings and training data.\n `size` is the dimensionality of the feature vectors.\n `window` is the number of context words to the left (and to the right, if symmetric = 1).\n `symmetric` if 0, only use left context words, else use left and right both.\n `min_count` = ignore all words with total frequency lower than this.\n `max_vocab_size` upper bound on vocabulary size, i.e. keep the <int> most frequent words. 
Default is 0 for no limit.\n `sgd_num` number of SGD taken for each data point.\n `lrate` is the learning rate (too high diverges, give Nan).\n `period` is the period of xi variable updates\n `iter` = number of iterations (epochs) over the corpus.\n `epsilon` is the power scaling value for weighting function.\n `dump_period` is the period after which embeddings should be dumped.\n `reg` is the value of regularization parameter.\n `alpha` is the alpha parameter of gamma distribution.\n `beta` is the beta parameter of gamma distribution.\n `loss` = name of the loss (logistic, hinge).\n `memory` = soft limit for memory consumption, in GB.\n `cleanup_files` if True, delete directory and files used by this wrapper, setting to False can be useful for debugging\n `sorted_vocab` = if 1 (default), sort the vocabulary by descending frequency before assigning word indexes.\n `ensemble` = 0 (default), use ensemble of word and context vectors\n \"\"\"\n\n meta_data_path = 'matrix.meta'\n vocab_file = 'vocab.txt'\n temp_vocab_file = 'tempvocab.txt'\n cooccurrence_file = 'cooccurrence'\n cooccurrence_shuf_file = 'wiki.toy'\n meta_file = 'meta'\n\n # prepare training data (cooccurrence matrix and vocab)\n model_dir = os.path.join(wr_path, out_path)\n meta_dir = os.path.join(model_dir, 'meta')\n os.makedirs(meta_dir)\n logger.info(\"Dumped data will be stored in '%s'\", model_dir)\n copyfile(corpus_file, os.path.join(meta_dir, corpus_file.split('/')[-1]))\n os.chdir(meta_dir)\n\n cmd_vocab_count = ['../../glove/vocab_count', '-min-count', str(min_count), '-max-vocab', str(max_vocab_size)]\n cmd_cooccurence_count = ['../../glove/cooccur', '-memory', str(memory), '-vocab-file', temp_vocab_file, '-window-size', str(window), '-symmetric', str(symmetric)]\n cmd_shuffle_cooccurences = ['../../glove/shuffle', '-memory', str(memory)]\n cmd_del_vocab_freq = ['cut', '-d', \" \", '-f', '1', temp_vocab_file]\n\n commands = [cmd_vocab_count, cmd_cooccurence_count, cmd_shuffle_cooccurences]\n logger.info(\"Prepare training data using glove code '%s'\", commands)\n input_fnames = [corpus_file.split('/')[-1], corpus_file.split('/')[-1], cooccurrence_file]\n output_fnames = [temp_vocab_file, cooccurrence_file, cooccurrence_shuf_file]\n\n for command, input_fname, output_fname in zip(commands, input_fnames, output_fnames):\n with smart_open(input_fname, 'rb') as r:\n with smart_open(output_fname, 'wb') as w:\n utils.check_output(w, args=command, stdin=r)\n with smart_open(vocab_file, 'wb') as w:\n utils.check_output(w, args=cmd_del_vocab_freq)\n\n with smart_open(vocab_file, 'rb') as f:\n numwords = sum(1 for line in f)\n with smart_open(cooccurrence_shuf_file, 'rb') as f:\n numlines = sum(1 for line in f)\n with smart_open(meta_file, 'wb') as f:\n meta_info = \"{0} {1}\\n{2} {3}\\n{4} {5}\".format(numwords, numwords, numlines, cooccurrence_shuf_file, numwords, vocab_file)\n f.write(meta_info.encode('utf-8'))\n \n if iter % dump_period == 0:\n iter += 1\n else:\n logger.warning('Resultant embedding would be from %d iteration', iter - iter % dump_period)\n\n wr_args = {\n 'path': 'meta',\n 'nthread': multiprocessing.cpu_count(),\n 'sgd_num': sgd_num,\n 'lrate': lrate,\n 'period': period,\n 'iter': iter,\n 'epsilon': epsilon,\n 'dump_prefix': 'model',\n 'dump_period': dump_period,\n 'dim': size,\n 'reg': reg,\n 'alpha': alpha,\n 'beta': beta,\n 'loss': loss\n }\n\n os.chdir('..')\n # run wordrank executable with wr_args\n cmd = ['mpirun', '-np', '1', '../wordrank']\n for option, value in wr_args.items():\n cmd.append(\"--%s\" 
% option)\n cmd.append(str(value))\n logger.info(\"Running wordrank binary '%s'\", cmd)\n output = utils.check_output(args=cmd)\n\n # use embeddings from max. iteration's dump\n max_iter_dump = iter - iter % dump_period\n copyfile('model_word_%d.txt' % max_iter_dump, 'wordrank.words')\n copyfile('model_context_%d.txt' % max_iter_dump, 'wordrank.contexts')\n model = cls.load_wordrank_model('wordrank.words', os.path.join('meta', vocab_file), 'wordrank.contexts', sorted_vocab, ensemble)\n os.chdir('../..')\n\n if cleanup_files:\n rmtree(model_dir)\n return model\n\n @classmethod\n def load_wordrank_model(cls, model_file, vocab_file=None, context_file=None, sorted_vocab=1, ensemble=1):\n glove2word2vec(model_file, model_file+'.w2vformat')\n model = cls.load_word2vec_format('%s.w2vformat' % model_file)\n if ensemble and context_file:\n model.ensemble_embedding(model_file, context_file)\n if sorted_vocab and vocab_file:\n model.sort_embeddings(vocab_file)\n return model\n\n def sort_embeddings(self, vocab_file):\n \"\"\"Sort embeddings according to word frequency.\"\"\"\n counts = {}\n vocab_size = len(self.vocab)\n prev_syn0 = copy.deepcopy(self.syn0)\n prev_vocab = copy.deepcopy(self.vocab)\n self.index2word = []\n\n # sort embeddings using frequency sorted vocab file in wordrank\n with utils.smart_open(vocab_file) as fin:\n for index, line in enumerate(fin):\n word, count = utils.to_unicode(line).strip(), vocab_size - index\n # store word with it's count in a dict\n counts[word] = int(count)\n # build new index2word with frequency sorted words\n self.index2word.append(word)\n assert len(self.index2word) == vocab_size, 'mismatch between vocab sizes'\n\n for word_id, word in enumerate(self.index2word):\n self.syn0[word_id] = prev_syn0[prev_vocab[word].index]\n self.vocab[word].index = word_id\n self.vocab[word].count = counts[word]\n\n def ensemble_embedding(self, word_embedding, context_embedding):\n \"\"\"Replace syn0 with the sum of context and word embeddings.\"\"\"\n glove2word2vec(context_embedding, context_embedding+'.w2vformat')\n w_emb = KeyedVectors.load_word2vec_format('%s.w2vformat' % word_embedding)\n c_emb = KeyedVectors.load_word2vec_format('%s.w2vformat' % context_embedding)\n # compare vocab words using keys of dict vocab\n assert set(w_emb.vocab) == set(c_emb.vocab), 'Vocabs are not same for both embeddings'\n\n # sort context embedding to have words in same order as word embedding\n prev_c_emb = copy.deepcopy(c_emb.syn0)\n for word_id, word in enumerate(w_emb.index2word):\n c_emb.syn0[word_id] = prev_c_emb[c_emb.vocab[word].index]\n # add vectors of the two embeddings\n new_emb = w_emb.syn0 + c_emb.syn0\n self.syn0 = new_emb\n return new_emb\n\n", "path": "gensim/models/wrappers/wordrank.py"}]}
num_tokens: 3,401
num_tokens_diff: 548
problem_id: gh_patches_debug_17437
source: rasdani/github-patches
task_type: git_diff
in_source_id: GoogleCloudPlatform__PerfKitBenchmarker-922
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- iperf benchmark has race condition in saving process id It's possible for iperf to save the wrong server id if two copies of the benchmark are running on the same machine. Instead of using `pgrep -n`, use `$!` to get the process id. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `perfkitbenchmarker/linux_benchmarks/iperf_benchmark.py` Content: ``` 1 # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Runs plain Iperf. 16 17 Docs: 18 http://iperf.fr/ 19 20 Runs Iperf to collect network throughput. 21 """ 22 23 import logging 24 import re 25 26 from perfkitbenchmarker import configs 27 from perfkitbenchmarker import flags 28 from perfkitbenchmarker import sample 29 from perfkitbenchmarker import vm_util 30 31 flags.DEFINE_integer('iperf_sending_thread_count', 1, 32 'Number of connections to make to the ' 33 'server for sending traffic.', 34 lower_bound=1) 35 flags.DEFINE_integer('iperf_runtime_in_seconds', 60, 36 'Number of seconds to run iperf.', 37 lower_bound=1) 38 39 FLAGS = flags.FLAGS 40 41 BENCHMARK_NAME = 'iperf' 42 BENCHMARK_CONFIG = """ 43 iperf: 44 description: Run iperf 45 vm_groups: 46 vm_1: 47 vm_spec: *default_single_core 48 vm_2: 49 vm_spec: *default_single_core 50 """ 51 52 IPERF_PORT = 20000 53 IPERF_RETRIES = 5 54 55 56 def GetConfig(user_config): 57 return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME) 58 59 60 def Prepare(benchmark_spec): 61 """Install iperf and start the server on all machines. 62 63 Args: 64 benchmark_spec: The benchmark specification. Contains all data that is 65 required to run the benchmark. 66 """ 67 vms = benchmark_spec.vms 68 if len(vms) != 2: 69 raise ValueError( 70 'iperf benchmark requires exactly two machines, found {0}'.format(len( 71 vms))) 72 73 for vm in vms: 74 vm.Install('iperf') 75 if vm_util.ShouldRunOnExternalIpAddress(): 76 vm.AllowPort(IPERF_PORT) 77 vm.RemoteCommand('nohup iperf --server --port %s &> /dev/null &' % 78 IPERF_PORT) 79 stdout, _ = vm.RemoteCommand('pgrep -n iperf') 80 # TODO store this in a better place once we have a better place 81 vm.iperf_server_pid = stdout.strip() 82 83 84 @vm_util.Retry(max_retries=IPERF_RETRIES) 85 def _RunIperf(sending_vm, receiving_vm, receiving_ip_address, ip_type): 86 """Run iperf using sending 'vm' to connect to 'ip_address'. 87 88 Args: 89 sending_vm: The VM sending traffic. 90 receiving_vm: The VM receiving traffic. 91 receiving_ip_address: The IP address of the iperf server (ie the receiver). 92 ip_type: The IP type of 'ip_address' (e.g. 'internal', 'external') 93 Returns: 94 A Sample. 
95 """ 96 iperf_cmd = ('iperf --client %s --port %s --format m --time %s -P %s' % 97 (receiving_ip_address, IPERF_PORT, 98 FLAGS.iperf_runtime_in_seconds, 99 FLAGS.iperf_sending_thread_count)) 100 # the additional time on top of the iperf runtime is to account for the 101 # time it takes for the iperf process to start and exit 102 timeout_buffer = 30 + FLAGS.iperf_sending_thread_count 103 stdout, _ = sending_vm.RemoteCommand(iperf_cmd, should_log=True, 104 timeout=FLAGS.iperf_runtime_in_seconds + 105 timeout_buffer) 106 107 # Example output from iperf that needs to be parsed 108 # STDOUT: ------------------------------------------------------------ 109 # Client connecting to 10.237.229.201, TCP port 5001 110 # TCP window size: 0.04 MByte (default) 111 # ------------------------------------------------------------ 112 # [ 6] local 10.76.234.115 port 53527 connected with 10.237.229.201 port 5001 113 # [ 3] local 10.76.234.115 port 53524 connected with 10.237.229.201 port 5001 114 # [ 4] local 10.76.234.115 port 53525 connected with 10.237.229.201 port 5001 115 # [ 5] local 10.76.234.115 port 53526 connected with 10.237.229.201 port 5001 116 # [ ID] Interval Transfer Bandwidth 117 # [ 4] 0.0-60.0 sec 3730 MBytes 521.1 Mbits/sec 118 # [ 5] 0.0-60.0 sec 3499 MBytes 489 Mbits/sec 119 # [ 6] 0.0-60.0 sec 3044 MBytes 425 Mbits/sec 120 # [ 3] 0.0-60.0 sec 3738 MBytes 522 Mbits/sec 121 # [SUM] 0.0-60.0 sec 14010 MBytes 1957 Mbits/sec 122 123 thread_values = re.findall(r'\[SUM].*\s+(\d+\.?\d*).Mbits/sec', stdout) 124 if not thread_values: 125 # If there is no sum you have try and figure out an estimate 126 # which happens when threads start at different times. The code 127 # below will tend to overestimate a bit. 128 thread_values = re.findall('\[.*\d+\].*\s+(\d+\.?\d*).Mbits/sec', stdout) 129 130 if len(thread_values) != FLAGS.iperf_sending_thread_count: 131 raise ValueError('Only %s out of %s iperf threads reported a' 132 ' throughput value.' % 133 (len(thread_values), FLAGS.iperf_sending_thread_count)) 134 135 total_throughput = 0.0 136 for value in thread_values: 137 total_throughput += float(value) 138 139 metadata = { 140 # The meta data defining the environment 141 'receiving_machine_type': receiving_vm.machine_type, 142 'receiving_zone': receiving_vm.zone, 143 'sending_machine_type': sending_vm.machine_type, 144 'sending_thread_count': FLAGS.iperf_sending_thread_count, 145 'sending_zone': sending_vm.zone, 146 'runtime_in_seconds': FLAGS.iperf_runtime_in_seconds, 147 'ip_type': ip_type 148 } 149 return sample.Sample('Throughput', total_throughput, 'Mbits/sec', metadata) 150 151 152 def Run(benchmark_spec): 153 """Run iperf on the target vm. 154 155 Args: 156 benchmark_spec: The benchmark specification. Contains all data that is 157 required to run the benchmark. 158 159 Returns: 160 A list of sample.Sample objects. 
161 """ 162 vms = benchmark_spec.vms 163 results = [] 164 165 logging.info('Iperf Results:') 166 167 # Send traffic in both directions 168 for sending_vm, receiving_vm in vms, reversed(vms): 169 # Send using external IP addresses 170 if vm_util.ShouldRunOnExternalIpAddress(): 171 results.append(_RunIperf(sending_vm, 172 receiving_vm, 173 receiving_vm.ip_address, 174 'external')) 175 176 # Send using internal IP addresses 177 if vm_util.ShouldRunOnInternalIpAddress(sending_vm, 178 receiving_vm): 179 results.append(_RunIperf(sending_vm, 180 receiving_vm, 181 receiving_vm.internal_ip, 182 'internal')) 183 184 return results 185 186 187 def Cleanup(benchmark_spec): 188 """Cleanup iperf on the target vm (by uninstalling). 189 190 Args: 191 benchmark_spec: The benchmark specification. Contains all data that is 192 required to run the benchmark. 193 """ 194 vms = benchmark_spec.vms 195 for vm in vms: 196 vm.RemoteCommand('kill -9 ' + vm.iperf_server_pid) 197 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/perfkitbenchmarker/linux_benchmarks/iperf_benchmark.py b/perfkitbenchmarker/linux_benchmarks/iperf_benchmark.py --- a/perfkitbenchmarker/linux_benchmarks/iperf_benchmark.py +++ b/perfkitbenchmarker/linux_benchmarks/iperf_benchmark.py @@ -74,9 +74,8 @@ vm.Install('iperf') if vm_util.ShouldRunOnExternalIpAddress(): vm.AllowPort(IPERF_PORT) - vm.RemoteCommand('nohup iperf --server --port %s &> /dev/null &' % - IPERF_PORT) - stdout, _ = vm.RemoteCommand('pgrep -n iperf') + stdout, _ = vm.RemoteCommand(('nohup iperf --server --port %s &> /dev/null' + '& echo $!') % IPERF_PORT) # TODO store this in a better place once we have a better place vm.iperf_server_pid = stdout.strip() @@ -193,4 +192,4 @@ """ vms = benchmark_spec.vms for vm in vms: - vm.RemoteCommand('kill -9 ' + vm.iperf_server_pid) + vm.RemoteCommand('kill -9 ' + vm.iperf_server_pid, ignore_failure=True)
{"golden_diff": "diff --git a/perfkitbenchmarker/linux_benchmarks/iperf_benchmark.py b/perfkitbenchmarker/linux_benchmarks/iperf_benchmark.py\n--- a/perfkitbenchmarker/linux_benchmarks/iperf_benchmark.py\n+++ b/perfkitbenchmarker/linux_benchmarks/iperf_benchmark.py\n@@ -74,9 +74,8 @@\n vm.Install('iperf')\n if vm_util.ShouldRunOnExternalIpAddress():\n vm.AllowPort(IPERF_PORT)\n- vm.RemoteCommand('nohup iperf --server --port %s &> /dev/null &' %\n- IPERF_PORT)\n- stdout, _ = vm.RemoteCommand('pgrep -n iperf')\n+ stdout, _ = vm.RemoteCommand(('nohup iperf --server --port %s &> /dev/null'\n+ '& echo $!') % IPERF_PORT)\n # TODO store this in a better place once we have a better place\n vm.iperf_server_pid = stdout.strip()\n \n@@ -193,4 +192,4 @@\n \"\"\"\n vms = benchmark_spec.vms\n for vm in vms:\n- vm.RemoteCommand('kill -9 ' + vm.iperf_server_pid)\n+ vm.RemoteCommand('kill -9 ' + vm.iperf_server_pid, ignore_failure=True)\n", "issue": "iperf benchmark has race condition in saving process id\nIt's possible for iperf to save the wrong server id if two copies of the benchmark are running on the same machine. Instead of using `pgrep -n`, use `$!` to get the process id.\n\n", "before_files": [{"content": "# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Runs plain Iperf.\n\nDocs:\nhttp://iperf.fr/\n\nRuns Iperf to collect network throughput.\n\"\"\"\n\nimport logging\nimport re\n\nfrom perfkitbenchmarker import configs\nfrom perfkitbenchmarker import flags\nfrom perfkitbenchmarker import sample\nfrom perfkitbenchmarker import vm_util\n\nflags.DEFINE_integer('iperf_sending_thread_count', 1,\n 'Number of connections to make to the '\n 'server for sending traffic.',\n lower_bound=1)\nflags.DEFINE_integer('iperf_runtime_in_seconds', 60,\n 'Number of seconds to run iperf.',\n lower_bound=1)\n\nFLAGS = flags.FLAGS\n\nBENCHMARK_NAME = 'iperf'\nBENCHMARK_CONFIG = \"\"\"\niperf:\n description: Run iperf\n vm_groups:\n vm_1:\n vm_spec: *default_single_core\n vm_2:\n vm_spec: *default_single_core\n\"\"\"\n\nIPERF_PORT = 20000\nIPERF_RETRIES = 5\n\n\ndef GetConfig(user_config):\n return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)\n\n\ndef Prepare(benchmark_spec):\n \"\"\"Install iperf and start the server on all machines.\n\n Args:\n benchmark_spec: The benchmark specification. 
Contains all data that is\n required to run the benchmark.\n \"\"\"\n vms = benchmark_spec.vms\n if len(vms) != 2:\n raise ValueError(\n 'iperf benchmark requires exactly two machines, found {0}'.format(len(\n vms)))\n\n for vm in vms:\n vm.Install('iperf')\n if vm_util.ShouldRunOnExternalIpAddress():\n vm.AllowPort(IPERF_PORT)\n vm.RemoteCommand('nohup iperf --server --port %s &> /dev/null &' %\n IPERF_PORT)\n stdout, _ = vm.RemoteCommand('pgrep -n iperf')\n # TODO store this in a better place once we have a better place\n vm.iperf_server_pid = stdout.strip()\n\n\n@vm_util.Retry(max_retries=IPERF_RETRIES)\ndef _RunIperf(sending_vm, receiving_vm, receiving_ip_address, ip_type):\n \"\"\"Run iperf using sending 'vm' to connect to 'ip_address'.\n\n Args:\n sending_vm: The VM sending traffic.\n receiving_vm: The VM receiving traffic.\n receiving_ip_address: The IP address of the iperf server (ie the receiver).\n ip_type: The IP type of 'ip_address' (e.g. 'internal', 'external')\n Returns:\n A Sample.\n \"\"\"\n iperf_cmd = ('iperf --client %s --port %s --format m --time %s -P %s' %\n (receiving_ip_address, IPERF_PORT,\n FLAGS.iperf_runtime_in_seconds,\n FLAGS.iperf_sending_thread_count))\n # the additional time on top of the iperf runtime is to account for the\n # time it takes for the iperf process to start and exit\n timeout_buffer = 30 + FLAGS.iperf_sending_thread_count\n stdout, _ = sending_vm.RemoteCommand(iperf_cmd, should_log=True,\n timeout=FLAGS.iperf_runtime_in_seconds +\n timeout_buffer)\n\n # Example output from iperf that needs to be parsed\n # STDOUT: ------------------------------------------------------------\n # Client connecting to 10.237.229.201, TCP port 5001\n # TCP window size: 0.04 MByte (default)\n # ------------------------------------------------------------\n # [ 6] local 10.76.234.115 port 53527 connected with 10.237.229.201 port 5001\n # [ 3] local 10.76.234.115 port 53524 connected with 10.237.229.201 port 5001\n # [ 4] local 10.76.234.115 port 53525 connected with 10.237.229.201 port 5001\n # [ 5] local 10.76.234.115 port 53526 connected with 10.237.229.201 port 5001\n # [ ID] Interval Transfer Bandwidth\n # [ 4] 0.0-60.0 sec 3730 MBytes 521.1 Mbits/sec\n # [ 5] 0.0-60.0 sec 3499 MBytes 489 Mbits/sec\n # [ 6] 0.0-60.0 sec 3044 MBytes 425 Mbits/sec\n # [ 3] 0.0-60.0 sec 3738 MBytes 522 Mbits/sec\n # [SUM] 0.0-60.0 sec 14010 MBytes 1957 Mbits/sec\n\n thread_values = re.findall(r'\\[SUM].*\\s+(\\d+\\.?\\d*).Mbits/sec', stdout)\n if not thread_values:\n # If there is no sum you have try and figure out an estimate\n # which happens when threads start at different times. The code\n # below will tend to overestimate a bit.\n thread_values = re.findall('\\[.*\\d+\\].*\\s+(\\d+\\.?\\d*).Mbits/sec', stdout)\n\n if len(thread_values) != FLAGS.iperf_sending_thread_count:\n raise ValueError('Only %s out of %s iperf threads reported a'\n ' throughput value.' 
%\n (len(thread_values), FLAGS.iperf_sending_thread_count))\n\n total_throughput = 0.0\n for value in thread_values:\n total_throughput += float(value)\n\n metadata = {\n # The meta data defining the environment\n 'receiving_machine_type': receiving_vm.machine_type,\n 'receiving_zone': receiving_vm.zone,\n 'sending_machine_type': sending_vm.machine_type,\n 'sending_thread_count': FLAGS.iperf_sending_thread_count,\n 'sending_zone': sending_vm.zone,\n 'runtime_in_seconds': FLAGS.iperf_runtime_in_seconds,\n 'ip_type': ip_type\n }\n return sample.Sample('Throughput', total_throughput, 'Mbits/sec', metadata)\n\n\ndef Run(benchmark_spec):\n \"\"\"Run iperf on the target vm.\n\n Args:\n benchmark_spec: The benchmark specification. Contains all data that is\n required to run the benchmark.\n\n Returns:\n A list of sample.Sample objects.\n \"\"\"\n vms = benchmark_spec.vms\n results = []\n\n logging.info('Iperf Results:')\n\n # Send traffic in both directions\n for sending_vm, receiving_vm in vms, reversed(vms):\n # Send using external IP addresses\n if vm_util.ShouldRunOnExternalIpAddress():\n results.append(_RunIperf(sending_vm,\n receiving_vm,\n receiving_vm.ip_address,\n 'external'))\n\n # Send using internal IP addresses\n if vm_util.ShouldRunOnInternalIpAddress(sending_vm,\n receiving_vm):\n results.append(_RunIperf(sending_vm,\n receiving_vm,\n receiving_vm.internal_ip,\n 'internal'))\n\n return results\n\n\ndef Cleanup(benchmark_spec):\n \"\"\"Cleanup iperf on the target vm (by uninstalling).\n\n Args:\n benchmark_spec: The benchmark specification. Contains all data that is\n required to run the benchmark.\n \"\"\"\n vms = benchmark_spec.vms\n for vm in vms:\n vm.RemoteCommand('kill -9 ' + vm.iperf_server_pid)\n", "path": "perfkitbenchmarker/linux_benchmarks/iperf_benchmark.py"}], "after_files": [{"content": "# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Runs plain Iperf.\n\nDocs:\nhttp://iperf.fr/\n\nRuns Iperf to collect network throughput.\n\"\"\"\n\nimport logging\nimport re\n\nfrom perfkitbenchmarker import configs\nfrom perfkitbenchmarker import flags\nfrom perfkitbenchmarker import sample\nfrom perfkitbenchmarker import vm_util\n\nflags.DEFINE_integer('iperf_sending_thread_count', 1,\n 'Number of connections to make to the '\n 'server for sending traffic.',\n lower_bound=1)\nflags.DEFINE_integer('iperf_runtime_in_seconds', 60,\n 'Number of seconds to run iperf.',\n lower_bound=1)\n\nFLAGS = flags.FLAGS\n\nBENCHMARK_NAME = 'iperf'\nBENCHMARK_CONFIG = \"\"\"\niperf:\n description: Run iperf\n vm_groups:\n vm_1:\n vm_spec: *default_single_core\n vm_2:\n vm_spec: *default_single_core\n\"\"\"\n\nIPERF_PORT = 20000\nIPERF_RETRIES = 5\n\n\ndef GetConfig(user_config):\n return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)\n\n\ndef Prepare(benchmark_spec):\n \"\"\"Install iperf and start the server on all machines.\n\n Args:\n benchmark_spec: The benchmark specification. 
Contains all data that is\n required to run the benchmark.\n \"\"\"\n vms = benchmark_spec.vms\n if len(vms) != 2:\n raise ValueError(\n 'iperf benchmark requires exactly two machines, found {0}'.format(len(\n vms)))\n\n for vm in vms:\n vm.Install('iperf')\n if vm_util.ShouldRunOnExternalIpAddress():\n vm.AllowPort(IPERF_PORT)\n stdout, _ = vm.RemoteCommand(('nohup iperf --server --port %s &> /dev/null'\n '& echo $!') % IPERF_PORT)\n # TODO store this in a better place once we have a better place\n vm.iperf_server_pid = stdout.strip()\n\n\n@vm_util.Retry(max_retries=IPERF_RETRIES)\ndef _RunIperf(sending_vm, receiving_vm, receiving_ip_address, ip_type):\n \"\"\"Run iperf using sending 'vm' to connect to 'ip_address'.\n\n Args:\n sending_vm: The VM sending traffic.\n receiving_vm: The VM receiving traffic.\n receiving_ip_address: The IP address of the iperf server (ie the receiver).\n ip_type: The IP type of 'ip_address' (e.g. 'internal', 'external')\n Returns:\n A Sample.\n \"\"\"\n iperf_cmd = ('iperf --client %s --port %s --format m --time %s -P %s' %\n (receiving_ip_address, IPERF_PORT,\n FLAGS.iperf_runtime_in_seconds,\n FLAGS.iperf_sending_thread_count))\n # the additional time on top of the iperf runtime is to account for the\n # time it takes for the iperf process to start and exit\n timeout_buffer = 30 + FLAGS.iperf_sending_thread_count\n stdout, _ = sending_vm.RemoteCommand(iperf_cmd, should_log=True,\n timeout=FLAGS.iperf_runtime_in_seconds +\n timeout_buffer)\n\n # Example output from iperf that needs to be parsed\n # STDOUT: ------------------------------------------------------------\n # Client connecting to 10.237.229.201, TCP port 5001\n # TCP window size: 0.04 MByte (default)\n # ------------------------------------------------------------\n # [ 6] local 10.76.234.115 port 53527 connected with 10.237.229.201 port 5001\n # [ 3] local 10.76.234.115 port 53524 connected with 10.237.229.201 port 5001\n # [ 4] local 10.76.234.115 port 53525 connected with 10.237.229.201 port 5001\n # [ 5] local 10.76.234.115 port 53526 connected with 10.237.229.201 port 5001\n # [ ID] Interval Transfer Bandwidth\n # [ 4] 0.0-60.0 sec 3730 MBytes 521.1 Mbits/sec\n # [ 5] 0.0-60.0 sec 3499 MBytes 489 Mbits/sec\n # [ 6] 0.0-60.0 sec 3044 MBytes 425 Mbits/sec\n # [ 3] 0.0-60.0 sec 3738 MBytes 522 Mbits/sec\n # [SUM] 0.0-60.0 sec 14010 MBytes 1957 Mbits/sec\n\n thread_values = re.findall(r'\\[SUM].*\\s+(\\d+\\.?\\d*).Mbits/sec', stdout)\n if not thread_values:\n # If there is no sum you have try and figure out an estimate\n # which happens when threads start at different times. The code\n # below will tend to overestimate a bit.\n thread_values = re.findall('\\[.*\\d+\\].*\\s+(\\d+\\.?\\d*).Mbits/sec', stdout)\n\n if len(thread_values) != FLAGS.iperf_sending_thread_count:\n raise ValueError('Only %s out of %s iperf threads reported a'\n ' throughput value.' 
%\n (len(thread_values), FLAGS.iperf_sending_thread_count))\n\n total_throughput = 0.0\n for value in thread_values:\n total_throughput += float(value)\n\n metadata = {\n # The meta data defining the environment\n 'receiving_machine_type': receiving_vm.machine_type,\n 'receiving_zone': receiving_vm.zone,\n 'sending_machine_type': sending_vm.machine_type,\n 'sending_thread_count': FLAGS.iperf_sending_thread_count,\n 'sending_zone': sending_vm.zone,\n 'runtime_in_seconds': FLAGS.iperf_runtime_in_seconds,\n 'ip_type': ip_type\n }\n return sample.Sample('Throughput', total_throughput, 'Mbits/sec', metadata)\n\n\ndef Run(benchmark_spec):\n \"\"\"Run iperf on the target vm.\n\n Args:\n benchmark_spec: The benchmark specification. Contains all data that is\n required to run the benchmark.\n\n Returns:\n A list of sample.Sample objects.\n \"\"\"\n vms = benchmark_spec.vms\n results = []\n\n logging.info('Iperf Results:')\n\n # Send traffic in both directions\n for sending_vm, receiving_vm in vms, reversed(vms):\n # Send using external IP addresses\n if vm_util.ShouldRunOnExternalIpAddress():\n results.append(_RunIperf(sending_vm,\n receiving_vm,\n receiving_vm.ip_address,\n 'external'))\n\n # Send using internal IP addresses\n if vm_util.ShouldRunOnInternalIpAddress(sending_vm,\n receiving_vm):\n results.append(_RunIperf(sending_vm,\n receiving_vm,\n receiving_vm.internal_ip,\n 'internal'))\n\n return results\n\n\ndef Cleanup(benchmark_spec):\n \"\"\"Cleanup iperf on the target vm (by uninstalling).\n\n Args:\n benchmark_spec: The benchmark specification. Contains all data that is\n required to run the benchmark.\n \"\"\"\n vms = benchmark_spec.vms\n for vm in vms:\n vm.RemoteCommand('kill -9 ' + vm.iperf_server_pid, ignore_failure=True)\n", "path": "perfkitbenchmarker/linux_benchmarks/iperf_benchmark.py"}]}
num_tokens: 2,726
num_tokens_diff: 302
problem_id: gh_patches_debug_19083
source: rasdani/github-patches
task_type: git_diff
in_source_id: kubeflow__pipelines-1595
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Crate VolumeOP from k8s_resource error **What happened:** when I run ``` dsl.VolumeOP(k8s_resource=my_vpc) ``` ``` Raises: ValueError: if k8s_resource is provided along with other arguments ``` I think the reason is : ``` if "k8s_resource" in kwargs: if resource_name or size or storage_class or modes or annotations: raise ValueError("You cannot provide k8s_resource along with " "other arguments.") ``` ``` def __init__(self, resource_name: str = None, size: str = None, storage_class: str = None, modes: List[str] = VOLUME_MODE_RWM, annotations: Dict[str, str] = None, data_source=None, **kwargs): ``` but the mode has a default value sdk/python/kfp/dsl/_volume_op.py **What did you expect to happen:** I think, I should only put k8s_resource and name in it. **What steps did you take:** [A clear and concise description of what the bug is.] **Anything else you would like to add:** [Miscellaneous information that will assist in solving the issue.] --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sdk/python/kfp/dsl/_volume_op.py` Content: ``` 1 # Copyright 2019 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 16 import re 17 from typing import List, Dict 18 from kubernetes.client.models import ( 19 V1ObjectMeta, V1ResourceRequirements, V1PersistentVolumeClaimSpec, 20 V1PersistentVolumeClaim, V1TypedLocalObjectReference 21 ) 22 23 from ._resource_op import ResourceOp 24 from ._pipeline_param import ( 25 PipelineParam, match_serialized_pipelineparam, sanitize_k8s_name 26 ) 27 from ._pipeline_volume import PipelineVolume 28 29 30 VOLUME_MODE_RWO = ["ReadWriteOnce"] 31 VOLUME_MODE_RWM = ["ReadWriteMany"] 32 VOLUME_MODE_ROM = ["ReadOnlyMany"] 33 34 35 class VolumeOp(ResourceOp): 36 """Represents an op which will be translated into a resource template 37 which will be creating a PVC. 38 """ 39 40 def __init__(self, 41 resource_name: str = None, 42 size: str = None, 43 storage_class: str = None, 44 modes: List[str] = VOLUME_MODE_RWM, 45 annotations: Dict[str, str] = None, 46 data_source=None, 47 **kwargs): 48 """Create a new instance of VolumeOp. 49 50 Args: 51 resource_name: A desired name for the PVC which will be created 52 size: The size of the PVC which will be created 53 storage_class: The storage class to use for the dynamically created 54 PVC 55 modes: The access modes for the PVC 56 annotations: Annotations to be patched in the PVC 57 data_source: May be a V1TypedLocalObjectReference, and then it is 58 used in the data_source field of the PVC as is. 
Can also be a 59 string/PipelineParam, and in that case it will be used as a 60 VolumeSnapshot name (Alpha feature) 61 kwargs: See ResourceOp definition 62 Raises: 63 ValueError: if k8s_resource is provided along with other arguments 64 if k8s_resource is not a V1PersistentVolumeClaim 65 if size is None 66 if size is an invalid memory string (when not a 67 PipelineParam) 68 if data_source is not one of (str, PipelineParam, 69 V1TypedLocalObjectReference) 70 """ 71 # Add size to attribute outputs 72 self.attribute_outputs = {"size": "{.status.capacity.storage}"} 73 74 if "k8s_resource" in kwargs: 75 if resource_name or size or storage_class or modes or annotations: 76 raise ValueError("You cannot provide k8s_resource along with " 77 "other arguments.") 78 if not isinstance(kwargs["k8s_resource"], V1PersistentVolumeClaim): 79 raise ValueError("k8s_resource in VolumeOp must be an instance" 80 " of V1PersistentVolumeClaim") 81 super().__init__(**kwargs) 82 self.volume = PipelineVolume( 83 name=sanitize_k8s_name(self.name), 84 pvc=self.outputs["name"] 85 ) 86 return 87 88 if not size: 89 raise ValueError("Please provide size") 90 elif not match_serialized_pipelineparam(str(size)): 91 self._validate_memory_string(size) 92 93 if data_source and not isinstance( 94 data_source, (str, PipelineParam, V1TypedLocalObjectReference)): 95 raise ValueError("data_source can be one of (str, PipelineParam, " 96 "V1TypedLocalObjectReference).") 97 if data_source and isinstance(data_source, (str, PipelineParam)): 98 data_source = V1TypedLocalObjectReference( 99 api_group="snapshot.storage.k8s.io", 100 kind="VolumeSnapshot", 101 name=data_source 102 ) 103 104 # Set the k8s_resource 105 if not match_serialized_pipelineparam(str(resource_name)): 106 resource_name = sanitize_k8s_name(resource_name) 107 pvc_metadata = V1ObjectMeta( 108 name="{{workflow.name}}-%s" % resource_name, 109 annotations=annotations 110 ) 111 requested_resources = V1ResourceRequirements( 112 requests={"storage": size} 113 ) 114 pvc_spec = V1PersistentVolumeClaimSpec( 115 access_modes=modes, 116 resources=requested_resources, 117 storage_class_name=storage_class, 118 data_source=data_source 119 ) 120 k8s_resource = V1PersistentVolumeClaim( 121 api_version="v1", 122 kind="PersistentVolumeClaim", 123 metadata=pvc_metadata, 124 spec=pvc_spec 125 ) 126 127 super().__init__( 128 k8s_resource=k8s_resource, 129 **kwargs, 130 ) 131 self.volume = PipelineVolume( 132 name=sanitize_k8s_name(self.name), 133 pvc=self.outputs["name"] 134 ) 135 136 def _validate_memory_string(self, memory_string): 137 """Validate a given string is valid for memory request or limit.""" 138 if re.match(r'^[0-9]+(E|Ei|P|Pi|T|Ti|G|Gi|M|Mi|K|Ki){0,1}$', 139 memory_string) is None: 140 raise ValueError('Invalid memory string. Should be an integer, ' + 141 'or integer followed by one of ' + 142 '"E|Ei|P|Pi|T|Ti|G|Gi|M|Mi|K|Ki"') 143 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sdk/python/kfp/dsl/_volume_op.py b/sdk/python/kfp/dsl/_volume_op.py --- a/sdk/python/kfp/dsl/_volume_op.py +++ b/sdk/python/kfp/dsl/_volume_op.py @@ -41,7 +41,7 @@ resource_name: str = None, size: str = None, storage_class: str = None, - modes: List[str] = VOLUME_MODE_RWM, + modes: List[str] = None, annotations: Dict[str, str] = None, data_source=None, **kwargs): @@ -112,7 +112,7 @@ requests={"storage": size} ) pvc_spec = V1PersistentVolumeClaimSpec( - access_modes=modes, + access_modes=modes or VOLUME_MODE_RWM, resources=requested_resources, storage_class_name=storage_class, data_source=data_source
{"golden_diff": "diff --git a/sdk/python/kfp/dsl/_volume_op.py b/sdk/python/kfp/dsl/_volume_op.py\n--- a/sdk/python/kfp/dsl/_volume_op.py\n+++ b/sdk/python/kfp/dsl/_volume_op.py\n@@ -41,7 +41,7 @@\n resource_name: str = None,\n size: str = None,\n storage_class: str = None,\n- modes: List[str] = VOLUME_MODE_RWM,\n+ modes: List[str] = None,\n annotations: Dict[str, str] = None,\n data_source=None,\n **kwargs):\n@@ -112,7 +112,7 @@\n requests={\"storage\": size}\n )\n pvc_spec = V1PersistentVolumeClaimSpec(\n- access_modes=modes,\n+ access_modes=modes or VOLUME_MODE_RWM,\n resources=requested_resources,\n storage_class_name=storage_class,\n data_source=data_source\n", "issue": "Crate VolumeOP from k8s_resource error\n**What happened:**\r\nwhen I run\r\n```\r\n dsl.VolumeOP(k8s_resource=my_vpc)\r\n```\r\n\r\n```\r\n Raises:\r\n ValueError: if k8s_resource is provided along with other arguments\r\n```\r\n\r\n\r\nI think the reason is :\r\n``` \r\n if \"k8s_resource\" in kwargs:\r\n if resource_name or size or storage_class or modes or annotations:\r\n raise ValueError(\"You cannot provide k8s_resource along with \"\r\n \"other arguments.\")\r\n```\r\n```\r\n def __init__(self,\r\n resource_name: str = None,\r\n size: str = None,\r\n storage_class: str = None,\r\n modes: List[str] = VOLUME_MODE_RWM,\r\n annotations: Dict[str, str] = None,\r\n data_source=None,\r\n **kwargs):\r\n```\r\n\r\nbut the mode has a default value\r\nsdk/python/kfp/dsl/_volume_op.py\r\n**What did you expect to happen:**\r\nI think, I should only put k8s_resource and name in it.\r\n**What steps did you take:**\r\n[A clear and concise description of what the bug is.]\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport re\nfrom typing import List, Dict\nfrom kubernetes.client.models import (\n V1ObjectMeta, V1ResourceRequirements, V1PersistentVolumeClaimSpec,\n V1PersistentVolumeClaim, V1TypedLocalObjectReference\n)\n\nfrom ._resource_op import ResourceOp\nfrom ._pipeline_param import (\n PipelineParam, match_serialized_pipelineparam, sanitize_k8s_name\n)\nfrom ._pipeline_volume import PipelineVolume\n\n\nVOLUME_MODE_RWO = [\"ReadWriteOnce\"]\nVOLUME_MODE_RWM = [\"ReadWriteMany\"]\nVOLUME_MODE_ROM = [\"ReadOnlyMany\"]\n\n\nclass VolumeOp(ResourceOp):\n \"\"\"Represents an op which will be translated into a resource template\n which will be creating a PVC.\n \"\"\"\n\n def __init__(self,\n resource_name: str = None,\n size: str = None,\n storage_class: str = None,\n modes: List[str] = VOLUME_MODE_RWM,\n annotations: Dict[str, str] = None,\n data_source=None,\n **kwargs):\n \"\"\"Create a new instance of VolumeOp.\n\n Args:\n resource_name: A desired name for the PVC which will be created\n size: The size of the PVC which will be created\n storage_class: The storage class to use for the dynamically created\n PVC\n modes: The access 
modes for the PVC\n annotations: Annotations to be patched in the PVC\n data_source: May be a V1TypedLocalObjectReference, and then it is\n used in the data_source field of the PVC as is. Can also be a\n string/PipelineParam, and in that case it will be used as a\n VolumeSnapshot name (Alpha feature)\n kwargs: See ResourceOp definition\n Raises:\n ValueError: if k8s_resource is provided along with other arguments\n if k8s_resource is not a V1PersistentVolumeClaim\n if size is None\n if size is an invalid memory string (when not a\n PipelineParam)\n if data_source is not one of (str, PipelineParam,\n V1TypedLocalObjectReference)\n \"\"\"\n # Add size to attribute outputs\n self.attribute_outputs = {\"size\": \"{.status.capacity.storage}\"}\n\n if \"k8s_resource\" in kwargs:\n if resource_name or size or storage_class or modes or annotations:\n raise ValueError(\"You cannot provide k8s_resource along with \"\n \"other arguments.\")\n if not isinstance(kwargs[\"k8s_resource\"], V1PersistentVolumeClaim):\n raise ValueError(\"k8s_resource in VolumeOp must be an instance\"\n \" of V1PersistentVolumeClaim\")\n super().__init__(**kwargs)\n self.volume = PipelineVolume(\n name=sanitize_k8s_name(self.name),\n pvc=self.outputs[\"name\"]\n )\n return\n\n if not size:\n raise ValueError(\"Please provide size\")\n elif not match_serialized_pipelineparam(str(size)):\n self._validate_memory_string(size)\n\n if data_source and not isinstance(\n data_source, (str, PipelineParam, V1TypedLocalObjectReference)):\n raise ValueError(\"data_source can be one of (str, PipelineParam, \"\n \"V1TypedLocalObjectReference).\")\n if data_source and isinstance(data_source, (str, PipelineParam)):\n data_source = V1TypedLocalObjectReference(\n api_group=\"snapshot.storage.k8s.io\",\n kind=\"VolumeSnapshot\",\n name=data_source\n )\n\n # Set the k8s_resource\n if not match_serialized_pipelineparam(str(resource_name)):\n resource_name = sanitize_k8s_name(resource_name)\n pvc_metadata = V1ObjectMeta(\n name=\"{{workflow.name}}-%s\" % resource_name,\n annotations=annotations\n )\n requested_resources = V1ResourceRequirements(\n requests={\"storage\": size}\n )\n pvc_spec = V1PersistentVolumeClaimSpec(\n access_modes=modes,\n resources=requested_resources,\n storage_class_name=storage_class,\n data_source=data_source\n )\n k8s_resource = V1PersistentVolumeClaim(\n api_version=\"v1\",\n kind=\"PersistentVolumeClaim\",\n metadata=pvc_metadata,\n spec=pvc_spec\n )\n\n super().__init__(\n k8s_resource=k8s_resource,\n **kwargs,\n )\n self.volume = PipelineVolume(\n name=sanitize_k8s_name(self.name),\n pvc=self.outputs[\"name\"]\n )\n\n def _validate_memory_string(self, memory_string):\n \"\"\"Validate a given string is valid for memory request or limit.\"\"\"\n if re.match(r'^[0-9]+(E|Ei|P|Pi|T|Ti|G|Gi|M|Mi|K|Ki){0,1}$',\n memory_string) is None:\n raise ValueError('Invalid memory string. 
Should be an integer, ' +\n 'or integer followed by one of ' +\n '\"E|Ei|P|Pi|T|Ti|G|Gi|M|Mi|K|Ki\"')\n", "path": "sdk/python/kfp/dsl/_volume_op.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport re\nfrom typing import List, Dict\nfrom kubernetes.client.models import (\n V1ObjectMeta, V1ResourceRequirements, V1PersistentVolumeClaimSpec,\n V1PersistentVolumeClaim, V1TypedLocalObjectReference\n)\n\nfrom ._resource_op import ResourceOp\nfrom ._pipeline_param import (\n PipelineParam, match_serialized_pipelineparam, sanitize_k8s_name\n)\nfrom ._pipeline_volume import PipelineVolume\n\n\nVOLUME_MODE_RWO = [\"ReadWriteOnce\"]\nVOLUME_MODE_RWM = [\"ReadWriteMany\"]\nVOLUME_MODE_ROM = [\"ReadOnlyMany\"]\n\n\nclass VolumeOp(ResourceOp):\n \"\"\"Represents an op which will be translated into a resource template\n which will be creating a PVC.\n \"\"\"\n\n def __init__(self,\n resource_name: str = None,\n size: str = None,\n storage_class: str = None,\n modes: List[str] = None,\n annotations: Dict[str, str] = None,\n data_source=None,\n **kwargs):\n \"\"\"Create a new instance of VolumeOp.\n\n Args:\n resource_name: A desired name for the PVC which will be created\n size: The size of the PVC which will be created\n storage_class: The storage class to use for the dynamically created\n PVC\n modes: The access modes for the PVC\n annotations: Annotations to be patched in the PVC\n data_source: May be a V1TypedLocalObjectReference, and then it is\n used in the data_source field of the PVC as is. 
Can also be a\n string/PipelineParam, and in that case it will be used as a\n VolumeSnapshot name (Alpha feature)\n kwargs: See ResourceOp definition\n Raises:\n ValueError: if k8s_resource is provided along with other arguments\n if k8s_resource is not a V1PersistentVolumeClaim\n if size is None\n if size is an invalid memory string (when not a\n PipelineParam)\n if data_source is not one of (str, PipelineParam,\n V1TypedLocalObjectReference)\n \"\"\"\n # Add size to attribute outputs\n self.attribute_outputs = {\"size\": \"{.status.capacity.storage}\"}\n\n if \"k8s_resource\" in kwargs:\n if resource_name or size or storage_class or modes or annotations:\n raise ValueError(\"You cannot provide k8s_resource along with \"\n \"other arguments.\")\n if not isinstance(kwargs[\"k8s_resource\"], V1PersistentVolumeClaim):\n raise ValueError(\"k8s_resource in VolumeOp must be an instance\"\n \" of V1PersistentVolumeClaim\")\n super().__init__(**kwargs)\n self.volume = PipelineVolume(\n name=sanitize_k8s_name(self.name),\n pvc=self.outputs[\"name\"]\n )\n return\n\n if not size:\n raise ValueError(\"Please provide size\")\n elif not match_serialized_pipelineparam(str(size)):\n self._validate_memory_string(size)\n\n if data_source and not isinstance(\n data_source, (str, PipelineParam, V1TypedLocalObjectReference)):\n raise ValueError(\"data_source can be one of (str, PipelineParam, \"\n \"V1TypedLocalObjectReference).\")\n if data_source and isinstance(data_source, (str, PipelineParam)):\n data_source = V1TypedLocalObjectReference(\n api_group=\"snapshot.storage.k8s.io\",\n kind=\"VolumeSnapshot\",\n name=data_source\n )\n\n # Set the k8s_resource\n if not match_serialized_pipelineparam(str(resource_name)):\n resource_name = sanitize_k8s_name(resource_name)\n pvc_metadata = V1ObjectMeta(\n name=\"{{workflow.name}}-%s\" % resource_name,\n annotations=annotations\n )\n requested_resources = V1ResourceRequirements(\n requests={\"storage\": size}\n )\n pvc_spec = V1PersistentVolumeClaimSpec(\n access_modes=modes or VOLUME_MODE_RWM,\n resources=requested_resources,\n storage_class_name=storage_class,\n data_source=data_source\n )\n k8s_resource = V1PersistentVolumeClaim(\n api_version=\"v1\",\n kind=\"PersistentVolumeClaim\",\n metadata=pvc_metadata,\n spec=pvc_spec\n )\n\n super().__init__(\n k8s_resource=k8s_resource,\n **kwargs,\n )\n self.volume = PipelineVolume(\n name=sanitize_k8s_name(self.name),\n pvc=self.outputs[\"name\"]\n )\n\n def _validate_memory_string(self, memory_string):\n \"\"\"Validate a given string is valid for memory request or limit.\"\"\"\n if re.match(r'^[0-9]+(E|Ei|P|Pi|T|Ti|G|Gi|M|Mi|K|Ki){0,1}$',\n memory_string) is None:\n raise ValueError('Invalid memory string. Should be an integer, ' +\n 'or integer followed by one of ' +\n '\"E|Ei|P|Pi|T|Ti|G|Gi|M|Mi|K|Ki\"')\n", "path": "sdk/python/kfp/dsl/_volume_op.py"}]}
num_tokens: 2,056
num_tokens_diff: 206
problem_id: gh_patches_debug_27022
source: rasdani/github-patches
task_type: git_diff
in_source_id: weecology__retriever-1577
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Clean up CLI to use the python interface In the python interface, we use the function [def datasets(keywords=None, licenses=None):](https://github.com/weecology/retriever/blob/bb06180a030d34eafa2e1ea13b74a3719df827e1/retriever/lib/datasets.py#L5). This should be able to return what we are doing in the main (CLI interface) at [line](https://github.com/weecology/retriever/blob/main/retriever/__main__.py#L105). and the results can be printed in the away that looks good in CLI(terminal) _Originally posted by @henrykironde in https://github.com/weecology/retriever/issues/1570#issuecomment-810558636_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `retriever/lib/get_opts.py` Content: ``` 1 import argparse 2 3 import argcomplete 4 from argcomplete.completers import ChoicesCompleter 5 6 from retriever.engines import engine_list 7 from retriever.lib.defaults import VERSION, RETRIEVER_REPOSITORY 8 from retriever.lib.scripts import SCRIPT_LIST, get_dataset_names_upstream 9 10 module_list = SCRIPT_LIST() 11 script_list = [] 12 keywords_list = [] 13 licenses_list = [] 14 15 for module in module_list: 16 script_list.append(module.name) 17 18 if hasattr(module, "keywords"): 19 # Add list of keywords to keywords_list 20 if module.keywords: 21 keywords_list += module.keywords 22 23 if hasattr(module, "licenses"): 24 # Append string to list of licenses_list 25 if module.licenses: 26 for dict_items in module.licenses: 27 if dict_items['name']: 28 licenses_list.append(dict_items['name']) 29 30 script_list.extend(get_dataset_names_upstream(repo=RETRIEVER_REPOSITORY)) 31 script_list.extend(get_dataset_names_upstream()) 32 script_list = sorted(set(script_list)) 33 34 # set of all possible licenses and keywords 35 licenses_options = set(licenses_list) 36 keywords_options = set(keywords_list) 37 38 parser = argparse.ArgumentParser(prog="retriever") 39 parser.add_argument('-v', '--version', action='version', version=VERSION) 40 parser.add_argument('-q', 41 '--quiet', 42 help='suppress command-line output', 43 action='store_true') 44 45 # .............................................................. 46 # subparsers 47 # .............................................................. 
48 49 # retriever HELP 50 subparsers = parser.add_subparsers(help='sub-command help', dest='command') 51 52 # retriever download/install/update/new help 53 download_parser = subparsers.add_parser('download', 54 help='download raw data files for a dataset') 55 install_parser = subparsers.add_parser('install', help='download and install dataset') 56 default_parser = subparsers.add_parser('defaults', help='displays default options') 57 update_parser = subparsers.add_parser('update', 58 help='download updated versions of scripts') 59 new_parser = subparsers.add_parser('new', help='create a new sample retriever script') 60 autocreate_parser = subparsers.add_parser( 61 'autocreate', help='CLI to automatically create retriever scripts') 62 ls_parser = subparsers.add_parser('ls', 63 help='display a list all available dataset scripts') 64 citation_parser = subparsers.add_parser('citation', help='view citation') 65 license_parser = subparsers.add_parser('license', help='view dataset license') 66 reset_parser = subparsers.add_parser( 67 'reset', 68 help='reset retriever: removes configuration settings, scripts, and cached data') 69 help_parser = subparsers.add_parser('help', help='') 70 commit_parser = subparsers.add_parser('commit', help='commit a dataset') 71 commit_log_parser = subparsers.add_parser('log', help='see log of a committed dataset') 72 73 # .............................................................. 74 # subparsers with Arguments 75 # .............................................................. 76 77 citation_parser.add_argument('dataset', 78 help='dataset name', 79 nargs='?', 80 default=None, 81 choices=script_list + [None]) 82 commit_parser.add_argument('dataset', help='dataset name', choices=script_list) 83 commit_parser.add_argument('-p', 84 '--path', 85 help='path to store committed file', 86 default=None, 87 required=False) 88 commit_parser.add_argument('-m', 89 '--message', 90 help='commit message', 91 default=None, 92 required=True, 93 type=str) 94 commit_log_parser.add_argument('dataset', help='dataset name', choices=script_list) 95 license_parser.add_argument('dataset', 96 help='dataset name', 97 nargs='?', 98 default=None, 99 choices=script_list + [None]) 100 new_parser.add_argument('filename', help='new script filename') 101 reset_parser.add_argument('scope', help='things to reset: all, scripts or data').completer = \ 102 ChoicesCompleter(script_list + ['all', 'scripts', 'data']) 103 install_parser.add_argument('--compile', 104 help='force re-compile of script before downloading', 105 action='store_true') 106 install_parser.add_argument('--debug', help='run in debug mode', action='store_true') 107 install_parser.add_argument('--not-cached', 108 help='overwrites local cache of raw data', 109 action='store_true') 110 download_parser.add_argument('--debug', help='run in debug mode', action='store_true') 111 download_parser.add_argument('--not-cached', 112 help='overwrites local cache of raw data', 113 action='store_true') 114 download_parser.add_argument('-b', 115 '--bbox', 116 nargs=4, 117 help='Set bounding box xmin, ymin, xmax, ymax', 118 required=False) 119 120 ls_parser.add_argument('-l', help='search datasets with specific license(s)', 121 nargs='+').completer = ChoicesCompleter(list(licenses_options)) 122 ls_parser.add_argument('-k', help='search datasets with keyword(s)', 123 nargs='+').completer = ChoicesCompleter(list(keywords_options)) 124 ls_parser.add_argument('-v', 125 help='verbose list of all datasets', 126 nargs='*', 127 default=False) 128 129 
autocreate_parser.add_argument('path', help='path to the data file(s)') 130 autocreate_parser.add_argument('-dt', 131 help='datatype for files', 132 nargs='?', 133 default='tabular', 134 choices=['raster', 'vector', 'tabular']) 135 autocreate_parser.add_argument('-d', 136 help='turn a directory and subdirectories into scripts', 137 action='store_true') 138 autocreate_parser.add_argument('-e', 139 help='encoding of the source file', 140 nargs='?', 141 default='utf-8') 142 autocreate_parser.add_argument('-f', help='turn files into scripts', action='store_true') 143 autocreate_parser.add_argument('-o', 144 help='write scripts out to a designated directory', 145 nargs='?', 146 const='') 147 autocreate_parser.add_argument('--skip-lines', 148 help='skip a set number of lines before processing data', 149 nargs=1, 150 type=int) 151 # retriever Install {Engine} .. 152 # retriever download [options] 153 install_subparsers = install_parser.add_subparsers(help='engine-specific help', 154 dest='engine') 155 156 for engine in engine_list: 157 if engine.name == "Download Only": 158 # download engine follows, retriever download [dataset] 159 download_parser.add_argument( 160 'dataset', help='dataset name').completer = ChoicesCompleter(script_list) 161 else: 162 engine_parser = install_subparsers.add_parser(engine.abbreviation, 163 help=engine.name) 164 engine_parser.add_argument( 165 'dataset', help='dataset name').completer = ChoicesCompleter(script_list) 166 engine_parser.add_argument( 167 '--hash-value', 168 help='install dataset from provenance directory using hash value', 169 default=None, 170 required=False, 171 type=str) 172 engine_parser.add_argument('-b', 173 '--bbox', 174 nargs=4, 175 help='Set bounding box xmin, ymin, xmax, ymax', 176 required=False) 177 if engine.name == "JSON": 178 engine_parser.add_argument('-p', 179 '--pretty', 180 help='Add indentation to json file', 181 action='store_true', 182 required=False) 183 184 abbreviations = set('h') 185 186 for arg in engine.required_opts: 187 arg_name, help_msg, default = arg[:3] 188 potential_abbreviations = [char for char in arg_name if char not in abbreviations] 189 if potential_abbreviations: 190 abbreviation = potential_abbreviations[0] 191 abbreviations.add(abbreviation) 192 else: 193 abbreviation = '-%s' % arg_name 194 195 if engine.name == "Download Only" or abbreviation == "download": 196 # add attributes to Download only engine 197 download_parser.add_argument('--%s' % arg_name, 198 '-%s' % abbreviation, 199 help=help_msg, 200 nargs='?', 201 default=default) 202 else: 203 engine_parser.add_argument('--%s' % arg_name, 204 '-%s' % abbreviation, 205 help=help_msg, 206 nargs='?', 207 default=default) 208 209 argcomplete.autocomplete(parser) 210 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/retriever/lib/get_opts.py b/retriever/lib/get_opts.py --- a/retriever/lib/get_opts.py +++ b/retriever/lib/get_opts.py @@ -31,9 +31,10 @@ script_list.extend(get_dataset_names_upstream()) script_list = sorted(set(script_list)) -# set of all possible licenses and keywords +# set of all possible licenses, keywords and scripts licenses_options = set(licenses_list) keywords_options = set(keywords_list) +scripts_options = script_list parser = argparse.ArgumentParser(prog="retriever") parser.add_argument('-v', '--version', action='version', version=VERSION) @@ -121,10 +122,8 @@ nargs='+').completer = ChoicesCompleter(list(licenses_options)) ls_parser.add_argument('-k', help='search datasets with keyword(s)', nargs='+').completer = ChoicesCompleter(list(keywords_options)) -ls_parser.add_argument('-v', - help='verbose list of all datasets', - nargs='*', - default=False) +ls_parser.add_argument('-v', help='verbose list of specified dataset(s)', + nargs='+').completer = ChoicesCompleter(list(scripts_options)) autocreate_parser.add_argument('path', help='path to the data file(s)') autocreate_parser.add_argument('-dt',
{"golden_diff": "diff --git a/retriever/lib/get_opts.py b/retriever/lib/get_opts.py\n--- a/retriever/lib/get_opts.py\n+++ b/retriever/lib/get_opts.py\n@@ -31,9 +31,10 @@\n script_list.extend(get_dataset_names_upstream())\n script_list = sorted(set(script_list))\n \n-# set of all possible licenses and keywords\n+# set of all possible licenses, keywords and scripts\n licenses_options = set(licenses_list)\n keywords_options = set(keywords_list)\n+scripts_options = script_list\n \n parser = argparse.ArgumentParser(prog=\"retriever\")\n parser.add_argument('-v', '--version', action='version', version=VERSION)\n@@ -121,10 +122,8 @@\n nargs='+').completer = ChoicesCompleter(list(licenses_options))\n ls_parser.add_argument('-k', help='search datasets with keyword(s)',\n nargs='+').completer = ChoicesCompleter(list(keywords_options))\n-ls_parser.add_argument('-v',\n- help='verbose list of all datasets',\n- nargs='*',\n- default=False)\n+ls_parser.add_argument('-v', help='verbose list of specified dataset(s)',\n+ nargs='+').completer = ChoicesCompleter(list(scripts_options))\n \n autocreate_parser.add_argument('path', help='path to the data file(s)')\n autocreate_parser.add_argument('-dt',\n", "issue": "Clean up CLI to use the python interface\n\r\nIn the python interface, we use the function [def datasets(keywords=None, licenses=None):](https://github.com/weecology/retriever/blob/bb06180a030d34eafa2e1ea13b74a3719df827e1/retriever/lib/datasets.py#L5).\r\nThis should be able to return what we are doing in the main (CLI interface) at [line](https://github.com/weecology/retriever/blob/main/retriever/__main__.py#L105).\r\n and the results can be printed in the away that looks good in CLI(terminal)\r\n\r\n_Originally posted by @henrykironde in https://github.com/weecology/retriever/issues/1570#issuecomment-810558636_\n", "before_files": [{"content": "import argparse\n\nimport argcomplete\nfrom argcomplete.completers import ChoicesCompleter\n\nfrom retriever.engines import engine_list\nfrom retriever.lib.defaults import VERSION, RETRIEVER_REPOSITORY\nfrom retriever.lib.scripts import SCRIPT_LIST, get_dataset_names_upstream\n\nmodule_list = SCRIPT_LIST()\nscript_list = []\nkeywords_list = []\nlicenses_list = []\n\nfor module in module_list:\n script_list.append(module.name)\n\n if hasattr(module, \"keywords\"):\n # Add list of keywords to keywords_list\n if module.keywords:\n keywords_list += module.keywords\n\n if hasattr(module, \"licenses\"):\n # Append string to list of licenses_list\n if module.licenses:\n for dict_items in module.licenses:\n if dict_items['name']:\n licenses_list.append(dict_items['name'])\n\nscript_list.extend(get_dataset_names_upstream(repo=RETRIEVER_REPOSITORY))\nscript_list.extend(get_dataset_names_upstream())\nscript_list = sorted(set(script_list))\n\n# set of all possible licenses and keywords\nlicenses_options = set(licenses_list)\nkeywords_options = set(keywords_list)\n\nparser = argparse.ArgumentParser(prog=\"retriever\")\nparser.add_argument('-v', '--version', action='version', version=VERSION)\nparser.add_argument('-q',\n '--quiet',\n help='suppress command-line output',\n action='store_true')\n\n# ..............................................................\n# subparsers\n# ..............................................................\n\n# retriever HELP\nsubparsers = parser.add_subparsers(help='sub-command help', dest='command')\n\n# retriever download/install/update/new help\ndownload_parser = subparsers.add_parser('download',\n help='download raw data files for a 
dataset')\ninstall_parser = subparsers.add_parser('install', help='download and install dataset')\ndefault_parser = subparsers.add_parser('defaults', help='displays default options')\nupdate_parser = subparsers.add_parser('update',\n help='download updated versions of scripts')\nnew_parser = subparsers.add_parser('new', help='create a new sample retriever script')\nautocreate_parser = subparsers.add_parser(\n 'autocreate', help='CLI to automatically create retriever scripts')\nls_parser = subparsers.add_parser('ls',\n help='display a list all available dataset scripts')\ncitation_parser = subparsers.add_parser('citation', help='view citation')\nlicense_parser = subparsers.add_parser('license', help='view dataset license')\nreset_parser = subparsers.add_parser(\n 'reset',\n help='reset retriever: removes configuration settings, scripts, and cached data')\nhelp_parser = subparsers.add_parser('help', help='')\ncommit_parser = subparsers.add_parser('commit', help='commit a dataset')\ncommit_log_parser = subparsers.add_parser('log', help='see log of a committed dataset')\n\n# ..............................................................\n# subparsers with Arguments\n# ..............................................................\n\ncitation_parser.add_argument('dataset',\n help='dataset name',\n nargs='?',\n default=None,\n choices=script_list + [None])\ncommit_parser.add_argument('dataset', help='dataset name', choices=script_list)\ncommit_parser.add_argument('-p',\n '--path',\n help='path to store committed file',\n default=None,\n required=False)\ncommit_parser.add_argument('-m',\n '--message',\n help='commit message',\n default=None,\n required=True,\n type=str)\ncommit_log_parser.add_argument('dataset', help='dataset name', choices=script_list)\nlicense_parser.add_argument('dataset',\n help='dataset name',\n nargs='?',\n default=None,\n choices=script_list + [None])\nnew_parser.add_argument('filename', help='new script filename')\nreset_parser.add_argument('scope', help='things to reset: all, scripts or data').completer = \\\n ChoicesCompleter(script_list + ['all', 'scripts', 'data'])\ninstall_parser.add_argument('--compile',\n help='force re-compile of script before downloading',\n action='store_true')\ninstall_parser.add_argument('--debug', help='run in debug mode', action='store_true')\ninstall_parser.add_argument('--not-cached',\n help='overwrites local cache of raw data',\n action='store_true')\ndownload_parser.add_argument('--debug', help='run in debug mode', action='store_true')\ndownload_parser.add_argument('--not-cached',\n help='overwrites local cache of raw data',\n action='store_true')\ndownload_parser.add_argument('-b',\n '--bbox',\n nargs=4,\n help='Set bounding box xmin, ymin, xmax, ymax',\n required=False)\n\nls_parser.add_argument('-l', help='search datasets with specific license(s)',\n nargs='+').completer = ChoicesCompleter(list(licenses_options))\nls_parser.add_argument('-k', help='search datasets with keyword(s)',\n nargs='+').completer = ChoicesCompleter(list(keywords_options))\nls_parser.add_argument('-v',\n help='verbose list of all datasets',\n nargs='*',\n default=False)\n\nautocreate_parser.add_argument('path', help='path to the data file(s)')\nautocreate_parser.add_argument('-dt',\n help='datatype for files',\n nargs='?',\n default='tabular',\n choices=['raster', 'vector', 'tabular'])\nautocreate_parser.add_argument('-d',\n help='turn a directory and subdirectories into scripts',\n action='store_true')\nautocreate_parser.add_argument('-e',\n help='encoding of 
the source file',\n nargs='?',\n default='utf-8')\nautocreate_parser.add_argument('-f', help='turn files into scripts', action='store_true')\nautocreate_parser.add_argument('-o',\n help='write scripts out to a designated directory',\n nargs='?',\n const='')\nautocreate_parser.add_argument('--skip-lines',\n help='skip a set number of lines before processing data',\n nargs=1,\n type=int)\n# retriever Install {Engine} ..\n# retriever download [options]\ninstall_subparsers = install_parser.add_subparsers(help='engine-specific help',\n dest='engine')\n\nfor engine in engine_list:\n if engine.name == \"Download Only\":\n # download engine follows, retriever download [dataset]\n download_parser.add_argument(\n 'dataset', help='dataset name').completer = ChoicesCompleter(script_list)\n else:\n engine_parser = install_subparsers.add_parser(engine.abbreviation,\n help=engine.name)\n engine_parser.add_argument(\n 'dataset', help='dataset name').completer = ChoicesCompleter(script_list)\n engine_parser.add_argument(\n '--hash-value',\n help='install dataset from provenance directory using hash value',\n default=None,\n required=False,\n type=str)\n engine_parser.add_argument('-b',\n '--bbox',\n nargs=4,\n help='Set bounding box xmin, ymin, xmax, ymax',\n required=False)\n if engine.name == \"JSON\":\n engine_parser.add_argument('-p',\n '--pretty',\n help='Add indentation to json file',\n action='store_true',\n required=False)\n\n abbreviations = set('h')\n\n for arg in engine.required_opts:\n arg_name, help_msg, default = arg[:3]\n potential_abbreviations = [char for char in arg_name if char not in abbreviations]\n if potential_abbreviations:\n abbreviation = potential_abbreviations[0]\n abbreviations.add(abbreviation)\n else:\n abbreviation = '-%s' % arg_name\n\n if engine.name == \"Download Only\" or abbreviation == \"download\":\n # add attributes to Download only engine\n download_parser.add_argument('--%s' % arg_name,\n '-%s' % abbreviation,\n help=help_msg,\n nargs='?',\n default=default)\n else:\n engine_parser.add_argument('--%s' % arg_name,\n '-%s' % abbreviation,\n help=help_msg,\n nargs='?',\n default=default)\n\nargcomplete.autocomplete(parser)\n", "path": "retriever/lib/get_opts.py"}], "after_files": [{"content": "import argparse\n\nimport argcomplete\nfrom argcomplete.completers import ChoicesCompleter\n\nfrom retriever.engines import engine_list\nfrom retriever.lib.defaults import VERSION, RETRIEVER_REPOSITORY\nfrom retriever.lib.scripts import SCRIPT_LIST, get_dataset_names_upstream\n\nmodule_list = SCRIPT_LIST()\nscript_list = []\nkeywords_list = []\nlicenses_list = []\n\nfor module in module_list:\n script_list.append(module.name)\n\n if hasattr(module, \"keywords\"):\n # Add list of keywords to keywords_list\n if module.keywords:\n keywords_list += module.keywords\n\n if hasattr(module, \"licenses\"):\n # Append string to list of licenses_list\n if module.licenses:\n for dict_items in module.licenses:\n if dict_items['name']:\n licenses_list.append(dict_items['name'])\n\nscript_list.extend(get_dataset_names_upstream(repo=RETRIEVER_REPOSITORY))\nscript_list.extend(get_dataset_names_upstream())\nscript_list = sorted(set(script_list))\n\n# set of all possible licenses, keywords and scripts\nlicenses_options = set(licenses_list)\nkeywords_options = set(keywords_list)\nscripts_options = script_list\n\nparser = argparse.ArgumentParser(prog=\"retriever\")\nparser.add_argument('-v', '--version', action='version', version=VERSION)\nparser.add_argument('-q',\n '--quiet',\n help='suppress 
command-line output',\n action='store_true')\n\n# ..............................................................\n# subparsers\n# ..............................................................\n\n# retriever HELP\nsubparsers = parser.add_subparsers(help='sub-command help', dest='command')\n\n# retriever download/install/update/new help\ndownload_parser = subparsers.add_parser('download',\n help='download raw data files for a dataset')\ninstall_parser = subparsers.add_parser('install', help='download and install dataset')\ndefault_parser = subparsers.add_parser('defaults', help='displays default options')\nupdate_parser = subparsers.add_parser('update',\n help='download updated versions of scripts')\nnew_parser = subparsers.add_parser('new', help='create a new sample retriever script')\nautocreate_parser = subparsers.add_parser(\n 'autocreate', help='CLI to automatically create retriever scripts')\nls_parser = subparsers.add_parser('ls',\n help='display a list all available dataset scripts')\ncitation_parser = subparsers.add_parser('citation', help='view citation')\nlicense_parser = subparsers.add_parser('license', help='view dataset license')\nreset_parser = subparsers.add_parser(\n 'reset',\n help='reset retriever: removes configuration settings, scripts, and cached data')\nhelp_parser = subparsers.add_parser('help', help='')\ncommit_parser = subparsers.add_parser('commit', help='commit a dataset')\ncommit_log_parser = subparsers.add_parser('log', help='see log of a committed dataset')\n\n# ..............................................................\n# subparsers with Arguments\n# ..............................................................\n\ncitation_parser.add_argument('dataset',\n help='dataset name',\n nargs='?',\n default=None,\n choices=script_list + [None])\ncommit_parser.add_argument('dataset', help='dataset name', choices=script_list)\ncommit_parser.add_argument('-p',\n '--path',\n help='path to store committed file',\n default=None,\n required=False)\ncommit_parser.add_argument('-m',\n '--message',\n help='commit message',\n default=None,\n required=True,\n type=str)\ncommit_log_parser.add_argument('dataset', help='dataset name', choices=script_list)\nlicense_parser.add_argument('dataset',\n help='dataset name',\n nargs='?',\n default=None,\n choices=script_list + [None])\nnew_parser.add_argument('filename', help='new script filename')\nreset_parser.add_argument('scope', help='things to reset: all, scripts or data').completer = \\\n ChoicesCompleter(script_list + ['all', 'scripts', 'data'])\ninstall_parser.add_argument('--compile',\n help='force re-compile of script before downloading',\n action='store_true')\ninstall_parser.add_argument('--debug', help='run in debug mode', action='store_true')\ninstall_parser.add_argument('--not-cached',\n help='overwrites local cache of raw data',\n action='store_true')\ndownload_parser.add_argument('--debug', help='run in debug mode', action='store_true')\ndownload_parser.add_argument('--not-cached',\n help='overwrites local cache of raw data',\n action='store_true')\ndownload_parser.add_argument('-b',\n '--bbox',\n nargs=4,\n help='Set bounding box xmin, ymin, xmax, ymax',\n required=False)\n\nls_parser.add_argument('-l', help='search datasets with specific license(s)',\n nargs='+').completer = ChoicesCompleter(list(licenses_options))\nls_parser.add_argument('-k', help='search datasets with keyword(s)',\n nargs='+').completer = ChoicesCompleter(list(keywords_options))\nls_parser.add_argument('-v', help='verbose list of specified 
dataset(s)',\n nargs='+').completer = ChoicesCompleter(list(scripts_options))\n\nautocreate_parser.add_argument('path', help='path to the data file(s)')\nautocreate_parser.add_argument('-dt',\n help='datatype for files',\n nargs='?',\n default='tabular',\n choices=['raster', 'vector', 'tabular'])\nautocreate_parser.add_argument('-d',\n help='turn a directory and subdirectories into scripts',\n action='store_true')\nautocreate_parser.add_argument('-e',\n help='encoding of the source file',\n nargs='?',\n default='utf-8')\nautocreate_parser.add_argument('-f', help='turn files into scripts', action='store_true')\nautocreate_parser.add_argument('-o',\n help='write scripts out to a designated directory',\n nargs='?',\n const='')\nautocreate_parser.add_argument('--skip-lines',\n help='skip a set number of lines before processing data',\n nargs=1,\n type=int)\n# retriever Install {Engine} ..\n# retriever download [options]\ninstall_subparsers = install_parser.add_subparsers(help='engine-specific help',\n dest='engine')\n\nfor engine in engine_list:\n if engine.name == \"Download Only\":\n # download engine follows, retriever download [dataset]\n download_parser.add_argument(\n 'dataset', help='dataset name').completer = ChoicesCompleter(script_list)\n else:\n engine_parser = install_subparsers.add_parser(engine.abbreviation,\n help=engine.name)\n engine_parser.add_argument(\n 'dataset', help='dataset name').completer = ChoicesCompleter(script_list)\n engine_parser.add_argument(\n '--hash-value',\n help='install dataset from provenance directory using hash value',\n default=None,\n required=False,\n type=str)\n engine_parser.add_argument('-b',\n '--bbox',\n nargs=4,\n help='Set bounding box xmin, ymin, xmax, ymax',\n required=False)\n if engine.name == \"JSON\":\n engine_parser.add_argument('-p',\n '--pretty',\n help='Add indentation to json file',\n action='store_true',\n required=False)\n\n abbreviations = set('h')\n\n for arg in engine.required_opts:\n arg_name, help_msg, default = arg[:3]\n potential_abbreviations = [char for char in arg_name if char not in abbreviations]\n if potential_abbreviations:\n abbreviation = potential_abbreviations[0]\n abbreviations.add(abbreviation)\n else:\n abbreviation = '-%s' % arg_name\n\n if engine.name == \"Download Only\" or abbreviation == \"download\":\n # add attributes to Download only engine\n download_parser.add_argument('--%s' % arg_name,\n '-%s' % abbreviation,\n help=help_msg,\n nargs='?',\n default=default)\n else:\n engine_parser.add_argument('--%s' % arg_name,\n '-%s' % abbreviation,\n help=help_msg,\n nargs='?',\n default=default)\n\nargcomplete.autocomplete(parser)\n", "path": "retriever/lib/get_opts.py"}]}
2,644
293
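Note on the record above: the before and after versions of `retriever/lib/get_opts.py` differ in how the `ls -v` flag is declared, moving from `nargs='*'` with `default=False` to `nargs='+'` backed by a `ChoicesCompleter` over the known script names. The snippet below is a minimal standalone argparse sketch showing what those two `nargs` settings actually accept; the parser and option names here are invented for illustration and it is not the retriever CLI itself.

```python
import argparse

# Hypothetical demo parser, only to contrast the two nargs declarations.
parser = argparse.ArgumentParser(prog="demo")
parser.add_argument("-v", nargs="*", default=False, help="zero or more values")
parser.add_argument("-k", nargs="+", help="one or more values")

print(parser.parse_args([]))                # Namespace(v=False, k=None): default used when flag absent
print(parser.parse_args(["-v"]))            # Namespace(v=[], k=None): empty list, not False
print(parser.parse_args(["-v", "a", "b"]))  # Namespace(v=['a', 'b'], k=None)
# parser.parse_args(["-k"]) exits with "argument -k: expected at least one argument"
```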
gh_patches_debug_5240
rasdani/github-patches
git_diff
pytorch__vision-2159
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- NMS returns different results on CPU & CUDA ## 🐛 Bug Hi, I noticed that results of `torchvision.ops.nms` in CPU and CUDA have different value ## To Reproduce Steps to reproduce the behavior: 1. `docker run --runtime=nvidia -it pytorch/pytorch:1.5-cuda10.1-cudnn7-devel bash` 2. run this script : ``` import torch import torchvision import random random.seed(0) torch.manual_seed(0) torch.backends.cudnn.deterministic = True boxes_for_nms = torch.tensor([[0.3764, 0.0905, 0.6533, 0.4487],[0.3744, 0.0899, 0.6535, 0.4513],[0.3753, 0.0916, 0.6532, 0.4512]]) scores = torch.tensor([1., 1., 1.]) iou_threshold = 0.2 cpu_keep = torchvision.ops.nms(boxes_for_nms, scores, iou_threshold) gpu_keep = torchvision.ops.nms(boxes_for_nms.to('cuda'), scores.to('cuda'), iou_threshold) print(torch.__version__, torchvision.__version__) print('cpu keep', cpu_keep) print('gpu keep', gpu_keep) print('cpu==gpu', int(cpu_keep)==int(gpu_keep)) ``` 3. output ``` 1.5.0 0.6.0a0+82fd1c8 cpu keep tensor([0]) gpu keep tensor([2], device='cuda:0') cpu==gpu False ``` ## Expected behavior `cpu==gpu True` ## Environment ``` PyTorch version: 1.5.0 Is debug build: No CUDA used to build PyTorch: 10.1 OS: Ubuntu 18.04.3 LTS GCC version: (Ubuntu 7.4.0-1ubuntu1~18.04.1) 7.4.0 CMake version: Could not collect Python version: 3.7 Is CUDA available: Yes CUDA runtime version: 10.1.243 GPU models and configuration: GPU 0: GeForce GTX 1060 with Max-Q Design Nvidia driver version: 418.56 cuDNN version: /usr/lib/x86_64-linux-gnu/libcudnn.so.7.6.5 Versions of relevant libraries: [pip] numpy==1.18.1 [pip] torch==1.5.0 [pip] torchvision==0.6.0a0+82fd1c8 [conda] blas 1.0 mkl [conda] cudatoolkit 10.1.243 h6bb024c_0 [conda] mkl 2020.0 166 [conda] mkl-service 2.3.0 py37he904b0f_0 [conda] mkl_fft 1.0.15 py37ha843d7b_0 [conda] mkl_random 1.1.0 py37hd6b4f25_0 [conda] numpy 1.18.1 py37h4f9e942_0 [conda] numpy-base 1.18.1 py37hde5b4d6_1 [conda] pytorch 1.5.0 py3.7_cuda10.1.243_cudnn7.6.3_0 pytorch [conda] torchvision 0.6.0 py37_cu101 pytorch ``` ## Additional context testing on pytorch 1.4.0 and torchvision 0.5.0 also yields different result, --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchvision/ops/boxes.py` Content: ``` 1 import torch 2 from torch.jit.annotations import Tuple 3 from torch import Tensor 4 import torchvision 5 6 7 def nms(boxes, scores, iou_threshold): 8 # type: (Tensor, Tensor, float) 9 """ 10 Performs non-maximum suppression (NMS) on the boxes according 11 to their intersection-over-union (IoU). 12 13 NMS iteratively removes lower scoring boxes which have an 14 IoU greater than iou_threshold with another (higher scoring) 15 box. 16 17 Parameters 18 ---------- 19 boxes : Tensor[N, 4]) 20 boxes to perform NMS on. They 21 are expected to be in (x1, y1, x2, y2) format 22 scores : Tensor[N] 23 scores for each one of the boxes 24 iou_threshold : float 25 discards all overlapping 26 boxes with IoU > iou_threshold 27 28 Returns 29 ------- 30 keep : Tensor 31 int64 tensor with the indices 32 of the elements that have been kept 33 by NMS, sorted in decreasing order of scores 34 """ 35 return torch.ops.torchvision.nms(boxes, scores, iou_threshold) 36 37 38 def batched_nms(boxes, scores, idxs, iou_threshold): 39 # type: (Tensor, Tensor, Tensor, float) 40 """ 41 Performs non-maximum suppression in a batched fashion. 
42 43 Each index value correspond to a category, and NMS 44 will not be applied between elements of different categories. 45 46 Parameters 47 ---------- 48 boxes : Tensor[N, 4] 49 boxes where NMS will be performed. They 50 are expected to be in (x1, y1, x2, y2) format 51 scores : Tensor[N] 52 scores for each one of the boxes 53 idxs : Tensor[N] 54 indices of the categories for each one of the boxes. 55 iou_threshold : float 56 discards all overlapping boxes 57 with IoU > iou_threshold 58 59 Returns 60 ------- 61 keep : Tensor 62 int64 tensor with the indices of 63 the elements that have been kept by NMS, sorted 64 in decreasing order of scores 65 """ 66 if boxes.numel() == 0: 67 return torch.empty((0,), dtype=torch.int64, device=boxes.device) 68 # strategy: in order to perform NMS independently per class. 69 # we add an offset to all the boxes. The offset is dependent 70 # only on the class idx, and is large enough so that boxes 71 # from different classes do not overlap 72 max_coordinate = boxes.max() 73 offsets = idxs.to(boxes) * (max_coordinate + 1) 74 boxes_for_nms = boxes + offsets[:, None] 75 keep = nms(boxes_for_nms, scores, iou_threshold) 76 return keep 77 78 79 def remove_small_boxes(boxes, min_size): 80 # type: (Tensor, float) 81 """ 82 Remove boxes which contains at least one side smaller than min_size. 83 84 Arguments: 85 boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format 86 min_size (float): minimum size 87 88 Returns: 89 keep (Tensor[K]): indices of the boxes that have both sides 90 larger than min_size 91 """ 92 ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1] 93 keep = (ws >= min_size) & (hs >= min_size) 94 keep = keep.nonzero().squeeze(1) 95 return keep 96 97 98 def clip_boxes_to_image(boxes, size): 99 # type: (Tensor, Tuple[int, int]) 100 """ 101 Clip boxes so that they lie inside an image of size `size`. 102 103 Arguments: 104 boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format 105 size (Tuple[height, width]): size of the image 106 107 Returns: 108 clipped_boxes (Tensor[N, 4]) 109 """ 110 dim = boxes.dim() 111 boxes_x = boxes[..., 0::2] 112 boxes_y = boxes[..., 1::2] 113 height, width = size 114 115 if torchvision._is_tracing(): 116 boxes_x = torch.max(boxes_x, torch.tensor(0, dtype=boxes.dtype, device=boxes.device)) 117 boxes_x = torch.min(boxes_x, torch.tensor(width, dtype=boxes.dtype, device=boxes.device)) 118 boxes_y = torch.max(boxes_y, torch.tensor(0, dtype=boxes.dtype, device=boxes.device)) 119 boxes_y = torch.min(boxes_y, torch.tensor(height, dtype=boxes.dtype, device=boxes.device)) 120 else: 121 boxes_x = boxes_x.clamp(min=0, max=width) 122 boxes_y = boxes_y.clamp(min=0, max=height) 123 124 clipped_boxes = torch.stack((boxes_x, boxes_y), dim=dim) 125 return clipped_boxes.reshape(boxes.shape) 126 127 128 def box_area(boxes): 129 """ 130 Computes the area of a set of bounding boxes, which are specified by its 131 (x1, y1, x2, y2) coordinates. 132 133 Arguments: 134 boxes (Tensor[N, 4]): boxes for which the area will be computed. They 135 are expected to be in (x1, y1, x2, y2) format 136 137 Returns: 138 area (Tensor[N]): area for each box 139 """ 140 return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) 141 142 143 # implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py 144 # with slight modifications 145 def box_iou(boxes1, boxes2): 146 """ 147 Return intersection-over-union (Jaccard index) of boxes. 148 149 Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
150 151 Arguments: 152 boxes1 (Tensor[N, 4]) 153 boxes2 (Tensor[M, 4]) 154 155 Returns: 156 iou (Tensor[N, M]): the NxM matrix containing the pairwise 157 IoU values for every element in boxes1 and boxes2 158 """ 159 area1 = box_area(boxes1) 160 area2 = box_area(boxes2) 161 162 lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] 163 rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] 164 165 wh = (rb - lt).clamp(min=0) # [N,M,2] 166 inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] 167 168 iou = inter / (area1[:, None] + area2 - inter) 169 return iou 170 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchvision/ops/boxes.py b/torchvision/ops/boxes.py --- a/torchvision/ops/boxes.py +++ b/torchvision/ops/boxes.py @@ -14,6 +14,11 @@ IoU greater than iou_threshold with another (higher scoring) box. + If multiple boxes have the exact same score and satisfy the IoU + criterion with respect to a reference box, the selected box is + not guaranteed to be the same between CPU and GPU. This is similar + to the behavior of argsort in PyTorch when repeated values are present. + Parameters ---------- boxes : Tensor[N, 4])
{"golden_diff": "diff --git a/torchvision/ops/boxes.py b/torchvision/ops/boxes.py\n--- a/torchvision/ops/boxes.py\n+++ b/torchvision/ops/boxes.py\n@@ -14,6 +14,11 @@\n IoU greater than iou_threshold with another (higher scoring)\n box.\n \n+ If multiple boxes have the exact same score and satisfy the IoU \n+ criterion with respect to a reference box, the selected box is \n+ not guaranteed to be the same between CPU and GPU. This is similar \n+ to the behavior of argsort in PyTorch when repeated values are present.\n+\n Parameters\n ----------\n boxes : Tensor[N, 4])\n", "issue": "NMS returns different results on CPU & CUDA\n## \ud83d\udc1b Bug\r\n\r\nHi, I noticed that results of `torchvision.ops.nms` in CPU and CUDA have different value\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. `docker run --runtime=nvidia -it pytorch/pytorch:1.5-cuda10.1-cudnn7-devel bash`\r\n2. run this script :\r\n ```\r\nimport torch\r\nimport torchvision\r\nimport random\r\n\r\nrandom.seed(0)\r\ntorch.manual_seed(0)\r\ntorch.backends.cudnn.deterministic = True\r\n\r\nboxes_for_nms = torch.tensor([[0.3764, 0.0905, 0.6533, 0.4487],[0.3744, 0.0899, 0.6535, 0.4513],[0.3753, 0.0916, 0.6532, 0.4512]])\r\nscores = torch.tensor([1., 1., 1.])\r\niou_threshold = 0.2\r\n\r\ncpu_keep = torchvision.ops.nms(boxes_for_nms, scores, iou_threshold)\r\ngpu_keep = torchvision.ops.nms(boxes_for_nms.to('cuda'), scores.to('cuda'), iou_threshold)\r\n\r\nprint(torch.__version__, torchvision.__version__)\r\nprint('cpu keep', cpu_keep)\r\nprint('gpu keep', gpu_keep)\r\nprint('cpu==gpu', int(cpu_keep)==int(gpu_keep))\r\n ```\r\n3. output\r\n```\r\n1.5.0 0.6.0a0+82fd1c8\r\ncpu keep tensor([0])\r\ngpu keep tensor([2], device='cuda:0')\r\ncpu==gpu False\r\n```\r\n\r\n## Expected behavior\r\n\r\n`cpu==gpu True`\r\n\r\n## Environment\r\n\r\n```\r\nPyTorch version: 1.5.0\r\nIs debug build: No\r\nCUDA used to build PyTorch: 10.1\r\n\r\nOS: Ubuntu 18.04.3 LTS\r\nGCC version: (Ubuntu 7.4.0-1ubuntu1~18.04.1) 7.4.0\r\nCMake version: Could not collect\r\n\r\nPython version: 3.7\r\nIs CUDA available: Yes\r\nCUDA runtime version: 10.1.243\r\nGPU models and configuration: GPU 0: GeForce GTX 1060 with Max-Q Design\r\nNvidia driver version: 418.56\r\ncuDNN version: /usr/lib/x86_64-linux-gnu/libcudnn.so.7.6.5\r\n\r\nVersions of relevant libraries:\r\n[pip] numpy==1.18.1\r\n[pip] torch==1.5.0\r\n[pip] torchvision==0.6.0a0+82fd1c8\r\n[conda] blas 1.0 mkl \r\n[conda] cudatoolkit 10.1.243 h6bb024c_0 \r\n[conda] mkl 2020.0 166 \r\n[conda] mkl-service 2.3.0 py37he904b0f_0 \r\n[conda] mkl_fft 1.0.15 py37ha843d7b_0 \r\n[conda] mkl_random 1.1.0 py37hd6b4f25_0 \r\n[conda] numpy 1.18.1 py37h4f9e942_0 \r\n[conda] numpy-base 1.18.1 py37hde5b4d6_1 \r\n[conda] pytorch 1.5.0 py3.7_cuda10.1.243_cudnn7.6.3_0 pytorch\r\n[conda] torchvision 0.6.0 py37_cu101 pytorch\r\n```\r\n\r\n## Additional context\r\n\r\ntesting on pytorch 1.4.0 and torchvision 0.5.0 also yields different result, \r\n\n", "before_files": [{"content": "import torch\nfrom torch.jit.annotations import Tuple\nfrom torch import Tensor\nimport torchvision\n\n\ndef nms(boxes, scores, iou_threshold):\n # type: (Tensor, Tensor, float)\n \"\"\"\n Performs non-maximum suppression (NMS) on the boxes according\n to their intersection-over-union (IoU).\n\n NMS iteratively removes lower scoring boxes which have an\n IoU greater than iou_threshold with another (higher scoring)\n box.\n\n Parameters\n ----------\n boxes : Tensor[N, 4])\n boxes to perform NMS on. 
They\n are expected to be in (x1, y1, x2, y2) format\n scores : Tensor[N]\n scores for each one of the boxes\n iou_threshold : float\n discards all overlapping\n boxes with IoU > iou_threshold\n\n Returns\n -------\n keep : Tensor\n int64 tensor with the indices\n of the elements that have been kept\n by NMS, sorted in decreasing order of scores\n \"\"\"\n return torch.ops.torchvision.nms(boxes, scores, iou_threshold)\n\n\ndef batched_nms(boxes, scores, idxs, iou_threshold):\n # type: (Tensor, Tensor, Tensor, float)\n \"\"\"\n Performs non-maximum suppression in a batched fashion.\n\n Each index value correspond to a category, and NMS\n will not be applied between elements of different categories.\n\n Parameters\n ----------\n boxes : Tensor[N, 4]\n boxes where NMS will be performed. They\n are expected to be in (x1, y1, x2, y2) format\n scores : Tensor[N]\n scores for each one of the boxes\n idxs : Tensor[N]\n indices of the categories for each one of the boxes.\n iou_threshold : float\n discards all overlapping boxes\n with IoU > iou_threshold\n\n Returns\n -------\n keep : Tensor\n int64 tensor with the indices of\n the elements that have been kept by NMS, sorted\n in decreasing order of scores\n \"\"\"\n if boxes.numel() == 0:\n return torch.empty((0,), dtype=torch.int64, device=boxes.device)\n # strategy: in order to perform NMS independently per class.\n # we add an offset to all the boxes. The offset is dependent\n # only on the class idx, and is large enough so that boxes\n # from different classes do not overlap\n max_coordinate = boxes.max()\n offsets = idxs.to(boxes) * (max_coordinate + 1)\n boxes_for_nms = boxes + offsets[:, None]\n keep = nms(boxes_for_nms, scores, iou_threshold)\n return keep\n\n\ndef remove_small_boxes(boxes, min_size):\n # type: (Tensor, float)\n \"\"\"\n Remove boxes which contains at least one side smaller than min_size.\n\n Arguments:\n boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format\n min_size (float): minimum size\n\n Returns:\n keep (Tensor[K]): indices of the boxes that have both sides\n larger than min_size\n \"\"\"\n ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]\n keep = (ws >= min_size) & (hs >= min_size)\n keep = keep.nonzero().squeeze(1)\n return keep\n\n\ndef clip_boxes_to_image(boxes, size):\n # type: (Tensor, Tuple[int, int])\n \"\"\"\n Clip boxes so that they lie inside an image of size `size`.\n\n Arguments:\n boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format\n size (Tuple[height, width]): size of the image\n\n Returns:\n clipped_boxes (Tensor[N, 4])\n \"\"\"\n dim = boxes.dim()\n boxes_x = boxes[..., 0::2]\n boxes_y = boxes[..., 1::2]\n height, width = size\n\n if torchvision._is_tracing():\n boxes_x = torch.max(boxes_x, torch.tensor(0, dtype=boxes.dtype, device=boxes.device))\n boxes_x = torch.min(boxes_x, torch.tensor(width, dtype=boxes.dtype, device=boxes.device))\n boxes_y = torch.max(boxes_y, torch.tensor(0, dtype=boxes.dtype, device=boxes.device))\n boxes_y = torch.min(boxes_y, torch.tensor(height, dtype=boxes.dtype, device=boxes.device))\n else:\n boxes_x = boxes_x.clamp(min=0, max=width)\n boxes_y = boxes_y.clamp(min=0, max=height)\n\n clipped_boxes = torch.stack((boxes_x, boxes_y), dim=dim)\n return clipped_boxes.reshape(boxes.shape)\n\n\ndef box_area(boxes):\n \"\"\"\n Computes the area of a set of bounding boxes, which are specified by its\n (x1, y1, x2, y2) coordinates.\n\n Arguments:\n boxes (Tensor[N, 4]): boxes for which the area will be computed. 
They\n are expected to be in (x1, y1, x2, y2) format\n\n Returns:\n area (Tensor[N]): area for each box\n \"\"\"\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n\n# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py\n# with slight modifications\ndef box_iou(boxes1, boxes2):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n\n Arguments:\n boxes1 (Tensor[N, 4])\n boxes2 (Tensor[M, 4])\n\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n area1 = box_area(boxes1)\n area2 = box_area(boxes2)\n\n lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]\n rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]\n\n wh = (rb - lt).clamp(min=0) # [N,M,2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n\n iou = inter / (area1[:, None] + area2 - inter)\n return iou\n", "path": "torchvision/ops/boxes.py"}], "after_files": [{"content": "import torch\nfrom torch.jit.annotations import Tuple\nfrom torch import Tensor\nimport torchvision\n\n\ndef nms(boxes, scores, iou_threshold):\n # type: (Tensor, Tensor, float)\n \"\"\"\n Performs non-maximum suppression (NMS) on the boxes according\n to their intersection-over-union (IoU).\n\n NMS iteratively removes lower scoring boxes which have an\n IoU greater than iou_threshold with another (higher scoring)\n box.\n\n If multiple boxes have the exact same score and satisfy the IoU \n criterion with respect to a reference box, the selected box is \n not guaranteed to be the same between CPU and GPU. This is similar \n to the behavior of argsort in PyTorch when repeated values are present.\n\n Parameters\n ----------\n boxes : Tensor[N, 4])\n boxes to perform NMS on. They\n are expected to be in (x1, y1, x2, y2) format\n scores : Tensor[N]\n scores for each one of the boxes\n iou_threshold : float\n discards all overlapping\n boxes with IoU > iou_threshold\n\n Returns\n -------\n keep : Tensor\n int64 tensor with the indices\n of the elements that have been kept\n by NMS, sorted in decreasing order of scores\n \"\"\"\n return torch.ops.torchvision.nms(boxes, scores, iou_threshold)\n\n\ndef batched_nms(boxes, scores, idxs, iou_threshold):\n # type: (Tensor, Tensor, Tensor, float)\n \"\"\"\n Performs non-maximum suppression in a batched fashion.\n\n Each index value correspond to a category, and NMS\n will not be applied between elements of different categories.\n\n Parameters\n ----------\n boxes : Tensor[N, 4]\n boxes where NMS will be performed. They\n are expected to be in (x1, y1, x2, y2) format\n scores : Tensor[N]\n scores for each one of the boxes\n idxs : Tensor[N]\n indices of the categories for each one of the boxes.\n iou_threshold : float\n discards all overlapping boxes\n with IoU > iou_threshold\n\n Returns\n -------\n keep : Tensor\n int64 tensor with the indices of\n the elements that have been kept by NMS, sorted\n in decreasing order of scores\n \"\"\"\n if boxes.numel() == 0:\n return torch.empty((0,), dtype=torch.int64, device=boxes.device)\n # strategy: in order to perform NMS independently per class.\n # we add an offset to all the boxes. 
The offset is dependent\n # only on the class idx, and is large enough so that boxes\n # from different classes do not overlap\n max_coordinate = boxes.max()\n offsets = idxs.to(boxes) * (max_coordinate + 1)\n boxes_for_nms = boxes + offsets[:, None]\n keep = nms(boxes_for_nms, scores, iou_threshold)\n return keep\n\n\ndef remove_small_boxes(boxes, min_size):\n # type: (Tensor, float)\n \"\"\"\n Remove boxes which contains at least one side smaller than min_size.\n\n Arguments:\n boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format\n min_size (float): minimum size\n\n Returns:\n keep (Tensor[K]): indices of the boxes that have both sides\n larger than min_size\n \"\"\"\n ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]\n keep = (ws >= min_size) & (hs >= min_size)\n keep = keep.nonzero().squeeze(1)\n return keep\n\n\ndef clip_boxes_to_image(boxes, size):\n # type: (Tensor, Tuple[int, int])\n \"\"\"\n Clip boxes so that they lie inside an image of size `size`.\n\n Arguments:\n boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format\n size (Tuple[height, width]): size of the image\n\n Returns:\n clipped_boxes (Tensor[N, 4])\n \"\"\"\n dim = boxes.dim()\n boxes_x = boxes[..., 0::2]\n boxes_y = boxes[..., 1::2]\n height, width = size\n\n if torchvision._is_tracing():\n boxes_x = torch.max(boxes_x, torch.tensor(0, dtype=boxes.dtype, device=boxes.device))\n boxes_x = torch.min(boxes_x, torch.tensor(width, dtype=boxes.dtype, device=boxes.device))\n boxes_y = torch.max(boxes_y, torch.tensor(0, dtype=boxes.dtype, device=boxes.device))\n boxes_y = torch.min(boxes_y, torch.tensor(height, dtype=boxes.dtype, device=boxes.device))\n else:\n boxes_x = boxes_x.clamp(min=0, max=width)\n boxes_y = boxes_y.clamp(min=0, max=height)\n\n clipped_boxes = torch.stack((boxes_x, boxes_y), dim=dim)\n return clipped_boxes.reshape(boxes.shape)\n\n\ndef box_area(boxes):\n \"\"\"\n Computes the area of a set of bounding boxes, which are specified by its\n (x1, y1, x2, y2) coordinates.\n\n Arguments:\n boxes (Tensor[N, 4]): boxes for which the area will be computed. They\n are expected to be in (x1, y1, x2, y2) format\n\n Returns:\n area (Tensor[N]): area for each box\n \"\"\"\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n\n# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py\n# with slight modifications\ndef box_iou(boxes1, boxes2):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n\n Arguments:\n boxes1 (Tensor[N, 4])\n boxes2 (Tensor[M, 4])\n\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n area1 = box_area(boxes1)\n area2 = box_area(boxes2)\n\n lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]\n rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]\n\n wh = (rb - lt).clamp(min=0) # [N,M,2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n\n iou = inter / (area1[:, None] + area2 - inter)\n return iou\n", "path": "torchvision/ops/boxes.py"}]}
3,073
156
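The golden diff in this record only documents the behaviour: when several boxes carry exactly the same score, the index kept by `nms` is not guaranteed to match between CPU and CUDA. A caller who needs identical results on both devices can break the tie explicitly before calling `nms`. The sketch below reuses the boxes from the reproduction script in the issue; the epsilon trick is a caller-side illustration, not part of the torchvision patch.

```python
import torch
import torchvision

# Boxes, scores and threshold taken from the reproduction script above.
boxes = torch.tensor([[0.3764, 0.0905, 0.6533, 0.4487],
                      [0.3744, 0.0899, 0.6535, 0.4513],
                      [0.3753, 0.0916, 0.6532, 0.4512]])
scores = torch.tensor([1.0, 1.0, 1.0])
iou_threshold = 0.2

# Subtract a tiny index-dependent offset so the scores are strictly ordered and
# box 0 is always processed first; keep the offset far below real score gaps.
tie_break = torch.arange(len(scores), dtype=scores.dtype) * 1e-6
keep = torchvision.ops.nms(boxes, scores - tie_break, iou_threshold)
print(keep)  # tensor([0]) on CPU; the same index should come back on CUDA
```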
gh_patches_debug_3637
rasdani/github-patches
git_diff
liqd__a4-meinberlin-3246
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- #3491 [mB] add video embed to interactive event **URL:** https://meinberlin-dev.liqd.net/projekte/design-project/ **device & browser:** *Safari Version 14.0 (15610.1.28.1.9, 15610)* **Comment/Question:** *Just to confirm, the live stream field should appear just when the project is published? Cause, I can't select the live stream section before being published, otherwise all good* <img width="1361" alt="Screenshot 2020-11-10 at 16 03 41" src="https://user-images.githubusercontent.com/59610786/98691968-e462ff80-236e-11eb-904b-755ff83b79cc.png"> <img width="1389" alt="Screenshot 2020-11-10 at 16 04 07" src="https://user-images.githubusercontent.com/59610786/98691978-e7f68680-236e-11eb-9a18-53ade0537fa8.png"> <img width="1330" alt="Screenshot 2020-11-10 at 16 04 24" src="https://user-images.githubusercontent.com/59610786/98691980-e927b380-236e-11eb-88a8-ad2c644e58df.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `meinberlin/apps/livequestions/dashboard.py` Content: ``` 1 from django.urls import reverse 2 from django.utils.translation import ugettext_lazy as _ 3 4 from adhocracy4.dashboard import DashboardComponent 5 from adhocracy4.dashboard import components 6 7 from . import views 8 9 10 class LiveStreamComponent(DashboardComponent): 11 identifier = 'live_stream' 12 weight = 20 13 label = _('Live Stream') 14 15 def is_effective(self, module): 16 module_app = module.phases[0].content().app 17 return (module_app == 'meinberlin_livequestions' and 18 not module.project.is_draft) 19 20 def get_progress(self, module): 21 return 0, 0 22 23 def get_base_url(self, module): 24 return reverse('a4dashboard:livequestions-livestream', kwargs={ 25 'module_slug': module.slug, 26 }) 27 28 def get_urls(self): 29 return [( 30 r'^modules/(?P<module_slug>[-\w_]+)/livestream/$', 31 views.LiveStreamDashboardView.as_view(component=self), 32 'livequestions-livestream' 33 )] 34 35 36 components.register_module(LiveStreamComponent()) 37 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/meinberlin/apps/livequestions/dashboard.py b/meinberlin/apps/livequestions/dashboard.py --- a/meinberlin/apps/livequestions/dashboard.py +++ b/meinberlin/apps/livequestions/dashboard.py @@ -14,8 +14,7 @@ def is_effective(self, module): module_app = module.phases[0].content().app - return (module_app == 'meinberlin_livequestions' and - not module.project.is_draft) + return (module_app == 'meinberlin_livequestions') def get_progress(self, module): return 0, 0
{"golden_diff": "diff --git a/meinberlin/apps/livequestions/dashboard.py b/meinberlin/apps/livequestions/dashboard.py\n--- a/meinberlin/apps/livequestions/dashboard.py\n+++ b/meinberlin/apps/livequestions/dashboard.py\n@@ -14,8 +14,7 @@\n \n def is_effective(self, module):\n module_app = module.phases[0].content().app\n- return (module_app == 'meinberlin_livequestions' and\n- not module.project.is_draft)\n+ return (module_app == 'meinberlin_livequestions')\n \n def get_progress(self, module):\n return 0, 0\n", "issue": "#3491 [mB] add video embed to interactive event \n**URL:** https://meinberlin-dev.liqd.net/projekte/design-project/\r\n**device & browser:** *Safari Version 14.0 (15610.1.28.1.9, 15610)*\r\n**Comment/Question:** \r\n*Just to confirm, the live stream field should appear just when the project is published? Cause, I can't select the live stream section before being published, otherwise all good* \r\n\r\n<img width=\"1361\" alt=\"Screenshot 2020-11-10 at 16 03 41\" src=\"https://user-images.githubusercontent.com/59610786/98691968-e462ff80-236e-11eb-904b-755ff83b79cc.png\">\r\n<img width=\"1389\" alt=\"Screenshot 2020-11-10 at 16 04 07\" src=\"https://user-images.githubusercontent.com/59610786/98691978-e7f68680-236e-11eb-9a18-53ade0537fa8.png\">\r\n<img width=\"1330\" alt=\"Screenshot 2020-11-10 at 16 04 24\" src=\"https://user-images.githubusercontent.com/59610786/98691980-e927b380-236e-11eb-88a8-ad2c644e58df.png\">\r\n\r\n\n", "before_files": [{"content": "from django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard import DashboardComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import views\n\n\nclass LiveStreamComponent(DashboardComponent):\n identifier = 'live_stream'\n weight = 20\n label = _('Live Stream')\n\n def is_effective(self, module):\n module_app = module.phases[0].content().app\n return (module_app == 'meinberlin_livequestions' and\n not module.project.is_draft)\n\n def get_progress(self, module):\n return 0, 0\n\n def get_base_url(self, module):\n return reverse('a4dashboard:livequestions-livestream', kwargs={\n 'module_slug': module.slug,\n })\n\n def get_urls(self):\n return [(\n r'^modules/(?P<module_slug>[-\\w_]+)/livestream/$',\n views.LiveStreamDashboardView.as_view(component=self),\n 'livequestions-livestream'\n )]\n\n\ncomponents.register_module(LiveStreamComponent())\n", "path": "meinberlin/apps/livequestions/dashboard.py"}], "after_files": [{"content": "from django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard import DashboardComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import views\n\n\nclass LiveStreamComponent(DashboardComponent):\n identifier = 'live_stream'\n weight = 20\n label = _('Live Stream')\n\n def is_effective(self, module):\n module_app = module.phases[0].content().app\n return (module_app == 'meinberlin_livequestions')\n\n def get_progress(self, module):\n return 0, 0\n\n def get_base_url(self, module):\n return reverse('a4dashboard:livequestions-livestream', kwargs={\n 'module_slug': module.slug,\n })\n\n def get_urls(self):\n return [(\n r'^modules/(?P<module_slug>[-\\w_]+)/livestream/$',\n views.LiveStreamDashboardView.as_view(component=self),\n 'livequestions-livestream'\n )]\n\n\ncomponents.register_module(LiveStreamComponent())\n", "path": "meinberlin/apps/livequestions/dashboard.py"}]}
962
141
gh_patches_debug_11143
rasdani/github-patches
git_diff
qtile__qtile-2811
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Set version using importlib.metadata <!-- Please do not ask general questions here! There are [community contact](https://github.com/qtile/qtile#community) options for that. If you are suggesting a new feature/enhancement please instead post it on the discussions board as an idea: https://github.com/qtile/qtile/discussions/categories/ideas --> # Issue description Currently, if setuptools is not installed on the system running qtile, it will run into issues upon start. An Arch user reported this downstream: https://bugs.archlinux.org/task/71804 Apart from also guarding against `ModuleNotFoundError` I think it could be a great idea to [use importlib.metadata to provide qtile's version](https://docs.python.org/3.9/library/importlib.metadata.html?highlight=importlib%20metadata#distribution-versions) instead for newer python versions. <!-- A brief discussion of what failed and how it failed. A description of what you tried is helpful, i.e. "When I use lazy.kill() on a window I get the following stack trace" instead of "Closing windows doesn't work". --> # Qtile version 0.18.1 # Stack traces Copied verbatim from the issue reported downstream: ``` Traceback (most recent call last): File "/usr/lib/python3.9/site-packages/libqtile/scripts/main.py", line 9, in <module> import pkg_resources ModuleNotFoundError: No module named 'pkg_resources' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/bin/qtile", line 33, in <module> sys.exit(load_entry_point('qtile==0.18.1.dev0+g8e7ecc0a.d20210719', 'console_scripts', 'qtile')()) File "/usr/bin/qtile", line 25, in importlib_load_entry_point return next(matches).load() File "/usr/lib/python3.9/importlib/metadata.py", line 77, in load module = import_module(match.group('module')) File "/usr/lib/python3.9/importlib/__init__.py", line 127, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 1030, in _gcd_import File "<frozen importlib._bootstrap>", line 1007, in _find_and_load File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 680, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 850, in exec_module File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed File "/usr/lib/python3.9/site-packages/libqtile/scripts/main.py", line 11, in <module> except (pkg_resources.DistributionNotFound, ImportError): NameError: name 'pkg_resources' is not defined ``` # Configuration not important for this issue --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `libqtile/scripts/main.py` Content: ``` 1 import argparse 2 import logging 3 import sys 4 5 from libqtile.log_utils import init_log 6 from libqtile.scripts import check, cmd_obj, migrate, run_cmd, shell, start, top 7 8 try: 9 import pkg_resources 10 VERSION = pkg_resources.require("qtile")[0].version 11 except (pkg_resources.DistributionNotFound, ImportError): 12 VERSION = 'dev' 13 14 15 def main(): 16 parent_parser = argparse.ArgumentParser(add_help=False) 17 parent_parser.add_argument( 18 '-l', '--log-level', 19 default='WARNING', 20 dest='log_level', 21 type=str.upper, 22 choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'), 23 help='Set qtile log level' 24 ) 25 26 main_parser = argparse.ArgumentParser( 27 prog='qtile', 28 description='A full-featured, pure-Python tiling window manager.', 29 ) 30 main_parser.add_argument( 31 '-v', '--version', 32 action='version', 33 version=VERSION, 34 ) 35 36 subparsers = main_parser.add_subparsers() 37 start.add_subcommand(subparsers, [parent_parser]) 38 shell.add_subcommand(subparsers, [parent_parser]) 39 top.add_subcommand(subparsers, [parent_parser]) 40 run_cmd.add_subcommand(subparsers, [parent_parser]) 41 cmd_obj.add_subcommand(subparsers, [parent_parser]) 42 check.add_subcommand(subparsers, [parent_parser]) 43 migrate.add_subcommand(subparsers, [parent_parser]) 44 45 # `qtile help` should print help 46 def print_help(options): 47 main_parser.print_help() 48 help_ = subparsers.add_parser("help", help="Print help information and exit") 49 help_.set_defaults(func=print_help) 50 51 options = main_parser.parse_args() 52 try: 53 log_level = getattr(logging, options.log_level) 54 init_log(log_level=log_level, log_color=sys.stdout.isatty()) 55 options.func(options) 56 except AttributeError: 57 main_parser.print_usage() 58 print("") 59 print("Did you mean:") 60 print(" ".join(sys.argv + ['start'])) 61 sys.exit(1) 62 63 64 if __name__ == "__main__": 65 main() 66 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libqtile/scripts/main.py b/libqtile/scripts/main.py --- a/libqtile/scripts/main.py +++ b/libqtile/scripts/main.py @@ -6,10 +6,16 @@ from libqtile.scripts import check, cmd_obj, migrate, run_cmd, shell, start, top try: - import pkg_resources - VERSION = pkg_resources.require("qtile")[0].version -except (pkg_resources.DistributionNotFound, ImportError): - VERSION = 'dev' + # Python>3.7 can get the version from importlib + from importlib.metadata import distribution + VERSION = distribution("qtile").version +except ModuleNotFoundError: + try: + # pkg_resources is required for 3.7 + import pkg_resources + VERSION = pkg_resources.require("qtile")[0].version + except (pkg_resources.DistributionNotFound, ModuleNotFoundError): + VERSION = 'dev' def main():
{"golden_diff": "diff --git a/libqtile/scripts/main.py b/libqtile/scripts/main.py\n--- a/libqtile/scripts/main.py\n+++ b/libqtile/scripts/main.py\n@@ -6,10 +6,16 @@\n from libqtile.scripts import check, cmd_obj, migrate, run_cmd, shell, start, top\n \n try:\n- import pkg_resources\n- VERSION = pkg_resources.require(\"qtile\")[0].version\n-except (pkg_resources.DistributionNotFound, ImportError):\n- VERSION = 'dev'\n+ # Python>3.7 can get the version from importlib\n+ from importlib.metadata import distribution\n+ VERSION = distribution(\"qtile\").version\n+except ModuleNotFoundError:\n+ try:\n+ # pkg_resources is required for 3.7\n+ import pkg_resources\n+ VERSION = pkg_resources.require(\"qtile\")[0].version\n+ except (pkg_resources.DistributionNotFound, ModuleNotFoundError):\n+ VERSION = 'dev'\n \n \n def main():\n", "issue": "Set version using importlib.metadata\n<!--\r\nPlease do not ask general questions here! There are [community\r\ncontact](https://github.com/qtile/qtile#community) options for that.\r\n\r\nIf you are suggesting a new feature/enhancement please instead post it on the\r\ndiscussions board as an idea: https://github.com/qtile/qtile/discussions/categories/ideas\r\n-->\r\n\r\n# Issue description\r\n\r\nCurrently, if setuptools is not installed on the system running qtile, it will run into issues upon start.\r\nAn Arch user reported this downstream: https://bugs.archlinux.org/task/71804\r\n\r\nApart from also guarding against `ModuleNotFoundError` I think it could be a great idea to [use importlib.metadata to provide qtile's version](https://docs.python.org/3.9/library/importlib.metadata.html?highlight=importlib%20metadata#distribution-versions) instead for newer python versions.\r\n<!--\r\nA brief discussion of what failed and how it failed. A description of\r\nwhat you tried is helpful, i.e. 
\"When I use lazy.kill() on a window I get\r\nthe following stack trace\" instead of \"Closing windows doesn't work\".\r\n-->\r\n\r\n# Qtile version\r\n\r\n0.18.1\r\n\r\n# Stack traces\r\n\r\nCopied verbatim from the issue reported downstream:\r\n\r\n```\r\nTraceback (most recent call last):\r\nFile \"/usr/lib/python3.9/site-packages/libqtile/scripts/main.py\", line 9, in <module>\r\nimport pkg_resources\r\nModuleNotFoundError: No module named 'pkg_resources'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\nFile \"/usr/bin/qtile\", line 33, in <module>\r\nsys.exit(load_entry_point('qtile==0.18.1.dev0+g8e7ecc0a.d20210719', 'console_scripts', 'qtile')())\r\nFile \"/usr/bin/qtile\", line 25, in importlib_load_entry_point\r\nreturn next(matches).load()\r\nFile \"/usr/lib/python3.9/importlib/metadata.py\", line 77, in load\r\nmodule = import_module(match.group('module'))\r\nFile \"/usr/lib/python3.9/importlib/__init__.py\", line 127, in import_module\r\nreturn _bootstrap._gcd_import(name[level:], package, level)\r\nFile \"<frozen importlib._bootstrap>\", line 1030, in _gcd_import\r\nFile \"<frozen importlib._bootstrap>\", line 1007, in _find_and_load\r\nFile \"<frozen importlib._bootstrap>\", line 986, in _find_and_load_unlocked\r\nFile \"<frozen importlib._bootstrap>\", line 680, in _load_unlocked\r\nFile \"<frozen importlib._bootstrap_external>\", line 850, in exec_module\r\nFile \"<frozen importlib._bootstrap>\", line 228, in _call_with_frames_removed\r\nFile \"/usr/lib/python3.9/site-packages/libqtile/scripts/main.py\", line 11, in <module>\r\nexcept (pkg_resources.DistributionNotFound, ImportError):\r\nNameError: name 'pkg_resources' is not defined\r\n```\r\n\r\n# Configuration\r\n\r\nnot important for this issue\n", "before_files": [{"content": "import argparse\nimport logging\nimport sys\n\nfrom libqtile.log_utils import init_log\nfrom libqtile.scripts import check, cmd_obj, migrate, run_cmd, shell, start, top\n\ntry:\n import pkg_resources\n VERSION = pkg_resources.require(\"qtile\")[0].version\nexcept (pkg_resources.DistributionNotFound, ImportError):\n VERSION = 'dev'\n\n\ndef main():\n parent_parser = argparse.ArgumentParser(add_help=False)\n parent_parser.add_argument(\n '-l', '--log-level',\n default='WARNING',\n dest='log_level',\n type=str.upper,\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n help='Set qtile log level'\n )\n\n main_parser = argparse.ArgumentParser(\n prog='qtile',\n description='A full-featured, pure-Python tiling window manager.',\n )\n main_parser.add_argument(\n '-v', '--version',\n action='version',\n version=VERSION,\n )\n\n subparsers = main_parser.add_subparsers()\n start.add_subcommand(subparsers, [parent_parser])\n shell.add_subcommand(subparsers, [parent_parser])\n top.add_subcommand(subparsers, [parent_parser])\n run_cmd.add_subcommand(subparsers, [parent_parser])\n cmd_obj.add_subcommand(subparsers, [parent_parser])\n check.add_subcommand(subparsers, [parent_parser])\n migrate.add_subcommand(subparsers, [parent_parser])\n\n # `qtile help` should print help\n def print_help(options):\n main_parser.print_help()\n help_ = subparsers.add_parser(\"help\", help=\"Print help information and exit\")\n help_.set_defaults(func=print_help)\n\n options = main_parser.parse_args()\n try:\n log_level = getattr(logging, options.log_level)\n init_log(log_level=log_level, log_color=sys.stdout.isatty())\n options.func(options)\n except AttributeError:\n main_parser.print_usage()\n 
print(\"\")\n print(\"Did you mean:\")\n print(\" \".join(sys.argv + ['start']))\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "libqtile/scripts/main.py"}], "after_files": [{"content": "import argparse\nimport logging\nimport sys\n\nfrom libqtile.log_utils import init_log\nfrom libqtile.scripts import check, cmd_obj, migrate, run_cmd, shell, start, top\n\ntry:\n # Python>3.7 can get the version from importlib\n from importlib.metadata import distribution\n VERSION = distribution(\"qtile\").version\nexcept ModuleNotFoundError:\n try:\n # pkg_resources is required for 3.7\n import pkg_resources\n VERSION = pkg_resources.require(\"qtile\")[0].version\n except (pkg_resources.DistributionNotFound, ModuleNotFoundError):\n VERSION = 'dev'\n\n\ndef main():\n parent_parser = argparse.ArgumentParser(add_help=False)\n parent_parser.add_argument(\n '-l', '--log-level',\n default='WARNING',\n dest='log_level',\n type=str.upper,\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n help='Set qtile log level'\n )\n\n main_parser = argparse.ArgumentParser(\n prog='qtile',\n description='A full-featured, pure-Python tiling window manager.',\n )\n main_parser.add_argument(\n '-v', '--version',\n action='version',\n version=VERSION,\n )\n\n subparsers = main_parser.add_subparsers()\n start.add_subcommand(subparsers, [parent_parser])\n shell.add_subcommand(subparsers, [parent_parser])\n top.add_subcommand(subparsers, [parent_parser])\n run_cmd.add_subcommand(subparsers, [parent_parser])\n cmd_obj.add_subcommand(subparsers, [parent_parser])\n check.add_subcommand(subparsers, [parent_parser])\n migrate.add_subcommand(subparsers, [parent_parser])\n\n # `qtile help` should print help\n def print_help(options):\n main_parser.print_help()\n help_ = subparsers.add_parser(\"help\", help=\"Print help information and exit\")\n help_.set_defaults(func=print_help)\n\n options = main_parser.parse_args()\n try:\n log_level = getattr(logging, options.log_level)\n init_log(log_level=log_level, log_color=sys.stdout.isatty())\n options.func(options)\n except AttributeError:\n main_parser.print_usage()\n print(\"\")\n print(\"Did you mean:\")\n print(\" \".join(sys.argv + ['start']))\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "libqtile/scripts/main.py"}]}
1,520
215
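The patched qtile code in this record still keeps a `pkg_resources` fallback for Python 3.7. On Python 3.8 and newer the whole lookup can be done with the standard library alone; the sketch below shows that general pattern and is a simplified illustration, not the exact code merged into qtile.

```python
from importlib.metadata import PackageNotFoundError, version

# importlib.metadata ships with the standard library on Python >= 3.8, so this
# works even when setuptools / pkg_resources is not installed on the system.
try:
    VERSION = version("qtile")      # version string of the installed distribution
except PackageNotFoundError:
    VERSION = "dev"                 # e.g. running from an uninstalled source tree

print(VERSION)
```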
gh_patches_debug_11452
rasdani/github-patches
git_diff
cupy__cupy-1138
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- cupy.random.permutation() overwrites its argument. `cupy.random.permutation()` overwrites its argument. This is incompatible with `numpy.random.permutation`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cupy/random/permutations.py` Content: ``` 1 from cupy.random import generator 2 import six 3 4 5 def shuffle(a): 6 """Shuffles an array. 7 8 Args: 9 a (cupy.ndarray): The array to be shuffled. 10 11 .. seealso:: :func:`numpy.random.shuffle` 12 13 """ 14 rs = generator.get_random_state() 15 return rs.shuffle(a) 16 17 18 def permutation(a): 19 """Returns a permuted range or shuffles an array.""" 20 if isinstance(a, six.integer_types): 21 rs = generator.get_random_state() 22 return rs.permutation(a) 23 else: 24 return shuffle(a) 25 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cupy/random/permutations.py b/cupy/random/permutations.py --- a/cupy/random/permutations.py +++ b/cupy/random/permutations.py @@ -16,9 +16,20 @@ def permutation(a): - """Returns a permuted range or shuffles an array.""" + """Returns a permuted range or a permutation of an array. + + Args: + a (int or cupy.ndarray): The range or the array to be shuffled. + + Returns: + cupy.ndarray: If `a` is an integer, it is permutation range between 0 + and `a` - 1. + Otherwise, it is a permutation of `a`. + + .. seealso:: :func:`numpy.random.permutation` + """ + rs = generator.get_random_state() if isinstance(a, six.integer_types): - rs = generator.get_random_state() return rs.permutation(a) else: - return shuffle(a) + return a[rs.permutation(len(a))]
{"golden_diff": "diff --git a/cupy/random/permutations.py b/cupy/random/permutations.py\n--- a/cupy/random/permutations.py\n+++ b/cupy/random/permutations.py\n@@ -16,9 +16,20 @@\n \n \n def permutation(a):\n- \"\"\"Returns a permuted range or shuffles an array.\"\"\"\n+ \"\"\"Returns a permuted range or a permutation of an array.\n+\n+ Args:\n+ a (int or cupy.ndarray): The range or the array to be shuffled.\n+\n+ Returns:\n+ cupy.ndarray: If `a` is an integer, it is permutation range between 0\n+ and `a` - 1.\n+ Otherwise, it is a permutation of `a`.\n+\n+ .. seealso:: :func:`numpy.random.permutation`\n+ \"\"\"\n+ rs = generator.get_random_state()\n if isinstance(a, six.integer_types):\n- rs = generator.get_random_state()\n return rs.permutation(a)\n else:\n- return shuffle(a)\n+ return a[rs.permutation(len(a))]\n", "issue": "cupy.random.permutation() overwrites its argument.\n`cupy.random.permutation()` overwrites its argument.\r\nThis is incompatible with `numpy.random.permutation`.\r\n\n", "before_files": [{"content": "from cupy.random import generator\nimport six\n\n\ndef shuffle(a):\n \"\"\"Shuffles an array.\n\n Args:\n a (cupy.ndarray): The array to be shuffled.\n\n .. seealso:: :func:`numpy.random.shuffle`\n\n \"\"\"\n rs = generator.get_random_state()\n return rs.shuffle(a)\n\n\ndef permutation(a):\n \"\"\"Returns a permuted range or shuffles an array.\"\"\"\n if isinstance(a, six.integer_types):\n rs = generator.get_random_state()\n return rs.permutation(a)\n else:\n return shuffle(a)\n", "path": "cupy/random/permutations.py"}], "after_files": [{"content": "from cupy.random import generator\nimport six\n\n\ndef shuffle(a):\n \"\"\"Shuffles an array.\n\n Args:\n a (cupy.ndarray): The array to be shuffled.\n\n .. seealso:: :func:`numpy.random.shuffle`\n\n \"\"\"\n rs = generator.get_random_state()\n return rs.shuffle(a)\n\n\ndef permutation(a):\n \"\"\"Returns a permuted range or a permutation of an array.\n\n Args:\n a (int or cupy.ndarray): The range or the array to be shuffled.\n\n Returns:\n cupy.ndarray: If `a` is an integer, it is permutation range between 0\n and `a` - 1.\n Otherwise, it is a permutation of `a`.\n\n .. seealso:: :func:`numpy.random.permutation`\n \"\"\"\n rs = generator.get_random_state()\n if isinstance(a, six.integer_types):\n return rs.permutation(a)\n else:\n return a[rs.permutation(len(a))]\n", "path": "cupy/random/permutations.py"}]}
457
228
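For the cupy instance above, the golden diff replaces the in-place `shuffle(a)` call with index-based selection, `a[rs.permutation(len(a))]`, so the argument is no longer mutated. The sketch below illustrates that distinction with NumPy, the compatibility target named in the issue, since cupy itself needs a GPU to run; the variable names are illustrative and not taken from the cupy source.

```python
import numpy as np

# NumPy stands in for cupy here so the sketch runs on any machine; the
# semantics being illustrated are the ones the patch restores.
rng = np.random.RandomState(0)
a = np.arange(5)

# shuffle() reorders its argument in place and returns None.
b = a.copy()
rng.shuffle(b)
print("shuffled copy :", b)

# permutation() must return a new, reordered array and leave `a` untouched.
p = rng.permutation(a)
print("permutation   :", p)
print("original      :", a)   # still [0 1 2 3 4]

# The patched cupy code gets the same effect by indexing with a permuted
# range of positions instead of shuffling the array itself.
idx = rng.permutation(len(a))
print("index-based   :", a[idx], "| original still", a)
```

Hoisting `rs = generator.get_random_state()` above the `if` in the patch keeps the random-state lookup identical for both the integer and the array branch.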
gh_patches_debug_31686
rasdani/github-patches
git_diff
translate__translate-4045
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- RemovedInTTK2Warning seems strange There is ``RemovedInTTK2Warning`` which apparently was meant to flag feature which will be removed in translate-toolkit 2. However it is already out and that did not happen :-). Either RemovedInTTK2Warning should be renamed as translate-toolkit 2 has already been released, or the deprecation should be applied. However it seems that quite a lot of the code seems to rely on that behavior. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `translate/misc/multistring.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright 2006 Zuza Software Foundation 4 # 5 # This file is part of translate. 6 # 7 # translate is free software; you can redistribute it and/or modify 8 # it under the terms of the GNU General Public License as published by 9 # the Free Software Foundation; either version 2 of the License, or 10 # (at your option) any later version. 11 # 12 # translate is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU General Public License for more details. 16 # 17 # You should have received a copy of the GNU General Public License 18 # along with this program; if not, see <http://www.gnu.org/licenses/>. 19 20 """Supports a hybrid Unicode string that can also have a list of alternate 21 strings in the strings attribute 22 """ 23 24 import warnings 25 26 27 from .deprecation import RemovedInTTK2Warning 28 29 30 def _create_text_type(newtype, string, encoding): 31 """Helper to construct a text type out of characters or bytes. Required to 32 temporarily preserve backwards compatibility. Must be removed in TTK2. 33 """ 34 if string is None: 35 string = '' 36 if isinstance(string, str): 37 return str.__new__(newtype, string) 38 39 warnings.warn( 40 'Passing non-ASCII bytes as well as the `encoding` argument to ' 41 '`multistring` is deprecated. 
Always pass unicode characters instead.', 42 RemovedInTTK2Warning, stacklevel=2, 43 ) 44 return str.__new__(newtype, string, encoding) 45 46 47 class multistring(str): 48 49 def __new__(newtype, string=u"", *args, **kwargs): 50 encoding = kwargs.pop('encoding', 'utf-8') 51 if isinstance(string, list): 52 if not string: 53 raise ValueError("multistring must contain at least one string") 54 newstring = _create_text_type(newtype, string[0], encoding) 55 newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]] 56 else: 57 newstring = _create_text_type(newtype, string, encoding) 58 newstring.strings = [newstring] 59 return newstring 60 61 def __init__(self, *args, **kwargs): 62 super().__init__() 63 if not hasattr(self, "strings"): 64 self.strings = [] 65 66 def __cmp__(self, otherstring): 67 def cmp_compat(s1, s2): 68 # Python 3 compatible cmp() equivalent 69 return (s1 > s2) - (s1 < s2) 70 if isinstance(otherstring, multistring): 71 parentcompare = cmp_compat(str(self), otherstring) 72 if parentcompare: 73 return parentcompare 74 else: 75 return cmp_compat(self.strings[1:], otherstring.strings[1:]) 76 elif isinstance(otherstring, str): 77 return cmp_compat(str(self), otherstring) 78 elif isinstance(otherstring, bytes): 79 return cmp_compat(self.encode('utf-8'), otherstring) 80 elif isinstance(otherstring, list) and otherstring: 81 return cmp_compat(self, multistring(otherstring)) 82 else: 83 return cmp_compat(str(type(self)), str(type(otherstring))) 84 85 def __hash__(self): 86 return hash(str(self)) 87 88 def __ne__(self, otherstring): 89 return self.__cmp__(otherstring) != 0 90 91 def __eq__(self, otherstring): 92 return self.__cmp__(otherstring) == 0 93 94 def __repr__(self): 95 return u"multistring(%r)" % ( 96 [str(item) for item in self.strings] 97 ) 98 99 def replace(self, old, new, count=None): 100 if count is None: 101 newstr = multistring(super().replace(old, new)) 102 else: 103 newstr = multistring(super().replace(old, new, count)) 104 for s in self.strings[1:]: 105 if count is None: 106 newstr.strings.append(s.replace(old, new)) 107 else: 108 newstr.strings.append(s.replace(old, new, count)) 109 return newstr 110 ``` Path: `translate/misc/deprecation.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright 2014 Zuza Software Foundation 4 # 5 # This file is part of translate. 6 # 7 # translate is free software; you can redistribute it and/or modify it under 8 # the terms of the GNU General Public License as published by the Free Software 9 # Foundation; either version 2 of the License, or (at your option) any later 10 # version. 11 # 12 # translate is distributed in the hope that it will be useful, but WITHOUT ANY 13 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR 14 # A PARTICULAR PURPOSE. See the GNU General Public License for more details. 15 # 16 # You should have received a copy of the GNU General Public License along with 17 # this program; if not, see <http://www.gnu.org/licenses/>. 18 19 import warnings 20 from functools import wraps 21 22 23 class RemovedInTTK2Warning(DeprecationWarning): 24 pass 25 26 27 def deprecated(message=""): 28 """Decorator that marks functions and methods as deprecated. 29 30 A warning will be emitted when the function or method is used. If a custom 31 message is provided, it will be shown after the default warning message. 32 """ 33 def inner_render(func): 34 @wraps(func) 35 def new_func(*args, **kwargs): 36 msg = message # Hack to avoid UnboundLocalError. 
37 if msg: 38 msg = "\n" + msg 39 func_code = func.__code__ 40 warnings.warn_explicit( 41 "Call to deprecated function {0}.{1}".format(func.__name__, 42 msg), 43 category=DeprecationWarning, 44 filename=func_code.co_filename, 45 lineno=func_code.co_firstlineno + 1 46 ) 47 return func(*args, **kwargs) 48 return new_func 49 return inner_render 50 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/translate/misc/deprecation.py b/translate/misc/deprecation.py --- a/translate/misc/deprecation.py +++ b/translate/misc/deprecation.py @@ -20,10 +20,6 @@ from functools import wraps -class RemovedInTTK2Warning(DeprecationWarning): - pass - - def deprecated(message=""): """Decorator that marks functions and methods as deprecated. diff --git a/translate/misc/multistring.py b/translate/misc/multistring.py --- a/translate/misc/multistring.py +++ b/translate/misc/multistring.py @@ -21,40 +21,17 @@ strings in the strings attribute """ -import warnings - - -from .deprecation import RemovedInTTK2Warning - - -def _create_text_type(newtype, string, encoding): - """Helper to construct a text type out of characters or bytes. Required to - temporarily preserve backwards compatibility. Must be removed in TTK2. - """ - if string is None: - string = '' - if isinstance(string, str): - return str.__new__(newtype, string) - - warnings.warn( - 'Passing non-ASCII bytes as well as the `encoding` argument to ' - '`multistring` is deprecated. Always pass unicode characters instead.', - RemovedInTTK2Warning, stacklevel=2, - ) - return str.__new__(newtype, string, encoding) - class multistring(str): - def __new__(newtype, string=u"", *args, **kwargs): - encoding = kwargs.pop('encoding', 'utf-8') + def __new__(newtype, string=""): if isinstance(string, list): if not string: raise ValueError("multistring must contain at least one string") - newstring = _create_text_type(newtype, string[0], encoding) + newstring = str.__new__(newtype, string[0]) newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]] else: - newstring = _create_text_type(newtype, string, encoding) + newstring = str.__new__(newtype, string) newstring.strings = [newstring] return newstring
{"golden_diff": "diff --git a/translate/misc/deprecation.py b/translate/misc/deprecation.py\n--- a/translate/misc/deprecation.py\n+++ b/translate/misc/deprecation.py\n@@ -20,10 +20,6 @@\n from functools import wraps\n \n \n-class RemovedInTTK2Warning(DeprecationWarning):\n- pass\n-\n-\n def deprecated(message=\"\"):\n \"\"\"Decorator that marks functions and methods as deprecated.\n \ndiff --git a/translate/misc/multistring.py b/translate/misc/multistring.py\n--- a/translate/misc/multistring.py\n+++ b/translate/misc/multistring.py\n@@ -21,40 +21,17 @@\n strings in the strings attribute\n \"\"\"\n \n-import warnings\n-\n-\n-from .deprecation import RemovedInTTK2Warning\n-\n-\n-def _create_text_type(newtype, string, encoding):\n- \"\"\"Helper to construct a text type out of characters or bytes. Required to\n- temporarily preserve backwards compatibility. Must be removed in TTK2.\n- \"\"\"\n- if string is None:\n- string = ''\n- if isinstance(string, str):\n- return str.__new__(newtype, string)\n-\n- warnings.warn(\n- 'Passing non-ASCII bytes as well as the `encoding` argument to '\n- '`multistring` is deprecated. Always pass unicode characters instead.',\n- RemovedInTTK2Warning, stacklevel=2,\n- )\n- return str.__new__(newtype, string, encoding)\n-\n \n class multistring(str):\n \n- def __new__(newtype, string=u\"\", *args, **kwargs):\n- encoding = kwargs.pop('encoding', 'utf-8')\n+ def __new__(newtype, string=\"\"):\n if isinstance(string, list):\n if not string:\n raise ValueError(\"multistring must contain at least one string\")\n- newstring = _create_text_type(newtype, string[0], encoding)\n+ newstring = str.__new__(newtype, string[0])\n newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]\n else:\n- newstring = _create_text_type(newtype, string, encoding)\n+ newstring = str.__new__(newtype, string)\n newstring.strings = [newstring]\n return newstring\n", "issue": "RemovedInTTK2Warning seems strange\nThere is ``RemovedInTTK2Warning`` which apparently was meant to flag feature which will be removed in translate-toolkit 2. However it is already out and that did not happen :-).\r\n\r\nEither RemovedInTTK2Warning should be renamed as translate-toolkit 2 has already been released, or the deprecation should be applied.\r\n\r\nHowever it seems that quite a lot of the code seems to rely on that behavior.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Supports a hybrid Unicode string that can also have a list of alternate\nstrings in the strings attribute\n\"\"\"\n\nimport warnings\n\n\nfrom .deprecation import RemovedInTTK2Warning\n\n\ndef _create_text_type(newtype, string, encoding):\n \"\"\"Helper to construct a text type out of characters or bytes. Required to\n temporarily preserve backwards compatibility. 
Must be removed in TTK2.\n \"\"\"\n if string is None:\n string = ''\n if isinstance(string, str):\n return str.__new__(newtype, string)\n\n warnings.warn(\n 'Passing non-ASCII bytes as well as the `encoding` argument to '\n '`multistring` is deprecated. Always pass unicode characters instead.',\n RemovedInTTK2Warning, stacklevel=2,\n )\n return str.__new__(newtype, string, encoding)\n\n\nclass multistring(str):\n\n def __new__(newtype, string=u\"\", *args, **kwargs):\n encoding = kwargs.pop('encoding', 'utf-8')\n if isinstance(string, list):\n if not string:\n raise ValueError(\"multistring must contain at least one string\")\n newstring = _create_text_type(newtype, string[0], encoding)\n newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]\n else:\n newstring = _create_text_type(newtype, string, encoding)\n newstring.strings = [newstring]\n return newstring\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n if not hasattr(self, \"strings\"):\n self.strings = []\n\n def __cmp__(self, otherstring):\n def cmp_compat(s1, s2):\n # Python 3 compatible cmp() equivalent\n return (s1 > s2) - (s1 < s2)\n if isinstance(otherstring, multistring):\n parentcompare = cmp_compat(str(self), otherstring)\n if parentcompare:\n return parentcompare\n else:\n return cmp_compat(self.strings[1:], otherstring.strings[1:])\n elif isinstance(otherstring, str):\n return cmp_compat(str(self), otherstring)\n elif isinstance(otherstring, bytes):\n return cmp_compat(self.encode('utf-8'), otherstring)\n elif isinstance(otherstring, list) and otherstring:\n return cmp_compat(self, multistring(otherstring))\n else:\n return cmp_compat(str(type(self)), str(type(otherstring)))\n\n def __hash__(self):\n return hash(str(self))\n\n def __ne__(self, otherstring):\n return self.__cmp__(otherstring) != 0\n\n def __eq__(self, otherstring):\n return self.__cmp__(otherstring) == 0\n\n def __repr__(self):\n return u\"multistring(%r)\" % (\n [str(item) for item in self.strings]\n )\n\n def replace(self, old, new, count=None):\n if count is None:\n newstr = multistring(super().replace(old, new))\n else:\n newstr = multistring(super().replace(old, new, count))\n for s in self.strings[1:]:\n if count is None:\n newstr.strings.append(s.replace(old, new))\n else:\n newstr.strings.append(s.replace(old, new, count))\n return newstr\n", "path": "translate/misc/multistring.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2014 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n#\n# translate is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, see <http://www.gnu.org/licenses/>.\n\nimport warnings\nfrom functools import wraps\n\n\nclass RemovedInTTK2Warning(DeprecationWarning):\n pass\n\n\ndef deprecated(message=\"\"):\n \"\"\"Decorator that marks functions and methods as deprecated.\n\n A warning will be emitted when the function or method is used. 
If a custom\n message is provided, it will be shown after the default warning message.\n \"\"\"\n def inner_render(func):\n @wraps(func)\n def new_func(*args, **kwargs):\n msg = message # Hack to avoid UnboundLocalError.\n if msg:\n msg = \"\\n\" + msg\n func_code = func.__code__\n warnings.warn_explicit(\n \"Call to deprecated function {0}.{1}\".format(func.__name__,\n msg),\n category=DeprecationWarning,\n filename=func_code.co_filename,\n lineno=func_code.co_firstlineno + 1\n )\n return func(*args, **kwargs)\n return new_func\n return inner_render\n", "path": "translate/misc/deprecation.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Supports a hybrid Unicode string that can also have a list of alternate\nstrings in the strings attribute\n\"\"\"\n\n\nclass multistring(str):\n\n def __new__(newtype, string=\"\"):\n if isinstance(string, list):\n if not string:\n raise ValueError(\"multistring must contain at least one string\")\n newstring = str.__new__(newtype, string[0])\n newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]\n else:\n newstring = str.__new__(newtype, string)\n newstring.strings = [newstring]\n return newstring\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n if not hasattr(self, \"strings\"):\n self.strings = []\n\n def __cmp__(self, otherstring):\n def cmp_compat(s1, s2):\n # Python 3 compatible cmp() equivalent\n return (s1 > s2) - (s1 < s2)\n if isinstance(otherstring, multistring):\n parentcompare = cmp_compat(str(self), otherstring)\n if parentcompare:\n return parentcompare\n else:\n return cmp_compat(self.strings[1:], otherstring.strings[1:])\n elif isinstance(otherstring, str):\n return cmp_compat(str(self), otherstring)\n elif isinstance(otherstring, bytes):\n return cmp_compat(self.encode('utf-8'), otherstring)\n elif isinstance(otherstring, list) and otherstring:\n return cmp_compat(self, multistring(otherstring))\n else:\n return cmp_compat(str(type(self)), str(type(otherstring)))\n\n def __hash__(self):\n return hash(str(self))\n\n def __ne__(self, otherstring):\n return self.__cmp__(otherstring) != 0\n\n def __eq__(self, otherstring):\n return self.__cmp__(otherstring) == 0\n\n def __repr__(self):\n return u\"multistring(%r)\" % (\n [str(item) for item in self.strings]\n )\n\n def replace(self, old, new, count=None):\n if count is None:\n newstr = multistring(super().replace(old, new))\n else:\n newstr = multistring(super().replace(old, new, count))\n for s in self.strings[1:]:\n if count is None:\n newstr.strings.append(s.replace(old, new))\n else:\n newstr.strings.append(s.replace(old, new, count))\n return newstr\n", "path": "translate/misc/multistring.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2014 Zuza Software Foundation\n#\n# This file 
is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n#\n# translate is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, see <http://www.gnu.org/licenses/>.\n\nimport warnings\nfrom functools import wraps\n\n\ndef deprecated(message=\"\"):\n \"\"\"Decorator that marks functions and methods as deprecated.\n\n A warning will be emitted when the function or method is used. If a custom\n message is provided, it will be shown after the default warning message.\n \"\"\"\n def inner_render(func):\n @wraps(func)\n def new_func(*args, **kwargs):\n msg = message # Hack to avoid UnboundLocalError.\n if msg:\n msg = \"\\n\" + msg\n func_code = func.__code__\n warnings.warn_explicit(\n \"Call to deprecated function {0}.{1}\".format(func.__name__,\n msg),\n category=DeprecationWarning,\n filename=func_code.co_filename,\n lineno=func_code.co_firstlineno + 1\n )\n return func(*args, **kwargs)\n return new_func\n return inner_render\n", "path": "translate/misc/deprecation.py"}]}
1,956
512
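For the translate-toolkit instance above, the golden diff removes `RemovedInTTK2Warning` together with the bytes-plus-encoding construction path, leaving `multistring.__new__` to wrap unicode strings only. The class below is a minimal, self-contained stand-in for that str-subclass pattern, assuming nothing beyond the construction logic shown in the diff; the name `MultiString` and the example strings are made up.

```python
# Minimal stand-in for the patched constructor: the value of the str itself
# is the first string, and all alternatives live on a `strings` attribute.
class MultiString(str):
    def __new__(cls, string=""):
        if isinstance(string, list):
            if not string:
                raise ValueError("must contain at least one string")
            new = str.__new__(cls, string[0])
            new.strings = [new] + [MultiString(alt) for alt in string[1:]]
        else:
            new = str.__new__(cls, string)
            new.strings = [new]
        return new


ms = MultiString(["banana", "bananas"])
print(str(ms))              # 'banana' -> behaves like its first string
print(ms.strings)           # ['banana', 'bananas']
print(isinstance(ms, str))  # True; no bytes/encoding handling remains
```

The library's `__init__`, comparison, and `replace` methods are untouched by the patch; only the construction path changes.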
gh_patches_debug_64393
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3328
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider longhorn_steakhouse is broken During the global build at 2021-10-20-14-42-48, spider **longhorn_steakhouse** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/longhorn_steakhouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/longhorn_steakhouse.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import json 3 import re 4 5 import scrapy 6 7 from locations.items import GeojsonPointItem 8 from locations.hours import OpeningHours 9 10 11 class LongHornSteakhouseSpider(scrapy.Spider): 12 name = "longhorn_steakhouse" 13 item_attributes = {'brand': 'LongHorn Steakhouse', 'brand_wikidata': "Q3259007"} 14 allowed_domains = [] 15 start_urls = [ 16 'https://www.longhornsteakhouse.com/locations-sitemap.xml', 17 ] 18 custom_settings = { 19 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36', 20 } 21 download_delay = 5 22 23 def parse_hours(self, hours): 24 opening_hours = OpeningHours() 25 26 for hour in hours: 27 day, open_close = hour.split(' ') 28 open_time, close_time = open_close.split('-') 29 opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M') 30 return opening_hours.as_opening_hours() 31 32 def parse(self, response): 33 response.selector.remove_namespaces() 34 urls = response.xpath('//url/loc/text()').extract() 35 for url in urls: 36 yield scrapy.Request(url=url, callback=self.parse_store) 37 38 def parse_store(self, response): 39 store_data = response.xpath('//script[@type="application/ld+json" and contains(text(), "streetAddress")]/text()').extract_first() 40 if store_data: 41 data = json.loads(store_data) 42 ref = re.search(r'.+/(.+?)/?(?:\.html|$)', response.url).group(1) 43 44 # Handle store pages that are missing the application/ld+json data 45 addr, city_state_zip, phone = response.xpath('//p[@id="info-link-webhead"]/text()').extract() 46 city, state, postcode = re.search(r'(.*?),\s([A-Z]{2})\s([\d-]+)$', city_state_zip).groups() 47 48 properties = { 49 'name': data.get("name") or response.xpath('//h1[@class="style_h1"]/text()').extract_first().strip(), 50 'ref': data["branchCode"] or ref, 51 'addr_full': data["address"]["streetAddress"].strip() or addr.strip(), 52 'city': data["address"]["addressLocality"] or city, 53 'state': data["address"]["addressRegion"] or state, 54 'postcode': data["address"]["postalCode"] or postcode, 55 'country': data["address"]["addressCountry"], 56 'phone': data.get("telephone") or phone.strip(), 57 'website': data.get("url") or response.url, 58 'lat': float(data["geo"]["latitude"]), 59 'lon': float(data["geo"]["longitude"]), 60 } 61 62 hours = data.get("openingHours") 63 if hours: 64 store_hours = self.parse_hours(hours) 65 properties["opening_hours"] = store_hours 66 67 yield GeojsonPointItem(**properties) 68 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/longhorn_steakhouse.py b/locations/spiders/longhorn_steakhouse.py --- a/locations/spiders/longhorn_steakhouse.py +++ b/locations/spiders/longhorn_steakhouse.py @@ -18,7 +18,7 @@ custom_settings = { 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36', } - download_delay = 5 + download_delay = 1 def parse_hours(self, hours): opening_hours = OpeningHours()
{"golden_diff": "diff --git a/locations/spiders/longhorn_steakhouse.py b/locations/spiders/longhorn_steakhouse.py\n--- a/locations/spiders/longhorn_steakhouse.py\n+++ b/locations/spiders/longhorn_steakhouse.py\n@@ -18,7 +18,7 @@\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n- download_delay = 5\n+ download_delay = 1\n \n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n", "issue": "Spider longhorn_steakhouse is broken\nDuring the global build at 2021-10-20-14-42-48, spider **longhorn_steakhouse** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/longhorn_steakhouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass LongHornSteakhouseSpider(scrapy.Spider):\n name = \"longhorn_steakhouse\"\n item_attributes = {'brand': 'LongHorn Steakhouse', 'brand_wikidata': \"Q3259007\"}\n allowed_domains = []\n start_urls = [\n 'https://www.longhornsteakhouse.com/locations-sitemap.xml',\n ]\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n download_delay = 5\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n day, open_close = hour.split(' ')\n open_time, close_time = open_close.split('-')\n opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n urls = response.xpath('//url/loc/text()').extract()\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse_store)\n\n def parse_store(self, response):\n store_data = response.xpath('//script[@type=\"application/ld+json\" and contains(text(), \"streetAddress\")]/text()').extract_first()\n if store_data:\n data = json.loads(store_data)\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n # Handle store pages that are missing the application/ld+json data\n addr, city_state_zip, phone = response.xpath('//p[@id=\"info-link-webhead\"]/text()').extract()\n city, state, postcode = re.search(r'(.*?),\\s([A-Z]{2})\\s([\\d-]+)$', city_state_zip).groups()\n\n properties = {\n 'name': data.get(\"name\") or response.xpath('//h1[@class=\"style_h1\"]/text()').extract_first().strip(),\n 'ref': data[\"branchCode\"] or ref,\n 'addr_full': data[\"address\"][\"streetAddress\"].strip() or addr.strip(),\n 'city': data[\"address\"][\"addressLocality\"] or city,\n 'state': data[\"address\"][\"addressRegion\"] or state,\n 'postcode': data[\"address\"][\"postalCode\"] or postcode,\n 'country': data[\"address\"][\"addressCountry\"],\n 'phone': data.get(\"telephone\") or phone.strip(),\n 'website': data.get(\"url\") or response.url,\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n }\n\n hours = data.get(\"openingHours\")\n if hours:\n store_hours = self.parse_hours(hours)\n properties[\"opening_hours\"] = store_hours\n\n yield 
GeojsonPointItem(**properties)\n", "path": "locations/spiders/longhorn_steakhouse.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass LongHornSteakhouseSpider(scrapy.Spider):\n name = \"longhorn_steakhouse\"\n item_attributes = {'brand': 'LongHorn Steakhouse', 'brand_wikidata': \"Q3259007\"}\n allowed_domains = []\n start_urls = [\n 'https://www.longhornsteakhouse.com/locations-sitemap.xml',\n ]\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n download_delay = 1\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n day, open_close = hour.split(' ')\n open_time, close_time = open_close.split('-')\n opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n urls = response.xpath('//url/loc/text()').extract()\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse_store)\n\n def parse_store(self, response):\n store_data = response.xpath('//script[@type=\"application/ld+json\" and contains(text(), \"streetAddress\")]/text()').extract_first()\n if store_data:\n data = json.loads(store_data)\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n # Handle store pages that are missing the application/ld+json data\n addr, city_state_zip, phone = response.xpath('//p[@id=\"info-link-webhead\"]/text()').extract()\n city, state, postcode = re.search(r'(.*?),\\s([A-Z]{2})\\s([\\d-]+)$', city_state_zip).groups()\n\n properties = {\n 'name': data.get(\"name\") or response.xpath('//h1[@class=\"style_h1\"]/text()').extract_first().strip(),\n 'ref': data[\"branchCode\"] or ref,\n 'addr_full': data[\"address\"][\"streetAddress\"].strip() or addr.strip(),\n 'city': data[\"address\"][\"addressLocality\"] or city,\n 'state': data[\"address\"][\"addressRegion\"] or state,\n 'postcode': data[\"address\"][\"postalCode\"] or postcode,\n 'country': data[\"address\"][\"addressCountry\"],\n 'phone': data.get(\"telephone\") or phone.strip(),\n 'website': data.get(\"url\") or response.url,\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n }\n\n hours = data.get(\"openingHours\")\n if hours:\n store_hours = self.parse_hours(hours)\n properties[\"opening_hours\"] = store_hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/longhorn_steakhouse.py"}]}
1,280
167
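For the alltheplaces instance above, the golden diff only lowers the spider's `download_delay` from 5 to 1. As background, Scrapy honours a per-spider `download_delay` attribute over the project-wide `DOWNLOAD_DELAY` setting, so this one number governs how long a crawl over every sitemap entry takes. The skeleton below is a hypothetical spider written only to show where that attribute sits; the class name and URL are invented and it is not code from the alltheplaces repository.

```python
import scrapy


class ExampleSitemapSpider(scrapy.Spider):
    # Per-spider attributes such as download_delay take precedence over the
    # project-wide DOWNLOAD_DELAY setting, so this single number controls the
    # pacing of every request the spider issues.
    name = "example_sitemap"
    download_delay = 1      # seconds between requests; was 5 in the broken spider
    start_urls = ["https://example.com/locations-sitemap.xml"]

    def parse(self, response):
        response.selector.remove_namespaces()
        for url in response.xpath("//url/loc/text()").getall():
            yield scrapy.Request(url=url, callback=self.parse_store)

    def parse_store(self, response):
        yield {"ref": response.url}
```

Whether the five-second delay was the actual cause of the zero-feature run is not stated in the issue; the diff records only the change.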
gh_patches_debug_60375
rasdani/github-patches
git_diff
UTNkar__moore-794
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Translations for footer_en missing in production I noticed that in the settings the footer option is called footer_en. Seems like a translation has gone missing ![image](https://user-images.githubusercontent.com/19433606/228253442-09290b66-c344-417f-856a-19e97083a23a.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/branding/models.py` Content: ``` 1 from django.db import models 2 from wagtail.contrib.settings.models import BaseSetting, register_setting 3 4 from django.utils.translation import gettext_lazy as _ 5 from wagtail.admin.edit_handlers import FieldPanel, FieldRowPanel, \ 6 MultiFieldPanel, StreamFieldPanel, TabbedInterface, ObjectList 7 from wagtail.core import blocks 8 from wagtail.core.fields import StreamField 9 from wagtail.images.edit_handlers import ImageChooserPanel 10 from utils.translation import TranslatedField 11 12 13 @register_setting(icon='fa-window-minimize') 14 class FooterSettings(BaseSetting): 15 class Meta: 16 verbose_name = _('footer_en') # quickfix 17 18 footer_en = StreamField( 19 [('column', blocks.StructBlock([ 20 ('size', blocks.IntegerBlock(min_value=1, max_value=12)), 21 ('content', blocks.RichTextBlock()), 22 ]))], 23 blank=True, 24 ) 25 26 footer_sv = StreamField( 27 [('column', blocks.StructBlock([ 28 ('size', blocks.IntegerBlock(min_value=1, max_value=12)), 29 ('content', blocks.RichTextBlock()), 30 ]))], 31 blank=True, 32 ) 33 34 footer = TranslatedField('footer_en', 'footer_sv') 35 36 panels_sv = [ 37 StreamFieldPanel('footer_sv') 38 ] 39 40 panels_en = [ 41 StreamFieldPanel('footer_en') 42 ] 43 44 edit_handler = TabbedInterface([ 45 ObjectList(panels_en, heading=_("English")), 46 ObjectList(panels_sv, heading=_("Swedish")) 47 ]) 48 49 50 @register_setting(icon='openquote') 51 class SocialMediaSettings(BaseSetting): 52 class Meta: 53 verbose_name = _('social media accounts') 54 55 facebook = models.URLField( 56 help_text=_('Your Facebook page URL'), 57 blank=True, 58 ) 59 instagram = models.CharField( 60 max_length=255, 61 help_text=_('Your Instagram username, without the @'), 62 blank=True, 63 ) 64 twitter = models.CharField( 65 max_length=255, 66 help_text=_('Your Twitter username, without the @'), 67 blank=True, 68 ) 69 70 71 class Logo(models.Model): 72 class Meta: 73 verbose_name = _('logo') 74 verbose_name_plural = _('logos') 75 76 def __str__(self): 77 logotext = str(_('logo')) 78 return logotext.capitalize() 79 80 CATEGORY_CHOICES = ( 81 ('committee', _('Committee')), 82 ('section', _('Section')), 83 ) 84 85 category = models.CharField( 86 max_length=20, 87 choices=CATEGORY_CHOICES, 88 verbose_name=_('category'), 89 blank=False, 90 null=False, 91 ) 92 93 link = models.URLField( 94 verbose_name=_('links to'), 95 null=False, 96 blank=False, 97 ) 98 99 logo = models.ForeignKey( 100 'wagtailimages.Image', 101 verbose_name=_('logo'), 102 null=True, 103 blank=True, 104 on_delete=models.SET_NULL, 105 related_name='+' 106 ) 107 108 logo_white = models.ForeignKey( 109 'wagtailimages.Image', 110 verbose_name=_('white logo'), 111 null=True, 112 blank=True, 113 on_delete=models.SET_NULL, 114 related_name='+' 115 ) 116 117 logo_black = models.ForeignKey( 118 'wagtailimages.Image', 119 verbose_name=_('black logo'), 120 null=True, 121 blank=True, 122 on_delete=models.SET_NULL, 123 related_name='+' 124 ) 125 126 belongs_to = 
models.ForeignKey( 127 'wagtailcore.Site', 128 verbose_name=_('belongs to'), 129 null=True, 130 blank=True, 131 on_delete=models.SET_NULL, 132 ) 133 134 # ------ Administrator settings ------ 135 panels = [MultiFieldPanel([ 136 FieldRowPanel([ 137 FieldPanel('category'), 138 FieldPanel('link'), 139 ]), 140 ImageChooserPanel('logo'), 141 ImageChooserPanel('logo_white'), 142 ImageChooserPanel('logo_black'), 143 FieldPanel('belongs_to'), 144 ])] 145 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/branding/models.py b/src/branding/models.py --- a/src/branding/models.py +++ b/src/branding/models.py @@ -13,7 +13,7 @@ @register_setting(icon='fa-window-minimize') class FooterSettings(BaseSetting): class Meta: - verbose_name = _('footer_en') # quickfix + verbose_name = _('footer') # quickfix footer_en = StreamField( [('column', blocks.StructBlock([
{"golden_diff": "diff --git a/src/branding/models.py b/src/branding/models.py\n--- a/src/branding/models.py\n+++ b/src/branding/models.py\n@@ -13,7 +13,7 @@\n @register_setting(icon='fa-window-minimize')\n class FooterSettings(BaseSetting):\n class Meta:\n- verbose_name = _('footer_en') # quickfix\n+ verbose_name = _('footer') # quickfix\n \n footer_en = StreamField(\n [('column', blocks.StructBlock([\n", "issue": "Translations for footer_en missing in production\nI noticed that in the settings the footer option is called footer_en. Seems like a translation has gone missing\r\n![image](https://user-images.githubusercontent.com/19433606/228253442-09290b66-c344-417f-856a-19e97083a23a.png)\r\n\n", "before_files": [{"content": "from django.db import models\nfrom wagtail.contrib.settings.models import BaseSetting, register_setting\n\nfrom django.utils.translation import gettext_lazy as _\nfrom wagtail.admin.edit_handlers import FieldPanel, FieldRowPanel, \\\n MultiFieldPanel, StreamFieldPanel, TabbedInterface, ObjectList\nfrom wagtail.core import blocks\nfrom wagtail.core.fields import StreamField\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom utils.translation import TranslatedField\n\n\n@register_setting(icon='fa-window-minimize')\nclass FooterSettings(BaseSetting):\n class Meta:\n verbose_name = _('footer_en') # quickfix\n\n footer_en = StreamField(\n [('column', blocks.StructBlock([\n ('size', blocks.IntegerBlock(min_value=1, max_value=12)),\n ('content', blocks.RichTextBlock()),\n ]))],\n blank=True,\n )\n\n footer_sv = StreamField(\n [('column', blocks.StructBlock([\n ('size', blocks.IntegerBlock(min_value=1, max_value=12)),\n ('content', blocks.RichTextBlock()),\n ]))],\n blank=True,\n )\n\n footer = TranslatedField('footer_en', 'footer_sv')\n\n panels_sv = [\n StreamFieldPanel('footer_sv')\n ]\n\n panels_en = [\n StreamFieldPanel('footer_en')\n ]\n\n edit_handler = TabbedInterface([\n ObjectList(panels_en, heading=_(\"English\")),\n ObjectList(panels_sv, heading=_(\"Swedish\"))\n ])\n\n\n@register_setting(icon='openquote')\nclass SocialMediaSettings(BaseSetting):\n class Meta:\n verbose_name = _('social media accounts')\n\n facebook = models.URLField(\n help_text=_('Your Facebook page URL'),\n blank=True,\n )\n instagram = models.CharField(\n max_length=255,\n help_text=_('Your Instagram username, without the @'),\n blank=True,\n )\n twitter = models.CharField(\n max_length=255,\n help_text=_('Your Twitter username, without the @'),\n blank=True,\n )\n\n\nclass Logo(models.Model):\n class Meta:\n verbose_name = _('logo')\n verbose_name_plural = _('logos')\n\n def __str__(self):\n logotext = str(_('logo'))\n return logotext.capitalize()\n\n CATEGORY_CHOICES = (\n ('committee', _('Committee')),\n ('section', _('Section')),\n )\n\n category = models.CharField(\n max_length=20,\n choices=CATEGORY_CHOICES,\n verbose_name=_('category'),\n blank=False,\n null=False,\n )\n\n link = models.URLField(\n verbose_name=_('links to'),\n null=False,\n blank=False,\n )\n\n logo = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n logo_white = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('white logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n logo_black = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('black logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n belongs_to = 
models.ForeignKey(\n 'wagtailcore.Site',\n verbose_name=_('belongs to'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n\n # ------ Administrator settings ------\n panels = [MultiFieldPanel([\n FieldRowPanel([\n FieldPanel('category'),\n FieldPanel('link'),\n ]),\n ImageChooserPanel('logo'),\n ImageChooserPanel('logo_white'),\n ImageChooserPanel('logo_black'),\n FieldPanel('belongs_to'),\n ])]\n", "path": "src/branding/models.py"}], "after_files": [{"content": "from django.db import models\nfrom wagtail.contrib.settings.models import BaseSetting, register_setting\n\nfrom django.utils.translation import gettext_lazy as _\nfrom wagtail.admin.edit_handlers import FieldPanel, FieldRowPanel, \\\n MultiFieldPanel, StreamFieldPanel, TabbedInterface, ObjectList\nfrom wagtail.core import blocks\nfrom wagtail.core.fields import StreamField\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom utils.translation import TranslatedField\n\n\n@register_setting(icon='fa-window-minimize')\nclass FooterSettings(BaseSetting):\n class Meta:\n verbose_name = _('footer') # quickfix\n\n footer_en = StreamField(\n [('column', blocks.StructBlock([\n ('size', blocks.IntegerBlock(min_value=1, max_value=12)),\n ('content', blocks.RichTextBlock()),\n ]))],\n blank=True,\n )\n\n footer_sv = StreamField(\n [('column', blocks.StructBlock([\n ('size', blocks.IntegerBlock(min_value=1, max_value=12)),\n ('content', blocks.RichTextBlock()),\n ]))],\n blank=True,\n )\n\n footer = TranslatedField('footer_en', 'footer_sv')\n\n panels_sv = [\n StreamFieldPanel('footer_sv')\n ]\n\n panels_en = [\n StreamFieldPanel('footer_en')\n ]\n\n edit_handler = TabbedInterface([\n ObjectList(panels_en, heading=_(\"English\")),\n ObjectList(panels_sv, heading=_(\"Swedish\"))\n ])\n\n\n@register_setting(icon='openquote')\nclass SocialMediaSettings(BaseSetting):\n class Meta:\n verbose_name = _('social media accounts')\n\n facebook = models.URLField(\n help_text=_('Your Facebook page URL'),\n blank=True,\n )\n instagram = models.CharField(\n max_length=255,\n help_text=_('Your Instagram username, without the @'),\n blank=True,\n )\n twitter = models.CharField(\n max_length=255,\n help_text=_('Your Twitter username, without the @'),\n blank=True,\n )\n\n\nclass Logo(models.Model):\n class Meta:\n verbose_name = _('logo')\n verbose_name_plural = _('logos')\n\n def __str__(self):\n logotext = str(_('logo'))\n return logotext.capitalize()\n\n CATEGORY_CHOICES = (\n ('committee', _('Committee')),\n ('section', _('Section')),\n )\n\n category = models.CharField(\n max_length=20,\n choices=CATEGORY_CHOICES,\n verbose_name=_('category'),\n blank=False,\n null=False,\n )\n\n link = models.URLField(\n verbose_name=_('links to'),\n null=False,\n blank=False,\n )\n\n logo = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n logo_white = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('white logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n logo_black = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('black logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n belongs_to = models.ForeignKey(\n 'wagtailcore.Site',\n verbose_name=_('belongs to'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n\n # ------ Administrator settings ------\n panels = [MultiFieldPanel([\n FieldRowPanel([\n FieldPanel('category'),\n 
FieldPanel('link'),\n ]),\n ImageChooserPanel('logo'),\n ImageChooserPanel('logo_white'),\n ImageChooserPanel('logo_black'),\n FieldPanel('belongs_to'),\n ])]\n", "path": "src/branding/models.py"}]}
1,497
110
gh_patches_debug_12188
rasdani/github-patches
git_diff
encode__uvicorn-895
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Subprocess returncode is not detected when running Gunicorn with Uvicorn (with fix PR companion) ### Checklist <!-- Please make sure you check all these items before submitting your bug report. --> - [x] The bug is reproducible against the latest release and/or `master`. - [x] There are no similar issues or pull requests to fix it yet. ### Describe the bug <!-- A clear and concise description of what the bug is. --> When starting Gunicorn with Uvicorn worker(s), if the app uses `subprocess` to start other processes and captures the output, their `returncode` is in most cases `0`, even if the actual exit code was `1`. ### To reproduce <!-- Provide a *minimal* example with steps to reproduce the bug locally. NOTE: try to keep any external dependencies *at an absolute minimum* . In other words, remove anything that doesn't make the bug go away. --> Take this minimal FastAPI app (or replace with Starlette), `main.py`: ```Python import subprocess from fastapi import FastAPI app = FastAPI() @app.post("/run") def run_subprocess(): result = subprocess.run( ["python", "-c", "import sys; sys.exit(1)"], capture_output=True ) return {"returncode": result.returncode} ``` Then run it with: ```console $ gunicorn -k uvicorn.workers.UvicornWorker main:app ``` Open the browser at http:127.0.0.1:8000/docs and send a request to `/run`. ### Expected behavior <!-- A clear and concise description of what you expected to happen. --> The detected `returncode` should always be `1`, as the subprocess always exits with `1`. ### Actual behavior <!-- A clear and concise description of what actually happens. --> In most of the cases it will return a `returncode` of `0`. Strangely enough, in some cases, it will return a `returncode` of `1`. ### Debugging material <!-- Any tracebacks, screenshots, etc. that can help understanding the problem. NOTE: - Please list tracebacks in full (don't truncate them). - If relevant, consider turning on DEBUG or TRACE logs for additional details (see the Logging section on https://www.uvicorn.org/settings/ specifically the `log-level` flag). - Consider using `<details>` to make tracebacks/logs collapsible if they're very large (see https://gist.github.com/ericclemmons/b146fe5da72ca1f706b2ef72a20ac39d). --> This is because the `UvicornWorker`, which inherits from the base Gunicorn worker, declares a method `init_signals()` (overriding the parent method) but doesn't do anything. I suspect it's because the signal handlers are declared in the `Server.install_signal_handlers()` with compatibility with `asyncio`. But the `UvicornWorker` process is started with `os.fork()` by Gunicorn (if I understand correctly) and by the point it is forked, the Gunicorn "Arbiter" class (that handles worker processes) already set its own signal handlers. And the signal handlers in the Gunicorn base worker reset those handlers, but the `UvicornWorker` doesn't. So, when a process started with `subprocessing` is terminated, the `SIGCHLD` signal is handled by the Gunicorn `Arbiter` (as if the terminated process was a worker) instead of by the `UvicornWorker`. Disclaimer: why the `SIGCHLD` signal handling in the Gunicorn `Arbiter` alters the `returncode` of a process run with `subprocess`, when capturing output, is still a mystery to me. But I realized the signal handler in the `Arbiter` is expected to handle dead worker processes. 
And worker subclasses all seem to reset the signal handlers to revert those signals set by the `Arbiter`. I'm also submitting a PR to fix this: https://github.com/encode/uvicorn/pull/895. It's just 3 lines of code. But debugging it and finding it took me almost a week. :sweat_smile: ### Environment - OS / Python / Uvicorn version: just run `uvicorn --version`: `Running uvicorn 0.13.1 with CPython 3.8.5 on Linux` (it's actually installed from source, for debugging) - Gunicorn version (also installed from source, for debugging): `gunicorn (version 20.0.4)` - The exact command you're running uvicorn with, all flags you passed included. If you run it with gunicorn please do the same. If there is a reverse-proxy involved and you cannot reproduce without it please give the minimal config of it to reproduce. ```console $ gunicorn -k uvicorn.workers.UvicornWorker main:app ``` ### Additional context <!-- Any additional information that can help understanding the problem. Eg. linked issues, or a description of what you were trying to achieve. --> I'm pretty sure this issue https://github.com/encode/uvicorn/issues/584 is related to the same problem. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `uvicorn/workers.py` Content: ``` 1 import asyncio 2 import logging 3 4 from gunicorn.workers.base import Worker 5 6 from uvicorn.config import Config 7 from uvicorn.main import Server 8 9 10 class UvicornWorker(Worker): 11 """ 12 A worker class for Gunicorn that interfaces with an ASGI consumer callable, 13 rather than a WSGI callable. 14 """ 15 16 CONFIG_KWARGS = {"loop": "uvloop", "http": "httptools"} 17 18 def __init__(self, *args, **kwargs): 19 super(UvicornWorker, self).__init__(*args, **kwargs) 20 21 logger = logging.getLogger("uvicorn.error") 22 logger.handlers = self.log.error_log.handlers 23 logger.setLevel(self.log.error_log.level) 24 logger.propagate = False 25 26 logger = logging.getLogger("uvicorn.access") 27 logger.handlers = self.log.access_log.handlers 28 logger.setLevel(self.log.access_log.level) 29 logger.propagate = False 30 31 config_kwargs = { 32 "app": None, 33 "log_config": None, 34 "timeout_keep_alive": self.cfg.keepalive, 35 "timeout_notify": self.timeout, 36 "callback_notify": self.callback_notify, 37 "limit_max_requests": self.max_requests, 38 "forwarded_allow_ips": self.cfg.forwarded_allow_ips, 39 } 40 41 if self.cfg.is_ssl: 42 ssl_kwargs = { 43 "ssl_keyfile": self.cfg.ssl_options.get("keyfile"), 44 "ssl_certfile": self.cfg.ssl_options.get("certfile"), 45 "ssl_keyfile_password": self.cfg.ssl_options.get("password"), 46 "ssl_version": self.cfg.ssl_options.get("ssl_version"), 47 "ssl_cert_reqs": self.cfg.ssl_options.get("cert_reqs"), 48 "ssl_ca_certs": self.cfg.ssl_options.get("ca_certs"), 49 "ssl_ciphers": self.cfg.ssl_options.get("ciphers"), 50 } 51 config_kwargs.update(ssl_kwargs) 52 53 if self.cfg.settings["backlog"].value: 54 config_kwargs["backlog"] = self.cfg.settings["backlog"].value 55 56 config_kwargs.update(self.CONFIG_KWARGS) 57 58 self.config = Config(**config_kwargs) 59 60 def init_process(self): 61 self.config.setup_event_loop() 62 super(UvicornWorker, self).init_process() 63 64 def init_signals(self): 65 pass 66 67 def run(self): 68 self.config.app = self.wsgi 69 server = Server(config=self.config) 70 loop = asyncio.get_event_loop() 71 loop.run_until_complete(server.serve(sockets=self.sockets)) 72 73 async def callback_notify(self): 74 self.notify() 75 76 77 class 
UvicornH11Worker(UvicornWorker): 78 CONFIG_KWARGS = {"loop": "asyncio", "http": "h11"} 79 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/uvicorn/workers.py b/uvicorn/workers.py --- a/uvicorn/workers.py +++ b/uvicorn/workers.py @@ -1,5 +1,6 @@ import asyncio import logging +import signal from gunicorn.workers.base import Worker @@ -62,7 +63,11 @@ super(UvicornWorker, self).init_process() def init_signals(self): - pass + # Reset signals so Gunicorn doesn't swallow subprocess return codes + # other signals are set up by Server.install_signal_handlers() + # See: https://github.com/encode/uvicorn/issues/894 + for s in self.SIGNALS: + signal.signal(s, signal.SIG_DFL) def run(self): self.config.app = self.wsgi
{"golden_diff": "diff --git a/uvicorn/workers.py b/uvicorn/workers.py\n--- a/uvicorn/workers.py\n+++ b/uvicorn/workers.py\n@@ -1,5 +1,6 @@\n import asyncio\n import logging\n+import signal\n \n from gunicorn.workers.base import Worker\n \n@@ -62,7 +63,11 @@\n super(UvicornWorker, self).init_process()\n \n def init_signals(self):\n- pass\n+ # Reset signals so Gunicorn doesn't swallow subprocess return codes\n+ # other signals are set up by Server.install_signal_handlers()\n+ # See: https://github.com/encode/uvicorn/issues/894\n+ for s in self.SIGNALS:\n+ signal.signal(s, signal.SIG_DFL)\n \n def run(self):\n self.config.app = self.wsgi\n", "issue": "Subprocess returncode is not detected when running Gunicorn with Uvicorn (with fix PR companion)\n### Checklist\r\n\r\n<!-- Please make sure you check all these items before submitting your bug report. -->\r\n\r\n- [x] The bug is reproducible against the latest release and/or `master`.\r\n- [x] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nWhen starting Gunicorn with Uvicorn worker(s), if the app uses `subprocess` to start other processes and captures the output, their `returncode` is in most cases `0`, even if the actual exit code was `1`.\r\n\r\n### To reproduce\r\n\r\n<!-- Provide a *minimal* example with steps to reproduce the bug locally.\r\n\r\nNOTE: try to keep any external dependencies *at an absolute minimum* .\r\nIn other words, remove anything that doesn't make the bug go away.\r\n\r\n-->\r\n\r\nTake this minimal FastAPI app (or replace with Starlette), `main.py`:\r\n\r\n```Python\r\nimport subprocess\r\n\r\nfrom fastapi import FastAPI\r\n\r\napp = FastAPI()\r\n\r\[email protected](\"/run\")\r\ndef run_subprocess():\r\n result = subprocess.run(\r\n [\"python\", \"-c\", \"import sys; sys.exit(1)\"], capture_output=True\r\n )\r\n return {\"returncode\": result.returncode}\r\n```\r\n\r\nThen run it with:\r\n\r\n```console\r\n$ gunicorn -k uvicorn.workers.UvicornWorker main:app\r\n```\r\n\r\nOpen the browser at http:127.0.0.1:8000/docs and send a request to `/run`.\r\n\r\n### Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nThe detected `returncode` should always be `1`, as the subprocess always exits with `1`.\r\n\r\n### Actual behavior\r\n\r\n<!-- A clear and concise description of what actually happens. -->\r\n\r\nIn most of the cases it will return a `returncode` of `0`. Strangely enough, in some cases, it will return a `returncode` of `1`.\r\n\r\n### Debugging material\r\n\r\n<!-- Any tracebacks, screenshots, etc. that can help understanding the problem.\r\n\r\nNOTE:\r\n- Please list tracebacks in full (don't truncate them).\r\n- If relevant, consider turning on DEBUG or TRACE logs for additional details (see the Logging section on https://www.uvicorn.org/settings/ specifically the `log-level` flag).\r\n- Consider using `<details>` to make tracebacks/logs collapsible if they're very large (see https://gist.github.com/ericclemmons/b146fe5da72ca1f706b2ef72a20ac39d).\r\n-->\r\n\r\nThis is because the `UvicornWorker`, which inherits from the base Gunicorn worker, declares a method `init_signals()` (overriding the parent method) but doesn't do anything. 
I suspect it's because the signal handlers are declared in the `Server.install_signal_handlers()` with compatibility with `asyncio`.\r\n\r\nBut the `UvicornWorker` process is started with `os.fork()` by Gunicorn (if I understand correctly) and by the point it is forked, the Gunicorn \"Arbiter\" class (that handles worker processes) already set its own signal handlers.\r\n\r\nAnd the signal handlers in the Gunicorn base worker reset those handlers, but the `UvicornWorker` doesn't. So, when a process started with `subprocessing` is terminated, the `SIGCHLD` signal is handled by the Gunicorn `Arbiter` (as if the terminated process was a worker) instead of by the `UvicornWorker`.\r\n\r\nDisclaimer: why the `SIGCHLD` signal handling in the Gunicorn `Arbiter` alters the `returncode` of a process run with `subprocess`, when capturing output, is still a mystery to me. But I realized the signal handler in the `Arbiter` is expected to handle dead worker processes. And worker subclasses all seem to reset the signal handlers to revert those signals set by the `Arbiter`.\r\n\r\nI'm also submitting a PR to fix this: https://github.com/encode/uvicorn/pull/895. It's just 3 lines of code. But debugging it and finding it took me almost a week. :sweat_smile: \r\n\r\n### Environment\r\n\r\n- OS / Python / Uvicorn version: just run `uvicorn --version`: `Running uvicorn 0.13.1 with CPython 3.8.5 on Linux` (it's actually installed from source, for debugging)\r\n- Gunicorn version (also installed from source, for debugging): `gunicorn (version 20.0.4)`\r\n- The exact command you're running uvicorn with, all flags you passed included. If you run it with gunicorn please do the same. If there is a reverse-proxy involved and you cannot reproduce without it please give the minimal config of it to reproduce.\r\n\r\n```console\r\n$ gunicorn -k uvicorn.workers.UvicornWorker main:app\r\n```\r\n\r\n### Additional context\r\n\r\n<!-- Any additional information that can help understanding the problem.\r\n\r\nEg. linked issues, or a description of what you were trying to achieve. 
-->\r\n\r\nI'm pretty sure this issue https://github.com/encode/uvicorn/issues/584 is related to the same problem.\n", "before_files": [{"content": "import asyncio\nimport logging\n\nfrom gunicorn.workers.base import Worker\n\nfrom uvicorn.config import Config\nfrom uvicorn.main import Server\n\n\nclass UvicornWorker(Worker):\n \"\"\"\n A worker class for Gunicorn that interfaces with an ASGI consumer callable,\n rather than a WSGI callable.\n \"\"\"\n\n CONFIG_KWARGS = {\"loop\": \"uvloop\", \"http\": \"httptools\"}\n\n def __init__(self, *args, **kwargs):\n super(UvicornWorker, self).__init__(*args, **kwargs)\n\n logger = logging.getLogger(\"uvicorn.error\")\n logger.handlers = self.log.error_log.handlers\n logger.setLevel(self.log.error_log.level)\n logger.propagate = False\n\n logger = logging.getLogger(\"uvicorn.access\")\n logger.handlers = self.log.access_log.handlers\n logger.setLevel(self.log.access_log.level)\n logger.propagate = False\n\n config_kwargs = {\n \"app\": None,\n \"log_config\": None,\n \"timeout_keep_alive\": self.cfg.keepalive,\n \"timeout_notify\": self.timeout,\n \"callback_notify\": self.callback_notify,\n \"limit_max_requests\": self.max_requests,\n \"forwarded_allow_ips\": self.cfg.forwarded_allow_ips,\n }\n\n if self.cfg.is_ssl:\n ssl_kwargs = {\n \"ssl_keyfile\": self.cfg.ssl_options.get(\"keyfile\"),\n \"ssl_certfile\": self.cfg.ssl_options.get(\"certfile\"),\n \"ssl_keyfile_password\": self.cfg.ssl_options.get(\"password\"),\n \"ssl_version\": self.cfg.ssl_options.get(\"ssl_version\"),\n \"ssl_cert_reqs\": self.cfg.ssl_options.get(\"cert_reqs\"),\n \"ssl_ca_certs\": self.cfg.ssl_options.get(\"ca_certs\"),\n \"ssl_ciphers\": self.cfg.ssl_options.get(\"ciphers\"),\n }\n config_kwargs.update(ssl_kwargs)\n\n if self.cfg.settings[\"backlog\"].value:\n config_kwargs[\"backlog\"] = self.cfg.settings[\"backlog\"].value\n\n config_kwargs.update(self.CONFIG_KWARGS)\n\n self.config = Config(**config_kwargs)\n\n def init_process(self):\n self.config.setup_event_loop()\n super(UvicornWorker, self).init_process()\n\n def init_signals(self):\n pass\n\n def run(self):\n self.config.app = self.wsgi\n server = Server(config=self.config)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(server.serve(sockets=self.sockets))\n\n async def callback_notify(self):\n self.notify()\n\n\nclass UvicornH11Worker(UvicornWorker):\n CONFIG_KWARGS = {\"loop\": \"asyncio\", \"http\": \"h11\"}\n", "path": "uvicorn/workers.py"}], "after_files": [{"content": "import asyncio\nimport logging\nimport signal\n\nfrom gunicorn.workers.base import Worker\n\nfrom uvicorn.config import Config\nfrom uvicorn.main import Server\n\n\nclass UvicornWorker(Worker):\n \"\"\"\n A worker class for Gunicorn that interfaces with an ASGI consumer callable,\n rather than a WSGI callable.\n \"\"\"\n\n CONFIG_KWARGS = {\"loop\": \"uvloop\", \"http\": \"httptools\"}\n\n def __init__(self, *args, **kwargs):\n super(UvicornWorker, self).__init__(*args, **kwargs)\n\n logger = logging.getLogger(\"uvicorn.error\")\n logger.handlers = self.log.error_log.handlers\n logger.setLevel(self.log.error_log.level)\n logger.propagate = False\n\n logger = logging.getLogger(\"uvicorn.access\")\n logger.handlers = self.log.access_log.handlers\n logger.setLevel(self.log.access_log.level)\n logger.propagate = False\n\n config_kwargs = {\n \"app\": None,\n \"log_config\": None,\n \"timeout_keep_alive\": self.cfg.keepalive,\n \"timeout_notify\": self.timeout,\n \"callback_notify\": self.callback_notify,\n \"limit_max_requests\": 
self.max_requests,\n \"forwarded_allow_ips\": self.cfg.forwarded_allow_ips,\n }\n\n if self.cfg.is_ssl:\n ssl_kwargs = {\n \"ssl_keyfile\": self.cfg.ssl_options.get(\"keyfile\"),\n \"ssl_certfile\": self.cfg.ssl_options.get(\"certfile\"),\n \"ssl_keyfile_password\": self.cfg.ssl_options.get(\"password\"),\n \"ssl_version\": self.cfg.ssl_options.get(\"ssl_version\"),\n \"ssl_cert_reqs\": self.cfg.ssl_options.get(\"cert_reqs\"),\n \"ssl_ca_certs\": self.cfg.ssl_options.get(\"ca_certs\"),\n \"ssl_ciphers\": self.cfg.ssl_options.get(\"ciphers\"),\n }\n config_kwargs.update(ssl_kwargs)\n\n if self.cfg.settings[\"backlog\"].value:\n config_kwargs[\"backlog\"] = self.cfg.settings[\"backlog\"].value\n\n config_kwargs.update(self.CONFIG_KWARGS)\n\n self.config = Config(**config_kwargs)\n\n def init_process(self):\n self.config.setup_event_loop()\n super(UvicornWorker, self).init_process()\n\n def init_signals(self):\n # Reset signals so Gunicorn doesn't swallow subprocess return codes\n # other signals are set up by Server.install_signal_handlers()\n # See: https://github.com/encode/uvicorn/issues/894\n for s in self.SIGNALS:\n signal.signal(s, signal.SIG_DFL)\n\n def run(self):\n self.config.app = self.wsgi\n server = Server(config=self.config)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(server.serve(sockets=self.sockets))\n\n async def callback_notify(self):\n self.notify()\n\n\nclass UvicornH11Worker(UvicornWorker):\n CONFIG_KWARGS = {\"loop\": \"asyncio\", \"http\": \"h11\"}\n", "path": "uvicorn/workers.py"}]}
2,116
186
gh_patches_debug_296
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-959
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PSNR - Higher is better. ## 🐛 Bug `PSNR.higher_is_better` should be `True` ### Additional context This is a simple change, created [PR#959](https://github.com/PyTorchLightning/metrics/pull/959) with the change. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchmetrics/image/psnr.py` Content: ``` 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from typing import Any, Dict, Optional, Sequence, Tuple, Union 15 16 import torch 17 from torch import Tensor, tensor 18 from typing_extensions import Literal 19 20 from torchmetrics.functional.image.psnr import _psnr_compute, _psnr_update 21 from torchmetrics.metric import Metric 22 from torchmetrics.utilities import rank_zero_warn 23 24 25 class PeakSignalNoiseRatio(Metric): 26 r""" 27 Computes `Computes Peak Signal-to-Noise Ratio`_ (PSNR): 28 29 .. math:: \text{PSNR}(I, J) = 10 * \log_{10} \left(\frac{\max(I)^2}{\text{MSE}(I, J)}\right) 30 31 Where :math:`\text{MSE}` denotes the `mean-squared-error`_ function. 32 33 Args: 34 data_range: 35 the range of the data. If None, it is determined from the data (max - min). 36 The ``data_range`` must be given when ``dim`` is not None. 37 base: a base of a logarithm to use. 38 reduction: a method to reduce metric score over labels. 39 40 - ``'elementwise_mean'``: takes the mean (default) 41 - ``'sum'``: takes the sum 42 - ``'none'`` or ``None``: no reduction will be applied 43 44 dim: 45 Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is 46 None meaning scores will be reduced across all dimensions and all batches. 47 compute_on_step: 48 Forward only calls ``update()`` and returns None if this is set to False. 49 50 .. deprecated:: v0.8 51 Argument has no use anymore and will be removed v0.9. 52 53 kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 54 55 Raises: 56 ValueError: 57 If ``dim`` is not ``None`` and ``data_range`` is not given. 58 59 Example: 60 >>> from torchmetrics import PeakSignalNoiseRatio 61 >>> psnr = PeakSignalNoiseRatio() 62 >>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]]) 63 >>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]]) 64 >>> psnr(preds, target) 65 tensor(2.5527) 66 67 .. 
note:: 68 Half precision is only support on GPU for this metric 69 70 """ 71 min_target: Tensor 72 max_target: Tensor 73 higher_is_better = False 74 75 def __init__( 76 self, 77 data_range: Optional[float] = None, 78 base: float = 10.0, 79 reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean", 80 dim: Optional[Union[int, Tuple[int, ...]]] = None, 81 compute_on_step: Optional[bool] = None, 82 **kwargs: Dict[str, Any], 83 ) -> None: 84 super().__init__(compute_on_step=compute_on_step, **kwargs) 85 86 if dim is None and reduction != "elementwise_mean": 87 rank_zero_warn(f"The `reduction={reduction}` will not have any effect when `dim` is None.") 88 89 if dim is None: 90 self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum") 91 self.add_state("total", default=tensor(0), dist_reduce_fx="sum") 92 else: 93 self.add_state("sum_squared_error", default=[]) 94 self.add_state("total", default=[]) 95 96 if data_range is None: 97 if dim is not None: 98 # Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to 99 # calculate `data_range` in the future. 100 raise ValueError("The `data_range` must be given when `dim` is not None.") 101 102 self.data_range = None 103 self.add_state("min_target", default=tensor(0.0), dist_reduce_fx=torch.min) 104 self.add_state("max_target", default=tensor(0.0), dist_reduce_fx=torch.max) 105 else: 106 self.add_state("data_range", default=tensor(float(data_range)), dist_reduce_fx="mean") 107 self.base = base 108 self.reduction = reduction 109 self.dim = tuple(dim) if isinstance(dim, Sequence) else dim 110 111 def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore 112 """Update state with predictions and targets. 113 114 Args: 115 preds: Predictions from model 116 target: Ground truth values 117 """ 118 sum_squared_error, n_obs = _psnr_update(preds, target, dim=self.dim) 119 if self.dim is None: 120 if self.data_range is None: 121 # keep track of min and max target values 122 self.min_target = min(target.min(), self.min_target) 123 self.max_target = max(target.max(), self.max_target) 124 125 self.sum_squared_error += sum_squared_error 126 self.total += n_obs 127 else: 128 self.sum_squared_error.append(sum_squared_error) 129 self.total.append(n_obs) 130 131 def compute(self) -> Tensor: 132 """Compute peak signal-to-noise ratio over state.""" 133 if self.data_range is not None: 134 data_range = self.data_range 135 else: 136 data_range = self.max_target - self.min_target 137 138 if self.dim is None: 139 sum_squared_error = self.sum_squared_error 140 total = self.total 141 else: 142 sum_squared_error = torch.cat([values.flatten() for values in self.sum_squared_error]) 143 total = torch.cat([values.flatten() for values in self.total]) 144 return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction) 145 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
 if __name__ == "__main__":
-    asyncio.run(run_async_server("."), debug=True)
+    asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
 if __name__ == "__main__":
-    server = run_sync_server(".")
+    server = run_sync_server()
     server.shutdown()
```
diff --git a/torchmetrics/image/psnr.py b/torchmetrics/image/psnr.py
--- a/torchmetrics/image/psnr.py
+++ b/torchmetrics/image/psnr.py
@@ -70,7 +70,7 @@
     """
     min_target: Tensor
     max_target: Tensor
-    higher_is_better = False
+    higher_is_better = True
 
     def __init__(
         self,
{"golden_diff": "diff --git a/torchmetrics/image/psnr.py b/torchmetrics/image/psnr.py\n--- a/torchmetrics/image/psnr.py\n+++ b/torchmetrics/image/psnr.py\n@@ -70,7 +70,7 @@\n \"\"\"\n min_target: Tensor\n max_target: Tensor\n- higher_is_better = False\n+ higher_is_better = True\n \n def __init__(\n self,\n", "issue": "PSNR - Higher is better.\n## \ud83d\udc1b Bug\r\n\r\n`PSNR.higher_is_better` should be `True`\r\n\r\n### Additional context\r\n\r\nThis is a simple change, created [PR#959](https://github.com/PyTorchLightning/metrics/pull/959) with the change.\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom torch import Tensor, tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.image.psnr import _psnr_compute, _psnr_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities import rank_zero_warn\n\n\nclass PeakSignalNoiseRatio(Metric):\n r\"\"\"\n Computes `Computes Peak Signal-to-Noise Ratio`_ (PSNR):\n\n .. math:: \\text{PSNR}(I, J) = 10 * \\log_{10} \\left(\\frac{\\max(I)^2}{\\text{MSE}(I, J)}\\right)\n\n Where :math:`\\text{MSE}` denotes the `mean-squared-error`_ function.\n\n Args:\n data_range:\n the range of the data. If None, it is determined from the data (max - min).\n The ``data_range`` must be given when ``dim`` is not None.\n base: a base of a logarithm to use.\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'`` or ``None``: no reduction will be applied\n\n dim:\n Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is\n None meaning scores will be reduced across all dimensions and all batches.\n compute_on_step:\n Forward only calls ``update()`` and returns None if this is set to False.\n\n .. deprecated:: v0.8\n Argument has no use anymore and will be removed v0.9.\n\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Raises:\n ValueError:\n If ``dim`` is not ``None`` and ``data_range`` is not given.\n\n Example:\n >>> from torchmetrics import PeakSignalNoiseRatio\n >>> psnr = PeakSignalNoiseRatio()\n >>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])\n >>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])\n >>> psnr(preds, target)\n tensor(2.5527)\n\n .. 
note::\n Half precision is only support on GPU for this metric\n\n \"\"\"\n min_target: Tensor\n max_target: Tensor\n higher_is_better = False\n\n def __init__(\n self,\n data_range: Optional[float] = None,\n base: float = 10.0,\n reduction: Literal[\"elementwise_mean\", \"sum\", \"none\", None] = \"elementwise_mean\",\n dim: Optional[Union[int, Tuple[int, ...]]] = None,\n compute_on_step: Optional[bool] = None,\n **kwargs: Dict[str, Any],\n ) -> None:\n super().__init__(compute_on_step=compute_on_step, **kwargs)\n\n if dim is None and reduction != \"elementwise_mean\":\n rank_zero_warn(f\"The `reduction={reduction}` will not have any effect when `dim` is None.\")\n\n if dim is None:\n self.add_state(\"sum_squared_error\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n else:\n self.add_state(\"sum_squared_error\", default=[])\n self.add_state(\"total\", default=[])\n\n if data_range is None:\n if dim is not None:\n # Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to\n # calculate `data_range` in the future.\n raise ValueError(\"The `data_range` must be given when `dim` is not None.\")\n\n self.data_range = None\n self.add_state(\"min_target\", default=tensor(0.0), dist_reduce_fx=torch.min)\n self.add_state(\"max_target\", default=tensor(0.0), dist_reduce_fx=torch.max)\n else:\n self.add_state(\"data_range\", default=tensor(float(data_range)), dist_reduce_fx=\"mean\")\n self.base = base\n self.reduction = reduction\n self.dim = tuple(dim) if isinstance(dim, Sequence) else dim\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: Ground truth values\n \"\"\"\n sum_squared_error, n_obs = _psnr_update(preds, target, dim=self.dim)\n if self.dim is None:\n if self.data_range is None:\n # keep track of min and max target values\n self.min_target = min(target.min(), self.min_target)\n self.max_target = max(target.max(), self.max_target)\n\n self.sum_squared_error += sum_squared_error\n self.total += n_obs\n else:\n self.sum_squared_error.append(sum_squared_error)\n self.total.append(n_obs)\n\n def compute(self) -> Tensor:\n \"\"\"Compute peak signal-to-noise ratio over state.\"\"\"\n if self.data_range is not None:\n data_range = self.data_range\n else:\n data_range = self.max_target - self.min_target\n\n if self.dim is None:\n sum_squared_error = self.sum_squared_error\n total = self.total\n else:\n sum_squared_error = torch.cat([values.flatten() for values in self.sum_squared_error])\n total = torch.cat([values.flatten() for values in self.total])\n return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction)\n", "path": "torchmetrics/image/psnr.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional, Sequence, 
Tuple, Union\n\nimport torch\nfrom torch import Tensor, tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.image.psnr import _psnr_compute, _psnr_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities import rank_zero_warn\n\n\nclass PeakSignalNoiseRatio(Metric):\n r\"\"\"\n Computes `Computes Peak Signal-to-Noise Ratio`_ (PSNR):\n\n .. math:: \\text{PSNR}(I, J) = 10 * \\log_{10} \\left(\\frac{\\max(I)^2}{\\text{MSE}(I, J)}\\right)\n\n Where :math:`\\text{MSE}` denotes the `mean-squared-error`_ function.\n\n Args:\n data_range:\n the range of the data. If None, it is determined from the data (max - min).\n The ``data_range`` must be given when ``dim`` is not None.\n base: a base of a logarithm to use.\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'`` or ``None``: no reduction will be applied\n\n dim:\n Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is\n None meaning scores will be reduced across all dimensions and all batches.\n compute_on_step:\n Forward only calls ``update()`` and returns None if this is set to False.\n\n .. deprecated:: v0.8\n Argument has no use anymore and will be removed v0.9.\n\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Raises:\n ValueError:\n If ``dim`` is not ``None`` and ``data_range`` is not given.\n\n Example:\n >>> from torchmetrics import PeakSignalNoiseRatio\n >>> psnr = PeakSignalNoiseRatio()\n >>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])\n >>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])\n >>> psnr(preds, target)\n tensor(2.5527)\n\n .. note::\n Half precision is only support on GPU for this metric\n\n \"\"\"\n min_target: Tensor\n max_target: Tensor\n higher_is_better = True\n\n def __init__(\n self,\n data_range: Optional[float] = None,\n base: float = 10.0,\n reduction: Literal[\"elementwise_mean\", \"sum\", \"none\", None] = \"elementwise_mean\",\n dim: Optional[Union[int, Tuple[int, ...]]] = None,\n compute_on_step: Optional[bool] = None,\n **kwargs: Dict[str, Any],\n ) -> None:\n super().__init__(compute_on_step=compute_on_step, **kwargs)\n\n if dim is None and reduction != \"elementwise_mean\":\n rank_zero_warn(f\"The `reduction={reduction}` will not have any effect when `dim` is None.\")\n\n if dim is None:\n self.add_state(\"sum_squared_error\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n else:\n self.add_state(\"sum_squared_error\", default=[])\n self.add_state(\"total\", default=[])\n\n if data_range is None:\n if dim is not None:\n # Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to\n # calculate `data_range` in the future.\n raise ValueError(\"The `data_range` must be given when `dim` is not None.\")\n\n self.data_range = None\n self.add_state(\"min_target\", default=tensor(0.0), dist_reduce_fx=torch.min)\n self.add_state(\"max_target\", default=tensor(0.0), dist_reduce_fx=torch.max)\n else:\n self.add_state(\"data_range\", default=tensor(float(data_range)), dist_reduce_fx=\"mean\")\n self.base = base\n self.reduction = reduction\n self.dim = tuple(dim) if isinstance(dim, Sequence) else dim\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: 
Ground truth values\n \"\"\"\n sum_squared_error, n_obs = _psnr_update(preds, target, dim=self.dim)\n if self.dim is None:\n if self.data_range is None:\n # keep track of min and max target values\n self.min_target = min(target.min(), self.min_target)\n self.max_target = max(target.max(), self.max_target)\n\n self.sum_squared_error += sum_squared_error\n self.total += n_obs\n else:\n self.sum_squared_error.append(sum_squared_error)\n self.total.append(n_obs)\n\n def compute(self) -> Tensor:\n \"\"\"Compute peak signal-to-noise ratio over state.\"\"\"\n if self.data_range is not None:\n data_range = self.data_range\n else:\n data_range = self.max_target - self.min_target\n\n if self.dim is None:\n sum_squared_error = self.sum_squared_error\n total = self.total\n else:\n sum_squared_error = torch.cat([values.flatten() for values in self.sum_squared_error])\n total = torch.cat([values.flatten() for values in self.total])\n return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction)\n", "path": "torchmetrics/image/psnr.py"}]}
2,059
96
gh_patches_debug_12948
rasdani/github-patches
git_diff
pyqtgraph__pyqtgraph-308
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- invalid keyword argument 'range' testing on Windows Qt5.5.1/ PyQtgraph github of 20160102, I have the following error on the "Custom Flowchart Nodes" test: ``` Using PyQt5 (default graphics system) QWindowsWindow::setGeometryDp: Unable to set geometry 600x900+480+210 on QWidget Window/'QMainWindowClassWindow'. Resulting geometry: 600x874+480+210 (frame: 8, 30, 8, 8, custom margin: 0, 0, 0, 0, minimum size: 69x69, maximum size: 1677721 5x16777215). Using PyQt5 (default graphics system) D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64\lib\sit e-packages\pyqtgraph\flowchart\eq.py:11: FutureWarning: comparison to `None` wil l result in an elementwise object comparison in the future. e = a==b Traceback (most recent call last): File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64 \lib\site-packages\pyqtgraph\examples\FlowchartCustomNode.py", line 147, in <mod ule> fNode = fc.createNode('UnsharpMask', pos=(0, 0)) File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64 \lib\site-packages\pyqtgraph\flowchart\Flowchart.py", line 177, in createNode node = self.library.getNodeType(nodeType)(name) File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64 \lib\site-packages\pyqtgraph\examples\FlowchartCustomNode.py", line 106, in __in it__ CtrlNode.__init__(self, name, terminals=terminals) File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64 \lib\site-packages\pyqtgraph\flowchart\library\common.py", line 97, in __init__ self.ui, self.stateGroup, self.ctrls = generateUi(ui) File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64 \lib\site-packages\pyqtgraph\flowchart\library\common.py", line 51, in generateU i w.setOpts(**o) File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64 \lib\site-packages\pyqtgraph\widgets\SpinBox.py", line 160, in setOpts raise TypeError("Invalid keyword argument '%s'." % k) TypeError: Invalid keyword argument 'range'. ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/FlowchartCustomNode.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """ 3 This example demonstrates writing a custom Node subclass for use with flowcharts. 4 5 We implement a couple of simple image processing nodes. 6 """ 7 import initExample ## Add path to library (just for examples; you do not need this) 8 9 from pyqtgraph.flowchart import Flowchart, Node 10 import pyqtgraph.flowchart.library as fclib 11 from pyqtgraph.flowchart.library.common import CtrlNode 12 from pyqtgraph.Qt import QtGui, QtCore 13 import pyqtgraph as pg 14 import numpy as np 15 16 app = QtGui.QApplication([]) 17 18 ## Create main window with a grid layout inside 19 win = QtGui.QMainWindow() 20 win.setWindowTitle('pyqtgraph example: FlowchartCustomNode') 21 cw = QtGui.QWidget() 22 win.setCentralWidget(cw) 23 layout = QtGui.QGridLayout() 24 cw.setLayout(layout) 25 26 ## Create an empty flowchart with a single input and output 27 fc = Flowchart(terminals={ 28 'dataIn': {'io': 'in'}, 29 'dataOut': {'io': 'out'} 30 }) 31 w = fc.widget() 32 33 layout.addWidget(fc.widget(), 0, 0, 2, 1) 34 35 ## Create two ImageView widgets to display the raw and processed data with contrast 36 ## and color control. 
37 v1 = pg.ImageView() 38 v2 = pg.ImageView() 39 layout.addWidget(v1, 0, 1) 40 layout.addWidget(v2, 1, 1) 41 42 win.show() 43 44 ## generate random input data 45 data = np.random.normal(size=(100,100)) 46 data = 25 * pg.gaussianFilter(data, (5,5)) 47 data += np.random.normal(size=(100,100)) 48 data[40:60, 40:60] += 15.0 49 data[30:50, 30:50] += 15.0 50 #data += np.sin(np.linspace(0, 100, 1000)) 51 #data = metaarray.MetaArray(data, info=[{'name': 'Time', 'values': np.linspace(0, 1.0, len(data))}, {}]) 52 53 ## Set the raw data as the input value to the flowchart 54 fc.setInput(dataIn=data) 55 56 57 ## At this point, we need some custom Node classes since those provided in the library 58 ## are not sufficient. Each node will define a set of input/output terminals, a 59 ## processing function, and optionally a control widget (to be displayed in the 60 ## flowchart control panel) 61 62 class ImageViewNode(Node): 63 """Node that displays image data in an ImageView widget""" 64 nodeName = 'ImageView' 65 66 def __init__(self, name): 67 self.view = None 68 ## Initialize node with only a single input terminal 69 Node.__init__(self, name, terminals={'data': {'io':'in'}}) 70 71 def setView(self, view): ## setView must be called by the program 72 self.view = view 73 74 def process(self, data, display=True): 75 ## if process is called with display=False, then the flowchart is being operated 76 ## in batch processing mode, so we should skip displaying to improve performance. 77 78 if display and self.view is not None: 79 ## the 'data' argument is the value given to the 'data' terminal 80 if data is None: 81 self.view.setImage(np.zeros((1,1))) # give a blank array to clear the view 82 else: 83 self.view.setImage(data) 84 85 86 87 88 ## We will define an unsharp masking filter node as a subclass of CtrlNode. 89 ## CtrlNode is just a convenience class that automatically creates its 90 ## control widget based on a simple data structure. 91 class UnsharpMaskNode(CtrlNode): 92 """Return the input data passed through an unsharp mask.""" 93 nodeName = "UnsharpMask" 94 uiTemplate = [ 95 ('sigma', 'spin', {'value': 1.0, 'step': 1.0, 'range': [0.0, None]}), 96 ('strength', 'spin', {'value': 1.0, 'dec': True, 'step': 0.5, 'minStep': 0.01, 'range': [0.0, None]}), 97 ] 98 def __init__(self, name): 99 ## Define the input / output terminals available on this node 100 terminals = { 101 'dataIn': dict(io='in'), # each terminal needs at least a name and 102 'dataOut': dict(io='out'), # to specify whether it is input or output 103 } # other more advanced options are available 104 # as well.. 105 106 CtrlNode.__init__(self, name, terminals=terminals) 107 108 def process(self, dataIn, display=True): 109 # CtrlNode has created self.ctrls, which is a dict containing {ctrlName: widget} 110 sigma = self.ctrls['sigma'].value() 111 strength = self.ctrls['strength'].value() 112 output = dataIn - (strength * pg.gaussianFilter(dataIn, (sigma,sigma))) 113 return {'dataOut': output} 114 115 116 ## To make our custom node classes available in the flowchart context menu, 117 ## we can either register them with the default node library or make a 118 ## new library. 
119 120 121 ## Method 1: Register to global default library: 122 #fclib.registerNodeType(ImageViewNode, [('Display',)]) 123 #fclib.registerNodeType(UnsharpMaskNode, [('Image',)]) 124 125 ## Method 2: If we want to make our custom node available only to this flowchart, 126 ## then instead of registering the node type globally, we can create a new 127 ## NodeLibrary: 128 library = fclib.LIBRARY.copy() # start with the default node set 129 library.addNodeType(ImageViewNode, [('Display',)]) 130 # Add the unsharp mask node to two locations in the menu to demonstrate 131 # that we can create arbitrary menu structures 132 library.addNodeType(UnsharpMaskNode, [('Image',), 133 ('Submenu_test','submenu2','submenu3')]) 134 fc.setLibrary(library) 135 136 137 ## Now we will programmatically add nodes to define the function of the flowchart. 138 ## Normally, the user will do this manually or by loading a pre-generated 139 ## flowchart file. 140 141 v1Node = fc.createNode('ImageView', pos=(0, -150)) 142 v1Node.setView(v1) 143 144 v2Node = fc.createNode('ImageView', pos=(150, -150)) 145 v2Node.setView(v2) 146 147 fNode = fc.createNode('UnsharpMask', pos=(0, 0)) 148 fc.connectTerminals(fc['dataIn'], fNode['dataIn']) 149 fc.connectTerminals(fc['dataIn'], v1Node['data']) 150 fc.connectTerminals(fNode['dataOut'], v2Node['data']) 151 fc.connectTerminals(fNode['dataOut'], fc['dataOut']) 152 153 154 155 ## Start Qt event loop unless running in interactive mode or using pyside. 156 if __name__ == '__main__': 157 import sys 158 if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'): 159 QtGui.QApplication.instance().exec_() 160 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/FlowchartCustomNode.py b/examples/FlowchartCustomNode.py
--- a/examples/FlowchartCustomNode.py
+++ b/examples/FlowchartCustomNode.py
@@ -92,8 +92,8 @@
     """Return the input data passed through an unsharp mask."""
    nodeName = "UnsharpMask"
    uiTemplate = [
-        ('sigma', 'spin', {'value': 1.0, 'step': 1.0, 'range': [0.0, None]}),
-        ('strength', 'spin', {'value': 1.0, 'dec': True, 'step': 0.5, 'minStep': 0.01, 'range': [0.0, None]}),
+        ('sigma', 'spin', {'value': 1.0, 'step': 1.0, 'bounds': [0.0, None]}),
+        ('strength', 'spin', {'value': 1.0, 'dec': True, 'step': 0.5, 'minStep': 0.01, 'bounds': [0.0, None]}),
    ]
    def __init__(self, name):
        ## Define the input / output terminals available on this node
{"golden_diff": "diff --git a/examples/FlowchartCustomNode.py b/examples/FlowchartCustomNode.py\n--- a/examples/FlowchartCustomNode.py\n+++ b/examples/FlowchartCustomNode.py\n@@ -92,8 +92,8 @@\n \"\"\"Return the input data passed through an unsharp mask.\"\"\"\n nodeName = \"UnsharpMask\"\n uiTemplate = [\n- ('sigma', 'spin', {'value': 1.0, 'step': 1.0, 'range': [0.0, None]}),\n- ('strength', 'spin', {'value': 1.0, 'dec': True, 'step': 0.5, 'minStep': 0.01, 'range': [0.0, None]}),\n+ ('sigma', 'spin', {'value': 1.0, 'step': 1.0, 'bounds': [0.0, None]}),\n+ ('strength', 'spin', {'value': 1.0, 'dec': True, 'step': 0.5, 'minStep': 0.01, 'bounds': [0.0, None]}),\n ]\n def __init__(self, name):\n ## Define the input / output terminals available on this node\n", "issue": "invalid keyword argument 'range'\ntesting on Windows Qt5.5.1/ PyQtgraph github of 20160102, I have the following error on the \"Custom Flowchart Nodes\" test:\n\n```\nUsing PyQt5 (default graphics system)\nQWindowsWindow::setGeometryDp: Unable to set geometry 600x900+480+210 on QWidget\nWindow/'QMainWindowClassWindow'. Resulting geometry: 600x874+480+210 (frame: 8,\n 30, 8, 8, custom margin: 0, 0, 0, 0, minimum size: 69x69, maximum size: 1677721\n5x16777215).\nUsing PyQt5 (default graphics system)\nD:\\WinPython\\basedir34\\buildQt5\\winpython-3.4.4.amd64\\python-3.4.4.amd64\\lib\\sit\ne-packages\\pyqtgraph\\flowchart\\eq.py:11: FutureWarning: comparison to `None` wil\nl result in an elementwise object comparison in the future.\n e = a==b\nTraceback (most recent call last):\n File \"D:\\WinPython\\basedir34\\buildQt5\\winpython-3.4.4.amd64\\python-3.4.4.amd64\n\\lib\\site-packages\\pyqtgraph\\examples\\FlowchartCustomNode.py\", line 147, in <mod\nule>\n fNode = fc.createNode('UnsharpMask', pos=(0, 0))\n File \"D:\\WinPython\\basedir34\\buildQt5\\winpython-3.4.4.amd64\\python-3.4.4.amd64\n\\lib\\site-packages\\pyqtgraph\\flowchart\\Flowchart.py\", line 177, in createNode\n node = self.library.getNodeType(nodeType)(name)\n File \"D:\\WinPython\\basedir34\\buildQt5\\winpython-3.4.4.amd64\\python-3.4.4.amd64\n\\lib\\site-packages\\pyqtgraph\\examples\\FlowchartCustomNode.py\", line 106, in __in\nit__\n CtrlNode.__init__(self, name, terminals=terminals)\n File \"D:\\WinPython\\basedir34\\buildQt5\\winpython-3.4.4.amd64\\python-3.4.4.amd64\n\\lib\\site-packages\\pyqtgraph\\flowchart\\library\\common.py\", line 97, in __init__\n self.ui, self.stateGroup, self.ctrls = generateUi(ui)\n File \"D:\\WinPython\\basedir34\\buildQt5\\winpython-3.4.4.amd64\\python-3.4.4.amd64\n\\lib\\site-packages\\pyqtgraph\\flowchart\\library\\common.py\", line 51, in generateU\ni\n w.setOpts(**o)\n File \"D:\\WinPython\\basedir34\\buildQt5\\winpython-3.4.4.amd64\\python-3.4.4.amd64\n\\lib\\site-packages\\pyqtgraph\\widgets\\SpinBox.py\", line 160, in setOpts\n raise TypeError(\"Invalid keyword argument '%s'.\" % k)\nTypeError: Invalid keyword argument 'range'.\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nThis example demonstrates writing a custom Node subclass for use with flowcharts.\n\nWe implement a couple of simple image processing nodes.\n\"\"\"\nimport initExample ## Add path to library (just for examples; you do not need this)\n\nfrom pyqtgraph.flowchart import Flowchart, Node\nimport pyqtgraph.flowchart.library as fclib\nfrom pyqtgraph.flowchart.library.common import CtrlNode\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport pyqtgraph as pg\nimport numpy as np\n\napp = QtGui.QApplication([])\n\n## Create main window with a 
grid layout inside\nwin = QtGui.QMainWindow()\nwin.setWindowTitle('pyqtgraph example: FlowchartCustomNode')\ncw = QtGui.QWidget()\nwin.setCentralWidget(cw)\nlayout = QtGui.QGridLayout()\ncw.setLayout(layout)\n\n## Create an empty flowchart with a single input and output\nfc = Flowchart(terminals={\n 'dataIn': {'io': 'in'},\n 'dataOut': {'io': 'out'} \n})\nw = fc.widget()\n\nlayout.addWidget(fc.widget(), 0, 0, 2, 1)\n\n## Create two ImageView widgets to display the raw and processed data with contrast\n## and color control.\nv1 = pg.ImageView()\nv2 = pg.ImageView()\nlayout.addWidget(v1, 0, 1)\nlayout.addWidget(v2, 1, 1)\n\nwin.show()\n\n## generate random input data\ndata = np.random.normal(size=(100,100))\ndata = 25 * pg.gaussianFilter(data, (5,5))\ndata += np.random.normal(size=(100,100))\ndata[40:60, 40:60] += 15.0\ndata[30:50, 30:50] += 15.0\n#data += np.sin(np.linspace(0, 100, 1000))\n#data = metaarray.MetaArray(data, info=[{'name': 'Time', 'values': np.linspace(0, 1.0, len(data))}, {}])\n\n## Set the raw data as the input value to the flowchart\nfc.setInput(dataIn=data)\n\n\n## At this point, we need some custom Node classes since those provided in the library\n## are not sufficient. Each node will define a set of input/output terminals, a \n## processing function, and optionally a control widget (to be displayed in the \n## flowchart control panel)\n\nclass ImageViewNode(Node):\n \"\"\"Node that displays image data in an ImageView widget\"\"\"\n nodeName = 'ImageView'\n \n def __init__(self, name):\n self.view = None\n ## Initialize node with only a single input terminal\n Node.__init__(self, name, terminals={'data': {'io':'in'}})\n \n def setView(self, view): ## setView must be called by the program\n self.view = view\n \n def process(self, data, display=True):\n ## if process is called with display=False, then the flowchart is being operated\n ## in batch processing mode, so we should skip displaying to improve performance.\n \n if display and self.view is not None:\n ## the 'data' argument is the value given to the 'data' terminal\n if data is None:\n self.view.setImage(np.zeros((1,1))) # give a blank array to clear the view\n else:\n self.view.setImage(data)\n\n\n\n \n## We will define an unsharp masking filter node as a subclass of CtrlNode.\n## CtrlNode is just a convenience class that automatically creates its\n## control widget based on a simple data structure.\nclass UnsharpMaskNode(CtrlNode):\n \"\"\"Return the input data passed through an unsharp mask.\"\"\"\n nodeName = \"UnsharpMask\"\n uiTemplate = [\n ('sigma', 'spin', {'value': 1.0, 'step': 1.0, 'range': [0.0, None]}),\n ('strength', 'spin', {'value': 1.0, 'dec': True, 'step': 0.5, 'minStep': 0.01, 'range': [0.0, None]}),\n ]\n def __init__(self, name):\n ## Define the input / output terminals available on this node\n terminals = {\n 'dataIn': dict(io='in'), # each terminal needs at least a name and\n 'dataOut': dict(io='out'), # to specify whether it is input or output\n } # other more advanced options are available\n # as well..\n \n CtrlNode.__init__(self, name, terminals=terminals)\n \n def process(self, dataIn, display=True):\n # CtrlNode has created self.ctrls, which is a dict containing {ctrlName: widget}\n sigma = self.ctrls['sigma'].value()\n strength = self.ctrls['strength'].value()\n output = dataIn - (strength * pg.gaussianFilter(dataIn, (sigma,sigma)))\n return {'dataOut': output}\n\n\n## To make our custom node classes available in the flowchart context menu,\n## we can either register them with the 
default node library or make a\n## new library.\n\n \n## Method 1: Register to global default library:\n#fclib.registerNodeType(ImageViewNode, [('Display',)])\n#fclib.registerNodeType(UnsharpMaskNode, [('Image',)])\n\n## Method 2: If we want to make our custom node available only to this flowchart,\n## then instead of registering the node type globally, we can create a new \n## NodeLibrary:\nlibrary = fclib.LIBRARY.copy() # start with the default node set\nlibrary.addNodeType(ImageViewNode, [('Display',)])\n# Add the unsharp mask node to two locations in the menu to demonstrate\n# that we can create arbitrary menu structures\nlibrary.addNodeType(UnsharpMaskNode, [('Image',), \n ('Submenu_test','submenu2','submenu3')])\nfc.setLibrary(library)\n\n\n## Now we will programmatically add nodes to define the function of the flowchart.\n## Normally, the user will do this manually or by loading a pre-generated\n## flowchart file.\n\nv1Node = fc.createNode('ImageView', pos=(0, -150))\nv1Node.setView(v1)\n\nv2Node = fc.createNode('ImageView', pos=(150, -150))\nv2Node.setView(v2)\n\nfNode = fc.createNode('UnsharpMask', pos=(0, 0))\nfc.connectTerminals(fc['dataIn'], fNode['dataIn'])\nfc.connectTerminals(fc['dataIn'], v1Node['data'])\nfc.connectTerminals(fNode['dataOut'], v2Node['data'])\nfc.connectTerminals(fNode['dataOut'], fc['dataOut'])\n\n\n\n## Start Qt event loop unless running in interactive mode or using pyside.\nif __name__ == '__main__':\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n", "path": "examples/FlowchartCustomNode.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nThis example demonstrates writing a custom Node subclass for use with flowcharts.\n\nWe implement a couple of simple image processing nodes.\n\"\"\"\nimport initExample ## Add path to library (just for examples; you do not need this)\n\nfrom pyqtgraph.flowchart import Flowchart, Node\nimport pyqtgraph.flowchart.library as fclib\nfrom pyqtgraph.flowchart.library.common import CtrlNode\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport pyqtgraph as pg\nimport numpy as np\n\napp = QtGui.QApplication([])\n\n## Create main window with a grid layout inside\nwin = QtGui.QMainWindow()\nwin.setWindowTitle('pyqtgraph example: FlowchartCustomNode')\ncw = QtGui.QWidget()\nwin.setCentralWidget(cw)\nlayout = QtGui.QGridLayout()\ncw.setLayout(layout)\n\n## Create an empty flowchart with a single input and output\nfc = Flowchart(terminals={\n 'dataIn': {'io': 'in'},\n 'dataOut': {'io': 'out'} \n})\nw = fc.widget()\n\nlayout.addWidget(fc.widget(), 0, 0, 2, 1)\n\n## Create two ImageView widgets to display the raw and processed data with contrast\n## and color control.\nv1 = pg.ImageView()\nv2 = pg.ImageView()\nlayout.addWidget(v1, 0, 1)\nlayout.addWidget(v2, 1, 1)\n\nwin.show()\n\n## generate random input data\ndata = np.random.normal(size=(100,100))\ndata = 25 * pg.gaussianFilter(data, (5,5))\ndata += np.random.normal(size=(100,100))\ndata[40:60, 40:60] += 15.0\ndata[30:50, 30:50] += 15.0\n#data += np.sin(np.linspace(0, 100, 1000))\n#data = metaarray.MetaArray(data, info=[{'name': 'Time', 'values': np.linspace(0, 1.0, len(data))}, {}])\n\n## Set the raw data as the input value to the flowchart\nfc.setInput(dataIn=data)\n\n\n## At this point, we need some custom Node classes since those provided in the library\n## are not sufficient. 
Each node will define a set of input/output terminals, a \n## processing function, and optionally a control widget (to be displayed in the \n## flowchart control panel)\n\nclass ImageViewNode(Node):\n \"\"\"Node that displays image data in an ImageView widget\"\"\"\n nodeName = 'ImageView'\n \n def __init__(self, name):\n self.view = None\n ## Initialize node with only a single input terminal\n Node.__init__(self, name, terminals={'data': {'io':'in'}})\n \n def setView(self, view): ## setView must be called by the program\n self.view = view\n \n def process(self, data, display=True):\n ## if process is called with display=False, then the flowchart is being operated\n ## in batch processing mode, so we should skip displaying to improve performance.\n \n if display and self.view is not None:\n ## the 'data' argument is the value given to the 'data' terminal\n if data is None:\n self.view.setImage(np.zeros((1,1))) # give a blank array to clear the view\n else:\n self.view.setImage(data)\n\n\n\n \n## We will define an unsharp masking filter node as a subclass of CtrlNode.\n## CtrlNode is just a convenience class that automatically creates its\n## control widget based on a simple data structure.\nclass UnsharpMaskNode(CtrlNode):\n \"\"\"Return the input data passed through an unsharp mask.\"\"\"\n nodeName = \"UnsharpMask\"\n uiTemplate = [\n ('sigma', 'spin', {'value': 1.0, 'step': 1.0, 'bounds': [0.0, None]}),\n ('strength', 'spin', {'value': 1.0, 'dec': True, 'step': 0.5, 'minStep': 0.01, 'bounds': [0.0, None]}),\n ]\n def __init__(self, name):\n ## Define the input / output terminals available on this node\n terminals = {\n 'dataIn': dict(io='in'), # each terminal needs at least a name and\n 'dataOut': dict(io='out'), # to specify whether it is input or output\n } # other more advanced options are available\n # as well..\n \n CtrlNode.__init__(self, name, terminals=terminals)\n \n def process(self, dataIn, display=True):\n # CtrlNode has created self.ctrls, which is a dict containing {ctrlName: widget}\n sigma = self.ctrls['sigma'].value()\n strength = self.ctrls['strength'].value()\n output = dataIn - (strength * pg.gaussianFilter(dataIn, (sigma,sigma)))\n return {'dataOut': output}\n\n\n## To make our custom node classes available in the flowchart context menu,\n## we can either register them with the default node library or make a\n## new library.\n\n \n## Method 1: Register to global default library:\n#fclib.registerNodeType(ImageViewNode, [('Display',)])\n#fclib.registerNodeType(UnsharpMaskNode, [('Image',)])\n\n## Method 2: If we want to make our custom node available only to this flowchart,\n## then instead of registering the node type globally, we can create a new \n## NodeLibrary:\nlibrary = fclib.LIBRARY.copy() # start with the default node set\nlibrary.addNodeType(ImageViewNode, [('Display',)])\n# Add the unsharp mask node to two locations in the menu to demonstrate\n# that we can create arbitrary menu structures\nlibrary.addNodeType(UnsharpMaskNode, [('Image',), \n ('Submenu_test','submenu2','submenu3')])\nfc.setLibrary(library)\n\n\n## Now we will programmatically add nodes to define the function of the flowchart.\n## Normally, the user will do this manually or by loading a pre-generated\n## flowchart file.\n\nv1Node = fc.createNode('ImageView', pos=(0, -150))\nv1Node.setView(v1)\n\nv2Node = fc.createNode('ImageView', pos=(150, -150))\nv2Node.setView(v2)\n\nfNode = fc.createNode('UnsharpMask', pos=(0, 0))\nfc.connectTerminals(fc['dataIn'], 
fNode['dataIn'])\nfc.connectTerminals(fc['dataIn'], v1Node['data'])\nfc.connectTerminals(fNode['dataOut'], v2Node['data'])\nfc.connectTerminals(fNode['dataOut'], fc['dataOut'])\n\n\n\n## Start Qt event loop unless running in interactive mode or using pyside.\nif __name__ == '__main__':\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n", "path": "examples/FlowchartCustomNode.py"}]}
2,955
274
gh_patches_debug_86
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-2754
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Transitive import of mitmproxy.version causes warning Since #1837, we import `.script`, will imports `.flow`, which imports `.version`. This causes the following warning in pytest: ``` test/mitmproxy/test_version.py::test_version /Users/kriechi/.pyenv/versions/3.5.3/lib/python3.5/runpy.py:125: RuntimeWarning: 'mitmproxy.version' found in sys.modules after import of package 'mitmproxy', but prior to execution of 'mitmproxy.version'; this may result in unpredictable behaviour warn(RuntimeWarning(msg)) -- Docs: http://doc.pytest.org/en/latest/warnings.html ``` [Note](http://python-notes.curiousefficiency.org/en/latest/python_concepts/import_traps.html#the-double-import-trap) > This next trap exists in all current versions of Python, including 3.3, and can be summed up in the following general guideline: “Never add a package directory, or any directory inside a package, directly to the Python path”. > The reason this is problematic is that every module in that directory is now potentially accessible under two different names: as a top level module (since the directory is on sys.path) and as a submodule of the package (if the higher level directory containing the package itself is also on sys.path). Maybe using the approach described [here](https://stackoverflow.com/questions/27947639/how-to-properly-create-a-pyinstaller-hook-or-maybe-hidden-import) works better? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mitmproxy/version.py` Content: ``` 1 import os 2 import subprocess 3 4 # The actual version string. For precompiled binaries, this will be changed to include the build 5 # tag, e.g. "3.0.0.dev0042-0xcafeabc" 6 VERSION = "3.0.0" 7 PATHOD = "pathod " + VERSION 8 MITMPROXY = "mitmproxy " + VERSION 9 10 # Serialization format version. This is displayed nowhere, it just needs to be incremented by one 11 # for each change in the file format. 12 FLOW_FORMAT_VERSION = 5 13 14 15 def get_version(dev: bool = False, build: bool = False, refresh: bool = False) -> str: 16 """ 17 Return a detailed version string, sourced either from a hardcoded VERSION constant 18 or obtained dynamically using git. 19 20 Args: 21 dev: If True, non-tagged releases will include a ".devXXXX" suffix, where XXXX is the number 22 of commits since the last tagged release. 23 build: If True, non-tagged releases will include a "-0xXXXXXXX" suffix, where XXXXXXX are 24 the first seven digits of the commit hash. 25 refresh: If True, always try to use git instead of a potentially hardcoded constant. 26 """ 27 28 mitmproxy_version = VERSION 29 30 if "dev" in VERSION and not refresh: 31 pass # There is a hardcoded build tag, so we just use what's there. 
32 elif dev or build: 33 here = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) 34 try: 35 git_describe = subprocess.check_output( 36 ['git', 'describe', '--tags', '--long'], 37 stderr=subprocess.STDOUT, 38 cwd=here, 39 ) 40 last_tag, tag_dist, commit = git_describe.decode().strip().rsplit("-", 2) 41 commit = commit.lstrip("g")[:7] 42 tag_dist = int(tag_dist) 43 except Exception: 44 pass 45 else: 46 # Remove current suffix 47 mitmproxy_version = mitmproxy_version.split(".dev")[0] 48 49 # Add suffix for non-tagged releases 50 if tag_dist > 0: 51 mitmproxy_version += ".dev{tag_dist}".format(tag_dist=tag_dist) 52 # The wheel build tag (we use the commit) must start with a digit, so we include "0x" 53 mitmproxy_version += "-0x{commit}".format(commit=commit) 54 55 if not dev: 56 mitmproxy_version = mitmproxy_version.split(".dev")[0] 57 elif not build: 58 mitmproxy_version = mitmproxy_version.split("-0x")[0] 59 60 return mitmproxy_version 61 62 63 if __name__ == "__main__": 64 print(VERSION) 65 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mitmproxy/version.py b/mitmproxy/version.py
--- a/mitmproxy/version.py
+++ b/mitmproxy/version.py
@@ -60,5 +60,5 @@
     return mitmproxy_version
 
 
-if __name__ == "__main__":
+if __name__ == "__main__": # pragma: no cover
     print(VERSION)
{"golden_diff": "diff --git a/mitmproxy/version.py b/mitmproxy/version.py\n--- a/mitmproxy/version.py\n+++ b/mitmproxy/version.py\n@@ -60,5 +60,5 @@\n return mitmproxy_version\n \n \n-if __name__ == \"__main__\":\n+if __name__ == \"__main__\": # pragma: no cover\n print(VERSION)\n", "issue": "Transitive import of mitmproxy.version causes warning\nSince #1837, we import `.script`, will imports `.flow`, which imports `.version`.\r\nThis causes the following warning in pytest:\r\n\r\n```\r\ntest/mitmproxy/test_version.py::test_version\r\n /Users/kriechi/.pyenv/versions/3.5.3/lib/python3.5/runpy.py:125: \r\nRuntimeWarning: 'mitmproxy.version' found in sys.modules after import of package \r\n'mitmproxy', but prior to execution of 'mitmproxy.version'; this may result in \r\nunpredictable behaviour\r\n warn(RuntimeWarning(msg))\r\n\r\n-- Docs: http://doc.pytest.org/en/latest/warnings.html\r\n```\r\n\r\n[Note](http://python-notes.curiousefficiency.org/en/latest/python_concepts/import_traps.html#the-double-import-trap)\r\n> This next trap exists in all current versions of Python, including 3.3, and can be summed up in the following general guideline: \u201cNever add a package directory, or any directory inside a package, directly to the Python path\u201d.\r\n\r\n> The reason this is problematic is that every module in that directory is now potentially accessible under two different names: as a top level module (since the directory is on sys.path) and as a submodule of the package (if the higher level directory containing the package itself is also on sys.path).\r\n\r\nMaybe using the approach described [here](https://stackoverflow.com/questions/27947639/how-to-properly-create-a-pyinstaller-hook-or-maybe-hidden-import) works better?\n", "before_files": [{"content": "import os\nimport subprocess\n\n# The actual version string. For precompiled binaries, this will be changed to include the build\n# tag, e.g. \"3.0.0.dev0042-0xcafeabc\"\nVERSION = \"3.0.0\"\nPATHOD = \"pathod \" + VERSION\nMITMPROXY = \"mitmproxy \" + VERSION\n\n# Serialization format version. 
This is displayed nowhere, it just needs to be incremented by one\n# for each change in the file format.\nFLOW_FORMAT_VERSION = 5\n\n\ndef get_version(dev: bool = False, build: bool = False, refresh: bool = False) -> str:\n \"\"\"\n Return a detailed version string, sourced either from a hardcoded VERSION constant\n or obtained dynamically using git.\n\n Args:\n dev: If True, non-tagged releases will include a \".devXXXX\" suffix, where XXXX is the number\n of commits since the last tagged release.\n build: If True, non-tagged releases will include a \"-0xXXXXXXX\" suffix, where XXXXXXX are\n the first seven digits of the commit hash.\n refresh: If True, always try to use git instead of a potentially hardcoded constant.\n \"\"\"\n\n mitmproxy_version = VERSION\n\n if \"dev\" in VERSION and not refresh:\n pass # There is a hardcoded build tag, so we just use what's there.\n elif dev or build:\n here = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n try:\n git_describe = subprocess.check_output(\n ['git', 'describe', '--tags', '--long'],\n stderr=subprocess.STDOUT,\n cwd=here,\n )\n last_tag, tag_dist, commit = git_describe.decode().strip().rsplit(\"-\", 2)\n commit = commit.lstrip(\"g\")[:7]\n tag_dist = int(tag_dist)\n except Exception:\n pass\n else:\n # Remove current suffix\n mitmproxy_version = mitmproxy_version.split(\".dev\")[0]\n\n # Add suffix for non-tagged releases\n if tag_dist > 0:\n mitmproxy_version += \".dev{tag_dist}\".format(tag_dist=tag_dist)\n # The wheel build tag (we use the commit) must start with a digit, so we include \"0x\"\n mitmproxy_version += \"-0x{commit}\".format(commit=commit)\n\n if not dev:\n mitmproxy_version = mitmproxy_version.split(\".dev\")[0]\n elif not build:\n mitmproxy_version = mitmproxy_version.split(\"-0x\")[0]\n\n return mitmproxy_version\n\n\nif __name__ == \"__main__\":\n print(VERSION)\n", "path": "mitmproxy/version.py"}], "after_files": [{"content": "import os\nimport subprocess\n\n# The actual version string. For precompiled binaries, this will be changed to include the build\n# tag, e.g. \"3.0.0.dev0042-0xcafeabc\"\nVERSION = \"3.0.0\"\nPATHOD = \"pathod \" + VERSION\nMITMPROXY = \"mitmproxy \" + VERSION\n\n# Serialization format version. 
This is displayed nowhere, it just needs to be incremented by one\n# for each change in the file format.\nFLOW_FORMAT_VERSION = 5\n\n\ndef get_version(dev: bool = False, build: bool = False, refresh: bool = False) -> str:\n \"\"\"\n Return a detailed version string, sourced either from a hardcoded VERSION constant\n or obtained dynamically using git.\n\n Args:\n dev: If True, non-tagged releases will include a \".devXXXX\" suffix, where XXXX is the number\n of commits since the last tagged release.\n build: If True, non-tagged releases will include a \"-0xXXXXXXX\" suffix, where XXXXXXX are\n the first seven digits of the commit hash.\n refresh: If True, always try to use git instead of a potentially hardcoded constant.\n \"\"\"\n\n mitmproxy_version = VERSION\n\n if \"dev\" in VERSION and not refresh:\n pass # There is a hardcoded build tag, so we just use what's there.\n elif dev or build:\n here = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n try:\n git_describe = subprocess.check_output(\n ['git', 'describe', '--tags', '--long'],\n stderr=subprocess.STDOUT,\n cwd=here,\n )\n last_tag, tag_dist, commit = git_describe.decode().strip().rsplit(\"-\", 2)\n commit = commit.lstrip(\"g\")[:7]\n tag_dist = int(tag_dist)\n except Exception:\n pass\n else:\n # Remove current suffix\n mitmproxy_version = mitmproxy_version.split(\".dev\")[0]\n\n # Add suffix for non-tagged releases\n if tag_dist > 0:\n mitmproxy_version += \".dev{tag_dist}\".format(tag_dist=tag_dist)\n # The wheel build tag (we use the commit) must start with a digit, so we include \"0x\"\n mitmproxy_version += \"-0x{commit}\".format(commit=commit)\n\n if not dev:\n mitmproxy_version = mitmproxy_version.split(\".dev\")[0]\n elif not build:\n mitmproxy_version = mitmproxy_version.split(\"-0x\")[0]\n\n return mitmproxy_version\n\n\nif __name__ == \"__main__\": # pragma: no cover\n print(VERSION)\n", "path": "mitmproxy/version.py"}]}
num_tokens: 1,301
num_tokens_diff: 82
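A note on the mitmproxy record above: the golden diff's only functional change is the `# pragma: no cover` marker, which keeps coverage from forcing the test suite to execute the module as a script and thereby triggering the double-import warning. A minimal sketch of the resulting module layout, with a placeholder `get_version` body standing in for mitmproxy's real git logic:

```python
# version.py -- illustrative stand-in, not the actual mitmproxy module.
VERSION = "3.0.0"


def get_version() -> str:
    # The real project may append git metadata; this sketch just returns the constant.
    return VERSION


# Excluded from coverage so the test suite never has to run this module as a
# script (e.g. via runpy), which is what emitted the RuntimeWarning about
# 'mitmproxy.version' already being in sys.modules.
if __name__ == "__main__":  # pragma: no cover
    print(VERSION)
```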
problem_id: gh_patches_debug_3742
source: rasdani/github-patches
task_type: git_diff
in_source_id: netket__netket-1193
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bose-Hubbard model fails in extended Hilbert space for newer versions Hello, we are working on an extended Hilbert space in order to calculate the ground state energy of the Bose-Hubbard model. We added spin 1/2 sites between every boson site in the one-dimensional chain, but we left the Hamiltonian unchanged. We expected that the ground state energy should be the same in this extended Hilbert space, but in the newer versions of Netket (post 3.3.3), this energy differs from the actual solution. Even more, every time we run the exact diagonalization of the Hamiltonian matrix (either with the full_ed or lanczos_ed method), its eigenvalues change; we also diagonalized the same Hamiltonian matrix with the linalg package of numpy and the result also differs every time we do it. This doesn't happen in the older versions; the problem begins in the 3.3.4 version of Netket. We claim that the problem resides in the way that the newer versions of Netket interpret the Hamiltonian matrices with mixed Hilbert spaces. When we do the same Bose-Hubbard model in a Fock-Boson space, the calculation gives the correct value of the ground state energy. Here is the code that we run in both versions of Netket, and a video of the discrepancy. Notebook ran in Nektet 3.3.3: https://www.youtube.com/watch?v=ENhRJfYg7dg Notebook ran in Netket 3.4.1: https://www.youtube.com/watch?v=Q3XfWrnR7LU You can download the Jupyter Script here: https://www.dropbox.com/s/dza4kbyem2ycg6v/BoseHubbardNetket.ipynb?dl=0 ``` # Extended Hilbert space n_max = 3 L = 3 # Number of spin sites hil_gen = nk.hilbert.Fock(n_max = n_max)*nk.hilbert.Spin(1/2) hi_ext = hil_gen for i in range(L-1): hi_ext = hi_ext * hil_gen hi_ext = hi_ext * nk.hilbert.Fock(n_max = n_max) # Boson-Fock Hilbert space N = 4 # Number of bosons in the chain # Chain graph g = nk.graph.Chain(length=N, pbc=False) hi_fock = nk.hilbert.Fock(n_max=n_max, N=N) # Bose-Hubbard Hamiltonian for extended Hilbert space J = 0.5 U = 0.5 h = create(hi_ext,0)*destroy(hi_ext,2) + create(hi_ext,2)*destroy(hi_ext,4) + create(hi_ext,4)*destroy(hi_ext,6) h_hc = h.H.collect() h_u = (number(hi_ext,0)*(number(hi_ext,0)-1) + number(hi_ext,2)*(number(hi_ext,2)-1) + number(hi_ext,4)*(number(hi_ext,4)-1) + number(hi_ext,6)*(number(hi_ext,6)-1)) H_bh = -J*(h + h_hc) + (U/2)*h_u # Bose-Hubbard Hamiltonian for the Fock-Bose Hilbert space H_bh2 = nk.operator.BoseHubbard(hilbert=hi_fock,U=0.5,J=0.5,graph=g) # lanczos computation of the eigenvalues of the Hamiltonian with the extended Hilbert space E_bh, ket_bh = nk.exact.lanczos_ed(H_bh, compute_eigenvectors=True) print("Exact ground state energy = {0:.3f}".format(E_bh[0])) # full computation of the eigenvalues of the Hamiltonian with the extended Hilbert space E_bhfull = nk.exact.full_ed(H_bh) print("Exact ground state energy = {0:.3f}".format(E_bhfull[0])) # numpy.linalg computation of the eigenvalues of the Hamiltonian with the extended Hilbert space M_bh = H_bh.to_dense() E_bhla = min(np.linalg.eig(M_bh)[0]) print("Ground state energy:", E_bhla.real) # scipy sparse computation of the eigenvalues of the Hamiltonian with the extended Hilbert space H_bhsparse = H_bh.to_sparse() eig_vals, eig_vecs = eigsh(H_bhsparse, k=1, which="SA") print("eigenvalues with scipy sparse:", eig_vals) # lanczos computation of the eigenvalues of the Hamiltonian with the Fock Hilbert space E_bh2, ket_bh2 = nk.exact.lanczos_ed(H_bh2, compute_eigenvectors=True) 
print("Exact ground state energy = {0:.3f}".format(E_bh2[0])) # full computation of the eigenvalues of the Hamiltonian with the Fock Hilbert space E_bhfull2= nk.exact.full_ed(H_bh2) print("Exact ground state energy = {0:.3f}".format(E_bhfull2[0])) # numpy.linalg computation of the eigenvalues of the Hamiltonian with the Fock Hilbert space M_bh2 = H_bh2.to_dense() E_bhla2 = min(np.linalg.eig(M_bh2)[0]) print("Ground state energy:", E_bhla2.real) # scipy sparse computation of the eigenvalues of the Hamiltonian with the extended Hilbert space H_bhsparse2 = H_bh2.to_sparse() eig_vals2, eig_vecs2 = eigsh(H_bhsparse2, k=1, which="SA") print("eigenvalues with scipy sparse:", eig_vals2) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `netket/operator/_local_operator_compile_helpers.py` Content: ``` 1 # Copyright 2022 The NetKet Authors - All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 This file contains functions generating the numba-packed representation of local 17 operators. 18 """ 19 20 import numpy as np 21 import numba 22 23 from netket.hilbert import AbstractHilbert 24 from netket.utils.types import DType 25 26 27 def pack_internals( 28 hilbert: AbstractHilbert, 29 operators_dict: dict, 30 constant, 31 dtype: DType, 32 mel_cutoff: float, 33 ): 34 """ 35 Take the internal lazy representation of a local operator and returns the arrays 36 needed for the numba implementation. 37 38 This takes as input a dictionary with Tuples as keys, the `acting_on` and matrices as values. 39 The keys represent the sites upon which the matrix acts. 40 It is assumed that the integer in the tuples are sorted. 
41 42 Returns a dictionary with all the data fields 43 """ 44 op_acting_on = list(operators_dict.keys()) 45 operators = list(operators_dict.values()) 46 n_operators = len(operators_dict) 47 48 """Analyze the operator strings and precompute arrays for get_conn inference""" 49 acting_size = np.array([len(aon) for aon in op_acting_on], dtype=np.intp) 50 max_acting_on_sz = np.max(acting_size) 51 max_local_hilbert_size = max( 52 [max(map(hilbert.size_at_index, aon)) for aon in op_acting_on] 53 ) 54 max_op_size = max(map(lambda x: x.shape[0], operators)) 55 56 acting_on = np.full((n_operators, max_acting_on_sz), -1, dtype=np.intp) 57 for (i, aon) in enumerate(op_acting_on): 58 acting_on[i][: len(aon)] = aon 59 60 local_states = np.full( 61 (n_operators, max_acting_on_sz, max_local_hilbert_size), np.nan 62 ) 63 basis = np.full((n_operators, max_acting_on_sz), 1e10, dtype=np.int64) 64 65 diag_mels = np.full((n_operators, max_op_size), np.nan, dtype=dtype) 66 mels = np.full( 67 (n_operators, max_op_size, max_op_size - 1), 68 np.nan, 69 dtype=dtype, 70 ) 71 x_prime = np.full( 72 (n_operators, max_op_size, max_op_size - 1, max_acting_on_sz), 73 -1, 74 dtype=np.float64, 75 ) 76 n_conns = np.full((n_operators, max_op_size), -1, dtype=np.intp) 77 78 for (i, (aon, op)) in enumerate(operators_dict.items()): 79 aon_size = len(aon) 80 n_local_states_per_site = np.asarray([hilbert.size_at_index(i) for i in aon]) 81 82 ## add an operator to local_states 83 for (j, site) in enumerate(aon): 84 local_states[i, j, : hilbert.shape[site]] = np.asarray( 85 hilbert.states_at_index(site) 86 ) 87 88 ba = 1 89 for s in range(aon_size): 90 basis[i, s] = ba 91 ba *= hilbert.shape[aon_size - s - 1] 92 93 # eventually could support sparse matrices 94 # if isinstance(op, sparse.spmatrix): 95 # op = op.todense() 96 97 _append_matrix( 98 op, 99 diag_mels[i], 100 mels[i], 101 x_prime[i], 102 n_conns[i], 103 aon_size, 104 local_states[i], 105 mel_cutoff, 106 n_local_states_per_site, 107 ) 108 109 nonzero_diagonal = ( 110 np.any(np.abs(diag_mels) >= mel_cutoff) or np.abs(constant) >= mel_cutoff 111 ) 112 113 max_conn_size = 1 if nonzero_diagonal else 0 114 for op in operators: 115 nnz_mat = np.abs(op) > mel_cutoff 116 nnz_mat[np.diag_indices(nnz_mat.shape[0])] = False 117 nnz_rows = np.sum(nnz_mat, axis=1) 118 max_conn_size += np.max(nnz_rows) 119 120 return { 121 "acting_on": acting_on, 122 "acting_size": acting_size, 123 "diag_mels": diag_mels, 124 "mels": mels, 125 "x_prime": x_prime, 126 "n_conns": n_conns, 127 "local_states": local_states, 128 "basis": basis, 129 "nonzero_diagonal": nonzero_diagonal, 130 "max_conn_size": max_conn_size, 131 } 132 133 134 @numba.jit(nopython=True) 135 def _append_matrix( 136 operator, 137 diag_mels, 138 mels, 139 x_prime, 140 n_conns, 141 acting_size, 142 local_states_per_site, 143 epsilon, 144 hilb_size_per_site, 145 ): 146 op_size = operator.shape[0] 147 assert op_size == operator.shape[1] 148 for i in range(op_size): 149 diag_mels[i] = operator[i, i] 150 n_conns[i] = 0 151 for j in range(op_size): 152 if i != j and np.abs(operator[i, j]) > epsilon: 153 k_conn = n_conns[i] 154 mels[i, k_conn] = operator[i, j] 155 _number_to_state( 156 j, 157 hilb_size_per_site, 158 local_states_per_site[:acting_size, :], 159 x_prime[i, k_conn, :acting_size], 160 ) 161 n_conns[i] += 1 162 163 164 @numba.jit(nopython=True) 165 def _number_to_state(number, hilbert_size_per_site, local_states_per_site, out): 166 167 out[:] = local_states_per_site[:, 0] 168 size = out.shape[0] 169 170 ip = number 171 k = size - 1 
172 while ip > 0: 173 local_size = hilbert_size_per_site[k] 174 out[k] = local_states_per_site[k, ip % local_size] 175 ip = ip // local_size 176 k -= 1 177 178 return out 179 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/netket/operator/_local_operator_compile_helpers.py b/netket/operator/_local_operator_compile_helpers.py --- a/netket/operator/_local_operator_compile_helpers.py +++ b/netket/operator/_local_operator_compile_helpers.py @@ -88,7 +88,7 @@ ba = 1 for s in range(aon_size): basis[i, s] = ba - ba *= hilbert.shape[aon_size - s - 1] + ba *= hilbert.shape[aon[aon_size - s - 1]] # eventually could support sparse matrices # if isinstance(op, sparse.spmatrix):
{"golden_diff": "diff --git a/netket/operator/_local_operator_compile_helpers.py b/netket/operator/_local_operator_compile_helpers.py\n--- a/netket/operator/_local_operator_compile_helpers.py\n+++ b/netket/operator/_local_operator_compile_helpers.py\n@@ -88,7 +88,7 @@\n ba = 1\n for s in range(aon_size):\n basis[i, s] = ba\n- ba *= hilbert.shape[aon_size - s - 1]\n+ ba *= hilbert.shape[aon[aon_size - s - 1]]\n \n # eventually could support sparse matrices\n # if isinstance(op, sparse.spmatrix):\n", "issue": "Bose-Hubbard model fails in extended Hilbert space for newer versions\nHello, \r\nwe are working on an extended Hilbert space in order to calculate the ground state energy of the Bose-Hubbard model. We added spin 1/2 sites between every boson site in the one-dimensional chain, but we left the Hamiltonian unchanged. We expected that the ground state energy should be the same in this extended Hilbert space, but in the newer versions of Netket (post 3.3.3), this energy differs from the actual solution. Even more, every time we run the exact diagonalization of the Hamiltonian matrix (either with the full_ed or lanczos_ed method), its eigenvalues change; we also diagonalized the same Hamiltonian matrix with the linalg package of numpy and the result also differs every time we do it. This doesn't happen in the older versions; the problem begins in the 3.3.4 version of Netket.\r\n\r\nWe claim that the problem resides in the way that the newer versions of Netket interpret the Hamiltonian matrices with mixed Hilbert spaces. When we do the same Bose-Hubbard model in a Fock-Boson space, the calculation gives the correct value of the ground state energy.\r\n\r\nHere is the code that we run in both versions of Netket, and a video of the discrepancy. \r\n\r\nNotebook ran in Nektet 3.3.3: https://www.youtube.com/watch?v=ENhRJfYg7dg\r\nNotebook ran in Netket 3.4.1: https://www.youtube.com/watch?v=Q3XfWrnR7LU\r\n\r\nYou can download the Jupyter Script here:\r\nhttps://www.dropbox.com/s/dza4kbyem2ycg6v/BoseHubbardNetket.ipynb?dl=0\r\n\r\n```\r\n# Extended Hilbert space\r\nn_max = 3\r\nL = 3 # Number of spin sites\r\nhil_gen = nk.hilbert.Fock(n_max = n_max)*nk.hilbert.Spin(1/2)\r\nhi_ext = hil_gen\r\nfor i in range(L-1):\r\n hi_ext = hi_ext * hil_gen \r\nhi_ext = hi_ext * nk.hilbert.Fock(n_max = n_max)\r\n# Boson-Fock Hilbert space\r\nN = 4 # Number of bosons in the chain\r\n# Chain graph\r\ng = nk.graph.Chain(length=N, pbc=False)\r\nhi_fock = nk.hilbert.Fock(n_max=n_max, N=N)\r\n\r\n# Bose-Hubbard Hamiltonian for extended Hilbert space\r\nJ = 0.5\r\nU = 0.5\r\nh = create(hi_ext,0)*destroy(hi_ext,2) + create(hi_ext,2)*destroy(hi_ext,4) + create(hi_ext,4)*destroy(hi_ext,6)\r\nh_hc = h.H.collect()\r\nh_u = (number(hi_ext,0)*(number(hi_ext,0)-1) + number(hi_ext,2)*(number(hi_ext,2)-1) + number(hi_ext,4)*(number(hi_ext,4)-1) + \r\n number(hi_ext,6)*(number(hi_ext,6)-1))\r\nH_bh = -J*(h + h_hc) + (U/2)*h_u\r\n\r\n# Bose-Hubbard Hamiltonian for the Fock-Bose Hilbert space\r\nH_bh2 = nk.operator.BoseHubbard(hilbert=hi_fock,U=0.5,J=0.5,graph=g)\r\n\r\n# lanczos computation of the eigenvalues of the Hamiltonian with the extended Hilbert space\r\nE_bh, ket_bh = nk.exact.lanczos_ed(H_bh, compute_eigenvectors=True)\r\nprint(\"Exact ground state energy = {0:.3f}\".format(E_bh[0]))\r\n\r\n# full computation of the eigenvalues of the Hamiltonian with the extended Hilbert space\r\nE_bhfull = nk.exact.full_ed(H_bh)\r\nprint(\"Exact ground state energy = {0:.3f}\".format(E_bhfull[0]))\r\n\r\n# numpy.linalg computation of 
the eigenvalues of the Hamiltonian with the extended Hilbert space\r\nM_bh = H_bh.to_dense()\r\nE_bhla = min(np.linalg.eig(M_bh)[0])\r\nprint(\"Ground state energy:\", E_bhla.real)\r\n\r\n# scipy sparse computation of the eigenvalues of the Hamiltonian with the extended Hilbert space\r\nH_bhsparse = H_bh.to_sparse()\r\neig_vals, eig_vecs = eigsh(H_bhsparse, k=1, which=\"SA\")\r\nprint(\"eigenvalues with scipy sparse:\", eig_vals)\r\n\r\n# lanczos computation of the eigenvalues of the Hamiltonian with the Fock Hilbert space\r\nE_bh2, ket_bh2 = nk.exact.lanczos_ed(H_bh2, compute_eigenvectors=True)\r\nprint(\"Exact ground state energy = {0:.3f}\".format(E_bh2[0]))\r\n\r\n# full computation of the eigenvalues of the Hamiltonian with the Fock Hilbert space\r\nE_bhfull2= nk.exact.full_ed(H_bh2)\r\nprint(\"Exact ground state energy = {0:.3f}\".format(E_bhfull2[0]))\r\n\r\n# numpy.linalg computation of the eigenvalues of the Hamiltonian with the Fock Hilbert space\r\nM_bh2 = H_bh2.to_dense()\r\nE_bhla2 = min(np.linalg.eig(M_bh2)[0])\r\nprint(\"Ground state energy:\", E_bhla2.real)\r\n\r\n# scipy sparse computation of the eigenvalues of the Hamiltonian with the extended Hilbert space\r\nH_bhsparse2 = H_bh2.to_sparse()\r\neig_vals2, eig_vecs2 = eigsh(H_bhsparse2, k=1, which=\"SA\")\r\nprint(\"eigenvalues with scipy sparse:\", eig_vals2)\r\n```\n", "before_files": [{"content": "# Copyright 2022 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file contains functions generating the numba-packed representation of local\noperators.\n\"\"\"\n\nimport numpy as np\nimport numba\n\nfrom netket.hilbert import AbstractHilbert\nfrom netket.utils.types import DType\n\n\ndef pack_internals(\n hilbert: AbstractHilbert,\n operators_dict: dict,\n constant,\n dtype: DType,\n mel_cutoff: float,\n):\n \"\"\"\n Take the internal lazy representation of a local operator and returns the arrays\n needed for the numba implementation.\n\n This takes as input a dictionary with Tuples as keys, the `acting_on` and matrices as values.\n The keys represent the sites upon which the matrix acts.\n It is assumed that the integer in the tuples are sorted.\n\n Returns a dictionary with all the data fields\n \"\"\"\n op_acting_on = list(operators_dict.keys())\n operators = list(operators_dict.values())\n n_operators = len(operators_dict)\n\n \"\"\"Analyze the operator strings and precompute arrays for get_conn inference\"\"\"\n acting_size = np.array([len(aon) for aon in op_acting_on], dtype=np.intp)\n max_acting_on_sz = np.max(acting_size)\n max_local_hilbert_size = max(\n [max(map(hilbert.size_at_index, aon)) for aon in op_acting_on]\n )\n max_op_size = max(map(lambda x: x.shape[0], operators))\n\n acting_on = np.full((n_operators, max_acting_on_sz), -1, dtype=np.intp)\n for (i, aon) in enumerate(op_acting_on):\n acting_on[i][: len(aon)] = aon\n\n local_states = np.full(\n (n_operators, max_acting_on_sz, max_local_hilbert_size), np.nan\n )\n basis = np.full((n_operators, 
max_acting_on_sz), 1e10, dtype=np.int64)\n\n diag_mels = np.full((n_operators, max_op_size), np.nan, dtype=dtype)\n mels = np.full(\n (n_operators, max_op_size, max_op_size - 1),\n np.nan,\n dtype=dtype,\n )\n x_prime = np.full(\n (n_operators, max_op_size, max_op_size - 1, max_acting_on_sz),\n -1,\n dtype=np.float64,\n )\n n_conns = np.full((n_operators, max_op_size), -1, dtype=np.intp)\n\n for (i, (aon, op)) in enumerate(operators_dict.items()):\n aon_size = len(aon)\n n_local_states_per_site = np.asarray([hilbert.size_at_index(i) for i in aon])\n\n ## add an operator to local_states\n for (j, site) in enumerate(aon):\n local_states[i, j, : hilbert.shape[site]] = np.asarray(\n hilbert.states_at_index(site)\n )\n\n ba = 1\n for s in range(aon_size):\n basis[i, s] = ba\n ba *= hilbert.shape[aon_size - s - 1]\n\n # eventually could support sparse matrices\n # if isinstance(op, sparse.spmatrix):\n # op = op.todense()\n\n _append_matrix(\n op,\n diag_mels[i],\n mels[i],\n x_prime[i],\n n_conns[i],\n aon_size,\n local_states[i],\n mel_cutoff,\n n_local_states_per_site,\n )\n\n nonzero_diagonal = (\n np.any(np.abs(diag_mels) >= mel_cutoff) or np.abs(constant) >= mel_cutoff\n )\n\n max_conn_size = 1 if nonzero_diagonal else 0\n for op in operators:\n nnz_mat = np.abs(op) > mel_cutoff\n nnz_mat[np.diag_indices(nnz_mat.shape[0])] = False\n nnz_rows = np.sum(nnz_mat, axis=1)\n max_conn_size += np.max(nnz_rows)\n\n return {\n \"acting_on\": acting_on,\n \"acting_size\": acting_size,\n \"diag_mels\": diag_mels,\n \"mels\": mels,\n \"x_prime\": x_prime,\n \"n_conns\": n_conns,\n \"local_states\": local_states,\n \"basis\": basis,\n \"nonzero_diagonal\": nonzero_diagonal,\n \"max_conn_size\": max_conn_size,\n }\n\n\[email protected](nopython=True)\ndef _append_matrix(\n operator,\n diag_mels,\n mels,\n x_prime,\n n_conns,\n acting_size,\n local_states_per_site,\n epsilon,\n hilb_size_per_site,\n):\n op_size = operator.shape[0]\n assert op_size == operator.shape[1]\n for i in range(op_size):\n diag_mels[i] = operator[i, i]\n n_conns[i] = 0\n for j in range(op_size):\n if i != j and np.abs(operator[i, j]) > epsilon:\n k_conn = n_conns[i]\n mels[i, k_conn] = operator[i, j]\n _number_to_state(\n j,\n hilb_size_per_site,\n local_states_per_site[:acting_size, :],\n x_prime[i, k_conn, :acting_size],\n )\n n_conns[i] += 1\n\n\[email protected](nopython=True)\ndef _number_to_state(number, hilbert_size_per_site, local_states_per_site, out):\n\n out[:] = local_states_per_site[:, 0]\n size = out.shape[0]\n\n ip = number\n k = size - 1\n while ip > 0:\n local_size = hilbert_size_per_site[k]\n out[k] = local_states_per_site[k, ip % local_size]\n ip = ip // local_size\n k -= 1\n\n return out\n", "path": "netket/operator/_local_operator_compile_helpers.py"}], "after_files": [{"content": "# Copyright 2022 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file contains functions generating the numba-packed representation of local\noperators.\n\"\"\"\n\nimport numpy 
as np\nimport numba\n\nfrom netket.hilbert import AbstractHilbert\nfrom netket.utils.types import DType\n\n\ndef pack_internals(\n hilbert: AbstractHilbert,\n operators_dict: dict,\n constant,\n dtype: DType,\n mel_cutoff: float,\n):\n \"\"\"\n Take the internal lazy representation of a local operator and returns the arrays\n needed for the numba implementation.\n\n This takes as input a dictionary with Tuples as keys, the `acting_on` and matrices as values.\n The keys represent the sites upon which the matrix acts.\n It is assumed that the integer in the tuples are sorted.\n\n Returns a dictionary with all the data fields\n \"\"\"\n op_acting_on = list(operators_dict.keys())\n operators = list(operators_dict.values())\n n_operators = len(operators_dict)\n\n \"\"\"Analyze the operator strings and precompute arrays for get_conn inference\"\"\"\n acting_size = np.array([len(aon) for aon in op_acting_on], dtype=np.intp)\n max_acting_on_sz = np.max(acting_size)\n max_local_hilbert_size = max(\n [max(map(hilbert.size_at_index, aon)) for aon in op_acting_on]\n )\n max_op_size = max(map(lambda x: x.shape[0], operators))\n\n acting_on = np.full((n_operators, max_acting_on_sz), -1, dtype=np.intp)\n for (i, aon) in enumerate(op_acting_on):\n acting_on[i][: len(aon)] = aon\n\n local_states = np.full(\n (n_operators, max_acting_on_sz, max_local_hilbert_size), np.nan\n )\n basis = np.full((n_operators, max_acting_on_sz), 1e10, dtype=np.int64)\n\n diag_mels = np.full((n_operators, max_op_size), np.nan, dtype=dtype)\n mels = np.full(\n (n_operators, max_op_size, max_op_size - 1),\n np.nan,\n dtype=dtype,\n )\n x_prime = np.full(\n (n_operators, max_op_size, max_op_size - 1, max_acting_on_sz),\n -1,\n dtype=np.float64,\n )\n n_conns = np.full((n_operators, max_op_size), -1, dtype=np.intp)\n\n for (i, (aon, op)) in enumerate(operators_dict.items()):\n aon_size = len(aon)\n n_local_states_per_site = np.asarray([hilbert.size_at_index(i) for i in aon])\n\n ## add an operator to local_states\n for (j, site) in enumerate(aon):\n local_states[i, j, : hilbert.shape[site]] = np.asarray(\n hilbert.states_at_index(site)\n )\n\n ba = 1\n for s in range(aon_size):\n basis[i, s] = ba\n ba *= hilbert.shape[aon[aon_size - s - 1]]\n\n # eventually could support sparse matrices\n # if isinstance(op, sparse.spmatrix):\n # op = op.todense()\n\n _append_matrix(\n op,\n diag_mels[i],\n mels[i],\n x_prime[i],\n n_conns[i],\n aon_size,\n local_states[i],\n mel_cutoff,\n n_local_states_per_site,\n )\n\n nonzero_diagonal = (\n np.any(np.abs(diag_mels) >= mel_cutoff) or np.abs(constant) >= mel_cutoff\n )\n\n max_conn_size = 1 if nonzero_diagonal else 0\n for op in operators:\n nnz_mat = np.abs(op) > mel_cutoff\n nnz_mat[np.diag_indices(nnz_mat.shape[0])] = False\n nnz_rows = np.sum(nnz_mat, axis=1)\n max_conn_size += np.max(nnz_rows)\n\n return {\n \"acting_on\": acting_on,\n \"acting_size\": acting_size,\n \"diag_mels\": diag_mels,\n \"mels\": mels,\n \"x_prime\": x_prime,\n \"n_conns\": n_conns,\n \"local_states\": local_states,\n \"basis\": basis,\n \"nonzero_diagonal\": nonzero_diagonal,\n \"max_conn_size\": max_conn_size,\n }\n\n\[email protected](nopython=True)\ndef _append_matrix(\n operator,\n diag_mels,\n mels,\n x_prime,\n n_conns,\n acting_size,\n local_states_per_site,\n epsilon,\n hilb_size_per_site,\n):\n op_size = operator.shape[0]\n assert op_size == operator.shape[1]\n for i in range(op_size):\n diag_mels[i] = operator[i, i]\n n_conns[i] = 0\n for j in range(op_size):\n if i != j and np.abs(operator[i, j]) > 
epsilon:\n k_conn = n_conns[i]\n mels[i, k_conn] = operator[i, j]\n _number_to_state(\n j,\n hilb_size_per_site,\n local_states_per_site[:acting_size, :],\n x_prime[i, k_conn, :acting_size],\n )\n n_conns[i] += 1\n\n\[email protected](nopython=True)\ndef _number_to_state(number, hilbert_size_per_site, local_states_per_site, out):\n\n out[:] = local_states_per_site[:, 0]\n size = out.shape[0]\n\n ip = number\n k = size - 1\n while ip > 0:\n local_size = hilbert_size_per_site[k]\n out[k] = local_states_per_site[k, ip % local_size]\n ip = ip // local_size\n k -= 1\n\n return out\n", "path": "netket/operator/_local_operator_compile_helpers.py"}]}
num_tokens: 3,372
num_tokens_diff: 138
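The netket fix above is a single index change, but it is worth seeing why it only matters for mixed Hilbert spaces: the stride (`basis`) used to decode an operator's column index must be built from the local dimensions of the sites in `acting_on`, and indexing `hilbert.shape` by position in that tuple rather than by site label happens to give the same answer whenever every site has the same dimension. Below is a self-contained sketch of the corrected stride computation; the function name, the example `shape`, and the `(0, 2)` acting-on tuple are illustrative, not NetKet API:

```python
import numpy as np

def basis_strides(shape, aon):
    """Mixed-radix strides for the sites in `aon`; `shape` lists every site's local dimension."""
    aon_size = len(aon)
    basis = np.empty(aon_size, dtype=np.int64)
    ba = 1
    for s in range(aon_size):
        basis[s] = ba
        # Fixed: use the dimension of the acted-on site, shape[aon[...]].
        # The buggy version used shape[aon_size - s - 1], i.e. the first
        # sites of the whole chain, regardless of where the operator acts.
        ba *= shape[aon[aon_size - s - 1]]
    return basis

# Alternating Fock (dim 4) and spin-1/2 (dim 2) sites, as in the issue.
shape = [4, 2, 4, 2, 4, 2, 4]
print(basis_strides(shape, (0, 2)))  # -> [1 4]; the broken indexing would give [1 2]
```

With alternating Fock/spin dimensions the two indexings disagree, which is the source of the wrong Bose-Hubbard eigenvalues reported in the issue.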
problem_id: gh_patches_debug_31447
source: rasdani/github-patches
task_type: git_diff
in_source_id: sunpy__sunpy-1551
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove the need to have astropy installed before installing SunPy Currently you can not have a clean python environment and do a `pip install sunpy` you have to have astropy + numpy installed first. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sunpy/io/setup_package.py` Content: ``` 1 from __future__ import absolute_import 2 3 import os 4 import platform 5 6 from distutils.core import Extension 7 from glob import glob 8 9 from astropy_helpers import setup_helpers 10 from astropy.extern import six 11 12 13 def get_extensions(): 14 15 if platform.system() == 'Windows' or six.PY3: 16 return list() 17 else: 18 # 'numpy' will be replaced with the proper path to the numpy includes 19 cfg = setup_helpers.DistutilsExtensionArgs() 20 cfg['include_dirs'].append('numpy') 21 cfg['sources'].extend(glob(os.path.join(os.path.dirname(__file__), 'src', 'ana', '*.c'))) 22 cfg['extra_compile_args'].extend(['-std=c99', '-O3']) 23 # Squash some warnings 24 cfg['extra_compile_args'].extend(['-Wno-unused-but-set-variable', 25 '-Wno-unused-variable', 26 '-Wno-unused-result']) 27 28 e = Extension('sunpy.io._pyana', **cfg) 29 return [e] 30 31 def requires_2to3(): 32 return False 33 ``` Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # This file is based havily on the astropy version here: 3 # https://github.com/astropy/package-template/blob/master/setup.py 4 # Which is licensed under the astropy license. 5 6 import glob 7 import os 8 import sys 9 10 import ah_bootstrap 11 from setuptools import setup 12 13 # A dirty hack to get around some early import/configurations ambiguities 14 if sys.version_info[0] >= 3: 15 import builtins 16 else: 17 import __builtin__ as builtins 18 builtins._ASTROPY_SETUP_ = True 19 20 # -- Read the Docs Setup ----------------------------------------------------- 21 22 on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 23 24 if on_rtd: 25 os.environ['HOME'] = '/home/docs/checkouts/readthedocs.org/user_builds/sunpy/' 26 os.environ['SUNPY_CONFIGDIR'] = '/home/docs/checkouts/readthedocs.org/user_builds/sunpy/' 27 28 from astropy_helpers.setup_helpers import ( 29 register_commands, adjust_compiler, get_debug_option, get_package_info) 30 from astropy_helpers.git_helpers import get_git_devstr 31 from astropy_helpers.version_helpers import generate_version_py 32 from sunpy.tests.setup_command import SunPyTest 33 34 # Get some values from the setup.cfg 35 from distutils import config 36 conf = config.ConfigParser() 37 conf.read(['setup.cfg']) 38 metadata = dict(conf.items('metadata')) 39 40 PACKAGENAME = metadata.get('package_name', 'packagename') 41 DESCRIPTION = metadata.get('description', 'SunPy: Python for Solar Physics') 42 AUTHOR = metadata.get('author', 'The SunPy Community') 43 AUTHOR_EMAIL = metadata.get('author_email', '[email protected]') 44 LICENSE = metadata.get('license', 'BSD 2-Clause') 45 URL = metadata.get('url', 'http://sunpy.org') 46 47 LONG_DESCRIPTION = "SunPy is a Python library for solar physics data analysis." 
48 49 # Store the package name in a built-in variable so it's easy 50 # to get from other parts of the setup infrastructure 51 builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME 52 53 # VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386) 54 VERSION = '0.7.dev' 55 56 # Indicates if this version is a release version 57 RELEASE = 'dev' not in VERSION 58 59 if not RELEASE: 60 VERSION += get_git_devstr(False) 61 62 # Populate the dict of setup command overrides; this should be done before 63 # invoking any other functionality from distutils since it can potentially 64 # modify distutils' behavior. 65 cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE) 66 67 # Overwrite the Astropy Testing framework 68 cmdclassd['test'] = type('SunPyTest', (SunPyTest,), 69 {'package_name': 'sunpy'}) 70 71 # Adjust the compiler in case the default on this platform is to use a 72 # broken one. 73 adjust_compiler(PACKAGENAME) 74 75 # Freeze build information in version.py 76 generate_version_py(PACKAGENAME, VERSION, RELEASE, 77 get_debug_option(PACKAGENAME)) 78 79 # Treat everything in scripts except README.rst as a script to be installed 80 scripts = [fname for fname in glob.glob(os.path.join('scripts', '*')) 81 if os.path.basename(fname) != 'README.rst'] 82 83 84 # Get configuration information from all of the various subpackages. 85 # See the docstring for setup_helpers.update_package_files for more 86 # details. 87 package_info = get_package_info() 88 89 # Add the project-global data 90 package_info['package_data'].setdefault(PACKAGENAME, []) 91 92 # Include all .c files, recursively, including those generated by 93 # Cython, since we can not do this in MANIFEST.in with a "dynamic" 94 # directory name. 95 c_files = [] 96 for root, dirs, files in os.walk(PACKAGENAME): 97 for filename in files: 98 if filename.endswith('.c'): 99 c_files.append( 100 os.path.join( 101 os.path.relpath(root, PACKAGENAME), filename)) 102 package_info['package_data'][PACKAGENAME].extend(c_files) 103 104 extras_require = {'database': ["sqlalchemy"], 105 'image': ["scikit-image"], 106 'jpeg2000': ["glymur"], 107 'net': ["suds", "beautifulsoup4", "requests"]} 108 extras_require['all'] = extras_require['database'] + extras_require['image'] + \ 109 extras_require['net'] + ["wcsaxes>=0.6"] 110 111 setup(name=PACKAGENAME, 112 version=VERSION, 113 description=DESCRIPTION, 114 scripts=scripts, 115 setup_requires=['numpy>1.7.1'], 116 install_requires=['numpy>1.7.1', 117 'astropy>=1.0.0', 118 'scipy', 119 'pandas>=0.12.0', 120 'matplotlib>=1.1'], 121 extras_require=extras_require, 122 provides=[PACKAGENAME], 123 author=AUTHOR, 124 author_email=AUTHOR_EMAIL, 125 license=LICENSE, 126 url=URL, 127 long_description=LONG_DESCRIPTION, 128 cmdclass=cmdclassd, 129 zip_safe=False, 130 use_2to3=False, 131 include_package_data=True, 132 **package_info 133 ) 134 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -29,7 +29,6 @@ register_commands, adjust_compiler, get_debug_option, get_package_info) from astropy_helpers.git_helpers import get_git_devstr from astropy_helpers.version_helpers import generate_version_py -from sunpy.tests.setup_command import SunPyTest # Get some values from the setup.cfg from distutils import config @@ -64,9 +63,14 @@ # modify distutils' behavior. cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE) -# Overwrite the Astropy Testing framework -cmdclassd['test'] = type('SunPyTest', (SunPyTest,), - {'package_name': 'sunpy'}) +try: + from sunpy.tests.setup_command import SunPyTest + # Overwrite the Astropy Testing framework + cmdclassd['test'] = type('SunPyTest', (SunPyTest,), + {'package_name': 'sunpy'}) +except Exception: + # Catch everything, if it doesn't work, we still want SunPy to install. + pass # Adjust the compiler in case the default on this platform is to use a # broken one. diff --git a/sunpy/io/setup_package.py b/sunpy/io/setup_package.py --- a/sunpy/io/setup_package.py +++ b/sunpy/io/setup_package.py @@ -1,18 +1,18 @@ from __future__ import absolute_import import os +import sys import platform from distutils.core import Extension from glob import glob from astropy_helpers import setup_helpers -from astropy.extern import six def get_extensions(): - if platform.system() == 'Windows' or six.PY3: + if platform.system() == 'Windows' or sys.version_info.major == 3: return list() else: # 'numpy' will be replaced with the proper path to the numpy includes
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,7 +29,6 @@\n register_commands, adjust_compiler, get_debug_option, get_package_info)\n from astropy_helpers.git_helpers import get_git_devstr\n from astropy_helpers.version_helpers import generate_version_py\n-from sunpy.tests.setup_command import SunPyTest\n \n # Get some values from the setup.cfg\n from distutils import config\n@@ -64,9 +63,14 @@\n # modify distutils' behavior.\n cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)\n \n-# Overwrite the Astropy Testing framework\n-cmdclassd['test'] = type('SunPyTest', (SunPyTest,),\n- {'package_name': 'sunpy'})\n+try:\n+ from sunpy.tests.setup_command import SunPyTest\n+ # Overwrite the Astropy Testing framework\n+ cmdclassd['test'] = type('SunPyTest', (SunPyTest,),\n+ {'package_name': 'sunpy'})\n+except Exception:\n+ # Catch everything, if it doesn't work, we still want SunPy to install.\n+ pass\n \n # Adjust the compiler in case the default on this platform is to use a\n # broken one.\ndiff --git a/sunpy/io/setup_package.py b/sunpy/io/setup_package.py\n--- a/sunpy/io/setup_package.py\n+++ b/sunpy/io/setup_package.py\n@@ -1,18 +1,18 @@\n from __future__ import absolute_import\n \n import os\n+import sys\n import platform\n \n from distutils.core import Extension\n from glob import glob\n \n from astropy_helpers import setup_helpers\n-from astropy.extern import six\n \n \n def get_extensions():\n \n- if platform.system() == 'Windows' or six.PY3:\n+ if platform.system() == 'Windows' or sys.version_info.major == 3:\n return list()\n else:\n # 'numpy' will be replaced with the proper path to the numpy includes\n", "issue": "Remove the need to have astropy installed before installing SunPy\nCurrently you can not have a clean python environment and do a `pip install sunpy` you have to have astropy + numpy installed first.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport os\nimport platform\n\nfrom distutils.core import Extension\nfrom glob import glob\n\nfrom astropy_helpers import setup_helpers\nfrom astropy.extern import six\n\n\ndef get_extensions():\n\n if platform.system() == 'Windows' or six.PY3:\n return list()\n else:\n # 'numpy' will be replaced with the proper path to the numpy includes\n cfg = setup_helpers.DistutilsExtensionArgs()\n cfg['include_dirs'].append('numpy')\n cfg['sources'].extend(glob(os.path.join(os.path.dirname(__file__), 'src', 'ana', '*.c')))\n cfg['extra_compile_args'].extend(['-std=c99', '-O3'])\n # Squash some warnings\n cfg['extra_compile_args'].extend(['-Wno-unused-but-set-variable',\n '-Wno-unused-variable',\n '-Wno-unused-result'])\n\n e = Extension('sunpy.io._pyana', **cfg)\n return [e]\n\ndef requires_2to3():\n return False\n", "path": "sunpy/io/setup_package.py"}, {"content": "#!/usr/bin/env python\n# This file is based havily on the astropy version here:\n# https://github.com/astropy/package-template/blob/master/setup.py\n# Which is licensed under the astropy license.\n\nimport glob\nimport os\nimport sys\n\nimport ah_bootstrap\nfrom setuptools import setup\n\n# A dirty hack to get around some early import/configurations ambiguities\nif sys.version_info[0] >= 3:\n import builtins\nelse:\n import __builtin__ as builtins\nbuiltins._ASTROPY_SETUP_ = True\n\n# -- Read the Docs Setup -----------------------------------------------------\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif on_rtd:\n os.environ['HOME'] = 
'/home/docs/checkouts/readthedocs.org/user_builds/sunpy/'\n os.environ['SUNPY_CONFIGDIR'] = '/home/docs/checkouts/readthedocs.org/user_builds/sunpy/'\n\nfrom astropy_helpers.setup_helpers import (\n register_commands, adjust_compiler, get_debug_option, get_package_info)\nfrom astropy_helpers.git_helpers import get_git_devstr\nfrom astropy_helpers.version_helpers import generate_version_py\nfrom sunpy.tests.setup_command import SunPyTest\n\n# Get some values from the setup.cfg\nfrom distutils import config\nconf = config.ConfigParser()\nconf.read(['setup.cfg'])\nmetadata = dict(conf.items('metadata'))\n\nPACKAGENAME = metadata.get('package_name', 'packagename')\nDESCRIPTION = metadata.get('description', 'SunPy: Python for Solar Physics')\nAUTHOR = metadata.get('author', 'The SunPy Community')\nAUTHOR_EMAIL = metadata.get('author_email', '[email protected]')\nLICENSE = metadata.get('license', 'BSD 2-Clause')\nURL = metadata.get('url', 'http://sunpy.org')\n\nLONG_DESCRIPTION = \"SunPy is a Python library for solar physics data analysis.\"\n\n# Store the package name in a built-in variable so it's easy\n# to get from other parts of the setup infrastructure\nbuiltins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME\n\n# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)\nVERSION = '0.7.dev'\n\n# Indicates if this version is a release version\nRELEASE = 'dev' not in VERSION\n\nif not RELEASE:\n VERSION += get_git_devstr(False)\n\n# Populate the dict of setup command overrides; this should be done before\n# invoking any other functionality from distutils since it can potentially\n# modify distutils' behavior.\ncmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)\n\n# Overwrite the Astropy Testing framework\ncmdclassd['test'] = type('SunPyTest', (SunPyTest,),\n {'package_name': 'sunpy'})\n\n# Adjust the compiler in case the default on this platform is to use a\n# broken one.\nadjust_compiler(PACKAGENAME)\n\n# Freeze build information in version.py\ngenerate_version_py(PACKAGENAME, VERSION, RELEASE,\n get_debug_option(PACKAGENAME))\n\n# Treat everything in scripts except README.rst as a script to be installed\nscripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))\n if os.path.basename(fname) != 'README.rst']\n\n\n# Get configuration information from all of the various subpackages.\n# See the docstring for setup_helpers.update_package_files for more\n# details.\npackage_info = get_package_info()\n\n# Add the project-global data\npackage_info['package_data'].setdefault(PACKAGENAME, [])\n\n# Include all .c files, recursively, including those generated by\n# Cython, since we can not do this in MANIFEST.in with a \"dynamic\"\n# directory name.\nc_files = []\nfor root, dirs, files in os.walk(PACKAGENAME):\n for filename in files:\n if filename.endswith('.c'):\n c_files.append(\n os.path.join(\n os.path.relpath(root, PACKAGENAME), filename))\npackage_info['package_data'][PACKAGENAME].extend(c_files)\n\nextras_require = {'database': [\"sqlalchemy\"],\n 'image': [\"scikit-image\"],\n 'jpeg2000': [\"glymur\"],\n 'net': [\"suds\", \"beautifulsoup4\", \"requests\"]}\nextras_require['all'] = extras_require['database'] + extras_require['image'] + \\\n extras_require['net'] + [\"wcsaxes>=0.6\"]\n\nsetup(name=PACKAGENAME,\n version=VERSION,\n description=DESCRIPTION,\n scripts=scripts,\n setup_requires=['numpy>1.7.1'],\n install_requires=['numpy>1.7.1',\n 'astropy>=1.0.0',\n 'scipy',\n 'pandas>=0.12.0',\n 'matplotlib>=1.1'],\n extras_require=extras_require,\n 
provides=[PACKAGENAME],\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n license=LICENSE,\n url=URL,\n long_description=LONG_DESCRIPTION,\n cmdclass=cmdclassd,\n zip_safe=False,\n use_2to3=False,\n include_package_data=True,\n **package_info\n )\n", "path": "setup.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport os\nimport sys\nimport platform\n\nfrom distutils.core import Extension\nfrom glob import glob\n\nfrom astropy_helpers import setup_helpers\n\n\ndef get_extensions():\n\n if platform.system() == 'Windows' or sys.version_info.major == 3:\n return list()\n else:\n # 'numpy' will be replaced with the proper path to the numpy includes\n cfg = setup_helpers.DistutilsExtensionArgs()\n cfg['include_dirs'].append('numpy')\n cfg['sources'].extend(glob(os.path.join(os.path.dirname(__file__), 'src', 'ana', '*.c')))\n cfg['extra_compile_args'].extend(['-std=c99', '-O3'])\n # Squash some warnings\n cfg['extra_compile_args'].extend(['-Wno-unused-but-set-variable',\n '-Wno-unused-variable',\n '-Wno-unused-result'])\n\n e = Extension('sunpy.io._pyana', **cfg)\n return [e]\n\ndef requires_2to3():\n return False\n", "path": "sunpy/io/setup_package.py"}, {"content": "#!/usr/bin/env python\n# This file is based havily on the astropy version here:\n# https://github.com/astropy/package-template/blob/master/setup.py\n# Which is licensed under the astropy license.\n\nimport glob\nimport os\nimport sys\n\nimport ah_bootstrap\nfrom setuptools import setup\n\n# A dirty hack to get around some early import/configurations ambiguities\nif sys.version_info[0] >= 3:\n import builtins\nelse:\n import __builtin__ as builtins\nbuiltins._ASTROPY_SETUP_ = True\n\n# -- Read the Docs Setup -----------------------------------------------------\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif on_rtd:\n os.environ['HOME'] = '/home/docs/checkouts/readthedocs.org/user_builds/sunpy/'\n os.environ['SUNPY_CONFIGDIR'] = '/home/docs/checkouts/readthedocs.org/user_builds/sunpy/'\n\nfrom astropy_helpers.setup_helpers import (\n register_commands, adjust_compiler, get_debug_option, get_package_info)\nfrom astropy_helpers.git_helpers import get_git_devstr\nfrom astropy_helpers.version_helpers import generate_version_py\n\n# Get some values from the setup.cfg\nfrom distutils import config\nconf = config.ConfigParser()\nconf.read(['setup.cfg'])\nmetadata = dict(conf.items('metadata'))\n\nPACKAGENAME = metadata.get('package_name', 'packagename')\nDESCRIPTION = metadata.get('description', 'SunPy: Python for Solar Physics')\nAUTHOR = metadata.get('author', 'The SunPy Community')\nAUTHOR_EMAIL = metadata.get('author_email', '[email protected]')\nLICENSE = metadata.get('license', 'BSD 2-Clause')\nURL = metadata.get('url', 'http://sunpy.org')\n\nLONG_DESCRIPTION = \"SunPy is a Python library for solar physics data analysis.\"\n\n# Store the package name in a built-in variable so it's easy\n# to get from other parts of the setup infrastructure\nbuiltins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME\n\n# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)\nVERSION = '0.7.dev'\n\n# Indicates if this version is a release version\nRELEASE = 'dev' not in VERSION\n\nif not RELEASE:\n VERSION += get_git_devstr(False)\n\n# Populate the dict of setup command overrides; this should be done before\n# invoking any other functionality from distutils since it can potentially\n# modify distutils' behavior.\ncmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)\n\ntry:\n from 
sunpy.tests.setup_command import SunPyTest\n # Overwrite the Astropy Testing framework\n cmdclassd['test'] = type('SunPyTest', (SunPyTest,),\n {'package_name': 'sunpy'})\nexcept Exception:\n # Catch everything, if it doesn't work, we still want SunPy to install.\n pass\n\n# Adjust the compiler in case the default on this platform is to use a\n# broken one.\nadjust_compiler(PACKAGENAME)\n\n# Freeze build information in version.py\ngenerate_version_py(PACKAGENAME, VERSION, RELEASE,\n get_debug_option(PACKAGENAME))\n\n# Treat everything in scripts except README.rst as a script to be installed\nscripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))\n if os.path.basename(fname) != 'README.rst']\n\n\n# Get configuration information from all of the various subpackages.\n# See the docstring for setup_helpers.update_package_files for more\n# details.\npackage_info = get_package_info()\n\n# Add the project-global data\npackage_info['package_data'].setdefault(PACKAGENAME, [])\n\n# Include all .c files, recursively, including those generated by\n# Cython, since we can not do this in MANIFEST.in with a \"dynamic\"\n# directory name.\nc_files = []\nfor root, dirs, files in os.walk(PACKAGENAME):\n for filename in files:\n if filename.endswith('.c'):\n c_files.append(\n os.path.join(\n os.path.relpath(root, PACKAGENAME), filename))\npackage_info['package_data'][PACKAGENAME].extend(c_files)\n\nextras_require = {'database': [\"sqlalchemy\"],\n 'image': [\"scikit-image\"],\n 'jpeg2000': [\"glymur\"],\n 'net': [\"suds\", \"beautifulsoup4\", \"requests\"]}\nextras_require['all'] = extras_require['database'] + extras_require['image'] + \\\n extras_require['net'] + [\"wcsaxes>=0.6\"]\n\nsetup(name=PACKAGENAME,\n version=VERSION,\n description=DESCRIPTION,\n scripts=scripts,\n setup_requires=['numpy>1.7.1'],\n install_requires=['numpy>1.7.1',\n 'astropy>=1.0.0',\n 'scipy',\n 'pandas>=0.12.0',\n 'matplotlib>=1.1'],\n extras_require=extras_require,\n provides=[PACKAGENAME],\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n license=LICENSE,\n url=URL,\n long_description=LONG_DESCRIPTION,\n cmdclass=cmdclassd,\n zip_safe=False,\n use_2to3=False,\n include_package_data=True,\n **package_info\n )\n", "path": "setup.py"}]}
2,017
438
gh_patches_debug_44613
rasdani/github-patches
git_diff
encode__starlette-1377
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- StaticFiles middleware doesn't follow symlinks ### Checklist - [x] The bug is reproducible against the latest release and/or `master`. - [x] There are no similar issues or pull requests to fix it yet. ### Describe the bug The StaticFiles middleware is checking the `os.realpath` of a file and returning a 404 for symlinks that lead outside the static directory. ### To reproduce 1. create a minimal app with a staticfiles middleware 1. put a symlink in your static directory. the link's target must be above the static directory. 1. you'll get a 404 ### Expected behavior Support symlinks in static directory. The use case for symlinks in static is to target frontend assets that are being generated in file-watch mode. ### Actual behavior 404. ### Debugging material It's happening here: https://github.com/encode/starlette/blob/b95acea973c20eea3e7cbbca42d09b1f5d4a3412/starlette/staticfiles.py#L147-L149 ### Environment - OS: linux - Python version: 3.7.5 - Starlette version: 0.13.8 ### Additional context I'm happy to post a PR for this if useful, ideally adding a bool param to the StaticFiles middleware that allows symlinks. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `starlette/staticfiles.py` Content: ``` 1 import importlib.util 2 import os 3 import stat 4 import typing 5 from email.utils import parsedate 6 7 import anyio 8 9 from starlette.datastructures import URL, Headers 10 from starlette.exceptions import HTTPException 11 from starlette.responses import FileResponse, RedirectResponse, Response 12 from starlette.types import Receive, Scope, Send 13 14 PathLike = typing.Union[str, "os.PathLike[str]"] 15 16 17 class NotModifiedResponse(Response): 18 NOT_MODIFIED_HEADERS = ( 19 "cache-control", 20 "content-location", 21 "date", 22 "etag", 23 "expires", 24 "vary", 25 ) 26 27 def __init__(self, headers: Headers): 28 super().__init__( 29 status_code=304, 30 headers={ 31 name: value 32 for name, value in headers.items() 33 if name in self.NOT_MODIFIED_HEADERS 34 }, 35 ) 36 37 38 class StaticFiles: 39 def __init__( 40 self, 41 *, 42 directory: typing.Optional[PathLike] = None, 43 packages: typing.Optional[ 44 typing.List[typing.Union[str, typing.Tuple[str, str]]] 45 ] = None, 46 html: bool = False, 47 check_dir: bool = True, 48 ) -> None: 49 self.directory = directory 50 self.packages = packages 51 self.all_directories = self.get_directories(directory, packages) 52 self.html = html 53 self.config_checked = False 54 if check_dir and directory is not None and not os.path.isdir(directory): 55 raise RuntimeError(f"Directory '{directory}' does not exist") 56 57 def get_directories( 58 self, 59 directory: typing.Optional[PathLike] = None, 60 packages: typing.Optional[ 61 typing.List[typing.Union[str, typing.Tuple[str, str]]] 62 ] = None, 63 ) -> typing.List[PathLike]: 64 """ 65 Given `directory` and `packages` arguments, return a list of all the 66 directories that should be used for serving static files from. 67 """ 68 directories = [] 69 if directory is not None: 70 directories.append(directory) 71 72 for package in packages or []: 73 if isinstance(package, tuple): 74 package, statics_dir = package 75 else: 76 statics_dir = "statics" 77 spec = importlib.util.find_spec(package) 78 assert spec is not None, f"Package {package!r} could not be found." 
79 assert spec.origin is not None, f"Package {package!r} could not be found." 80 package_directory = os.path.normpath( 81 os.path.join(spec.origin, "..", statics_dir) 82 ) 83 assert os.path.isdir( 84 package_directory 85 ), f"Directory '{statics_dir!r}' in package {package!r} could not be found." 86 directories.append(package_directory) 87 88 return directories 89 90 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: 91 """ 92 The ASGI entry point. 93 """ 94 assert scope["type"] == "http" 95 96 if not self.config_checked: 97 await self.check_config() 98 self.config_checked = True 99 100 path = self.get_path(scope) 101 response = await self.get_response(path, scope) 102 await response(scope, receive, send) 103 104 def get_path(self, scope: Scope) -> str: 105 """ 106 Given the ASGI scope, return the `path` string to serve up, 107 with OS specific path separators, and any '..', '.' components removed. 108 """ 109 return os.path.normpath(os.path.join(*scope["path"].split("/"))) 110 111 async def get_response(self, path: str, scope: Scope) -> Response: 112 """ 113 Returns an HTTP response, given the incoming path, method and request headers. 114 """ 115 if scope["method"] not in ("GET", "HEAD"): 116 raise HTTPException(status_code=405) 117 118 try: 119 full_path, stat_result = await anyio.to_thread.run_sync( 120 self.lookup_path, path 121 ) 122 except PermissionError: 123 raise HTTPException(status_code=401) 124 except OSError: 125 raise 126 127 if stat_result and stat.S_ISREG(stat_result.st_mode): 128 # We have a static file to serve. 129 return self.file_response(full_path, stat_result, scope) 130 131 elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html: 132 # We're in HTML mode, and have got a directory URL. 133 # Check if we have 'index.html' file to serve. 134 index_path = os.path.join(path, "index.html") 135 full_path, stat_result = await anyio.to_thread.run_sync( 136 self.lookup_path, index_path 137 ) 138 if stat_result is not None and stat.S_ISREG(stat_result.st_mode): 139 if not scope["path"].endswith("/"): 140 # Directory URLs should redirect to always end in "/". 141 url = URL(scope=scope) 142 url = url.replace(path=url.path + "/") 143 return RedirectResponse(url=url) 144 return self.file_response(full_path, stat_result, scope) 145 146 if self.html: 147 # Check for '404.html' if we're in HTML mode. 148 full_path, stat_result = await anyio.to_thread.run_sync( 149 self.lookup_path, "404.html" 150 ) 151 if stat_result and stat.S_ISREG(stat_result.st_mode): 152 return FileResponse( 153 full_path, 154 stat_result=stat_result, 155 method=scope["method"], 156 status_code=404, 157 ) 158 raise HTTPException(status_code=404) 159 160 def lookup_path( 161 self, path: str 162 ) -> typing.Tuple[str, typing.Optional[os.stat_result]]: 163 for directory in self.all_directories: 164 full_path = os.path.realpath(os.path.join(directory, path)) 165 directory = os.path.realpath(directory) 166 if os.path.commonprefix([full_path, directory]) != directory: 167 # Don't allow misbehaving clients to break out of the static files 168 # directory. 
169 continue 170 try: 171 return full_path, os.stat(full_path) 172 except (FileNotFoundError, NotADirectoryError): 173 continue 174 return "", None 175 176 def file_response( 177 self, 178 full_path: PathLike, 179 stat_result: os.stat_result, 180 scope: Scope, 181 status_code: int = 200, 182 ) -> Response: 183 method = scope["method"] 184 request_headers = Headers(scope=scope) 185 186 response = FileResponse( 187 full_path, status_code=status_code, stat_result=stat_result, method=method 188 ) 189 if self.is_not_modified(response.headers, request_headers): 190 return NotModifiedResponse(response.headers) 191 return response 192 193 async def check_config(self) -> None: 194 """ 195 Perform a one-off configuration check that StaticFiles is actually 196 pointed at a directory, so that we can raise loud errors rather than 197 just returning 404 responses. 198 """ 199 if self.directory is None: 200 return 201 202 try: 203 stat_result = await anyio.to_thread.run_sync(os.stat, self.directory) 204 except FileNotFoundError: 205 raise RuntimeError( 206 f"StaticFiles directory '{self.directory}' does not exist." 207 ) 208 if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)): 209 raise RuntimeError( 210 f"StaticFiles path '{self.directory}' is not a directory." 211 ) 212 213 def is_not_modified( 214 self, response_headers: Headers, request_headers: Headers 215 ) -> bool: 216 """ 217 Given the request and response headers, return `True` if an HTTP 218 "Not Modified" response could be returned instead. 219 """ 220 try: 221 if_none_match = request_headers["if-none-match"] 222 etag = response_headers["etag"] 223 if if_none_match == etag: 224 return True 225 except KeyError: 226 pass 227 228 try: 229 if_modified_since = parsedate(request_headers["if-modified-since"]) 230 last_modified = parsedate(response_headers["last-modified"]) 231 if ( 232 if_modified_since is not None 233 and last_modified is not None 234 and if_modified_since >= last_modified 235 ): 236 return True 237 except KeyError: 238 pass 239 240 return False 241 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py --- a/starlette/staticfiles.py +++ b/starlette/staticfiles.py @@ -3,6 +3,7 @@ import stat import typing from email.utils import parsedate +from pathlib import Path import anyio @@ -51,7 +52,7 @@ self.all_directories = self.get_directories(directory, packages) self.html = html self.config_checked = False - if check_dir and directory is not None and not os.path.isdir(directory): + if check_dir and directory is not None and not Path(directory).is_dir(): raise RuntimeError(f"Directory '{directory}' does not exist") def get_directories( @@ -77,11 +78,9 @@ spec = importlib.util.find_spec(package) assert spec is not None, f"Package {package!r} could not be found." assert spec.origin is not None, f"Package {package!r} could not be found." - package_directory = os.path.normpath( - os.path.join(spec.origin, "..", statics_dir) - ) - assert os.path.isdir( - package_directory + package_directory = Path(spec.origin).joinpath("..", statics_dir).resolve() + assert ( + package_directory.is_dir() ), f"Directory '{statics_dir!r}' in package {package!r} could not be found." directories.append(package_directory) @@ -101,14 +100,14 @@ response = await self.get_response(path, scope) await response(scope, receive, send) - def get_path(self, scope: Scope) -> str: + def get_path(self, scope: Scope) -> Path: """ Given the ASGI scope, return the `path` string to serve up, with OS specific path separators, and any '..', '.' components removed. """ - return os.path.normpath(os.path.join(*scope["path"].split("/"))) + return Path(*scope["path"].split("/")) - async def get_response(self, path: str, scope: Scope) -> Response: + async def get_response(self, path: Path, scope: Scope) -> Response: """ Returns an HTTP response, given the incoming path, method and request headers. """ @@ -131,7 +130,7 @@ elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html: # We're in HTML mode, and have got a directory URL. # Check if we have 'index.html' file to serve. - index_path = os.path.join(path, "index.html") + index_path = path.joinpath("index.html") full_path, stat_result = await anyio.to_thread.run_sync( self.lookup_path, index_path ) @@ -158,20 +157,25 @@ raise HTTPException(status_code=404) def lookup_path( - self, path: str - ) -> typing.Tuple[str, typing.Optional[os.stat_result]]: + self, path: Path + ) -> typing.Tuple[Path, typing.Optional[os.stat_result]]: for directory in self.all_directories: - full_path = os.path.realpath(os.path.join(directory, path)) - directory = os.path.realpath(directory) - if os.path.commonprefix([full_path, directory]) != directory: - # Don't allow misbehaving clients to break out of the static files - # directory. - continue + original_path = Path(directory).joinpath(path) + full_path = original_path.resolve() + directory = Path(directory).resolve() try: - return full_path, os.stat(full_path) + stat_result = os.lstat(original_path) + full_path.relative_to(directory) + return full_path, stat_result + except ValueError: + # Allow clients to break out of the static files directory + # if following symlinks. + if stat.S_ISLNK(stat_result.st_mode): + stat_result = os.lstat(full_path) + return full_path, stat_result except (FileNotFoundError, NotADirectoryError): continue - return "", None + return Path(), None def file_response( self,
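The accepted diff above replaces the `os.path.commonprefix` check with pathlib resolution so that symlinks pointing outside the static root are still served while plain traversal attempts are not. A minimal standalone sketch of that containment check follows, assuming Python 3.6+ pathlib semantics; the function signature and variable names here are illustrative and only loosely mirror the patch, they are not quoted from it.

```python
# Sketch of a symlink-tolerant containment check (illustrative, not the patch itself).
import os
import stat
from pathlib import Path
from typing import List, Optional, Tuple


def lookup_path(
    directories: List[str], path: Path
) -> Tuple[Path, Optional[os.stat_result]]:
    for directory in directories:
        original_path = Path(directory).joinpath(path)  # entry as requested
        full_path = original_path.resolve()             # symlinks followed here
        root = Path(directory).resolve()
        try:
            stat_result = os.lstat(original_path)       # stat the entry itself
            full_path.relative_to(root)                 # ValueError if it escapes root
            return full_path, stat_result
        except ValueError:
            # Resolved target lives outside the static root; accept it only when
            # the requested entry is itself a symlink placed inside the root.
            if stat.S_ISLNK(stat_result.st_mode):
                return full_path, os.lstat(full_path)
        except (FileNotFoundError, NotADirectoryError):
            continue
    return Path(), None
```

A hard traversal attempt (a non-symlink path that resolves outside the root) simply falls through to the next directory and ends in the "not found" return, which keeps the 404 behaviour for misbehaving clients.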
{"golden_diff": "diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py\n--- a/starlette/staticfiles.py\n+++ b/starlette/staticfiles.py\n@@ -3,6 +3,7 @@\n import stat\n import typing\n from email.utils import parsedate\n+from pathlib import Path\n \n import anyio\n \n@@ -51,7 +52,7 @@\n self.all_directories = self.get_directories(directory, packages)\n self.html = html\n self.config_checked = False\n- if check_dir and directory is not None and not os.path.isdir(directory):\n+ if check_dir and directory is not None and not Path(directory).is_dir():\n raise RuntimeError(f\"Directory '{directory}' does not exist\")\n \n def get_directories(\n@@ -77,11 +78,9 @@\n spec = importlib.util.find_spec(package)\n assert spec is not None, f\"Package {package!r} could not be found.\"\n assert spec.origin is not None, f\"Package {package!r} could not be found.\"\n- package_directory = os.path.normpath(\n- os.path.join(spec.origin, \"..\", statics_dir)\n- )\n- assert os.path.isdir(\n- package_directory\n+ package_directory = Path(spec.origin).joinpath(\"..\", statics_dir).resolve()\n+ assert (\n+ package_directory.is_dir()\n ), f\"Directory '{statics_dir!r}' in package {package!r} could not be found.\"\n directories.append(package_directory)\n \n@@ -101,14 +100,14 @@\n response = await self.get_response(path, scope)\n await response(scope, receive, send)\n \n- def get_path(self, scope: Scope) -> str:\n+ def get_path(self, scope: Scope) -> Path:\n \"\"\"\n Given the ASGI scope, return the `path` string to serve up,\n with OS specific path separators, and any '..', '.' components removed.\n \"\"\"\n- return os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n+ return Path(*scope[\"path\"].split(\"/\"))\n \n- async def get_response(self, path: str, scope: Scope) -> Response:\n+ async def get_response(self, path: Path, scope: Scope) -> Response:\n \"\"\"\n Returns an HTTP response, given the incoming path, method and request headers.\n \"\"\"\n@@ -131,7 +130,7 @@\n elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:\n # We're in HTML mode, and have got a directory URL.\n # Check if we have 'index.html' file to serve.\n- index_path = os.path.join(path, \"index.html\")\n+ index_path = path.joinpath(\"index.html\")\n full_path, stat_result = await anyio.to_thread.run_sync(\n self.lookup_path, index_path\n )\n@@ -158,20 +157,25 @@\n raise HTTPException(status_code=404)\n \n def lookup_path(\n- self, path: str\n- ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:\n+ self, path: Path\n+ ) -> typing.Tuple[Path, typing.Optional[os.stat_result]]:\n for directory in self.all_directories:\n- full_path = os.path.realpath(os.path.join(directory, path))\n- directory = os.path.realpath(directory)\n- if os.path.commonprefix([full_path, directory]) != directory:\n- # Don't allow misbehaving clients to break out of the static files\n- # directory.\n- continue\n+ original_path = Path(directory).joinpath(path)\n+ full_path = original_path.resolve()\n+ directory = Path(directory).resolve()\n try:\n- return full_path, os.stat(full_path)\n+ stat_result = os.lstat(original_path)\n+ full_path.relative_to(directory)\n+ return full_path, stat_result\n+ except ValueError:\n+ # Allow clients to break out of the static files directory\n+ # if following symlinks.\n+ if stat.S_ISLNK(stat_result.st_mode):\n+ stat_result = os.lstat(full_path)\n+ return full_path, stat_result\n except (FileNotFoundError, NotADirectoryError):\n continue\n- return \"\", None\n+ return Path(), None\n \n def 
file_response(\n self,\n", "issue": "StaticFiles middleware doesn't follow symlinks\n### Checklist\r\n\r\n- [x] The bug is reproducible against the latest release and/or `master`.\r\n- [x] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\nThe StaticFiles middleware is checking the `os.realpath` of a file and returning a 404 for symlinks that lead outside the static directory.\r\n\r\n### To reproduce\r\n\r\n1. create a minimal app with a staticfiles middleware\r\n1. put a symlink in your static directory. the link's target must be above the static directory.\r\n1. you'll get a 404\r\n\r\n### Expected behavior\r\n\r\nSupport symlinks in static directory.\r\n\r\nThe use case for symlinks in static is to target frontend assets that are being generated in file-watch mode.\r\n\r\n### Actual behavior\r\n\r\n404.\r\n\r\n### Debugging material\r\n\r\nIt's happening here:\r\nhttps://github.com/encode/starlette/blob/b95acea973c20eea3e7cbbca42d09b1f5d4a3412/starlette/staticfiles.py#L147-L149\r\n\r\n### Environment\r\n\r\n- OS: linux\r\n- Python version: 3.7.5\r\n- Starlette version: 0.13.8\r\n\r\n### Additional context\r\n\r\nI'm happy to post a PR for this if useful, ideally adding a bool param to the StaticFiles middleware that allows symlinks.\n", "before_files": [{"content": "import importlib.util\nimport os\nimport stat\nimport typing\nfrom email.utils import parsedate\n\nimport anyio\n\nfrom starlette.datastructures import URL, Headers\nfrom starlette.exceptions import HTTPException\nfrom starlette.responses import FileResponse, RedirectResponse, Response\nfrom starlette.types import Receive, Scope, Send\n\nPathLike = typing.Union[str, \"os.PathLike[str]\"]\n\n\nclass NotModifiedResponse(Response):\n NOT_MODIFIED_HEADERS = (\n \"cache-control\",\n \"content-location\",\n \"date\",\n \"etag\",\n \"expires\",\n \"vary\",\n )\n\n def __init__(self, headers: Headers):\n super().__init__(\n status_code=304,\n headers={\n name: value\n for name, value in headers.items()\n if name in self.NOT_MODIFIED_HEADERS\n },\n )\n\n\nclass StaticFiles:\n def __init__(\n self,\n *,\n directory: typing.Optional[PathLike] = None,\n packages: typing.Optional[\n typing.List[typing.Union[str, typing.Tuple[str, str]]]\n ] = None,\n html: bool = False,\n check_dir: bool = True,\n ) -> None:\n self.directory = directory\n self.packages = packages\n self.all_directories = self.get_directories(directory, packages)\n self.html = html\n self.config_checked = False\n if check_dir and directory is not None and not os.path.isdir(directory):\n raise RuntimeError(f\"Directory '{directory}' does not exist\")\n\n def get_directories(\n self,\n directory: typing.Optional[PathLike] = None,\n packages: typing.Optional[\n typing.List[typing.Union[str, typing.Tuple[str, str]]]\n ] = None,\n ) -> typing.List[PathLike]:\n \"\"\"\n Given `directory` and `packages` arguments, return a list of all the\n directories that should be used for serving static files from.\n \"\"\"\n directories = []\n if directory is not None:\n directories.append(directory)\n\n for package in packages or []:\n if isinstance(package, tuple):\n package, statics_dir = package\n else:\n statics_dir = \"statics\"\n spec = importlib.util.find_spec(package)\n assert spec is not None, f\"Package {package!r} could not be found.\"\n assert spec.origin is not None, f\"Package {package!r} could not be found.\"\n package_directory = os.path.normpath(\n os.path.join(spec.origin, \"..\", statics_dir)\n )\n assert os.path.isdir(\n 
package_directory\n ), f\"Directory '{statics_dir!r}' in package {package!r} could not be found.\"\n directories.append(package_directory)\n\n return directories\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"\n The ASGI entry point.\n \"\"\"\n assert scope[\"type\"] == \"http\"\n\n if not self.config_checked:\n await self.check_config()\n self.config_checked = True\n\n path = self.get_path(scope)\n response = await self.get_response(path, scope)\n await response(scope, receive, send)\n\n def get_path(self, scope: Scope) -> str:\n \"\"\"\n Given the ASGI scope, return the `path` string to serve up,\n with OS specific path separators, and any '..', '.' components removed.\n \"\"\"\n return os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n\n async def get_response(self, path: str, scope: Scope) -> Response:\n \"\"\"\n Returns an HTTP response, given the incoming path, method and request headers.\n \"\"\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n raise HTTPException(status_code=405)\n\n try:\n full_path, stat_result = await anyio.to_thread.run_sync(\n self.lookup_path, path\n )\n except PermissionError:\n raise HTTPException(status_code=401)\n except OSError:\n raise\n\n if stat_result and stat.S_ISREG(stat_result.st_mode):\n # We have a static file to serve.\n return self.file_response(full_path, stat_result, scope)\n\n elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:\n # We're in HTML mode, and have got a directory URL.\n # Check if we have 'index.html' file to serve.\n index_path = os.path.join(path, \"index.html\")\n full_path, stat_result = await anyio.to_thread.run_sync(\n self.lookup_path, index_path\n )\n if stat_result is not None and stat.S_ISREG(stat_result.st_mode):\n if not scope[\"path\"].endswith(\"/\"):\n # Directory URLs should redirect to always end in \"/\".\n url = URL(scope=scope)\n url = url.replace(path=url.path + \"/\")\n return RedirectResponse(url=url)\n return self.file_response(full_path, stat_result, scope)\n\n if self.html:\n # Check for '404.html' if we're in HTML mode.\n full_path, stat_result = await anyio.to_thread.run_sync(\n self.lookup_path, \"404.html\"\n )\n if stat_result and stat.S_ISREG(stat_result.st_mode):\n return FileResponse(\n full_path,\n stat_result=stat_result,\n method=scope[\"method\"],\n status_code=404,\n )\n raise HTTPException(status_code=404)\n\n def lookup_path(\n self, path: str\n ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:\n for directory in self.all_directories:\n full_path = os.path.realpath(os.path.join(directory, path))\n directory = os.path.realpath(directory)\n if os.path.commonprefix([full_path, directory]) != directory:\n # Don't allow misbehaving clients to break out of the static files\n # directory.\n continue\n try:\n return full_path, os.stat(full_path)\n except (FileNotFoundError, NotADirectoryError):\n continue\n return \"\", None\n\n def file_response(\n self,\n full_path: PathLike,\n stat_result: os.stat_result,\n scope: Scope,\n status_code: int = 200,\n ) -> Response:\n method = scope[\"method\"]\n request_headers = Headers(scope=scope)\n\n response = FileResponse(\n full_path, status_code=status_code, stat_result=stat_result, method=method\n )\n if self.is_not_modified(response.headers, request_headers):\n return NotModifiedResponse(response.headers)\n return response\n\n async def check_config(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, 
so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n if self.directory is None:\n return\n\n try:\n stat_result = await anyio.to_thread.run_sync(os.stat, self.directory)\n except FileNotFoundError:\n raise RuntimeError(\n f\"StaticFiles directory '{self.directory}' does not exist.\"\n )\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\n f\"StaticFiles path '{self.directory}' is not a directory.\"\n )\n\n def is_not_modified(\n self, response_headers: Headers, request_headers: Headers\n ) -> bool:\n \"\"\"\n Given the request and response headers, return `True` if an HTTP\n \"Not Modified\" response could be returned instead.\n \"\"\"\n try:\n if_none_match = request_headers[\"if-none-match\"]\n etag = response_headers[\"etag\"]\n if if_none_match == etag:\n return True\n except KeyError:\n pass\n\n try:\n if_modified_since = parsedate(request_headers[\"if-modified-since\"])\n last_modified = parsedate(response_headers[\"last-modified\"])\n if (\n if_modified_since is not None\n and last_modified is not None\n and if_modified_since >= last_modified\n ):\n return True\n except KeyError:\n pass\n\n return False\n", "path": "starlette/staticfiles.py"}], "after_files": [{"content": "import importlib.util\nimport os\nimport stat\nimport typing\nfrom email.utils import parsedate\nfrom pathlib import Path\n\nimport anyio\n\nfrom starlette.datastructures import URL, Headers\nfrom starlette.exceptions import HTTPException\nfrom starlette.responses import FileResponse, RedirectResponse, Response\nfrom starlette.types import Receive, Scope, Send\n\nPathLike = typing.Union[str, \"os.PathLike[str]\"]\n\n\nclass NotModifiedResponse(Response):\n NOT_MODIFIED_HEADERS = (\n \"cache-control\",\n \"content-location\",\n \"date\",\n \"etag\",\n \"expires\",\n \"vary\",\n )\n\n def __init__(self, headers: Headers):\n super().__init__(\n status_code=304,\n headers={\n name: value\n for name, value in headers.items()\n if name in self.NOT_MODIFIED_HEADERS\n },\n )\n\n\nclass StaticFiles:\n def __init__(\n self,\n *,\n directory: typing.Optional[PathLike] = None,\n packages: typing.Optional[\n typing.List[typing.Union[str, typing.Tuple[str, str]]]\n ] = None,\n html: bool = False,\n check_dir: bool = True,\n ) -> None:\n self.directory = directory\n self.packages = packages\n self.all_directories = self.get_directories(directory, packages)\n self.html = html\n self.config_checked = False\n if check_dir and directory is not None and not Path(directory).is_dir():\n raise RuntimeError(f\"Directory '{directory}' does not exist\")\n\n def get_directories(\n self,\n directory: typing.Optional[PathLike] = None,\n packages: typing.Optional[\n typing.List[typing.Union[str, typing.Tuple[str, str]]]\n ] = None,\n ) -> typing.List[PathLike]:\n \"\"\"\n Given `directory` and `packages` arguments, return a list of all the\n directories that should be used for serving static files from.\n \"\"\"\n directories = []\n if directory is not None:\n directories.append(directory)\n\n for package in packages or []:\n if isinstance(package, tuple):\n package, statics_dir = package\n else:\n statics_dir = \"statics\"\n spec = importlib.util.find_spec(package)\n assert spec is not None, f\"Package {package!r} could not be found.\"\n assert spec.origin is not None, f\"Package {package!r} could not be found.\"\n package_directory = Path(spec.origin).joinpath(\"..\", statics_dir).resolve()\n assert (\n package_directory.is_dir()\n ), f\"Directory 
'{statics_dir!r}' in package {package!r} could not be found.\"\n directories.append(package_directory)\n\n return directories\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"\n The ASGI entry point.\n \"\"\"\n assert scope[\"type\"] == \"http\"\n\n if not self.config_checked:\n await self.check_config()\n self.config_checked = True\n\n path = self.get_path(scope)\n response = await self.get_response(path, scope)\n await response(scope, receive, send)\n\n def get_path(self, scope: Scope) -> Path:\n \"\"\"\n Given the ASGI scope, return the `path` string to serve up,\n with OS specific path separators, and any '..', '.' components removed.\n \"\"\"\n return Path(*scope[\"path\"].split(\"/\"))\n\n async def get_response(self, path: Path, scope: Scope) -> Response:\n \"\"\"\n Returns an HTTP response, given the incoming path, method and request headers.\n \"\"\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n raise HTTPException(status_code=405)\n\n try:\n full_path, stat_result = await anyio.to_thread.run_sync(\n self.lookup_path, path\n )\n except PermissionError:\n raise HTTPException(status_code=401)\n except OSError:\n raise\n\n if stat_result and stat.S_ISREG(stat_result.st_mode):\n # We have a static file to serve.\n return self.file_response(full_path, stat_result, scope)\n\n elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:\n # We're in HTML mode, and have got a directory URL.\n # Check if we have 'index.html' file to serve.\n index_path = path.joinpath(\"index.html\")\n full_path, stat_result = await anyio.to_thread.run_sync(\n self.lookup_path, index_path\n )\n if stat_result is not None and stat.S_ISREG(stat_result.st_mode):\n if not scope[\"path\"].endswith(\"/\"):\n # Directory URLs should redirect to always end in \"/\".\n url = URL(scope=scope)\n url = url.replace(path=url.path + \"/\")\n return RedirectResponse(url=url)\n return self.file_response(full_path, stat_result, scope)\n\n if self.html:\n # Check for '404.html' if we're in HTML mode.\n full_path, stat_result = await anyio.to_thread.run_sync(\n self.lookup_path, \"404.html\"\n )\n if stat_result and stat.S_ISREG(stat_result.st_mode):\n return FileResponse(\n full_path,\n stat_result=stat_result,\n method=scope[\"method\"],\n status_code=404,\n )\n raise HTTPException(status_code=404)\n\n def lookup_path(\n self, path: Path\n ) -> typing.Tuple[Path, typing.Optional[os.stat_result]]:\n for directory in self.all_directories:\n original_path = Path(directory).joinpath(path)\n full_path = original_path.resolve()\n directory = Path(directory).resolve()\n try:\n stat_result = os.lstat(original_path)\n full_path.relative_to(directory)\n return full_path, stat_result\n except ValueError:\n # Allow clients to break out of the static files directory\n # if following symlinks.\n if stat.S_ISLNK(stat_result.st_mode):\n stat_result = os.lstat(full_path)\n return full_path, stat_result\n except (FileNotFoundError, NotADirectoryError):\n continue\n return Path(), None\n\n def file_response(\n self,\n full_path: PathLike,\n stat_result: os.stat_result,\n scope: Scope,\n status_code: int = 200,\n ) -> Response:\n method = scope[\"method\"]\n request_headers = Headers(scope=scope)\n\n response = FileResponse(\n full_path, status_code=status_code, stat_result=stat_result, method=method\n )\n if self.is_not_modified(response.headers, request_headers):\n return NotModifiedResponse(response.headers)\n return response\n\n async def check_config(self) -> None:\n \"\"\"\n 
Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n if self.directory is None:\n return\n\n try:\n stat_result = await anyio.to_thread.run_sync(os.stat, self.directory)\n except FileNotFoundError:\n raise RuntimeError(\n f\"StaticFiles directory '{self.directory}' does not exist.\"\n )\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\n f\"StaticFiles path '{self.directory}' is not a directory.\"\n )\n\n def is_not_modified(\n self, response_headers: Headers, request_headers: Headers\n ) -> bool:\n \"\"\"\n Given the request and response headers, return `True` if an HTTP\n \"Not Modified\" response could be returned instead.\n \"\"\"\n try:\n if_none_match = request_headers[\"if-none-match\"]\n etag = response_headers[\"etag\"]\n if if_none_match == etag:\n return True\n except KeyError:\n pass\n\n try:\n if_modified_since = parsedate(request_headers[\"if-modified-since\"])\n last_modified = parsedate(response_headers[\"last-modified\"])\n if (\n if_modified_since is not None\n and last_modified is not None\n and if_modified_since >= last_modified\n ):\n return True\n except KeyError:\n pass\n\n return False\n", "path": "starlette/staticfiles.py"}]}
2,937
950
gh_patches_debug_24494
rasdani/github-patches
git_diff
pyjanitor-devs__pyjanitor-11
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- clean_names breaks for MultiIndex Columns MultiIndex columns returns `expected str not tuple` type error. If you instead used, `df.rename(columns=lambda x: x.lower().replace('_', ''))` this would work for standard and MultiIndex DataFrames. Can do PR if required. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `janitor/functions.py` Content: ``` 1 import datetime as dt 2 from functools import reduce 3 4 import pandas as pd 5 6 from .errors import JanitorError 7 8 import re 9 10 11 def clean_names(df): 12 """ 13 Clean column names. 14 15 Takes all column names, converts them to lowercase, then replaces all 16 spaces with underscores. 17 18 Functional usage example: 19 20 .. code-block:: python 21 22 df = clean_names(df) 23 24 Method chaining example: 25 26 .. code-block:: python 27 28 df = pd.DataFrame(...) 29 df = jn.DataFrame(df).clean_names() 30 31 :param df: The pandas DataFrame object. 32 :returns: A pandas DataFrame. 33 """ 34 columns = [(c.lower() 35 .replace(' ', '_') 36 .replace('/', '_') 37 .replace(':', '_') 38 .replace("'", '') 39 .replace('’', '') 40 .replace(',', '_') 41 .replace('?', '_') 42 .replace('-', '_') 43 .replace('(', '_') 44 .replace(')', '_') 45 .replace('.', '_') 46 ) for c in df.columns] 47 48 newcolumns = [] 49 for col in columns: 50 # NOTE: Replace repeating underscores with single ones 51 newcol = re.sub('[_]{2,}', '_', col) 52 newcolumns.append(newcol) 53 df.columns = newcolumns 54 return df 55 56 57 def remove_empty(df): 58 """ 59 Drop all rows and columns that are completely null. 60 61 Implementation is shamelessly copied from `StackOverflow`_. 62 63 .. _StackOverflow: https://stackoverflow.com/questions/38884538/python-pandas-find-all-rows-where-all-values-are-nan # noqa: E501 64 65 Functional usage example: 66 67 .. code-block:: python 68 69 df = remove_empty(df) 70 71 Method chaining example: 72 73 .. code-block:: python 74 75 df = pd.DataFrame(...) 76 df = jn.DataFrame(df).remove_empty() 77 78 :param df: The pandas DataFrame object. 79 :returns: A pandas DataFrame. 80 """ 81 82 nanrows = df.index[df.isnull().all(axis=1)] 83 df.drop(index=nanrows, inplace=True) 84 85 nancols = df.columns[df.isnull().all(axis=0)] 86 df.drop(columns=nancols, inplace=True) 87 88 return df 89 90 91 def get_dupes(df, columns=None): 92 """ 93 Returns all duplicate rows. 94 95 Functional usage example: 96 97 .. code-block:: python 98 99 get_dupes(df) 100 101 Method chaining example: 102 103 .. code-block:: python 104 105 df = pd.DataFrame(...) 106 jn.DataFrame(df).get_dupes() 107 108 :param df: The pandas DataFrame object. 109 :param str/iterable columns: (optional) A column name or an iterable (list 110 or tuple) of column names. Following pandas API, this only considers 111 certain columns for identifying duplicates. Defaults to using all 112 columns. 113 :returns: The duplicate rows, as a pandas DataFrame. 114 """ 115 dupes = df.duplicated(subset=columns, keep=False) 116 return df[dupes == True] # noqa: E712 117 118 119 def encode_categorical(df, columns): 120 """ 121 Encode the specified columns as categorical. 122 123 Functional usage example: 124 125 .. code-block:: python 126 127 encode_categorical(df, columns="my_categorical_column") # one way 128 129 Method chaining example: 130 131 .. code-block:: python 132 133 df = pd.DataFrame(...) 
134 categorical_cols = ['col1', 'col2', 'col4'] 135 jn.DataFrame(df).encode_categorical(columns=categorical_cols) 136 137 :param df: The pandas DataFrame object. 138 :param str/iterable columns: A column name or an iterable (list or tuple) 139 of column names. 140 :returns: A pandas DataFrame 141 """ 142 if isinstance(columns, list) or isinstance(columns, tuple): 143 for col in columns: 144 assert col in df.columns, \ 145 JanitorError("{col} missing from dataframe columns!".format(col=col)) # noqa: E501 146 df[col] = pd.Categorical(df[col]) 147 elif isinstance(columns, str): 148 df[columns] = pd.Categorical(df[columns]) 149 else: 150 raise JanitorError('kwarg `columns` must be a string or iterable!') 151 return df 152 153 154 def get_features_targets(df, target_columns, feature_columns=None): 155 """ 156 Get the features and targets as separate DataFrames/Series. 157 158 The behaviour is as such: 159 160 - `target_columns` is mandatory. 161 - If `feature_columns` is present, then we will respect the column names 162 inside there. 163 - If `feature_columns` is not passed in, then we will assume that the 164 rest of the columns are feature columns, and return them. 165 166 Functional usage example: 167 168 .. code-block:: python 169 170 X, y = get_features_targets(df, target_columns="measurement") 171 172 Method chaining example: 173 174 .. code-block:: python 175 176 df = pd.DataFrame(...) 177 target_cols = ['output1', 'output2'] 178 X, y = jn.DataFrame(df).get_features_targets(target_columns=target_cols) # noqa: E501 179 180 :param df: The pandas DataFrame object. 181 :param str/iterable target_columns: Either a column name or an iterable 182 (list or tuple) of column names that are the target(s) to be predicted. 183 :param str/iterable feature_columns: (optional) The column name or iterable 184 of column names that are the features (a.k.a. predictors) used to 185 predict the targets. 186 :returns: (X, Y) the feature matrix (X) and the target matrix (Y). Both are 187 pandas DataFrames. 188 """ 189 Y = df[target_columns] 190 191 if feature_columns: 192 X = df[feature_columns] 193 else: 194 if isinstance(target_columns, str): 195 xcols = [c for c in df.columns if target_columns != c] 196 elif (isinstance(target_columns, list) 197 or isinstance(target_columns, tuple)): 198 xcols = [c for c in df.columns if c not in target_columns] 199 X = df[xcols] 200 return X, Y 201 202 203 def rename_column(df, old, new): 204 """ 205 Rename a column in place. 206 207 Functional usage example: 208 209 .. code-block:: python 210 211 df = rename_column("old_column_name", "new_column_name") 212 213 Method chaining example: 214 215 .. code-block:: python 216 217 df = pd.DataFrame(...) 218 df = jn.DataFrame(df).rename_column("old_column_name", "new_column_name") # noqa: E501 219 220 This is just syntactic sugar/a convenience function for renaming one column 221 at a time. If you are convinced that there are multiple columns in need of 222 changing, then use the :py:meth:`pandas.DataFrame.rename` method. 223 224 :param str old: The old column name. 225 :param str new: The new column name. 226 :returns: A pandas DataFrame. 227 """ 228 return df.rename(columns={old: new}) 229 230 231 def coalesce(df, columns, new_column_name): 232 """ 233 Coalesces two or more columns of data in order of column names provided. 234 235 Functional usage example: 236 237 .. code-block:: python 238 239 df = coalesce(df, columns=['col1', 'col2']) 240 241 Method chaining example: 242 243 .. 
code-block:: python 244 245 df = pd.DataFrame(...) 246 df = jn.DataFrame(df).coalesce(['col1', 'col2']) 247 248 249 The result of this function is that we take the first non-null value across 250 rows. 251 252 This is more syntactic diabetes! For R users, this should look familiar to 253 `dplyr`'s `coalesce` function; for Python users, the interface 254 should be more intuitive than the :py:meth:`pandas.Series.combine_first` 255 method (which we're just using internally anyways). 256 257 :param df: A pandas DataFrame. 258 :param columns: A list of column names. 259 :param str new_column_name: The new column name after combining. 260 :returns: A pandas DataFrame. 261 """ 262 series = [df[c] for c in columns] 263 264 def _coalesce(series1, series2): 265 return series1.combine_first(series2) 266 df = df.drop(columns=columns) 267 df[new_column_name] = reduce(_coalesce, series) # noqa: F821 268 return df 269 270 271 def convert_excel_date(df, column): 272 """ 273 Convert Excel's serial date format into Python datetime format. 274 275 Implementation is also from `Stack Overflow`. 276 277 .. _Stack Overflow: https://stackoverflow.com/questions/38454403/convert-excel-style-date-with-pandas # noqa: E501 278 279 Functional usage example: 280 281 .. code-block:: python 282 283 df = convert_excel_date(df, column='date') 284 285 Method chaining example: 286 287 .. code-block:: python 288 289 df = pd.DataFrame(...) 290 df = jn.DataFrame(df).convert_excel_date('date') 291 292 :param df: A pandas DataFrame. 293 :param str column: A column name. 294 :returns: A pandas DataFrame with corrected dates. 295 """ 296 df[column] = (pd.TimedeltaIndex(df[column], unit='d') 297 + dt.datetime(1899, 12, 30)) 298 return df 299 300 301 def fill_empty(df, columns, value): 302 """ 303 Fill `NaN` values in specified columns with a given value. 304 305 Super sugary syntax that wraps :py:meth:`pandas.DataFrame.fillna`. 306 307 Functional usage example: 308 309 .. code-block:: python 310 311 df = fill_empty(df, columns=['col1', 'col2'], value=0) 312 313 Method chaining example: 314 315 .. code-block:: python 316 317 df = pd.DataFrame(...) 318 df = jn.DataFrame(df).fill_empty(df, columns='col1', value=0) 319 320 :param df: A pandas DataFrame. 321 :param columns: Either a `str` or `list` or `tuple`. If a string is passed 322 in, then only that column will be filled; if a list or tuple of strings 323 are passed in, then they will all be filled with the same value. 324 :param value: The value that replaces the `NaN` values. 325 """ 326 if isinstance(columns, list) or isinstance(columns, tuple): 327 for col in columns: 328 assert col in df.columns, \ 329 JanitorError("{col} missing from dataframe columns!".format(col=col)) # noqa: E501 330 df[col] = df[col].fillna(value) 331 elif isinstance(columns, str): 332 df[columns] = df[columns].fillna(value) 333 else: 334 raise JanitorError('kwarg `columns` must be a string or iterable!') 335 336 return df 337 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/janitor/functions.py b/janitor/functions.py --- a/janitor/functions.py +++ b/janitor/functions.py @@ -31,26 +31,22 @@ :param df: The pandas DataFrame object. :returns: A pandas DataFrame. """ - columns = [(c.lower() - .replace(' ', '_') - .replace('/', '_') - .replace(':', '_') - .replace("'", '') - .replace('’', '') - .replace(',', '_') - .replace('?', '_') - .replace('-', '_') - .replace('(', '_') - .replace(')', '_') - .replace('.', '_') - ) for c in df.columns] - - newcolumns = [] - for col in columns: - # NOTE: Replace repeating underscores with single ones - newcol = re.sub('[_]{2,}', '_', col) - newcolumns.append(newcol) - df.columns = newcolumns + df = df.rename( + columns=lambda x: x.lower() + .replace(' ', '_') + .replace('/', '_') + .replace(':', '_') + .replace("'", '') + .replace('’', '') + .replace(',', '_') + .replace('?', '_') + .replace('-', '_') + .replace('(', '_') + .replace(')', '_') + .replace('.', '_') + ) + + df = df.rename(columns=lambda x: re.sub('_+', '_', x)) return df
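The fix above leans on the behaviour the issue reporter points out: `DataFrame.rename` applies a callable mapper to every column label, including each level label of a MultiIndex, whereas iterating over `df.columns` hands back tuples. A small check of that behaviour is sketched below; the column names are made up for the example.

```python
# Quick illustration that a lambda/callable rename handles both flat and
# MultiIndex columns, which is what the patch relies on.
import re

import pandas as pd


def tidy(name: str) -> str:
    # Lowercase, replace separators, then squash repeated underscores.
    name = name.lower().replace(' ', '_').replace('-', '_')
    return re.sub('_+', '_', name)


flat = pd.DataFrame(columns=['First Name', 'Last-Name'])
multi = pd.DataFrame(
    columns=pd.MultiIndex.from_tuples(
        [('Group A', 'First Name'), ('Group A', 'Last-Name')]
    )
)

print(flat.rename(columns=tidy).columns.tolist())
# expect: ['first_name', 'last_name']
print(multi.rename(columns=tidy).columns.tolist())
# expect: [('group_a', 'first_name'), ('group_a', 'last_name')]
```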
{"golden_diff": "diff --git a/janitor/functions.py b/janitor/functions.py\n--- a/janitor/functions.py\n+++ b/janitor/functions.py\n@@ -31,26 +31,22 @@\n :param df: The pandas DataFrame object.\n :returns: A pandas DataFrame.\n \"\"\"\n- columns = [(c.lower()\n- .replace(' ', '_')\n- .replace('/', '_')\n- .replace(':', '_')\n- .replace(\"'\", '')\n- .replace('\u2019', '')\n- .replace(',', '_')\n- .replace('?', '_')\n- .replace('-', '_')\n- .replace('(', '_')\n- .replace(')', '_')\n- .replace('.', '_')\n- ) for c in df.columns]\n-\n- newcolumns = []\n- for col in columns:\n- # NOTE: Replace repeating underscores with single ones\n- newcol = re.sub('[_]{2,}', '_', col)\n- newcolumns.append(newcol)\n- df.columns = newcolumns\n+ df = df.rename(\n+ columns=lambda x: x.lower()\n+ .replace(' ', '_')\n+ .replace('/', '_')\n+ .replace(':', '_')\n+ .replace(\"'\", '')\n+ .replace('\u2019', '')\n+ .replace(',', '_')\n+ .replace('?', '_')\n+ .replace('-', '_')\n+ .replace('(', '_')\n+ .replace(')', '_')\n+ .replace('.', '_')\n+ )\n+\n+ df = df.rename(columns=lambda x: re.sub('_+', '_', x))\n return df\n", "issue": "clean_names breaks for MultiIndex Columns\nMultiIndex columns returns `expected str not tuple` type error.\r\n\r\nIf you instead used,\r\n`df.rename(columns=lambda x: x.lower().replace('_', ''))`\r\nthis would work for standard and MultiIndex DataFrames.\r\n\r\nCan do PR if required.\n", "before_files": [{"content": "import datetime as dt\nfrom functools import reduce\n\nimport pandas as pd\n\nfrom .errors import JanitorError\n\nimport re\n\n\ndef clean_names(df):\n \"\"\"\n Clean column names.\n\n Takes all column names, converts them to lowercase, then replaces all\n spaces with underscores.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = clean_names(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).clean_names()\n\n :param df: The pandas DataFrame object.\n :returns: A pandas DataFrame.\n \"\"\"\n columns = [(c.lower()\n .replace(' ', '_')\n .replace('/', '_')\n .replace(':', '_')\n .replace(\"'\", '')\n .replace('\u2019', '')\n .replace(',', '_')\n .replace('?', '_')\n .replace('-', '_')\n .replace('(', '_')\n .replace(')', '_')\n .replace('.', '_')\n ) for c in df.columns]\n\n newcolumns = []\n for col in columns:\n # NOTE: Replace repeating underscores with single ones\n newcol = re.sub('[_]{2,}', '_', col)\n newcolumns.append(newcol)\n df.columns = newcolumns\n return df\n\n\ndef remove_empty(df):\n \"\"\"\n Drop all rows and columns that are completely null.\n\n Implementation is shamelessly copied from `StackOverflow`_.\n\n .. _StackOverflow: https://stackoverflow.com/questions/38884538/python-pandas-find-all-rows-where-all-values-are-nan # noqa: E501\n\n Functional usage example:\n\n .. code-block:: python\n\n df = remove_empty(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).remove_empty()\n\n :param df: The pandas DataFrame object.\n :returns: A pandas DataFrame.\n \"\"\"\n\n nanrows = df.index[df.isnull().all(axis=1)]\n df.drop(index=nanrows, inplace=True)\n\n nancols = df.columns[df.isnull().all(axis=0)]\n df.drop(columns=nancols, inplace=True)\n\n return df\n\n\ndef get_dupes(df, columns=None):\n \"\"\"\n Returns all duplicate rows.\n\n Functional usage example:\n\n .. code-block:: python\n\n get_dupes(df)\n\n Method chaining example:\n\n .. 
code-block:: python\n\n df = pd.DataFrame(...)\n jn.DataFrame(df).get_dupes()\n\n :param df: The pandas DataFrame object.\n :param str/iterable columns: (optional) A column name or an iterable (list\n or tuple) of column names. Following pandas API, this only considers\n certain columns for identifying duplicates. Defaults to using all\n columns.\n :returns: The duplicate rows, as a pandas DataFrame.\n \"\"\"\n dupes = df.duplicated(subset=columns, keep=False)\n return df[dupes == True] # noqa: E712\n\n\ndef encode_categorical(df, columns):\n \"\"\"\n Encode the specified columns as categorical.\n\n Functional usage example:\n\n .. code-block:: python\n\n encode_categorical(df, columns=\"my_categorical_column\") # one way\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n categorical_cols = ['col1', 'col2', 'col4']\n jn.DataFrame(df).encode_categorical(columns=categorical_cols)\n\n :param df: The pandas DataFrame object.\n :param str/iterable columns: A column name or an iterable (list or tuple)\n of column names.\n :returns: A pandas DataFrame\n \"\"\"\n if isinstance(columns, list) or isinstance(columns, tuple):\n for col in columns:\n assert col in df.columns, \\\n JanitorError(\"{col} missing from dataframe columns!\".format(col=col)) # noqa: E501\n df[col] = pd.Categorical(df[col])\n elif isinstance(columns, str):\n df[columns] = pd.Categorical(df[columns])\n else:\n raise JanitorError('kwarg `columns` must be a string or iterable!')\n return df\n\n\ndef get_features_targets(df, target_columns, feature_columns=None):\n \"\"\"\n Get the features and targets as separate DataFrames/Series.\n\n The behaviour is as such:\n\n - `target_columns` is mandatory.\n - If `feature_columns` is present, then we will respect the column names\n inside there.\n - If `feature_columns` is not passed in, then we will assume that the\n rest of the columns are feature columns, and return them.\n\n Functional usage example:\n\n .. code-block:: python\n\n X, y = get_features_targets(df, target_columns=\"measurement\")\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n target_cols = ['output1', 'output2']\n X, y = jn.DataFrame(df).get_features_targets(target_columns=target_cols) # noqa: E501\n\n :param df: The pandas DataFrame object.\n :param str/iterable target_columns: Either a column name or an iterable\n (list or tuple) of column names that are the target(s) to be predicted.\n :param str/iterable feature_columns: (optional) The column name or iterable\n of column names that are the features (a.k.a. predictors) used to\n predict the targets.\n :returns: (X, Y) the feature matrix (X) and the target matrix (Y). Both are\n pandas DataFrames.\n \"\"\"\n Y = df[target_columns]\n\n if feature_columns:\n X = df[feature_columns]\n else:\n if isinstance(target_columns, str):\n xcols = [c for c in df.columns if target_columns != c]\n elif (isinstance(target_columns, list)\n or isinstance(target_columns, tuple)):\n xcols = [c for c in df.columns if c not in target_columns]\n X = df[xcols]\n return X, Y\n\n\ndef rename_column(df, old, new):\n \"\"\"\n Rename a column in place.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = rename_column(\"old_column_name\", \"new_column_name\")\n\n Method chaining example:\n\n .. 
code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).rename_column(\"old_column_name\", \"new_column_name\") # noqa: E501\n\n This is just syntactic sugar/a convenience function for renaming one column\n at a time. If you are convinced that there are multiple columns in need of\n changing, then use the :py:meth:`pandas.DataFrame.rename` method.\n\n :param str old: The old column name.\n :param str new: The new column name.\n :returns: A pandas DataFrame.\n \"\"\"\n return df.rename(columns={old: new})\n\n\ndef coalesce(df, columns, new_column_name):\n \"\"\"\n Coalesces two or more columns of data in order of column names provided.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = coalesce(df, columns=['col1', 'col2'])\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).coalesce(['col1', 'col2'])\n\n\n The result of this function is that we take the first non-null value across\n rows.\n\n This is more syntactic diabetes! For R users, this should look familiar to\n `dplyr`'s `coalesce` function; for Python users, the interface\n should be more intuitive than the :py:meth:`pandas.Series.combine_first`\n method (which we're just using internally anyways).\n\n :param df: A pandas DataFrame.\n :param columns: A list of column names.\n :param str new_column_name: The new column name after combining.\n :returns: A pandas DataFrame.\n \"\"\"\n series = [df[c] for c in columns]\n\n def _coalesce(series1, series2):\n return series1.combine_first(series2)\n df = df.drop(columns=columns)\n df[new_column_name] = reduce(_coalesce, series) # noqa: F821\n return df\n\n\ndef convert_excel_date(df, column):\n \"\"\"\n Convert Excel's serial date format into Python datetime format.\n\n Implementation is also from `Stack Overflow`.\n\n .. _Stack Overflow: https://stackoverflow.com/questions/38454403/convert-excel-style-date-with-pandas # noqa: E501\n\n Functional usage example:\n\n .. code-block:: python\n\n df = convert_excel_date(df, column='date')\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).convert_excel_date('date')\n\n :param df: A pandas DataFrame.\n :param str column: A column name.\n :returns: A pandas DataFrame with corrected dates.\n \"\"\"\n df[column] = (pd.TimedeltaIndex(df[column], unit='d')\n + dt.datetime(1899, 12, 30))\n return df\n\n\ndef fill_empty(df, columns, value):\n \"\"\"\n Fill `NaN` values in specified columns with a given value.\n\n Super sugary syntax that wraps :py:meth:`pandas.DataFrame.fillna`.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = fill_empty(df, columns=['col1', 'col2'], value=0)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).fill_empty(df, columns='col1', value=0)\n\n :param df: A pandas DataFrame.\n :param columns: Either a `str` or `list` or `tuple`. 
If a string is passed\n in, then only that column will be filled; if a list or tuple of strings\n are passed in, then they will all be filled with the same value.\n :param value: The value that replaces the `NaN` values.\n \"\"\"\n if isinstance(columns, list) or isinstance(columns, tuple):\n for col in columns:\n assert col in df.columns, \\\n JanitorError(\"{col} missing from dataframe columns!\".format(col=col)) # noqa: E501\n df[col] = df[col].fillna(value)\n elif isinstance(columns, str):\n df[columns] = df[columns].fillna(value)\n else:\n raise JanitorError('kwarg `columns` must be a string or iterable!')\n\n return df\n", "path": "janitor/functions.py"}], "after_files": [{"content": "import datetime as dt\nfrom functools import reduce\n\nimport pandas as pd\n\nfrom .errors import JanitorError\n\nimport re\n\n\ndef clean_names(df):\n \"\"\"\n Clean column names.\n\n Takes all column names, converts them to lowercase, then replaces all\n spaces with underscores.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = clean_names(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).clean_names()\n\n :param df: The pandas DataFrame object.\n :returns: A pandas DataFrame.\n \"\"\"\n df = df.rename(\n columns=lambda x: x.lower()\n .replace(' ', '_')\n .replace('/', '_')\n .replace(':', '_')\n .replace(\"'\", '')\n .replace('\u2019', '')\n .replace(',', '_')\n .replace('?', '_')\n .replace('-', '_')\n .replace('(', '_')\n .replace(')', '_')\n .replace('.', '_')\n )\n\n df = df.rename(columns=lambda x: re.sub('_+', '_', x))\n return df\n\n\ndef remove_empty(df):\n \"\"\"\n Drop all rows and columns that are completely null.\n\n Implementation is shamelessly copied from `StackOverflow`_.\n\n .. _StackOverflow: https://stackoverflow.com/questions/38884538/python-pandas-find-all-rows-where-all-values-are-nan # noqa: E501\n\n Functional usage example:\n\n .. code-block:: python\n\n df = remove_empty(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).remove_empty()\n\n :param df: The pandas DataFrame object.\n :returns: A pandas DataFrame.\n \"\"\"\n\n nanrows = df.index[df.isnull().all(axis=1)]\n df.drop(index=nanrows, inplace=True)\n\n nancols = df.columns[df.isnull().all(axis=0)]\n df.drop(columns=nancols, inplace=True)\n\n return df\n\n\ndef get_dupes(df, columns=None):\n \"\"\"\n Returns all duplicate rows.\n\n Functional usage example:\n\n .. code-block:: python\n\n get_dupes(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n jn.DataFrame(df).get_dupes()\n\n :param df: The pandas DataFrame object.\n :param str/iterable columns: (optional) A column name or an iterable (list\n or tuple) of column names. Following pandas API, this only considers\n certain columns for identifying duplicates. Defaults to using all\n columns.\n :returns: The duplicate rows, as a pandas DataFrame.\n \"\"\"\n dupes = df.duplicated(subset=columns, keep=False)\n return df[dupes == True] # noqa: E712\n\n\ndef encode_categorical(df, columns):\n \"\"\"\n Encode the specified columns as categorical.\n\n Functional usage example:\n\n .. code-block:: python\n\n encode_categorical(df, columns=\"my_categorical_column\") # one way\n\n Method chaining example:\n\n .. 
code-block:: python\n\n df = pd.DataFrame(...)\n categorical_cols = ['col1', 'col2', 'col4']\n jn.DataFrame(df).encode_categorical(columns=categorical_cols)\n\n :param df: The pandas DataFrame object.\n :param str/iterable columns: A column name or an iterable (list or tuple)\n of column names.\n :returns: A pandas DataFrame\n \"\"\"\n if isinstance(columns, list) or isinstance(columns, tuple):\n for col in columns:\n assert col in df.columns, \\\n JanitorError(\"{col} missing from dataframe columns!\".format(col=col)) # noqa: E501\n df[col] = pd.Categorical(df[col])\n elif isinstance(columns, str):\n df[columns] = pd.Categorical(df[columns])\n else:\n raise JanitorError('kwarg `columns` must be a string or iterable!')\n return df\n\n\ndef get_features_targets(df, target_columns, feature_columns=None):\n \"\"\"\n Get the features and targets as separate DataFrames/Series.\n\n The behaviour is as such:\n\n - `target_columns` is mandatory.\n - If `feature_columns` is present, then we will respect the column names\n inside there.\n - If `feature_columns` is not passed in, then we will assume that the\n rest of the columns are feature columns, and return them.\n\n Functional usage example:\n\n .. code-block:: python\n\n X, y = get_features_targets(df, target_columns=\"measurement\")\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n target_cols = ['output1', 'output2']\n X, y = jn.DataFrame(df).get_features_targets(target_columns=target_cols) # noqa: E501\n\n :param df: The pandas DataFrame object.\n :param str/iterable target_columns: Either a column name or an iterable\n (list or tuple) of column names that are the target(s) to be predicted.\n :param str/iterable feature_columns: (optional) The column name or iterable\n of column names that are the features (a.k.a. predictors) used to\n predict the targets.\n :returns: (X, Y) the feature matrix (X) and the target matrix (Y). Both are\n pandas DataFrames.\n \"\"\"\n Y = df[target_columns]\n\n if feature_columns:\n X = df[feature_columns]\n else:\n if isinstance(target_columns, str):\n xcols = [c for c in df.columns if target_columns != c]\n elif (isinstance(target_columns, list)\n or isinstance(target_columns, tuple)):\n xcols = [c for c in df.columns if c not in target_columns]\n X = df[xcols]\n return X, Y\n\n\ndef rename_column(df, old, new):\n \"\"\"\n Rename a column in place.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = rename_column(\"old_column_name\", \"new_column_name\")\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).rename_column(\"old_column_name\", \"new_column_name\") # noqa: E501\n\n This is just syntactic sugar/a convenience function for renaming one column\n at a time. If you are convinced that there are multiple columns in need of\n changing, then use the :py:meth:`pandas.DataFrame.rename` method.\n\n :param str old: The old column name.\n :param str new: The new column name.\n :returns: A pandas DataFrame.\n \"\"\"\n return df.rename(columns={old: new})\n\n\ndef coalesce(df, columns, new_column_name):\n \"\"\"\n Coalesces two or more columns of data in order of column names provided.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = coalesce(df, columns=['col1', 'col2'])\n\n Method chaining example:\n\n .. 
code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).coalesce(['col1', 'col2'])\n\n\n The result of this function is that we take the first non-null value across\n rows.\n\n This is more syntactic diabetes! For R users, this should look familiar to\n `dplyr`'s `coalesce` function; for Python users, the interface\n should be more intuitive than the :py:meth:`pandas.Series.combine_first`\n method (which we're just using internally anyways).\n\n :param df: A pandas DataFrame.\n :param columns: A list of column names.\n :param str new_column_name: The new column name after combining.\n :returns: A pandas DataFrame.\n \"\"\"\n series = [df[c] for c in columns]\n\n def _coalesce(series1, series2):\n return series1.combine_first(series2)\n df = df.drop(columns=columns)\n df[new_column_name] = reduce(_coalesce, series) # noqa: F821\n return df\n\n\ndef convert_excel_date(df, column):\n \"\"\"\n Convert Excel's serial date format into Python datetime format.\n\n Implementation is also from `Stack Overflow`.\n\n .. _Stack Overflow: https://stackoverflow.com/questions/38454403/convert-excel-style-date-with-pandas # noqa: E501\n\n Functional usage example:\n\n .. code-block:: python\n\n df = convert_excel_date(df, column='date')\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).convert_excel_date('date')\n\n :param df: A pandas DataFrame.\n :param str column: A column name.\n :returns: A pandas DataFrame with corrected dates.\n \"\"\"\n df[column] = (pd.TimedeltaIndex(df[column], unit='d')\n + dt.datetime(1899, 12, 30))\n return df\n\n\ndef fill_empty(df, columns, value):\n \"\"\"\n Fill `NaN` values in specified columns with a given value.\n\n Super sugary syntax that wraps :py:meth:`pandas.DataFrame.fillna`.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = fill_empty(df, columns=['col1', 'col2'], value=0)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).fill_empty(df, columns='col1', value=0)\n\n :param df: A pandas DataFrame.\n :param columns: Either a `str` or `list` or `tuple`. If a string is passed\n in, then only that column will be filled; if a list or tuple of strings\n are passed in, then they will all be filled with the same value.\n :param value: The value that replaces the `NaN` values.\n \"\"\"\n if isinstance(columns, list) or isinstance(columns, tuple):\n for col in columns:\n assert col in df.columns, \\\n JanitorError(\"{col} missing from dataframe columns!\".format(col=col)) # noqa: E501\n df[col] = df[col].fillna(value)\n elif isinstance(columns, str):\n df[columns] = df[columns].fillna(value)\n else:\n raise JanitorError('kwarg `columns` must be a string or iterable!')\n\n return df\n", "path": "janitor/functions.py"}]}
3,592
344
gh_patches_debug_13860
rasdani/github-patches
git_diff
pre-commit__pre-commit-83
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pre-commit crashes when running during unresolved merge conflict I intentionally forced the following by making two branches conflict and then editing a file on that branch. `pre-commit` should fail-fast in a merge conflict situation. ``` $ git diff --exit-code diff --cc foo.txt index 8ff26e7,c148433..0000000 --- a/foo.txt +++ b/foo.txt @@@ -1,4 -1,5 +1,11 @@@ asdf ++<<<<<<< HEAD +fdsa +yeah +yeah ++======= + asdf + asdf + asdf + ++>>>>>>> derp diff --git a/git_code_debt/generate.py b/git_code_debt/generate.py index 12ceec6..967506e 100644 --- a/git_code_debt/generate.py +++ b/git_code_debt/generate.py @@ -12,6 +12,7 @@ from git_code_debt.logic import get_previous_sha from git_code_debt.logic import insert_metric_values from git_code_debt.repo_parser import RepoParser + def get_metrics(diff, metric_parsers): def get_all_metrics(file_diff_stats): for metric_parser_cls in metric_parsers: (py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$ echo $? 1 (py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$ pre-commit [WARNING] Unstaged files detected. [INFO] Stashing unstaged files to /tmp/git-code-debt/.pre-commit-files/patch1397455577. Traceback (most recent call last): File "/tmp/git-code-debt/py_env/bin/pre-commit", line 9, in <module> load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')() File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/util.py", line 52, in wrapper return func(argv) File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/run.py", line 59, in run return commands.run(runner, args) File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/commands.py", line 242, in run with ctx: File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__ return self.gen.next() File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/staged_files_only.py", line 35, in staged_files_only cmd_runner.run(['git', 'checkout', '--', '.']) File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 77, in run returncode, replaced_cmd, retcode, output=(stdout, stderr), pre_commit.prefixed_command_runner.CalledProcessError: Command: ['git', 'checkout', '--', '.'] Return code: 1 Expected return code: 0 Output: (u'', u"error: path 'foo.txt' is unmerged\n") (py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$ ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `pre_commit/commands.py` Content: ``` 1 from __future__ import print_function 2 3 import logging 4 import os 5 import pkg_resources 6 import shutil 7 import stat 8 import subprocess 9 import sys 10 from asottile.ordereddict import OrderedDict 11 from asottile.yaml import ordered_dump 12 from asottile.yaml import ordered_load 13 from plumbum import local 14 15 import pre_commit.constants as C 16 from pre_commit import git 17 from pre_commit import color 18 from pre_commit.clientlib.validate_config import CONFIG_JSON_SCHEMA 19 from pre_commit.clientlib.validate_config import load_config 20 from pre_commit.jsonschema_extensions import remove_defaults 21 from pre_commit.logging_handler import LoggingHandler 22 from pre_commit.repository import Repository 23 from pre_commit.staged_files_only import staged_files_only 24 from pre_commit.util import noop_context 25 26 27 logger = logging.getLogger('pre_commit') 28 29 COLS = int(subprocess.Popen(['tput', 'cols'], stdout=subprocess.PIPE).communicate()[0]) 30 31 PASS_FAIL_LENGTH = 6 32 33 34 def install(runner): 35 """Install the pre-commit hooks.""" 36 pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh') 37 with open(runner.pre_commit_path, 'w') as pre_commit_file_obj: 38 pre_commit_file_obj.write(open(pre_commit_file).read()) 39 40 original_mode = os.stat(runner.pre_commit_path).st_mode 41 os.chmod( 42 runner.pre_commit_path, 43 original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH, 44 ) 45 46 print('pre-commit installed at {0}'.format(runner.pre_commit_path)) 47 48 return 0 49 50 51 def uninstall(runner): 52 """Uninstall the pre-commit hooks.""" 53 if os.path.exists(runner.pre_commit_path): 54 os.remove(runner.pre_commit_path) 55 print('pre-commit uninstalled') 56 return 0 57 58 59 class RepositoryCannotBeUpdatedError(RuntimeError): 60 pass 61 62 63 def _update_repository(repo_config): 64 """Updates a repository to the tip of `master`. 
If the repository cannot 65 be updated because a hook that is configured does not exist in `master`, 66 this raises a RepositoryCannotBeUpdatedError 67 68 Args: 69 repo_config - A config for a repository 70 """ 71 repo = Repository(repo_config) 72 73 with repo.in_checkout(): 74 local['git']['fetch']() 75 head_sha = local['git']['rev-parse', 'origin/master']().strip() 76 77 # Don't bother trying to update if our sha is the same 78 if head_sha == repo_config['sha']: 79 return repo_config 80 81 # Construct a new config with the head sha 82 new_config = OrderedDict(repo_config) 83 new_config['sha'] = head_sha 84 new_repo = Repository(new_config) 85 86 # See if any of our hooks were deleted with the new commits 87 hooks = set(repo.hooks.keys()) 88 hooks_missing = hooks - (hooks & set(new_repo.manifest.keys())) 89 if hooks_missing: 90 raise RepositoryCannotBeUpdatedError( 91 'Cannot update because the tip of master is missing these hooks:\n' 92 '{0}'.format(', '.join(sorted(hooks_missing))) 93 ) 94 95 return remove_defaults([new_config], CONFIG_JSON_SCHEMA)[0] 96 97 98 def autoupdate(runner): 99 """Auto-update the pre-commit config to the latest versions of repos.""" 100 retv = 0 101 output_configs = [] 102 changed = False 103 104 input_configs = load_config( 105 runner.config_file_path, 106 load_strategy=ordered_load, 107 ) 108 109 for repo_config in input_configs: 110 print('Updating {0}...'.format(repo_config['repo']), end='') 111 try: 112 new_repo_config = _update_repository(repo_config) 113 except RepositoryCannotBeUpdatedError as error: 114 print(error.args[0]) 115 output_configs.append(repo_config) 116 retv = 1 117 continue 118 119 if new_repo_config['sha'] != repo_config['sha']: 120 changed = True 121 print( 122 'updating {0} -> {1}.'.format( 123 repo_config['sha'], new_repo_config['sha'], 124 ) 125 ) 126 output_configs.append(new_repo_config) 127 else: 128 print('already up to date.') 129 output_configs.append(repo_config) 130 131 if changed: 132 with open(runner.config_file_path, 'w') as config_file: 133 config_file.write( 134 ordered_dump(output_configs, **C.YAML_DUMP_KWARGS) 135 ) 136 137 return retv 138 139 140 def clean(runner): 141 if os.path.exists(runner.hooks_workspace_path): 142 shutil.rmtree(runner.hooks_workspace_path) 143 print('Cleaned {0}.'.format(runner.hooks_workspace_path)) 144 return 0 145 146 147 def _run_single_hook(runner, repository, hook_id, args, write): 148 if args.all_files: 149 get_filenames = git.get_all_files_matching 150 else: 151 get_filenames = git.get_staged_files_matching 152 153 hook = repository.hooks[hook_id] 154 155 filenames = get_filenames(hook['files'], hook['exclude']) 156 if not filenames: 157 no_files_msg = '(no files to check) ' 158 skipped_msg = 'Skipped' 159 write( 160 '{0}{1}{2}{3}\n'.format( 161 hook['name'], 162 '.' * ( 163 COLS - 164 len(hook['name']) - 165 len(no_files_msg) - 166 len(skipped_msg) - 167 6 168 ), 169 no_files_msg, 170 color.format_color(skipped_msg, color.TURQUOISE, args.color), 171 ) 172 ) 173 return 0 174 175 # Print the hook and the dots first in case the hook takes hella long to 176 # run. 177 write( 178 '{0}{1}'.format( 179 hook['name'], 180 '.' 
* (COLS - len(hook['name']) - PASS_FAIL_LENGTH - 6), 181 ), 182 ) 183 sys.stdout.flush() 184 185 retcode, stdout, stderr = repository.run_hook( 186 runner.cmd_runner, 187 hook_id, 188 filenames, 189 ) 190 191 if retcode != repository.hooks[hook_id]['expected_return_value']: 192 retcode = 1 193 print_color = color.RED 194 pass_fail = 'Failed' 195 else: 196 retcode = 0 197 print_color = color.GREEN 198 pass_fail = 'Passed' 199 200 write(color.format_color(pass_fail, print_color, args.color) + '\n') 201 202 if (stdout or stderr) and (retcode or args.verbose): 203 write('\n') 204 for output in (stdout, stderr): 205 if output.strip(): 206 write(output.strip() + '\n') 207 write('\n') 208 209 return retcode 210 211 212 def _run_hooks(runner, args, write): 213 """Actually run the hooks.""" 214 retval = 0 215 216 for repo in runner.repositories: 217 for hook_id in repo.hooks: 218 retval |= _run_single_hook(runner, repo, hook_id, args, write=write) 219 220 return retval 221 222 223 def _run_hook(runner, hook_id, args, write): 224 for repo in runner.repositories: 225 if hook_id in repo.hooks: 226 return _run_single_hook(runner, repo, hook_id, args, write=write) 227 else: 228 write('No hook with id `{0}`\n'.format(hook_id)) 229 return 1 230 231 232 def run(runner, args, write=sys.stdout.write): 233 # Set up our logging handler 234 logger.addHandler(LoggingHandler(args.color, write=write)) 235 logger.setLevel(logging.INFO) 236 237 if args.no_stash or args.all_files: 238 ctx = noop_context() 239 else: 240 ctx = staged_files_only(runner.cmd_runner) 241 242 with ctx: 243 if args.hook: 244 return _run_hook(runner, args.hook, args, write=write) 245 else: 246 return _run_hooks(runner, args, write=write) 247 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/commands.py b/pre_commit/commands.py --- a/pre_commit/commands.py +++ b/pre_commit/commands.py @@ -229,11 +229,21 @@ return 1 +def _has_unmerged_paths(runner): + _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged']) + return bool(stdout.strip()) + + def run(runner, args, write=sys.stdout.write): # Set up our logging handler logger.addHandler(LoggingHandler(args.color, write=write)) logger.setLevel(logging.INFO) + # Check if we have unresolved merge conflict files and fail fast. + if _has_unmerged_paths(runner): + logger.error('Unmerged files. Resolve before committing.') + return 1 + if args.no_stash or args.all_files: ctx = noop_context() else:
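The patch above fails fast by asking git itself whether any paths are still unmerged: `git ls-files --unmerged` prints one entry per conflicted index stage, so any non-empty output means a merge is still in progress and pre-commit can refuse to run before `staged_files_only` ever attempts `git checkout -- .`. The snippet below is a minimal standalone sketch of that check using `subprocess` rather than the project's own command-runner wrapper; the `has_unmerged_paths` name and `repo_dir` argument are illustrative, not part of pre-commit's API.

```python
import subprocess


def has_unmerged_paths(repo_dir="."):
    """Return True if the repository at repo_dir has unresolved merge conflicts.

    `git ls-files --unmerged` emits one line per conflicted index stage,
    so any non-empty output means a merge is still unresolved.
    """
    out = subprocess.check_output(["git", "ls-files", "--unmerged"], cwd=repo_dir)
    return bool(out.strip())


if __name__ == "__main__":
    if has_unmerged_paths():
        print("Unmerged files. Resolve before committing.")
        raise SystemExit(1)
```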
{"golden_diff": "diff --git a/pre_commit/commands.py b/pre_commit/commands.py\n--- a/pre_commit/commands.py\n+++ b/pre_commit/commands.py\n@@ -229,11 +229,21 @@\n return 1\n \n \n+def _has_unmerged_paths(runner):\n+ _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])\n+ return bool(stdout.strip())\n+\n+\n def run(runner, args, write=sys.stdout.write):\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n \n+ # Check if we have unresolved merge conflict files and fail fast.\n+ if _has_unmerged_paths(runner):\n+ logger.error('Unmerged files. Resolve before committing.')\n+ return 1\n+\n if args.no_stash or args.all_files:\n ctx = noop_context()\n else:\n", "issue": "pre-commit crashes when running during unresolved merge conflict\nI intentionally forced the following by making two branches conflict and then editing a file on that branch. `pre-commit` should fail-fast in a merge conflict situation.\n\n```\n$ git diff --exit-code\ndiff --cc foo.txt\nindex 8ff26e7,c148433..0000000\n--- a/foo.txt\n+++ b/foo.txt\n@@@ -1,4 -1,5 +1,11 @@@\n asdf\n++<<<<<<< HEAD\n +fdsa\n +yeah\n +yeah\n++=======\n+ asdf\n+ asdf\n+ asdf\n+ \n++>>>>>>> derp\ndiff --git a/git_code_debt/generate.py b/git_code_debt/generate.py\nindex 12ceec6..967506e 100644\n--- a/git_code_debt/generate.py\n+++ b/git_code_debt/generate.py\n@@ -12,6 +12,7 @@ from git_code_debt.logic import get_previous_sha\n from git_code_debt.logic import insert_metric_values\n from git_code_debt.repo_parser import RepoParser\n\n+\n def get_metrics(diff, metric_parsers):\n def get_all_metrics(file_diff_stats):\n for metric_parser_cls in metric_parsers:\n(py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$ echo $?\n1\n(py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$ pre-commit \n[WARNING] Unstaged files detected.\n[INFO] Stashing unstaged files to /tmp/git-code-debt/.pre-commit-files/patch1397455577.\nTraceback (most recent call last):\n File \"/tmp/git-code-debt/py_env/bin/pre-commit\", line 9, in <module>\n load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')()\n File \"/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/util.py\", line 52, in wrapper\n return func(argv)\n File \"/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/run.py\", line 59, in run\n return commands.run(runner, args)\n File \"/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/commands.py\", line 242, in run\n with ctx:\n File \"/usr/lib/python2.7/contextlib.py\", line 17, in __enter__\n return self.gen.next()\n File \"/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/staged_files_only.py\", line 35, in staged_files_only\n cmd_runner.run(['git', 'checkout', '--', '.'])\n File \"/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py\", line 77, in run\n returncode, replaced_cmd, retcode, output=(stdout, stderr),\npre_commit.prefixed_command_runner.CalledProcessError: Command: ['git', 'checkout', '--', '.']\nReturn code: 1\nExpected return code: 0\nOutput: (u'', u\"error: path 'foo.txt' is unmerged\\n\")\n\n(py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$ \n```\n\n", "before_files": [{"content": "from __future__ import print_function\n\nimport logging\nimport os\nimport pkg_resources\nimport shutil\nimport stat\nimport subprocess\nimport sys\nfrom asottile.ordereddict import OrderedDict\nfrom 
asottile.yaml import ordered_dump\nfrom asottile.yaml import ordered_load\nfrom plumbum import local\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit import color\nfrom pre_commit.clientlib.validate_config import CONFIG_JSON_SCHEMA\nfrom pre_commit.clientlib.validate_config import load_config\nfrom pre_commit.jsonschema_extensions import remove_defaults\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.repository import Repository\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\nCOLS = int(subprocess.Popen(['tput', 'cols'], stdout=subprocess.PIPE).communicate()[0])\n\nPASS_FAIL_LENGTH = 6\n\n\ndef install(runner):\n \"\"\"Install the pre-commit hooks.\"\"\"\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n with open(runner.pre_commit_path, 'w') as pre_commit_file_obj:\n pre_commit_file_obj.write(open(pre_commit_file).read())\n\n original_mode = os.stat(runner.pre_commit_path).st_mode\n os.chmod(\n runner.pre_commit_path,\n original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,\n )\n\n print('pre-commit installed at {0}'.format(runner.pre_commit_path))\n\n return 0\n\n\ndef uninstall(runner):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n if os.path.exists(runner.pre_commit_path):\n os.remove(runner.pre_commit_path)\n print('pre-commit uninstalled')\n return 0\n\n\nclass RepositoryCannotBeUpdatedError(RuntimeError):\n pass\n\n\ndef _update_repository(repo_config):\n \"\"\"Updates a repository to the tip of `master`. If the repository cannot\n be updated because a hook that is configured does not exist in `master`,\n this raises a RepositoryCannotBeUpdatedError\n\n Args:\n repo_config - A config for a repository\n \"\"\"\n repo = Repository(repo_config)\n\n with repo.in_checkout():\n local['git']['fetch']()\n head_sha = local['git']['rev-parse', 'origin/master']().strip()\n\n # Don't bother trying to update if our sha is the same\n if head_sha == repo_config['sha']:\n return repo_config\n\n # Construct a new config with the head sha\n new_config = OrderedDict(repo_config)\n new_config['sha'] = head_sha\n new_repo = Repository(new_config)\n\n # See if any of our hooks were deleted with the new commits\n hooks = set(repo.hooks.keys())\n hooks_missing = hooks - (hooks & set(new_repo.manifest.keys()))\n if hooks_missing:\n raise RepositoryCannotBeUpdatedError(\n 'Cannot update because the tip of master is missing these hooks:\\n'\n '{0}'.format(', '.join(sorted(hooks_missing)))\n )\n\n return remove_defaults([new_config], CONFIG_JSON_SCHEMA)[0]\n\n\ndef autoupdate(runner):\n \"\"\"Auto-update the pre-commit config to the latest versions of repos.\"\"\"\n retv = 0\n output_configs = []\n changed = False\n\n input_configs = load_config(\n runner.config_file_path,\n load_strategy=ordered_load,\n )\n\n for repo_config in input_configs:\n print('Updating {0}...'.format(repo_config['repo']), end='')\n try:\n new_repo_config = _update_repository(repo_config)\n except RepositoryCannotBeUpdatedError as error:\n print(error.args[0])\n output_configs.append(repo_config)\n retv = 1\n continue\n\n if new_repo_config['sha'] != repo_config['sha']:\n changed = True\n print(\n 'updating {0} -> {1}.'.format(\n repo_config['sha'], new_repo_config['sha'],\n )\n )\n output_configs.append(new_repo_config)\n else:\n print('already up to date.')\n output_configs.append(repo_config)\n\n if changed:\n with 
open(runner.config_file_path, 'w') as config_file:\n config_file.write(\n ordered_dump(output_configs, **C.YAML_DUMP_KWARGS)\n )\n\n return retv\n\n\ndef clean(runner):\n if os.path.exists(runner.hooks_workspace_path):\n shutil.rmtree(runner.hooks_workspace_path)\n print('Cleaned {0}.'.format(runner.hooks_workspace_path))\n return 0\n\n\ndef _run_single_hook(runner, repository, hook_id, args, write):\n if args.all_files:\n get_filenames = git.get_all_files_matching\n else:\n get_filenames = git.get_staged_files_matching\n\n hook = repository.hooks[hook_id]\n\n filenames = get_filenames(hook['files'], hook['exclude'])\n if not filenames:\n no_files_msg = '(no files to check) '\n skipped_msg = 'Skipped'\n write(\n '{0}{1}{2}{3}\\n'.format(\n hook['name'],\n '.' * (\n COLS -\n len(hook['name']) -\n len(no_files_msg) -\n len(skipped_msg) -\n 6\n ),\n no_files_msg,\n color.format_color(skipped_msg, color.TURQUOISE, args.color),\n )\n )\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n write(\n '{0}{1}'.format(\n hook['name'],\n '.' * (COLS - len(hook['name']) - PASS_FAIL_LENGTH - 6),\n ),\n )\n sys.stdout.flush()\n\n retcode, stdout, stderr = repository.run_hook(\n runner.cmd_runner,\n hook_id,\n filenames,\n )\n\n if retcode != repository.hooks[hook_id]['expected_return_value']:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n write(color.format_color(pass_fail, print_color, args.color) + '\\n')\n\n if (stdout or stderr) and (retcode or args.verbose):\n write('\\n')\n for output in (stdout, stderr):\n if output.strip():\n write(output.strip() + '\\n')\n write('\\n')\n\n return retcode\n\n\ndef _run_hooks(runner, args, write):\n \"\"\"Actually run the hooks.\"\"\"\n retval = 0\n\n for repo in runner.repositories:\n for hook_id in repo.hooks:\n retval |= _run_single_hook(runner, repo, hook_id, args, write=write)\n\n return retval\n\n\ndef _run_hook(runner, hook_id, args, write):\n for repo in runner.repositories:\n if hook_id in repo.hooks:\n return _run_single_hook(runner, repo, hook_id, args, write=write)\n else:\n write('No hook with id `{0}`\\n'.format(hook_id))\n return 1\n\n\ndef run(runner, args, write=sys.stdout.write):\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n\n if args.no_stash or args.all_files:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n\n with ctx:\n if args.hook:\n return _run_hook(runner, args.hook, args, write=write)\n else:\n return _run_hooks(runner, args, write=write)\n", "path": "pre_commit/commands.py"}], "after_files": [{"content": "from __future__ import print_function\n\nimport logging\nimport os\nimport pkg_resources\nimport shutil\nimport stat\nimport subprocess\nimport sys\nfrom asottile.ordereddict import OrderedDict\nfrom asottile.yaml import ordered_dump\nfrom asottile.yaml import ordered_load\nfrom plumbum import local\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit import color\nfrom pre_commit.clientlib.validate_config import CONFIG_JSON_SCHEMA\nfrom pre_commit.clientlib.validate_config import load_config\nfrom pre_commit.jsonschema_extensions import remove_defaults\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.repository import Repository\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import noop_context\n\n\nlogger 
= logging.getLogger('pre_commit')\n\nCOLS = int(subprocess.Popen(['tput', 'cols'], stdout=subprocess.PIPE).communicate()[0])\n\nPASS_FAIL_LENGTH = 6\n\n\ndef install(runner):\n \"\"\"Install the pre-commit hooks.\"\"\"\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n with open(runner.pre_commit_path, 'w') as pre_commit_file_obj:\n pre_commit_file_obj.write(open(pre_commit_file).read())\n\n original_mode = os.stat(runner.pre_commit_path).st_mode\n os.chmod(\n runner.pre_commit_path,\n original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,\n )\n\n print('pre-commit installed at {0}'.format(runner.pre_commit_path))\n\n return 0\n\n\ndef uninstall(runner):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n if os.path.exists(runner.pre_commit_path):\n os.remove(runner.pre_commit_path)\n print('pre-commit uninstalled')\n return 0\n\n\nclass RepositoryCannotBeUpdatedError(RuntimeError):\n pass\n\n\ndef _update_repository(repo_config):\n \"\"\"Updates a repository to the tip of `master`. If the repository cannot\n be updated because a hook that is configured does not exist in `master`,\n this raises a RepositoryCannotBeUpdatedError\n\n Args:\n repo_config - A config for a repository\n \"\"\"\n repo = Repository(repo_config)\n\n with repo.in_checkout():\n local['git']['fetch']()\n head_sha = local['git']['rev-parse', 'origin/master']().strip()\n\n # Don't bother trying to update if our sha is the same\n if head_sha == repo_config['sha']:\n return repo_config\n\n # Construct a new config with the head sha\n new_config = OrderedDict(repo_config)\n new_config['sha'] = head_sha\n new_repo = Repository(new_config)\n\n # See if any of our hooks were deleted with the new commits\n hooks = set(repo.hooks.keys())\n hooks_missing = hooks - (hooks & set(new_repo.manifest.keys()))\n if hooks_missing:\n raise RepositoryCannotBeUpdatedError(\n 'Cannot update because the tip of master is missing these hooks:\\n'\n '{0}'.format(', '.join(sorted(hooks_missing)))\n )\n\n return remove_defaults([new_config], CONFIG_JSON_SCHEMA)[0]\n\n\ndef autoupdate(runner):\n \"\"\"Auto-update the pre-commit config to the latest versions of repos.\"\"\"\n retv = 0\n output_configs = []\n changed = False\n\n input_configs = load_config(\n runner.config_file_path,\n load_strategy=ordered_load,\n )\n\n for repo_config in input_configs:\n print('Updating {0}...'.format(repo_config['repo']), end='')\n try:\n new_repo_config = _update_repository(repo_config)\n except RepositoryCannotBeUpdatedError as error:\n print(error.args[0])\n output_configs.append(repo_config)\n retv = 1\n continue\n\n if new_repo_config['sha'] != repo_config['sha']:\n changed = True\n print(\n 'updating {0} -> {1}.'.format(\n repo_config['sha'], new_repo_config['sha'],\n )\n )\n output_configs.append(new_repo_config)\n else:\n print('already up to date.')\n output_configs.append(repo_config)\n\n if changed:\n with open(runner.config_file_path, 'w') as config_file:\n config_file.write(\n ordered_dump(output_configs, **C.YAML_DUMP_KWARGS)\n )\n\n return retv\n\n\ndef clean(runner):\n if os.path.exists(runner.hooks_workspace_path):\n shutil.rmtree(runner.hooks_workspace_path)\n print('Cleaned {0}.'.format(runner.hooks_workspace_path))\n return 0\n\n\ndef _run_single_hook(runner, repository, hook_id, args, write):\n if args.all_files:\n get_filenames = git.get_all_files_matching\n else:\n get_filenames = git.get_staged_files_matching\n\n hook = repository.hooks[hook_id]\n\n filenames = get_filenames(hook['files'], 
hook['exclude'])\n if not filenames:\n no_files_msg = '(no files to check) '\n skipped_msg = 'Skipped'\n write(\n '{0}{1}{2}{3}\\n'.format(\n hook['name'],\n '.' * (\n COLS -\n len(hook['name']) -\n len(no_files_msg) -\n len(skipped_msg) -\n 6\n ),\n no_files_msg,\n color.format_color(skipped_msg, color.TURQUOISE, args.color),\n )\n )\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n write(\n '{0}{1}'.format(\n hook['name'],\n '.' * (COLS - len(hook['name']) - PASS_FAIL_LENGTH - 6),\n ),\n )\n sys.stdout.flush()\n\n retcode, stdout, stderr = repository.run_hook(\n runner.cmd_runner,\n hook_id,\n filenames,\n )\n\n if retcode != repository.hooks[hook_id]['expected_return_value']:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n write(color.format_color(pass_fail, print_color, args.color) + '\\n')\n\n if (stdout or stderr) and (retcode or args.verbose):\n write('\\n')\n for output in (stdout, stderr):\n if output.strip():\n write(output.strip() + '\\n')\n write('\\n')\n\n return retcode\n\n\ndef _run_hooks(runner, args, write):\n \"\"\"Actually run the hooks.\"\"\"\n retval = 0\n\n for repo in runner.repositories:\n for hook_id in repo.hooks:\n retval |= _run_single_hook(runner, repo, hook_id, args, write=write)\n\n return retval\n\n\ndef _run_hook(runner, hook_id, args, write):\n for repo in runner.repositories:\n if hook_id in repo.hooks:\n return _run_single_hook(runner, repo, hook_id, args, write=write)\n else:\n write('No hook with id `{0}`\\n'.format(hook_id))\n return 1\n\n\ndef _has_unmerged_paths(runner):\n _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])\n return bool(stdout.strip())\n\n\ndef run(runner, args, write=sys.stdout.write):\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths(runner):\n logger.error('Unmerged files. Resolve before committing.')\n return 1\n\n if args.no_stash or args.all_files:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n\n with ctx:\n if args.hook:\n return _run_hook(runner, args.hook, args, write=write)\n else:\n return _run_hooks(runner, args, write=write)\n", "path": "pre_commit/commands.py"}]}
num_tokens: 3,331
num_tokens_diff: 202

problem_id: gh_patches_debug_26963
source: rasdani/github-patches
task_type: git_diff
in_source_id: sktime__sktime-6439
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [DOC] Misleading docstring parameters for convert utility function of dask_to_pd and vice versa #### Describe the issue linked to the documentation The docstring of function `convert_dask_to_pandas` and `convert_pandas_to_dask` has been interchanged in terms of parameters and return object ```python def convert_dask_to_pandas(obj): """Convert dask DataFrame to pandas DataFrame, preserving MultiIndex. Parameters ---------- obj : pandas.DataFrame Returns ------- dask DataFrame MultiIndex levels 0 .. -1 of X are converted to columns of name __index__[indexname], where indexname is name of multiindex level, or the integer index if the level has no name other columns and column names are identical to those of X """ ``` Here the type of parameter for the function should be `dask.DataFrame` while the return type should be `pandas.DataFrame`. There's similar issue in the docstring of `convert_pandas_to_dask`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sktime/datatypes/_adapter/dask_to_pd.py` Content: ``` 1 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file) 2 """Converter utilities between dask and pandas, with multiindex convention. 3 4 Converts between: 5 pd.DataFrames with ordinary (single-level) index or pd.Multiindex, and 6 dask DataFrame 7 8 If pd.DataFrame has ordinary index, converts using dask compute/from_pandas 9 10 if pd.DataFrame has MultiIndex, converts and back-converts 11 MultiIndex columns to DataFrame columns with the name: 12 __index__[indexname], if level has a name indexname 13 __index__[index_iloc], if level has no indexname and is index_iloc-th level 14 index is replaced by a string index where tuples are replaced with str coerced elements 15 """ 16 import pandas as pd 17 18 from sktime.datatypes._common import _req 19 from sktime.datatypes._common import _ret as ret 20 21 22 def _is_mi_col(x): 23 return isinstance(x, str) and x.startswith("__index__") 24 25 26 def get_mi_cols(obj): 27 """Get multiindex cols from a dask object. 28 29 Parameters 30 ---------- 31 obj : dask DataFrame 32 33 Returns 34 ------- 35 list of pandas index elements 36 all column index elements of obj that start with __index__ 37 i.e., columns that are interpreted as multiindex columns in the correspondence 38 """ 39 return [x for x in obj.columns if _is_mi_col(x)] 40 41 42 def convert_dask_to_pandas(obj): 43 """Convert dask DataFrame to pandas DataFrame, preserving MultiIndex. 44 45 Parameters 46 ---------- 47 obj : pandas.DataFrame 48 49 Returns 50 ------- 51 dask DataFrame 52 MultiIndex levels 0 .. 
-1 of X are converted to columns of name 53 __index__[indexname], where indexname is name of multiindex level, 54 or the integer index if the level has no name 55 other columns and column names are identical to those of X 56 """ 57 obj = obj.compute() 58 59 def mi_name(x): 60 return x.split("__index__")[1] 61 62 def mi_names(names): 63 new_names = [mi_name(x) for x in names] 64 for i, name in enumerate(new_names): 65 if name == str(i): 66 new_names[i] = None 67 return new_names 68 69 multi_cols = get_mi_cols(obj) 70 71 # if has multi-index cols, move to pandas MultiIndex 72 if len(multi_cols) > 0: 73 obj = obj.set_index(multi_cols) 74 75 names = obj.index.names 76 new_names = mi_names(names) 77 new_names = new_names 78 79 obj.index.names = new_names 80 81 return obj 82 83 84 def convert_pandas_to_dask(obj, npartitions=1, chunksize=None, sort=True): 85 """Convert pandas DataFrame to dask DataFrame, preserving MultiIndex. 86 87 Parameters 88 ---------- 89 obj : dask DataFrame 90 npartitions : int or None, optional, default = 1 91 npartitions passed to dask from_pandas when converting obj to dask 92 chunksize : int or None, optional, default = None 93 chunksize passed to dask from_pandas when converting obj to dask 94 sort : bool, optional, default = True 95 sort passed to dask from_pandas when converting obj to dask 96 97 Returns 98 ------- 99 pandas.DataFrame 100 MultiIndex levels 0 .. -1 of X are converted to columns of name 101 __index__[indexname], where indexname is name of multiindex level, 102 or the integer index if the level has no name 103 other columns and column names are identical to those of X 104 """ 105 from dask.dataframe import from_pandas 106 107 def dask_mi_names(names): 108 res = list(names).copy() 109 for i, name in enumerate(names): 110 if name is None: 111 res[i] = str(i) 112 return [f"__index__{x}" for x in res] 113 114 if isinstance(obj.index, pd.MultiIndex): 115 names = obj.index.names 116 new_names = dask_mi_names(names) 117 new_index = [str(x) for x in obj.index] 118 119 obj = obj.copy() 120 obj.index.names = new_names 121 obj = obj.reset_index() 122 obj.index = new_index 123 124 obj = from_pandas(obj, npartitions=npartitions, chunksize=chunksize, sort=sort) 125 126 return obj 127 128 129 def check_dask_frame( 130 obj, return_metadata=False, var_name="obj", freq_set_check=False, scitype="Series" 131 ): 132 """Check dask frame, generic for sktime check format.""" 133 import dask 134 135 metadata = {} 136 137 if not isinstance(obj, dask.dataframe.core.DataFrame): 138 msg = f"{var_name} must be a dask DataFrame, found {type(obj)}" 139 return ret(False, msg, None, return_metadata) 140 141 # we now know obj is a dask DataFrame 142 143 index_cols = get_mi_cols(obj) 144 145 # check right number of cols depending on scitype 146 if scitype == "Series": 147 cols_msg = ( 148 f"{var_name} must have exactly one index column, " 149 f"found {len(index_cols)}, namely: {index_cols}" 150 ) 151 right_no_index_cols = len(index_cols) <= 1 152 elif scitype == "Panel": 153 cols_msg = ( 154 f"{var_name} must have exactly two index columns, " 155 f"found {len(index_cols)}, namely: {index_cols}" 156 ) 157 right_no_index_cols = len(index_cols) == 2 158 elif scitype == "Hierarchical": 159 cols_msg = ( 160 f"{var_name} must have three or more index columns, " 161 f"found {len(index_cols)}, namely: {index_cols}" 162 ) 163 right_no_index_cols = len(index_cols) >= 3 164 else: 165 return RuntimeError( 166 'scitype arg of check_dask_frame must be one of strings "Series", ' 167 f'"Panel", or 
"Hierarchical", but found {scitype}' 168 ) 169 170 if not right_no_index_cols: 171 # dask series should have at most one __index__ col 172 return ret(False, cols_msg, None, return_metadata) 173 174 if _req("is_empty", return_metadata): 175 metadata["is_empty"] = len(obj.index) < 1 or len(obj.columns) < 1 176 if _req("is_univariate", return_metadata): 177 metadata["is_univariate"] = len(obj.columns) == 1 178 if _req("n_features", return_metadata): 179 metadata["n_features"] = len(obj.columns) 180 if _req("feature_names", return_metadata): 181 metadata["feature_names"] = obj.columns.to_list() 182 183 # check that columns are unique 184 if not obj.columns.is_unique: 185 msg = f"{var_name} must have unique column indices, but found {obj.columns}" 186 return ret(False, msg, None, return_metadata) 187 188 # check whether the time index is of valid type 189 # if not is_in_valid_index_types(index): 190 # msg = ( 191 # f"{type(index)} is not supported for {var_name}, use " 192 # f"one of {VALID_INDEX_TYPES} or integer index instead." 193 # ) 194 # return ret(False, msg, None, return_metadata) 195 196 # Check time index is ordered in time 197 if not obj.index.is_monotonic_increasing.compute(): 198 msg = ( 199 f"The (time) index of {var_name} must be sorted " 200 f"monotonically increasing, but found: {obj.index}" 201 ) 202 return ret(False, msg, None, return_metadata) 203 204 if freq_set_check and isinstance(obj.index, pd.DatetimeIndex): 205 if obj.index.freq is None: 206 msg = f"{var_name} has DatetimeIndex, but no freq attribute set." 207 return ret(False, msg, None, return_metadata) 208 209 # check whether index is equally spaced or if there are any nans 210 # compute only if needed 211 if _req("is_equally_spaced", return_metadata): 212 # todo: logic for equal spacing 213 metadata["is_equally_spaced"] = True 214 if _req("has_nans", return_metadata): 215 metadata["has_nans"] = obj.isnull().values.any().compute() 216 217 if scitype in ["Panel", "Hierarchical"]: 218 if _req("n_instances", return_metadata): 219 instance_cols = index_cols[:-1] 220 metadata["n_instances"] = len(obj[instance_cols].drop_duplicates()) 221 222 if scitype in ["Hierarchical"]: 223 if _req("n_panels", return_metadata): 224 panel_cols = index_cols[:-2] 225 metadata["n_panels"] = len(obj[panel_cols].drop_duplicates()) 226 227 return ret(True, None, metadata, return_metadata) 228 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sktime/datatypes/_adapter/dask_to_pd.py b/sktime/datatypes/_adapter/dask_to_pd.py --- a/sktime/datatypes/_adapter/dask_to_pd.py +++ b/sktime/datatypes/_adapter/dask_to_pd.py @@ -44,11 +44,11 @@ Parameters ---------- - obj : pandas.DataFrame + obj : dask DataFrame Returns ------- - dask DataFrame + pandas.DataFrame MultiIndex levels 0 .. -1 of X are converted to columns of name __index__[indexname], where indexname is name of multiindex level, or the integer index if the level has no name @@ -86,7 +86,7 @@ Parameters ---------- - obj : dask DataFrame + obj : pandas.DataFrame npartitions : int or None, optional, default = 1 npartitions passed to dask from_pandas when converting obj to dask chunksize : int or None, optional, default = None @@ -96,7 +96,7 @@ Returns ------- - pandas.DataFrame + dask DataFrame MultiIndex levels 0 .. -1 of X are converted to columns of name __index__[indexname], where indexname is name of multiindex level, or the integer index if the level has no name
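With the docstrings swapped back, the actual contract reads correctly: `convert_pandas_to_dask` takes a `pandas.DataFrame` (possibly MultiIndexed) and returns a dask DataFrame whose `__index__...` columns carry the former index levels, while `convert_dask_to_pandas` reverses that and restores the MultiIndex. A minimal round-trip sketch, assuming dask is installed and importing from the private `_adapter` module shown above:

```python
import pandas as pd

from sktime.datatypes._adapter.dask_to_pd import (
    convert_dask_to_pandas,
    convert_pandas_to_dask,
)

# Two-level MultiIndex, the shape sktime uses for Panel data.
idx = pd.MultiIndex.from_product([["a", "b"], range(3)], names=["instance", "time"])
df = pd.DataFrame({"x": range(6)}, index=idx)

ddf = convert_pandas_to_dask(df, npartitions=1)   # pandas in, dask out
print(list(ddf.columns))   # ['__index__instance', '__index__time', 'x']

back = convert_dask_to_pandas(ddf)                # dask in, pandas out
print(back.index.names)    # index level names 'instance' and 'time' restored
```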
{"golden_diff": "diff --git a/sktime/datatypes/_adapter/dask_to_pd.py b/sktime/datatypes/_adapter/dask_to_pd.py\n--- a/sktime/datatypes/_adapter/dask_to_pd.py\n+++ b/sktime/datatypes/_adapter/dask_to_pd.py\n@@ -44,11 +44,11 @@\n \n Parameters\n ----------\n- obj : pandas.DataFrame\n+ obj : dask DataFrame\n \n Returns\n -------\n- dask DataFrame\n+ pandas.DataFrame\n MultiIndex levels 0 .. -1 of X are converted to columns of name\n __index__[indexname], where indexname is name of multiindex level,\n or the integer index if the level has no name\n@@ -86,7 +86,7 @@\n \n Parameters\n ----------\n- obj : dask DataFrame\n+ obj : pandas.DataFrame\n npartitions : int or None, optional, default = 1\n npartitions passed to dask from_pandas when converting obj to dask\n chunksize : int or None, optional, default = None\n@@ -96,7 +96,7 @@\n \n Returns\n -------\n- pandas.DataFrame\n+ dask DataFrame\n MultiIndex levels 0 .. -1 of X are converted to columns of name\n __index__[indexname], where indexname is name of multiindex level,\n or the integer index if the level has no name\n", "issue": "[DOC] Misleading docstring parameters for convert utility function of dask_to_pd and vice versa\n#### Describe the issue linked to the documentation\r\nThe docstring of function `convert_dask_to_pandas` and `convert_pandas_to_dask` has been interchanged in terms of parameters and return object\r\n\r\n```python\r\ndef convert_dask_to_pandas(obj):\r\n \"\"\"Convert dask DataFrame to pandas DataFrame, preserving MultiIndex.\r\n\r\n Parameters\r\n ----------\r\n obj : pandas.DataFrame\r\n\r\n Returns\r\n -------\r\n dask DataFrame\r\n MultiIndex levels 0 .. -1 of X are converted to columns of name\r\n __index__[indexname], where indexname is name of multiindex level,\r\n or the integer index if the level has no name\r\n other columns and column names are identical to those of X\r\n \"\"\"\r\n```\r\n\r\nHere the type of parameter for the function should be `dask.DataFrame` while the return type should be `pandas.DataFrame`. 
There's similar issue in the docstring of `convert_pandas_to_dask`.\n", "before_files": [{"content": "# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Converter utilities between dask and pandas, with multiindex convention.\n\nConverts between:\npd.DataFrames with ordinary (single-level) index or pd.Multiindex, and\ndask DataFrame\n\nIf pd.DataFrame has ordinary index, converts using dask compute/from_pandas\n\nif pd.DataFrame has MultiIndex, converts and back-converts\nMultiIndex columns to DataFrame columns with the name:\n __index__[indexname], if level has a name indexname\n __index__[index_iloc], if level has no indexname and is index_iloc-th level\nindex is replaced by a string index where tuples are replaced with str coerced elements\n\"\"\"\nimport pandas as pd\n\nfrom sktime.datatypes._common import _req\nfrom sktime.datatypes._common import _ret as ret\n\n\ndef _is_mi_col(x):\n return isinstance(x, str) and x.startswith(\"__index__\")\n\n\ndef get_mi_cols(obj):\n \"\"\"Get multiindex cols from a dask object.\n\n Parameters\n ----------\n obj : dask DataFrame\n\n Returns\n -------\n list of pandas index elements\n all column index elements of obj that start with __index__\n i.e., columns that are interpreted as multiindex columns in the correspondence\n \"\"\"\n return [x for x in obj.columns if _is_mi_col(x)]\n\n\ndef convert_dask_to_pandas(obj):\n \"\"\"Convert dask DataFrame to pandas DataFrame, preserving MultiIndex.\n\n Parameters\n ----------\n obj : pandas.DataFrame\n\n Returns\n -------\n dask DataFrame\n MultiIndex levels 0 .. -1 of X are converted to columns of name\n __index__[indexname], where indexname is name of multiindex level,\n or the integer index if the level has no name\n other columns and column names are identical to those of X\n \"\"\"\n obj = obj.compute()\n\n def mi_name(x):\n return x.split(\"__index__\")[1]\n\n def mi_names(names):\n new_names = [mi_name(x) for x in names]\n for i, name in enumerate(new_names):\n if name == str(i):\n new_names[i] = None\n return new_names\n\n multi_cols = get_mi_cols(obj)\n\n # if has multi-index cols, move to pandas MultiIndex\n if len(multi_cols) > 0:\n obj = obj.set_index(multi_cols)\n\n names = obj.index.names\n new_names = mi_names(names)\n new_names = new_names\n\n obj.index.names = new_names\n\n return obj\n\n\ndef convert_pandas_to_dask(obj, npartitions=1, chunksize=None, sort=True):\n \"\"\"Convert pandas DataFrame to dask DataFrame, preserving MultiIndex.\n\n Parameters\n ----------\n obj : dask DataFrame\n npartitions : int or None, optional, default = 1\n npartitions passed to dask from_pandas when converting obj to dask\n chunksize : int or None, optional, default = None\n chunksize passed to dask from_pandas when converting obj to dask\n sort : bool, optional, default = True\n sort passed to dask from_pandas when converting obj to dask\n\n Returns\n -------\n pandas.DataFrame\n MultiIndex levels 0 .. 
-1 of X are converted to columns of name\n __index__[indexname], where indexname is name of multiindex level,\n or the integer index if the level has no name\n other columns and column names are identical to those of X\n \"\"\"\n from dask.dataframe import from_pandas\n\n def dask_mi_names(names):\n res = list(names).copy()\n for i, name in enumerate(names):\n if name is None:\n res[i] = str(i)\n return [f\"__index__{x}\" for x in res]\n\n if isinstance(obj.index, pd.MultiIndex):\n names = obj.index.names\n new_names = dask_mi_names(names)\n new_index = [str(x) for x in obj.index]\n\n obj = obj.copy()\n obj.index.names = new_names\n obj = obj.reset_index()\n obj.index = new_index\n\n obj = from_pandas(obj, npartitions=npartitions, chunksize=chunksize, sort=sort)\n\n return obj\n\n\ndef check_dask_frame(\n obj, return_metadata=False, var_name=\"obj\", freq_set_check=False, scitype=\"Series\"\n):\n \"\"\"Check dask frame, generic for sktime check format.\"\"\"\n import dask\n\n metadata = {}\n\n if not isinstance(obj, dask.dataframe.core.DataFrame):\n msg = f\"{var_name} must be a dask DataFrame, found {type(obj)}\"\n return ret(False, msg, None, return_metadata)\n\n # we now know obj is a dask DataFrame\n\n index_cols = get_mi_cols(obj)\n\n # check right number of cols depending on scitype\n if scitype == \"Series\":\n cols_msg = (\n f\"{var_name} must have exactly one index column, \"\n f\"found {len(index_cols)}, namely: {index_cols}\"\n )\n right_no_index_cols = len(index_cols) <= 1\n elif scitype == \"Panel\":\n cols_msg = (\n f\"{var_name} must have exactly two index columns, \"\n f\"found {len(index_cols)}, namely: {index_cols}\"\n )\n right_no_index_cols = len(index_cols) == 2\n elif scitype == \"Hierarchical\":\n cols_msg = (\n f\"{var_name} must have three or more index columns, \"\n f\"found {len(index_cols)}, namely: {index_cols}\"\n )\n right_no_index_cols = len(index_cols) >= 3\n else:\n return RuntimeError(\n 'scitype arg of check_dask_frame must be one of strings \"Series\", '\n f'\"Panel\", or \"Hierarchical\", but found {scitype}'\n )\n\n if not right_no_index_cols:\n # dask series should have at most one __index__ col\n return ret(False, cols_msg, None, return_metadata)\n\n if _req(\"is_empty\", return_metadata):\n metadata[\"is_empty\"] = len(obj.index) < 1 or len(obj.columns) < 1\n if _req(\"is_univariate\", return_metadata):\n metadata[\"is_univariate\"] = len(obj.columns) == 1\n if _req(\"n_features\", return_metadata):\n metadata[\"n_features\"] = len(obj.columns)\n if _req(\"feature_names\", return_metadata):\n metadata[\"feature_names\"] = obj.columns.to_list()\n\n # check that columns are unique\n if not obj.columns.is_unique:\n msg = f\"{var_name} must have unique column indices, but found {obj.columns}\"\n return ret(False, msg, None, return_metadata)\n\n # check whether the time index is of valid type\n # if not is_in_valid_index_types(index):\n # msg = (\n # f\"{type(index)} is not supported for {var_name}, use \"\n # f\"one of {VALID_INDEX_TYPES} or integer index instead.\"\n # )\n # return ret(False, msg, None, return_metadata)\n\n # Check time index is ordered in time\n if not obj.index.is_monotonic_increasing.compute():\n msg = (\n f\"The (time) index of {var_name} must be sorted \"\n f\"monotonically increasing, but found: {obj.index}\"\n )\n return ret(False, msg, None, return_metadata)\n\n if freq_set_check and isinstance(obj.index, pd.DatetimeIndex):\n if obj.index.freq is None:\n msg = f\"{var_name} has DatetimeIndex, but no freq attribute set.\"\n 
return ret(False, msg, None, return_metadata)\n\n # check whether index is equally spaced or if there are any nans\n # compute only if needed\n if _req(\"is_equally_spaced\", return_metadata):\n # todo: logic for equal spacing\n metadata[\"is_equally_spaced\"] = True\n if _req(\"has_nans\", return_metadata):\n metadata[\"has_nans\"] = obj.isnull().values.any().compute()\n\n if scitype in [\"Panel\", \"Hierarchical\"]:\n if _req(\"n_instances\", return_metadata):\n instance_cols = index_cols[:-1]\n metadata[\"n_instances\"] = len(obj[instance_cols].drop_duplicates())\n\n if scitype in [\"Hierarchical\"]:\n if _req(\"n_panels\", return_metadata):\n panel_cols = index_cols[:-2]\n metadata[\"n_panels\"] = len(obj[panel_cols].drop_duplicates())\n\n return ret(True, None, metadata, return_metadata)\n", "path": "sktime/datatypes/_adapter/dask_to_pd.py"}], "after_files": [{"content": "# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Converter utilities between dask and pandas, with multiindex convention.\n\nConverts between:\npd.DataFrames with ordinary (single-level) index or pd.Multiindex, and\ndask DataFrame\n\nIf pd.DataFrame has ordinary index, converts using dask compute/from_pandas\n\nif pd.DataFrame has MultiIndex, converts and back-converts\nMultiIndex columns to DataFrame columns with the name:\n __index__[indexname], if level has a name indexname\n __index__[index_iloc], if level has no indexname and is index_iloc-th level\nindex is replaced by a string index where tuples are replaced with str coerced elements\n\"\"\"\nimport pandas as pd\n\nfrom sktime.datatypes._common import _req\nfrom sktime.datatypes._common import _ret as ret\n\n\ndef _is_mi_col(x):\n return isinstance(x, str) and x.startswith(\"__index__\")\n\n\ndef get_mi_cols(obj):\n \"\"\"Get multiindex cols from a dask object.\n\n Parameters\n ----------\n obj : dask DataFrame\n\n Returns\n -------\n list of pandas index elements\n all column index elements of obj that start with __index__\n i.e., columns that are interpreted as multiindex columns in the correspondence\n \"\"\"\n return [x for x in obj.columns if _is_mi_col(x)]\n\n\ndef convert_dask_to_pandas(obj):\n \"\"\"Convert dask DataFrame to pandas DataFrame, preserving MultiIndex.\n\n Parameters\n ----------\n obj : dask DataFrame\n\n Returns\n -------\n pandas.DataFrame\n MultiIndex levels 0 .. 
-1 of X are converted to columns of name\n __index__[indexname], where indexname is name of multiindex level,\n or the integer index if the level has no name\n other columns and column names are identical to those of X\n \"\"\"\n obj = obj.compute()\n\n def mi_name(x):\n return x.split(\"__index__\")[1]\n\n def mi_names(names):\n new_names = [mi_name(x) for x in names]\n for i, name in enumerate(new_names):\n if name == str(i):\n new_names[i] = None\n return new_names\n\n multi_cols = get_mi_cols(obj)\n\n # if has multi-index cols, move to pandas MultiIndex\n if len(multi_cols) > 0:\n obj = obj.set_index(multi_cols)\n\n names = obj.index.names\n new_names = mi_names(names)\n new_names = new_names\n\n obj.index.names = new_names\n\n return obj\n\n\ndef convert_pandas_to_dask(obj, npartitions=1, chunksize=None, sort=True):\n \"\"\"Convert pandas DataFrame to dask DataFrame, preserving MultiIndex.\n\n Parameters\n ----------\n obj : pandas.DataFrame\n npartitions : int or None, optional, default = 1\n npartitions passed to dask from_pandas when converting obj to dask\n chunksize : int or None, optional, default = None\n chunksize passed to dask from_pandas when converting obj to dask\n sort : bool, optional, default = True\n sort passed to dask from_pandas when converting obj to dask\n\n Returns\n -------\n dask DataFrame\n MultiIndex levels 0 .. -1 of X are converted to columns of name\n __index__[indexname], where indexname is name of multiindex level,\n or the integer index if the level has no name\n other columns and column names are identical to those of X\n \"\"\"\n from dask.dataframe import from_pandas\n\n def dask_mi_names(names):\n res = list(names).copy()\n for i, name in enumerate(names):\n if name is None:\n res[i] = str(i)\n return [f\"__index__{x}\" for x in res]\n\n if isinstance(obj.index, pd.MultiIndex):\n names = obj.index.names\n new_names = dask_mi_names(names)\n new_index = [str(x) for x in obj.index]\n\n obj = obj.copy()\n obj.index.names = new_names\n obj = obj.reset_index()\n obj.index = new_index\n\n obj = from_pandas(obj, npartitions=npartitions, chunksize=chunksize, sort=sort)\n\n return obj\n\n\ndef check_dask_frame(\n obj, return_metadata=False, var_name=\"obj\", freq_set_check=False, scitype=\"Series\"\n):\n \"\"\"Check dask frame, generic for sktime check format.\"\"\"\n import dask\n\n metadata = {}\n\n if not isinstance(obj, dask.dataframe.core.DataFrame):\n msg = f\"{var_name} must be a dask DataFrame, found {type(obj)}\"\n return ret(False, msg, None, return_metadata)\n\n # we now know obj is a dask DataFrame\n\n index_cols = get_mi_cols(obj)\n\n # check right number of cols depending on scitype\n if scitype == \"Series\":\n cols_msg = (\n f\"{var_name} must have exactly one index column, \"\n f\"found {len(index_cols)}, namely: {index_cols}\"\n )\n right_no_index_cols = len(index_cols) <= 1\n elif scitype == \"Panel\":\n cols_msg = (\n f\"{var_name} must have exactly two index columns, \"\n f\"found {len(index_cols)}, namely: {index_cols}\"\n )\n right_no_index_cols = len(index_cols) == 2\n elif scitype == \"Hierarchical\":\n cols_msg = (\n f\"{var_name} must have three or more index columns, \"\n f\"found {len(index_cols)}, namely: {index_cols}\"\n )\n right_no_index_cols = len(index_cols) >= 3\n else:\n return RuntimeError(\n 'scitype arg of check_dask_frame must be one of strings \"Series\", '\n f'\"Panel\", or \"Hierarchical\", but found {scitype}'\n )\n\n if not right_no_index_cols:\n # dask series should have at most one __index__ col\n return 
ret(False, cols_msg, None, return_metadata)\n\n if _req(\"is_empty\", return_metadata):\n metadata[\"is_empty\"] = len(obj.index) < 1 or len(obj.columns) < 1\n if _req(\"is_univariate\", return_metadata):\n metadata[\"is_univariate\"] = len(obj.columns) == 1\n if _req(\"n_features\", return_metadata):\n metadata[\"n_features\"] = len(obj.columns)\n if _req(\"feature_names\", return_metadata):\n metadata[\"feature_names\"] = obj.columns.to_list()\n\n # check that columns are unique\n if not obj.columns.is_unique:\n msg = f\"{var_name} must have unique column indices, but found {obj.columns}\"\n return ret(False, msg, None, return_metadata)\n\n # check whether the time index is of valid type\n # if not is_in_valid_index_types(index):\n # msg = (\n # f\"{type(index)} is not supported for {var_name}, use \"\n # f\"one of {VALID_INDEX_TYPES} or integer index instead.\"\n # )\n # return ret(False, msg, None, return_metadata)\n\n # Check time index is ordered in time\n if not obj.index.is_monotonic_increasing.compute():\n msg = (\n f\"The (time) index of {var_name} must be sorted \"\n f\"monotonically increasing, but found: {obj.index}\"\n )\n return ret(False, msg, None, return_metadata)\n\n if freq_set_check and isinstance(obj.index, pd.DatetimeIndex):\n if obj.index.freq is None:\n msg = f\"{var_name} has DatetimeIndex, but no freq attribute set.\"\n return ret(False, msg, None, return_metadata)\n\n # check whether index is equally spaced or if there are any nans\n # compute only if needed\n if _req(\"is_equally_spaced\", return_metadata):\n # todo: logic for equal spacing\n metadata[\"is_equally_spaced\"] = True\n if _req(\"has_nans\", return_metadata):\n metadata[\"has_nans\"] = obj.isnull().values.any().compute()\n\n if scitype in [\"Panel\", \"Hierarchical\"]:\n if _req(\"n_instances\", return_metadata):\n instance_cols = index_cols[:-1]\n metadata[\"n_instances\"] = len(obj[instance_cols].drop_duplicates())\n\n if scitype in [\"Hierarchical\"]:\n if _req(\"n_panels\", return_metadata):\n panel_cols = index_cols[:-2]\n metadata[\"n_panels\"] = len(obj[panel_cols].drop_duplicates())\n\n return ret(True, None, metadata, return_metadata)\n", "path": "sktime/datatypes/_adapter/dask_to_pd.py"}]}
num_tokens: 2,982
num_tokens_diff: 315

problem_id: gh_patches_debug_8796
source: rasdani/github-patches
task_type: git_diff
in_source_id: archinstall__archinstall-249
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AttributeError: module 'pathlib' has no attribute 'Paht' This is a serious typo in the code of luks.py I tried to create an encrypted luks partitiom but end up with this error in the title Please fix as soon as possible --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `archinstall/lib/luks.py` Content: ``` 1 import os 2 import shlex 3 import time 4 import pathlib 5 from .exceptions import * 6 from .general import * 7 from .disk import Partition 8 from .output import log, LOG_LEVELS 9 from .storage import storage 10 11 class luks2(): 12 def __init__(self, partition, mountpoint, password, key_file=None, auto_unmount=False, *args, **kwargs): 13 self.password = password 14 self.partition = partition 15 self.mountpoint = mountpoint 16 self.args = args 17 self.kwargs = kwargs 18 self.key_file = key_file 19 self.auto_unmount = auto_unmount 20 self.filesystem = 'crypto_LUKS' 21 self.mapdev = None 22 23 def __enter__(self): 24 #if self.partition.allow_formatting: 25 # self.key_file = self.encrypt(self.partition, *self.args, **self.kwargs) 26 #else: 27 if not self.key_file: 28 self.key_file = f"/tmp/{os.path.basename(self.partition.path)}.disk_pw" # TODO: Make disk-pw-file randomly unique? 29 30 if type(self.password) != bytes: 31 self.password = bytes(self.password, 'UTF-8') 32 33 with open(self.key_file, 'wb') as fh: 34 fh.write(self.password) 35 36 return self.unlock(self.partition, self.mountpoint, self.key_file) 37 38 def __exit__(self, *args, **kwargs): 39 # TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager 40 if self.auto_unmount: 41 self.close() 42 43 if len(args) >= 2 and args[1]: 44 raise args[1] 45 return True 46 47 def encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None): 48 if not self.partition.allow_formatting: 49 raise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.') 50 51 log(f'Encrypting {partition} (This might take a while)', level=LOG_LEVELS.Info) 52 53 if not key_file: 54 if self.key_file: 55 key_file = self.key_file 56 else: 57 key_file = f"/tmp/{os.path.basename(self.partition.path)}.disk_pw" # TODO: Make disk-pw-file randomly unique? 
58 59 if not password: 60 password = self.password 61 62 if type(password) != bytes: 63 password = bytes(password, 'UTF-8') 64 65 with open(key_file, 'wb') as fh: 66 fh.write(password) 67 68 cryptsetup_args = shlex.join([ 69 '/usr/bin/cryptsetup', 70 '--batch-mode', 71 '--verbose', 72 '--type', 'luks2', 73 '--pbkdf', 'argon2i', 74 '--hash', hash_type, 75 '--key-size', str(key_size), 76 '--iter-time', str(iter_time), 77 '--key-file', os.path.abspath(key_file), 78 '--use-urandom', 79 'luksFormat', partition.path, 80 ]) 81 82 try: 83 # Try to setup the crypt-device 84 cmd_handle = sys_command(cryptsetup_args) 85 except SysCallError as err: 86 if err.exit_code == 256: 87 log(f'{partition} is being used, trying to unmount and crypt-close the device and running one more attempt at encrypting the device.', level=LOG_LEVELS.Debug) 88 # Partition was in use, unmount it and try again 89 partition.unmount() 90 91 # Get crypt-information about the device by doing a reverse lookup starting with the partition path 92 # For instance: /dev/sda 93 devinfo = json.loads(b''.join(sys_command(f"lsblk --fs -J {partition.path}")).decode('UTF-8'))['blockdevices'][0] 94 95 # For each child (sub-partition/sub-device) 96 if len(children := devinfo.get('children', [])): 97 for child in children: 98 # Unmount the child location 99 if child_mountpoint := child.get('mountpoint', None): 100 log(f'Unmounting {child_mountpoint}', level=LOG_LEVELS.Debug) 101 sys_command(f"umount -R {child_mountpoint}") 102 103 # And close it if possible. 104 log(f"Closing crypt device {child['name']}", level=LOG_LEVELS.Debug) 105 sys_command(f"cryptsetup close {child['name']}") 106 107 # Then try again to set up the crypt-device 108 cmd_handle = sys_command(cryptsetup_args) 109 else: 110 raise err 111 112 if cmd_handle.exit_code != 0: 113 raise DiskError(f'Could not encrypt volume "{partition.path}": {cmd_output}') 114 115 return key_file 116 117 def unlock(self, partition, mountpoint, key_file): 118 """ 119 Mounts a lukts2 compatible partition to a certain mountpoint. 120 Keyfile must be specified as there's no way to interact with the pw-prompt atm. 121 122 :param mountpoint: The name without absolute path, for instance "luksdev" will point to /dev/mapper/luksdev 123 :type mountpoint: str 124 """ 125 from .disk import get_filesystem_type 126 if '/' in mountpoint: 127 os.path.basename(mountpoint) # TODO: Raise exception instead? 
128 129 wait_timer = time.time() 130 while pathlib.Paht(partition.path).exists() is False and time.time() - wait_timer < 10: 131 time.sleep(0.025) 132 133 sys_command(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2') 134 if os.path.islink(f'/dev/mapper/{mountpoint}'): 135 self.mapdev = f'/dev/mapper/{mountpoint}' 136 unlocked_partition = Partition(self.mapdev, None, encrypted=True, filesystem=get_filesystem_type(self.mapdev), autodetect_filesystem=False) 137 unlocked_partition.allow_formatting = self.partition.allow_formatting 138 return unlocked_partition 139 140 def close(self, mountpoint=None): 141 if not mountpoint: 142 mountpoint = self.mapdev 143 144 sys_command(f'/usr/bin/cryptsetup close {self.mapdev}') 145 return os.path.islink(self.mapdev) is False 146 147 def format(self, path): 148 if (handle := sys_command(f"/usr/bin/cryptsetup -q -v luksErase {path}")).exit_code != 0: 149 raise DiskError(f'Could not format {path} with {self.filesystem} because: {b"".join(handle)}') 150 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py --- a/archinstall/lib/luks.py +++ b/archinstall/lib/luks.py @@ -127,7 +127,7 @@ os.path.basename(mountpoint) # TODO: Raise exception instead? wait_timer = time.time() - while pathlib.Paht(partition.path).exists() is False and time.time() - wait_timer < 10: + while pathlib.Path(partition.path).exists() is False and time.time() - wait_timer < 10: time.sleep(0.025) sys_command(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')
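The patch above only corrects the misspelled attribute (`pathlib.Paht` to `pathlib.Path`); the wait loop around it is unchanged. As a standalone illustration of that wait-for-device pattern, here is a minimal sketch. The helper name, signature and defaults are illustrative and are not part of archinstall:

```python
import pathlib
import time


def wait_for_path(path, timeout=10.0, poll=0.025):
    """Poll until `path` exists or `timeout` seconds have passed.

    Mirrors the loop patched in luks.py: the block device node may
    appear asynchronously, so the caller waits briefly for it instead
    of failing immediately.
    """
    deadline = time.time() + timeout
    while not pathlib.Path(path).exists() and time.time() < deadline:
        time.sleep(poll)
    return pathlib.Path(path).exists()


# Example (hypothetical device path):
# wait_for_path("/dev/mapper/luksdev")
```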
{"golden_diff": "diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py\n--- a/archinstall/lib/luks.py\n+++ b/archinstall/lib/luks.py\n@@ -127,7 +127,7 @@\n \t\t\tos.path.basename(mountpoint) # TODO: Raise exception instead?\n \n \t\twait_timer = time.time()\n-\t\twhile pathlib.Paht(partition.path).exists() is False and time.time() - wait_timer < 10:\n+\t\twhile pathlib.Path(partition.path).exists() is False and time.time() - wait_timer < 10:\n \t\t\ttime.sleep(0.025)\n \n \t\tsys_command(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')\n", "issue": "AttributeError: module 'pathlib' has no attribute 'Paht'\nThis is a serious typo in the code of luks.py\n\nI tried to create an encrypted luks partitiom but end up with this error in the title\n\nPlease fix as soon as possible\n\n", "before_files": [{"content": "import os\nimport shlex\nimport time\nimport pathlib\nfrom .exceptions import *\nfrom .general import *\nfrom .disk import Partition\nfrom .output import log, LOG_LEVELS\nfrom .storage import storage\n\nclass luks2():\n\tdef __init__(self, partition, mountpoint, password, key_file=None, auto_unmount=False, *args, **kwargs):\n\t\tself.password = password\n\t\tself.partition = partition\n\t\tself.mountpoint = mountpoint\n\t\tself.args = args\n\t\tself.kwargs = kwargs\n\t\tself.key_file = key_file\n\t\tself.auto_unmount = auto_unmount\n\t\tself.filesystem = 'crypto_LUKS'\n\t\tself.mapdev = None\n\n\tdef __enter__(self):\n\t\t#if self.partition.allow_formatting:\n\t\t#\tself.key_file = self.encrypt(self.partition, *self.args, **self.kwargs)\n\t\t#else:\n\t\tif not self.key_file:\n\t\t\tself.key_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\t\t\n\t\tif type(self.password) != bytes:\n\t\t\tself.password = bytes(self.password, 'UTF-8')\n\n\t\twith open(self.key_file, 'wb') as fh:\n\t\t\tfh.write(self.password)\n\n\t\treturn self.unlock(self.partition, self.mountpoint, self.key_file)\n\n\tdef __exit__(self, *args, **kwargs):\n\t\t# TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager\n\t\tif self.auto_unmount:\n\t\t\tself.close()\n\n\t\tif len(args) >= 2 and args[1]:\n\t\t\traise args[1]\n\t\treturn True\n\n\tdef encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):\n\t\tif not self.partition.allow_formatting:\n\t\t\traise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.')\n\n\t\tlog(f'Encrypting {partition} (This might take a while)', level=LOG_LEVELS.Info)\n\n\t\tif not key_file:\n\t\t\tif self.key_file:\n\t\t\t\tkey_file = self.key_file\n\t\t\telse:\n\t\t\t\tkey_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\n\t\tif not password:\n\t\t\tpassword = self.password\n\n\t\tif type(password) != bytes:\n\t\t\tpassword = bytes(password, 'UTF-8')\n\n\t\twith open(key_file, 'wb') as fh:\n\t\t\tfh.write(password)\n\n\t\tcryptsetup_args = shlex.join([\n\t\t\t'/usr/bin/cryptsetup',\n\t\t\t'--batch-mode',\n\t\t\t'--verbose',\n\t\t\t'--type', 'luks2',\n\t\t\t'--pbkdf', 'argon2i',\n\t\t\t'--hash', hash_type,\n\t\t\t'--key-size', str(key_size),\n\t\t\t'--iter-time', str(iter_time),\n\t\t\t'--key-file', os.path.abspath(key_file),\n\t\t\t'--use-urandom',\n\t\t\t'luksFormat', partition.path,\n\t\t])\n\n\t\ttry:\n\t\t\t# Try to setup the crypt-device\n\t\t\tcmd_handle = 
sys_command(cryptsetup_args)\n\t\texcept SysCallError as err:\n\t\t\tif err.exit_code == 256:\n\t\t\t\tlog(f'{partition} is being used, trying to unmount and crypt-close the device and running one more attempt at encrypting the device.', level=LOG_LEVELS.Debug)\n\t\t\t\t# Partition was in use, unmount it and try again\n\t\t\t\tpartition.unmount()\n\n\t\t\t\t# Get crypt-information about the device by doing a reverse lookup starting with the partition path\n\t\t\t\t# For instance: /dev/sda\n\t\t\t\tdevinfo = json.loads(b''.join(sys_command(f\"lsblk --fs -J {partition.path}\")).decode('UTF-8'))['blockdevices'][0]\n\n\t\t\t\t# For each child (sub-partition/sub-device)\n\t\t\t\tif len(children := devinfo.get('children', [])):\n\t\t\t\t\tfor child in children:\n\t\t\t\t\t\t# Unmount the child location\n\t\t\t\t\t\tif child_mountpoint := child.get('mountpoint', None):\n\t\t\t\t\t\t\tlog(f'Unmounting {child_mountpoint}', level=LOG_LEVELS.Debug)\n\t\t\t\t\t\t\tsys_command(f\"umount -R {child_mountpoint}\")\n\n\t\t\t\t\t\t# And close it if possible.\n\t\t\t\t\t\tlog(f\"Closing crypt device {child['name']}\", level=LOG_LEVELS.Debug)\n\t\t\t\t\t\tsys_command(f\"cryptsetup close {child['name']}\")\n\n\t\t\t\t# Then try again to set up the crypt-device\n\t\t\t\tcmd_handle = sys_command(cryptsetup_args)\n\t\t\telse:\n\t\t\t\traise err\n\n\t\tif cmd_handle.exit_code != 0:\n\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {cmd_output}')\n\t\n\t\treturn key_file\n\n\tdef unlock(self, partition, mountpoint, key_file):\n\t\t\"\"\"\n\t\tMounts a lukts2 compatible partition to a certain mountpoint.\n\t\tKeyfile must be specified as there's no way to interact with the pw-prompt atm.\n\n\t\t:param mountpoint: The name without absolute path, for instance \"luksdev\" will point to /dev/mapper/luksdev\n\t\t:type mountpoint: str\n\t\t\"\"\"\n\t\tfrom .disk import get_filesystem_type\n\t\tif '/' in mountpoint:\n\t\t\tos.path.basename(mountpoint) # TODO: Raise exception instead?\n\n\t\twait_timer = time.time()\n\t\twhile pathlib.Paht(partition.path).exists() is False and time.time() - wait_timer < 10:\n\t\t\ttime.sleep(0.025)\n\n\t\tsys_command(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')\n\t\tif os.path.islink(f'/dev/mapper/{mountpoint}'):\n\t\t\tself.mapdev = f'/dev/mapper/{mountpoint}'\n\t\t\tunlocked_partition = Partition(self.mapdev, None, encrypted=True, filesystem=get_filesystem_type(self.mapdev), autodetect_filesystem=False)\n\t\t\tunlocked_partition.allow_formatting = self.partition.allow_formatting\n\t\t\treturn unlocked_partition\n\n\tdef close(self, mountpoint=None):\n\t\tif not mountpoint:\n\t\t\tmountpoint = self.mapdev\n\n\t\tsys_command(f'/usr/bin/cryptsetup close {self.mapdev}')\n\t\treturn os.path.islink(self.mapdev) is False\n\n\tdef format(self, path):\n\t\tif (handle := sys_command(f\"/usr/bin/cryptsetup -q -v luksErase {path}\")).exit_code != 0:\n\t\t\traise DiskError(f'Could not format {path} with {self.filesystem} because: {b\"\".join(handle)}')\n", "path": "archinstall/lib/luks.py"}], "after_files": [{"content": "import os\nimport shlex\nimport time\nimport pathlib\nfrom .exceptions import *\nfrom .general import *\nfrom .disk import Partition\nfrom .output import log, LOG_LEVELS\nfrom .storage import storage\n\nclass luks2():\n\tdef __init__(self, partition, mountpoint, password, key_file=None, auto_unmount=False, *args, **kwargs):\n\t\tself.password = password\n\t\tself.partition = 
partition\n\t\tself.mountpoint = mountpoint\n\t\tself.args = args\n\t\tself.kwargs = kwargs\n\t\tself.key_file = key_file\n\t\tself.auto_unmount = auto_unmount\n\t\tself.filesystem = 'crypto_LUKS'\n\t\tself.mapdev = None\n\n\tdef __enter__(self):\n\t\t#if self.partition.allow_formatting:\n\t\t#\tself.key_file = self.encrypt(self.partition, *self.args, **self.kwargs)\n\t\t#else:\n\t\tif not self.key_file:\n\t\t\tself.key_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\t\t\n\t\tif type(self.password) != bytes:\n\t\t\tself.password = bytes(self.password, 'UTF-8')\n\n\t\twith open(self.key_file, 'wb') as fh:\n\t\t\tfh.write(self.password)\n\n\t\treturn self.unlock(self.partition, self.mountpoint, self.key_file)\n\n\tdef __exit__(self, *args, **kwargs):\n\t\t# TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager\n\t\tif self.auto_unmount:\n\t\t\tself.close()\n\n\t\tif len(args) >= 2 and args[1]:\n\t\t\traise args[1]\n\t\treturn True\n\n\tdef encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):\n\t\tif not self.partition.allow_formatting:\n\t\t\traise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.')\n\n\t\tlog(f'Encrypting {partition} (This might take a while)', level=LOG_LEVELS.Info)\n\n\t\tif not key_file:\n\t\t\tif self.key_file:\n\t\t\t\tkey_file = self.key_file\n\t\t\telse:\n\t\t\t\tkey_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\n\t\tif not password:\n\t\t\tpassword = self.password\n\n\t\tif type(password) != bytes:\n\t\t\tpassword = bytes(password, 'UTF-8')\n\n\t\twith open(key_file, 'wb') as fh:\n\t\t\tfh.write(password)\n\n\t\tcryptsetup_args = shlex.join([\n\t\t\t'/usr/bin/cryptsetup',\n\t\t\t'--batch-mode',\n\t\t\t'--verbose',\n\t\t\t'--type', 'luks2',\n\t\t\t'--pbkdf', 'argon2i',\n\t\t\t'--hash', hash_type,\n\t\t\t'--key-size', str(key_size),\n\t\t\t'--iter-time', str(iter_time),\n\t\t\t'--key-file', os.path.abspath(key_file),\n\t\t\t'--use-urandom',\n\t\t\t'luksFormat', partition.path,\n\t\t])\n\n\t\ttry:\n\t\t\t# Try to setup the crypt-device\n\t\t\tcmd_handle = sys_command(cryptsetup_args)\n\t\texcept SysCallError as err:\n\t\t\tif err.exit_code == 256:\n\t\t\t\tlog(f'{partition} is being used, trying to unmount and crypt-close the device and running one more attempt at encrypting the device.', level=LOG_LEVELS.Debug)\n\t\t\t\t# Partition was in use, unmount it and try again\n\t\t\t\tpartition.unmount()\n\n\t\t\t\t# Get crypt-information about the device by doing a reverse lookup starting with the partition path\n\t\t\t\t# For instance: /dev/sda\n\t\t\t\tdevinfo = json.loads(b''.join(sys_command(f\"lsblk --fs -J {partition.path}\")).decode('UTF-8'))['blockdevices'][0]\n\n\t\t\t\t# For each child (sub-partition/sub-device)\n\t\t\t\tif len(children := devinfo.get('children', [])):\n\t\t\t\t\tfor child in children:\n\t\t\t\t\t\t# Unmount the child location\n\t\t\t\t\t\tif child_mountpoint := child.get('mountpoint', None):\n\t\t\t\t\t\t\tlog(f'Unmounting {child_mountpoint}', level=LOG_LEVELS.Debug)\n\t\t\t\t\t\t\tsys_command(f\"umount -R {child_mountpoint}\")\n\n\t\t\t\t\t\t# And close it if possible.\n\t\t\t\t\t\tlog(f\"Closing crypt device {child['name']}\", level=LOG_LEVELS.Debug)\n\t\t\t\t\t\tsys_command(f\"cryptsetup close {child['name']}\")\n\n\t\t\t\t# Then try again to set up the 
crypt-device\n\t\t\t\tcmd_handle = sys_command(cryptsetup_args)\n\t\t\telse:\n\t\t\t\traise err\n\n\t\tif cmd_handle.exit_code != 0:\n\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {cmd_output}')\n\t\n\t\treturn key_file\n\n\tdef unlock(self, partition, mountpoint, key_file):\n\t\t\"\"\"\n\t\tMounts a lukts2 compatible partition to a certain mountpoint.\n\t\tKeyfile must be specified as there's no way to interact with the pw-prompt atm.\n\n\t\t:param mountpoint: The name without absolute path, for instance \"luksdev\" will point to /dev/mapper/luksdev\n\t\t:type mountpoint: str\n\t\t\"\"\"\n\t\tfrom .disk import get_filesystem_type\n\t\tif '/' in mountpoint:\n\t\t\tos.path.basename(mountpoint) # TODO: Raise exception instead?\n\n\t\twait_timer = time.time()\n\t\twhile pathlib.Path(partition.path).exists() is False and time.time() - wait_timer < 10:\n\t\t\ttime.sleep(0.025)\n\n\t\tsys_command(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')\n\t\tif os.path.islink(f'/dev/mapper/{mountpoint}'):\n\t\t\tself.mapdev = f'/dev/mapper/{mountpoint}'\n\t\t\tunlocked_partition = Partition(self.mapdev, None, encrypted=True, filesystem=get_filesystem_type(self.mapdev), autodetect_filesystem=False)\n\t\t\tunlocked_partition.allow_formatting = self.partition.allow_formatting\n\t\t\treturn unlocked_partition\n\n\tdef close(self, mountpoint=None):\n\t\tif not mountpoint:\n\t\t\tmountpoint = self.mapdev\n\n\t\tsys_command(f'/usr/bin/cryptsetup close {self.mapdev}')\n\t\treturn os.path.islink(self.mapdev) is False\n\n\tdef format(self, path):\n\t\tif (handle := sys_command(f\"/usr/bin/cryptsetup -q -v luksErase {path}\")).exit_code != 0:\n\t\t\traise DiskError(f'Could not format {path} with {self.filesystem} because: {b\"\".join(handle)}')\n", "path": "archinstall/lib/luks.py"}]}
num_tokens: 2,132
num_tokens_diff: 176
problem_id: gh_patches_debug_48760
source: rasdani/github-patches
task_type: git_diff
in_source_id: pypa__virtualenv-1941
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Multiple processes competition on read/write update log **Issue** https://github.com/pypa/virtualenv/blob/0cd009b5a1338f66397f71c85a75f576a2f3eabf/src/virtualenv/app_data/via_disk_folder.py#L129-L141 When the file is opened for writing in another process, the JSON content can't be parsed, then the file will be removed. However, on Windows the removal will fail since the file is occupied by anther process. **Environment** Provide at least: - OS: Windows 10 - ``pip list`` of the host python where ``virtualenv`` is installed: ```console appdirs 1.4.4 argcomplete 1.11.1 backcall 0.1.0 better-exceptions 0.2.2 certifi 2020.4.5.1 chardet 3.0.4 colorama 0.4.3 decorator 4.4.2 distlib 0.3.1 filelock 3.0.12 idna 2.10 ipython 7.14.0 ipython-genutils 0.2.0 jedi 0.17.0 naipyext 0.5.2 parso 0.7.0 pickleshare 0.7.5 pip 20.1.1 pipenv 2020.8.13.dev0 d:\workspace\pipenv pipx 0.15.1.3 prompt-toolkit 3.0.5 Pygments 2.6.1 requests 2.24.0 setuptools 46.4.0 six 1.14.0 traitlets 4.3.3 urllib3 1.25.10 userpath 1.3.0 virtualenv 20.0.31 virtualenv-clone 0.5.4 wcwidth 0.1.9 wheel 0.34.2 ``` **Output of the virtual environment creation** Make sure to run the creation with `-vvv --with-traceback`: ```pytb Traceback (most recent call last): File "C:\Users\runneradmin\.virtualenvs\pipenv-6Kr0DpZ2\lib\site-packages\virtualenv\seed\embed\via_app_data\via_app_data.py", line 94, in _get do_periodic_update=self.periodic_update, File "C:\Users\runneradmin\.virtualenvs\pipenv-6Kr0DpZ2\lib\site-packages\virtualenv\seed\wheels\acquire.py", line 25, in get_wheel wheel = from_bundle(distribution, version, for_py_version, search_dirs, app_data, do_periodic_update) File "C:\Users\runneradmin\.virtualenvs\pipenv-6Kr0DpZ2\lib\site-packages\virtualenv\seed\wheels\bundle.py", line 20, in from_bundle wheel = periodic_update(distribution, for_py_version, wheel, search_dirs, app_data, do_periodic_update) File "C:\Users\runneradmin\.virtualenvs\pipenv-6Kr0DpZ2\lib\site-packages\virtualenv\seed\wheels\periodic_update.py", line 41, in periodic_update handle_auto_update(distribution, for_py_version, wheel, search_dirs, app_data) File "C:\Users\runneradmin\.virtualenvs\pipenv-6Kr0DpZ2\lib\site-packages\virtualenv\seed\wheels\periodic_update.py", line 62, in handle_auto_update u_log = UpdateLog.from_dict(embed_update_log.read()) File "C:\Users\runneradmin\.virtualenvs\pipenv-6Kr0DpZ2\lib\site-packages\virtualenv\app_data\via_disk_folder.py", line 140, in read self.remove() File "C:\Users\runneradmin\.virtualenvs\pipenv-6Kr0DpZ2\lib\site-packages\virtualenv\app_data\via_disk_folder.py", line 144, in remove self.file.unlink() File "C:\hostedtoolcache\windows\Python\3.6.8\x64\lib\pathlib.py", line 1284, in unlink self._accessor.unlink(self) File "C:\hostedtoolcache\windows\Python\3.6.8\x64\lib\pathlib.py", line 387, in wrapped return strfunc(str(pathobj), *args) PermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:\\Users\\runneradmin\\AppData\\Local\\pypa\\virtualenv\\wheel\\3.8\\embed\\1\\wheel.json' PermissionError(13, 'The process cannot access the file because it is being used by another process') RuntimeError: seed failed due to failing to download wheels wheel ``` To fix the issue, I prefer to change the writing into **atomic**, that is, before the writing is done, the content should be kept. If that is an acceptable approach I can send a PR. 
--- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/virtualenv/app_data/via_disk_folder.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """ 3 A rough layout of the current storage goes as: 4 5 virtualenv-app-data 6 ├── py - <version> <cache information about python interpreters> 7 │   └── *.json/lock 8 ├── wheel <cache wheels used for seeding> 9 │   ├── house 10 │ │ └── *.whl <wheels downloaded go here> 11 │ └── <python major.minor> -> 3.9 12 │ ├── img-<version> 13 │ │ └── image 14 │ │ └── <install class> -> CopyPipInstall / SymlinkPipInstall 15 │ │ └── <wheel name> -> pip-20.1.1-py2.py3-none-any 16 │ └── embed 17 │ └── 1 18 │ └── *.json -> for every distribution contains data about newer embed versions and releases 19 └─── unzip <in zip app we cannot refer to some internal files, so first extract them> 20 └── <virtualenv version> 21 ├── py_info.py 22 ├── debug.py 23 └── _virtualenv.py 24 """ 25 from __future__ import absolute_import, unicode_literals 26 27 import json 28 import logging 29 from abc import ABCMeta 30 from contextlib import contextmanager 31 from hashlib import sha256 32 33 import six 34 35 from virtualenv.util.lock import ReentrantFileLock 36 from virtualenv.util.path import safe_delete 37 from virtualenv.util.six import ensure_text 38 from virtualenv.util.zipapp import extract 39 from virtualenv.version import __version__ 40 41 from .base import AppData, ContentStore 42 43 44 class AppDataDiskFolder(AppData): 45 """ 46 Store the application data on the disk within a folder layout. 47 """ 48 49 def __init__(self, folder): 50 self.lock = ReentrantFileLock(folder) 51 52 def __repr__(self): 53 return "{}".format(self.lock.path) 54 55 @property 56 def transient(self): 57 return False 58 59 def reset(self): 60 logging.debug("reset app data folder %s", self.lock.path) 61 safe_delete(self.lock.path) 62 63 def close(self): 64 """do nothing""" 65 66 @contextmanager 67 def locked(self, path): 68 path_lock = self.lock / path 69 with path_lock: 70 yield path_lock.path 71 72 @contextmanager 73 def extract(self, path, to_folder): 74 if to_folder is not None: 75 root = ReentrantFileLock(to_folder()) 76 else: 77 root = self.lock / "unzip" / __version__ 78 with root.lock_for_key(path.name): 79 dest = root.path / path.name 80 if not dest.exists(): 81 extract(path, dest) 82 yield dest 83 84 @property 85 def py_info_at(self): 86 return self.lock / "py_info" / "1" 87 88 def py_info(self, path): 89 return PyInfoStoreDisk(self.py_info_at, path) 90 91 def py_info_clear(self): 92 """""" 93 py_info_folder = self.py_info_at 94 with py_info_folder: 95 for filename in py_info_folder.path.iterdir(): 96 if filename.suffix == ".json": 97 with py_info_folder.lock_for_key(filename.stem): 98 if filename.exists(): 99 filename.unlink() 100 101 def embed_update_log(self, distribution, for_py_version): 102 return EmbedDistributionUpdateStoreDisk(self.lock / "wheel" / for_py_version / "embed" / "1", distribution) 103 104 @property 105 def house(self): 106 path = self.lock.path / "wheel" / "house" 107 path.mkdir(parents=True, exist_ok=True) 108 return path 109 110 def wheel_image(self, for_py_version, name): 111 return self.lock.path / "wheel" / for_py_version / "image" / "1" / name 112 113 114 @six.add_metaclass(ABCMeta) 115 class JSONStoreDisk(ContentStore): 116 def __init__(self, in_folder, key, msg, msg_args): 117 self.in_folder = in_folder 118 self.key = key 119 self.msg = msg 120 self.msg_args = msg_args + 
(self.file,) 121 122 @property 123 def file(self): 124 return self.in_folder.path / "{}.json".format(self.key) 125 126 def exists(self): 127 return self.file.exists() 128 129 def read(self): 130 data, bad_format = None, False 131 try: 132 data = json.loads(self.file.read_text()) 133 logging.debug("got {} from %s".format(self.msg), *self.msg_args) 134 return data 135 except ValueError: 136 bad_format = True 137 except Exception: # noqa 138 pass 139 if bad_format: 140 self.remove() 141 return None 142 143 def remove(self): 144 self.file.unlink() 145 logging.debug("removed {} at %s".format(self.msg), *self.msg_args) 146 147 @contextmanager 148 def locked(self): 149 with self.in_folder.lock_for_key(self.key): 150 yield 151 152 def write(self, content): 153 folder = self.file.parent 154 try: 155 folder.mkdir(parents=True, exist_ok=True) 156 except OSError: 157 pass 158 self.file.write_text(ensure_text(json.dumps(content, sort_keys=True, indent=2))) 159 logging.debug("wrote {} at %s".format(self.msg), *self.msg_args) 160 161 162 class PyInfoStoreDisk(JSONStoreDisk): 163 def __init__(self, in_folder, path): 164 key = sha256(str(path).encode("utf-8") if six.PY3 else str(path)).hexdigest() 165 super(PyInfoStoreDisk, self).__init__(in_folder, key, "python info of %s", (path,)) 166 167 168 class EmbedDistributionUpdateStoreDisk(JSONStoreDisk): 169 def __init__(self, in_folder, distribution): 170 super(EmbedDistributionUpdateStoreDisk, self).__init__( 171 in_folder, 172 distribution, 173 "embed update of distribution %s", 174 (distribution,), 175 ) 176 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/virtualenv/app_data/via_disk_folder.py b/src/virtualenv/app_data/via_disk_folder.py --- a/src/virtualenv/app_data/via_disk_folder.py +++ b/src/virtualenv/app_data/via_disk_folder.py @@ -137,7 +137,10 @@ except Exception: # noqa pass if bad_format: - self.remove() + try: + self.remove() + except OSError: # reading and writing on the same file may cause race on multiple processes + pass return None def remove(self):
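The merged fix above simply tolerates the `OSError` raised when another process still holds the cache file open during `remove()`. The issue reporter's alternative suggestion was to make the write atomic so a reader never sees a half-written JSON file. A rough sketch of that idea, assuming a plain file path; the helper below is illustrative and is not the code that landed in virtualenv:

```python
import json
import os
import tempfile


def write_json_atomic(path, data):
    """Write `data` as JSON so concurrent readers never observe a partial file."""
    folder = os.path.dirname(path) or "."
    fd, tmp = tempfile.mkstemp(dir=folder, suffix=".json.tmp")
    try:
        with os.fdopen(fd, "w") as handle:
            json.dump(data, handle, sort_keys=True, indent=2)
        os.replace(tmp, path)  # atomic rename; also overwrites an existing file on Windows
    except BaseException:
        if os.path.exists(tmp):
            os.unlink(tmp)
        raise
```

Writing to a temporary file in the same directory and renaming it keeps the visible file either in its old or its new state, which sidesteps the parse-then-delete race entirely.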
{"golden_diff": "diff --git a/src/virtualenv/app_data/via_disk_folder.py b/src/virtualenv/app_data/via_disk_folder.py\n--- a/src/virtualenv/app_data/via_disk_folder.py\n+++ b/src/virtualenv/app_data/via_disk_folder.py\n@@ -137,7 +137,10 @@\n except Exception: # noqa\n pass\n if bad_format:\n- self.remove()\n+ try:\n+ self.remove()\n+ except OSError: # reading and writing on the same file may cause race on multiple processes\n+ pass\n return None\n \n def remove(self):\n", "issue": "Multiple processes competition on read/write update log\n**Issue**\r\n\r\nhttps://github.com/pypa/virtualenv/blob/0cd009b5a1338f66397f71c85a75f576a2f3eabf/src/virtualenv/app_data/via_disk_folder.py#L129-L141\r\n\r\nWhen the file is opened for writing in another process, the JSON content can't be parsed, then the file will be removed. However, on Windows the removal will fail since the file is occupied by anther process.\r\n\r\n**Environment**\r\n\r\nProvide at least:\r\n- OS: Windows 10\r\n- ``pip list`` of the host python where ``virtualenv`` is installed:\r\n\r\n ```console\r\n appdirs 1.4.4\r\n argcomplete 1.11.1\r\n backcall 0.1.0\r\n better-exceptions 0.2.2\r\n certifi 2020.4.5.1\r\n chardet 3.0.4\r\n colorama 0.4.3\r\n decorator 4.4.2\r\n distlib 0.3.1\r\n filelock 3.0.12\r\n idna 2.10\r\n ipython 7.14.0\r\n ipython-genutils 0.2.0\r\n jedi 0.17.0\r\n naipyext 0.5.2\r\n parso 0.7.0\r\n pickleshare 0.7.5\r\n pip 20.1.1\r\n pipenv 2020.8.13.dev0 d:\\workspace\\pipenv\r\n pipx 0.15.1.3\r\n prompt-toolkit 3.0.5\r\n Pygments 2.6.1\r\n requests 2.24.0\r\n setuptools 46.4.0\r\n six 1.14.0\r\n traitlets 4.3.3\r\n urllib3 1.25.10\r\n userpath 1.3.0\r\n virtualenv 20.0.31\r\n virtualenv-clone 0.5.4\r\n wcwidth 0.1.9\r\n wheel 0.34.2\r\n ```\r\n\r\n**Output of the virtual environment creation**\r\n\r\nMake sure to run the creation with `-vvv --with-traceback`:\r\n\r\n```pytb\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\runneradmin\\.virtualenvs\\pipenv-6Kr0DpZ2\\lib\\site-packages\\virtualenv\\seed\\embed\\via_app_data\\via_app_data.py\", line 94, in _get\r\n do_periodic_update=self.periodic_update,\r\n File \"C:\\Users\\runneradmin\\.virtualenvs\\pipenv-6Kr0DpZ2\\lib\\site-packages\\virtualenv\\seed\\wheels\\acquire.py\", line 25, in get_wheel\r\n wheel = from_bundle(distribution, version, for_py_version, search_dirs, app_data, do_periodic_update)\r\n File \"C:\\Users\\runneradmin\\.virtualenvs\\pipenv-6Kr0DpZ2\\lib\\site-packages\\virtualenv\\seed\\wheels\\bundle.py\", line 20, in from_bundle\r\n wheel = periodic_update(distribution, for_py_version, wheel, search_dirs, app_data, do_periodic_update)\r\n File \"C:\\Users\\runneradmin\\.virtualenvs\\pipenv-6Kr0DpZ2\\lib\\site-packages\\virtualenv\\seed\\wheels\\periodic_update.py\", line 41, in periodic_update\r\n handle_auto_update(distribution, for_py_version, wheel, search_dirs, app_data)\r\n File \"C:\\Users\\runneradmin\\.virtualenvs\\pipenv-6Kr0DpZ2\\lib\\site-packages\\virtualenv\\seed\\wheels\\periodic_update.py\", line 62, in handle_auto_update\r\n u_log = UpdateLog.from_dict(embed_update_log.read())\r\n File \"C:\\Users\\runneradmin\\.virtualenvs\\pipenv-6Kr0DpZ2\\lib\\site-packages\\virtualenv\\app_data\\via_disk_folder.py\", line 140, in read\r\n self.remove()\r\n File \"C:\\Users\\runneradmin\\.virtualenvs\\pipenv-6Kr0DpZ2\\lib\\site-packages\\virtualenv\\app_data\\via_disk_folder.py\", line 144, in remove\r\n self.file.unlink()\r\n File \"C:\\hostedtoolcache\\windows\\Python\\3.6.8\\x64\\lib\\pathlib.py\", line 1284, in unlink\r\n 
self._accessor.unlink(self)\r\n File \"C:\\hostedtoolcache\\windows\\Python\\3.6.8\\x64\\lib\\pathlib.py\", line 387, in wrapped\r\n return strfunc(str(pathobj), *args)\r\nPermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:\\\\Users\\\\runneradmin\\\\AppData\\\\Local\\\\pypa\\\\virtualenv\\\\wheel\\\\3.8\\\\embed\\\\1\\\\wheel.json'\r\nPermissionError(13, 'The process cannot access the file because it is being used by another process')\r\nRuntimeError: seed failed due to failing to download wheels wheel\r\n```\r\n\r\nTo fix the issue, I prefer to change the writing into **atomic**, that is, before the writing is done, the content should be kept.\r\n\r\nIf that is an acceptable approach I can send a PR.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nA rough layout of the current storage goes as:\n\nvirtualenv-app-data\n\u251c\u2500\u2500 py - <version> <cache information about python interpreters>\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 *.json/lock\n\u251c\u2500\u2500 wheel <cache wheels used for seeding>\n\u2502 \u00a0 \u251c\u2500\u2500 house\n\u2502 \u2502 \u2514\u2500\u2500 *.whl <wheels downloaded go here>\n\u2502 \u2514\u2500\u2500 <python major.minor> -> 3.9\n\u2502 \u251c\u2500\u2500 img-<version>\n\u2502 \u2502 \u2514\u2500\u2500 image\n\u2502 \u2502 \u2514\u2500\u2500 <install class> -> CopyPipInstall / SymlinkPipInstall\n\u2502 \u2502 \u2514\u2500\u2500 <wheel name> -> pip-20.1.1-py2.py3-none-any\n\u2502 \u2514\u2500\u2500 embed\n\u2502 \u2514\u2500\u2500 1\n\u2502 \u2514\u2500\u2500 *.json -> for every distribution contains data about newer embed versions and releases\n\u2514\u2500\u2500\u2500 unzip <in zip app we cannot refer to some internal files, so first extract them>\n \u2514\u2500\u2500 <virtualenv version>\n \u251c\u2500\u2500 py_info.py\n \u251c\u2500\u2500 debug.py\n \u2514\u2500\u2500 _virtualenv.py\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport json\nimport logging\nfrom abc import ABCMeta\nfrom contextlib import contextmanager\nfrom hashlib import sha256\n\nimport six\n\nfrom virtualenv.util.lock import ReentrantFileLock\nfrom virtualenv.util.path import safe_delete\nfrom virtualenv.util.six import ensure_text\nfrom virtualenv.util.zipapp import extract\nfrom virtualenv.version import __version__\n\nfrom .base import AppData, ContentStore\n\n\nclass AppDataDiskFolder(AppData):\n \"\"\"\n Store the application data on the disk within a folder layout.\n \"\"\"\n\n def __init__(self, folder):\n self.lock = ReentrantFileLock(folder)\n\n def __repr__(self):\n return \"{}\".format(self.lock.path)\n\n @property\n def transient(self):\n return False\n\n def reset(self):\n logging.debug(\"reset app data folder %s\", self.lock.path)\n safe_delete(self.lock.path)\n\n def close(self):\n \"\"\"do nothing\"\"\"\n\n @contextmanager\n def locked(self, path):\n path_lock = self.lock / path\n with path_lock:\n yield path_lock.path\n\n @contextmanager\n def extract(self, path, to_folder):\n if to_folder is not None:\n root = ReentrantFileLock(to_folder())\n else:\n root = self.lock / \"unzip\" / __version__\n with root.lock_for_key(path.name):\n dest = root.path / path.name\n if not dest.exists():\n extract(path, dest)\n yield dest\n\n @property\n def py_info_at(self):\n return self.lock / \"py_info\" / \"1\"\n\n def py_info(self, path):\n return PyInfoStoreDisk(self.py_info_at, path)\n\n def py_info_clear(self):\n \"\"\"\"\"\"\n py_info_folder = self.py_info_at\n with 
py_info_folder:\n for filename in py_info_folder.path.iterdir():\n if filename.suffix == \".json\":\n with py_info_folder.lock_for_key(filename.stem):\n if filename.exists():\n filename.unlink()\n\n def embed_update_log(self, distribution, for_py_version):\n return EmbedDistributionUpdateStoreDisk(self.lock / \"wheel\" / for_py_version / \"embed\" / \"1\", distribution)\n\n @property\n def house(self):\n path = self.lock.path / \"wheel\" / \"house\"\n path.mkdir(parents=True, exist_ok=True)\n return path\n\n def wheel_image(self, for_py_version, name):\n return self.lock.path / \"wheel\" / for_py_version / \"image\" / \"1\" / name\n\n\[email protected]_metaclass(ABCMeta)\nclass JSONStoreDisk(ContentStore):\n def __init__(self, in_folder, key, msg, msg_args):\n self.in_folder = in_folder\n self.key = key\n self.msg = msg\n self.msg_args = msg_args + (self.file,)\n\n @property\n def file(self):\n return self.in_folder.path / \"{}.json\".format(self.key)\n\n def exists(self):\n return self.file.exists()\n\n def read(self):\n data, bad_format = None, False\n try:\n data = json.loads(self.file.read_text())\n logging.debug(\"got {} from %s\".format(self.msg), *self.msg_args)\n return data\n except ValueError:\n bad_format = True\n except Exception: # noqa\n pass\n if bad_format:\n self.remove()\n return None\n\n def remove(self):\n self.file.unlink()\n logging.debug(\"removed {} at %s\".format(self.msg), *self.msg_args)\n\n @contextmanager\n def locked(self):\n with self.in_folder.lock_for_key(self.key):\n yield\n\n def write(self, content):\n folder = self.file.parent\n try:\n folder.mkdir(parents=True, exist_ok=True)\n except OSError:\n pass\n self.file.write_text(ensure_text(json.dumps(content, sort_keys=True, indent=2)))\n logging.debug(\"wrote {} at %s\".format(self.msg), *self.msg_args)\n\n\nclass PyInfoStoreDisk(JSONStoreDisk):\n def __init__(self, in_folder, path):\n key = sha256(str(path).encode(\"utf-8\") if six.PY3 else str(path)).hexdigest()\n super(PyInfoStoreDisk, self).__init__(in_folder, key, \"python info of %s\", (path,))\n\n\nclass EmbedDistributionUpdateStoreDisk(JSONStoreDisk):\n def __init__(self, in_folder, distribution):\n super(EmbedDistributionUpdateStoreDisk, self).__init__(\n in_folder,\n distribution,\n \"embed update of distribution %s\",\n (distribution,),\n )\n", "path": "src/virtualenv/app_data/via_disk_folder.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nA rough layout of the current storage goes as:\n\nvirtualenv-app-data\n\u251c\u2500\u2500 py - <version> <cache information about python interpreters>\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 *.json/lock\n\u251c\u2500\u2500 wheel <cache wheels used for seeding>\n\u2502 \u00a0 \u251c\u2500\u2500 house\n\u2502 \u2502 \u2514\u2500\u2500 *.whl <wheels downloaded go here>\n\u2502 \u2514\u2500\u2500 <python major.minor> -> 3.9\n\u2502 \u251c\u2500\u2500 img-<version>\n\u2502 \u2502 \u2514\u2500\u2500 image\n\u2502 \u2502 \u2514\u2500\u2500 <install class> -> CopyPipInstall / SymlinkPipInstall\n\u2502 \u2502 \u2514\u2500\u2500 <wheel name> -> pip-20.1.1-py2.py3-none-any\n\u2502 \u2514\u2500\u2500 embed\n\u2502 \u2514\u2500\u2500 1\n\u2502 \u2514\u2500\u2500 *.json -> for every distribution contains data about newer embed versions and releases\n\u2514\u2500\u2500\u2500 unzip <in zip app we cannot refer to some internal files, so first extract them>\n \u2514\u2500\u2500 <virtualenv version>\n \u251c\u2500\u2500 py_info.py\n \u251c\u2500\u2500 debug.py\n \u2514\u2500\u2500 _virtualenv.py\n\"\"\"\nfrom 
__future__ import absolute_import, unicode_literals\n\nimport json\nimport logging\nfrom abc import ABCMeta\nfrom contextlib import contextmanager\nfrom hashlib import sha256\n\nimport six\n\nfrom virtualenv.util.lock import ReentrantFileLock\nfrom virtualenv.util.path import safe_delete\nfrom virtualenv.util.six import ensure_text\nfrom virtualenv.util.zipapp import extract\nfrom virtualenv.version import __version__\n\nfrom .base import AppData, ContentStore\n\n\nclass AppDataDiskFolder(AppData):\n \"\"\"\n Store the application data on the disk within a folder layout.\n \"\"\"\n\n def __init__(self, folder):\n self.lock = ReentrantFileLock(folder)\n\n def __repr__(self):\n return \"{}\".format(self.lock.path)\n\n @property\n def transient(self):\n return False\n\n def reset(self):\n logging.debug(\"reset app data folder %s\", self.lock.path)\n safe_delete(self.lock.path)\n\n def close(self):\n \"\"\"do nothing\"\"\"\n\n @contextmanager\n def locked(self, path):\n path_lock = self.lock / path\n with path_lock:\n yield path_lock.path\n\n @contextmanager\n def extract(self, path, to_folder):\n if to_folder is not None:\n root = ReentrantFileLock(to_folder())\n else:\n root = self.lock / \"unzip\" / __version__\n with root.lock_for_key(path.name):\n dest = root.path / path.name\n if not dest.exists():\n extract(path, dest)\n yield dest\n\n @property\n def py_info_at(self):\n return self.lock / \"py_info\" / \"1\"\n\n def py_info(self, path):\n return PyInfoStoreDisk(self.py_info_at, path)\n\n def py_info_clear(self):\n \"\"\"\"\"\"\n py_info_folder = self.py_info_at\n with py_info_folder:\n for filename in py_info_folder.path.iterdir():\n if filename.suffix == \".json\":\n with py_info_folder.lock_for_key(filename.stem):\n if filename.exists():\n filename.unlink()\n\n def embed_update_log(self, distribution, for_py_version):\n return EmbedDistributionUpdateStoreDisk(self.lock / \"wheel\" / for_py_version / \"embed\" / \"1\", distribution)\n\n @property\n def house(self):\n path = self.lock.path / \"wheel\" / \"house\"\n path.mkdir(parents=True, exist_ok=True)\n return path\n\n def wheel_image(self, for_py_version, name):\n return self.lock.path / \"wheel\" / for_py_version / \"image\" / \"1\" / name\n\n\[email protected]_metaclass(ABCMeta)\nclass JSONStoreDisk(ContentStore):\n def __init__(self, in_folder, key, msg, msg_args):\n self.in_folder = in_folder\n self.key = key\n self.msg = msg\n self.msg_args = msg_args + (self.file,)\n\n @property\n def file(self):\n return self.in_folder.path / \"{}.json\".format(self.key)\n\n def exists(self):\n return self.file.exists()\n\n def read(self):\n data, bad_format = None, False\n try:\n data = json.loads(self.file.read_text())\n logging.debug(\"got {} from %s\".format(self.msg), *self.msg_args)\n return data\n except ValueError:\n bad_format = True\n except Exception: # noqa\n pass\n if bad_format:\n try:\n self.remove()\n except OSError: # reading and writing on the same file may cause race on multiple processes\n pass\n return None\n\n def remove(self):\n self.file.unlink()\n logging.debug(\"removed {} at %s\".format(self.msg), *self.msg_args)\n\n @contextmanager\n def locked(self):\n with self.in_folder.lock_for_key(self.key):\n yield\n\n def write(self, content):\n folder = self.file.parent\n try:\n folder.mkdir(parents=True, exist_ok=True)\n except OSError:\n pass\n self.file.write_text(ensure_text(json.dumps(content, sort_keys=True, indent=2)))\n logging.debug(\"wrote {} at %s\".format(self.msg), *self.msg_args)\n\n\nclass 
PyInfoStoreDisk(JSONStoreDisk):\n def __init__(self, in_folder, path):\n key = sha256(str(path).encode(\"utf-8\") if six.PY3 else str(path)).hexdigest()\n super(PyInfoStoreDisk, self).__init__(in_folder, key, \"python info of %s\", (path,))\n\n\nclass EmbedDistributionUpdateStoreDisk(JSONStoreDisk):\n def __init__(self, in_folder, distribution):\n super(EmbedDistributionUpdateStoreDisk, self).__init__(\n in_folder,\n distribution,\n \"embed update of distribution %s\",\n (distribution,),\n )\n", "path": "src/virtualenv/app_data/via_disk_folder.py"}]}
num_tokens: 3,253
num_tokens_diff: 135
problem_id: gh_patches_debug_28668
source: rasdani/github-patches
task_type: git_diff
in_source_id: dotkom__onlineweb4-1888
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cannot save user profile with an empty zip code ## What kind of an issue is this? - [x] Bug report ## What is the expected behaviour? That you can save the form and receive guidance without having to input a ZIP code. ## What is the current behaviour? If you don't have a ZIP code in the field the form doesn't validate and you get a runtime error, which doesn't tell the user why it didn't work. ## How do you reproduce this problem? Go to my profile, empty the ZIP code field and save. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `apps/profiles/forms.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import re 3 4 from django import forms 5 from django.contrib import auth 6 from django.utils.translation import ugettext as _ 7 8 from apps.authentication.models import OnlineUser, Position 9 from apps.profiles.models import Privacy 10 11 12 class ProfileForm(forms.ModelForm): 13 class Meta(object): 14 model = OnlineUser 15 16 fields = [ 17 'nickname', 18 'website', 19 'phone_number', 20 'address', 21 'zip_code', 22 'allergies', 23 'compiled', 24 'bio', 25 'gender', 26 'github', 27 'linkedin' 28 ] 29 widgets = { 30 'allergies': forms.Textarea(attrs={'id': 'allergies'}), 31 'gender': forms.Select(attrs={'class': 'form-control', 'id': 'gender'}), 32 'bio': forms.Textarea(attrs={'id': 'bio'}), 33 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}), 34 } 35 36 def clean(self): 37 super(ProfileForm, self).clean() 38 39 cleaned_data = self.cleaned_data 40 41 # ZIP code digits only 42 zip_code = cleaned_data['zip_code'] 43 if len(zip_code) != 0 and not re.match(r'\d{4}', zip_code): 44 self._errors['zip_code'] = self.error_class([_("Postnummer må bestå av fire siffer.")]) 45 46 return cleaned_data 47 48 49 class PrivacyForm(forms.ModelForm): 50 class Meta(object): 51 model = Privacy 52 exclude = ['user', 'expose_nickname'] 53 54 55 class MailSettingsForm(forms.ModelForm): 56 class Meta(object): 57 model = OnlineUser 58 fields = ['infomail', ] 59 60 61 class PositionForm(forms.ModelForm): 62 class Meta(object): 63 model = Position 64 exclude = ['user'] 65 widgets = { 66 'committee': forms.Select(attrs={'class': 'form-control'}), 67 'position': forms.Select(attrs={'class': 'form-control'}), 68 } 69 70 def clean(self): 71 super(PositionForm, self).clean() 72 73 range_compiler = re.compile(r'\d{4}-\d{4}') 74 year_range = self.cleaned_data['period'] 75 76 # If it doesn't match the format YYYY-YYYY 77 if not range_compiler.match(year_range): 78 self._errors['period'] = self.error_class( 79 [_('Feil format. Dobbelsjekk at input er på formatet YYYY-YYYY.')] 80 ) 81 return self.cleaned_data 82 83 years = year_range.split('-') 84 85 # If somewhat they fucked up input, we don't want None-shit after the split. 86 if not years[0] or not years[1]: 87 self._errors['period'] = self.error_class([_('Feil format. Dobbelsjekk input.')]) 88 return self.cleaned_data 89 90 # If first year is larger than latter, or the diff is more than one, fail. 91 if (int(years[0]) > int(years[1])) or (int(years[1]) - int(years[0])) > 1: 92 self._errors['period'] = self.error_class([_('Ikke gyldig års-intervall. 
Bare ett år er tillat.')]) 93 94 return self.cleaned_data 95 96 97 class MembershipSettingsForm(forms.ModelForm): 98 def __init__(self, *args, **kwargs): 99 super(MembershipSettingsForm, self).__init__(*args, **kwargs) 100 self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker' 101 102 class Meta(object): 103 model = OnlineUser 104 fields = ['field_of_study', 'started_date'] 105 106 widgets = { 107 'started_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}), 108 } 109 110 111 class InternalServicesForm(forms.Form): 112 ow4_password = forms.CharField(widget=forms.PasswordInput(), label=_(u"Online passord")) 113 services_password = forms.CharField(widget=forms.PasswordInput(), label=_(u"Ønsket service passord")) 114 current_user = None 115 116 def clean(self): 117 super(InternalServicesForm, self).clean() 118 if self.is_valid(): 119 cleaned_data = self.cleaned_data 120 121 # User object relation here 122 user = auth.authenticate(username=self.current_user.username, password=cleaned_data['ow4_password']) 123 124 if user is None or user.id != self.current_user.id: 125 self._errors['ow4_password'] = self.error_class([_(u"Passordet er ikke korrekt.")]) 126 127 return cleaned_data 128 ``` Path: `apps/authentication/templatetags/gravatar_url_resolver.py` Content: ``` 1 import hashlib 2 import urllib 3 4 from django import template 5 from django.conf import settings 6 7 register = template.Library() 8 9 10 @register.assignment_tag(takes_context=True) 11 def gravatar_url(context, user, size): 12 prefix = "https://" if context['request'].is_secure() else "http://" 13 default = "%s%s%s_%s.png" % ( 14 prefix, 15 context['request'].META['HTTP_HOST'], 16 settings.DEFAULT_PROFILE_PICTURE_PREFIX, 17 user.gender 18 ) 19 20 grav_url = "https://www.gravatar.com/avatar/" + hashlib.md5(user.email.encode()).hexdigest() + "?" 21 grav_url += urllib.parse.urlencode({'d': default, 's': str(size)}) 22 23 return grav_url 24 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/apps/authentication/templatetags/gravatar_url_resolver.py b/apps/authentication/templatetags/gravatar_url_resolver.py --- a/apps/authentication/templatetags/gravatar_url_resolver.py +++ b/apps/authentication/templatetags/gravatar_url_resolver.py @@ -12,7 +12,7 @@ prefix = "https://" if context['request'].is_secure() else "http://" default = "%s%s%s_%s.png" % ( prefix, - context['request'].META['HTTP_HOST'], + context['request'].META.get('HTTP_HOST', 'localhost'), settings.DEFAULT_PROFILE_PICTURE_PREFIX, user.gender ) diff --git a/apps/profiles/forms.py b/apps/profiles/forms.py --- a/apps/profiles/forms.py +++ b/apps/profiles/forms.py @@ -8,6 +8,8 @@ from apps.authentication.models import OnlineUser, Position from apps.profiles.models import Privacy +ZIP_CODE_VALIDATION_ERROR = "Postnummer må bestå av fire siffer." + class ProfileForm(forms.ModelForm): class Meta(object): @@ -33,17 +35,10 @@ 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}), } - def clean(self): - super(ProfileForm, self).clean() - - cleaned_data = self.cleaned_data - - # ZIP code digits only - zip_code = cleaned_data['zip_code'] - if len(zip_code) != 0 and not re.match(r'\d{4}', zip_code): - self._errors['zip_code'] = self.error_class([_("Postnummer må bestå av fire siffer.")]) - - return cleaned_data + def clean_zip_code(self): + zip_code = self.cleaned_data['zip_code'] + if zip_code and len(zip_code) != 0 and not re.match(r'\d{4}', zip_code): + self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR) class PrivacyForm(forms.ModelForm):
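Besides making the host lookup defensive with `META.get('HTTP_HOST', 'localhost')`, the patch above moves the zip-code check from the form-wide `clean()` into a per-field `clean_zip_code()` hook that reports problems with `add_error()`, so an empty value passes cleanly and a malformed one produces a normal form error instead of an unhandled exception. Stripped of the Django form machinery, the rule itself looks roughly like this; the standalone function is illustrative and not part of the project:

```python
import re

ZIP_CODE_VALIDATION_ERROR = "Postnummer må bestå av fire siffer."


def validate_zip_code(zip_code):
    """Accept an empty value; otherwise require four leading digits, as in the patch."""
    if zip_code and not re.match(r"\d{4}", zip_code):
        raise ValueError(ZIP_CODE_VALIDATION_ERROR)
    return zip_code
```

Keeping the check in a `clean_<field>` method also means Django attaches the message to the zip-code field itself, so the user can see why the form did not save.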
{"golden_diff": "diff --git a/apps/authentication/templatetags/gravatar_url_resolver.py b/apps/authentication/templatetags/gravatar_url_resolver.py\n--- a/apps/authentication/templatetags/gravatar_url_resolver.py\n+++ b/apps/authentication/templatetags/gravatar_url_resolver.py\n@@ -12,7 +12,7 @@\n prefix = \"https://\" if context['request'].is_secure() else \"http://\"\n default = \"%s%s%s_%s.png\" % (\n prefix,\n- context['request'].META['HTTP_HOST'],\n+ context['request'].META.get('HTTP_HOST', 'localhost'),\n settings.DEFAULT_PROFILE_PICTURE_PREFIX,\n user.gender\n )\ndiff --git a/apps/profiles/forms.py b/apps/profiles/forms.py\n--- a/apps/profiles/forms.py\n+++ b/apps/profiles/forms.py\n@@ -8,6 +8,8 @@\n from apps.authentication.models import OnlineUser, Position\n from apps.profiles.models import Privacy\n \n+ZIP_CODE_VALIDATION_ERROR = \"Postnummer m\u00e5 best\u00e5 av fire siffer.\"\n+\n \n class ProfileForm(forms.ModelForm):\n class Meta(object):\n@@ -33,17 +35,10 @@\n 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),\n }\n \n- def clean(self):\n- super(ProfileForm, self).clean()\n-\n- cleaned_data = self.cleaned_data\n-\n- # ZIP code digits only\n- zip_code = cleaned_data['zip_code']\n- if len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n- self._errors['zip_code'] = self.error_class([_(\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n-\n- return cleaned_data\n+ def clean_zip_code(self):\n+ zip_code = self.cleaned_data['zip_code']\n+ if zip_code and len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n+ self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)\n \n \n class PrivacyForm(forms.ModelForm):\n", "issue": "Cannot save user profile with an empty zip code\n## What kind of an issue is this?\r\n\r\n- [x] Bug report\r\n\r\n## What is the expected behaviour?\r\n\r\nThat you can save the form and receive guidance without having to input a ZIP code.\r\n\r\n## What is the current behaviour?\r\n\r\nIf you don't have a ZIP code in the field the form doesn't validate and you get a runtime error, which doesn't tell the user why it didn't work.\r\n\r\n## How do you reproduce this problem? 
\r\n\r\nGo to my profile, empty the ZIP code field and save.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser, Position\nfrom apps.profiles.models import Privacy\n\n\nclass ProfileForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n\n fields = [\n 'nickname',\n 'website',\n 'phone_number',\n 'address',\n 'zip_code',\n 'allergies',\n 'compiled',\n 'bio',\n 'gender',\n 'github',\n 'linkedin'\n ]\n widgets = {\n 'allergies': forms.Textarea(attrs={'id': 'allergies'}),\n 'gender': forms.Select(attrs={'class': 'form-control', 'id': 'gender'}),\n 'bio': forms.Textarea(attrs={'id': 'bio'}),\n 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),\n }\n\n def clean(self):\n super(ProfileForm, self).clean()\n\n cleaned_data = self.cleaned_data\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n self._errors['zip_code'] = self.error_class([_(\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data\n\n\nclass PrivacyForm(forms.ModelForm):\n class Meta(object):\n model = Privacy\n exclude = ['user', 'expose_nickname']\n\n\nclass MailSettingsForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n fields = ['infomail', ]\n\n\nclass PositionForm(forms.ModelForm):\n class Meta(object):\n model = Position\n exclude = ['user']\n widgets = {\n 'committee': forms.Select(attrs={'class': 'form-control'}),\n 'position': forms.Select(attrs={'class': 'form-control'}),\n }\n\n def clean(self):\n super(PositionForm, self).clean()\n\n range_compiler = re.compile(r'\\d{4}-\\d{4}')\n year_range = self.cleaned_data['period']\n\n # If it doesn't match the format YYYY-YYYY\n if not range_compiler.match(year_range):\n self._errors['period'] = self.error_class(\n [_('Feil format. Dobbelsjekk at input er p\u00e5 formatet YYYY-YYYY.')]\n )\n return self.cleaned_data\n\n years = year_range.split('-')\n\n # If somewhat they fucked up input, we don't want None-shit after the split.\n if not years[0] or not years[1]:\n self._errors['period'] = self.error_class([_('Feil format. Dobbelsjekk input.')])\n return self.cleaned_data\n\n # If first year is larger than latter, or the diff is more than one, fail.\n if (int(years[0]) > int(years[1])) or (int(years[1]) - int(years[0])) > 1:\n self._errors['period'] = self.error_class([_('Ikke gyldig \u00e5rs-intervall. 
Bare ett \u00e5r er tillat.')])\n\n return self.cleaned_data\n\n\nclass MembershipSettingsForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(MembershipSettingsForm, self).__init__(*args, **kwargs)\n self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'\n\n class Meta(object):\n model = OnlineUser\n fields = ['field_of_study', 'started_date']\n\n widgets = {\n 'started_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}),\n }\n\n\nclass InternalServicesForm(forms.Form):\n ow4_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"Online passord\"))\n services_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"\u00d8nsket service passord\"))\n current_user = None\n\n def clean(self):\n super(InternalServicesForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # User object relation here\n user = auth.authenticate(username=self.current_user.username, password=cleaned_data['ow4_password'])\n\n if user is None or user.id != self.current_user.id:\n self._errors['ow4_password'] = self.error_class([_(u\"Passordet er ikke korrekt.\")])\n\n return cleaned_data\n", "path": "apps/profiles/forms.py"}, {"content": "import hashlib\nimport urllib\n\nfrom django import template\nfrom django.conf import settings\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=True)\ndef gravatar_url(context, user, size):\n prefix = \"https://\" if context['request'].is_secure() else \"http://\"\n default = \"%s%s%s_%s.png\" % (\n prefix,\n context['request'].META['HTTP_HOST'],\n settings.DEFAULT_PROFILE_PICTURE_PREFIX,\n user.gender\n )\n\n grav_url = \"https://www.gravatar.com/avatar/\" + hashlib.md5(user.email.encode()).hexdigest() + \"?\"\n grav_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n\n return grav_url\n", "path": "apps/authentication/templatetags/gravatar_url_resolver.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser, Position\nfrom apps.profiles.models import Privacy\n\nZIP_CODE_VALIDATION_ERROR = \"Postnummer m\u00e5 best\u00e5 av fire siffer.\"\n\n\nclass ProfileForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n\n fields = [\n 'nickname',\n 'website',\n 'phone_number',\n 'address',\n 'zip_code',\n 'allergies',\n 'compiled',\n 'bio',\n 'gender',\n 'github',\n 'linkedin'\n ]\n widgets = {\n 'allergies': forms.Textarea(attrs={'id': 'allergies'}),\n 'gender': forms.Select(attrs={'class': 'form-control', 'id': 'gender'}),\n 'bio': forms.Textarea(attrs={'id': 'bio'}),\n 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),\n }\n\n def clean_zip_code(self):\n zip_code = self.cleaned_data['zip_code']\n if zip_code and len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)\n\n\nclass PrivacyForm(forms.ModelForm):\n class Meta(object):\n model = Privacy\n exclude = ['user', 'expose_nickname']\n\n\nclass MailSettingsForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n fields = ['infomail', ]\n\n\nclass PositionForm(forms.ModelForm):\n class Meta(object):\n model = Position\n exclude = ['user']\n widgets = {\n 'committee': forms.Select(attrs={'class': 'form-control'}),\n 'position': forms.Select(attrs={'class': 'form-control'}),\n }\n\n def clean(self):\n super(PositionForm, self).clean()\n\n range_compiler 
= re.compile(r'\\d{4}-\\d{4}')\n year_range = self.cleaned_data['period']\n\n # If it doesn't match the format YYYY-YYYY\n if not range_compiler.match(year_range):\n self._errors['period'] = self.error_class(\n [_('Feil format. Dobbelsjekk at input er p\u00e5 formatet YYYY-YYYY.')]\n )\n return self.cleaned_data\n\n years = year_range.split('-')\n\n # If somewhat they fucked up input, we don't want None-shit after the split.\n if not years[0] or not years[1]:\n self._errors['period'] = self.error_class([_('Feil format. Dobbelsjekk input.')])\n return self.cleaned_data\n\n # If first year is larger than latter, or the diff is more than one, fail.\n if (int(years[0]) > int(years[1])) or (int(years[1]) - int(years[0])) > 1:\n self._errors['period'] = self.error_class([_('Ikke gyldig \u00e5rs-intervall. Bare ett \u00e5r er tillat.')])\n\n return self.cleaned_data\n\n\nclass MembershipSettingsForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(MembershipSettingsForm, self).__init__(*args, **kwargs)\n self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'\n\n class Meta(object):\n model = OnlineUser\n fields = ['field_of_study', 'started_date']\n\n widgets = {\n 'started_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}),\n }\n\n\nclass InternalServicesForm(forms.Form):\n ow4_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"Online passord\"))\n services_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"\u00d8nsket service passord\"))\n current_user = None\n\n def clean(self):\n super(InternalServicesForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # User object relation here\n user = auth.authenticate(username=self.current_user.username, password=cleaned_data['ow4_password'])\n\n if user is None or user.id != self.current_user.id:\n self._errors['ow4_password'] = self.error_class([_(u\"Passordet er ikke korrekt.\")])\n\n return cleaned_data\n", "path": "apps/profiles/forms.py"}, {"content": "import hashlib\nimport urllib\n\nfrom django import template\nfrom django.conf import settings\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=True)\ndef gravatar_url(context, user, size):\n prefix = \"https://\" if context['request'].is_secure() else \"http://\"\n default = \"%s%s%s_%s.png\" % (\n prefix,\n context['request'].META.get('HTTP_HOST', 'localhost'),\n settings.DEFAULT_PROFILE_PICTURE_PREFIX,\n user.gender\n )\n\n grav_url = \"https://www.gravatar.com/avatar/\" + hashlib.md5(user.email.encode()).hexdigest() + \"?\"\n grav_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n\n return grav_url\n", "path": "apps/authentication/templatetags/gravatar_url_resolver.py"}]}
num_tokens: 1,829
num_tokens_diff: 439
problem_id: gh_patches_debug_7146
source: rasdani/github-patches
task_type: git_diff
in_source_id: certbot__certbot-7009
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Allow the certbot_dns_rfc2136 algorithm specified in it's config file to be case insensitive. ## My operating system is (include version): ``` [root@host ~]# head -n4 /etc/os-release NAME=Fedora VERSION="29 (Server Edition)" ID=fedora VERSION_ID=29 ``` ## I installed Certbot with (certbot-auto, OS package manager, pip, etc): OS Package Manager ## I ran this command and it produced this output: RFC-2136 configuration file contents(Secret is random data and not valid): ``` dns_rfc2136_server = 1.2.3.4 dns_rfc2136_name = certbot dns_rfc2136_secret = Mjk0MjQzZTg1ODk5MTUyMWMyNjZkMjBhYWY1N2UzOGVkMGU1MmM1NDg1OTJiODVmYWY4M2M2N2Uy== dns_rfc2136_algorithm = hmac-sha512 ``` ``` [root@host ~]# certbot certonly --dns-rfc2136 --dns-rfc2136-credentials /etc/letsencrypt/rfc2136.ini --server https://acme-v02.api.letsencrypt.org/directory --email [email protected] --agree-tos --no-eff-email -d 'domain.tld' -d '*.domain.tld' Saving debug log to /var/log/letsencrypt/letsencrypt.log Plugins selected: Authenticator dns-rfc2136, Installer None Obtaining a new certificate Performing the following challenges: dns-01 challenge for domain.tld dns-01 challenge for domain.tld Cleaning up challenges Unknown algorithm: hmac-sha512. ``` ## Certbot's behavior differed from what I expected because: The plugins documentation does not specify that the algorithm name is expected to be all uppercase letters. This causes the command to fail when the plugins configuration file specifies the algorithm with the name provided in lowercase letters. This is due to the algorithm validation function checking if the specified algorithm name is found in the algorithm dictionary which has the keys named with all uppercase letters. ``` 30 ALGORITHMS = { 31 'HMAC-MD5': dns.tsig.HMAC_MD5, 32 'HMAC-SHA1': dns.tsig.HMAC_SHA1, 33 'HMAC-SHA224': dns.tsig.HMAC_SHA224, 34 'HMAC-SHA256': dns.tsig.HMAC_SHA256, 35 'HMAC-SHA384': dns.tsig.HMAC_SHA384, 36 'HMAC-SHA512': dns.tsig.HMAC_SHA512 37 } [...TRUNCATED...] 57 def _validate_algorithm(self, credentials): 58 algorithm = credentials.conf('algorithm') 59 if algorithm: 60 if not self.ALGORITHMS.get(algorithm): 61 raise errors.PluginError("Unknown algorithm: {0}.".format(algorithm)) ``` This issue occurred for me due to copying and pasting the results of the `pdnsutil` output when generating the key and after looking at other utilities I couldn't find an example that only worked with the algorithm name being in all caps. I'm proposing allowing the configuration option to be valid regardless of the case of the letters provided in the value which can easily be done with the patch below. ``` diff --git a/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py b/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py index f985c9bf..2061374e 100644 --- a/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py +++ b/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py @@ -57,7 +57,7 @@ class Authenticator(dns_common.DNSAuthenticator): def _validate_algorithm(self, credentials): algorithm = credentials.conf('algorithm') if algorithm: - if not self.ALGORITHMS.get(algorithm): + if not self.ALGORITHMS.get(algorithm.upper()): raise errors.PluginError("Unknown algorithm: {0}.".format(algorithm)) def _setup_credentials(self): ``` The change made wouldn't have any effect on existing users and would simply remove the strict casing requirement currently in place. 
I have a fork with this as well as an update to the unit test for this that I can submit a pull request for should there be any interest in the change. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py` Content: ``` 1 """DNS Authenticator using RFC 2136 Dynamic Updates.""" 2 import logging 3 4 import dns.flags 5 import dns.message 6 import dns.name 7 import dns.query 8 import dns.rdataclass 9 import dns.rdatatype 10 import dns.tsig 11 import dns.tsigkeyring 12 import dns.update 13 import zope.interface 14 15 from certbot import errors 16 from certbot import interfaces 17 from certbot.plugins import dns_common 18 19 logger = logging.getLogger(__name__) 20 21 22 @zope.interface.implementer(interfaces.IAuthenticator) 23 @zope.interface.provider(interfaces.IPluginFactory) 24 class Authenticator(dns_common.DNSAuthenticator): 25 """DNS Authenticator using RFC 2136 Dynamic Updates 26 27 This Authenticator uses RFC 2136 Dynamic Updates to fulfull a dns-01 challenge. 28 """ 29 30 ALGORITHMS = { 31 'HMAC-MD5': dns.tsig.HMAC_MD5, 32 'HMAC-SHA1': dns.tsig.HMAC_SHA1, 33 'HMAC-SHA224': dns.tsig.HMAC_SHA224, 34 'HMAC-SHA256': dns.tsig.HMAC_SHA256, 35 'HMAC-SHA384': dns.tsig.HMAC_SHA384, 36 'HMAC-SHA512': dns.tsig.HMAC_SHA512 37 } 38 39 PORT = 53 40 41 description = 'Obtain certificates using a DNS TXT record (if you are using BIND for DNS).' 42 ttl = 120 43 44 def __init__(self, *args, **kwargs): 45 super(Authenticator, self).__init__(*args, **kwargs) 46 self.credentials = None 47 48 @classmethod 49 def add_parser_arguments(cls, add): # pylint: disable=arguments-differ 50 super(Authenticator, cls).add_parser_arguments(add, default_propagation_seconds=60) 51 add('credentials', help='RFC 2136 credentials INI file.') 52 53 def more_info(self): # pylint: disable=missing-docstring,no-self-use 54 return 'This plugin configures a DNS TXT record to respond to a dns-01 challenge using ' + \ 55 'RFC 2136 Dynamic Updates.' 56 57 def _validate_algorithm(self, credentials): 58 algorithm = credentials.conf('algorithm') 59 if algorithm: 60 if not self.ALGORITHMS.get(algorithm): 61 raise errors.PluginError("Unknown algorithm: {0}.".format(algorithm)) 62 63 def _setup_credentials(self): 64 self.credentials = self._configure_credentials( 65 'credentials', 66 'RFC 2136 credentials INI file', 67 { 68 'name': 'TSIG key name', 69 'secret': 'TSIG key secret', 70 'server': 'The target DNS server' 71 }, 72 self._validate_algorithm 73 ) 74 75 def _perform(self, _domain, validation_name, validation): 76 self._get_rfc2136_client().add_txt_record(validation_name, validation, self.ttl) 77 78 def _cleanup(self, _domain, validation_name, validation): 79 self._get_rfc2136_client().del_txt_record(validation_name, validation) 80 81 def _get_rfc2136_client(self): 82 return _RFC2136Client(self.credentials.conf('server'), 83 int(self.credentials.conf('port') or self.PORT), 84 self.credentials.conf('name'), 85 self.credentials.conf('secret'), 86 self.ALGORITHMS.get(self.credentials.conf('algorithm'), 87 dns.tsig.HMAC_MD5)) 88 89 90 class _RFC2136Client(object): 91 """ 92 Encapsulates all communication with the target DNS server. 
93 """ 94 def __init__(self, server, port, key_name, key_secret, key_algorithm): 95 self.server = server 96 self.port = port 97 self.keyring = dns.tsigkeyring.from_text({ 98 key_name: key_secret 99 }) 100 self.algorithm = key_algorithm 101 102 def add_txt_record(self, record_name, record_content, record_ttl): 103 """ 104 Add a TXT record using the supplied information. 105 106 :param str record_name: The record name (typically beginning with '_acme-challenge.'). 107 :param str record_content: The record content (typically the challenge validation). 108 :param int record_ttl: The record TTL (number of seconds that the record may be cached). 109 :raises certbot.errors.PluginError: if an error occurs communicating with the DNS server 110 """ 111 112 domain = self._find_domain(record_name) 113 114 n = dns.name.from_text(record_name) 115 o = dns.name.from_text(domain) 116 rel = n.relativize(o) 117 118 update = dns.update.Update( 119 domain, 120 keyring=self.keyring, 121 keyalgorithm=self.algorithm) 122 update.add(rel, record_ttl, dns.rdatatype.TXT, record_content) 123 124 try: 125 response = dns.query.tcp(update, self.server, port=self.port) 126 except Exception as e: 127 raise errors.PluginError('Encountered error adding TXT record: {0}' 128 .format(e)) 129 rcode = response.rcode() 130 131 if rcode == dns.rcode.NOERROR: 132 logger.debug('Successfully added TXT record') 133 else: 134 raise errors.PluginError('Received response from server: {0}' 135 .format(dns.rcode.to_text(rcode))) 136 137 def del_txt_record(self, record_name, record_content): 138 """ 139 Delete a TXT record using the supplied information. 140 141 :param str record_name: The record name (typically beginning with '_acme-challenge.'). 142 :param str record_content: The record content (typically the challenge validation). 143 :param int record_ttl: The record TTL (number of seconds that the record may be cached). 144 :raises certbot.errors.PluginError: if an error occurs communicating with the DNS server 145 """ 146 147 domain = self._find_domain(record_name) 148 149 n = dns.name.from_text(record_name) 150 o = dns.name.from_text(domain) 151 rel = n.relativize(o) 152 153 update = dns.update.Update( 154 domain, 155 keyring=self.keyring, 156 keyalgorithm=self.algorithm) 157 update.delete(rel, dns.rdatatype.TXT, record_content) 158 159 try: 160 response = dns.query.tcp(update, self.server, port=self.port) 161 except Exception as e: 162 raise errors.PluginError('Encountered error deleting TXT record: {0}' 163 .format(e)) 164 rcode = response.rcode() 165 166 if rcode == dns.rcode.NOERROR: 167 logger.debug('Successfully deleted TXT record') 168 else: 169 raise errors.PluginError('Received response from server: {0}' 170 .format(dns.rcode.to_text(rcode))) 171 172 def _find_domain(self, record_name): 173 """ 174 Find the closest domain with an SOA record for a given domain name. 175 176 :param str record_name: The record name for which to find the closest SOA record. 177 :returns: The domain, if found. 178 :rtype: str 179 :raises certbot.errors.PluginError: if no SOA record can be found. 180 """ 181 182 domain_name_guesses = dns_common.base_domain_name_guesses(record_name) 183 184 # Loop through until we find an authoritative SOA record 185 for guess in domain_name_guesses: 186 if self._query_soa(guess): 187 return guess 188 189 raise errors.PluginError('Unable to determine base domain for {0} using names: {1}.' 
190 .format(record_name, domain_name_guesses)) 191 192 def _query_soa(self, domain_name): 193 """ 194 Query a domain name for an authoritative SOA record. 195 196 :param str domain_name: The domain name to query for an SOA record. 197 :returns: True if found, False otherwise. 198 :rtype: bool 199 :raises certbot.errors.PluginError: if no response is received. 200 """ 201 202 domain = dns.name.from_text(domain_name) 203 204 request = dns.message.make_query(domain, dns.rdatatype.SOA, dns.rdataclass.IN) 205 # Turn off Recursion Desired bit in query 206 request.flags ^= dns.flags.RD 207 208 try: 209 response = dns.query.udp(request, self.server, port=self.port) 210 rcode = response.rcode() 211 212 # Authoritative Answer bit should be set 213 if (rcode == dns.rcode.NOERROR and response.get_rrset(response.answer, 214 domain, dns.rdataclass.IN, dns.rdatatype.SOA) and response.flags & dns.flags.AA): 215 logger.debug('Received authoritative SOA response for %s', domain_name) 216 return True 217 218 logger.debug('No authoritative SOA record found for %s', domain_name) 219 return False 220 except Exception as e: 221 raise errors.PluginError('Encountered error when making query: {0}' 222 .format(e)) 223 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py b/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py --- a/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py +++ b/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py @@ -57,7 +57,7 @@ def _validate_algorithm(self, credentials): algorithm = credentials.conf('algorithm') if algorithm: - if not self.ALGORITHMS.get(algorithm): + if not self.ALGORITHMS.get(algorithm.upper()): raise errors.PluginError("Unknown algorithm: {0}.".format(algorithm)) def _setup_credentials(self):
{"golden_diff": "diff --git a/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py b/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py\n--- a/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py\n+++ b/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py\n@@ -57,7 +57,7 @@\n def _validate_algorithm(self, credentials):\n algorithm = credentials.conf('algorithm')\n if algorithm:\n- if not self.ALGORITHMS.get(algorithm):\n+ if not self.ALGORITHMS.get(algorithm.upper()):\n raise errors.PluginError(\"Unknown algorithm: {0}.\".format(algorithm))\n \n def _setup_credentials(self):\n", "issue": "Allow the certbot_dns_rfc2136 algorithm specified in it's config file to be case insensitive.\n## My operating system is (include version):\r\n\r\n```\r\n[root@host ~]# head -n4 /etc/os-release\r\nNAME=Fedora\r\nVERSION=\"29 (Server Edition)\"\r\nID=fedora\r\nVERSION_ID=29\r\n```\r\n\r\n## I installed Certbot with (certbot-auto, OS package manager, pip, etc):\r\nOS Package Manager\r\n\r\n## I ran this command and it produced this output:\r\nRFC-2136 configuration file contents(Secret is random data and not valid):\r\n```\r\ndns_rfc2136_server = 1.2.3.4\r\ndns_rfc2136_name = certbot\r\ndns_rfc2136_secret = \r\nMjk0MjQzZTg1ODk5MTUyMWMyNjZkMjBhYWY1N2UzOGVkMGU1MmM1NDg1OTJiODVmYWY4M2M2N2Uy==\r\ndns_rfc2136_algorithm = hmac-sha512\r\n```\r\n\r\n```\r\n[root@host ~]# certbot certonly --dns-rfc2136 --dns-rfc2136-credentials /etc/letsencrypt/rfc2136.ini --server https://acme-v02.api.letsencrypt.org/directory --email [email protected] --agree-tos --no-eff-email -d 'domain.tld' -d '*.domain.tld'\r\nSaving debug log to /var/log/letsencrypt/letsencrypt.log\r\nPlugins selected: Authenticator dns-rfc2136, Installer None\r\nObtaining a new certificate\r\nPerforming the following challenges:\r\ndns-01 challenge for domain.tld\r\ndns-01 challenge for domain.tld\r\nCleaning up challenges\r\nUnknown algorithm: hmac-sha512.\r\n```\r\n\r\n## Certbot's behavior differed from what I expected because:\r\nThe plugins documentation does not specify that the algorithm name is expected to be all uppercase letters. 
This causes the command to fail when the plugins configuration file specifies the algorithm with the name provided in lowercase letters.\r\n\r\nThis is due to the algorithm validation function checking if the specified algorithm name is found in the algorithm dictionary which has the keys named with all uppercase letters.\r\n\r\n```\r\n 30 ALGORITHMS = {\r\n 31 'HMAC-MD5': dns.tsig.HMAC_MD5,\r\n 32 'HMAC-SHA1': dns.tsig.HMAC_SHA1,\r\n 33 'HMAC-SHA224': dns.tsig.HMAC_SHA224,\r\n 34 'HMAC-SHA256': dns.tsig.HMAC_SHA256,\r\n 35 'HMAC-SHA384': dns.tsig.HMAC_SHA384,\r\n 36 'HMAC-SHA512': dns.tsig.HMAC_SHA512\r\n 37 }\r\n\r\n[...TRUNCATED...]\r\n\r\n 57 def _validate_algorithm(self, credentials):\r\n 58 algorithm = credentials.conf('algorithm')\r\n 59 if algorithm:\r\n 60 if not self.ALGORITHMS.get(algorithm):\r\n 61 raise errors.PluginError(\"Unknown algorithm: {0}.\".format(algorithm))\r\n```\r\n\r\nThis issue occurred for me due to copying and pasting the results of the `pdnsutil` output when generating the key and after looking at other utilities I couldn't find an example that only worked with the algorithm name being in all caps.\r\n\r\nI'm proposing allowing the configuration option to be valid regardless of the case of the letters provided in the value which can easily be done with the patch below.\r\n\r\n```\r\ndiff --git a/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py b/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py\r\nindex f985c9bf..2061374e 100644\r\n--- a/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py\r\n+++ b/certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py\r\n@@ -57,7 +57,7 @@ class Authenticator(dns_common.DNSAuthenticator):\r\n def _validate_algorithm(self, credentials):\r\n algorithm = credentials.conf('algorithm')\r\n if algorithm:\r\n- if not self.ALGORITHMS.get(algorithm):\r\n+ if not self.ALGORITHMS.get(algorithm.upper()):\r\n raise errors.PluginError(\"Unknown algorithm: {0}.\".format(algorithm))\r\n\r\n def _setup_credentials(self):\r\n```\r\n\r\nThe change made wouldn't have any effect on existing users and would simply remove the strict casing requirement currently in place.\r\n\r\nI have a fork with this as well as an update to the unit test for this that I can submit a pull request for should there be any interest in the change.\r\n\n", "before_files": [{"content": "\"\"\"DNS Authenticator using RFC 2136 Dynamic Updates.\"\"\"\nimport logging\n\nimport dns.flags\nimport dns.message\nimport dns.name\nimport dns.query\nimport dns.rdataclass\nimport dns.rdatatype\nimport dns.tsig\nimport dns.tsigkeyring\nimport dns.update\nimport zope.interface\n\nfrom certbot import errors\nfrom certbot import interfaces\nfrom certbot.plugins import dns_common\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](interfaces.IAuthenticator)\[email protected](interfaces.IPluginFactory)\nclass Authenticator(dns_common.DNSAuthenticator):\n \"\"\"DNS Authenticator using RFC 2136 Dynamic Updates\n\n This Authenticator uses RFC 2136 Dynamic Updates to fulfull a dns-01 challenge.\n \"\"\"\n\n ALGORITHMS = {\n 'HMAC-MD5': dns.tsig.HMAC_MD5,\n 'HMAC-SHA1': dns.tsig.HMAC_SHA1,\n 'HMAC-SHA224': dns.tsig.HMAC_SHA224,\n 'HMAC-SHA256': dns.tsig.HMAC_SHA256,\n 'HMAC-SHA384': dns.tsig.HMAC_SHA384,\n 'HMAC-SHA512': dns.tsig.HMAC_SHA512\n }\n\n PORT = 53\n\n description = 'Obtain certificates using a DNS TXT record (if you are using BIND for DNS).'\n ttl = 120\n\n def __init__(self, *args, **kwargs):\n super(Authenticator, self).__init__(*args, **kwargs)\n 
self.credentials = None\n\n @classmethod\n def add_parser_arguments(cls, add): # pylint: disable=arguments-differ\n super(Authenticator, cls).add_parser_arguments(add, default_propagation_seconds=60)\n add('credentials', help='RFC 2136 credentials INI file.')\n\n def more_info(self): # pylint: disable=missing-docstring,no-self-use\n return 'This plugin configures a DNS TXT record to respond to a dns-01 challenge using ' + \\\n 'RFC 2136 Dynamic Updates.'\n\n def _validate_algorithm(self, credentials):\n algorithm = credentials.conf('algorithm')\n if algorithm:\n if not self.ALGORITHMS.get(algorithm):\n raise errors.PluginError(\"Unknown algorithm: {0}.\".format(algorithm))\n\n def _setup_credentials(self):\n self.credentials = self._configure_credentials(\n 'credentials',\n 'RFC 2136 credentials INI file',\n {\n 'name': 'TSIG key name',\n 'secret': 'TSIG key secret',\n 'server': 'The target DNS server'\n },\n self._validate_algorithm\n )\n\n def _perform(self, _domain, validation_name, validation):\n self._get_rfc2136_client().add_txt_record(validation_name, validation, self.ttl)\n\n def _cleanup(self, _domain, validation_name, validation):\n self._get_rfc2136_client().del_txt_record(validation_name, validation)\n\n def _get_rfc2136_client(self):\n return _RFC2136Client(self.credentials.conf('server'),\n int(self.credentials.conf('port') or self.PORT),\n self.credentials.conf('name'),\n self.credentials.conf('secret'),\n self.ALGORITHMS.get(self.credentials.conf('algorithm'),\n dns.tsig.HMAC_MD5))\n\n\nclass _RFC2136Client(object):\n \"\"\"\n Encapsulates all communication with the target DNS server.\n \"\"\"\n def __init__(self, server, port, key_name, key_secret, key_algorithm):\n self.server = server\n self.port = port\n self.keyring = dns.tsigkeyring.from_text({\n key_name: key_secret\n })\n self.algorithm = key_algorithm\n\n def add_txt_record(self, record_name, record_content, record_ttl):\n \"\"\"\n Add a TXT record using the supplied information.\n\n :param str record_name: The record name (typically beginning with '_acme-challenge.').\n :param str record_content: The record content (typically the challenge validation).\n :param int record_ttl: The record TTL (number of seconds that the record may be cached).\n :raises certbot.errors.PluginError: if an error occurs communicating with the DNS server\n \"\"\"\n\n domain = self._find_domain(record_name)\n\n n = dns.name.from_text(record_name)\n o = dns.name.from_text(domain)\n rel = n.relativize(o)\n\n update = dns.update.Update(\n domain,\n keyring=self.keyring,\n keyalgorithm=self.algorithm)\n update.add(rel, record_ttl, dns.rdatatype.TXT, record_content)\n\n try:\n response = dns.query.tcp(update, self.server, port=self.port)\n except Exception as e:\n raise errors.PluginError('Encountered error adding TXT record: {0}'\n .format(e))\n rcode = response.rcode()\n\n if rcode == dns.rcode.NOERROR:\n logger.debug('Successfully added TXT record')\n else:\n raise errors.PluginError('Received response from server: {0}'\n .format(dns.rcode.to_text(rcode)))\n\n def del_txt_record(self, record_name, record_content):\n \"\"\"\n Delete a TXT record using the supplied information.\n\n :param str record_name: The record name (typically beginning with '_acme-challenge.').\n :param str record_content: The record content (typically the challenge validation).\n :param int record_ttl: The record TTL (number of seconds that the record may be cached).\n :raises certbot.errors.PluginError: if an error occurs communicating with the DNS server\n \"\"\"\n\n 
domain = self._find_domain(record_name)\n\n n = dns.name.from_text(record_name)\n o = dns.name.from_text(domain)\n rel = n.relativize(o)\n\n update = dns.update.Update(\n domain,\n keyring=self.keyring,\n keyalgorithm=self.algorithm)\n update.delete(rel, dns.rdatatype.TXT, record_content)\n\n try:\n response = dns.query.tcp(update, self.server, port=self.port)\n except Exception as e:\n raise errors.PluginError('Encountered error deleting TXT record: {0}'\n .format(e))\n rcode = response.rcode()\n\n if rcode == dns.rcode.NOERROR:\n logger.debug('Successfully deleted TXT record')\n else:\n raise errors.PluginError('Received response from server: {0}'\n .format(dns.rcode.to_text(rcode)))\n\n def _find_domain(self, record_name):\n \"\"\"\n Find the closest domain with an SOA record for a given domain name.\n\n :param str record_name: The record name for which to find the closest SOA record.\n :returns: The domain, if found.\n :rtype: str\n :raises certbot.errors.PluginError: if no SOA record can be found.\n \"\"\"\n\n domain_name_guesses = dns_common.base_domain_name_guesses(record_name)\n\n # Loop through until we find an authoritative SOA record\n for guess in domain_name_guesses:\n if self._query_soa(guess):\n return guess\n\n raise errors.PluginError('Unable to determine base domain for {0} using names: {1}.'\n .format(record_name, domain_name_guesses))\n\n def _query_soa(self, domain_name):\n \"\"\"\n Query a domain name for an authoritative SOA record.\n\n :param str domain_name: The domain name to query for an SOA record.\n :returns: True if found, False otherwise.\n :rtype: bool\n :raises certbot.errors.PluginError: if no response is received.\n \"\"\"\n\n domain = dns.name.from_text(domain_name)\n\n request = dns.message.make_query(domain, dns.rdatatype.SOA, dns.rdataclass.IN)\n # Turn off Recursion Desired bit in query\n request.flags ^= dns.flags.RD\n\n try:\n response = dns.query.udp(request, self.server, port=self.port)\n rcode = response.rcode()\n\n # Authoritative Answer bit should be set\n if (rcode == dns.rcode.NOERROR and response.get_rrset(response.answer,\n domain, dns.rdataclass.IN, dns.rdatatype.SOA) and response.flags & dns.flags.AA):\n logger.debug('Received authoritative SOA response for %s', domain_name)\n return True\n\n logger.debug('No authoritative SOA record found for %s', domain_name)\n return False\n except Exception as e:\n raise errors.PluginError('Encountered error when making query: {0}'\n .format(e))\n", "path": "certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py"}], "after_files": [{"content": "\"\"\"DNS Authenticator using RFC 2136 Dynamic Updates.\"\"\"\nimport logging\n\nimport dns.flags\nimport dns.message\nimport dns.name\nimport dns.query\nimport dns.rdataclass\nimport dns.rdatatype\nimport dns.tsig\nimport dns.tsigkeyring\nimport dns.update\nimport zope.interface\n\nfrom certbot import errors\nfrom certbot import interfaces\nfrom certbot.plugins import dns_common\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](interfaces.IAuthenticator)\[email protected](interfaces.IPluginFactory)\nclass Authenticator(dns_common.DNSAuthenticator):\n \"\"\"DNS Authenticator using RFC 2136 Dynamic Updates\n\n This Authenticator uses RFC 2136 Dynamic Updates to fulfull a dns-01 challenge.\n \"\"\"\n\n ALGORITHMS = {\n 'HMAC-MD5': dns.tsig.HMAC_MD5,\n 'HMAC-SHA1': dns.tsig.HMAC_SHA1,\n 'HMAC-SHA224': dns.tsig.HMAC_SHA224,\n 'HMAC-SHA256': dns.tsig.HMAC_SHA256,\n 'HMAC-SHA384': dns.tsig.HMAC_SHA384,\n 'HMAC-SHA512': dns.tsig.HMAC_SHA512\n }\n\n PORT = 
53\n\n description = 'Obtain certificates using a DNS TXT record (if you are using BIND for DNS).'\n ttl = 120\n\n def __init__(self, *args, **kwargs):\n super(Authenticator, self).__init__(*args, **kwargs)\n self.credentials = None\n\n @classmethod\n def add_parser_arguments(cls, add): # pylint: disable=arguments-differ\n super(Authenticator, cls).add_parser_arguments(add, default_propagation_seconds=60)\n add('credentials', help='RFC 2136 credentials INI file.')\n\n def more_info(self): # pylint: disable=missing-docstring,no-self-use\n return 'This plugin configures a DNS TXT record to respond to a dns-01 challenge using ' + \\\n 'RFC 2136 Dynamic Updates.'\n\n def _validate_algorithm(self, credentials):\n algorithm = credentials.conf('algorithm')\n if algorithm:\n if not self.ALGORITHMS.get(algorithm.upper()):\n raise errors.PluginError(\"Unknown algorithm: {0}.\".format(algorithm))\n\n def _setup_credentials(self):\n self.credentials = self._configure_credentials(\n 'credentials',\n 'RFC 2136 credentials INI file',\n {\n 'name': 'TSIG key name',\n 'secret': 'TSIG key secret',\n 'server': 'The target DNS server'\n },\n self._validate_algorithm\n )\n\n def _perform(self, _domain, validation_name, validation):\n self._get_rfc2136_client().add_txt_record(validation_name, validation, self.ttl)\n\n def _cleanup(self, _domain, validation_name, validation):\n self._get_rfc2136_client().del_txt_record(validation_name, validation)\n\n def _get_rfc2136_client(self):\n return _RFC2136Client(self.credentials.conf('server'),\n int(self.credentials.conf('port') or self.PORT),\n self.credentials.conf('name'),\n self.credentials.conf('secret'),\n self.ALGORITHMS.get(self.credentials.conf('algorithm'),\n dns.tsig.HMAC_MD5))\n\n\nclass _RFC2136Client(object):\n \"\"\"\n Encapsulates all communication with the target DNS server.\n \"\"\"\n def __init__(self, server, port, key_name, key_secret, key_algorithm):\n self.server = server\n self.port = port\n self.keyring = dns.tsigkeyring.from_text({\n key_name: key_secret\n })\n self.algorithm = key_algorithm\n\n def add_txt_record(self, record_name, record_content, record_ttl):\n \"\"\"\n Add a TXT record using the supplied information.\n\n :param str record_name: The record name (typically beginning with '_acme-challenge.').\n :param str record_content: The record content (typically the challenge validation).\n :param int record_ttl: The record TTL (number of seconds that the record may be cached).\n :raises certbot.errors.PluginError: if an error occurs communicating with the DNS server\n \"\"\"\n\n domain = self._find_domain(record_name)\n\n n = dns.name.from_text(record_name)\n o = dns.name.from_text(domain)\n rel = n.relativize(o)\n\n update = dns.update.Update(\n domain,\n keyring=self.keyring,\n keyalgorithm=self.algorithm)\n update.add(rel, record_ttl, dns.rdatatype.TXT, record_content)\n\n try:\n response = dns.query.tcp(update, self.server, port=self.port)\n except Exception as e:\n raise errors.PluginError('Encountered error adding TXT record: {0}'\n .format(e))\n rcode = response.rcode()\n\n if rcode == dns.rcode.NOERROR:\n logger.debug('Successfully added TXT record')\n else:\n raise errors.PluginError('Received response from server: {0}'\n .format(dns.rcode.to_text(rcode)))\n\n def del_txt_record(self, record_name, record_content):\n \"\"\"\n Delete a TXT record using the supplied information.\n\n :param str record_name: The record name (typically beginning with '_acme-challenge.').\n :param str record_content: The record content (typically the 
challenge validation).\n :param int record_ttl: The record TTL (number of seconds that the record may be cached).\n :raises certbot.errors.PluginError: if an error occurs communicating with the DNS server\n \"\"\"\n\n domain = self._find_domain(record_name)\n\n n = dns.name.from_text(record_name)\n o = dns.name.from_text(domain)\n rel = n.relativize(o)\n\n update = dns.update.Update(\n domain,\n keyring=self.keyring,\n keyalgorithm=self.algorithm)\n update.delete(rel, dns.rdatatype.TXT, record_content)\n\n try:\n response = dns.query.tcp(update, self.server, port=self.port)\n except Exception as e:\n raise errors.PluginError('Encountered error deleting TXT record: {0}'\n .format(e))\n rcode = response.rcode()\n\n if rcode == dns.rcode.NOERROR:\n logger.debug('Successfully deleted TXT record')\n else:\n raise errors.PluginError('Received response from server: {0}'\n .format(dns.rcode.to_text(rcode)))\n\n def _find_domain(self, record_name):\n \"\"\"\n Find the closest domain with an SOA record for a given domain name.\n\n :param str record_name: The record name for which to find the closest SOA record.\n :returns: The domain, if found.\n :rtype: str\n :raises certbot.errors.PluginError: if no SOA record can be found.\n \"\"\"\n\n domain_name_guesses = dns_common.base_domain_name_guesses(record_name)\n\n # Loop through until we find an authoritative SOA record\n for guess in domain_name_guesses:\n if self._query_soa(guess):\n return guess\n\n raise errors.PluginError('Unable to determine base domain for {0} using names: {1}.'\n .format(record_name, domain_name_guesses))\n\n def _query_soa(self, domain_name):\n \"\"\"\n Query a domain name for an authoritative SOA record.\n\n :param str domain_name: The domain name to query for an SOA record.\n :returns: True if found, False otherwise.\n :rtype: bool\n :raises certbot.errors.PluginError: if no response is received.\n \"\"\"\n\n domain = dns.name.from_text(domain_name)\n\n request = dns.message.make_query(domain, dns.rdatatype.SOA, dns.rdataclass.IN)\n # Turn off Recursion Desired bit in query\n request.flags ^= dns.flags.RD\n\n try:\n response = dns.query.udp(request, self.server, port=self.port)\n rcode = response.rcode()\n\n # Authoritative Answer bit should be set\n if (rcode == dns.rcode.NOERROR and response.get_rrset(response.answer,\n domain, dns.rdataclass.IN, dns.rdatatype.SOA) and response.flags & dns.flags.AA):\n logger.debug('Received authoritative SOA response for %s', domain_name)\n return True\n\n logger.debug('No authoritative SOA record found for %s', domain_name)\n return False\n except Exception as e:\n raise errors.PluginError('Encountered error when making query: {0}'\n .format(e))\n", "path": "certbot-dns-rfc2136/certbot_dns_rfc2136/dns_rfc2136.py"}]}
3,847
215
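For quick reference, a dependency-free sketch of the case-insensitive lookup that the golden diff in the record above introduces. The dict keys mirror the plugin's ALGORITHMS mapping as quoted in the record; the values are placeholder strings standing in for the dns.tsig constants (an assumption made only so the snippet runs without dnspython installed).

```python
# Placeholder mapping mirroring the plugin's ALGORITHMS dict; the real plugin
# maps these keys to dns.tsig constants from dnspython.
ALGORITHMS = {
    'HMAC-MD5': 'hmac-md5',
    'HMAC-SHA1': 'hmac-sha1',
    'HMAC-SHA224': 'hmac-sha224',
    'HMAC-SHA256': 'hmac-sha256',
    'HMAC-SHA384': 'hmac-sha384',
    'HMAC-SHA512': 'hmac-sha512',
}


def validate_algorithm(algorithm):
    """Reject unknown TSIG algorithm names, ignoring the case of the input."""
    if algorithm and not ALGORITHMS.get(algorithm.upper()):
        raise ValueError("Unknown algorithm: {0}.".format(algorithm))


# Both spellings from the issue now pass validation.
validate_algorithm('hmac-sha512')   # lowercase, as copied from pdnsutil output
validate_algorithm('HMAC-SHA512')   # uppercase, as the dict keys are written
```

Note that in the after_files quoted in this record, `_get_rfc2136_client` still passes the raw config value to `ALGORITHMS.get(..., dns.tsig.HMAC_MD5)`; only the validation step becomes case-insensitive in this particular patch.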
gh_patches_debug_33077
rasdani/github-patches
git_diff
certbot__certbot-8898
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- apache authenticator should authenticate all duplicate vhosts See https://community.letsencrypt.org/t/cant-renew-certificate/153274/3?u=_az. I've seen this a few other times as well. The Apache authenticator needs to authenticate all of the virtual hosts which could match the requested domain, including duplicates and HTTPS ones. #6729 fixed this for the most part, but there are seemingly still some edge cases left over. Let's investigate and fix those. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `certbot-apache/certbot_apache/_internal/http_01.py` Content: ``` 1 """A class that performs HTTP-01 challenges for Apache""" 2 import errno 3 import logging 4 from typing import List 5 from typing import Set 6 7 from certbot import errors 8 from certbot.compat import filesystem 9 from certbot.compat import os 10 from certbot.plugins import common 11 from certbot_apache._internal.obj import VirtualHost # pylint: disable=unused-import 12 from certbot_apache._internal.parser import get_aug_path 13 14 logger = logging.getLogger(__name__) 15 16 17 class ApacheHttp01(common.ChallengePerformer): 18 """Class that performs HTTP-01 challenges within the Apache configurator.""" 19 20 CONFIG_TEMPLATE22_PRE = """\ 21 RewriteEngine on 22 RewriteRule ^/\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [L] 23 24 """ 25 CONFIG_TEMPLATE22_POST = """\ 26 <Directory {0}> 27 Order Allow,Deny 28 Allow from all 29 </Directory> 30 <Location /.well-known/acme-challenge> 31 Order Allow,Deny 32 Allow from all 33 </Location> 34 """ 35 36 CONFIG_TEMPLATE24_PRE = """\ 37 RewriteEngine on 38 RewriteRule ^/\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [END] 39 """ 40 CONFIG_TEMPLATE24_POST = """\ 41 <Directory {0}> 42 Require all granted 43 </Directory> 44 <Location /.well-known/acme-challenge> 45 Require all granted 46 </Location> 47 """ 48 49 def __init__(self, *args, **kwargs): 50 super().__init__(*args, **kwargs) 51 self.challenge_conf_pre = os.path.join( 52 self.configurator.conf("challenge-location"), 53 "le_http_01_challenge_pre.conf") 54 self.challenge_conf_post = os.path.join( 55 self.configurator.conf("challenge-location"), 56 "le_http_01_challenge_post.conf") 57 self.challenge_dir = os.path.join( 58 self.configurator.config.work_dir, 59 "http_challenges") 60 self.moded_vhosts: Set[VirtualHost] = set() 61 62 def perform(self): 63 """Perform all HTTP-01 challenges.""" 64 if not self.achalls: 65 return [] 66 # Save any changes to the configuration as a precaution 67 # About to make temporary changes to the config 68 self.configurator.save("Changes before challenge setup", True) 69 70 self.configurator.ensure_listen(str( 71 self.configurator.config.http01_port)) 72 self.prepare_http01_modules() 73 74 responses = self._set_up_challenges() 75 76 self._mod_config() 77 # Save reversible changes 78 self.configurator.save("HTTP Challenge", True) 79 80 return responses 81 82 def prepare_http01_modules(self): 83 """Make sure that we have the needed modules available for http01""" 84 85 if self.configurator.conf("handle-modules"): 86 needed_modules = ["rewrite"] 87 if self.configurator.version < (2, 4): 88 needed_modules.append("authz_host") 89 else: 90 needed_modules.append("authz_core") 91 for mod in needed_modules: 92 if mod + "_module" not in self.configurator.parser.modules: 93 
self.configurator.enable_mod(mod, temp=True) 94 95 def _mod_config(self): 96 selected_vhosts: List[VirtualHost] = [] 97 http_port = str(self.configurator.config.http01_port) 98 for chall in self.achalls: 99 # Search for matching VirtualHosts 100 for vh in self._matching_vhosts(chall.domain): 101 selected_vhosts.append(vh) 102 103 # Ensure that we have one or more VirtualHosts that we can continue 104 # with. (one that listens to port configured with --http-01-port) 105 found = False 106 for vhost in selected_vhosts: 107 if any(a.is_wildcard() or a.get_port() == http_port for a in vhost.addrs): 108 found = True 109 110 if not found: 111 for vh in self._relevant_vhosts(): 112 selected_vhosts.append(vh) 113 114 # Add the challenge configuration 115 for vh in selected_vhosts: 116 self._set_up_include_directives(vh) 117 118 self.configurator.reverter.register_file_creation( 119 True, self.challenge_conf_pre) 120 self.configurator.reverter.register_file_creation( 121 True, self.challenge_conf_post) 122 123 if self.configurator.version < (2, 4): 124 config_template_pre = self.CONFIG_TEMPLATE22_PRE 125 config_template_post = self.CONFIG_TEMPLATE22_POST 126 else: 127 config_template_pre = self.CONFIG_TEMPLATE24_PRE 128 config_template_post = self.CONFIG_TEMPLATE24_POST 129 130 config_text_pre = config_template_pre.format(self.challenge_dir) 131 config_text_post = config_template_post.format(self.challenge_dir) 132 133 logger.debug("writing a pre config file with text:\n %s", config_text_pre) 134 with open(self.challenge_conf_pre, "w") as new_conf: 135 new_conf.write(config_text_pre) 136 logger.debug("writing a post config file with text:\n %s", config_text_post) 137 with open(self.challenge_conf_post, "w") as new_conf: 138 new_conf.write(config_text_post) 139 140 def _matching_vhosts(self, domain): 141 """Return all VirtualHost objects that have the requested domain name or 142 a wildcard name that would match the domain in ServerName or ServerAlias 143 directive. 144 """ 145 matching_vhosts = [] 146 for vhost in self.configurator.vhosts: 147 if self.configurator.domain_in_names(vhost.get_names(), domain): 148 # domain_in_names also matches the exact names, so no need 149 # to check "domain in vhost.get_names()" explicitly here 150 matching_vhosts.append(vhost) 151 152 return matching_vhosts 153 154 def _relevant_vhosts(self): 155 http01_port = str(self.configurator.config.http01_port) 156 relevant_vhosts = [] 157 for vhost in self.configurator.vhosts: 158 if any(a.is_wildcard() or a.get_port() == http01_port for a in vhost.addrs): 159 if not vhost.ssl: 160 relevant_vhosts.append(vhost) 161 if not relevant_vhosts: 162 raise errors.PluginError( 163 "Unable to find a virtual host listening on port {0} which is" 164 " currently needed for Certbot to prove to the CA that you" 165 " control your domain. 
Please add a virtual host for port" 166 " {0}.".format(http01_port)) 167 168 return relevant_vhosts 169 170 def _set_up_challenges(self): 171 if not os.path.isdir(self.challenge_dir): 172 old_umask = filesystem.umask(0o022) 173 try: 174 filesystem.makedirs(self.challenge_dir, 0o755) 175 except OSError as exception: 176 if exception.errno not in (errno.EEXIST, errno.EISDIR): 177 raise errors.PluginError( 178 "Couldn't create root for http-01 challenge") 179 finally: 180 filesystem.umask(old_umask) 181 182 responses = [] 183 for achall in self.achalls: 184 responses.append(self._set_up_challenge(achall)) 185 186 return responses 187 188 def _set_up_challenge(self, achall): 189 response, validation = achall.response_and_validation() 190 191 name = os.path.join(self.challenge_dir, achall.chall.encode("token")) 192 193 self.configurator.reverter.register_file_creation(True, name) 194 with open(name, 'wb') as f: 195 f.write(validation.encode()) 196 filesystem.chmod(name, 0o644) 197 198 return response 199 200 def _set_up_include_directives(self, vhost): 201 """Includes override configuration to the beginning and to the end of 202 VirtualHost. Note that this include isn't added to Augeas search tree""" 203 204 if vhost not in self.moded_vhosts: 205 logger.debug( 206 "Adding a temporary challenge validation Include for name: %s in: %s", 207 vhost.name, vhost.filep) 208 self.configurator.parser.add_dir_beginning( 209 vhost.path, "Include", self.challenge_conf_pre) 210 self.configurator.parser.add_dir( 211 vhost.path, "Include", self.challenge_conf_post) 212 213 if not vhost.enabled: 214 self.configurator.parser.add_dir( 215 get_aug_path(self.configurator.parser.loc["default"]), 216 "Include", vhost.filep) 217 218 self.moded_vhosts.add(vhost) 219 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/certbot-apache/certbot_apache/_internal/http_01.py b/certbot-apache/certbot_apache/_internal/http_01.py --- a/certbot-apache/certbot_apache/_internal/http_01.py +++ b/certbot-apache/certbot_apache/_internal/http_01.py @@ -95,10 +95,10 @@ def _mod_config(self): selected_vhosts: List[VirtualHost] = [] http_port = str(self.configurator.config.http01_port) + + # Search for VirtualHosts matching by name for chall in self.achalls: - # Search for matching VirtualHosts - for vh in self._matching_vhosts(chall.domain): - selected_vhosts.append(vh) + selected_vhosts += self._matching_vhosts(chall.domain) # Ensure that we have one or more VirtualHosts that we can continue # with. (one that listens to port configured with --http-01-port) @@ -107,9 +107,13 @@ if any(a.is_wildcard() or a.get_port() == http_port for a in vhost.addrs): found = True - if not found: - for vh in self._relevant_vhosts(): - selected_vhosts.append(vh) + # If there's at least one elgible VirtualHost, also add all unnamed VirtualHosts + # because they might match at runtime (#8890) + if found: + selected_vhosts += self._unnamed_vhosts() + # Otherwise, add every Virtualhost which listens on the right port + else: + selected_vhosts += self._relevant_vhosts() # Add the challenge configuration for vh in selected_vhosts: @@ -167,6 +171,10 @@ return relevant_vhosts + def _unnamed_vhosts(self) -> List[VirtualHost]: + """Return all VirtualHost objects with no ServerName""" + return [vh for vh in self.configurator.vhosts if vh.name is None] + def _set_up_challenges(self): if not os.path.isdir(self.challenge_dir): old_umask = filesystem.umask(0o022)
{"golden_diff": "diff --git a/certbot-apache/certbot_apache/_internal/http_01.py b/certbot-apache/certbot_apache/_internal/http_01.py\n--- a/certbot-apache/certbot_apache/_internal/http_01.py\n+++ b/certbot-apache/certbot_apache/_internal/http_01.py\n@@ -95,10 +95,10 @@\n def _mod_config(self):\n selected_vhosts: List[VirtualHost] = []\n http_port = str(self.configurator.config.http01_port)\n+\n+ # Search for VirtualHosts matching by name\n for chall in self.achalls:\n- # Search for matching VirtualHosts\n- for vh in self._matching_vhosts(chall.domain):\n- selected_vhosts.append(vh)\n+ selected_vhosts += self._matching_vhosts(chall.domain)\n \n # Ensure that we have one or more VirtualHosts that we can continue\n # with. (one that listens to port configured with --http-01-port)\n@@ -107,9 +107,13 @@\n if any(a.is_wildcard() or a.get_port() == http_port for a in vhost.addrs):\n found = True\n \n- if not found:\n- for vh in self._relevant_vhosts():\n- selected_vhosts.append(vh)\n+ # If there's at least one elgible VirtualHost, also add all unnamed VirtualHosts\n+ # because they might match at runtime (#8890)\n+ if found:\n+ selected_vhosts += self._unnamed_vhosts()\n+ # Otherwise, add every Virtualhost which listens on the right port\n+ else:\n+ selected_vhosts += self._relevant_vhosts()\n \n # Add the challenge configuration\n for vh in selected_vhosts:\n@@ -167,6 +171,10 @@\n \n return relevant_vhosts\n \n+ def _unnamed_vhosts(self) -> List[VirtualHost]:\n+ \"\"\"Return all VirtualHost objects with no ServerName\"\"\"\n+ return [vh for vh in self.configurator.vhosts if vh.name is None]\n+\n def _set_up_challenges(self):\n if not os.path.isdir(self.challenge_dir):\n old_umask = filesystem.umask(0o022)\n", "issue": "apache authenticator should authenticate all duplicate vhosts\nSee https://community.letsencrypt.org/t/cant-renew-certificate/153274/3?u=_az. 
I've seen this a few other times as well.\r\n\r\nThe Apache authenticator needs to authenticate all of the virtual hosts which could match the requested domain, including duplicates and HTTPS ones.\r\n\r\n#6729 fixed this for the most part, but there are seemingly still some edge cases left over.\r\n\r\nLet's investigate and fix those.\n", "before_files": [{"content": "\"\"\"A class that performs HTTP-01 challenges for Apache\"\"\"\nimport errno\nimport logging\nfrom typing import List\nfrom typing import Set\n\nfrom certbot import errors\nfrom certbot.compat import filesystem\nfrom certbot.compat import os\nfrom certbot.plugins import common\nfrom certbot_apache._internal.obj import VirtualHost # pylint: disable=unused-import\nfrom certbot_apache._internal.parser import get_aug_path\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApacheHttp01(common.ChallengePerformer):\n \"\"\"Class that performs HTTP-01 challenges within the Apache configurator.\"\"\"\n\n CONFIG_TEMPLATE22_PRE = \"\"\"\\\n RewriteEngine on\n RewriteRule ^/\\\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [L]\n\n \"\"\"\n CONFIG_TEMPLATE22_POST = \"\"\"\\\n <Directory {0}>\n Order Allow,Deny\n Allow from all\n </Directory>\n <Location /.well-known/acme-challenge>\n Order Allow,Deny\n Allow from all\n </Location>\n \"\"\"\n\n CONFIG_TEMPLATE24_PRE = \"\"\"\\\n RewriteEngine on\n RewriteRule ^/\\\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [END]\n \"\"\"\n CONFIG_TEMPLATE24_POST = \"\"\"\\\n <Directory {0}>\n Require all granted\n </Directory>\n <Location /.well-known/acme-challenge>\n Require all granted\n </Location>\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.challenge_conf_pre = os.path.join(\n self.configurator.conf(\"challenge-location\"),\n \"le_http_01_challenge_pre.conf\")\n self.challenge_conf_post = os.path.join(\n self.configurator.conf(\"challenge-location\"),\n \"le_http_01_challenge_post.conf\")\n self.challenge_dir = os.path.join(\n self.configurator.config.work_dir,\n \"http_challenges\")\n self.moded_vhosts: Set[VirtualHost] = set()\n\n def perform(self):\n \"\"\"Perform all HTTP-01 challenges.\"\"\"\n if not self.achalls:\n return []\n # Save any changes to the configuration as a precaution\n # About to make temporary changes to the config\n self.configurator.save(\"Changes before challenge setup\", True)\n\n self.configurator.ensure_listen(str(\n self.configurator.config.http01_port))\n self.prepare_http01_modules()\n\n responses = self._set_up_challenges()\n\n self._mod_config()\n # Save reversible changes\n self.configurator.save(\"HTTP Challenge\", True)\n\n return responses\n\n def prepare_http01_modules(self):\n \"\"\"Make sure that we have the needed modules available for http01\"\"\"\n\n if self.configurator.conf(\"handle-modules\"):\n needed_modules = [\"rewrite\"]\n if self.configurator.version < (2, 4):\n needed_modules.append(\"authz_host\")\n else:\n needed_modules.append(\"authz_core\")\n for mod in needed_modules:\n if mod + \"_module\" not in self.configurator.parser.modules:\n self.configurator.enable_mod(mod, temp=True)\n\n def _mod_config(self):\n selected_vhosts: List[VirtualHost] = []\n http_port = str(self.configurator.config.http01_port)\n for chall in self.achalls:\n # Search for matching VirtualHosts\n for vh in self._matching_vhosts(chall.domain):\n selected_vhosts.append(vh)\n\n # Ensure that we have one or more VirtualHosts that we can continue\n # with. 
(one that listens to port configured with --http-01-port)\n found = False\n for vhost in selected_vhosts:\n if any(a.is_wildcard() or a.get_port() == http_port for a in vhost.addrs):\n found = True\n\n if not found:\n for vh in self._relevant_vhosts():\n selected_vhosts.append(vh)\n\n # Add the challenge configuration\n for vh in selected_vhosts:\n self._set_up_include_directives(vh)\n\n self.configurator.reverter.register_file_creation(\n True, self.challenge_conf_pre)\n self.configurator.reverter.register_file_creation(\n True, self.challenge_conf_post)\n\n if self.configurator.version < (2, 4):\n config_template_pre = self.CONFIG_TEMPLATE22_PRE\n config_template_post = self.CONFIG_TEMPLATE22_POST\n else:\n config_template_pre = self.CONFIG_TEMPLATE24_PRE\n config_template_post = self.CONFIG_TEMPLATE24_POST\n\n config_text_pre = config_template_pre.format(self.challenge_dir)\n config_text_post = config_template_post.format(self.challenge_dir)\n\n logger.debug(\"writing a pre config file with text:\\n %s\", config_text_pre)\n with open(self.challenge_conf_pre, \"w\") as new_conf:\n new_conf.write(config_text_pre)\n logger.debug(\"writing a post config file with text:\\n %s\", config_text_post)\n with open(self.challenge_conf_post, \"w\") as new_conf:\n new_conf.write(config_text_post)\n\n def _matching_vhosts(self, domain):\n \"\"\"Return all VirtualHost objects that have the requested domain name or\n a wildcard name that would match the domain in ServerName or ServerAlias\n directive.\n \"\"\"\n matching_vhosts = []\n for vhost in self.configurator.vhosts:\n if self.configurator.domain_in_names(vhost.get_names(), domain):\n # domain_in_names also matches the exact names, so no need\n # to check \"domain in vhost.get_names()\" explicitly here\n matching_vhosts.append(vhost)\n\n return matching_vhosts\n\n def _relevant_vhosts(self):\n http01_port = str(self.configurator.config.http01_port)\n relevant_vhosts = []\n for vhost in self.configurator.vhosts:\n if any(a.is_wildcard() or a.get_port() == http01_port for a in vhost.addrs):\n if not vhost.ssl:\n relevant_vhosts.append(vhost)\n if not relevant_vhosts:\n raise errors.PluginError(\n \"Unable to find a virtual host listening on port {0} which is\"\n \" currently needed for Certbot to prove to the CA that you\"\n \" control your domain. Please add a virtual host for port\"\n \" {0}.\".format(http01_port))\n\n return relevant_vhosts\n\n def _set_up_challenges(self):\n if not os.path.isdir(self.challenge_dir):\n old_umask = filesystem.umask(0o022)\n try:\n filesystem.makedirs(self.challenge_dir, 0o755)\n except OSError as exception:\n if exception.errno not in (errno.EEXIST, errno.EISDIR):\n raise errors.PluginError(\n \"Couldn't create root for http-01 challenge\")\n finally:\n filesystem.umask(old_umask)\n\n responses = []\n for achall in self.achalls:\n responses.append(self._set_up_challenge(achall))\n\n return responses\n\n def _set_up_challenge(self, achall):\n response, validation = achall.response_and_validation()\n\n name = os.path.join(self.challenge_dir, achall.chall.encode(\"token\"))\n\n self.configurator.reverter.register_file_creation(True, name)\n with open(name, 'wb') as f:\n f.write(validation.encode())\n filesystem.chmod(name, 0o644)\n\n return response\n\n def _set_up_include_directives(self, vhost):\n \"\"\"Includes override configuration to the beginning and to the end of\n VirtualHost. 
Note that this include isn't added to Augeas search tree\"\"\"\n\n if vhost not in self.moded_vhosts:\n logger.debug(\n \"Adding a temporary challenge validation Include for name: %s in: %s\",\n vhost.name, vhost.filep)\n self.configurator.parser.add_dir_beginning(\n vhost.path, \"Include\", self.challenge_conf_pre)\n self.configurator.parser.add_dir(\n vhost.path, \"Include\", self.challenge_conf_post)\n\n if not vhost.enabled:\n self.configurator.parser.add_dir(\n get_aug_path(self.configurator.parser.loc[\"default\"]),\n \"Include\", vhost.filep)\n\n self.moded_vhosts.add(vhost)\n", "path": "certbot-apache/certbot_apache/_internal/http_01.py"}], "after_files": [{"content": "\"\"\"A class that performs HTTP-01 challenges for Apache\"\"\"\nimport errno\nimport logging\nfrom typing import List\nfrom typing import Set\n\nfrom certbot import errors\nfrom certbot.compat import filesystem\nfrom certbot.compat import os\nfrom certbot.plugins import common\nfrom certbot_apache._internal.obj import VirtualHost # pylint: disable=unused-import\nfrom certbot_apache._internal.parser import get_aug_path\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApacheHttp01(common.ChallengePerformer):\n \"\"\"Class that performs HTTP-01 challenges within the Apache configurator.\"\"\"\n\n CONFIG_TEMPLATE22_PRE = \"\"\"\\\n RewriteEngine on\n RewriteRule ^/\\\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [L]\n\n \"\"\"\n CONFIG_TEMPLATE22_POST = \"\"\"\\\n <Directory {0}>\n Order Allow,Deny\n Allow from all\n </Directory>\n <Location /.well-known/acme-challenge>\n Order Allow,Deny\n Allow from all\n </Location>\n \"\"\"\n\n CONFIG_TEMPLATE24_PRE = \"\"\"\\\n RewriteEngine on\n RewriteRule ^/\\\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [END]\n \"\"\"\n CONFIG_TEMPLATE24_POST = \"\"\"\\\n <Directory {0}>\n Require all granted\n </Directory>\n <Location /.well-known/acme-challenge>\n Require all granted\n </Location>\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.challenge_conf_pre = os.path.join(\n self.configurator.conf(\"challenge-location\"),\n \"le_http_01_challenge_pre.conf\")\n self.challenge_conf_post = os.path.join(\n self.configurator.conf(\"challenge-location\"),\n \"le_http_01_challenge_post.conf\")\n self.challenge_dir = os.path.join(\n self.configurator.config.work_dir,\n \"http_challenges\")\n self.moded_vhosts: Set[VirtualHost] = set()\n\n def perform(self):\n \"\"\"Perform all HTTP-01 challenges.\"\"\"\n if not self.achalls:\n return []\n # Save any changes to the configuration as a precaution\n # About to make temporary changes to the config\n self.configurator.save(\"Changes before challenge setup\", True)\n\n self.configurator.ensure_listen(str(\n self.configurator.config.http01_port))\n self.prepare_http01_modules()\n\n responses = self._set_up_challenges()\n\n self._mod_config()\n # Save reversible changes\n self.configurator.save(\"HTTP Challenge\", True)\n\n return responses\n\n def prepare_http01_modules(self):\n \"\"\"Make sure that we have the needed modules available for http01\"\"\"\n\n if self.configurator.conf(\"handle-modules\"):\n needed_modules = [\"rewrite\"]\n if self.configurator.version < (2, 4):\n needed_modules.append(\"authz_host\")\n else:\n needed_modules.append(\"authz_core\")\n for mod in needed_modules:\n if mod + \"_module\" not in self.configurator.parser.modules:\n self.configurator.enable_mod(mod, temp=True)\n\n def _mod_config(self):\n selected_vhosts: List[VirtualHost] = []\n http_port = 
str(self.configurator.config.http01_port)\n\n # Search for VirtualHosts matching by name\n for chall in self.achalls:\n selected_vhosts += self._matching_vhosts(chall.domain)\n\n # Ensure that we have one or more VirtualHosts that we can continue\n # with. (one that listens to port configured with --http-01-port)\n found = False\n for vhost in selected_vhosts:\n if any(a.is_wildcard() or a.get_port() == http_port for a in vhost.addrs):\n found = True\n\n # If there's at least one elgible VirtualHost, also add all unnamed VirtualHosts\n # because they might match at runtime (#8890)\n if found:\n selected_vhosts += self._unnamed_vhosts()\n # Otherwise, add every Virtualhost which listens on the right port\n else:\n selected_vhosts += self._relevant_vhosts()\n\n # Add the challenge configuration\n for vh in selected_vhosts:\n self._set_up_include_directives(vh)\n\n self.configurator.reverter.register_file_creation(\n True, self.challenge_conf_pre)\n self.configurator.reverter.register_file_creation(\n True, self.challenge_conf_post)\n\n if self.configurator.version < (2, 4):\n config_template_pre = self.CONFIG_TEMPLATE22_PRE\n config_template_post = self.CONFIG_TEMPLATE22_POST\n else:\n config_template_pre = self.CONFIG_TEMPLATE24_PRE\n config_template_post = self.CONFIG_TEMPLATE24_POST\n\n config_text_pre = config_template_pre.format(self.challenge_dir)\n config_text_post = config_template_post.format(self.challenge_dir)\n\n logger.debug(\"writing a pre config file with text:\\n %s\", config_text_pre)\n with open(self.challenge_conf_pre, \"w\") as new_conf:\n new_conf.write(config_text_pre)\n logger.debug(\"writing a post config file with text:\\n %s\", config_text_post)\n with open(self.challenge_conf_post, \"w\") as new_conf:\n new_conf.write(config_text_post)\n\n def _matching_vhosts(self, domain):\n \"\"\"Return all VirtualHost objects that have the requested domain name or\n a wildcard name that would match the domain in ServerName or ServerAlias\n directive.\n \"\"\"\n matching_vhosts = []\n for vhost in self.configurator.vhosts:\n if self.configurator.domain_in_names(vhost.get_names(), domain):\n # domain_in_names also matches the exact names, so no need\n # to check \"domain in vhost.get_names()\" explicitly here\n matching_vhosts.append(vhost)\n\n return matching_vhosts\n\n def _relevant_vhosts(self):\n http01_port = str(self.configurator.config.http01_port)\n relevant_vhosts = []\n for vhost in self.configurator.vhosts:\n if any(a.is_wildcard() or a.get_port() == http01_port for a in vhost.addrs):\n if not vhost.ssl:\n relevant_vhosts.append(vhost)\n if not relevant_vhosts:\n raise errors.PluginError(\n \"Unable to find a virtual host listening on port {0} which is\"\n \" currently needed for Certbot to prove to the CA that you\"\n \" control your domain. 
Please add a virtual host for port\"\n \" {0}.\".format(http01_port))\n\n return relevant_vhosts\n\n def _unnamed_vhosts(self) -> List[VirtualHost]:\n \"\"\"Return all VirtualHost objects with no ServerName\"\"\"\n return [vh for vh in self.configurator.vhosts if vh.name is None]\n\n def _set_up_challenges(self):\n if not os.path.isdir(self.challenge_dir):\n old_umask = filesystem.umask(0o022)\n try:\n filesystem.makedirs(self.challenge_dir, 0o755)\n except OSError as exception:\n if exception.errno not in (errno.EEXIST, errno.EISDIR):\n raise errors.PluginError(\n \"Couldn't create root for http-01 challenge\")\n finally:\n filesystem.umask(old_umask)\n\n responses = []\n for achall in self.achalls:\n responses.append(self._set_up_challenge(achall))\n\n return responses\n\n def _set_up_challenge(self, achall):\n response, validation = achall.response_and_validation()\n\n name = os.path.join(self.challenge_dir, achall.chall.encode(\"token\"))\n\n self.configurator.reverter.register_file_creation(True, name)\n with open(name, 'wb') as f:\n f.write(validation.encode())\n filesystem.chmod(name, 0o644)\n\n return response\n\n def _set_up_include_directives(self, vhost):\n \"\"\"Includes override configuration to the beginning and to the end of\n VirtualHost. Note that this include isn't added to Augeas search tree\"\"\"\n\n if vhost not in self.moded_vhosts:\n logger.debug(\n \"Adding a temporary challenge validation Include for name: %s in: %s\",\n vhost.name, vhost.filep)\n self.configurator.parser.add_dir_beginning(\n vhost.path, \"Include\", self.challenge_conf_pre)\n self.configurator.parser.add_dir(\n vhost.path, \"Include\", self.challenge_conf_post)\n\n if not vhost.enabled:\n self.configurator.parser.add_dir(\n get_aug_path(self.configurator.parser.loc[\"default\"]),\n \"Include\", vhost.filep)\n\n self.moded_vhosts.add(vhost)\n", "path": "certbot-apache/certbot_apache/_internal/http_01.py"}]}
2,751
515
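For quick reference, a simplified, dependency-free sketch of the vhost-selection behaviour that the golden diff in the record above introduces: matching vhosts are collected by name, and if any of them listens on the challenge port, unnamed vhosts are added as well (the #8890 case), otherwise every vhost on that port is used. `Vhost` is a stand-in type invented for this snippet; it models a single port per vhost and omits the wildcard-address and SSL checks of the real plugin.

```python
from dataclasses import dataclass
from typing import List, Optional, Set


@dataclass
class Vhost:
    name: Optional[str]   # ServerName, or None for an unnamed vhost
    aliases: Set[str]     # ServerAlias entries
    port: str             # listening port (addresses simplified to one port)


def select_vhosts(vhosts: List[Vhost], domain: str, http01_port: str = "80") -> List[Vhost]:
    # 1. vhosts matching the requested domain by ServerName or ServerAlias
    selected = [vh for vh in vhosts if vh.name == domain or domain in vh.aliases]
    # 2. if at least one match listens on the challenge port, also include every
    #    unnamed vhost, since Apache may route the request there at runtime;
    #    otherwise fall back to every vhost listening on the challenge port
    if any(vh.port == http01_port for vh in selected):
        selected += [vh for vh in vhosts if vh.name is None]
    else:
        selected += [vh for vh in vhosts if vh.port == http01_port]
    return selected


vhosts = [
    Vhost("example.org", set(), "80"),
    Vhost(None, set(), "80"),            # unnamed duplicate that can shadow the named vhost
    Vhost("other.test", set(), "8080"),
]
print([vh.name for vh in select_vhosts(vhosts, "example.org")])  # ['example.org', None]
```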
gh_patches_debug_4312
rasdani/github-patches
git_diff
common-workflow-language__cwltool-1457
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pip install cwltool fails due to broken psutil version Hello, It looks like the latest release of psutil is broken, and the version is not bounded in the cwltool requirements; ``` [mdb@h-0000 kegg-cwl]$ pip install cwltool==1.0.20181217162649 Collecting cwltool==1.0.20181217162649 Cache entry deserialization failed, entry ignored Downloading https://files.pythonhosted.org/packages/9f/ab/d2c672e36133346b7fede41312e4b8da31cba331bfb2c02a4e6f2ed3d565/cwltool-1.0.20181217162649-py2.py3-none-any.whl (628kB) 100% |████████████████████████████████| 634kB 2.2MB/s Collecting bagit>=1.6.4 (from cwltool==1.0.20181217162649) Cache entry deserialization failed, entry ignored Requirement already satisfied (use --upgrade to upgrade): setuptools in /usr/lib/python2.7/site-packages (from cwltool==1.0.20181217162649) Collecting prov==1.5.1 (from cwltool==1.0.20181217162649) Cache entry deserialization failed, entry ignored Cache entry deserialization failed, entry ignored Downloading https://files.pythonhosted.org/packages/8e/fb/2c4c618185be2bda327f9dacd16b3122cc938809f19df7be840595d0e584/prov-1.5.1-py2.py3-none-any.whl (426kB) 100% |████████████████████████████████| 430kB 3.3MB/s Collecting mypy-extensions (from cwltool==1.0.20181217162649) Cache entry deserialization failed, entry ignored Cache entry deserialization failed, entry ignored Downloading https://files.pythonhosted.org/packages/4d/72/8d54e2b296631b9b14961d583e56e90d9d7fba8a240d5ce7f1113cc5e887/mypy_extensions-0.4.1-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): typing>=3.5.3; python_version < "3.6" in /usr/lib/python2.7/site-packages (from cwltool==1.0.20181217162649) Requirement already satisfied (use --upgrade to upgrade): rdflib<4.3.0,>=4.2.2 in /usr/lib/python2.7/site-packages (from cwltool==1.0.20181217162649) Collecting ruamel.yaml<=0.15.77,>=0.12.4 (from cwltool==1.0.20181217162649) Cache entry deserialization failed, entry ignored Cache entry deserialization failed, entry ignored Downloading https://files.pythonhosted.org/packages/9a/f3/96df89eb8fd542dcb7bfdaecd79c7642c6d272b122dc2e96d237fa57fcd1/ruamel.yaml-0.15.77-cp27-cp27mu-manylinux1_x86_64.whl (609kB) 100% |████████████████████████████████| 614kB 2.2MB/s Collecting psutil (from cwltool==1.0.20181217162649) Cache entry deserialization failed, entry ignored Exception: Traceback (most recent call last): File "/usr/lib/python2.7/site-packages/pip/basecommand.py", line 215, in main status = self.run(options, args) File "/usr/lib/python2.7/site-packages/pip/commands/install.py", line 307, in run requirement_set.prepare_files(finder) File "/usr/lib/python2.7/site-packages/pip/req/req_set.py", line 370, in prepare_files ignore_dependencies=self.ignore_dependencies)) File "/usr/lib/python2.7/site-packages/pip/req/req_set.py", line 587, in _prepare_file session=self.session, hashes=hashes) File "/usr/lib/python2.7/site-packages/pip/download.py", line 798, in unpack_url unpack_file_url(link, location, download_dir, hashes=hashes) File "/usr/lib/python2.7/site-packages/pip/download.py", line 705, in unpack_file_url unpack_file(from_path, location, content_type, link) File "/usr/lib/python2.7/site-packages/pip/utils/__init__.py", line 599, in unpack_file flatten=not filename.endswith('.whl') File "/usr/lib/python2.7/site-packages/pip/utils/__init__.py", line 484, in unzip_file zip = zipfile.ZipFile(zipfp, allowZip64=True) File 
"/usr/lib64/python2.7/zipfile.py", line 766, in __init__ self._RealGetContents() File "/usr/lib64/python2.7/zipfile.py", line 807, in _RealGetContents raise BadZipfile, "File is not a zip file" BadZipfile: File is not a zip file ``` A possible fix is to install `pip install psutil==5.6.1` prior to install cwltool. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python3 2 """Setup for the reference implementation of the CWL standards.""" 3 import os 4 import sys 5 import warnings 6 7 import setuptools.command.egg_info as egg_info_cmd 8 from setuptools import setup 9 10 if os.name == "nt": 11 warnings.warn( 12 "The CWL reference runner (cwltool) no longer supports running " 13 "CWL workflows natively on MS Windows as its previous MS Windows " 14 "support was incomplete and untested. Instead, please see " 15 "https://pypi.org/project/cwltool/#ms-windows-users " 16 "for instructions on running cwltool via " 17 "Windows Subsystem for Linux 2 (WSL2). If don't need to execute " 18 "CWL documents, then you can ignore this warning, but please " 19 "consider migrating to https://pypi.org/project/cwl-utils/ " 20 "for your CWL document processing needs." 21 ) 22 23 SETUP_DIR = os.path.dirname(__file__) 24 README = os.path.join(SETUP_DIR, "README.rst") 25 26 try: 27 import gittaggers 28 29 Tagger = gittaggers.EggInfoFromGit 30 except ImportError: 31 Tagger = egg_info_cmd.egg_info 32 33 NEEDS_PYTEST = {"pytest", "test", "ptr"}.intersection(sys.argv) 34 PYTEST_RUNNER = ["pytest-runner", "pytest-cov"] if NEEDS_PYTEST else [] 35 USE_MYPYC = False 36 # To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH 37 if len(sys.argv) > 1 and sys.argv[1] == "--use-mypyc": 38 sys.argv.pop(1) 39 USE_MYPYC = True 40 if os.getenv("CWLTOOL_USE_MYPYC", None) == "1": 41 USE_MYPYC = True 42 43 if USE_MYPYC: 44 mypyc_targets = [ 45 "cwltool/argparser.py", 46 "cwltool/builder.py", 47 "cwltool/checker.py", 48 "cwltool/command_line_tool.py", 49 # "cwltool/context.py", # monkeypatching 50 "cwltool/cwlrdf.py", 51 "cwltool/docker_id.py", 52 "cwltool/docker.py", 53 "cwltool/udocker.py", 54 "cwltool/errors.py", 55 "cwltool/executors.py", 56 "cwltool/expression.py", 57 "cwltool/factory.py", 58 "cwltool/flatten.py", 59 # "cwltool/__init__.py", 60 "cwltool/job.py", 61 "cwltool/load_tool.py", 62 # "cwltool/loghandler.py", # so we can monkeypatch the logger from tests 63 # "cwltool/__main__.py", 64 "cwltool/main.py", 65 "cwltool/mutation.py", 66 "cwltool/pack.py", 67 # "cwltool/pathmapper.py", # class PathMapper needs to be subclassable 68 "cwltool/process.py", 69 "cwltool/procgenerator.py", 70 # "cwltool/provenance.py", # WritableBag is having issues 71 "cwltool/resolver.py", 72 # "cwltool/sandboxjs.py", # probably not speed critical, tests need to mock components 73 "cwltool/secrets.py", 74 "cwltool/singularity.py", 75 "cwltool/software_requirements.py", 76 "cwltool/stdfsaccess.py", 77 "cwltool/subgraph.py", 78 "cwltool/update.py", 79 "cwltool/utils.py", 80 "cwltool/validate_js.py", 81 "cwltool/workflow.py", 82 ] 83 84 from mypyc.build import mypycify 85 86 opt_level = os.getenv("MYPYC_OPT_LEVEL", "3") 87 ext_modules = mypycify(mypyc_targets, opt_level=opt_level) 88 else: 89 ext_modules = [] 90 91 setup( 92 name="cwltool", 93 version="3.1", 94 description="Common workflow language reference implementation", 95 long_description=open(README).read(), 96 
long_description_content_type="text/x-rst", 97 author="Common workflow language working group", 98 author_email="[email protected]", 99 url="https://github.com/common-workflow-language/cwltool", 100 download_url="https://github.com/common-workflow-language/cwltool", 101 ext_modules=ext_modules, 102 # platforms='', # empty as is conveyed by the classifier below 103 # license='', # empty as is conveyed by the classifier below 104 packages=["cwltool", "cwltool.tests"], 105 package_dir={"cwltool.tests": "tests"}, 106 include_package_data=True, 107 install_requires=[ 108 "setuptools", 109 "requests >= 2.6.1", # >= 2.6.1 to workaround 110 # https://github.com/ionrock/cachecontrol/issues/137 111 "ruamel.yaml >= 0.12.4, < 0.17.10", 112 "rdflib >= 4.2.2, < 5.1.0", 113 "shellescape >= 3.4.1, < 3.9", 114 # 7.1.20210518142926 or later required due to 115 # https://github.com/common-workflow-language/schema_salad/issues/385 116 "schema-salad >= 7.1.20210518142926, < 8", 117 "mypy-extensions", 118 "psutil", 119 "prov == 1.5.1", 120 "bagit >= 1.6.4", 121 "typing-extensions", 122 "coloredlogs", 123 "pydot >= 1.4.1", 124 "argcomplete", 125 ], 126 extras_require={ 127 "deps": ["galaxy-tool-util >= 21.1.0"], 128 "docs": [ 129 "sphinx >= 2.2", 130 "sphinx-rtd-theme", 131 "sphinx-autoapi", 132 "sphinx-autodoc-typehints", 133 "typed_ast;python_version<'3.8'", 134 ], 135 }, 136 python_requires=">=3.6, <4", 137 setup_requires=PYTEST_RUNNER, 138 test_suite="tests", 139 tests_require=[ 140 "pytest >= 6.2, < 6.3", 141 "mock >= 2.0.0", 142 "pytest-mock >= 1.10.0", 143 "arcp >= 0.2.0", 144 "rdflib-jsonld >= 0.4.0", 145 ], 146 entry_points={"console_scripts": ["cwltool=cwltool.main:run"]}, 147 zip_safe=True, 148 cmdclass={"egg_info": Tagger}, 149 classifiers=[ 150 "Development Status :: 5 - Production/Stable", 151 "Environment :: Console", 152 "Intended Audience :: Developers", 153 "Intended Audience :: Science/Research", 154 "Intended Audience :: Healthcare Industry", 155 "License :: OSI Approved :: Apache Software License", 156 "Natural Language :: English", 157 "Operating System :: MacOS :: MacOS X", 158 "Operating System :: POSIX", 159 "Operating System :: POSIX :: Linux", 160 "Programming Language :: Python :: 3", 161 "Programming Language :: Python :: 3.6", 162 "Programming Language :: Python :: 3.7", 163 "Programming Language :: Python :: 3.8", 164 "Programming Language :: Python :: 3.9", 165 "Topic :: Scientific/Engineering", 166 "Topic :: Scientific/Engineering :: Bio-Informatics", 167 "Topic :: Scientific/Engineering :: Astronomy", 168 "Topic :: Scientific/Engineering :: Atmospheric Science", 169 "Topic :: Scientific/Engineering :: Information Analysis", 170 "Topic :: Scientific/Engineering :: Medical Science Apps.", 171 "Topic :: System :: Distributed Computing", 172 "Topic :: Utilities", 173 ], 174 ) 175 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -115,7 +115,7 @@ # https://github.com/common-workflow-language/schema_salad/issues/385 "schema-salad >= 7.1.20210518142926, < 8", "mypy-extensions", - "psutil", + "psutil != 5.6.1", "prov == 1.5.1", "bagit >= 1.6.4", "typing-extensions",
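As context for the one-line fix above, the exclusion specifier only rules out the release that shipped the corrupted wheel; every other psutil version still satisfies the requirement. A minimal sketch of that behaviour, assuming the third-party `packaging` library (which the patch itself does not depend on — it is used here only to illustrate how pip-style specifiers evaluate):

```python
# Sketch: how a pip-style exclusion specifier treats the broken release.
# Assumes the `packaging` library; the variable names are illustrative only.
from packaging.specifiers import SpecifierSet

constraint = SpecifierSet("!=5.6.1")

print("5.6.1" in constraint)  # False -> the broken wheel is never selected
print("5.6.0" in constraint)  # True  -> earlier releases still satisfy the constraint
print("5.6.2" in constraint)  # True  -> later, fixed releases are allowed
```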
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -115,7 +115,7 @@\n # https://github.com/common-workflow-language/schema_salad/issues/385\n \"schema-salad >= 7.1.20210518142926, < 8\",\n \"mypy-extensions\",\n- \"psutil\",\n+ \"psutil != 5.6.1\",\n \"prov == 1.5.1\",\n \"bagit >= 1.6.4\",\n \"typing-extensions\",\n", "issue": "pip install cwltool fails due to broken psutil version\nHello,\r\nIt looks like the latest release of psutil is broken, and the version is not bounded in the cwltool requirements;\r\n```\r\n[mdb@h-0000 kegg-cwl]$ pip install cwltool==1.0.20181217162649\r\nCollecting cwltool==1.0.20181217162649\r\n Cache entry deserialization failed, entry ignored\r\n Downloading https://files.pythonhosted.org/packages/9f/ab/d2c672e36133346b7fede41312e4b8da31cba331bfb2c02a4e6f2ed3d565/cwltool-1.0.20181217162649-py2.py3-none-any.whl (628kB)\r\n 100% |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 634kB 2.2MB/s \r\nCollecting bagit>=1.6.4 (from cwltool==1.0.20181217162649)\r\n Cache entry deserialization failed, entry ignored\r\nRequirement already satisfied (use --upgrade to upgrade): setuptools in /usr/lib/python2.7/site-packages (from cwltool==1.0.20181217162649)\r\nCollecting prov==1.5.1 (from cwltool==1.0.20181217162649)\r\n Cache entry deserialization failed, entry ignored\r\n Cache entry deserialization failed, entry ignored\r\n Downloading https://files.pythonhosted.org/packages/8e/fb/2c4c618185be2bda327f9dacd16b3122cc938809f19df7be840595d0e584/prov-1.5.1-py2.py3-none-any.whl (426kB)\r\n 100% |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 430kB 3.3MB/s \r\nCollecting mypy-extensions (from cwltool==1.0.20181217162649)\r\n Cache entry deserialization failed, entry ignored\r\n Cache entry deserialization failed, entry ignored\r\n Downloading https://files.pythonhosted.org/packages/4d/72/8d54e2b296631b9b14961d583e56e90d9d7fba8a240d5ce7f1113cc5e887/mypy_extensions-0.4.1-py2.py3-none-any.whl\r\nRequirement already satisfied (use --upgrade to upgrade): typing>=3.5.3; python_version < \"3.6\" in /usr/lib/python2.7/site-packages (from cwltool==1.0.20181217162649)\r\nRequirement already satisfied (use --upgrade to upgrade): rdflib<4.3.0,>=4.2.2 in /usr/lib/python2.7/site-packages (from cwltool==1.0.20181217162649)\r\nCollecting ruamel.yaml<=0.15.77,>=0.12.4 (from cwltool==1.0.20181217162649)\r\n Cache entry deserialization failed, entry ignored\r\n Cache entry deserialization failed, entry ignored\r\n Downloading https://files.pythonhosted.org/packages/9a/f3/96df89eb8fd542dcb7bfdaecd79c7642c6d272b122dc2e96d237fa57fcd1/ruamel.yaml-0.15.77-cp27-cp27mu-manylinux1_x86_64.whl (609kB)\r\n 100% |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 614kB 2.2MB/s \r\nCollecting psutil (from cwltool==1.0.20181217162649)\r\n Cache entry deserialization failed, entry ignored\r\nException:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python2.7/site-packages/pip/basecommand.py\", line 215, in main\r\n status = self.run(options, args)\r\n File \"/usr/lib/python2.7/site-packages/pip/commands/install.py\", line 307, in run\r\n 
requirement_set.prepare_files(finder)\r\n File \"/usr/lib/python2.7/site-packages/pip/req/req_set.py\", line 370, in prepare_files\r\n ignore_dependencies=self.ignore_dependencies))\r\n File \"/usr/lib/python2.7/site-packages/pip/req/req_set.py\", line 587, in _prepare_file\r\n session=self.session, hashes=hashes)\r\n File \"/usr/lib/python2.7/site-packages/pip/download.py\", line 798, in unpack_url\r\n unpack_file_url(link, location, download_dir, hashes=hashes)\r\n File \"/usr/lib/python2.7/site-packages/pip/download.py\", line 705, in unpack_file_url\r\n unpack_file(from_path, location, content_type, link)\r\n File \"/usr/lib/python2.7/site-packages/pip/utils/__init__.py\", line 599, in unpack_file\r\n flatten=not filename.endswith('.whl')\r\n File \"/usr/lib/python2.7/site-packages/pip/utils/__init__.py\", line 484, in unzip_file\r\n zip = zipfile.ZipFile(zipfp, allowZip64=True)\r\n File \"/usr/lib64/python2.7/zipfile.py\", line 766, in __init__\r\n self._RealGetContents()\r\n File \"/usr/lib64/python2.7/zipfile.py\", line 807, in _RealGetContents\r\n raise BadZipfile, \"File is not a zip file\"\r\nBadZipfile: File is not a zip file\r\n```\r\n\r\nA possible fix is to install `pip install psutil==5.6.1` prior to install cwltool.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Setup for the reference implementation of the CWL standards.\"\"\"\nimport os\nimport sys\nimport warnings\n\nimport setuptools.command.egg_info as egg_info_cmd\nfrom setuptools import setup\n\nif os.name == \"nt\":\n warnings.warn(\n \"The CWL reference runner (cwltool) no longer supports running \"\n \"CWL workflows natively on MS Windows as its previous MS Windows \"\n \"support was incomplete and untested. Instead, please see \"\n \"https://pypi.org/project/cwltool/#ms-windows-users \"\n \"for instructions on running cwltool via \"\n \"Windows Subsystem for Linux 2 (WSL2). 
If don't need to execute \"\n \"CWL documents, then you can ignore this warning, but please \"\n \"consider migrating to https://pypi.org/project/cwl-utils/ \"\n \"for your CWL document processing needs.\"\n )\n\nSETUP_DIR = os.path.dirname(__file__)\nREADME = os.path.join(SETUP_DIR, \"README.rst\")\n\ntry:\n import gittaggers\n\n Tagger = gittaggers.EggInfoFromGit\nexcept ImportError:\n Tagger = egg_info_cmd.egg_info\n\nNEEDS_PYTEST = {\"pytest\", \"test\", \"ptr\"}.intersection(sys.argv)\nPYTEST_RUNNER = [\"pytest-runner\", \"pytest-cov\"] if NEEDS_PYTEST else []\nUSE_MYPYC = False\n# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH\nif len(sys.argv) > 1 and sys.argv[1] == \"--use-mypyc\":\n sys.argv.pop(1)\n USE_MYPYC = True\nif os.getenv(\"CWLTOOL_USE_MYPYC\", None) == \"1\":\n USE_MYPYC = True\n\nif USE_MYPYC:\n mypyc_targets = [\n \"cwltool/argparser.py\",\n \"cwltool/builder.py\",\n \"cwltool/checker.py\",\n \"cwltool/command_line_tool.py\",\n # \"cwltool/context.py\", # monkeypatching\n \"cwltool/cwlrdf.py\",\n \"cwltool/docker_id.py\",\n \"cwltool/docker.py\",\n \"cwltool/udocker.py\",\n \"cwltool/errors.py\",\n \"cwltool/executors.py\",\n \"cwltool/expression.py\",\n \"cwltool/factory.py\",\n \"cwltool/flatten.py\",\n # \"cwltool/__init__.py\",\n \"cwltool/job.py\",\n \"cwltool/load_tool.py\",\n # \"cwltool/loghandler.py\", # so we can monkeypatch the logger from tests\n # \"cwltool/__main__.py\",\n \"cwltool/main.py\",\n \"cwltool/mutation.py\",\n \"cwltool/pack.py\",\n # \"cwltool/pathmapper.py\", # class PathMapper needs to be subclassable\n \"cwltool/process.py\",\n \"cwltool/procgenerator.py\",\n # \"cwltool/provenance.py\", # WritableBag is having issues\n \"cwltool/resolver.py\",\n # \"cwltool/sandboxjs.py\", # probably not speed critical, tests need to mock components\n \"cwltool/secrets.py\",\n \"cwltool/singularity.py\",\n \"cwltool/software_requirements.py\",\n \"cwltool/stdfsaccess.py\",\n \"cwltool/subgraph.py\",\n \"cwltool/update.py\",\n \"cwltool/utils.py\",\n \"cwltool/validate_js.py\",\n \"cwltool/workflow.py\",\n ]\n\n from mypyc.build import mypycify\n\n opt_level = os.getenv(\"MYPYC_OPT_LEVEL\", \"3\")\n ext_modules = mypycify(mypyc_targets, opt_level=opt_level)\nelse:\n ext_modules = []\n\nsetup(\n name=\"cwltool\",\n version=\"3.1\",\n description=\"Common workflow language reference implementation\",\n long_description=open(README).read(),\n long_description_content_type=\"text/x-rst\",\n author=\"Common workflow language working group\",\n author_email=\"[email protected]\",\n url=\"https://github.com/common-workflow-language/cwltool\",\n download_url=\"https://github.com/common-workflow-language/cwltool\",\n ext_modules=ext_modules,\n # platforms='', # empty as is conveyed by the classifier below\n # license='', # empty as is conveyed by the classifier below\n packages=[\"cwltool\", \"cwltool.tests\"],\n package_dir={\"cwltool.tests\": \"tests\"},\n include_package_data=True,\n install_requires=[\n \"setuptools\",\n \"requests >= 2.6.1\", # >= 2.6.1 to workaround\n # https://github.com/ionrock/cachecontrol/issues/137\n \"ruamel.yaml >= 0.12.4, < 0.17.10\",\n \"rdflib >= 4.2.2, < 5.1.0\",\n \"shellescape >= 3.4.1, < 3.9\",\n # 7.1.20210518142926 or later required due to\n # https://github.com/common-workflow-language/schema_salad/issues/385\n \"schema-salad >= 7.1.20210518142926, < 8\",\n \"mypy-extensions\",\n \"psutil\",\n \"prov == 1.5.1\",\n \"bagit >= 1.6.4\",\n \"typing-extensions\",\n \"coloredlogs\",\n \"pydot >= 
1.4.1\",\n \"argcomplete\",\n ],\n extras_require={\n \"deps\": [\"galaxy-tool-util >= 21.1.0\"],\n \"docs\": [\n \"sphinx >= 2.2\",\n \"sphinx-rtd-theme\",\n \"sphinx-autoapi\",\n \"sphinx-autodoc-typehints\",\n \"typed_ast;python_version<'3.8'\",\n ],\n },\n python_requires=\">=3.6, <4\",\n setup_requires=PYTEST_RUNNER,\n test_suite=\"tests\",\n tests_require=[\n \"pytest >= 6.2, < 6.3\",\n \"mock >= 2.0.0\",\n \"pytest-mock >= 1.10.0\",\n \"arcp >= 0.2.0\",\n \"rdflib-jsonld >= 0.4.0\",\n ],\n entry_points={\"console_scripts\": [\"cwltool=cwltool.main:run\"]},\n zip_safe=True,\n cmdclass={\"egg_info\": Tagger},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Healthcare Industry\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n \"Topic :: Scientific/Engineering :: Astronomy\",\n \"Topic :: Scientific/Engineering :: Atmospheric Science\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n \"Topic :: System :: Distributed Computing\",\n \"Topic :: Utilities\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Setup for the reference implementation of the CWL standards.\"\"\"\nimport os\nimport sys\nimport warnings\n\nimport setuptools.command.egg_info as egg_info_cmd\nfrom setuptools import setup\n\nif os.name == \"nt\":\n warnings.warn(\n \"The CWL reference runner (cwltool) no longer supports running \"\n \"CWL workflows natively on MS Windows as its previous MS Windows \"\n \"support was incomplete and untested. Instead, please see \"\n \"https://pypi.org/project/cwltool/#ms-windows-users \"\n \"for instructions on running cwltool via \"\n \"Windows Subsystem for Linux 2 (WSL2). 
If don't need to execute \"\n \"CWL documents, then you can ignore this warning, but please \"\n \"consider migrating to https://pypi.org/project/cwl-utils/ \"\n \"for your CWL document processing needs.\"\n )\n\nSETUP_DIR = os.path.dirname(__file__)\nREADME = os.path.join(SETUP_DIR, \"README.rst\")\n\ntry:\n import gittaggers\n\n Tagger = gittaggers.EggInfoFromGit\nexcept ImportError:\n Tagger = egg_info_cmd.egg_info\n\nNEEDS_PYTEST = {\"pytest\", \"test\", \"ptr\"}.intersection(sys.argv)\nPYTEST_RUNNER = [\"pytest-runner\", \"pytest-cov\"] if NEEDS_PYTEST else []\nUSE_MYPYC = False\n# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH\nif len(sys.argv) > 1 and sys.argv[1] == \"--use-mypyc\":\n sys.argv.pop(1)\n USE_MYPYC = True\nif os.getenv(\"CWLTOOL_USE_MYPYC\", None) == \"1\":\n USE_MYPYC = True\n\nif USE_MYPYC:\n mypyc_targets = [\n \"cwltool/argparser.py\",\n \"cwltool/builder.py\",\n \"cwltool/checker.py\",\n \"cwltool/command_line_tool.py\",\n # \"cwltool/context.py\", # monkeypatching\n \"cwltool/cwlrdf.py\",\n \"cwltool/docker_id.py\",\n \"cwltool/docker.py\",\n \"cwltool/udocker.py\",\n \"cwltool/errors.py\",\n \"cwltool/executors.py\",\n \"cwltool/expression.py\",\n \"cwltool/factory.py\",\n \"cwltool/flatten.py\",\n # \"cwltool/__init__.py\",\n \"cwltool/job.py\",\n \"cwltool/load_tool.py\",\n # \"cwltool/loghandler.py\", # so we can monkeypatch the logger from tests\n # \"cwltool/__main__.py\",\n \"cwltool/main.py\",\n \"cwltool/mutation.py\",\n \"cwltool/pack.py\",\n # \"cwltool/pathmapper.py\", # class PathMapper needs to be subclassable\n \"cwltool/process.py\",\n \"cwltool/procgenerator.py\",\n # \"cwltool/provenance.py\", # WritableBag is having issues\n \"cwltool/resolver.py\",\n # \"cwltool/sandboxjs.py\", # probably not speed critical, tests need to mock components\n \"cwltool/secrets.py\",\n \"cwltool/singularity.py\",\n \"cwltool/software_requirements.py\",\n \"cwltool/stdfsaccess.py\",\n \"cwltool/subgraph.py\",\n \"cwltool/update.py\",\n \"cwltool/utils.py\",\n \"cwltool/validate_js.py\",\n \"cwltool/workflow.py\",\n ]\n\n from mypyc.build import mypycify\n\n opt_level = os.getenv(\"MYPYC_OPT_LEVEL\", \"3\")\n ext_modules = mypycify(mypyc_targets, opt_level=opt_level)\nelse:\n ext_modules = []\n\nsetup(\n name=\"cwltool\",\n version=\"3.1\",\n description=\"Common workflow language reference implementation\",\n long_description=open(README).read(),\n long_description_content_type=\"text/x-rst\",\n author=\"Common workflow language working group\",\n author_email=\"[email protected]\",\n url=\"https://github.com/common-workflow-language/cwltool\",\n download_url=\"https://github.com/common-workflow-language/cwltool\",\n ext_modules=ext_modules,\n # platforms='', # empty as is conveyed by the classifier below\n # license='', # empty as is conveyed by the classifier below\n packages=[\"cwltool\", \"cwltool.tests\"],\n package_dir={\"cwltool.tests\": \"tests\"},\n include_package_data=True,\n install_requires=[\n \"setuptools\",\n \"requests >= 2.6.1\", # >= 2.6.1 to workaround\n # https://github.com/ionrock/cachecontrol/issues/137\n \"ruamel.yaml >= 0.12.4, < 0.17.10\",\n \"rdflib >= 4.2.2, < 5.1.0\",\n \"shellescape >= 3.4.1, < 3.9\",\n # 7.1.20210518142926 or later required due to\n # https://github.com/common-workflow-language/schema_salad/issues/385\n \"schema-salad >= 7.1.20210518142926, < 8\",\n \"mypy-extensions\",\n \"psutil != 5.6.1\",\n \"prov == 1.5.1\",\n \"bagit >= 1.6.4\",\n \"typing-extensions\",\n \"coloredlogs\",\n \"pydot >= 
1.4.1\",\n \"argcomplete\",\n ],\n extras_require={\n \"deps\": [\"galaxy-tool-util >= 21.1.0\"],\n \"docs\": [\n \"sphinx >= 2.2\",\n \"sphinx-rtd-theme\",\n \"sphinx-autoapi\",\n \"sphinx-autodoc-typehints\",\n \"typed_ast;python_version<'3.8'\",\n ],\n },\n python_requires=\">=3.6, <4\",\n setup_requires=PYTEST_RUNNER,\n test_suite=\"tests\",\n tests_require=[\n \"pytest >= 6.2, < 6.3\",\n \"mock >= 2.0.0\",\n \"pytest-mock >= 1.10.0\",\n \"arcp >= 0.2.0\",\n \"rdflib-jsonld >= 0.4.0\",\n ],\n entry_points={\"console_scripts\": [\"cwltool=cwltool.main:run\"]},\n zip_safe=True,\n cmdclass={\"egg_info\": Tagger},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Healthcare Industry\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n \"Topic :: Scientific/Engineering :: Astronomy\",\n \"Topic :: Scientific/Engineering :: Atmospheric Science\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n \"Topic :: System :: Distributed Computing\",\n \"Topic :: Utilities\",\n ],\n)\n", "path": "setup.py"}]}
3,858
139
gh_patches_debug_41884
rasdani/github-patches
git_diff
opensearch-project__opensearch-build-2358
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Generated manifests for OpenSearch 2.x need custom JAVA_HOME ### Is your feature request related to a problem? Please describe Automation opened a PR to generate the OpenSearch 3.0 manifest. That manifest was missing `JAVA_HOME` that was added manually in https://github.com/opensearch-project/opensearch-build/pull/2042. ### Describe the solution you'd like Add a way to customize generated manifests for various products/versions, add `JAVA_HOME` to the OpenSearch ones. ### Describe alternatives you've considered Keep doing it manually. ### Additional context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/manifests_workflow/input_manifests.py` Content: ``` 1 # SPDX-License-Identifier: Apache-2.0 2 # 3 # The OpenSearch Contributors require contributions made to 4 # this file be licensed under the Apache-2.0 license or a 5 # compatible open source license. 6 7 import glob 8 import logging 9 import os 10 import re 11 from abc import abstractmethod 12 from typing import Dict, List, Type, Union 13 14 from manifests.input_manifest import InputManifest 15 from manifests.manifests import Manifests 16 from manifests_workflow.component_opensearch import ComponentOpenSearch 17 from manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin 18 from manifests_workflow.component_opensearch_min import ComponentOpenSearchMin 19 from system.temporary_directory import TemporaryDirectory 20 21 22 class InputManifests(Manifests): 23 def __init__(self, name: str) -> None: 24 self.name = name 25 self.prefix = name.lower().replace(" ", "-") 26 super().__init__(InputManifest, InputManifests.files(self.prefix)) 27 28 @classmethod 29 def manifests_path(self) -> str: 30 return os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "manifests")) 31 32 @classmethod 33 def jenkins_path(self) -> str: 34 return os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "jenkins")) 35 36 @classmethod 37 def cron_jenkinsfile(self) -> str: 38 return os.path.join(self.jenkins_path(), "check-for-build.jenkinsfile") 39 40 @classmethod 41 def files(self, name: str) -> List: 42 results = [] 43 for filename in glob.glob(os.path.join(self.manifests_path(), f"**/{name}-*.yml")): 44 # avoids the -maven manifest 45 match = re.search(rf"^{name}-([0-9.]*).yml$", os.path.basename(filename)) 46 if match: 47 results.append(filename) 48 return results 49 50 @abstractmethod 51 def update(self, min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]], component_klass: Type[ComponentOpenSearch], keep: bool = False) -> None: 52 known_versions = self.versions 53 logging.info(f"Known versions: {known_versions}") 54 main_versions: Dict = {} 55 with TemporaryDirectory(keep=keep, chdir=True) as work_dir: 56 logging.info(f"Checking out components into {work_dir.name}") 57 58 # check out and build #main, 1.x, etc. 
59 branches = min_klass.branches() 60 61 logging.info(f"Checking {self.name} {branches} branches") 62 for branch in branches: 63 c = min_klass.checkout( 64 path=os.path.join(work_dir.name, self.name.replace(" ", ""), branch), 65 branch=branch, 66 ) 67 68 version = c.version 69 logging.info(f"{self.name}#{branch} is version {version}") 70 if version not in main_versions.keys(): 71 main_versions[version] = [c] 72 73 if component_klass is not None: 74 # components can increment their own version first without incrementing min 75 manifest = self.latest 76 logging.info(f"Examining components in the latest manifest of {manifest.build.name} ({manifest.build.version})") 77 for component in manifest.components.values(): 78 if component.name == self.name: 79 continue 80 81 logging.info(f"Checking out {component.name}#main") 82 component = component_klass.checkout( 83 name=component.name, 84 path=os.path.join(work_dir.name, component.name), 85 opensearch_version=manifest.build.version, 86 branch="main", 87 ) 88 89 component_version = component.version 90 if component_version: 91 release_version = ".".join(component_version.split(".")[:3]) 92 if release_version not in main_versions.keys(): 93 main_versions[release_version] = [] 94 main_versions[release_version].append(component) 95 logging.info(f"{component.name}#main is version {release_version} (from {component_version})") 96 97 # summarize 98 logging.info("Found versions on main:") 99 for main_version in main_versions.keys(): 100 for component in main_versions[main_version]: 101 logging.info(f" {component.name}={main_version}") 102 103 # generate new manifests 104 for release_version in sorted(main_versions.keys() - known_versions): 105 self.write_manifest(release_version, main_versions[release_version]) 106 self.add_to_cron(release_version) 107 108 def create_manifest(self, version: str, components: List = []) -> InputManifest: 109 image_map = { 110 "opensearch": "opensearchstaging/ci-runner:ci-runner-centos7-opensearch-build-v2", 111 "opensearch-dashboards": "opensearchstaging/ci-runner:ci-runner-centos7-opensearch-dashboards-build-v2" 112 } 113 114 data: Dict = { 115 "schema-version": "1.0", 116 "build": { 117 "name": self.name, 118 "version": version 119 }, 120 "ci": { 121 "image": { 122 "name": image_map[self.prefix] 123 } 124 }, 125 "components": [], 126 } 127 128 for component in components: 129 logging.info(f" Adding {component.name}") 130 data["components"].append(component.to_dict()) 131 132 return InputManifest(data) 133 134 def write_manifest(self, version: str, components: List = []) -> None: 135 logging.info(f"Creating new version: {version}") 136 manifest = self.create_manifest(version, components) 137 manifest_dir = os.path.join(self.manifests_path(), version) 138 os.makedirs(manifest_dir, exist_ok=True) 139 manifest_path = os.path.join(manifest_dir, f"{self.prefix}-{version}.yml") 140 manifest.to_file(manifest_path) 141 logging.info(f"Wrote {manifest_path}") 142 143 def add_to_cron(self, version: str) -> None: 144 logging.info(f"Adding new version to cron: {version}") 145 jenkinsfile = self.cron_jenkinsfile() 146 with open(jenkinsfile, "r") as f: 147 data = f.read() 148 149 cron_entry = f"H 1 * * * %INPUT_MANIFEST={version}/{self.prefix}-{version}.yml;TARGET_JOB_NAME=distribution-build-{self.prefix}\n" 150 151 if cron_entry in data: 152 raise ValueError(f"{jenkinsfile} already contains an entry for {self.prefix} {version}") 153 154 data = data.replace( 155 "parameterizedCron '''\n", 156 f"parameterizedCron '''\n{' ' * 
12}{cron_entry}" 157 ) 158 159 with open(jenkinsfile, "w") as f: 160 f.write(data) 161 162 logging.info(f"Wrote {jenkinsfile}") 163 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/manifests_workflow/input_manifests.py b/src/manifests_workflow/input_manifests.py --- a/src/manifests_workflow/input_manifests.py +++ b/src/manifests_workflow/input_manifests.py @@ -11,7 +11,7 @@ from abc import abstractmethod from typing import Dict, List, Type, Union -from manifests.input_manifest import InputManifest +from manifests.input_manifest import InputComponents, InputManifest from manifests.manifests import Manifests from manifests_workflow.component_opensearch import ComponentOpenSearch from manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin @@ -48,7 +48,12 @@ return results @abstractmethod - def update(self, min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]], component_klass: Type[ComponentOpenSearch], keep: bool = False) -> None: + def update( + self, + min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]], + component_klass: Type[ComponentOpenSearch], + keep: bool = False, + ) -> None: known_versions = self.versions logging.info(f"Known versions: {known_versions}") main_versions: Dict = {} @@ -106,30 +111,23 @@ self.add_to_cron(release_version) def create_manifest(self, version: str, components: List = []) -> InputManifest: - image_map = { - "opensearch": "opensearchstaging/ci-runner:ci-runner-centos7-opensearch-build-v2", - "opensearch-dashboards": "opensearchstaging/ci-runner:ci-runner-centos7-opensearch-dashboards-build-v2" - } - - data: Dict = { - "schema-version": "1.0", - "build": { - "name": self.name, - "version": version - }, - "ci": { - "image": { - "name": image_map[self.prefix] - } - }, - "components": [], - } + templates_base_path = os.path.join(self.manifests_path(), "templates") + template_version_folder = version.split(".")[0] + ".x" + template_full_path = os.path.join(templates_base_path, self.prefix, template_version_folder, "manifest.yml") + if not os.path.exists(template_full_path): + template_full_path = os.path.join(templates_base_path, self.prefix, "default", "manifest.yml") + + manifest = InputManifest.from_file(open(template_full_path)) + + manifest.build.version = version + manifests_components = [] for component in components: logging.info(f" Adding {component.name}") - data["components"].append(component.to_dict()) + manifests_components.append(component.to_dict()) - return InputManifest(data) + manifest.components = InputComponents(manifests_components) # type: ignore + return manifest def write_manifest(self, version: str, components: List = []) -> None: logging.info(f"Creating new version: {version}") @@ -151,10 +149,7 @@ if cron_entry in data: raise ValueError(f"{jenkinsfile} already contains an entry for {self.prefix} {version}") - data = data.replace( - "parameterizedCron '''\n", - f"parameterizedCron '''\n{' ' * 12}{cron_entry}" - ) + data = data.replace("parameterizedCron '''\n", f"parameterizedCron '''\n{' ' * 12}{cron_entry}") with open(jenkinsfile, "w") as f: f.write(data)
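The heart of this patch is the switch from a hard-coded manifest dictionary to version-specific template files, which is where settings such as `JAVA_HOME` can now live. Below is a standalone sketch of the selection rule, assuming the same `templates/<prefix>/<major>.x/manifest.yml` layout the diff reads from; the helper name `pick_template_path` and the sample arguments are illustrative, not part of the repository:

```python
import os


def pick_template_path(manifests_path: str, prefix: str, version: str) -> str:
    """Return the manifest template for a product version, falling back to the default."""
    base = os.path.join(manifests_path, "templates", prefix)
    # Prefer a major-version folder such as "opensearch/3.x/manifest.yml" ...
    versioned = os.path.join(base, version.split(".")[0] + ".x", "manifest.yml")
    # ... otherwise fall back to "opensearch/default/manifest.yml".
    default = os.path.join(base, "default", "manifest.yml")
    return versioned if os.path.exists(versioned) else default


# Example: a generated 3.0.0 manifest picks templates/opensearch/3.x/manifest.yml
# when that file exists (where JAVA_HOME can be pre-set), else the default template.
print(pick_template_path("manifests", "opensearch", "3.0.0"))
```

With this layout, supporting a new major line only requires checking in a template, rather than editing the generator code each time a setting like `JAVA_HOME` needs to differ.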
{"golden_diff": "diff --git a/src/manifests_workflow/input_manifests.py b/src/manifests_workflow/input_manifests.py\n--- a/src/manifests_workflow/input_manifests.py\n+++ b/src/manifests_workflow/input_manifests.py\n@@ -11,7 +11,7 @@\n from abc import abstractmethod\n from typing import Dict, List, Type, Union\n \n-from manifests.input_manifest import InputManifest\n+from manifests.input_manifest import InputComponents, InputManifest\n from manifests.manifests import Manifests\n from manifests_workflow.component_opensearch import ComponentOpenSearch\n from manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin\n@@ -48,7 +48,12 @@\n return results\n \n @abstractmethod\n- def update(self, min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]], component_klass: Type[ComponentOpenSearch], keep: bool = False) -> None:\n+ def update(\n+ self,\n+ min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]],\n+ component_klass: Type[ComponentOpenSearch],\n+ keep: bool = False,\n+ ) -> None:\n known_versions = self.versions\n logging.info(f\"Known versions: {known_versions}\")\n main_versions: Dict = {}\n@@ -106,30 +111,23 @@\n self.add_to_cron(release_version)\n \n def create_manifest(self, version: str, components: List = []) -> InputManifest:\n- image_map = {\n- \"opensearch\": \"opensearchstaging/ci-runner:ci-runner-centos7-opensearch-build-v2\",\n- \"opensearch-dashboards\": \"opensearchstaging/ci-runner:ci-runner-centos7-opensearch-dashboards-build-v2\"\n- }\n-\n- data: Dict = {\n- \"schema-version\": \"1.0\",\n- \"build\": {\n- \"name\": self.name,\n- \"version\": version\n- },\n- \"ci\": {\n- \"image\": {\n- \"name\": image_map[self.prefix]\n- }\n- },\n- \"components\": [],\n- }\n+ templates_base_path = os.path.join(self.manifests_path(), \"templates\")\n+ template_version_folder = version.split(\".\")[0] + \".x\"\n+ template_full_path = os.path.join(templates_base_path, self.prefix, template_version_folder, \"manifest.yml\")\n+ if not os.path.exists(template_full_path):\n+ template_full_path = os.path.join(templates_base_path, self.prefix, \"default\", \"manifest.yml\")\n+\n+ manifest = InputManifest.from_file(open(template_full_path))\n+\n+ manifest.build.version = version\n+ manifests_components = []\n \n for component in components:\n logging.info(f\" Adding {component.name}\")\n- data[\"components\"].append(component.to_dict())\n+ manifests_components.append(component.to_dict())\n \n- return InputManifest(data)\n+ manifest.components = InputComponents(manifests_components) # type: ignore\n+ return manifest\n \n def write_manifest(self, version: str, components: List = []) -> None:\n logging.info(f\"Creating new version: {version}\")\n@@ -151,10 +149,7 @@\n if cron_entry in data:\n raise ValueError(f\"{jenkinsfile} already contains an entry for {self.prefix} {version}\")\n \n- data = data.replace(\n- \"parameterizedCron '''\\n\",\n- f\"parameterizedCron '''\\n{' ' * 12}{cron_entry}\"\n- )\n+ data = data.replace(\"parameterizedCron '''\\n\", f\"parameterizedCron '''\\n{' ' * 12}{cron_entry}\")\n \n with open(jenkinsfile, \"w\") as f:\n f.write(data)\n", "issue": "Generated manifests for OpenSearch 2.x need custom JAVA_HOME\n### Is your feature request related to a problem? Please describe\n\nAutomation opened a PR to generate the OpenSearch 3.0 manifest. 
That manifest was missing `JAVA_HOME` that was added manually in https://github.com/opensearch-project/opensearch-build/pull/2042.\n\n### Describe the solution you'd like\n\nAdd a way to customize generated manifests for various products/versions, add `JAVA_HOME` to the OpenSearch ones.\n\n### Describe alternatives you've considered\n\nKeep doing it manually.\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport glob\nimport logging\nimport os\nimport re\nfrom abc import abstractmethod\nfrom typing import Dict, List, Type, Union\n\nfrom manifests.input_manifest import InputManifest\nfrom manifests.manifests import Manifests\nfrom manifests_workflow.component_opensearch import ComponentOpenSearch\nfrom manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin\nfrom manifests_workflow.component_opensearch_min import ComponentOpenSearchMin\nfrom system.temporary_directory import TemporaryDirectory\n\n\nclass InputManifests(Manifests):\n def __init__(self, name: str) -> None:\n self.name = name\n self.prefix = name.lower().replace(\" \", \"-\")\n super().__init__(InputManifest, InputManifests.files(self.prefix))\n\n @classmethod\n def manifests_path(self) -> str:\n return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"manifests\"))\n\n @classmethod\n def jenkins_path(self) -> str:\n return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"jenkins\"))\n\n @classmethod\n def cron_jenkinsfile(self) -> str:\n return os.path.join(self.jenkins_path(), \"check-for-build.jenkinsfile\")\n\n @classmethod\n def files(self, name: str) -> List:\n results = []\n for filename in glob.glob(os.path.join(self.manifests_path(), f\"**/{name}-*.yml\")):\n # avoids the -maven manifest\n match = re.search(rf\"^{name}-([0-9.]*).yml$\", os.path.basename(filename))\n if match:\n results.append(filename)\n return results\n\n @abstractmethod\n def update(self, min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]], component_klass: Type[ComponentOpenSearch], keep: bool = False) -> None:\n known_versions = self.versions\n logging.info(f\"Known versions: {known_versions}\")\n main_versions: Dict = {}\n with TemporaryDirectory(keep=keep, chdir=True) as work_dir:\n logging.info(f\"Checking out components into {work_dir.name}\")\n\n # check out and build #main, 1.x, etc.\n branches = min_klass.branches()\n\n logging.info(f\"Checking {self.name} {branches} branches\")\n for branch in branches:\n c = min_klass.checkout(\n path=os.path.join(work_dir.name, self.name.replace(\" \", \"\"), branch),\n branch=branch,\n )\n\n version = c.version\n logging.info(f\"{self.name}#{branch} is version {version}\")\n if version not in main_versions.keys():\n main_versions[version] = [c]\n\n if component_klass is not None:\n # components can increment their own version first without incrementing min\n manifest = self.latest\n logging.info(f\"Examining components in the latest manifest of {manifest.build.name} ({manifest.build.version})\")\n for component in manifest.components.values():\n if component.name == self.name:\n continue\n\n logging.info(f\"Checking out {component.name}#main\")\n component = component_klass.checkout(\n name=component.name,\n path=os.path.join(work_dir.name, 
component.name),\n opensearch_version=manifest.build.version,\n branch=\"main\",\n )\n\n component_version = component.version\n if component_version:\n release_version = \".\".join(component_version.split(\".\")[:3])\n if release_version not in main_versions.keys():\n main_versions[release_version] = []\n main_versions[release_version].append(component)\n logging.info(f\"{component.name}#main is version {release_version} (from {component_version})\")\n\n # summarize\n logging.info(\"Found versions on main:\")\n for main_version in main_versions.keys():\n for component in main_versions[main_version]:\n logging.info(f\" {component.name}={main_version}\")\n\n # generate new manifests\n for release_version in sorted(main_versions.keys() - known_versions):\n self.write_manifest(release_version, main_versions[release_version])\n self.add_to_cron(release_version)\n\n def create_manifest(self, version: str, components: List = []) -> InputManifest:\n image_map = {\n \"opensearch\": \"opensearchstaging/ci-runner:ci-runner-centos7-opensearch-build-v2\",\n \"opensearch-dashboards\": \"opensearchstaging/ci-runner:ci-runner-centos7-opensearch-dashboards-build-v2\"\n }\n\n data: Dict = {\n \"schema-version\": \"1.0\",\n \"build\": {\n \"name\": self.name,\n \"version\": version\n },\n \"ci\": {\n \"image\": {\n \"name\": image_map[self.prefix]\n }\n },\n \"components\": [],\n }\n\n for component in components:\n logging.info(f\" Adding {component.name}\")\n data[\"components\"].append(component.to_dict())\n\n return InputManifest(data)\n\n def write_manifest(self, version: str, components: List = []) -> None:\n logging.info(f\"Creating new version: {version}\")\n manifest = self.create_manifest(version, components)\n manifest_dir = os.path.join(self.manifests_path(), version)\n os.makedirs(manifest_dir, exist_ok=True)\n manifest_path = os.path.join(manifest_dir, f\"{self.prefix}-{version}.yml\")\n manifest.to_file(manifest_path)\n logging.info(f\"Wrote {manifest_path}\")\n\n def add_to_cron(self, version: str) -> None:\n logging.info(f\"Adding new version to cron: {version}\")\n jenkinsfile = self.cron_jenkinsfile()\n with open(jenkinsfile, \"r\") as f:\n data = f.read()\n\n cron_entry = f\"H 1 * * * %INPUT_MANIFEST={version}/{self.prefix}-{version}.yml;TARGET_JOB_NAME=distribution-build-{self.prefix}\\n\"\n\n if cron_entry in data:\n raise ValueError(f\"{jenkinsfile} already contains an entry for {self.prefix} {version}\")\n\n data = data.replace(\n \"parameterizedCron '''\\n\",\n f\"parameterizedCron '''\\n{' ' * 12}{cron_entry}\"\n )\n\n with open(jenkinsfile, \"w\") as f:\n f.write(data)\n\n logging.info(f\"Wrote {jenkinsfile}\")\n", "path": "src/manifests_workflow/input_manifests.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport glob\nimport logging\nimport os\nimport re\nfrom abc import abstractmethod\nfrom typing import Dict, List, Type, Union\n\nfrom manifests.input_manifest import InputComponents, InputManifest\nfrom manifests.manifests import Manifests\nfrom manifests_workflow.component_opensearch import ComponentOpenSearch\nfrom manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin\nfrom manifests_workflow.component_opensearch_min import ComponentOpenSearchMin\nfrom system.temporary_directory import TemporaryDirectory\n\n\nclass InputManifests(Manifests):\n def 
__init__(self, name: str) -> None:\n self.name = name\n self.prefix = name.lower().replace(\" \", \"-\")\n super().__init__(InputManifest, InputManifests.files(self.prefix))\n\n @classmethod\n def manifests_path(self) -> str:\n return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"manifests\"))\n\n @classmethod\n def jenkins_path(self) -> str:\n return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"jenkins\"))\n\n @classmethod\n def cron_jenkinsfile(self) -> str:\n return os.path.join(self.jenkins_path(), \"check-for-build.jenkinsfile\")\n\n @classmethod\n def files(self, name: str) -> List:\n results = []\n for filename in glob.glob(os.path.join(self.manifests_path(), f\"**/{name}-*.yml\")):\n # avoids the -maven manifest\n match = re.search(rf\"^{name}-([0-9.]*).yml$\", os.path.basename(filename))\n if match:\n results.append(filename)\n return results\n\n @abstractmethod\n def update(\n self,\n min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]],\n component_klass: Type[ComponentOpenSearch],\n keep: bool = False,\n ) -> None:\n known_versions = self.versions\n logging.info(f\"Known versions: {known_versions}\")\n main_versions: Dict = {}\n with TemporaryDirectory(keep=keep, chdir=True) as work_dir:\n logging.info(f\"Checking out components into {work_dir.name}\")\n\n # check out and build #main, 1.x, etc.\n branches = min_klass.branches()\n\n logging.info(f\"Checking {self.name} {branches} branches\")\n for branch in branches:\n c = min_klass.checkout(\n path=os.path.join(work_dir.name, self.name.replace(\" \", \"\"), branch),\n branch=branch,\n )\n\n version = c.version\n logging.info(f\"{self.name}#{branch} is version {version}\")\n if version not in main_versions.keys():\n main_versions[version] = [c]\n\n if component_klass is not None:\n # components can increment their own version first without incrementing min\n manifest = self.latest\n logging.info(f\"Examining components in the latest manifest of {manifest.build.name} ({manifest.build.version})\")\n for component in manifest.components.values():\n if component.name == self.name:\n continue\n\n logging.info(f\"Checking out {component.name}#main\")\n component = component_klass.checkout(\n name=component.name,\n path=os.path.join(work_dir.name, component.name),\n opensearch_version=manifest.build.version,\n branch=\"main\",\n )\n\n component_version = component.version\n if component_version:\n release_version = \".\".join(component_version.split(\".\")[:3])\n if release_version not in main_versions.keys():\n main_versions[release_version] = []\n main_versions[release_version].append(component)\n logging.info(f\"{component.name}#main is version {release_version} (from {component_version})\")\n\n # summarize\n logging.info(\"Found versions on main:\")\n for main_version in main_versions.keys():\n for component in main_versions[main_version]:\n logging.info(f\" {component.name}={main_version}\")\n\n # generate new manifests\n for release_version in sorted(main_versions.keys() - known_versions):\n self.write_manifest(release_version, main_versions[release_version])\n self.add_to_cron(release_version)\n\n def create_manifest(self, version: str, components: List = []) -> InputManifest:\n templates_base_path = os.path.join(self.manifests_path(), \"templates\")\n template_version_folder = version.split(\".\")[0] + \".x\"\n template_full_path = os.path.join(templates_base_path, self.prefix, template_version_folder, \"manifest.yml\")\n if not 
os.path.exists(template_full_path):\n template_full_path = os.path.join(templates_base_path, self.prefix, \"default\", \"manifest.yml\")\n\n manifest = InputManifest.from_file(open(template_full_path))\n\n manifest.build.version = version\n manifests_components = []\n\n for component in components:\n logging.info(f\" Adding {component.name}\")\n manifests_components.append(component.to_dict())\n\n manifest.components = InputComponents(manifests_components) # type: ignore\n return manifest\n\n def write_manifest(self, version: str, components: List = []) -> None:\n logging.info(f\"Creating new version: {version}\")\n manifest = self.create_manifest(version, components)\n manifest_dir = os.path.join(self.manifests_path(), version)\n os.makedirs(manifest_dir, exist_ok=True)\n manifest_path = os.path.join(manifest_dir, f\"{self.prefix}-{version}.yml\")\n manifest.to_file(manifest_path)\n logging.info(f\"Wrote {manifest_path}\")\n\n def add_to_cron(self, version: str) -> None:\n logging.info(f\"Adding new version to cron: {version}\")\n jenkinsfile = self.cron_jenkinsfile()\n with open(jenkinsfile, \"r\") as f:\n data = f.read()\n\n cron_entry = f\"H 1 * * * %INPUT_MANIFEST={version}/{self.prefix}-{version}.yml;TARGET_JOB_NAME=distribution-build-{self.prefix}\\n\"\n\n if cron_entry in data:\n raise ValueError(f\"{jenkinsfile} already contains an entry for {self.prefix} {version}\")\n\n data = data.replace(\"parameterizedCron '''\\n\", f\"parameterizedCron '''\\n{' ' * 12}{cron_entry}\")\n\n with open(jenkinsfile, \"w\") as f:\n f.write(data)\n\n logging.info(f\"Wrote {jenkinsfile}\")\n", "path": "src/manifests_workflow/input_manifests.py"}]}
2,201
849
gh_patches_debug_39707
rasdani/github-patches
git_diff
kubeflow__pipelines-4831
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- VolumeOp doesn't support GC after workflow deletion The PVC created from VolumeOp is not automatically recycled after workflow is deleted. I wonder if owner reference can work with PVC when it is set with workflow's reference. If it works, we can provide an option in volumeop to set it when creating the PVC. If it doesn't work, we should provide a way to either use Argo's volume API or support a delete PVC op in exit handler. VolumeOp doesn't support GC after workflow deletion The PVC created from VolumeOp is not automatically recycled after workflow is deleted. I wonder if owner reference can work with PVC when it is set with workflow's reference. If it works, we can provide an option in volumeop to set it when creating the PVC. If it doesn't work, we should provide a way to either use Argo's volume API or support a delete PVC op in exit handler. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sdk/python/kfp/dsl/_resource_op.py` Content: ``` 1 # Copyright 2019 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import Dict, List, Optional 16 import warnings 17 18 from ._container_op import BaseOp, ContainerOp 19 from . import _pipeline_param 20 21 22 class Resource(object): 23 """ 24 A wrapper over Argo ResourceTemplate definition object 25 (io.argoproj.workflow.v1alpha1.ResourceTemplate) 26 which is used to represent the `resource` property in argo's workflow 27 template (io.argoproj.workflow.v1alpha1.Template). 28 """ 29 swagger_types = { 30 "action": "str", 31 "merge_strategy": "str", 32 "success_condition": "str", 33 "failure_condition": "str", 34 "manifest": "str", 35 "flags": "list[str]" 36 } 37 openapi_types = { 38 "action": "str", 39 "merge_strategy": "str", 40 "success_condition": "str", 41 "failure_condition": "str", 42 "manifest": "str", 43 "flags": "list[str]" 44 } 45 attribute_map = { 46 "action": "action", 47 "merge_strategy": "mergeStrategy", 48 "success_condition": "successCondition", 49 "failure_condition": "failureCondition", 50 "manifest": "manifest", 51 "flags": "flags" 52 } 53 54 def __init__(self, 55 action: str = None, 56 merge_strategy: str = None, 57 success_condition: str = None, 58 failure_condition: str = None, 59 manifest: str = None, 60 flags: Optional[List[str]] = None): 61 """Create a new instance of Resource""" 62 self.action = action 63 self.merge_strategy = merge_strategy 64 self.success_condition = success_condition 65 self.failure_condition = failure_condition 66 self.manifest = manifest 67 self.flags = flags 68 69 70 class ResourceOp(BaseOp): 71 """Represents an op which will be translated into a resource template. 72 73 TODO(https://github.com/kubeflow/pipelines/issues/4822): Determine the 74 stability level of this feature. 
75 76 Args: 77 k8s_resource: A k8s resource which will be submitted to the cluster 78 action: One of "create"/"delete"/"apply"/"patch" (default is "create") 79 merge_strategy: The merge strategy for the "apply" action 80 success_condition: The successCondition of the template 81 failure_condition: The failureCondition of the template 82 For more info see: 83 https://github.com/argoproj/argo/blob/master/examples/k8s-jobs.yaml 84 attribute_outputs: Maps output labels to resource's json paths, 85 similarly to file_outputs of ContainerOp 86 kwargs: name, sidecars. See BaseOp definition 87 88 Raises: 89 ValueError: if not inside a pipeline 90 if the name is an invalid string 91 if no k8s_resource is provided 92 if merge_strategy is set without "apply" action 93 """ 94 95 def __init__(self, 96 k8s_resource=None, 97 action: str = "create", 98 merge_strategy: str = None, 99 success_condition: str = None, 100 failure_condition: str = None, 101 attribute_outputs: Optional[Dict[str, str]] = None, 102 flags: Optional[List[str]] = None, 103 **kwargs): 104 105 super().__init__(**kwargs) 106 self.attrs_with_pipelineparams = list(self.attrs_with_pipelineparams) 107 self.attrs_with_pipelineparams.extend( 108 ["_resource", "k8s_resource", "attribute_outputs"]) 109 110 if k8s_resource is None: 111 raise ValueError("You need to provide a k8s_resource.") 112 113 if action == "delete": 114 warnings.warn( 115 'Please use `kubernetes_resource_delete_op` instead of ' 116 '`ResourceOp(action="delete")`', 117 DeprecationWarning) 118 119 if merge_strategy and action != "apply": 120 raise ValueError("You can't set merge_strategy when action != 'apply'") 121 122 # if action is delete, there should not be any outputs, success_condition, 123 # and failure_condition 124 if action == "delete" and (success_condition or failure_condition or 125 attribute_outputs): 126 raise ValueError( 127 "You can't set success_condition, failure_condition, or " 128 "attribute_outputs when action == 'delete'" 129 ) 130 131 if action == "delete" and flags is None: 132 flags = ["--wait=false"] 133 init_resource = { 134 "action": action, 135 "merge_strategy": merge_strategy, 136 "success_condition": success_condition, 137 "failure_condition": failure_condition, 138 "flags": flags 139 } 140 # `resource` prop in `io.argoproj.workflow.v1alpha1.Template` 141 self._resource = Resource(**init_resource) 142 143 self.k8s_resource = k8s_resource 144 145 # if action is delete, there should not be any outputs, success_condition, 146 # and failure_condition 147 if action == "delete": 148 self.attribute_outputs = {} 149 self.outputs = {} 150 self.output = None 151 return 152 153 # Set attribute_outputs 154 extra_attribute_outputs = \ 155 attribute_outputs if attribute_outputs else {} 156 self.attribute_outputs = \ 157 self.attribute_outputs if hasattr(self, "attribute_outputs") \ 158 else {} 159 self.attribute_outputs.update(extra_attribute_outputs) 160 # Add name and manifest if not specified by the user 161 if "name" not in self.attribute_outputs: 162 self.attribute_outputs["name"] = "{.metadata.name}" 163 if "manifest" not in self.attribute_outputs: 164 self.attribute_outputs["manifest"] = "{}" 165 166 # Set outputs 167 self.outputs = { 168 name: _pipeline_param.PipelineParam(name, op_name=self.name) 169 for name in self.attribute_outputs.keys() 170 } 171 # If user set a single attribute_output, set self.output as that 172 # parameter, else set it as the resource name 173 self.output = self.outputs["name"] 174 if len(extra_attribute_outputs) == 1: 175 
self.output = self.outputs[list(extra_attribute_outputs)[0]] 176 177 @property 178 def resource(self): 179 """`Resource` object that represents the `resource` property in `io.argoproj.workflow.v1alpha1.Template`. 180 """ 181 return self._resource 182 183 def delete(self, flags: Optional[List[str]] = None): 184 """Returns a ResourceOp which deletes the resource.""" 185 if self.resource.action == "delete": 186 raise ValueError("This operation is already a resource deletion.") 187 188 if isinstance(self.k8s_resource, dict): 189 kind = self.k8s_resource["kind"] 190 else: 191 kind = self.k8s_resource.kind 192 193 return kubernetes_resource_delete_op( 194 name=self.outputs["name"], kind=kind, flags=flags or ["--wait=false"]) 195 196 197 def kubernetes_resource_delete_op( 198 name: str, 199 kind: str, 200 namespace: str = None, 201 flags: Optional[List[str]] = None, 202 ) -> ContainerOp: 203 """Operation that deletes a Kubernetes resource. 204 205 Outputs: 206 name: The name of the deleted resource 207 """ 208 209 command = [ 210 "kubectl", "delete", 211 str(kind), 212 str(name), "--ignore-not-found", "--output", "name" 213 ] 214 if namespace: 215 command.extend(["--namespace", str(namespace)]) 216 if flags: 217 command.extend(flags) 218 219 result = ContainerOp( 220 name="kubernetes_resource_delete", 221 image="gcr.io/cloud-builders/kubectl", 222 command=command, 223 ) 224 return result 225 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sdk/python/kfp/dsl/_resource_op.py b/sdk/python/kfp/dsl/_resource_op.py --- a/sdk/python/kfp/dsl/_resource_op.py +++ b/sdk/python/kfp/dsl/_resource_op.py @@ -31,6 +31,7 @@ "merge_strategy": "str", "success_condition": "str", "failure_condition": "str", + "set_owner_reference": "bool", "manifest": "str", "flags": "list[str]" } @@ -39,6 +40,7 @@ "merge_strategy": "str", "success_condition": "str", "failure_condition": "str", + "set_owner_reference": "bool", "manifest": "str", "flags": "list[str]" } @@ -47,6 +49,7 @@ "merge_strategy": "mergeStrategy", "success_condition": "successCondition", "failure_condition": "failureCondition", + "set_owner_reference": "setOwnerReference", "manifest": "manifest", "flags": "flags" } @@ -56,6 +59,7 @@ merge_strategy: str = None, success_condition: str = None, failure_condition: str = None, + set_owner_reference: bool = None, manifest: str = None, flags: Optional[List[str]] = None): """Create a new instance of Resource""" @@ -63,6 +67,7 @@ self.merge_strategy = merge_strategy self.success_condition = success_condition self.failure_condition = failure_condition + self.set_owner_reference = set_owner_reference self.manifest = manifest self.flags = flags @@ -98,6 +103,7 @@ merge_strategy: str = None, success_condition: str = None, failure_condition: str = None, + set_owner_reference: bool = None, attribute_outputs: Optional[Dict[str, str]] = None, flags: Optional[List[str]] = None, **kwargs): @@ -135,6 +141,7 @@ "merge_strategy": merge_strategy, "success_condition": success_condition, "failure_condition": failure_condition, + "set_owner_reference": set_owner_reference, "flags": flags } # `resource` prop in `io.argoproj.workflow.v1alpha1.Template`
{"golden_diff": "diff --git a/sdk/python/kfp/dsl/_resource_op.py b/sdk/python/kfp/dsl/_resource_op.py\n--- a/sdk/python/kfp/dsl/_resource_op.py\n+++ b/sdk/python/kfp/dsl/_resource_op.py\n@@ -31,6 +31,7 @@\n \"merge_strategy\": \"str\",\n \"success_condition\": \"str\",\n \"failure_condition\": \"str\",\n+ \"set_owner_reference\": \"bool\",\n \"manifest\": \"str\",\n \"flags\": \"list[str]\"\n }\n@@ -39,6 +40,7 @@\n \"merge_strategy\": \"str\",\n \"success_condition\": \"str\",\n \"failure_condition\": \"str\",\n+ \"set_owner_reference\": \"bool\",\n \"manifest\": \"str\",\n \"flags\": \"list[str]\"\n }\n@@ -47,6 +49,7 @@\n \"merge_strategy\": \"mergeStrategy\",\n \"success_condition\": \"successCondition\",\n \"failure_condition\": \"failureCondition\",\n+ \"set_owner_reference\": \"setOwnerReference\",\n \"manifest\": \"manifest\",\n \"flags\": \"flags\"\n }\n@@ -56,6 +59,7 @@\n merge_strategy: str = None,\n success_condition: str = None,\n failure_condition: str = None,\n+ set_owner_reference: bool = None,\n manifest: str = None,\n flags: Optional[List[str]] = None):\n \"\"\"Create a new instance of Resource\"\"\"\n@@ -63,6 +67,7 @@\n self.merge_strategy = merge_strategy\n self.success_condition = success_condition\n self.failure_condition = failure_condition\n+ self.set_owner_reference = set_owner_reference\n self.manifest = manifest\n self.flags = flags\n \n@@ -98,6 +103,7 @@\n merge_strategy: str = None,\n success_condition: str = None,\n failure_condition: str = None,\n+ set_owner_reference: bool = None,\n attribute_outputs: Optional[Dict[str, str]] = None,\n flags: Optional[List[str]] = None,\n **kwargs):\n@@ -135,6 +141,7 @@\n \"merge_strategy\": merge_strategy,\n \"success_condition\": success_condition,\n \"failure_condition\": failure_condition,\n+ \"set_owner_reference\": set_owner_reference,\n \"flags\": flags\n }\n # `resource` prop in `io.argoproj.workflow.v1alpha1.Template`\n", "issue": "VolumeOp doesn't support GC after workflow deletion\nThe PVC created from VolumeOp is not automatically recycled after workflow is deleted.\r\n\r\nI wonder if owner reference can work with PVC when it is set with workflow's reference. If it works, we can provide an option in volumeop to set it when creating the PVC.\r\n\r\nIf it doesn't work, we should provide a way to either use Argo's volume API or support a delete PVC op in exit handler.\nVolumeOp doesn't support GC after workflow deletion\nThe PVC created from VolumeOp is not automatically recycled after workflow is deleted.\r\n\r\nI wonder if owner reference can work with PVC when it is set with workflow's reference. 
If it works, we can provide an option in volumeop to set it when creating the PVC.\r\n\r\nIf it doesn't work, we should provide a way to either use Argo's volume API or support a delete PVC op in exit handler.\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, List, Optional\nimport warnings\n\nfrom ._container_op import BaseOp, ContainerOp\nfrom . import _pipeline_param\n\n\nclass Resource(object):\n \"\"\"\n A wrapper over Argo ResourceTemplate definition object\n (io.argoproj.workflow.v1alpha1.ResourceTemplate)\n which is used to represent the `resource` property in argo's workflow\n template (io.argoproj.workflow.v1alpha1.Template).\n \"\"\"\n swagger_types = {\n \"action\": \"str\",\n \"merge_strategy\": \"str\",\n \"success_condition\": \"str\",\n \"failure_condition\": \"str\",\n \"manifest\": \"str\",\n \"flags\": \"list[str]\"\n }\n openapi_types = {\n \"action\": \"str\",\n \"merge_strategy\": \"str\",\n \"success_condition\": \"str\",\n \"failure_condition\": \"str\",\n \"manifest\": \"str\",\n \"flags\": \"list[str]\"\n }\n attribute_map = {\n \"action\": \"action\",\n \"merge_strategy\": \"mergeStrategy\",\n \"success_condition\": \"successCondition\",\n \"failure_condition\": \"failureCondition\",\n \"manifest\": \"manifest\",\n \"flags\": \"flags\"\n }\n\n def __init__(self,\n action: str = None,\n merge_strategy: str = None,\n success_condition: str = None,\n failure_condition: str = None,\n manifest: str = None,\n flags: Optional[List[str]] = None):\n \"\"\"Create a new instance of Resource\"\"\"\n self.action = action\n self.merge_strategy = merge_strategy\n self.success_condition = success_condition\n self.failure_condition = failure_condition\n self.manifest = manifest\n self.flags = flags\n\n\nclass ResourceOp(BaseOp):\n \"\"\"Represents an op which will be translated into a resource template.\n\n TODO(https://github.com/kubeflow/pipelines/issues/4822): Determine the\n stability level of this feature.\n\n Args:\n k8s_resource: A k8s resource which will be submitted to the cluster\n action: One of \"create\"/\"delete\"/\"apply\"/\"patch\" (default is \"create\")\n merge_strategy: The merge strategy for the \"apply\" action\n success_condition: The successCondition of the template\n failure_condition: The failureCondition of the template\n For more info see:\n https://github.com/argoproj/argo/blob/master/examples/k8s-jobs.yaml\n attribute_outputs: Maps output labels to resource's json paths,\n similarly to file_outputs of ContainerOp\n kwargs: name, sidecars. 
See BaseOp definition\n\n Raises:\n ValueError: if not inside a pipeline\n if the name is an invalid string\n if no k8s_resource is provided\n if merge_strategy is set without \"apply\" action\n \"\"\"\n\n def __init__(self,\n k8s_resource=None,\n action: str = \"create\",\n merge_strategy: str = None,\n success_condition: str = None,\n failure_condition: str = None,\n attribute_outputs: Optional[Dict[str, str]] = None,\n flags: Optional[List[str]] = None,\n **kwargs):\n\n super().__init__(**kwargs)\n self.attrs_with_pipelineparams = list(self.attrs_with_pipelineparams)\n self.attrs_with_pipelineparams.extend(\n [\"_resource\", \"k8s_resource\", \"attribute_outputs\"])\n\n if k8s_resource is None:\n raise ValueError(\"You need to provide a k8s_resource.\")\n\n if action == \"delete\":\n warnings.warn(\n 'Please use `kubernetes_resource_delete_op` instead of '\n '`ResourceOp(action=\"delete\")`',\n DeprecationWarning)\n\n if merge_strategy and action != \"apply\":\n raise ValueError(\"You can't set merge_strategy when action != 'apply'\")\n\n # if action is delete, there should not be any outputs, success_condition,\n # and failure_condition\n if action == \"delete\" and (success_condition or failure_condition or\n attribute_outputs):\n raise ValueError(\n \"You can't set success_condition, failure_condition, or \"\n \"attribute_outputs when action == 'delete'\"\n )\n\n if action == \"delete\" and flags is None:\n flags = [\"--wait=false\"]\n init_resource = {\n \"action\": action,\n \"merge_strategy\": merge_strategy,\n \"success_condition\": success_condition,\n \"failure_condition\": failure_condition,\n \"flags\": flags\n }\n # `resource` prop in `io.argoproj.workflow.v1alpha1.Template`\n self._resource = Resource(**init_resource)\n\n self.k8s_resource = k8s_resource\n\n # if action is delete, there should not be any outputs, success_condition,\n # and failure_condition\n if action == \"delete\":\n self.attribute_outputs = {}\n self.outputs = {}\n self.output = None\n return\n\n # Set attribute_outputs\n extra_attribute_outputs = \\\n attribute_outputs if attribute_outputs else {}\n self.attribute_outputs = \\\n self.attribute_outputs if hasattr(self, \"attribute_outputs\") \\\n else {}\n self.attribute_outputs.update(extra_attribute_outputs)\n # Add name and manifest if not specified by the user\n if \"name\" not in self.attribute_outputs:\n self.attribute_outputs[\"name\"] = \"{.metadata.name}\"\n if \"manifest\" not in self.attribute_outputs:\n self.attribute_outputs[\"manifest\"] = \"{}\"\n\n # Set outputs\n self.outputs = {\n name: _pipeline_param.PipelineParam(name, op_name=self.name)\n for name in self.attribute_outputs.keys()\n }\n # If user set a single attribute_output, set self.output as that\n # parameter, else set it as the resource name\n self.output = self.outputs[\"name\"]\n if len(extra_attribute_outputs) == 1:\n self.output = self.outputs[list(extra_attribute_outputs)[0]]\n\n @property\n def resource(self):\n \"\"\"`Resource` object that represents the `resource` property in `io.argoproj.workflow.v1alpha1.Template`.\n \"\"\"\n return self._resource\n\n def delete(self, flags: Optional[List[str]] = None):\n \"\"\"Returns a ResourceOp which deletes the resource.\"\"\"\n if self.resource.action == \"delete\":\n raise ValueError(\"This operation is already a resource deletion.\")\n\n if isinstance(self.k8s_resource, dict):\n kind = self.k8s_resource[\"kind\"]\n else:\n kind = self.k8s_resource.kind\n\n return kubernetes_resource_delete_op(\n name=self.outputs[\"name\"], 
kind=kind, flags=flags or [\"--wait=false\"])\n\n\ndef kubernetes_resource_delete_op(\n name: str,\n kind: str,\n namespace: str = None,\n flags: Optional[List[str]] = None,\n) -> ContainerOp:\n \"\"\"Operation that deletes a Kubernetes resource.\n\n Outputs:\n name: The name of the deleted resource\n \"\"\"\n\n command = [\n \"kubectl\", \"delete\",\n str(kind),\n str(name), \"--ignore-not-found\", \"--output\", \"name\"\n ]\n if namespace:\n command.extend([\"--namespace\", str(namespace)])\n if flags:\n command.extend(flags)\n\n result = ContainerOp(\n name=\"kubernetes_resource_delete\",\n image=\"gcr.io/cloud-builders/kubectl\",\n command=command,\n )\n return result\n", "path": "sdk/python/kfp/dsl/_resource_op.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, List, Optional\nimport warnings\n\nfrom ._container_op import BaseOp, ContainerOp\nfrom . import _pipeline_param\n\n\nclass Resource(object):\n \"\"\"\n A wrapper over Argo ResourceTemplate definition object\n (io.argoproj.workflow.v1alpha1.ResourceTemplate)\n which is used to represent the `resource` property in argo's workflow\n template (io.argoproj.workflow.v1alpha1.Template).\n \"\"\"\n swagger_types = {\n \"action\": \"str\",\n \"merge_strategy\": \"str\",\n \"success_condition\": \"str\",\n \"failure_condition\": \"str\",\n \"set_owner_reference\": \"bool\",\n \"manifest\": \"str\",\n \"flags\": \"list[str]\"\n }\n openapi_types = {\n \"action\": \"str\",\n \"merge_strategy\": \"str\",\n \"success_condition\": \"str\",\n \"failure_condition\": \"str\",\n \"set_owner_reference\": \"bool\",\n \"manifest\": \"str\",\n \"flags\": \"list[str]\"\n }\n attribute_map = {\n \"action\": \"action\",\n \"merge_strategy\": \"mergeStrategy\",\n \"success_condition\": \"successCondition\",\n \"failure_condition\": \"failureCondition\",\n \"set_owner_reference\": \"setOwnerReference\",\n \"manifest\": \"manifest\",\n \"flags\": \"flags\"\n }\n\n def __init__(self,\n action: str = None,\n merge_strategy: str = None,\n success_condition: str = None,\n failure_condition: str = None,\n set_owner_reference: bool = None,\n manifest: str = None,\n flags: Optional[List[str]] = None):\n \"\"\"Create a new instance of Resource\"\"\"\n self.action = action\n self.merge_strategy = merge_strategy\n self.success_condition = success_condition\n self.failure_condition = failure_condition\n self.set_owner_reference = set_owner_reference\n self.manifest = manifest\n self.flags = flags\n\n\nclass ResourceOp(BaseOp):\n \"\"\"Represents an op which will be translated into a resource template.\n\n TODO(https://github.com/kubeflow/pipelines/issues/4822): Determine the\n stability level of this feature.\n\n Args:\n k8s_resource: A k8s resource which will be submitted to the cluster\n action: One of \"create\"/\"delete\"/\"apply\"/\"patch\" (default is \"create\")\n merge_strategy: The merge strategy for the \"apply\" action\n success_condition: The successCondition of the 
template\n failure_condition: The failureCondition of the template\n For more info see:\n https://github.com/argoproj/argo/blob/master/examples/k8s-jobs.yaml\n attribute_outputs: Maps output labels to resource's json paths,\n similarly to file_outputs of ContainerOp\n kwargs: name, sidecars. See BaseOp definition\n\n Raises:\n ValueError: if not inside a pipeline\n if the name is an invalid string\n if no k8s_resource is provided\n if merge_strategy is set without \"apply\" action\n \"\"\"\n\n def __init__(self,\n k8s_resource=None,\n action: str = \"create\",\n merge_strategy: str = None,\n success_condition: str = None,\n failure_condition: str = None,\n set_owner_reference: bool = None,\n attribute_outputs: Optional[Dict[str, str]] = None,\n flags: Optional[List[str]] = None,\n **kwargs):\n\n super().__init__(**kwargs)\n self.attrs_with_pipelineparams = list(self.attrs_with_pipelineparams)\n self.attrs_with_pipelineparams.extend(\n [\"_resource\", \"k8s_resource\", \"attribute_outputs\"])\n\n if k8s_resource is None:\n raise ValueError(\"You need to provide a k8s_resource.\")\n\n if action == \"delete\":\n warnings.warn(\n 'Please use `kubernetes_resource_delete_op` instead of '\n '`ResourceOp(action=\"delete\")`',\n DeprecationWarning)\n\n if merge_strategy and action != \"apply\":\n raise ValueError(\"You can't set merge_strategy when action != 'apply'\")\n\n # if action is delete, there should not be any outputs, success_condition,\n # and failure_condition\n if action == \"delete\" and (success_condition or failure_condition or\n attribute_outputs):\n raise ValueError(\n \"You can't set success_condition, failure_condition, or \"\n \"attribute_outputs when action == 'delete'\"\n )\n\n if action == \"delete\" and flags is None:\n flags = [\"--wait=false\"]\n init_resource = {\n \"action\": action,\n \"merge_strategy\": merge_strategy,\n \"success_condition\": success_condition,\n \"failure_condition\": failure_condition,\n \"set_owner_reference\": set_owner_reference,\n \"flags\": flags\n }\n # `resource` prop in `io.argoproj.workflow.v1alpha1.Template`\n self._resource = Resource(**init_resource)\n\n self.k8s_resource = k8s_resource\n\n # if action is delete, there should not be any outputs, success_condition,\n # and failure_condition\n if action == \"delete\":\n self.attribute_outputs = {}\n self.outputs = {}\n self.output = None\n return\n\n # Set attribute_outputs\n extra_attribute_outputs = \\\n attribute_outputs if attribute_outputs else {}\n self.attribute_outputs = \\\n self.attribute_outputs if hasattr(self, \"attribute_outputs\") \\\n else {}\n self.attribute_outputs.update(extra_attribute_outputs)\n # Add name and manifest if not specified by the user\n if \"name\" not in self.attribute_outputs:\n self.attribute_outputs[\"name\"] = \"{.metadata.name}\"\n if \"manifest\" not in self.attribute_outputs:\n self.attribute_outputs[\"manifest\"] = \"{}\"\n\n # Set outputs\n self.outputs = {\n name: _pipeline_param.PipelineParam(name, op_name=self.name)\n for name in self.attribute_outputs.keys()\n }\n # If user set a single attribute_output, set self.output as that\n # parameter, else set it as the resource name\n self.output = self.outputs[\"name\"]\n if len(extra_attribute_outputs) == 1:\n self.output = self.outputs[list(extra_attribute_outputs)[0]]\n\n @property\n def resource(self):\n \"\"\"`Resource` object that represents the `resource` property in `io.argoproj.workflow.v1alpha1.Template`.\n \"\"\"\n return self._resource\n\n def delete(self, flags: Optional[List[str]] = 
None):\n \"\"\"Returns a ResourceOp which deletes the resource.\"\"\"\n if self.resource.action == \"delete\":\n raise ValueError(\"This operation is already a resource deletion.\")\n\n if isinstance(self.k8s_resource, dict):\n kind = self.k8s_resource[\"kind\"]\n else:\n kind = self.k8s_resource.kind\n\n return kubernetes_resource_delete_op(\n name=self.outputs[\"name\"], kind=kind, flags=flags or [\"--wait=false\"])\n\n\ndef kubernetes_resource_delete_op(\n name: str,\n kind: str,\n namespace: str = None,\n flags: Optional[List[str]] = None,\n) -> ContainerOp:\n \"\"\"Operation that deletes a Kubernetes resource.\n\n Outputs:\n name: The name of the deleted resource\n \"\"\"\n\n command = [\n \"kubectl\", \"delete\",\n str(kind),\n str(name), \"--ignore-not-found\", \"--output\", \"name\"\n ]\n if namespace:\n command.extend([\"--namespace\", str(namespace)])\n if flags:\n command.extend(flags)\n\n result = ContainerOp(\n name=\"kubernetes_resource_delete\",\n image=\"gcr.io/cloud-builders/kubectl\",\n command=command,\n )\n return result\n", "path": "sdk/python/kfp/dsl/_resource_op.py"}]}
2,727
528
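The diff in the record above only adds the `set_owner_reference` plumbing to `ResourceOp`; the record itself never shows a caller. The sketch below is a hypothetical usage example, not code from the repository: it assumes `kfp.dsl.VolumeOp` forwards unknown keyword arguments to `ResourceOp` so the flag reaches the Argo resource template as `setOwnerReference`, and the pipeline name, PVC size, and busybox step are made up.

```python
# Hypothetical sketch: let Argo garbage-collect the PVC together with the workflow
# by setting the owner reference on the resource created by VolumeOp.
import kfp
from kfp import dsl


@dsl.pipeline(name="volume-gc-example")  # illustrative name
def volume_gc_pipeline():
    vop = dsl.VolumeOp(
        name="create-pvc",
        resource_name="scratch-pvc",
        size="1Gi",
        modes=["ReadWriteOnce"],
        set_owner_reference=True,  # assumption: VolumeOp passes this kwarg through to ResourceOp
    )

    # A throwaway step that mounts the volume, just to make the PVC useful.
    dsl.ContainerOp(
        name="use-pvc",
        image="busybox",
        command=["sh", "-c", "echo hello > /data/out.txt"],
        pvolumes={"/data": vop.volume},
    )


if __name__ == "__main__":
    kfp.compiler.Compiler().compile(volume_gc_pipeline, "volume_gc_pipeline.yaml")
```

With the owner reference set, deleting the Argo workflow should cascade to the PVC instead of leaving it behind, which is the gap described in the issue.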
gh_patches_debug_1984
rasdani/github-patches
git_diff
mkdocs__mkdocs-694
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- current_page.ancestors only contains direct ancestor and not the full path of the page I'm using the mkdocs theme and tried to enhance it with a breadcrumb trail. The page navigation is created automatically by mkdocs (I don't use the pages confguration since I have almost 300 pages). I copied and adapted the `breadcrumbs.html` file from the readthedocs theme and integrated it in `content.html`: ``` <ol class="breadcrumb"> <li><a href="{{ homepage_url }}">Docs</a></li> {% if current_page %} {% for doc in current_page.ancestors %} {% if doc.link %} <li><a href="{{ doc.link|e }}">{{ doc.title }}</a></li> {% else %} <li>{{ doc.title }}</li> {% endif %} {% endfor %} {% endif %} {% if current_page %}<li>{{ current_page.title }}</li>{% endif %} </ol> ``` My file path (starting from the `docs_dir`) is: `beheerteam/diensten/algemeen/ActiveDirectory.md` The generated breadcrumb trail is: `Docs/algemeen/ActiveDirectory` `algemeen` is the only part that originates from the loop `for doc in current_page.ancestors`. Maybe this is a stupid question or it is just not possible, but I couldn't find i in the documentation and I'm just starting with mkdocs and couldn't understand the source on how this works. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mkdocs/nav.py` Content: ``` 1 # coding: utf-8 2 3 """ 4 Deals with generating the site-wide navigation. 5 6 This consists of building a set of interlinked page and header objects. 7 """ 8 9 from __future__ import unicode_literals 10 import datetime 11 import logging 12 import os 13 14 from mkdocs import utils, exceptions 15 16 log = logging.getLogger(__name__) 17 18 19 def filename_to_title(filename): 20 """ 21 Automatically generate a default title, given a filename. 22 """ 23 if utils.is_homepage(filename): 24 return 'Home' 25 26 return utils.filename_to_title(filename) 27 28 29 class SiteNavigation(object): 30 def __init__(self, pages_config, use_directory_urls=True): 31 self.url_context = URLContext() 32 self.file_context = FileContext() 33 self.nav_items, self.pages = _generate_site_navigation( 34 pages_config, self.url_context, use_directory_urls) 35 self.homepage = self.pages[0] if self.pages else None 36 self.use_directory_urls = use_directory_urls 37 38 def __str__(self): 39 return ''.join([str(item) for item in self]) 40 41 def __iter__(self): 42 return iter(self.nav_items) 43 44 def walk_pages(self): 45 """ 46 Returns each page in the site in turn. 47 48 Additionally this sets the active status of the pages and headers, 49 in the site navigation, so that the rendered navbar can correctly 50 highlight the currently active page and/or header item. 
51 """ 52 page = self.homepage 53 page.set_active() 54 self.url_context.set_current_url(page.abs_url) 55 self.file_context.set_current_path(page.input_path) 56 yield page 57 while page.next_page: 58 page.set_active(False) 59 page = page.next_page 60 page.set_active() 61 self.url_context.set_current_url(page.abs_url) 62 self.file_context.set_current_path(page.input_path) 63 yield page 64 page.set_active(False) 65 66 @property 67 def source_files(self): 68 if not hasattr(self, '_source_files'): 69 self._source_files = set([page.input_path for page in self.pages]) 70 return self._source_files 71 72 73 class URLContext(object): 74 """ 75 The URLContext is used to ensure that we can generate the appropriate 76 relative URLs to other pages from any given page in the site. 77 78 We use relative URLs so that static sites can be deployed to any location 79 without having to specify what the path component on the host will be 80 if the documentation is not hosted at the root path. 81 """ 82 83 def __init__(self): 84 self.base_path = '/' 85 86 def set_current_url(self, current_url): 87 self.base_path = os.path.dirname(current_url) 88 89 def make_relative(self, url): 90 """ 91 Given a URL path return it as a relative URL, 92 given the context of the current page. 93 """ 94 suffix = '/' if (url.endswith('/') and len(url) > 1) else '' 95 # Workaround for bug on `os.path.relpath()` in Python 2.6 96 if self.base_path == '/': 97 if url == '/': 98 # Workaround for static assets 99 return '.' 100 return url.lstrip('/') 101 # Under Python 2.6, relative_path adds an extra '/' at the end. 102 relative_path = os.path.relpath(url, start=self.base_path) 103 relative_path = relative_path.rstrip('/') + suffix 104 105 return utils.path_to_url(relative_path) 106 107 108 class FileContext(object): 109 """ 110 The FileContext is used to ensure that we can generate the appropriate 111 full path for other pages given their relative path from a particular page. 112 113 This is used when we have relative hyperlinks in the documentation, so that 114 we can ensure that they point to markdown documents that actually exist 115 in the `pages` config. 116 """ 117 def __init__(self): 118 self.current_file = None 119 self.base_path = '' 120 121 def set_current_path(self, current_path): 122 self.current_file = current_path 123 self.base_path = os.path.dirname(current_path) 124 125 def make_absolute(self, path): 126 """ 127 Given a relative file path return it as a POSIX-style 128 absolute filepath, given the context of the current page. 129 """ 130 return os.path.normpath(os.path.join(self.base_path, path)) 131 132 133 class Page(object): 134 def __init__(self, title, url, path, url_context): 135 136 self.title = title 137 self.abs_url = url 138 self.active = False 139 self.url_context = url_context 140 self.update_date = datetime.datetime.now().strftime("%Y-%m-%d") 141 142 # Relative paths to the input markdown file and output html file. 
143 self.input_path = path 144 self.output_path = utils.get_html_path(path) 145 146 # Links to related pages 147 self.previous_page = None 148 self.next_page = None 149 self.ancestors = [] 150 151 @property 152 def url(self): 153 return self.url_context.make_relative(self.abs_url) 154 155 @property 156 def is_homepage(self): 157 return utils.is_homepage(self.input_path) 158 159 @property 160 def is_top_level(self): 161 return len(self.ancestors) == 0 162 163 def __str__(self): 164 return self.indent_print() 165 166 def indent_print(self, depth=0): 167 indent = ' ' * depth 168 active_marker = ' [*]' if self.active else '' 169 title = self.title if (self.title is not None) else '[blank]' 170 return '%s%s - %s%s\n' % (indent, title, self.abs_url, active_marker) 171 172 def set_active(self, active=True): 173 self.active = active 174 for ancestor in self.ancestors: 175 ancestor.set_active(active) 176 177 178 class Header(object): 179 def __init__(self, title, children): 180 self.title, self.children = title, children 181 self.active = False 182 self.ancestors = [] 183 184 def __str__(self): 185 return self.indent_print() 186 187 @property 188 def is_top_level(self): 189 return len(self.ancestors) == 0 190 191 def indent_print(self, depth=0): 192 indent = ' ' * depth 193 active_marker = ' [*]' if self.active else '' 194 ret = '%s%s%s\n' % (indent, self.title, active_marker) 195 for item in self.children: 196 ret += item.indent_print(depth + 1) 197 return ret 198 199 def set_active(self, active=True): 200 self.active = active 201 for ancestor in self.ancestors: 202 ancestor.set_active(active) 203 204 205 def _path_to_page(path, title, url_context, use_directory_urls): 206 if title is None: 207 title = filename_to_title(path.split(os.path.sep)[-1]) 208 url = utils.get_url_path(path, use_directory_urls) 209 return Page(title=title, url=url, path=path, 210 url_context=url_context) 211 212 213 def _follow(config_line, url_context, use_dir_urls, header=None, title=None): 214 215 if isinstance(config_line, utils.string_types): 216 path = os.path.normpath(config_line) 217 page = _path_to_page(path, title, url_context, use_dir_urls) 218 219 if header: 220 page.ancestors = [header] 221 header.children.append(page) 222 223 yield page 224 raise StopIteration 225 226 elif not isinstance(config_line, dict): 227 msg = ("Line in 'page' config is of type {0}, dict or string " 228 "expected. Config: {1}").format(type(config_line), config_line) 229 raise exceptions.ConfigurationError(msg) 230 231 if len(config_line) > 1: 232 raise exceptions.ConfigurationError( 233 "Page configs should be in the format 'name: markdown.md'. The " 234 "config contains an invalid entry: {0}".format(config_line)) 235 elif len(config_line) == 0: 236 log.warning("Ignoring empty line in the pages config.") 237 raise StopIteration 238 239 next_cat_or_title, subpages_or_path = next(iter(config_line.items())) 240 241 if isinstance(subpages_or_path, utils.string_types): 242 path = subpages_or_path 243 for sub in _follow(path, url_context, use_dir_urls, header=header, title=next_cat_or_title): 244 yield sub 245 raise StopIteration 246 247 elif not isinstance(subpages_or_path, list): 248 msg = ("Line in 'page' config is of type {0}, list or string " 249 "expected for sub pages. 
Config: {1}" 250 ).format(type(config_line), config_line) 251 raise exceptions.ConfigurationError(msg) 252 253 next_header = Header(title=next_cat_or_title, children=[]) 254 if header: 255 next_header.ancestors = [header] 256 header.children.append(next_header) 257 yield next_header 258 259 subpages = subpages_or_path 260 261 for subpage in subpages: 262 for sub in _follow(subpage, url_context, use_dir_urls, next_header): 263 yield sub 264 265 266 def _generate_site_navigation(pages_config, url_context, use_dir_urls=True): 267 """ 268 Returns a list of Page and Header instances that represent the 269 top level site navigation. 270 """ 271 nav_items = [] 272 pages = [] 273 274 previous = None 275 276 for config_line in pages_config: 277 278 for page_or_header in _follow( 279 config_line, url_context, use_dir_urls): 280 281 if isinstance(page_or_header, Header): 282 283 if page_or_header.is_top_level: 284 nav_items.append(page_or_header) 285 286 elif isinstance(page_or_header, Page): 287 288 if page_or_header.is_top_level: 289 nav_items.append(page_or_header) 290 291 pages.append(page_or_header) 292 293 if previous: 294 page_or_header.previous_page = previous 295 previous.next_page = page_or_header 296 previous = page_or_header 297 298 if len(pages) == 0: 299 raise exceptions.ConfigurationError( 300 "No pages found in the pages config. " 301 "Remove it entirely to enable automatic page discovery.") 302 303 return (nav_items, pages) 304 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mkdocs/nav.py b/mkdocs/nav.py --- a/mkdocs/nav.py +++ b/mkdocs/nav.py @@ -217,7 +217,7 @@ page = _path_to_page(path, title, url_context, use_dir_urls) if header: - page.ancestors = [header] + page.ancestors = header.ancestors + [header, ] header.children.append(page) yield page
{"golden_diff": "diff --git a/mkdocs/nav.py b/mkdocs/nav.py\n--- a/mkdocs/nav.py\n+++ b/mkdocs/nav.py\n@@ -217,7 +217,7 @@\n page = _path_to_page(path, title, url_context, use_dir_urls)\n \n if header:\n- page.ancestors = [header]\n+ page.ancestors = header.ancestors + [header, ]\n header.children.append(page)\n \n yield page\n", "issue": "current_page.ancestors only contains direct ancestor and not the full path of the page\nI'm using the mkdocs theme and tried to enhance it with a breadcrumb trail. The page navigation is created automatically by mkdocs (I don't use the pages confguration since I have almost 300 pages).\nI copied and adapted the `breadcrumbs.html` file from the readthedocs theme and integrated it in `content.html`:\n\n```\n<ol class=\"breadcrumb\">\n<li><a href=\"{{ homepage_url }}\">Docs</a></li>\n{% if current_page %}\n {% for doc in current_page.ancestors %}\n {% if doc.link %}\n <li><a href=\"{{ doc.link|e }}\">{{ doc.title }}</a></li>\n {% else %}\n <li>{{ doc.title }}</li>\n {% endif %}\n {% endfor %}\n{% endif %}\n{% if current_page %}<li>{{ current_page.title }}</li>{% endif %}\n</ol>\n```\n\nMy file path (starting from the `docs_dir`) is:\n`beheerteam/diensten/algemeen/ActiveDirectory.md`\nThe generated breadcrumb trail is:\n`Docs/algemeen/ActiveDirectory`\n\n`algemeen` is the only part that originates from the loop `for doc in current_page.ancestors`.\n\nMaybe this is a stupid question or it is just not possible, but I couldn't find i in the documentation and I'm just starting with mkdocs and couldn't understand the source on how this works.\n\n", "before_files": [{"content": "# coding: utf-8\n\n\"\"\"\nDeals with generating the site-wide navigation.\n\nThis consists of building a set of interlinked page and header objects.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport datetime\nimport logging\nimport os\n\nfrom mkdocs import utils, exceptions\n\nlog = logging.getLogger(__name__)\n\n\ndef filename_to_title(filename):\n \"\"\"\n Automatically generate a default title, given a filename.\n \"\"\"\n if utils.is_homepage(filename):\n return 'Home'\n\n return utils.filename_to_title(filename)\n\n\nclass SiteNavigation(object):\n def __init__(self, pages_config, use_directory_urls=True):\n self.url_context = URLContext()\n self.file_context = FileContext()\n self.nav_items, self.pages = _generate_site_navigation(\n pages_config, self.url_context, use_directory_urls)\n self.homepage = self.pages[0] if self.pages else None\n self.use_directory_urls = use_directory_urls\n\n def __str__(self):\n return ''.join([str(item) for item in self])\n\n def __iter__(self):\n return iter(self.nav_items)\n\n def walk_pages(self):\n \"\"\"\n Returns each page in the site in turn.\n\n Additionally this sets the active status of the pages and headers,\n in the site navigation, so that the rendered navbar can correctly\n highlight the currently active page and/or header item.\n \"\"\"\n page = self.homepage\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n while page.next_page:\n page.set_active(False)\n page = page.next_page\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n page.set_active(False)\n\n @property\n def source_files(self):\n if not hasattr(self, '_source_files'):\n self._source_files = set([page.input_path for page in self.pages])\n return self._source_files\n\n\nclass 
URLContext(object):\n \"\"\"\n The URLContext is used to ensure that we can generate the appropriate\n relative URLs to other pages from any given page in the site.\n\n We use relative URLs so that static sites can be deployed to any location\n without having to specify what the path component on the host will be\n if the documentation is not hosted at the root path.\n \"\"\"\n\n def __init__(self):\n self.base_path = '/'\n\n def set_current_url(self, current_url):\n self.base_path = os.path.dirname(current_url)\n\n def make_relative(self, url):\n \"\"\"\n Given a URL path return it as a relative URL,\n given the context of the current page.\n \"\"\"\n suffix = '/' if (url.endswith('/') and len(url) > 1) else ''\n # Workaround for bug on `os.path.relpath()` in Python 2.6\n if self.base_path == '/':\n if url == '/':\n # Workaround for static assets\n return '.'\n return url.lstrip('/')\n # Under Python 2.6, relative_path adds an extra '/' at the end.\n relative_path = os.path.relpath(url, start=self.base_path)\n relative_path = relative_path.rstrip('/') + suffix\n\n return utils.path_to_url(relative_path)\n\n\nclass FileContext(object):\n \"\"\"\n The FileContext is used to ensure that we can generate the appropriate\n full path for other pages given their relative path from a particular page.\n\n This is used when we have relative hyperlinks in the documentation, so that\n we can ensure that they point to markdown documents that actually exist\n in the `pages` config.\n \"\"\"\n def __init__(self):\n self.current_file = None\n self.base_path = ''\n\n def set_current_path(self, current_path):\n self.current_file = current_path\n self.base_path = os.path.dirname(current_path)\n\n def make_absolute(self, path):\n \"\"\"\n Given a relative file path return it as a POSIX-style\n absolute filepath, given the context of the current page.\n \"\"\"\n return os.path.normpath(os.path.join(self.base_path, path))\n\n\nclass Page(object):\n def __init__(self, title, url, path, url_context):\n\n self.title = title\n self.abs_url = url\n self.active = False\n self.url_context = url_context\n self.update_date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n\n # Relative paths to the input markdown file and output html file.\n self.input_path = path\n self.output_path = utils.get_html_path(path)\n\n # Links to related pages\n self.previous_page = None\n self.next_page = None\n self.ancestors = []\n\n @property\n def url(self):\n return self.url_context.make_relative(self.abs_url)\n\n @property\n def is_homepage(self):\n return utils.is_homepage(self.input_path)\n\n @property\n def is_top_level(self):\n return len(self.ancestors) == 0\n\n def __str__(self):\n return self.indent_print()\n\n def indent_print(self, depth=0):\n indent = ' ' * depth\n active_marker = ' [*]' if self.active else ''\n title = self.title if (self.title is not None) else '[blank]'\n return '%s%s - %s%s\\n' % (indent, title, self.abs_url, active_marker)\n\n def set_active(self, active=True):\n self.active = active\n for ancestor in self.ancestors:\n ancestor.set_active(active)\n\n\nclass Header(object):\n def __init__(self, title, children):\n self.title, self.children = title, children\n self.active = False\n self.ancestors = []\n\n def __str__(self):\n return self.indent_print()\n\n @property\n def is_top_level(self):\n return len(self.ancestors) == 0\n\n def indent_print(self, depth=0):\n indent = ' ' * depth\n active_marker = ' [*]' if self.active else ''\n ret = '%s%s%s\\n' % (indent, self.title, active_marker)\n for item in 
self.children:\n ret += item.indent_print(depth + 1)\n return ret\n\n def set_active(self, active=True):\n self.active = active\n for ancestor in self.ancestors:\n ancestor.set_active(active)\n\n\ndef _path_to_page(path, title, url_context, use_directory_urls):\n if title is None:\n title = filename_to_title(path.split(os.path.sep)[-1])\n url = utils.get_url_path(path, use_directory_urls)\n return Page(title=title, url=url, path=path,\n url_context=url_context)\n\n\ndef _follow(config_line, url_context, use_dir_urls, header=None, title=None):\n\n if isinstance(config_line, utils.string_types):\n path = os.path.normpath(config_line)\n page = _path_to_page(path, title, url_context, use_dir_urls)\n\n if header:\n page.ancestors = [header]\n header.children.append(page)\n\n yield page\n raise StopIteration\n\n elif not isinstance(config_line, dict):\n msg = (\"Line in 'page' config is of type {0}, dict or string \"\n \"expected. Config: {1}\").format(type(config_line), config_line)\n raise exceptions.ConfigurationError(msg)\n\n if len(config_line) > 1:\n raise exceptions.ConfigurationError(\n \"Page configs should be in the format 'name: markdown.md'. The \"\n \"config contains an invalid entry: {0}\".format(config_line))\n elif len(config_line) == 0:\n log.warning(\"Ignoring empty line in the pages config.\")\n raise StopIteration\n\n next_cat_or_title, subpages_or_path = next(iter(config_line.items()))\n\n if isinstance(subpages_or_path, utils.string_types):\n path = subpages_or_path\n for sub in _follow(path, url_context, use_dir_urls, header=header, title=next_cat_or_title):\n yield sub\n raise StopIteration\n\n elif not isinstance(subpages_or_path, list):\n msg = (\"Line in 'page' config is of type {0}, list or string \"\n \"expected for sub pages. Config: {1}\"\n ).format(type(config_line), config_line)\n raise exceptions.ConfigurationError(msg)\n\n next_header = Header(title=next_cat_or_title, children=[])\n if header:\n next_header.ancestors = [header]\n header.children.append(next_header)\n yield next_header\n\n subpages = subpages_or_path\n\n for subpage in subpages:\n for sub in _follow(subpage, url_context, use_dir_urls, next_header):\n yield sub\n\n\ndef _generate_site_navigation(pages_config, url_context, use_dir_urls=True):\n \"\"\"\n Returns a list of Page and Header instances that represent the\n top level site navigation.\n \"\"\"\n nav_items = []\n pages = []\n\n previous = None\n\n for config_line in pages_config:\n\n for page_or_header in _follow(\n config_line, url_context, use_dir_urls):\n\n if isinstance(page_or_header, Header):\n\n if page_or_header.is_top_level:\n nav_items.append(page_or_header)\n\n elif isinstance(page_or_header, Page):\n\n if page_or_header.is_top_level:\n nav_items.append(page_or_header)\n\n pages.append(page_or_header)\n\n if previous:\n page_or_header.previous_page = previous\n previous.next_page = page_or_header\n previous = page_or_header\n\n if len(pages) == 0:\n raise exceptions.ConfigurationError(\n \"No pages found in the pages config. 
\"\n \"Remove it entirely to enable automatic page discovery.\")\n\n return (nav_items, pages)\n", "path": "mkdocs/nav.py"}], "after_files": [{"content": "# coding: utf-8\n\n\"\"\"\nDeals with generating the site-wide navigation.\n\nThis consists of building a set of interlinked page and header objects.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport datetime\nimport logging\nimport os\n\nfrom mkdocs import utils, exceptions\n\nlog = logging.getLogger(__name__)\n\n\ndef filename_to_title(filename):\n \"\"\"\n Automatically generate a default title, given a filename.\n \"\"\"\n if utils.is_homepage(filename):\n return 'Home'\n\n return utils.filename_to_title(filename)\n\n\nclass SiteNavigation(object):\n def __init__(self, pages_config, use_directory_urls=True):\n self.url_context = URLContext()\n self.file_context = FileContext()\n self.nav_items, self.pages = _generate_site_navigation(\n pages_config, self.url_context, use_directory_urls)\n self.homepage = self.pages[0] if self.pages else None\n self.use_directory_urls = use_directory_urls\n\n def __str__(self):\n return ''.join([str(item) for item in self])\n\n def __iter__(self):\n return iter(self.nav_items)\n\n def walk_pages(self):\n \"\"\"\n Returns each page in the site in turn.\n\n Additionally this sets the active status of the pages and headers,\n in the site navigation, so that the rendered navbar can correctly\n highlight the currently active page and/or header item.\n \"\"\"\n page = self.homepage\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n while page.next_page:\n page.set_active(False)\n page = page.next_page\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n page.set_active(False)\n\n @property\n def source_files(self):\n if not hasattr(self, '_source_files'):\n self._source_files = set([page.input_path for page in self.pages])\n return self._source_files\n\n\nclass URLContext(object):\n \"\"\"\n The URLContext is used to ensure that we can generate the appropriate\n relative URLs to other pages from any given page in the site.\n\n We use relative URLs so that static sites can be deployed to any location\n without having to specify what the path component on the host will be\n if the documentation is not hosted at the root path.\n \"\"\"\n\n def __init__(self):\n self.base_path = '/'\n\n def set_current_url(self, current_url):\n self.base_path = os.path.dirname(current_url)\n\n def make_relative(self, url):\n \"\"\"\n Given a URL path return it as a relative URL,\n given the context of the current page.\n \"\"\"\n suffix = '/' if (url.endswith('/') and len(url) > 1) else ''\n # Workaround for bug on `os.path.relpath()` in Python 2.6\n if self.base_path == '/':\n if url == '/':\n # Workaround for static assets\n return '.'\n return url.lstrip('/')\n # Under Python 2.6, relative_path adds an extra '/' at the end.\n relative_path = os.path.relpath(url, start=self.base_path)\n relative_path = relative_path.rstrip('/') + suffix\n\n return utils.path_to_url(relative_path)\n\n\nclass FileContext(object):\n \"\"\"\n The FileContext is used to ensure that we can generate the appropriate\n full path for other pages given their relative path from a particular page.\n\n This is used when we have relative hyperlinks in the documentation, so that\n we can ensure that they point to markdown documents that actually exist\n in the `pages` config.\n 
\"\"\"\n def __init__(self):\n self.current_file = None\n self.base_path = ''\n\n def set_current_path(self, current_path):\n self.current_file = current_path\n self.base_path = os.path.dirname(current_path)\n\n def make_absolute(self, path):\n \"\"\"\n Given a relative file path return it as a POSIX-style\n absolute filepath, given the context of the current page.\n \"\"\"\n return os.path.normpath(os.path.join(self.base_path, path))\n\n\nclass Page(object):\n def __init__(self, title, url, path, url_context):\n\n self.title = title\n self.abs_url = url\n self.active = False\n self.url_context = url_context\n self.update_date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n\n # Relative paths to the input markdown file and output html file.\n self.input_path = path\n self.output_path = utils.get_html_path(path)\n\n # Links to related pages\n self.previous_page = None\n self.next_page = None\n self.ancestors = []\n\n @property\n def url(self):\n return self.url_context.make_relative(self.abs_url)\n\n @property\n def is_homepage(self):\n return utils.is_homepage(self.input_path)\n\n @property\n def is_top_level(self):\n return len(self.ancestors) == 0\n\n def __str__(self):\n return self.indent_print()\n\n def indent_print(self, depth=0):\n indent = ' ' * depth\n active_marker = ' [*]' if self.active else ''\n title = self.title if (self.title is not None) else '[blank]'\n return '%s%s - %s%s\\n' % (indent, title, self.abs_url, active_marker)\n\n def set_active(self, active=True):\n self.active = active\n for ancestor in self.ancestors:\n ancestor.set_active(active)\n\n\nclass Header(object):\n def __init__(self, title, children):\n self.title, self.children = title, children\n self.active = False\n self.ancestors = []\n\n def __str__(self):\n return self.indent_print()\n\n @property\n def is_top_level(self):\n return len(self.ancestors) == 0\n\n def indent_print(self, depth=0):\n indent = ' ' * depth\n active_marker = ' [*]' if self.active else ''\n ret = '%s%s%s\\n' % (indent, self.title, active_marker)\n for item in self.children:\n ret += item.indent_print(depth + 1)\n return ret\n\n def set_active(self, active=True):\n self.active = active\n for ancestor in self.ancestors:\n ancestor.set_active(active)\n\n\ndef _path_to_page(path, title, url_context, use_directory_urls):\n if title is None:\n title = filename_to_title(path.split(os.path.sep)[-1])\n url = utils.get_url_path(path, use_directory_urls)\n return Page(title=title, url=url, path=path,\n url_context=url_context)\n\n\ndef _follow(config_line, url_context, use_dir_urls, header=None, title=None):\n\n if isinstance(config_line, utils.string_types):\n path = os.path.normpath(config_line)\n page = _path_to_page(path, title, url_context, use_dir_urls)\n\n if header:\n page.ancestors = header.ancestors + [header, ]\n header.children.append(page)\n\n yield page\n raise StopIteration\n\n elif not isinstance(config_line, dict):\n msg = (\"Line in 'page' config is of type {0}, dict or string \"\n \"expected. Config: {1}\").format(type(config_line), config_line)\n raise exceptions.ConfigurationError(msg)\n\n if len(config_line) > 1:\n raise exceptions.ConfigurationError(\n \"Page configs should be in the format 'name: markdown.md'. 
The \"\n \"config contains an invalid entry: {0}\".format(config_line))\n elif len(config_line) == 0:\n log.warning(\"Ignoring empty line in the pages config.\")\n raise StopIteration\n\n next_cat_or_title, subpages_or_path = next(iter(config_line.items()))\n\n if isinstance(subpages_or_path, utils.string_types):\n path = subpages_or_path\n for sub in _follow(path, url_context, use_dir_urls, header=header, title=next_cat_or_title):\n yield sub\n raise StopIteration\n\n elif not isinstance(subpages_or_path, list):\n msg = (\"Line in 'page' config is of type {0}, list or string \"\n \"expected for sub pages. Config: {1}\"\n ).format(type(config_line), config_line)\n raise exceptions.ConfigurationError(msg)\n\n next_header = Header(title=next_cat_or_title, children=[])\n if header:\n next_header.ancestors = [header]\n header.children.append(next_header)\n yield next_header\n\n subpages = subpages_or_path\n\n for subpage in subpages:\n for sub in _follow(subpage, url_context, use_dir_urls, next_header):\n yield sub\n\n\ndef _generate_site_navigation(pages_config, url_context, use_dir_urls=True):\n \"\"\"\n Returns a list of Page and Header instances that represent the\n top level site navigation.\n \"\"\"\n nav_items = []\n pages = []\n\n previous = None\n\n for config_line in pages_config:\n\n for page_or_header in _follow(\n config_line, url_context, use_dir_urls):\n\n if isinstance(page_or_header, Header):\n\n if page_or_header.is_top_level:\n nav_items.append(page_or_header)\n\n elif isinstance(page_or_header, Page):\n\n if page_or_header.is_top_level:\n nav_items.append(page_or_header)\n\n pages.append(page_or_header)\n\n if previous:\n page_or_header.previous_page = previous\n previous.next_page = page_or_header\n previous = page_or_header\n\n if len(pages) == 0:\n raise exceptions.ConfigurationError(\n \"No pages found in the pages config. \"\n \"Remove it entirely to enable automatic page discovery.\")\n\n return (nav_items, pages)\n", "path": "mkdocs/nav.py"}]}
3,518
109
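The one-line change in the diff above (`page.ancestors = header.ancestors + [header, ]`) is easiest to see with a small stand-alone illustration. The classes below are minimal stand-ins for `mkdocs.nav.Header` and `Page` — only the `ancestors` bookkeeping is reproduced, and the section names are made up to mirror the nested layout from the issue.

```python
class Header:
    def __init__(self, title):
        self.title = title
        self.ancestors = []


class Page:
    def __init__(self, title):
        self.title = title
        self.ancestors = []


diensten = Header("diensten")             # top-level section
algemeen = Header("algemeen")
algemeen.ancestors = [diensten]           # _follow() already chains sub-headers this way

page = Page("ActiveDirectory")

# Old behaviour: the page only remembers its direct parent header.
page.ancestors = [algemeen]
print([h.title for h in page.ancestors])  # ['algemeen']

# Patched behaviour: the parent's ancestors plus the parent itself.
page.ancestors = algemeen.ancestors + [algemeen]
print([h.title for h in page.ancestors])  # ['diensten', 'algemeen']
```

With the parent chain carried into `page.ancestors`, the breadcrumb loop from the issue (`{% for doc in current_page.ancestors %}`) walks more than one level instead of stopping at the direct parent.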
gh_patches_debug_25459
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-948
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- anytime_fitness.py null values The scraper currently includes address2 whether it is null or not, resulting in ", None" being appended to many (most?) of the addr:full fields. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/anytime_fitness.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import scrapy 3 from locations.items import GeojsonPointItem 4 import json 5 6 class AnytimeFitnessSpider(scrapy.Spider): 7 name = 'anytime_fitness' 8 allowed_domains = ['www.anytimefitness.com'] 9 10 def start_requests(self): 11 url = 'https://www.anytimefitness.com/wp-content/uploads/gyms.json' 12 yield scrapy.Request(url, callback=self.parse) 13 14 def parse(self, response): 15 gyms = json.loads(response.body_as_unicode()) 16 17 for gym in gyms: 18 yield GeojsonPointItem( 19 lat = gym['latitude'], 20 lon = gym['longitude'], 21 addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']), 22 city = gym['content']['city'], 23 phone = gym['content']['phone'], 24 state = gym['content']['state_abbr'], 25 postcode = gym['content']['zip'], 26 ref = gym['content']['url'], 27 country = gym['content']['country'] 28 ) 29 30 31 32 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/anytime_fitness.py b/locations/spiders/anytime_fitness.py --- a/locations/spiders/anytime_fitness.py +++ b/locations/spiders/anytime_fitness.py @@ -2,6 +2,8 @@ import scrapy from locations.items import GeojsonPointItem import json +import html + class AnytimeFitnessSpider(scrapy.Spider): name = 'anytime_fitness' @@ -18,14 +20,13 @@ yield GeojsonPointItem( lat = gym['latitude'], lon = gym['longitude'], - addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']), + addr_full = ", ".join(filter(None, [gym['content']['address'], gym['content']['address2']])), city = gym['content']['city'], phone = gym['content']['phone'], state = gym['content']['state_abbr'], postcode = gym['content']['zip'], ref = gym['content']['url'], - country = gym['content']['country'] + country = gym['content']['country'], + name = html.unescape(gym['content']['title']), + extras = {"number": gym['content']['number']} ) - - -
{"golden_diff": "diff --git a/locations/spiders/anytime_fitness.py b/locations/spiders/anytime_fitness.py\n--- a/locations/spiders/anytime_fitness.py\n+++ b/locations/spiders/anytime_fitness.py\n@@ -2,6 +2,8 @@\n import scrapy\n from locations.items import GeojsonPointItem\n import json\n+import html\n+\n \n class AnytimeFitnessSpider(scrapy.Spider):\n name = 'anytime_fitness'\n@@ -18,14 +20,13 @@\n yield GeojsonPointItem(\n lat = gym['latitude'],\n lon = gym['longitude'],\n- addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']),\n+ addr_full = \", \".join(filter(None, [gym['content']['address'], gym['content']['address2']])),\n city = gym['content']['city'],\n phone = gym['content']['phone'],\n state = gym['content']['state_abbr'],\n postcode = gym['content']['zip'],\n ref = gym['content']['url'],\n- country = gym['content']['country']\n+ country = gym['content']['country'],\n+ name = html.unescape(gym['content']['title']),\n+ extras = {\"number\": gym['content']['number']}\n )\n- \n- \n-\n", "issue": "anytime_fitness.py null values\nThe scraper currently includes address2 whether it is null or not, resulting in \", None\" being appended to many (most?) of the addr:full fields.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport json\n\nclass AnytimeFitnessSpider(scrapy.Spider):\n name = 'anytime_fitness'\n allowed_domains = ['www.anytimefitness.com']\n\n def start_requests(self):\n url = 'https://www.anytimefitness.com/wp-content/uploads/gyms.json'\n yield scrapy.Request(url, callback=self.parse)\n\n def parse(self, response):\n gyms = json.loads(response.body_as_unicode())\n\n for gym in gyms:\n yield GeojsonPointItem(\n lat = gym['latitude'],\n lon = gym['longitude'],\n addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']),\n city = gym['content']['city'],\n phone = gym['content']['phone'],\n state = gym['content']['state_abbr'],\n postcode = gym['content']['zip'],\n ref = gym['content']['url'],\n country = gym['content']['country']\n )\n \n \n \n", "path": "locations/spiders/anytime_fitness.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport json\nimport html\n\n\nclass AnytimeFitnessSpider(scrapy.Spider):\n name = 'anytime_fitness'\n allowed_domains = ['www.anytimefitness.com']\n\n def start_requests(self):\n url = 'https://www.anytimefitness.com/wp-content/uploads/gyms.json'\n yield scrapy.Request(url, callback=self.parse)\n\n def parse(self, response):\n gyms = json.loads(response.body_as_unicode())\n\n for gym in gyms:\n yield GeojsonPointItem(\n lat = gym['latitude'],\n lon = gym['longitude'],\n addr_full = \", \".join(filter(None, [gym['content']['address'], gym['content']['address2']])),\n city = gym['content']['city'],\n phone = gym['content']['phone'],\n state = gym['content']['state_abbr'],\n postcode = gym['content']['zip'],\n ref = gym['content']['url'],\n country = gym['content']['country'],\n name = html.unescape(gym['content']['title']),\n extras = {\"number\": gym['content']['number']}\n )\n", "path": "locations/spiders/anytime_fitness.py"}]}
576
283
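
Illustrative sketch (assumed sample values, not a field of the dataset): the fix above drops empty address parts by filtering falsy values before joining, and decodes HTML entities in the gym title via `html.unescape`. A minimal Python version of that pattern, with a hypothetical `gym` record:

```python
import html


def format_address(*parts):
    """Join address components, skipping None/empty strings so no ", None" is appended."""
    return ", ".join(filter(None, parts))


def clean_name(raw_title):
    """Decode HTML entities (e.g. "&amp;") that appear in scraped titles."""
    return html.unescape(raw_title)


# Hypothetical gym record mirroring the fields the spider reads.
gym = {"address": "123 Main St", "address2": None, "title": "Anytime Fitness &amp; Co."}
print(format_address(gym["address"], gym["address2"]))  # 123 Main St
print(clean_name(gym["title"]))                         # Anytime Fitness & Co.
```
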
gh_patches_debug_29017
rasdani/github-patches
git_diff
pwndbg__pwndbg-750
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Help: disasm with duplicate visual pointing references I'm learning about RE and just find out pwndbg. Using splitmind (saw it in FEATURES readme) with a custom setup of window/pane splitting, I got some "duplicate" visual references pointing to the same instruction in DISASM section. I don't know if it's the right place to ask for help or not, sorry if it's not the right one ![image](https://user-images.githubusercontent.com/6990857/80813910-4c0ae780-8ba1-11ea-9ef3-239a5153c32d.png) I'm playing aroung with this: https://crackmes.one/crackme/5d22b9d833c5d410dc4d0c9f ``` Gdb: 10.0.50.20200307-git Python: 3.8.2 (default, Apr 8 2020, 14:31:25) [GCC 9.3.0] Pwndbg: 1.1.0 build: 3cf9b31 Capstone: 4.0.1024 Unicorn: 1.0.1 alacritty: 0.4.2 tmux: 3.1 zsh: zsh 5.8 (x86_64-pc-linux-gnu) OS: DISTRIB_ID=ManjaroLinux DISTRIB_RELEASE=20.0 DISTRIB_CODENAME=Lysia DISTRIB_DESCRIPTION="Manjaro Linux" ``` My .gdbinit ``` source /home/lucasbertin/Projetos/OSS/pwndbg/gdbinit.py set context-clear-screen on set follow-fork-mode parent source /home/lucasbertin/Projetos/OSS/splitmind/gdbinit.py python import splitmind (splitmind.Mind() .tell_splitter(show_titles=True) .tell_splitter(set_title="Main") .right(display="backtrace", size="25%") .above(of="main", display="disasm", banner="top", size="85%") .right(of="disasm", display="legend") .below(of="legend", display="stack") .show("regs", on="legend") .below(of="backtrace", cmd='tty; tail -f /dev/null', clearing=False) .below(cmd="python3") ).build(nobanner=True) end set context-code-lines 30 set context-source-code-lines 30 set context-sections "regs args code disasm stack backtrace" ``` Any tips or hints are welcome, thanks!! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `pwndbg/commands/nearpc.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from __future__ import absolute_import 4 from __future__ import division 5 from __future__ import print_function 6 from __future__ import unicode_literals 7 8 import argparse 9 import codecs 10 11 import gdb 12 from capstone import * 13 14 import pwndbg.arguments 15 import pwndbg.color 16 import pwndbg.color.context as C 17 import pwndbg.color.disasm as D 18 import pwndbg.color.nearpc as N 19 import pwndbg.color.theme 20 import pwndbg.config 21 import pwndbg.disasm 22 import pwndbg.functions 23 import pwndbg.ida 24 import pwndbg.regs 25 import pwndbg.strings 26 import pwndbg.symbol 27 import pwndbg.ui 28 import pwndbg.vmmap 29 from pwndbg.color import message 30 31 32 def ljust_padding(lst): 33 longest_len = max(map(len, lst)) if lst else 0 34 return [s.ljust(longest_len) for s in lst] 35 36 nearpc_branch_marker = pwndbg.color.theme.Parameter('nearpc-branch-marker', ' ↓', 'branch marker line for nearpc command') 37 nearpc_branch_marker_contiguous = pwndbg.color.theme.Parameter('nearpc-branch-marker-contiguous', ' ', 'contiguous branch marker line for nearpc command') 38 pwndbg.color.theme.Parameter('highlight-pc', True, 'whether to highlight the current instruction') 39 pwndbg.color.theme.Parameter('nearpc-prefix', '►', 'prefix marker for nearpc command') 40 pwndbg.config.Parameter('left-pad-disasm', True, 'whether to left-pad disassembly') 41 nearpc_lines = pwndbg.config.Parameter('nearpc-lines', 10, 'number of additional lines to print for the nearpc command') 42 show_args = pwndbg.config.Parameter('nearpc-show-args', True, 'show call arguments below instruction') 43 44 parser = argparse.ArgumentParser(description='''Disassemble near a specified address.''') 45 parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to dissassemble near.") 46 parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.") 47 #parser.add_argument("to_string", type=bool, nargs="?", default=False, help="Whether to print it or not.") #TODO make sure this should not be exposed 48 parser.add_argument("emulate", type=bool, nargs="?", default=False, help="Whether to emulate instructions to find the next ones or just linearly disassemble.") 49 @pwndbg.commands.ArgparsedCommand(parser) 50 @pwndbg.commands.OnlyWhenRunning 51 def nearpc(pc=None, lines=None, to_string=False, emulate=False): 52 """ 53 Disassemble near a specified address. 54 """ 55 56 # Repeating nearpc (pressing enter) makes it show next addresses 57 # (writing nearpc explicitly again will reset its state) 58 if nearpc.repeat: 59 pc = nearpc.next_pc 60 61 result = [] 62 63 # Fix the case where we only have one argument, and 64 # it's a small value. 
65 if lines is None and (pc is None or int(pc) < 0x100): 66 lines = pc 67 pc = None 68 69 if pc is None: 70 pc = pwndbg.regs.pc 71 72 if lines is None: 73 lines = nearpc_lines // 2 74 75 pc = int(pc) 76 lines = int(lines) 77 78 # Check whether we can even read this address 79 if not pwndbg.memory.peek(pc): 80 result.append(message.error('Invalid address %#x' % pc)) 81 82 # # Load source data if it's available 83 # pc_to_linenos = collections.defaultdict(lambda: []) 84 # lineno_to_src = {} 85 # frame = gdb.selected_frame() 86 # if frame: 87 # sal = frame.find_sal() 88 # if sal: 89 # symtab = sal.symtab 90 # objfile = symtab.objfile 91 # sourcefilename = symtab.filename 92 # with open(sourcefilename, 'r') as sourcefile: 93 # lineno_to_src = {i:l for i,l in enumerate(sourcefile.readlines())} 94 95 # for line in symtab.linetable(): 96 # pc_to_linenos[line.pc].append(line.line) 97 instructions = pwndbg.disasm.near(pc, lines, emulate=emulate, show_prev_insns=not nearpc.repeat) 98 99 if pwndbg.memory.peek(pc) and not instructions: 100 result.append(message.error('Invalid instructions at %#x' % pc)) 101 102 # In case $pc is in a new map we don't know about, 103 # this will trigger an exploratory search. 104 pwndbg.vmmap.find(pc) 105 106 # Gather all addresses and symbols for each instruction 107 symbols = [pwndbg.symbol.get(i.address) for i in instructions] 108 addresses = ['%#x' % i.address for i in instructions] 109 110 nearpc.next_pc = instructions[-1].address + instructions[-1].size if instructions else 0 111 112 # Format the symbol name for each instruction 113 symbols = ['<%s> ' % sym if sym else '' for sym in symbols] 114 115 # Pad out all of the symbols and addresses 116 if pwndbg.config.left_pad_disasm and not nearpc.repeat: 117 symbols = ljust_padding(symbols) 118 addresses = ljust_padding(addresses) 119 120 prev = None 121 122 # Print out each instruction 123 for address_str, symbol, instr in zip(addresses, symbols, instructions): 124 asm = D.instruction(instr) 125 prefix_sign = pwndbg.config.nearpc_prefix 126 127 # Show prefix only on the specified address and don't show it while in repeat-mode 128 show_prefix = instr.address == pc and not nearpc.repeat 129 prefix = ' %s' % (prefix_sign if show_prefix else ' ' * len(prefix_sign)) 130 prefix = N.prefix(prefix) 131 132 pre = pwndbg.ida.Anterior(instr.address) 133 if pre: 134 result.append(N.ida_anterior(pre)) 135 136 # Colorize address and symbol if not highlighted 137 # symbol is fetched from gdb and it can be e.g. '<main+8>' 138 if instr.address != pc or not pwndbg.config.highlight_pc or nearpc.repeat: 139 address_str = N.address(address_str) 140 symbol = N.symbol(symbol) 141 elif pwndbg.config.highlight_pc: 142 prefix = C.highlight(prefix) 143 address_str = C.highlight(address_str) 144 symbol = C.highlight(symbol) 145 146 line = ' '.join((prefix, address_str, symbol, asm)) 147 148 # If there was a branch before this instruction which was not 149 # contiguous, put in some ellipses. 150 if prev and prev.address + prev.size != instr.address: 151 result.append(N.branch_marker('%s' % nearpc_branch_marker)) 152 153 # Otherwise if it's a branch and it *is* contiguous, just put 154 # and empty line. 
155 elif prev and any(g in prev.groups for g in (CS_GRP_CALL, CS_GRP_JUMP, CS_GRP_RET)): 156 if len('%s' % nearpc_branch_marker_contiguous) > 0: 157 result.append('%s' % nearpc_branch_marker_contiguous) 158 159 # For syscall instructions, put the name on the side 160 if instr.address == pc: 161 syscall_name = pwndbg.arguments.get_syscall_name(instr) 162 if syscall_name: 163 line += ' <%s>' % N.syscall_name(syscall_name) 164 165 result.append(line) 166 167 # For call instructions, attempt to resolve the target and 168 # determine the number of arguments. 169 if show_args: 170 result.extend(['%8s%s' % ('', arg) for arg in pwndbg.arguments.format_args(instruction=instr)]) 171 172 prev = instr 173 174 if not to_string: 175 print('\n'.join(result)) 176 177 return result 178 179 180 parser = argparse.ArgumentParser(description='''Like nearpc, but will emulate instructions from the current $PC forward.''') 181 parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to emulate near.") 182 parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.") 183 @pwndbg.commands.ArgparsedCommand(parser) 184 @pwndbg.commands.OnlyWhenRunning 185 def emulate(pc=None, lines=None, to_string=False, emulate=True): 186 """ 187 Like nearpc, but will emulate instructions from the current $PC forward. 188 """ 189 nearpc.repeat = emulate_command.repeat 190 return nearpc(pc, lines, to_string, emulate) 191 192 193 emulate_command = emulate 194 195 196 parser = argparse.ArgumentParser(description='''Compatibility layer for PEDA's pdisass command.''') 197 parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to disassemble near.") 198 parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.") 199 @pwndbg.commands.ArgparsedCommand(parser) 200 @pwndbg.commands.OnlyWhenRunning 201 def pdisass(pc=None, lines=None, to_string=False): 202 """ 203 Compatibility layer for PEDA's pdisass command 204 """ 205 nearpc.repeat = pdisass.repeat 206 return nearpc(pc, lines, to_string, False) 207 208 209 nearpc.next_pc = 0 210 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwndbg/commands/nearpc.py b/pwndbg/commands/nearpc.py --- a/pwndbg/commands/nearpc.py +++ b/pwndbg/commands/nearpc.py @@ -119,13 +119,16 @@ prev = None + first_pc = True + # Print out each instruction for address_str, symbol, instr in zip(addresses, symbols, instructions): asm = D.instruction(instr) prefix_sign = pwndbg.config.nearpc_prefix # Show prefix only on the specified address and don't show it while in repeat-mode - show_prefix = instr.address == pc and not nearpc.repeat + # or when showing current instruction for the second time + show_prefix = instr.address == pc and not nearpc.repeat and first_pc prefix = ' %s' % (prefix_sign if show_prefix else ' ' * len(prefix_sign)) prefix = N.prefix(prefix) @@ -138,10 +141,11 @@ if instr.address != pc or not pwndbg.config.highlight_pc or nearpc.repeat: address_str = N.address(address_str) symbol = N.symbol(symbol) - elif pwndbg.config.highlight_pc: + elif pwndbg.config.highlight_pc and first_pc: prefix = C.highlight(prefix) address_str = C.highlight(address_str) symbol = C.highlight(symbol) + first_pc = False line = ' '.join((prefix, address_str, symbol, asm))
{"golden_diff": "diff --git a/pwndbg/commands/nearpc.py b/pwndbg/commands/nearpc.py\n--- a/pwndbg/commands/nearpc.py\n+++ b/pwndbg/commands/nearpc.py\n@@ -119,13 +119,16 @@\n \n prev = None\n \n+ first_pc = True\n+\n # Print out each instruction\n for address_str, symbol, instr in zip(addresses, symbols, instructions):\n asm = D.instruction(instr)\n prefix_sign = pwndbg.config.nearpc_prefix\n \n # Show prefix only on the specified address and don't show it while in repeat-mode\n- show_prefix = instr.address == pc and not nearpc.repeat\n+ # or when showing current instruction for the second time\n+ show_prefix = instr.address == pc and not nearpc.repeat and first_pc\n prefix = ' %s' % (prefix_sign if show_prefix else ' ' * len(prefix_sign))\n prefix = N.prefix(prefix)\n \n@@ -138,10 +141,11 @@\n if instr.address != pc or not pwndbg.config.highlight_pc or nearpc.repeat:\n address_str = N.address(address_str)\n symbol = N.symbol(symbol)\n- elif pwndbg.config.highlight_pc:\n+ elif pwndbg.config.highlight_pc and first_pc:\n prefix = C.highlight(prefix)\n address_str = C.highlight(address_str)\n symbol = C.highlight(symbol)\n+ first_pc = False\n \n line = ' '.join((prefix, address_str, symbol, asm))\n", "issue": "Help: disasm with duplicate visual pointing references\nI'm learning about RE and just find out pwndbg.\r\n\r\nUsing splitmind (saw it in FEATURES readme) with a custom setup of window/pane splitting, I got some \"duplicate\" visual references pointing to the same instruction in DISASM section.\r\n\r\nI don't know if it's the right place to ask for help or not, sorry if it's not the right one \r\n\r\n![image](https://user-images.githubusercontent.com/6990857/80813910-4c0ae780-8ba1-11ea-9ef3-239a5153c32d.png)\r\n\r\nI'm playing aroung with this:\r\nhttps://crackmes.one/crackme/5d22b9d833c5d410dc4d0c9f\r\n\r\n```\r\nGdb: 10.0.50.20200307-git\r\nPython: 3.8.2 (default, Apr 8 2020, 14:31:25) [GCC 9.3.0]\r\nPwndbg: 1.1.0 build: 3cf9b31\r\nCapstone: 4.0.1024\r\nUnicorn: 1.0.1\r\nalacritty: 0.4.2\r\ntmux: 3.1\r\nzsh: zsh 5.8 (x86_64-pc-linux-gnu)\r\nOS:\r\nDISTRIB_ID=ManjaroLinux\r\nDISTRIB_RELEASE=20.0\r\nDISTRIB_CODENAME=Lysia\r\nDISTRIB_DESCRIPTION=\"Manjaro Linux\"\r\n```\r\n\r\nMy .gdbinit\r\n```\r\nsource /home/lucasbertin/Projetos/OSS/pwndbg/gdbinit.py\r\nset context-clear-screen on\r\nset follow-fork-mode parent\r\n\r\nsource /home/lucasbertin/Projetos/OSS/splitmind/gdbinit.py\r\npython\r\nimport splitmind\r\n(splitmind.Mind()\r\n .tell_splitter(show_titles=True)\r\n .tell_splitter(set_title=\"Main\")\r\n\r\n .right(display=\"backtrace\", size=\"25%\")\r\n\r\n .above(of=\"main\", display=\"disasm\", banner=\"top\", size=\"85%\")\r\n .right(of=\"disasm\", display=\"legend\")\r\n .below(of=\"legend\", display=\"stack\")\r\n\r\n .show(\"regs\", on=\"legend\")\r\n .below(of=\"backtrace\", cmd='tty; tail -f /dev/null', clearing=False)\r\n .below(cmd=\"python3\")\r\n).build(nobanner=True)\r\nend\r\n\r\nset context-code-lines 30\r\nset context-source-code-lines 30\r\nset context-sections \"regs args code disasm stack backtrace\"\r\n```\r\n\r\nAny tips or hints are welcome, thanks!!\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport codecs\n\nimport gdb\nfrom capstone import *\n\nimport pwndbg.arguments\nimport pwndbg.color\nimport pwndbg.color.context as C\nimport pwndbg.color.disasm as 
D\nimport pwndbg.color.nearpc as N\nimport pwndbg.color.theme\nimport pwndbg.config\nimport pwndbg.disasm\nimport pwndbg.functions\nimport pwndbg.ida\nimport pwndbg.regs\nimport pwndbg.strings\nimport pwndbg.symbol\nimport pwndbg.ui\nimport pwndbg.vmmap\nfrom pwndbg.color import message\n\n\ndef ljust_padding(lst):\n longest_len = max(map(len, lst)) if lst else 0\n return [s.ljust(longest_len) for s in lst]\n\nnearpc_branch_marker = pwndbg.color.theme.Parameter('nearpc-branch-marker', ' \u2193', 'branch marker line for nearpc command')\nnearpc_branch_marker_contiguous = pwndbg.color.theme.Parameter('nearpc-branch-marker-contiguous', ' ', 'contiguous branch marker line for nearpc command')\npwndbg.color.theme.Parameter('highlight-pc', True, 'whether to highlight the current instruction')\npwndbg.color.theme.Parameter('nearpc-prefix', '\u25ba', 'prefix marker for nearpc command')\npwndbg.config.Parameter('left-pad-disasm', True, 'whether to left-pad disassembly')\nnearpc_lines = pwndbg.config.Parameter('nearpc-lines', 10, 'number of additional lines to print for the nearpc command')\nshow_args = pwndbg.config.Parameter('nearpc-show-args', True, 'show call arguments below instruction')\n\nparser = argparse.ArgumentParser(description='''Disassemble near a specified address.''')\nparser.add_argument(\"pc\", type=int, nargs=\"?\", default=None, help=\"Address to dissassemble near.\")\nparser.add_argument(\"lines\", type=int, nargs=\"?\", default=None, help=\"Number of lines to show on either side of the address.\")\n#parser.add_argument(\"to_string\", type=bool, nargs=\"?\", default=False, help=\"Whether to print it or not.\") #TODO make sure this should not be exposed\nparser.add_argument(\"emulate\", type=bool, nargs=\"?\", default=False, help=\"Whether to emulate instructions to find the next ones or just linearly disassemble.\")\[email protected](parser)\[email protected]\ndef nearpc(pc=None, lines=None, to_string=False, emulate=False):\n \"\"\"\n Disassemble near a specified address.\n \"\"\"\n\n # Repeating nearpc (pressing enter) makes it show next addresses\n # (writing nearpc explicitly again will reset its state)\n if nearpc.repeat:\n pc = nearpc.next_pc\n\n result = []\n\n # Fix the case where we only have one argument, and\n # it's a small value.\n if lines is None and (pc is None or int(pc) < 0x100):\n lines = pc\n pc = None\n\n if pc is None:\n pc = pwndbg.regs.pc\n\n if lines is None:\n lines = nearpc_lines // 2\n\n pc = int(pc)\n lines = int(lines)\n\n # Check whether we can even read this address\n if not pwndbg.memory.peek(pc):\n result.append(message.error('Invalid address %#x' % pc))\n\n # # Load source data if it's available\n # pc_to_linenos = collections.defaultdict(lambda: [])\n # lineno_to_src = {}\n # frame = gdb.selected_frame()\n # if frame:\n # sal = frame.find_sal()\n # if sal:\n # symtab = sal.symtab\n # objfile = symtab.objfile\n # sourcefilename = symtab.filename\n # with open(sourcefilename, 'r') as sourcefile:\n # lineno_to_src = {i:l for i,l in enumerate(sourcefile.readlines())}\n\n # for line in symtab.linetable():\n # pc_to_linenos[line.pc].append(line.line)\n instructions = pwndbg.disasm.near(pc, lines, emulate=emulate, show_prev_insns=not nearpc.repeat)\n\n if pwndbg.memory.peek(pc) and not instructions:\n result.append(message.error('Invalid instructions at %#x' % pc))\n\n # In case $pc is in a new map we don't know about,\n # this will trigger an exploratory search.\n pwndbg.vmmap.find(pc)\n\n # Gather all addresses and symbols for each instruction\n 
symbols = [pwndbg.symbol.get(i.address) for i in instructions]\n addresses = ['%#x' % i.address for i in instructions]\n\n nearpc.next_pc = instructions[-1].address + instructions[-1].size if instructions else 0\n\n # Format the symbol name for each instruction\n symbols = ['<%s> ' % sym if sym else '' for sym in symbols]\n\n # Pad out all of the symbols and addresses\n if pwndbg.config.left_pad_disasm and not nearpc.repeat:\n symbols = ljust_padding(symbols)\n addresses = ljust_padding(addresses)\n\n prev = None\n\n # Print out each instruction\n for address_str, symbol, instr in zip(addresses, symbols, instructions):\n asm = D.instruction(instr)\n prefix_sign = pwndbg.config.nearpc_prefix\n\n # Show prefix only on the specified address and don't show it while in repeat-mode\n show_prefix = instr.address == pc and not nearpc.repeat\n prefix = ' %s' % (prefix_sign if show_prefix else ' ' * len(prefix_sign))\n prefix = N.prefix(prefix)\n\n pre = pwndbg.ida.Anterior(instr.address)\n if pre:\n result.append(N.ida_anterior(pre))\n\n # Colorize address and symbol if not highlighted\n # symbol is fetched from gdb and it can be e.g. '<main+8>'\n if instr.address != pc or not pwndbg.config.highlight_pc or nearpc.repeat:\n address_str = N.address(address_str)\n symbol = N.symbol(symbol)\n elif pwndbg.config.highlight_pc:\n prefix = C.highlight(prefix)\n address_str = C.highlight(address_str)\n symbol = C.highlight(symbol)\n\n line = ' '.join((prefix, address_str, symbol, asm))\n\n # If there was a branch before this instruction which was not\n # contiguous, put in some ellipses.\n if prev and prev.address + prev.size != instr.address:\n result.append(N.branch_marker('%s' % nearpc_branch_marker))\n\n # Otherwise if it's a branch and it *is* contiguous, just put\n # and empty line.\n elif prev and any(g in prev.groups for g in (CS_GRP_CALL, CS_GRP_JUMP, CS_GRP_RET)):\n if len('%s' % nearpc_branch_marker_contiguous) > 0:\n result.append('%s' % nearpc_branch_marker_contiguous)\n\n # For syscall instructions, put the name on the side\n if instr.address == pc:\n syscall_name = pwndbg.arguments.get_syscall_name(instr)\n if syscall_name:\n line += ' <%s>' % N.syscall_name(syscall_name)\n\n result.append(line)\n\n # For call instructions, attempt to resolve the target and\n # determine the number of arguments.\n if show_args:\n result.extend(['%8s%s' % ('', arg) for arg in pwndbg.arguments.format_args(instruction=instr)])\n\n prev = instr\n\n if not to_string:\n print('\\n'.join(result))\n\n return result\n\n\nparser = argparse.ArgumentParser(description='''Like nearpc, but will emulate instructions from the current $PC forward.''')\nparser.add_argument(\"pc\", type=int, nargs=\"?\", default=None, help=\"Address to emulate near.\")\nparser.add_argument(\"lines\", type=int, nargs=\"?\", default=None, help=\"Number of lines to show on either side of the address.\")\[email protected](parser)\[email protected]\ndef emulate(pc=None, lines=None, to_string=False, emulate=True):\n \"\"\"\n Like nearpc, but will emulate instructions from the current $PC forward.\n \"\"\"\n nearpc.repeat = emulate_command.repeat\n return nearpc(pc, lines, to_string, emulate)\n\n\nemulate_command = emulate\n\n\nparser = argparse.ArgumentParser(description='''Compatibility layer for PEDA's pdisass command.''')\nparser.add_argument(\"pc\", type=int, nargs=\"?\", default=None, help=\"Address to disassemble near.\")\nparser.add_argument(\"lines\", type=int, nargs=\"?\", default=None, help=\"Number of lines to show on either side of the 
address.\")\[email protected](parser)\[email protected]\ndef pdisass(pc=None, lines=None, to_string=False):\n \"\"\"\n Compatibility layer for PEDA's pdisass command\n \"\"\"\n nearpc.repeat = pdisass.repeat\n return nearpc(pc, lines, to_string, False)\n\n\nnearpc.next_pc = 0\n", "path": "pwndbg/commands/nearpc.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport codecs\n\nimport gdb\nfrom capstone import *\n\nimport pwndbg.arguments\nimport pwndbg.color\nimport pwndbg.color.context as C\nimport pwndbg.color.disasm as D\nimport pwndbg.color.nearpc as N\nimport pwndbg.color.theme\nimport pwndbg.config\nimport pwndbg.disasm\nimport pwndbg.functions\nimport pwndbg.ida\nimport pwndbg.regs\nimport pwndbg.strings\nimport pwndbg.symbol\nimport pwndbg.ui\nimport pwndbg.vmmap\nfrom pwndbg.color import message\n\n\ndef ljust_padding(lst):\n longest_len = max(map(len, lst)) if lst else 0\n return [s.ljust(longest_len) for s in lst]\n\nnearpc_branch_marker = pwndbg.color.theme.Parameter('nearpc-branch-marker', ' \u2193', 'branch marker line for nearpc command')\nnearpc_branch_marker_contiguous = pwndbg.color.theme.Parameter('nearpc-branch-marker-contiguous', ' ', 'contiguous branch marker line for nearpc command')\npwndbg.color.theme.Parameter('highlight-pc', True, 'whether to highlight the current instruction')\npwndbg.color.theme.Parameter('nearpc-prefix', '\u25ba', 'prefix marker for nearpc command')\npwndbg.config.Parameter('left-pad-disasm', True, 'whether to left-pad disassembly')\nnearpc_lines = pwndbg.config.Parameter('nearpc-lines', 10, 'number of additional lines to print for the nearpc command')\nshow_args = pwndbg.config.Parameter('nearpc-show-args', True, 'show call arguments below instruction')\n\nparser = argparse.ArgumentParser(description='''Disassemble near a specified address.''')\nparser.add_argument(\"pc\", type=int, nargs=\"?\", default=None, help=\"Address to dissassemble near.\")\nparser.add_argument(\"lines\", type=int, nargs=\"?\", default=None, help=\"Number of lines to show on either side of the address.\")\n#parser.add_argument(\"to_string\", type=bool, nargs=\"?\", default=False, help=\"Whether to print it or not.\") #TODO make sure this should not be exposed\nparser.add_argument(\"emulate\", type=bool, nargs=\"?\", default=False, help=\"Whether to emulate instructions to find the next ones or just linearly disassemble.\")\[email protected](parser)\[email protected]\ndef nearpc(pc=None, lines=None, to_string=False, emulate=False):\n \"\"\"\n Disassemble near a specified address.\n \"\"\"\n\n # Repeating nearpc (pressing enter) makes it show next addresses\n # (writing nearpc explicitly again will reset its state)\n if nearpc.repeat:\n pc = nearpc.next_pc\n\n result = []\n\n # Fix the case where we only have one argument, and\n # it's a small value.\n if lines is None and (pc is None or int(pc) < 0x100):\n lines = pc\n pc = None\n\n if pc is None:\n pc = pwndbg.regs.pc\n\n if lines is None:\n lines = nearpc_lines // 2\n\n pc = int(pc)\n lines = int(lines)\n\n # Check whether we can even read this address\n if not pwndbg.memory.peek(pc):\n result.append(message.error('Invalid address %#x' % pc))\n\n # # Load source data if it's available\n # pc_to_linenos = collections.defaultdict(lambda: [])\n # lineno_to_src = {}\n # frame = gdb.selected_frame()\n # if frame:\n # 
sal = frame.find_sal()\n # if sal:\n # symtab = sal.symtab\n # objfile = symtab.objfile\n # sourcefilename = symtab.filename\n # with open(sourcefilename, 'r') as sourcefile:\n # lineno_to_src = {i:l for i,l in enumerate(sourcefile.readlines())}\n\n # for line in symtab.linetable():\n # pc_to_linenos[line.pc].append(line.line)\n instructions = pwndbg.disasm.near(pc, lines, emulate=emulate, show_prev_insns=not nearpc.repeat)\n\n if pwndbg.memory.peek(pc) and not instructions:\n result.append(message.error('Invalid instructions at %#x' % pc))\n\n # In case $pc is in a new map we don't know about,\n # this will trigger an exploratory search.\n pwndbg.vmmap.find(pc)\n\n # Gather all addresses and symbols for each instruction\n symbols = [pwndbg.symbol.get(i.address) for i in instructions]\n addresses = ['%#x' % i.address for i in instructions]\n\n nearpc.next_pc = instructions[-1].address + instructions[-1].size if instructions else 0\n\n # Format the symbol name for each instruction\n symbols = ['<%s> ' % sym if sym else '' for sym in symbols]\n\n # Pad out all of the symbols and addresses\n if pwndbg.config.left_pad_disasm and not nearpc.repeat:\n symbols = ljust_padding(symbols)\n addresses = ljust_padding(addresses)\n\n prev = None\n\n first_pc = True\n\n # Print out each instruction\n for address_str, symbol, instr in zip(addresses, symbols, instructions):\n asm = D.instruction(instr)\n prefix_sign = pwndbg.config.nearpc_prefix\n\n # Show prefix only on the specified address and don't show it while in repeat-mode\n # or when showing current instruction for the second time\n show_prefix = instr.address == pc and not nearpc.repeat and first_pc\n prefix = ' %s' % (prefix_sign if show_prefix else ' ' * len(prefix_sign))\n prefix = N.prefix(prefix)\n\n pre = pwndbg.ida.Anterior(instr.address)\n if pre:\n result.append(N.ida_anterior(pre))\n\n # Colorize address and symbol if not highlighted\n # symbol is fetched from gdb and it can be e.g. 
'<main+8>'\n if instr.address != pc or not pwndbg.config.highlight_pc or nearpc.repeat:\n address_str = N.address(address_str)\n symbol = N.symbol(symbol)\n elif pwndbg.config.highlight_pc and first_pc:\n prefix = C.highlight(prefix)\n address_str = C.highlight(address_str)\n symbol = C.highlight(symbol)\n first_pc = False\n\n line = ' '.join((prefix, address_str, symbol, asm))\n\n # If there was a branch before this instruction which was not\n # contiguous, put in some ellipses.\n if prev and prev.address + prev.size != instr.address:\n result.append(N.branch_marker('%s' % nearpc_branch_marker))\n\n # Otherwise if it's a branch and it *is* contiguous, just put\n # and empty line.\n elif prev and any(g in prev.groups for g in (CS_GRP_CALL, CS_GRP_JUMP, CS_GRP_RET)):\n if len('%s' % nearpc_branch_marker_contiguous) > 0:\n result.append('%s' % nearpc_branch_marker_contiguous)\n\n # For syscall instructions, put the name on the side\n if instr.address == pc:\n syscall_name = pwndbg.arguments.get_syscall_name(instr)\n if syscall_name:\n line += ' <%s>' % N.syscall_name(syscall_name)\n\n result.append(line)\n\n # For call instructions, attempt to resolve the target and\n # determine the number of arguments.\n if show_args:\n result.extend(['%8s%s' % ('', arg) for arg in pwndbg.arguments.format_args(instruction=instr)])\n\n prev = instr\n\n if not to_string:\n print('\\n'.join(result))\n\n return result\n\n\nparser = argparse.ArgumentParser(description='''Like nearpc, but will emulate instructions from the current $PC forward.''')\nparser.add_argument(\"pc\", type=int, nargs=\"?\", default=None, help=\"Address to emulate near.\")\nparser.add_argument(\"lines\", type=int, nargs=\"?\", default=None, help=\"Number of lines to show on either side of the address.\")\[email protected](parser)\[email protected]\ndef emulate(pc=None, lines=None, to_string=False, emulate=True):\n \"\"\"\n Like nearpc, but will emulate instructions from the current $PC forward.\n \"\"\"\n nearpc.repeat = emulate_command.repeat\n return nearpc(pc, lines, to_string, emulate)\n\n\nemulate_command = emulate\n\n\nparser = argparse.ArgumentParser(description='''Compatibility layer for PEDA's pdisass command.''')\nparser.add_argument(\"pc\", type=int, nargs=\"?\", default=None, help=\"Address to disassemble near.\")\nparser.add_argument(\"lines\", type=int, nargs=\"?\", default=None, help=\"Number of lines to show on either side of the address.\")\[email protected](parser)\[email protected]\ndef pdisass(pc=None, lines=None, to_string=False):\n \"\"\"\n Compatibility layer for PEDA's pdisass command\n \"\"\"\n nearpc.repeat = pdisass.repeat\n return nearpc(pc, lines, to_string, False)\n\n\nnearpc.next_pc = 0\n", "path": "pwndbg/commands/nearpc.py"}]}
3,452
341
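
Illustrative sketch (hypothetical helper and made-up addresses, not a field of the dataset): the fix above gates the prefix and highlight behind a `first_pc` flag so only the first instruction whose address equals the current pc is marked, removing the duplicate ► markers reported in the issue. A stripped-down version of that flag pattern:

```python
def render(instructions, pc, prefix="►"):
    """Mark only the first instruction matching pc, even if its address occurs twice."""
    lines = []
    first_pc = True  # cleared once the current pc has been marked
    for addr, asm in instructions:
        show_prefix = addr == pc and first_pc
        if show_prefix:
            first_pc = False
        marker = prefix if show_prefix else " " * len(prefix)
        lines.append(f"{marker} {addr:#x}  {asm}")
    return "\n".join(lines)


# Hypothetical listing in which the pc address is emitted twice.
listing = [(0x1000, "push rbp"), (0x1001, "mov rbp, rsp"), (0x1000, "push rbp")]
print(render(listing, pc=0x1000))
```
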
gh_patches_debug_9873
rasdani/github-patches
git_diff
wright-group__WrightTools-992
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- collection.convert Would like to support syntax `collection.convert(unit)`. Would convert all contained data objects recursively. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `WrightTools/collection/_collection.py` Content: ``` 1 """Collection.""" 2 3 4 # --- import -------------------------------------------------------------------------------------- 5 6 7 import numpy as np 8 9 from .. import data as wt_data 10 from .. import exceptions as wt_exceptions 11 from .._group import Group 12 13 14 # --- define -------------------------------------------------------------------------------------- 15 16 17 __all__ = ["Collection"] 18 19 20 # --- classes ------------------------------------------------------------------------------------- 21 22 23 class Collection(Group): 24 """Nestable Collection of Data objects.""" 25 26 class_name = "Collection" 27 28 def __iter__(self): 29 self.__n = 0 30 return self 31 32 def __len__(self): 33 return len(self.item_names) 34 35 def __next__(self): 36 if self.__n < len(self): 37 out = self.item_names[self.__n] 38 self.__n += 1 39 else: 40 raise StopIteration 41 return out 42 43 def __repr__(self): 44 return "<WrightTools.Collection '{0}' {1} at {2}>".format( 45 self.natural_name, self.item_names, "::".join([self.filepath, self.name]) 46 ) 47 48 def __getitem__(self, key): 49 if isinstance(key, int): 50 key = self.item_names[key] 51 if key == "": 52 return None 53 return super().__getitem__(key) 54 55 def __setitem__(self, key, value): 56 raise NotImplementedError 57 58 @property 59 def _leaf(self): 60 return self.natural_name 61 62 def _print_branch(self, prefix, depth, verbose): 63 for i, name in enumerate(self.item_names): 64 item = self[name] 65 if i + 1 == len(self.item_names): 66 s = prefix + "└── {0}: {1}".format(i, item._leaf) 67 p = prefix + " " 68 else: 69 s = prefix + "├── {0}: {1}".format(i, item._leaf) 70 p = prefix + "│ " 71 print(s) 72 if depth > 1 and hasattr(item, "_print_branch"): 73 item._print_branch(p, depth=depth - 1, verbose=verbose) 74 75 def create_collection(self, name="collection", position=None, **kwargs): 76 """Create a new child colleciton. 77 78 Parameters 79 ---------- 80 name : string 81 Unique identifier. 82 position : integer (optional) 83 Location to insert. Default is None (append). 84 kwargs 85 Additional arguments to child collection instantiation. 86 87 Returns 88 ------- 89 WrightTools Collection 90 New child. 91 """ 92 if name in self.item_names: 93 wt_exceptions.ObjectExistsWarning.warn(name) 94 return self[name] 95 collection = Collection( 96 filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs 97 ) 98 if position is not None: 99 self.attrs["item_names"] = np.insert( 100 self.attrs["item_names"][:-1], position, collection.natural_name.encode() 101 ) 102 setattr(self, name, collection) 103 return collection 104 105 def create_data(self, name="data", position=None, **kwargs): 106 """Create a new child data. 107 108 Parameters 109 ---------- 110 name : string 111 Unique identifier. 112 position : integer (optional) 113 Location to insert. Default is None (append). 114 kwargs 115 Additional arguments to child data instantiation. 116 117 Returns 118 ------- 119 WrightTools Data 120 New child. 
121 """ 122 if name in self.item_names: 123 wt_exceptions.ObjectExistsWarning.warn(name) 124 return self[name] 125 126 if name == "": 127 data = None 128 natural_name = "".encode() 129 else: 130 data = wt_data.Data( 131 filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs 132 ) 133 natural_name = data.natural_name.encode() 134 if position is not None: 135 self.attrs["item_names"] = np.insert( 136 self.attrs["item_names"][:-1], position, natural_name 137 ) 138 setattr(self, name, data) 139 return data 140 141 def index(self): 142 """Index.""" 143 raise NotImplementedError 144 145 def print_tree(self, depth=9, *, verbose=False): 146 """Print a ascii-formatted tree representation of the collection contents. 147 148 Parameters 149 ---------- 150 depth : integer (optional) 151 Number of layers to include in the tree. Default is 9. 152 verbose : boolean (optional) 153 Toggle inclusion of extra information. Default is True. 154 """ 155 print("{0} ({1})".format(self.natural_name, self.filepath)) 156 self._print_branch("", depth=depth, verbose=verbose) 157 158 def flush(self): 159 """Ensure contents are written to file.""" 160 for name in self.item_names: 161 item = self[name] 162 item.flush() 163 self.file.flush() 164 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/WrightTools/collection/_collection.py b/WrightTools/collection/_collection.py --- a/WrightTools/collection/_collection.py +++ b/WrightTools/collection/_collection.py @@ -102,6 +102,18 @@ setattr(self, name, collection) return collection + def convert(self, units, convert_variables=False, verbose=True): + """Convert units of a collection. + Parameters + ---------- + units: string + Units to convert to. + """ + + for name in self.item_names: + item = self[name] + item.convert(units, convert_variables=convert_variables, verbose=verbose) + def create_data(self, name="data", position=None, **kwargs): """Create a new child data.
{"golden_diff": "diff --git a/WrightTools/collection/_collection.py b/WrightTools/collection/_collection.py\n--- a/WrightTools/collection/_collection.py\n+++ b/WrightTools/collection/_collection.py\n@@ -102,6 +102,18 @@\n setattr(self, name, collection)\n return collection\n \n+ def convert(self, units, convert_variables=False, verbose=True):\n+ \"\"\"Convert units of a collection.\n+ Parameters\n+ ----------\n+ units: string\n+ Units to convert to.\n+ \"\"\"\n+\n+ for name in self.item_names:\n+ item = self[name]\n+ item.convert(units, convert_variables=convert_variables, verbose=verbose)\n+\n def create_data(self, name=\"data\", position=None, **kwargs):\n \"\"\"Create a new child data.\n", "issue": "collection.convert\nWould like to support syntax `collection.convert(unit)`.\r\n\r\nWould convert all contained data objects recursively.\n", "before_files": [{"content": "\"\"\"Collection.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nfrom .. import data as wt_data\nfrom .. import exceptions as wt_exceptions\nfrom .._group import Group\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"Collection\"]\n\n\n# --- classes -------------------------------------------------------------------------------------\n\n\nclass Collection(Group):\n \"\"\"Nestable Collection of Data objects.\"\"\"\n\n class_name = \"Collection\"\n\n def __iter__(self):\n self.__n = 0\n return self\n\n def __len__(self):\n return len(self.item_names)\n\n def __next__(self):\n if self.__n < len(self):\n out = self.item_names[self.__n]\n self.__n += 1\n else:\n raise StopIteration\n return out\n\n def __repr__(self):\n return \"<WrightTools.Collection '{0}' {1} at {2}>\".format(\n self.natural_name, self.item_names, \"::\".join([self.filepath, self.name])\n )\n\n def __getitem__(self, key):\n if isinstance(key, int):\n key = self.item_names[key]\n if key == \"\":\n return None\n return super().__getitem__(key)\n\n def __setitem__(self, key, value):\n raise NotImplementedError\n\n @property\n def _leaf(self):\n return self.natural_name\n\n def _print_branch(self, prefix, depth, verbose):\n for i, name in enumerate(self.item_names):\n item = self[name]\n if i + 1 == len(self.item_names):\n s = prefix + \"\u2514\u2500\u2500 {0}: {1}\".format(i, item._leaf)\n p = prefix + \" \"\n else:\n s = prefix + \"\u251c\u2500\u2500 {0}: {1}\".format(i, item._leaf)\n p = prefix + \"\u2502 \"\n print(s)\n if depth > 1 and hasattr(item, \"_print_branch\"):\n item._print_branch(p, depth=depth - 1, verbose=verbose)\n\n def create_collection(self, name=\"collection\", position=None, **kwargs):\n \"\"\"Create a new child colleciton.\n\n Parameters\n ----------\n name : string\n Unique identifier.\n position : integer (optional)\n Location to insert. 
Default is None (append).\n kwargs\n Additional arguments to child collection instantiation.\n\n Returns\n -------\n WrightTools Collection\n New child.\n \"\"\"\n if name in self.item_names:\n wt_exceptions.ObjectExistsWarning.warn(name)\n return self[name]\n collection = Collection(\n filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs\n )\n if position is not None:\n self.attrs[\"item_names\"] = np.insert(\n self.attrs[\"item_names\"][:-1], position, collection.natural_name.encode()\n )\n setattr(self, name, collection)\n return collection\n\n def create_data(self, name=\"data\", position=None, **kwargs):\n \"\"\"Create a new child data.\n\n Parameters\n ----------\n name : string\n Unique identifier.\n position : integer (optional)\n Location to insert. Default is None (append).\n kwargs\n Additional arguments to child data instantiation.\n\n Returns\n -------\n WrightTools Data\n New child.\n \"\"\"\n if name in self.item_names:\n wt_exceptions.ObjectExistsWarning.warn(name)\n return self[name]\n\n if name == \"\":\n data = None\n natural_name = \"\".encode()\n else:\n data = wt_data.Data(\n filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs\n )\n natural_name = data.natural_name.encode()\n if position is not None:\n self.attrs[\"item_names\"] = np.insert(\n self.attrs[\"item_names\"][:-1], position, natural_name\n )\n setattr(self, name, data)\n return data\n\n def index(self):\n \"\"\"Index.\"\"\"\n raise NotImplementedError\n\n def print_tree(self, depth=9, *, verbose=False):\n \"\"\"Print a ascii-formatted tree representation of the collection contents.\n\n Parameters\n ----------\n depth : integer (optional)\n Number of layers to include in the tree. Default is 9.\n verbose : boolean (optional)\n Toggle inclusion of extra information. Default is True.\n \"\"\"\n print(\"{0} ({1})\".format(self.natural_name, self.filepath))\n self._print_branch(\"\", depth=depth, verbose=verbose)\n\n def flush(self):\n \"\"\"Ensure contents are written to file.\"\"\"\n for name in self.item_names:\n item = self[name]\n item.flush()\n self.file.flush()\n", "path": "WrightTools/collection/_collection.py"}], "after_files": [{"content": "\"\"\"Collection.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nfrom .. import data as wt_data\nfrom .. 
import exceptions as wt_exceptions\nfrom .._group import Group\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"Collection\"]\n\n\n# --- classes -------------------------------------------------------------------------------------\n\n\nclass Collection(Group):\n \"\"\"Nestable Collection of Data objects.\"\"\"\n\n class_name = \"Collection\"\n\n def __iter__(self):\n self.__n = 0\n return self\n\n def __len__(self):\n return len(self.item_names)\n\n def __next__(self):\n if self.__n < len(self):\n out = self.item_names[self.__n]\n self.__n += 1\n else:\n raise StopIteration\n return out\n\n def __repr__(self):\n return \"<WrightTools.Collection '{0}' {1} at {2}>\".format(\n self.natural_name, self.item_names, \"::\".join([self.filepath, self.name])\n )\n\n def __getitem__(self, key):\n if isinstance(key, int):\n key = self.item_names[key]\n if key == \"\":\n return None\n return super().__getitem__(key)\n\n def __setitem__(self, key, value):\n raise NotImplementedError\n\n @property\n def _leaf(self):\n return self.natural_name\n\n def _print_branch(self, prefix, depth, verbose):\n for i, name in enumerate(self.item_names):\n item = self[name]\n if i + 1 == len(self.item_names):\n s = prefix + \"\u2514\u2500\u2500 {0}: {1}\".format(i, item._leaf)\n p = prefix + \" \"\n else:\n s = prefix + \"\u251c\u2500\u2500 {0}: {1}\".format(i, item._leaf)\n p = prefix + \"\u2502 \"\n print(s)\n if depth > 1 and hasattr(item, \"_print_branch\"):\n item._print_branch(p, depth=depth - 1, verbose=verbose)\n\n def create_collection(self, name=\"collection\", position=None, **kwargs):\n \"\"\"Create a new child colleciton.\n\n Parameters\n ----------\n name : string\n Unique identifier.\n position : integer (optional)\n Location to insert. Default is None (append).\n kwargs\n Additional arguments to child collection instantiation.\n\n Returns\n -------\n WrightTools Collection\n New child.\n \"\"\"\n if name in self.item_names:\n wt_exceptions.ObjectExistsWarning.warn(name)\n return self[name]\n collection = Collection(\n filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs\n )\n if position is not None:\n self.attrs[\"item_names\"] = np.insert(\n self.attrs[\"item_names\"][:-1], position, collection.natural_name.encode()\n )\n setattr(self, name, collection)\n return collection\n\n def convert(self, units, convert_variables=False, verbose=True):\n \"\"\"Convert units of a collection.\n Parameters\n ----------\n units: string\n Units to convert to.\n \"\"\"\n\n for name in self.item_names:\n item = self[name]\n item.convert(units, convert_variables=convert_variables, verbose=verbose)\n\n def create_data(self, name=\"data\", position=None, **kwargs):\n \"\"\"Create a new child data.\n\n Parameters\n ----------\n name : string\n Unique identifier.\n position : integer (optional)\n Location to insert. 
Default is None (append).\n kwargs\n Additional arguments to child data instantiation.\n\n Returns\n -------\n WrightTools Data\n New child.\n \"\"\"\n if name in self.item_names:\n wt_exceptions.ObjectExistsWarning.warn(name)\n return self[name]\n\n if name == \"\":\n data = None\n natural_name = \"\".encode()\n else:\n data = wt_data.Data(\n filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs\n )\n natural_name = data.natural_name.encode()\n if position is not None:\n self.attrs[\"item_names\"] = np.insert(\n self.attrs[\"item_names\"][:-1], position, natural_name\n )\n setattr(self, name, data)\n return data\n\n def index(self):\n \"\"\"Index.\"\"\"\n raise NotImplementedError\n\n def print_tree(self, depth=9, *, verbose=False):\n \"\"\"Print a ascii-formatted tree representation of the collection contents.\n\n Parameters\n ----------\n depth : integer (optional)\n Number of layers to include in the tree. Default is 9.\n verbose : boolean (optional)\n Toggle inclusion of extra information. Default is True.\n \"\"\"\n print(\"{0} ({1})\".format(self.natural_name, self.filepath))\n self._print_branch(\"\", depth=depth, verbose=verbose)\n\n def flush(self):\n \"\"\"Ensure contents are written to file.\"\"\"\n for name in self.item_names:\n item = self[name]\n item.flush()\n self.file.flush()\n", "path": "WrightTools/collection/_collection.py"}]}
1,671
176
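
Illustrative sketch (toy classes, not a field of the dataset): the fix above gives `Collection` a `convert` method that loops over its children and delegates the call, so nested collections convert recursively. A toy version of that delegation pattern, with `Leaf`/`Container` standing in for Data/Collection:

```python
class Leaf:
    """Toy stand-in for a data object; only its units matter here."""

    def __init__(self, name, units):
        self.name, self.units = name, units

    def convert(self, units, verbose=True):
        if verbose:
            print(f"{self.name}: {self.units} -> {units}")
        self.units = units


class Container:
    """Toy stand-in for a collection: convert() just delegates to every child."""

    def __init__(self, children):
        self.children = children

    def convert(self, units, verbose=True):
        for child in self.children:
            child.convert(units, verbose=verbose)  # nested Containers recurse naturally


tree = Container([Leaf("data0", "nm"), Container([Leaf("data1", "nm")])])
tree.convert("wn")  # converts every leaf, at any depth, to "wn"
```
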
gh_patches_debug_51104
rasdani/github-patches
git_diff
ivy-llc__ivy-14373
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- take --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py` Content: ``` 1 # local 2 import ivy 3 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back 4 5 6 @to_ivy_arrays_and_back 7 def adjoint(input): 8 return ivy.adjoint(input) 9 10 11 @to_ivy_arrays_and_back 12 def cat(tensors, dim=0, *, out=None): 13 return ivy.concat(tensors, axis=dim, out=out) 14 15 16 @to_ivy_arrays_and_back 17 def chunk(input, chunks, dim=0): 18 if ivy.shape(input) == (): 19 return [input] 20 else: 21 dim_size = ivy.shape(input)[dim] 22 chunk_size = dim_size // chunks 23 if chunk_size == 0: 24 return ivy.split(input, num_or_size_splits=dim_size, axis=dim) 25 else: 26 remainder = dim_size % chunks 27 if remainder == 0: 28 return ivy.split(input, num_or_size_splits=chunks, axis=dim) 29 else: 30 return ivy.split( 31 input, 32 num_or_size_splits=tuple( 33 [chunk_size + remainder] + [chunk_size] * (chunks - 1) 34 ), 35 axis=dim, 36 ) 37 38 39 @to_ivy_arrays_and_back 40 def concat(tensors, dim=0, *, out=None): 41 return ivy.concat(tensors, axis=dim, out=out) 42 43 44 @to_ivy_arrays_and_back 45 def gather(input, dim, index, *, sparse_grad=False, out=None): 46 if sparse_grad: 47 raise ivy.utils.exceptions.IvyException( 48 "Gather does not yet support the sparse grad functionality" 49 ) 50 51 dim = dim % len(input.shape) 52 all_indices = ivy.argwhere(ivy.full(index.shape, True)) 53 gather_locations = ivy.reshape(index, [ivy.prod(ivy.array(index.shape))]) 54 55 gather_indices = [] 56 for axis in range(len(index.shape)): 57 if axis == dim: 58 gather_indices.append(ivy.array(gather_locations, dtype=index.dtype)) 59 else: 60 gather_indices.append(ivy.array(all_indices[:, axis], dtype=index.dtype)) 61 62 gather_indices = ivy.stack(gather_indices, axis=-1) 63 gathered = ivy.gather_nd(input, gather_indices) 64 reshaped = ivy.reshape(gathered, index.shape) 65 return reshaped 66 67 68 @to_ivy_arrays_and_back 69 def nonzero(input, *, out=None, as_tuple=False): 70 ret = ivy.nonzero(input) 71 if as_tuple is False: 72 ret = ivy.matrix_transpose(ivy.stack(ret)) 73 74 if ivy.exists(out): 75 return ivy.inplace_update(out, ret) 76 return ret 77 78 79 @to_ivy_arrays_and_back 80 def permute(input, dims): 81 return ivy.permute_dims(input, axes=dims) 82 83 84 @to_ivy_arrays_and_back 85 def reshape(input, shape): 86 return ivy.reshape(input, shape) 87 88 89 @to_ivy_arrays_and_back 90 def squeeze(input, dim): 91 if isinstance(dim, int) and input.ndim > 0: 92 if input.shape[dim] > 1: 93 return input 94 return ivy.squeeze(input, dim) 95 96 97 @to_ivy_arrays_and_back 98 def stack(tensors, dim=0, *, out=None): 99 return ivy.stack(tensors, axis=dim, out=out) 100 101 102 @to_ivy_arrays_and_back 103 def swapaxes(input, axis0, axis1): 104 return ivy.swapaxes(input, axis0, axis1) 105 106 107 @to_ivy_arrays_and_back 108 def swapdims(input, dim0, dim1): 109 return ivy.swapaxes(input, dim0, dim1) 110 111 112 @to_ivy_arrays_and_back 113 def transpose(input, dim0, dim1): 114 return ivy.swapaxes(input, dim0, dim1) 115 116 117 @to_ivy_arrays_and_back 118 def t(input): 119 if input.ndim > 2: 120 raise ivy.utils.exceptions.IvyException( 121 "t(input) expects a tensor with <= 2 dimensions, but self is %dD" 122 % input.ndim 123 ) 124 if input.ndim == 2: 125 return 
ivy.swapaxes(input, 0, 1) 126 else: 127 return input 128 129 130 @to_ivy_arrays_and_back 131 def tile(input, dims): 132 try: 133 tup = tuple(dims) 134 except TypeError: 135 tup = (dims,) 136 d = len(tup) 137 res = 0 138 if len(input.shape) > len([dims]) - 1: 139 res = input 140 if d < input.ndim: 141 tup = (1,) * (input.ndim - d) + tup 142 res = ivy.tile(input, tup) 143 144 else: 145 res = ivy.tile(input, repeats=dims, out=None) 146 return res 147 148 149 @to_ivy_arrays_and_back 150 def unsqueeze(input, dim=0): 151 return ivy.expand_dims(input, axis=dim) 152 153 154 @to_ivy_arrays_and_back 155 def argwhere(input): 156 return ivy.argwhere(input) 157 158 159 @to_ivy_arrays_and_back 160 def movedim(input, source, destination): 161 return ivy.moveaxis(input, source, destination) 162 163 164 @to_ivy_arrays_and_back 165 def moveaxis(input, source, destination): 166 return ivy.moveaxis(input, source, destination) 167 168 169 @to_ivy_arrays_and_back 170 def hstack(tensors, *, out=None): 171 return ivy.hstack(tensors, out=out) 172 173 174 @to_ivy_arrays_and_back 175 def index_select(input, dim, index, *, out=None): 176 return ivy.gather(input, index, axis=dim, out=out) 177 178 179 @to_ivy_arrays_and_back 180 def dstack(tensors, *, out=None): 181 return ivy.dstack(tensors, out=out) 182 183 184 @to_ivy_arrays_and_back 185 def take_along_dim(input, indices, dim, *, out=None): 186 return ivy.take_along_axis(input, indices, dim, out=out) 187 188 189 @to_ivy_arrays_and_back 190 def vstack(tensors, *, out=None): 191 return ivy.vstack(tensors, out=out) 192 193 194 @to_ivy_arrays_and_back 195 def split(tensor, split_size_or_sections, dim=0): 196 if isinstance(split_size_or_sections, int): 197 split_size = split_size_or_sections 198 split_size_or_sections = [split_size] * (tensor.shape[dim] // split_size) 199 if tensor.shape[dim] % split_size: 200 split_size_or_sections.append(tensor.shape[dim] % split_size) 201 return tuple( 202 ivy.split( 203 tensor, 204 num_or_size_splits=split_size_or_sections, 205 axis=dim, 206 with_remainder=True, 207 ) 208 ) 209 210 211 @to_ivy_arrays_and_back 212 def tensor_split(input, indices_or_sections, dim=0): 213 if isinstance(indices_or_sections, (list, tuple)): 214 indices_or_sections = ( 215 ivy.diff(indices_or_sections, prepend=[0], append=[input.shape[dim]]) 216 .astype(ivy.int8) 217 .to_list() 218 ) 219 return ivy.split( 220 input, num_or_size_splits=indices_or_sections, axis=dim, with_remainder=False 221 ) 222 223 224 @to_ivy_arrays_and_back 225 def unbind(input, dim=0): 226 shape = list(input.shape) 227 shape.pop(dim) 228 return tuple([x.reshape(tuple(shape)) for x in split(input, 1, dim=dim)]) 229 230 231 def _get_indices_or_sections(indices_or_sections, indices, sections): 232 if not ivy.exists(indices_or_sections): 233 if ivy.exists(indices) and not ivy.exists(sections): 234 indices_or_sections = indices 235 elif ivy.exists(sections) and not ivy.exists(indices): 236 indices_or_sections = sections 237 else: 238 raise ivy.utils.exception.IvyError( 239 "got invalid argument for indices_or_sections" 240 ) 241 return indices_or_sections 242 243 244 @to_ivy_arrays_and_back 245 def dsplit(input, indices_or_sections=None, /, *, indices=None, sections=None): 246 indices_or_sections = _get_indices_or_sections( 247 indices_or_sections, indices, sections 248 ) 249 return tuple(ivy.dsplit(input, indices_or_sections)) 250 251 252 @to_ivy_arrays_and_back 253 def hsplit(input, indices_or_sections=None, /, *, indices=None, sections=None): 254 indices_or_sections = 
_get_indices_or_sections( 255 indices_or_sections, indices, sections 256 ) 257 return tuple(ivy.hsplit(input, indices_or_sections)) 258 259 260 @to_ivy_arrays_and_back 261 def vsplit(input, indices_or_sections=None, /, *, indices=None, sections=None): 262 indices_or_sections = _get_indices_or_sections( 263 indices_or_sections, indices, sections 264 ) 265 return tuple(ivy.vsplit(input, indices_or_sections)) 266 267 268 @to_ivy_arrays_and_back 269 def row_stack(tensors, *, out=None): 270 return ivy.vstack(tensors, out=out) 271 272 273 @to_ivy_arrays_and_back 274 def where(condition, input=None, other=None): 275 if not ivy.exists(input) and not ivy.exists(other): 276 return nonzero(condition, as_tuple=True) 277 return ivy.where(condition, input, other) 278 279 280 @to_ivy_arrays_and_back 281 def conj(input): 282 return ivy.conj(input) 283 284 285 @to_ivy_arrays_and_back 286 def index_add(input, dim, index, source, *, alpha=1, out=None): 287 input = ivy.swapaxes(input, dim, 0) 288 source = ivy.swapaxes(source, dim, 0) 289 _to_adds = [] 290 index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda x: x[0])) 291 while index: 292 _curr_idx = index[0][0] 293 while len(_to_adds) < _curr_idx: 294 _to_adds.append(ivy.zeros_like(source[0])) 295 _to_add_cum = ivy.get_item(source, index[0][1]) 296 while (1 < len(index)) and (index[0][0] == index[1][0]): 297 _to_add_cum = _to_add_cum + ivy.get_item(source, index.pop(1)[1]) 298 index.pop(0) 299 _to_adds.append(_to_add_cum) 300 while len(_to_adds) < input.shape[0]: 301 _to_adds.append(ivy.zeros_like(source[0])) 302 _to_adds = ivy.stack(_to_adds) 303 if len(input.shape) < 2: 304 # Added this line due to the paddle backend treating scalars as 1-d arrays 305 _to_adds = ivy.flatten(_to_adds) 306 307 ret = ivy.add(input, _to_adds, alpha=alpha) 308 ret = ivy.swapaxes(ret, 0, dim, out=out) 309 return ret 310 311 312 @to_ivy_arrays_and_back 313 def index_copy(input, dim, index, source, *, out=None): 314 input = ivy.swapaxes(input, dim, 0) 315 source = ivy.swapaxes(source, dim, 0) 316 index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda x: x[0])) 317 res = [] 318 while index: 319 _curr_idx = index[0][0] 320 for i in range(len(res), _curr_idx): 321 res.append(ivy.get_item(input, i)) 322 while (1 < len(index)) and (index[0][0] == index[1][0]): 323 index.pop(0) 324 res.append(ivy.get_item(source, index[0][1])) 325 index.pop(0) 326 for i in range(len(res), input.shape[0]): 327 res.append(ivy.get_item(input, i)) 328 res = ivy.stack(res) 329 if len(input.shape) < 2: 330 res = ivy.flatten(res) 331 332 return ivy.swapaxes(res, 0, dim, out=out) 333 334 335 @to_ivy_arrays_and_back 336 def masked_select(input, mask, out=None): 337 return ivy.flatten(input[mask], out=out) 338 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py b/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py --- a/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py +++ b/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py @@ -335,3 +335,9 @@ @to_ivy_arrays_and_back def masked_select(input, mask, out=None): return ivy.flatten(input[mask], out=out) + + +@to_ivy_arrays_and_back +def take(input, index): + input = ivy.reshape(input, (-1,)) + return ivy.gather(input, index, axis=0) \ No newline at end of file
{"golden_diff": "diff --git a/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py b/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py\n--- a/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py\n+++ b/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py\n@@ -335,3 +335,9 @@\n @to_ivy_arrays_and_back\r\n def masked_select(input, mask, out=None):\r\n return ivy.flatten(input[mask], out=out)\r\n+ \r\n+ \r\n+@to_ivy_arrays_and_back\r\n+def take(input, index):\r\n+ input = ivy.reshape(input, (-1,))\r\n+ return ivy.gather(input, index, axis=0) \n\\ No newline at end of file\n", "issue": "take\n\n", "before_files": [{"content": "# local\r\nimport ivy\r\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef adjoint(input):\r\n return ivy.adjoint(input)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef cat(tensors, dim=0, *, out=None):\r\n return ivy.concat(tensors, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef chunk(input, chunks, dim=0):\r\n if ivy.shape(input) == ():\r\n return [input]\r\n else:\r\n dim_size = ivy.shape(input)[dim]\r\n chunk_size = dim_size // chunks\r\n if chunk_size == 0:\r\n return ivy.split(input, num_or_size_splits=dim_size, axis=dim)\r\n else:\r\n remainder = dim_size % chunks\r\n if remainder == 0:\r\n return ivy.split(input, num_or_size_splits=chunks, axis=dim)\r\n else:\r\n return ivy.split(\r\n input,\r\n num_or_size_splits=tuple(\r\n [chunk_size + remainder] + [chunk_size] * (chunks - 1)\r\n ),\r\n axis=dim,\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef concat(tensors, dim=0, *, out=None):\r\n return ivy.concat(tensors, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef gather(input, dim, index, *, sparse_grad=False, out=None):\r\n if sparse_grad:\r\n raise ivy.utils.exceptions.IvyException(\r\n \"Gather does not yet support the sparse grad functionality\"\r\n )\r\n\r\n dim = dim % len(input.shape)\r\n all_indices = ivy.argwhere(ivy.full(index.shape, True))\r\n gather_locations = ivy.reshape(index, [ivy.prod(ivy.array(index.shape))])\r\n\r\n gather_indices = []\r\n for axis in range(len(index.shape)):\r\n if axis == dim:\r\n gather_indices.append(ivy.array(gather_locations, dtype=index.dtype))\r\n else:\r\n gather_indices.append(ivy.array(all_indices[:, axis], dtype=index.dtype))\r\n\r\n gather_indices = ivy.stack(gather_indices, axis=-1)\r\n gathered = ivy.gather_nd(input, gather_indices)\r\n reshaped = ivy.reshape(gathered, index.shape)\r\n return reshaped\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef nonzero(input, *, out=None, as_tuple=False):\r\n ret = ivy.nonzero(input)\r\n if as_tuple is False:\r\n ret = ivy.matrix_transpose(ivy.stack(ret))\r\n\r\n if ivy.exists(out):\r\n return ivy.inplace_update(out, ret)\r\n return ret\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef permute(input, dims):\r\n return ivy.permute_dims(input, axes=dims)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef reshape(input, shape):\r\n return ivy.reshape(input, shape)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef squeeze(input, dim):\r\n if isinstance(dim, int) and input.ndim > 0:\r\n if input.shape[dim] > 1:\r\n return input\r\n return ivy.squeeze(input, dim)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef stack(tensors, dim=0, *, out=None):\r\n return ivy.stack(tensors, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef swapaxes(input, axis0, axis1):\r\n return ivy.swapaxes(input, axis0, axis1)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef 
swapdims(input, dim0, dim1):\r\n return ivy.swapaxes(input, dim0, dim1)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef transpose(input, dim0, dim1):\r\n return ivy.swapaxes(input, dim0, dim1)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef t(input):\r\n if input.ndim > 2:\r\n raise ivy.utils.exceptions.IvyException(\r\n \"t(input) expects a tensor with <= 2 dimensions, but self is %dD\"\r\n % input.ndim\r\n )\r\n if input.ndim == 2:\r\n return ivy.swapaxes(input, 0, 1)\r\n else:\r\n return input\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef tile(input, dims):\r\n try:\r\n tup = tuple(dims)\r\n except TypeError:\r\n tup = (dims,)\r\n d = len(tup)\r\n res = 0\r\n if len(input.shape) > len([dims]) - 1:\r\n res = input\r\n if d < input.ndim:\r\n tup = (1,) * (input.ndim - d) + tup\r\n res = ivy.tile(input, tup)\r\n\r\n else:\r\n res = ivy.tile(input, repeats=dims, out=None)\r\n return res\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef unsqueeze(input, dim=0):\r\n return ivy.expand_dims(input, axis=dim)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef argwhere(input):\r\n return ivy.argwhere(input)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef movedim(input, source, destination):\r\n return ivy.moveaxis(input, source, destination)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef moveaxis(input, source, destination):\r\n return ivy.moveaxis(input, source, destination)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef hstack(tensors, *, out=None):\r\n return ivy.hstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef index_select(input, dim, index, *, out=None):\r\n return ivy.gather(input, index, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef dstack(tensors, *, out=None):\r\n return ivy.dstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef take_along_dim(input, indices, dim, *, out=None):\r\n return ivy.take_along_axis(input, indices, dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef vstack(tensors, *, out=None):\r\n return ivy.vstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef split(tensor, split_size_or_sections, dim=0):\r\n if isinstance(split_size_or_sections, int):\r\n split_size = split_size_or_sections\r\n split_size_or_sections = [split_size] * (tensor.shape[dim] // split_size)\r\n if tensor.shape[dim] % split_size:\r\n split_size_or_sections.append(tensor.shape[dim] % split_size)\r\n return tuple(\r\n ivy.split(\r\n tensor,\r\n num_or_size_splits=split_size_or_sections,\r\n axis=dim,\r\n with_remainder=True,\r\n )\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef tensor_split(input, indices_or_sections, dim=0):\r\n if isinstance(indices_or_sections, (list, tuple)):\r\n indices_or_sections = (\r\n ivy.diff(indices_or_sections, prepend=[0], append=[input.shape[dim]])\r\n .astype(ivy.int8)\r\n .to_list()\r\n )\r\n return ivy.split(\r\n input, num_or_size_splits=indices_or_sections, axis=dim, with_remainder=False\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef unbind(input, dim=0):\r\n shape = list(input.shape)\r\n shape.pop(dim)\r\n return tuple([x.reshape(tuple(shape)) for x in split(input, 1, dim=dim)])\r\n\r\n\r\ndef _get_indices_or_sections(indices_or_sections, indices, sections):\r\n if not ivy.exists(indices_or_sections):\r\n if ivy.exists(indices) and not ivy.exists(sections):\r\n indices_or_sections = indices\r\n elif ivy.exists(sections) and not ivy.exists(indices):\r\n indices_or_sections = sections\r\n else:\r\n raise ivy.utils.exception.IvyError(\r\n \"got invalid argument for indices_or_sections\"\r\n )\r\n return 
indices_or_sections\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef dsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):\r\n indices_or_sections = _get_indices_or_sections(\r\n indices_or_sections, indices, sections\r\n )\r\n return tuple(ivy.dsplit(input, indices_or_sections))\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef hsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):\r\n indices_or_sections = _get_indices_or_sections(\r\n indices_or_sections, indices, sections\r\n )\r\n return tuple(ivy.hsplit(input, indices_or_sections))\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef vsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):\r\n indices_or_sections = _get_indices_or_sections(\r\n indices_or_sections, indices, sections\r\n )\r\n return tuple(ivy.vsplit(input, indices_or_sections))\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef row_stack(tensors, *, out=None):\r\n return ivy.vstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef where(condition, input=None, other=None):\r\n if not ivy.exists(input) and not ivy.exists(other):\r\n return nonzero(condition, as_tuple=True)\r\n return ivy.where(condition, input, other)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef conj(input):\r\n return ivy.conj(input)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef index_add(input, dim, index, source, *, alpha=1, out=None):\r\n input = ivy.swapaxes(input, dim, 0)\r\n source = ivy.swapaxes(source, dim, 0)\r\n _to_adds = []\r\n index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda x: x[0]))\r\n while index:\r\n _curr_idx = index[0][0]\r\n while len(_to_adds) < _curr_idx:\r\n _to_adds.append(ivy.zeros_like(source[0]))\r\n _to_add_cum = ivy.get_item(source, index[0][1])\r\n while (1 < len(index)) and (index[0][0] == index[1][0]):\r\n _to_add_cum = _to_add_cum + ivy.get_item(source, index.pop(1)[1])\r\n index.pop(0)\r\n _to_adds.append(_to_add_cum)\r\n while len(_to_adds) < input.shape[0]:\r\n _to_adds.append(ivy.zeros_like(source[0]))\r\n _to_adds = ivy.stack(_to_adds)\r\n if len(input.shape) < 2:\r\n # Added this line due to the paddle backend treating scalars as 1-d arrays\r\n _to_adds = ivy.flatten(_to_adds)\r\n\r\n ret = ivy.add(input, _to_adds, alpha=alpha)\r\n ret = ivy.swapaxes(ret, 0, dim, out=out)\r\n return ret\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef index_copy(input, dim, index, source, *, out=None):\r\n input = ivy.swapaxes(input, dim, 0)\r\n source = ivy.swapaxes(source, dim, 0)\r\n index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda x: x[0]))\r\n res = []\r\n while index:\r\n _curr_idx = index[0][0]\r\n for i in range(len(res), _curr_idx):\r\n res.append(ivy.get_item(input, i))\r\n while (1 < len(index)) and (index[0][0] == index[1][0]):\r\n index.pop(0)\r\n res.append(ivy.get_item(source, index[0][1]))\r\n index.pop(0)\r\n for i in range(len(res), input.shape[0]):\r\n res.append(ivy.get_item(input, i))\r\n res = ivy.stack(res)\r\n if len(input.shape) < 2:\r\n res = ivy.flatten(res)\r\n\r\n return ivy.swapaxes(res, 0, dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef masked_select(input, mask, out=None):\r\n return ivy.flatten(input[mask], out=out)\r\n", "path": "ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py"}], "after_files": [{"content": "# local\r\nimport ivy\r\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef adjoint(input):\r\n return ivy.adjoint(input)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef 
cat(tensors, dim=0, *, out=None):\r\n return ivy.concat(tensors, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef chunk(input, chunks, dim=0):\r\n if ivy.shape(input) == ():\r\n return [input]\r\n else:\r\n dim_size = ivy.shape(input)[dim]\r\n chunk_size = dim_size // chunks\r\n if chunk_size == 0:\r\n return ivy.split(input, num_or_size_splits=dim_size, axis=dim)\r\n else:\r\n remainder = dim_size % chunks\r\n if remainder == 0:\r\n return ivy.split(input, num_or_size_splits=chunks, axis=dim)\r\n else:\r\n return ivy.split(\r\n input,\r\n num_or_size_splits=tuple(\r\n [chunk_size + remainder] + [chunk_size] * (chunks - 1)\r\n ),\r\n axis=dim,\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef concat(tensors, dim=0, *, out=None):\r\n return ivy.concat(tensors, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef gather(input, dim, index, *, sparse_grad=False, out=None):\r\n if sparse_grad:\r\n raise ivy.utils.exceptions.IvyException(\r\n \"Gather does not yet support the sparse grad functionality\"\r\n )\r\n\r\n dim = dim % len(input.shape)\r\n all_indices = ivy.argwhere(ivy.full(index.shape, True))\r\n gather_locations = ivy.reshape(index, [ivy.prod(ivy.array(index.shape))])\r\n\r\n gather_indices = []\r\n for axis in range(len(index.shape)):\r\n if axis == dim:\r\n gather_indices.append(ivy.array(gather_locations, dtype=index.dtype))\r\n else:\r\n gather_indices.append(ivy.array(all_indices[:, axis], dtype=index.dtype))\r\n\r\n gather_indices = ivy.stack(gather_indices, axis=-1)\r\n gathered = ivy.gather_nd(input, gather_indices)\r\n reshaped = ivy.reshape(gathered, index.shape)\r\n return reshaped\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef nonzero(input, *, out=None, as_tuple=False):\r\n ret = ivy.nonzero(input)\r\n if as_tuple is False:\r\n ret = ivy.matrix_transpose(ivy.stack(ret))\r\n\r\n if ivy.exists(out):\r\n return ivy.inplace_update(out, ret)\r\n return ret\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef permute(input, dims):\r\n return ivy.permute_dims(input, axes=dims)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef reshape(input, shape):\r\n return ivy.reshape(input, shape)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef squeeze(input, dim):\r\n if isinstance(dim, int) and input.ndim > 0:\r\n if input.shape[dim] > 1:\r\n return input\r\n return ivy.squeeze(input, dim)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef stack(tensors, dim=0, *, out=None):\r\n return ivy.stack(tensors, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef swapaxes(input, axis0, axis1):\r\n return ivy.swapaxes(input, axis0, axis1)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef swapdims(input, dim0, dim1):\r\n return ivy.swapaxes(input, dim0, dim1)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef transpose(input, dim0, dim1):\r\n return ivy.swapaxes(input, dim0, dim1)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef t(input):\r\n if input.ndim > 2:\r\n raise ivy.utils.exceptions.IvyException(\r\n \"t(input) expects a tensor with <= 2 dimensions, but self is %dD\"\r\n % input.ndim\r\n )\r\n if input.ndim == 2:\r\n return ivy.swapaxes(input, 0, 1)\r\n else:\r\n return input\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef tile(input, dims):\r\n try:\r\n tup = tuple(dims)\r\n except TypeError:\r\n tup = (dims,)\r\n d = len(tup)\r\n res = 0\r\n if len(input.shape) > len([dims]) - 1:\r\n res = input\r\n if d < input.ndim:\r\n tup = (1,) * (input.ndim - d) + tup\r\n res = ivy.tile(input, tup)\r\n\r\n else:\r\n res = ivy.tile(input, repeats=dims, out=None)\r\n return res\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef unsqueeze(input, 
dim=0):\r\n return ivy.expand_dims(input, axis=dim)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef argwhere(input):\r\n return ivy.argwhere(input)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef movedim(input, source, destination):\r\n return ivy.moveaxis(input, source, destination)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef moveaxis(input, source, destination):\r\n return ivy.moveaxis(input, source, destination)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef hstack(tensors, *, out=None):\r\n return ivy.hstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef index_select(input, dim, index, *, out=None):\r\n return ivy.gather(input, index, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef dstack(tensors, *, out=None):\r\n return ivy.dstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef take_along_dim(input, indices, dim, *, out=None):\r\n return ivy.take_along_axis(input, indices, dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef vstack(tensors, *, out=None):\r\n return ivy.vstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef split(tensor, split_size_or_sections, dim=0):\r\n if isinstance(split_size_or_sections, int):\r\n split_size = split_size_or_sections\r\n split_size_or_sections = [split_size] * (tensor.shape[dim] // split_size)\r\n if tensor.shape[dim] % split_size:\r\n split_size_or_sections.append(tensor.shape[dim] % split_size)\r\n return tuple(\r\n ivy.split(\r\n tensor,\r\n num_or_size_splits=split_size_or_sections,\r\n axis=dim,\r\n with_remainder=True,\r\n )\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef tensor_split(input, indices_or_sections, dim=0):\r\n if isinstance(indices_or_sections, (list, tuple)):\r\n indices_or_sections = (\r\n ivy.diff(indices_or_sections, prepend=[0], append=[input.shape[dim]])\r\n .astype(ivy.int8)\r\n .to_list()\r\n )\r\n return ivy.split(\r\n input, num_or_size_splits=indices_or_sections, axis=dim, with_remainder=False\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef unbind(input, dim=0):\r\n shape = list(input.shape)\r\n shape.pop(dim)\r\n return tuple([x.reshape(tuple(shape)) for x in split(input, 1, dim=dim)])\r\n\r\n\r\ndef _get_indices_or_sections(indices_or_sections, indices, sections):\r\n if not ivy.exists(indices_or_sections):\r\n if ivy.exists(indices) and not ivy.exists(sections):\r\n indices_or_sections = indices\r\n elif ivy.exists(sections) and not ivy.exists(indices):\r\n indices_or_sections = sections\r\n else:\r\n raise ivy.utils.exception.IvyError(\r\n \"got invalid argument for indices_or_sections\"\r\n )\r\n return indices_or_sections\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef dsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):\r\n indices_or_sections = _get_indices_or_sections(\r\n indices_or_sections, indices, sections\r\n )\r\n return tuple(ivy.dsplit(input, indices_or_sections))\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef hsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):\r\n indices_or_sections = _get_indices_or_sections(\r\n indices_or_sections, indices, sections\r\n )\r\n return tuple(ivy.hsplit(input, indices_or_sections))\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef vsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):\r\n indices_or_sections = _get_indices_or_sections(\r\n indices_or_sections, indices, sections\r\n )\r\n return tuple(ivy.vsplit(input, indices_or_sections))\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef row_stack(tensors, *, out=None):\r\n return ivy.vstack(tensors, 
out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef where(condition, input=None, other=None):\r\n if not ivy.exists(input) and not ivy.exists(other):\r\n return nonzero(condition, as_tuple=True)\r\n return ivy.where(condition, input, other)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef conj(input):\r\n return ivy.conj(input)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef index_add(input, dim, index, source, *, alpha=1, out=None):\r\n input = ivy.swapaxes(input, dim, 0)\r\n source = ivy.swapaxes(source, dim, 0)\r\n _to_adds = []\r\n index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda x: x[0]))\r\n while index:\r\n _curr_idx = index[0][0]\r\n while len(_to_adds) < _curr_idx:\r\n _to_adds.append(ivy.zeros_like(source[0]))\r\n _to_add_cum = ivy.get_item(source, index[0][1])\r\n while (1 < len(index)) and (index[0][0] == index[1][0]):\r\n _to_add_cum = _to_add_cum + ivy.get_item(source, index.pop(1)[1])\r\n index.pop(0)\r\n _to_adds.append(_to_add_cum)\r\n while len(_to_adds) < input.shape[0]:\r\n _to_adds.append(ivy.zeros_like(source[0]))\r\n _to_adds = ivy.stack(_to_adds)\r\n if len(input.shape) < 2:\r\n # Added this line due to the paddle backend treating scalars as 1-d arrays\r\n _to_adds = ivy.flatten(_to_adds)\r\n\r\n ret = ivy.add(input, _to_adds, alpha=alpha)\r\n ret = ivy.swapaxes(ret, 0, dim, out=out)\r\n return ret\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef index_copy(input, dim, index, source, *, out=None):\r\n input = ivy.swapaxes(input, dim, 0)\r\n source = ivy.swapaxes(source, dim, 0)\r\n index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda x: x[0]))\r\n res = []\r\n while index:\r\n _curr_idx = index[0][0]\r\n for i in range(len(res), _curr_idx):\r\n res.append(ivy.get_item(input, i))\r\n while (1 < len(index)) and (index[0][0] == index[1][0]):\r\n index.pop(0)\r\n res.append(ivy.get_item(source, index[0][1]))\r\n index.pop(0)\r\n for i in range(len(res), input.shape[0]):\r\n res.append(ivy.get_item(input, i))\r\n res = ivy.stack(res)\r\n if len(input.shape) < 2:\r\n res = ivy.flatten(res)\r\n\r\n return ivy.swapaxes(res, 0, dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef masked_select(input, mask, out=None):\r\n return ivy.flatten(input[mask], out=out)\r\n \r\n \r\n@to_ivy_arrays_and_back\r\ndef take(input, index):\r\n input = ivy.reshape(input, (-1,))\r\n return ivy.gather(input, index, axis=0) ", "path": "ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py"}]}
num_tokens: 3,793
num_tokens_diff: 184

problem_id: gh_patches_debug_7299
source: rasdani/github-patches
task_type: git_diff
in_source_id: litestar-org__litestar-1488
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- StaticFilesConfig and virtual directories I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems. https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `litestar/contrib/htmx/response.py` Content: ``` 1 from __future__ import annotations 2 3 from dataclasses import dataclass, field 4 from typing import TYPE_CHECKING, Any, Generic, TypeVar 5 from urllib.parse import quote 6 7 from litestar import Litestar, MediaType, Request, Response 8 from litestar.contrib.htmx._utils import HTMX_STOP_POLLING, get_headers 9 from litestar.contrib.htmx.types import ( 10 EventAfterType, 11 HtmxHeaderType, 12 LocationType, 13 PushUrlType, 14 ReSwapMethod, 15 TriggerEventType, 16 ) 17 from litestar.response import TemplateResponse 18 from litestar.response_containers import ResponseContainer, Template 19 from litestar.status_codes import HTTP_200_OK 20 21 __all__ = ( 22 "ClientRedirect", 23 "ClientRefresh", 24 "HTMXTemplate", 25 "HXLocation", 26 "HXStopPolling", 27 "PushUrl", 28 "ReplaceUrl", 29 "Reswap", 30 "Retarget", 31 "TriggerEvent", 32 ) 33 34 if TYPE_CHECKING: 35 from litestar.background_tasks import BackgroundTask, BackgroundTasks 36 from litestar.datastructures import Cookie 37 38 39 # HTMX defined HTTP status code. 40 # Response carrying this status code will ask client to stop Polling. 
41 T = TypeVar("T") 42 43 44 class HXStopPolling(Response): 45 """Stop HTMX client from Polling.""" 46 47 def __init__(self) -> None: 48 """Initialize""" 49 super().__init__(content=None) 50 self.status_code = HTMX_STOP_POLLING 51 52 53 class ClientRedirect(Response): 54 """HTMX Response class to support client side redirect.""" 55 56 def __init__(self, redirect_to: str) -> None: 57 """Set status code to 200 (required by HTMX), and pass redirect url.""" 58 super().__init__(content=None, headers=get_headers(hx_headers=HtmxHeaderType(redirect=redirect_to))) 59 del self.headers["Location"] 60 61 62 class ClientRefresh(Response): 63 """Response to support HTMX client page refresh""" 64 65 def __init__(self) -> None: 66 """Set Status code to 200 and set headers.""" 67 super().__init__(content=None, headers=get_headers(hx_headers=HtmxHeaderType(refresh=True))) 68 69 70 class PushUrl(Generic[T], Response[T]): 71 """Response to push new url into the history stack.""" 72 73 def __init__(self, content: T, push_url: PushUrlType, **kwargs: Any) -> None: 74 """Initialize PushUrl.""" 75 super().__init__( 76 content=content, 77 status_code=HTTP_200_OK, 78 headers=get_headers(hx_headers=HtmxHeaderType(push_url=push_url)), 79 **kwargs, 80 ) 81 82 83 class ReplaceUrl(Generic[T], Response[T]): 84 """Response to replace url in the Browser Location bar.""" 85 86 def __init__(self, content: T, replace_url: PushUrlType, **kwargs: Any) -> None: 87 """Initialize ReplaceUrl.""" 88 super().__init__( 89 content=content, 90 status_code=HTTP_200_OK, 91 headers=get_headers(hx_headers=HtmxHeaderType(replace_url=replace_url)), 92 **kwargs, 93 ) 94 95 96 class Reswap(Generic[T], Response[T]): 97 """Response to specify how the response will be swapped.""" 98 99 def __init__( 100 self, 101 content: T, 102 method: ReSwapMethod, 103 **kwargs: Any, 104 ) -> None: 105 """Initialize Reswap.""" 106 super().__init__(content=content, headers=get_headers(hx_headers=HtmxHeaderType(re_swap=method)), **kwargs) 107 108 109 class Retarget(Generic[T], Response[T]): 110 """Response to target different element on the page.""" 111 112 def __init__(self, content: T, target: str, **kwargs: Any) -> None: 113 """Initialize Retarget.""" 114 super().__init__(content=content, headers=get_headers(hx_headers=HtmxHeaderType(re_target=target)), **kwargs) 115 116 117 class TriggerEvent(Generic[T], Response[T]): 118 """Trigger Client side event.""" 119 120 def __init__( 121 self, 122 content: T, 123 name: str, 124 after: EventAfterType, 125 params: dict[str, Any] | None = None, 126 **kwargs: Any, 127 ) -> None: 128 """Initialize TriggerEvent.""" 129 event = TriggerEventType(name=name, params=params, after=after) 130 headers = get_headers(hx_headers=HtmxHeaderType(trigger_event=event)) 131 super().__init__(content=content, headers=headers, **kwargs) 132 133 134 class HXLocation(Response): 135 """Client side redirect without full page reload.""" 136 137 def __init__( 138 self, 139 redirect_to: str, 140 source: str | None = None, 141 event: str | None = None, 142 target: str | None = None, 143 swap: ReSwapMethod = None, 144 hx_headers: dict[str, Any] | None = None, 145 values: dict[str, str] | None = None, 146 **kwargs: Any, 147 ) -> None: 148 """Initialize HXLocation, Set status code to 200 (required by HTMX), 149 and pass redirect url. 
150 """ 151 super().__init__( 152 content=None, 153 headers={"Location": quote(redirect_to, safe="/#%[]=:;$&()+,!?*@'~")}, 154 **kwargs, 155 ) 156 spec: dict[str, Any] = get_headers( 157 hx_headers=HtmxHeaderType( 158 location=LocationType( 159 path=str(self.headers.get("Location")), 160 source=source, 161 event=event, 162 target=target, 163 swap=swap, 164 values=values, 165 hx_headers=hx_headers, 166 ) 167 ) 168 ) 169 del self.headers["Location"] 170 self.headers.update(spec) 171 172 173 @dataclass 174 class HTMXTemplate(ResponseContainer[TemplateResponse]): 175 """HTMX template wrapper""" 176 177 name: str 178 """Path-like name for the template to be rendered, e.g. "index.html".""" 179 context: dict[str, Any] = field(default_factory=dict) 180 """A dictionary of key/value pairs to be passed to the temple engine's render method. 181 182 Defaults to None. 183 """ 184 background: BackgroundTask | BackgroundTasks | None = field(default=None) 185 """A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or 186 :class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished. Defaults to 187 ``None``. 188 """ 189 headers: dict[str, Any] = field(default_factory=dict) 190 """A string/string dictionary of response headers.Header keys are insensitive. Defaults to ``None``.""" 191 cookies: list[Cookie] = field(default_factory=list) 192 """A list of :class:`Cookies <.datastructures.Cookie>` to be set under the response ``Set-Cookie`` header. Defaults 193 to ``None``. 194 """ 195 media_type: MediaType | str | None = field(default=None) 196 """If defined, overrides the media type configured in the route decorator.""" 197 encoding: str = field(default="utf-8") 198 """The encoding to be used for the response headers.""" 199 push_url: PushUrlType | None = field(default=None) 200 """Either a string value specifying a URL to push to browser history or ``False`` to prevent HTMX client from 201 pushing a url to browser history.""" 202 re_swap: ReSwapMethod | None = field(default=None) 203 """Method value to instruct HTMX which swapping method to use.""" 204 re_target: str | None = field(default=None) 205 """Value for 'id of target element' to apply changes to.""" 206 trigger_event: str | None = field(default=None) 207 """Event name to trigger.""" 208 params: dict[str, Any] | None = field(default=None) 209 """Dictionary of parameters if any required with trigger event parameter.""" 210 after: EventAfterType | None = field(default=None) 211 """Changes to apply after ``receive``, ``settle`` or ``swap`` event.""" 212 213 def to_response( 214 self, 215 headers: dict[str, Any], 216 media_type: MediaType | str, 217 status_code: int, 218 app: Litestar, 219 request: Request, 220 ) -> TemplateResponse: 221 """Add HTMX headers and return a :class:`TemplateResponse <.response.TemplateResponse>`.""" 222 223 event: TriggerEventType | None = None 224 if self.trigger_event: 225 event = TriggerEventType(name=str(self.trigger_event), params=self.params, after=self.after) 226 227 hx_headers: dict[str, Any] = get_headers( 228 hx_headers=HtmxHeaderType( 229 push_url=self.push_url, re_swap=self.re_swap, re_target=self.re_target, trigger_event=event 230 ) 231 ) 232 233 template = Template( 234 name=self.name, 235 background=self.background, 236 encoding=self.encoding, 237 ) 238 239 return template.to_response( 240 headers=hx_headers, media_type=media_type, app=app, status_code=status_code, request=request 241 ) 242 ``` --- END FILES --- Please first localize the bug based on the 
issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/litestar/contrib/htmx/response.py b/litestar/contrib/htmx/response.py --- a/litestar/contrib/htmx/response.py +++ b/litestar/contrib/htmx/response.py @@ -230,11 +230,7 @@ ) ) - template = Template( - name=self.name, - background=self.background, - encoding=self.encoding, - ) + template = Template(name=self.name, background=self.background, context=self.context, encoding=self.encoding) return template.to_response( headers=hx_headers, media_type=media_type, app=app, status_code=status_code, request=request
{"golden_diff": "diff --git a/litestar/contrib/htmx/response.py b/litestar/contrib/htmx/response.py\n--- a/litestar/contrib/htmx/response.py\n+++ b/litestar/contrib/htmx/response.py\n@@ -230,11 +230,7 @@\n )\n )\n \n- template = Template(\n- name=self.name,\n- background=self.background,\n- encoding=self.encoding,\n- )\n+ template = Template(name=self.name, background=self.background, context=self.context, encoding=self.encoding)\n \n return template.to_response(\n headers=hx_headers, media_type=media_type, app=app, status_code=status_code, request=request\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING, Any, Generic, TypeVar\nfrom urllib.parse import quote\n\nfrom litestar import Litestar, MediaType, Request, Response\nfrom litestar.contrib.htmx._utils import HTMX_STOP_POLLING, get_headers\nfrom litestar.contrib.htmx.types import (\n EventAfterType,\n HtmxHeaderType,\n LocationType,\n PushUrlType,\n ReSwapMethod,\n TriggerEventType,\n)\nfrom litestar.response import TemplateResponse\nfrom litestar.response_containers import ResponseContainer, Template\nfrom litestar.status_codes import HTTP_200_OK\n\n__all__ = (\n \"ClientRedirect\",\n \"ClientRefresh\",\n \"HTMXTemplate\",\n \"HXLocation\",\n \"HXStopPolling\",\n \"PushUrl\",\n \"ReplaceUrl\",\n \"Reswap\",\n \"Retarget\",\n \"TriggerEvent\",\n)\n\nif TYPE_CHECKING:\n from litestar.background_tasks import BackgroundTask, BackgroundTasks\n from litestar.datastructures import Cookie\n\n\n# HTMX defined HTTP status code.\n# Response carrying this status code will ask client to stop Polling.\nT = TypeVar(\"T\")\n\n\nclass HXStopPolling(Response):\n \"\"\"Stop HTMX client from Polling.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize\"\"\"\n super().__init__(content=None)\n self.status_code = HTMX_STOP_POLLING\n\n\nclass ClientRedirect(Response):\n \"\"\"HTMX Response class to support client side redirect.\"\"\"\n\n def __init__(self, redirect_to: str) -> None:\n \"\"\"Set status code to 200 (required by HTMX), and pass redirect url.\"\"\"\n super().__init__(content=None, headers=get_headers(hx_headers=HtmxHeaderType(redirect=redirect_to)))\n del self.headers[\"Location\"]\n\n\nclass ClientRefresh(Response):\n \"\"\"Response to support HTMX client page refresh\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Set Status code to 200 and set headers.\"\"\"\n super().__init__(content=None, headers=get_headers(hx_headers=HtmxHeaderType(refresh=True)))\n\n\nclass PushUrl(Generic[T], Response[T]):\n \"\"\"Response to push new url into the history stack.\"\"\"\n\n def __init__(self, content: T, push_url: PushUrlType, **kwargs: Any) -> None:\n \"\"\"Initialize PushUrl.\"\"\"\n super().__init__(\n content=content,\n status_code=HTTP_200_OK,\n 
headers=get_headers(hx_headers=HtmxHeaderType(push_url=push_url)),\n **kwargs,\n )\n\n\nclass ReplaceUrl(Generic[T], Response[T]):\n \"\"\"Response to replace url in the Browser Location bar.\"\"\"\n\n def __init__(self, content: T, replace_url: PushUrlType, **kwargs: Any) -> None:\n \"\"\"Initialize ReplaceUrl.\"\"\"\n super().__init__(\n content=content,\n status_code=HTTP_200_OK,\n headers=get_headers(hx_headers=HtmxHeaderType(replace_url=replace_url)),\n **kwargs,\n )\n\n\nclass Reswap(Generic[T], Response[T]):\n \"\"\"Response to specify how the response will be swapped.\"\"\"\n\n def __init__(\n self,\n content: T,\n method: ReSwapMethod,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize Reswap.\"\"\"\n super().__init__(content=content, headers=get_headers(hx_headers=HtmxHeaderType(re_swap=method)), **kwargs)\n\n\nclass Retarget(Generic[T], Response[T]):\n \"\"\"Response to target different element on the page.\"\"\"\n\n def __init__(self, content: T, target: str, **kwargs: Any) -> None:\n \"\"\"Initialize Retarget.\"\"\"\n super().__init__(content=content, headers=get_headers(hx_headers=HtmxHeaderType(re_target=target)), **kwargs)\n\n\nclass TriggerEvent(Generic[T], Response[T]):\n \"\"\"Trigger Client side event.\"\"\"\n\n def __init__(\n self,\n content: T,\n name: str,\n after: EventAfterType,\n params: dict[str, Any] | None = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize TriggerEvent.\"\"\"\n event = TriggerEventType(name=name, params=params, after=after)\n headers = get_headers(hx_headers=HtmxHeaderType(trigger_event=event))\n super().__init__(content=content, headers=headers, **kwargs)\n\n\nclass HXLocation(Response):\n \"\"\"Client side redirect without full page reload.\"\"\"\n\n def __init__(\n self,\n redirect_to: str,\n source: str | None = None,\n event: str | None = None,\n target: str | None = None,\n swap: ReSwapMethod = None,\n hx_headers: dict[str, Any] | None = None,\n values: dict[str, str] | None = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize HXLocation, Set status code to 200 (required by HTMX),\n and pass redirect url.\n \"\"\"\n super().__init__(\n content=None,\n headers={\"Location\": quote(redirect_to, safe=\"/#%[]=:;$&()+,!?*@'~\")},\n **kwargs,\n )\n spec: dict[str, Any] = get_headers(\n hx_headers=HtmxHeaderType(\n location=LocationType(\n path=str(self.headers.get(\"Location\")),\n source=source,\n event=event,\n target=target,\n swap=swap,\n values=values,\n hx_headers=hx_headers,\n )\n )\n )\n del self.headers[\"Location\"]\n self.headers.update(spec)\n\n\n@dataclass\nclass HTMXTemplate(ResponseContainer[TemplateResponse]):\n \"\"\"HTMX template wrapper\"\"\"\n\n name: str\n \"\"\"Path-like name for the template to be rendered, e.g. \"index.html\".\"\"\"\n context: dict[str, Any] = field(default_factory=dict)\n \"\"\"A dictionary of key/value pairs to be passed to the temple engine's render method.\n\n Defaults to None.\n \"\"\"\n background: BackgroundTask | BackgroundTasks | None = field(default=None)\n \"\"\"A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or\n :class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished. Defaults to\n ``None``.\n \"\"\"\n headers: dict[str, Any] = field(default_factory=dict)\n \"\"\"A string/string dictionary of response headers.Header keys are insensitive. 
Defaults to ``None``.\"\"\"\n cookies: list[Cookie] = field(default_factory=list)\n \"\"\"A list of :class:`Cookies <.datastructures.Cookie>` to be set under the response ``Set-Cookie`` header. Defaults\n to ``None``.\n \"\"\"\n media_type: MediaType | str | None = field(default=None)\n \"\"\"If defined, overrides the media type configured in the route decorator.\"\"\"\n encoding: str = field(default=\"utf-8\")\n \"\"\"The encoding to be used for the response headers.\"\"\"\n push_url: PushUrlType | None = field(default=None)\n \"\"\"Either a string value specifying a URL to push to browser history or ``False`` to prevent HTMX client from\n pushing a url to browser history.\"\"\"\n re_swap: ReSwapMethod | None = field(default=None)\n \"\"\"Method value to instruct HTMX which swapping method to use.\"\"\"\n re_target: str | None = field(default=None)\n \"\"\"Value for 'id of target element' to apply changes to.\"\"\"\n trigger_event: str | None = field(default=None)\n \"\"\"Event name to trigger.\"\"\"\n params: dict[str, Any] | None = field(default=None)\n \"\"\"Dictionary of parameters if any required with trigger event parameter.\"\"\"\n after: EventAfterType | None = field(default=None)\n \"\"\"Changes to apply after ``receive``, ``settle`` or ``swap`` event.\"\"\"\n\n def to_response(\n self,\n headers: dict[str, Any],\n media_type: MediaType | str,\n status_code: int,\n app: Litestar,\n request: Request,\n ) -> TemplateResponse:\n \"\"\"Add HTMX headers and return a :class:`TemplateResponse <.response.TemplateResponse>`.\"\"\"\n\n event: TriggerEventType | None = None\n if self.trigger_event:\n event = TriggerEventType(name=str(self.trigger_event), params=self.params, after=self.after)\n\n hx_headers: dict[str, Any] = get_headers(\n hx_headers=HtmxHeaderType(\n push_url=self.push_url, re_swap=self.re_swap, re_target=self.re_target, trigger_event=event\n )\n )\n\n template = Template(\n name=self.name,\n background=self.background,\n encoding=self.encoding,\n )\n\n return template.to_response(\n headers=hx_headers, media_type=media_type, app=app, status_code=status_code, request=request\n )\n", "path": "litestar/contrib/htmx/response.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING, Any, Generic, TypeVar\nfrom urllib.parse import quote\n\nfrom litestar import Litestar, MediaType, Request, Response\nfrom litestar.contrib.htmx._utils import HTMX_STOP_POLLING, get_headers\nfrom litestar.contrib.htmx.types import (\n EventAfterType,\n HtmxHeaderType,\n LocationType,\n PushUrlType,\n ReSwapMethod,\n TriggerEventType,\n)\nfrom litestar.response import TemplateResponse\nfrom litestar.response_containers import ResponseContainer, Template\nfrom litestar.status_codes import HTTP_200_OK\n\n__all__ = (\n \"ClientRedirect\",\n \"ClientRefresh\",\n \"HTMXTemplate\",\n \"HXLocation\",\n \"HXStopPolling\",\n \"PushUrl\",\n \"ReplaceUrl\",\n \"Reswap\",\n \"Retarget\",\n \"TriggerEvent\",\n)\n\nif TYPE_CHECKING:\n from litestar.background_tasks import BackgroundTask, BackgroundTasks\n from litestar.datastructures import Cookie\n\n\n# HTMX defined HTTP status code.\n# Response carrying this status code will ask client to stop Polling.\nT = TypeVar(\"T\")\n\n\nclass HXStopPolling(Response):\n \"\"\"Stop HTMX client from Polling.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize\"\"\"\n super().__init__(content=None)\n self.status_code = HTMX_STOP_POLLING\n\n\nclass ClientRedirect(Response):\n 
\"\"\"HTMX Response class to support client side redirect.\"\"\"\n\n def __init__(self, redirect_to: str) -> None:\n \"\"\"Set status code to 200 (required by HTMX), and pass redirect url.\"\"\"\n super().__init__(content=None, headers=get_headers(hx_headers=HtmxHeaderType(redirect=redirect_to)))\n del self.headers[\"Location\"]\n\n\nclass ClientRefresh(Response):\n \"\"\"Response to support HTMX client page refresh\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Set Status code to 200 and set headers.\"\"\"\n super().__init__(content=None, headers=get_headers(hx_headers=HtmxHeaderType(refresh=True)))\n\n\nclass PushUrl(Generic[T], Response[T]):\n \"\"\"Response to push new url into the history stack.\"\"\"\n\n def __init__(self, content: T, push_url: PushUrlType, **kwargs: Any) -> None:\n \"\"\"Initialize PushUrl.\"\"\"\n super().__init__(\n content=content,\n status_code=HTTP_200_OK,\n headers=get_headers(hx_headers=HtmxHeaderType(push_url=push_url)),\n **kwargs,\n )\n\n\nclass ReplaceUrl(Generic[T], Response[T]):\n \"\"\"Response to replace url in the Browser Location bar.\"\"\"\n\n def __init__(self, content: T, replace_url: PushUrlType, **kwargs: Any) -> None:\n \"\"\"Initialize ReplaceUrl.\"\"\"\n super().__init__(\n content=content,\n status_code=HTTP_200_OK,\n headers=get_headers(hx_headers=HtmxHeaderType(replace_url=replace_url)),\n **kwargs,\n )\n\n\nclass Reswap(Generic[T], Response[T]):\n \"\"\"Response to specify how the response will be swapped.\"\"\"\n\n def __init__(\n self,\n content: T,\n method: ReSwapMethod,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize Reswap.\"\"\"\n super().__init__(content=content, headers=get_headers(hx_headers=HtmxHeaderType(re_swap=method)), **kwargs)\n\n\nclass Retarget(Generic[T], Response[T]):\n \"\"\"Response to target different element on the page.\"\"\"\n\n def __init__(self, content: T, target: str, **kwargs: Any) -> None:\n \"\"\"Initialize Retarget.\"\"\"\n super().__init__(content=content, headers=get_headers(hx_headers=HtmxHeaderType(re_target=target)), **kwargs)\n\n\nclass TriggerEvent(Generic[T], Response[T]):\n \"\"\"Trigger Client side event.\"\"\"\n\n def __init__(\n self,\n content: T,\n name: str,\n after: EventAfterType,\n params: dict[str, Any] | None = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize TriggerEvent.\"\"\"\n event = TriggerEventType(name=name, params=params, after=after)\n headers = get_headers(hx_headers=HtmxHeaderType(trigger_event=event))\n super().__init__(content=content, headers=headers, **kwargs)\n\n\nclass HXLocation(Response):\n \"\"\"Client side redirect without full page reload.\"\"\"\n\n def __init__(\n self,\n redirect_to: str,\n source: str | None = None,\n event: str | None = None,\n target: str | None = None,\n swap: ReSwapMethod = None,\n hx_headers: dict[str, Any] | None = None,\n values: dict[str, str] | None = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize HXLocation, Set status code to 200 (required by HTMX),\n and pass redirect url.\n \"\"\"\n super().__init__(\n content=None,\n headers={\"Location\": quote(redirect_to, safe=\"/#%[]=:;$&()+,!?*@'~\")},\n **kwargs,\n )\n spec: dict[str, Any] = get_headers(\n hx_headers=HtmxHeaderType(\n location=LocationType(\n path=str(self.headers.get(\"Location\")),\n source=source,\n event=event,\n target=target,\n swap=swap,\n values=values,\n hx_headers=hx_headers,\n )\n )\n )\n del self.headers[\"Location\"]\n self.headers.update(spec)\n\n\n@dataclass\nclass HTMXTemplate(ResponseContainer[TemplateResponse]):\n \"\"\"HTMX template 
wrapper\"\"\"\n\n name: str\n \"\"\"Path-like name for the template to be rendered, e.g. \"index.html\".\"\"\"\n context: dict[str, Any] = field(default_factory=dict)\n \"\"\"A dictionary of key/value pairs to be passed to the temple engine's render method.\n\n Defaults to None.\n \"\"\"\n background: BackgroundTask | BackgroundTasks | None = field(default=None)\n \"\"\"A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or\n :class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished. Defaults to\n ``None``.\n \"\"\"\n headers: dict[str, Any] = field(default_factory=dict)\n \"\"\"A string/string dictionary of response headers.Header keys are insensitive. Defaults to ``None``.\"\"\"\n cookies: list[Cookie] = field(default_factory=list)\n \"\"\"A list of :class:`Cookies <.datastructures.Cookie>` to be set under the response ``Set-Cookie`` header. Defaults\n to ``None``.\n \"\"\"\n media_type: MediaType | str | None = field(default=None)\n \"\"\"If defined, overrides the media type configured in the route decorator.\"\"\"\n encoding: str = field(default=\"utf-8\")\n \"\"\"The encoding to be used for the response headers.\"\"\"\n push_url: PushUrlType | None = field(default=None)\n \"\"\"Either a string value specifying a URL to push to browser history or ``False`` to prevent HTMX client from\n pushing a url to browser history.\"\"\"\n re_swap: ReSwapMethod | None = field(default=None)\n \"\"\"Method value to instruct HTMX which swapping method to use.\"\"\"\n re_target: str | None = field(default=None)\n \"\"\"Value for 'id of target element' to apply changes to.\"\"\"\n trigger_event: str | None = field(default=None)\n \"\"\"Event name to trigger.\"\"\"\n params: dict[str, Any] | None = field(default=None)\n \"\"\"Dictionary of parameters if any required with trigger event parameter.\"\"\"\n after: EventAfterType | None = field(default=None)\n \"\"\"Changes to apply after ``receive``, ``settle`` or ``swap`` event.\"\"\"\n\n def to_response(\n self,\n headers: dict[str, Any],\n media_type: MediaType | str,\n status_code: int,\n app: Litestar,\n request: Request,\n ) -> TemplateResponse:\n \"\"\"Add HTMX headers and return a :class:`TemplateResponse <.response.TemplateResponse>`.\"\"\"\n\n event: TriggerEventType | None = None\n if self.trigger_event:\n event = TriggerEventType(name=str(self.trigger_event), params=self.params, after=self.after)\n\n hx_headers: dict[str, Any] = get_headers(\n hx_headers=HtmxHeaderType(\n push_url=self.push_url, re_swap=self.re_swap, re_target=self.re_target, trigger_event=event\n )\n )\n\n template = Template(name=self.name, background=self.background, context=self.context, encoding=self.encoding)\n\n return template.to_response(\n headers=hx_headers, media_type=media_type, app=app, status_code=status_code, request=request\n )\n", "path": "litestar/contrib/htmx/response.py"}]}
num_tokens: 2,968
num_tokens_diff: 151

problem_id: gh_patches_debug_26291
source: rasdani/github-patches
task_type: git_diff
in_source_id: mathesar-foundation__mathesar-3117
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The requirement of superuser postgresql access is problematic ## Problem Mathesar needs a Postgres superuser to function correctly, from the docs at https://docs.mathesar.org/installation/build-from-source/ ## Proposed solution The mathesar user should not require superuser access. ## Additional context The superuser is a global permission meaning that a user that has superuser permission will be able to access (and do stuff) not on the mathesar but *all* the databases of the RDBMS. Considering that many production systems have a single RDBMS hosting many application this is a major problem since the mathsar user won't have any access boundaries on the same RDBMS. The mathesar user access can be unlimited but *must* be bounded without the mathesar database. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `db/install.py` Content: ``` 1 from sqlalchemy import text 2 from sqlalchemy.exc import OperationalError 3 4 from db import engine 5 from db.sql import install as sql_install 6 from db.types import install as types_install 7 8 9 def install_mathesar( 10 database_name, username, password, hostname, port, skip_confirm 11 ): 12 """Create database and install Mathesar on it.""" 13 user_db_engine = engine.create_future_engine( 14 username, password, hostname, database_name, port, 15 connect_args={"connect_timeout": 10} 16 ) 17 try: 18 user_db_engine.connect() 19 print(f"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...") 20 sql_install.install(user_db_engine) 21 types_install.install_mathesar_on_database(user_db_engine) 22 user_db_engine.dispose() 23 except OperationalError: 24 database_created = _create_database( 25 database_name=database_name, 26 hostname=hostname, 27 username=username, 28 password=password, 29 port=port, 30 skip_confirm=skip_confirm 31 ) 32 if database_created: 33 print(f"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...") 34 sql_install.install(user_db_engine) 35 types_install.install_mathesar_on_database(user_db_engine) 36 user_db_engine.dispose() 37 else: 38 print(f"Skipping installing on DB with key {database_name}.") 39 40 41 def _create_database(database_name, hostname, username, password, port, skip_confirm=True): 42 if skip_confirm is True: 43 create_database = "y" 44 else: 45 create_database = input( 46 f"Create a new Database called {database_name}? (y/n) > " 47 ) 48 if create_database.lower() in ["y", "yes"]: 49 # We need to connect to an existing database inorder to create a new Database. 50 # So we use the default Database `postgres` that comes with postgres. 
51 # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible) 52 root_database = "postgres" 53 root_db_engine = engine.create_future_engine( 54 username, password, hostname, root_database, port, 55 connect_args={"connect_timeout": 10} 56 ) 57 with root_db_engine.connect() as conn: 58 conn.execution_options(isolation_level="AUTOCOMMIT") 59 conn.execute(text(f'CREATE DATABASE "{database_name}"')) 60 root_db_engine.dispose() 61 print(f"Created DB is {database_name}.") 62 return True 63 else: 64 print(f"Database {database_name} not created!") 65 return False 66 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/db/install.py b/db/install.py --- a/db/install.py +++ b/db/install.py @@ -1,5 +1,6 @@ +from psycopg.errors import InsufficientPrivilege from sqlalchemy import text -from sqlalchemy.exc import OperationalError +from sqlalchemy.exc import OperationalError, ProgrammingError from db import engine from db.sql import install as sql_install @@ -54,12 +55,20 @@ username, password, hostname, root_database, port, connect_args={"connect_timeout": 10} ) - with root_db_engine.connect() as conn: - conn.execution_options(isolation_level="AUTOCOMMIT") - conn.execute(text(f'CREATE DATABASE "{database_name}"')) - root_db_engine.dispose() - print(f"Created DB is {database_name}.") - return True + try: + with root_db_engine.connect() as conn: + conn.execution_options(isolation_level="AUTOCOMMIT") + conn.execute(text(f'CREATE DATABASE "{database_name}"')) + root_db_engine.dispose() + print(f"Created DB is {database_name}.") + return True + except ProgrammingError as e: + if isinstance(e.orig, InsufficientPrivilege): + print(f"Database {database_name} could not be created due to Insufficient Privilege") + return False + except Exception: + print(f"Database {database_name} could not be created!") + return False else: print(f"Database {database_name} not created!") return False
{"golden_diff": "diff --git a/db/install.py b/db/install.py\n--- a/db/install.py\n+++ b/db/install.py\n@@ -1,5 +1,6 @@\n+from psycopg.errors import InsufficientPrivilege\n from sqlalchemy import text\n-from sqlalchemy.exc import OperationalError\n+from sqlalchemy.exc import OperationalError, ProgrammingError\n \n from db import engine\n from db.sql import install as sql_install\n@@ -54,12 +55,20 @@\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n- with root_db_engine.connect() as conn:\n- conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n- conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n- root_db_engine.dispose()\n- print(f\"Created DB is {database_name}.\")\n- return True\n+ try:\n+ with root_db_engine.connect() as conn:\n+ conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n+ conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n+ root_db_engine.dispose()\n+ print(f\"Created DB is {database_name}.\")\n+ return True\n+ except ProgrammingError as e:\n+ if isinstance(e.orig, InsufficientPrivilege):\n+ print(f\"Database {database_name} could not be created due to Insufficient Privilege\")\n+ return False\n+ except Exception:\n+ print(f\"Database {database_name} could not be created!\")\n+ return False\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "issue": "The requirement of superuser postgresql access is problematic\n## Problem\r\nMathesar needs a Postgres superuser to function correctly, from the docs at https://docs.mathesar.org/installation/build-from-source/ \r\n\r\n## Proposed solution\r\nThe mathesar user should not require superuser access. \r\n\r\n## Additional context\r\nThe superuser is a global permission meaning that a user that has superuser permission will be able to access (and do stuff) not on the mathesar but *all* the databases of the RDBMS. Considering that many production systems have a single RDBMS hosting many application this is a major problem since the mathsar user won't have any access boundaries on the same RDBMS. 
The mathesar user access can be unlimited but *must* be bounded without the mathesar database.\n", "before_files": [{"content": "from sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db import engine\nfrom db.sql import install as sql_install\nfrom db.types import install as types_install\n\n\ndef install_mathesar(\n database_name, username, password, hostname, port, skip_confirm\n):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n username, password, hostname, database_name, port,\n connect_args={\"connect_timeout\": 10}\n )\n try:\n user_db_engine.connect()\n print(f\"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...\")\n sql_install.install(user_db_engine)\n types_install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n except OperationalError:\n database_created = _create_database(\n database_name=database_name,\n hostname=hostname,\n username=username,\n password=password,\n port=port,\n skip_confirm=skip_confirm\n )\n if database_created:\n print(f\"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...\")\n sql_install.install(user_db_engine)\n types_install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n else:\n print(f\"Skipping installing on DB with key {database_name}.\")\n\n\ndef _create_database(database_name, hostname, username, password, port, skip_confirm=True):\n if skip_confirm is True:\n create_database = \"y\"\n else:\n create_database = input(\n f\"Create a new Database called {database_name}? (y/n) > \"\n )\n if create_database.lower() in [\"y\", \"yes\"]:\n # We need to connect to an existing database inorder to create a new Database.\n # So we use the default Database `postgres` that comes with postgres.\n # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "path": "db/install.py"}], "after_files": [{"content": "from psycopg.errors import InsufficientPrivilege\nfrom sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError, ProgrammingError\n\nfrom db import engine\nfrom db.sql import install as sql_install\nfrom db.types import install as types_install\n\n\ndef install_mathesar(\n database_name, username, password, hostname, port, skip_confirm\n):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n username, password, hostname, database_name, port,\n connect_args={\"connect_timeout\": 10}\n )\n try:\n user_db_engine.connect()\n print(f\"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...\")\n sql_install.install(user_db_engine)\n types_install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n except OperationalError:\n database_created = _create_database(\n database_name=database_name,\n hostname=hostname,\n username=username,\n password=password,\n port=port,\n 
skip_confirm=skip_confirm\n )\n if database_created:\n print(f\"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...\")\n sql_install.install(user_db_engine)\n types_install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n else:\n print(f\"Skipping installing on DB with key {database_name}.\")\n\n\ndef _create_database(database_name, hostname, username, password, port, skip_confirm=True):\n if skip_confirm is True:\n create_database = \"y\"\n else:\n create_database = input(\n f\"Create a new Database called {database_name}? (y/n) > \"\n )\n if create_database.lower() in [\"y\", \"yes\"]:\n # We need to connect to an existing database inorder to create a new Database.\n # So we use the default Database `postgres` that comes with postgres.\n # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n try:\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n except ProgrammingError as e:\n if isinstance(e.orig, InsufficientPrivilege):\n print(f\"Database {database_name} could not be created due to Insufficient Privilege\")\n return False\n except Exception:\n print(f\"Database {database_name} could not be created!\")\n return False\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "path": "db/install.py"}]}
1,072
342
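The record above centres on one error-handling pattern: psycopg raises `InsufficientPrivilege` when the connecting role may not run `CREATE DATABASE`, and SQLAlchemy surfaces it wrapped in a `ProgrammingError` whose `.orig` attribute holds the driver exception. The following is a minimal, self-contained sketch of that pattern, separate from the dataset row itself; the connection URL and the `try_create_database` helper name are placeholders for illustration and are not part of Mathesar's codebase.

```python
# Sketch only: mirrors the error handling shown in the golden_diff above.
# Assumes SQLAlchemy 2.x with the psycopg (v3) driver; the URL is a placeholder.
from psycopg.errors import InsufficientPrivilege
from sqlalchemy import create_engine, text
from sqlalchemy.exc import ProgrammingError


def try_create_database(root_url: str, database_name: str) -> bool:
    """Attempt CREATE DATABASE; return True on success, False otherwise."""
    engine = create_engine(root_url, connect_args={"connect_timeout": 10})
    try:
        with engine.connect() as conn:
            # CREATE DATABASE cannot run inside a transaction block.
            conn.execution_options(isolation_level="AUTOCOMMIT")
            conn.execute(text(f'CREATE DATABASE "{database_name}"'))
        print(f"Created DB {database_name}.")
        return True
    except ProgrammingError as e:
        # SQLAlchemy wraps the driver error; the original lives on .orig.
        if isinstance(e.orig, InsufficientPrivilege):
            print(f"{database_name}: connecting role lacks CREATEDB privilege")
        return False
    except Exception:
        print(f"Database {database_name} could not be created!")
        return False
    finally:
        engine.dispose()


# Hypothetical usage (requires a reachable PostgreSQL server):
# try_create_database("postgresql+psycopg://user:pw@localhost/postgres", "mathesar")
```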
gh_patches_debug_24580
rasdani/github-patches
git_diff
qtile__qtile-3261
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Systray widget crash 2022-01-27 09:48:40,366 ERROR libqtile bar.py:_configure_widget():L324 Systray widget crashed during _configure with error: ConfigError('Only one Systray can be used.') [config.txt](https://github.com/qtile/qtile/files/7948239/config.txt) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libqtile/widget/systray.py` Content: ``` 1 # Copyright (c) 2010 Aldo Cortesi 2 # Copyright (c) 2010-2011 dequis 3 # Copyright (c) 2010, 2012 roger 4 # Copyright (c) 2011 Mounier Florian 5 # Copyright (c) 2011-2012, 2014 Tycho Andersen 6 # Copyright (c) 2012 dmpayton 7 # Copyright (c) 2012-2013 Craig Barnes 8 # Copyright (c) 2013 hbc 9 # Copyright (c) 2013 Tao Sauvage 10 # Copyright (c) 2014 Sean Vig 11 # 12 # Permission is hereby granted, free of charge, to any person obtaining a copy 13 # of this software and associated documentation files (the "Software"), to deal 14 # in the Software without restriction, including without limitation the rights 15 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 16 # copies of the Software, and to permit persons to whom the Software is 17 # furnished to do so, subject to the following conditions: 18 # 19 # The above copyright notice and this permission notice shall be included in 20 # all copies or substantial portions of the Software. 21 # 22 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 23 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 24 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 25 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 26 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 27 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 28 # SOFTWARE. 29 from typing import TYPE_CHECKING 30 31 import xcffib 32 from xcffib.xproto import ClientMessageData, ClientMessageEvent, EventMask, SetMode 33 34 from libqtile import bar 35 from libqtile.backend.x11 import window 36 from libqtile.confreader import ConfigError 37 from libqtile.widget import base 38 39 if TYPE_CHECKING: 40 from typing import List, Optional 41 42 XEMBED_PROTOCOL_VERSION = 0 43 44 45 class Icon(window._Window): 46 _window_mask = EventMask.StructureNotify | EventMask.PropertyChange | EventMask.Exposure 47 48 def __init__(self, win, qtile, systray): 49 window._Window.__init__(self, win, qtile) 50 self.systray = systray 51 # win.get_name() may return None when apps provide a temporary window before the icon window 52 # we need something in self.name in order to sort icons so we use the window's WID. 
53 self.name = win.get_name() or str(win.wid) 54 self.update_size() 55 56 def __eq__(self, other): 57 if not isinstance(other, Icon): 58 return False 59 60 return self.window.wid == other.window.wid 61 62 def update_size(self): 63 icon_size = self.systray.icon_size 64 self.update_hints() 65 66 width = self.hints.get("min_width", icon_size) 67 height = self.hints.get("min_height", icon_size) 68 69 width = max(width, icon_size) 70 height = max(height, icon_size) 71 72 if height > icon_size: 73 width = width * icon_size // height 74 height = icon_size 75 76 self.width = width 77 self.height = height 78 return False 79 80 def handle_PropertyNotify(self, e): # noqa: N802 81 name = self.qtile.core.conn.atoms.get_name(e.atom) 82 if name == "_XEMBED_INFO": 83 info = self.window.get_property("_XEMBED_INFO", unpack=int) 84 if info and info[1]: 85 self.systray.bar.draw() 86 87 return False 88 89 def handle_DestroyNotify(self, event): # noqa: N802 90 wid = event.window 91 icon = self.qtile.windows_map.pop(wid) 92 self.systray.tray_icons.remove(icon) 93 self.systray.bar.draw() 94 return False 95 96 handle_UnmapNotify = handle_DestroyNotify # noqa: N815 97 98 99 class Systray(window._Window, base._Widget): 100 """ 101 A widget that manages system tray. 102 103 Only one Systray widget is allowed. Adding additional Systray 104 widgets will result in a ConfigError. 105 106 .. note:: 107 Icons will not render correctly where the bar/widget is 108 drawn with a semi-transparent background. Instead, icons 109 will be drawn with a transparent background. 110 111 If using this widget it is therefore recommended to use 112 a fully opaque background colour or a fully transparent 113 one. 114 """ 115 116 _instances = 0 117 118 _window_mask = EventMask.StructureNotify | EventMask.Exposure 119 120 orientations = base.ORIENTATION_BOTH 121 122 defaults = [ 123 ("icon_size", 20, "Icon width"), 124 ("padding", 5, "Padding between icons"), 125 ] 126 127 def __init__(self, **config): 128 base._Widget.__init__(self, bar.CALCULATED, **config) 129 self.add_defaults(Systray.defaults) 130 self.tray_icons = [] 131 self.screen = 0 132 self._name = config.get("name", "systray") 133 self._wm_class: Optional[List[str]] = None 134 135 def calculate_length(self): 136 if self.bar.horizontal: 137 length = sum(i.width for i in self.tray_icons) 138 else: 139 length = sum(i.height for i in self.tray_icons) 140 length += self.padding * len(self.tray_icons) 141 return length 142 143 def _configure(self, qtile, bar): 144 base._Widget._configure(self, qtile, bar) 145 146 if Systray._instances > 0: 147 raise ConfigError("Only one Systray can be used.") 148 149 if self.configured: 150 return 151 152 self.conn = conn = qtile.core.conn 153 win = conn.create_window(-1, -1, 1, 1) 154 window._Window.__init__(self, window.XWindow(conn, win.wid), qtile) 155 qtile.windows_map[win.wid] = self 156 157 # window._Window.__init__ overwrites the widget name so we need to restore it 158 self.name = self._name 159 160 # Even when we have multiple "Screen"s, we are setting up as the system 161 # tray on a particular X display, that is the screen we need to 162 # reference in the atom 163 if qtile.current_screen: 164 self.screen = qtile.current_screen.index 165 self.bar = bar 166 atoms = conn.atoms 167 168 # We need tray to tell icons which visual to use. 169 # This needs to be the same as the bar/widget. 170 # This mainly benefits transparent bars. 
171 conn.conn.core.ChangeProperty( 172 xcffib.xproto.PropMode.Replace, 173 win.wid, 174 atoms["_NET_SYSTEM_TRAY_VISUAL"], 175 xcffib.xproto.Atom.VISUALID, 176 32, 177 1, 178 [self.drawer._visual.visual_id], 179 ) 180 181 conn.conn.core.SetSelectionOwner( 182 win.wid, atoms["_NET_SYSTEM_TRAY_S{:d}".format(self.screen)], xcffib.CurrentTime 183 ) 184 data = [ 185 xcffib.CurrentTime, 186 atoms["_NET_SYSTEM_TRAY_S{:d}".format(self.screen)], 187 win.wid, 188 0, 189 0, 190 ] 191 union = ClientMessageData.synthetic(data, "I" * 5) 192 event = ClientMessageEvent.synthetic( 193 format=32, window=qtile.core._root.wid, type=atoms["MANAGER"], data=union 194 ) 195 qtile.core._root.send_event(event, mask=EventMask.StructureNotify) 196 197 Systray._instances += 1 198 199 def create_mirror(self): 200 """ 201 Systray cannot be mirrored as we do not use a Drawer object to render icons. 202 203 Return itself so that, when the bar tries to configure it again, a ConfigError 204 is raised. 205 """ 206 return self 207 208 def handle_ClientMessage(self, event): # noqa: N802 209 atoms = self.conn.atoms 210 211 opcode = event.type 212 data = event.data.data32 213 message = data[1] 214 wid = data[2] 215 216 parent = self.bar.window.window 217 218 if opcode == atoms["_NET_SYSTEM_TRAY_OPCODE"] and message == 0: 219 w = window.XWindow(self.conn, wid) 220 icon = Icon(w, self.qtile, self) 221 if icon not in self.tray_icons: 222 self.tray_icons.append(icon) 223 self.tray_icons.sort(key=lambda icon: icon.name) 224 self.qtile.windows_map[wid] = icon 225 226 self.conn.conn.core.ChangeSaveSet(SetMode.Insert, wid) 227 self.conn.conn.core.ReparentWindow(wid, parent.wid, 0, 0) 228 self.conn.conn.flush() 229 230 info = icon.window.get_property("_XEMBED_INFO", unpack=int) 231 232 if not info: 233 self.bar.draw() 234 return False 235 236 if info[1]: 237 self.bar.draw() 238 239 return False 240 241 def draw(self): 242 offset = self.padding 243 self.drawer.clear(self.background or self.bar.background) 244 self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.length) 245 for pos, icon in enumerate(self.tray_icons): 246 icon.window.set_attribute(backpixmap=self.drawer.pixmap) 247 if self.bar.horizontal: 248 xoffset = self.offsetx + offset 249 yoffset = self.bar.height // 2 - self.icon_size // 2 + self.offsety 250 step = icon.width 251 else: 252 xoffset = self.bar.width // 2 - self.icon_size // 2 + self.offsetx 253 yoffset = self.offsety + offset 254 step = icon.height 255 256 icon.place(xoffset, yoffset, icon.width, self.icon_size, 0, None) 257 if icon.hidden: 258 icon.unhide() 259 data = [ 260 self.conn.atoms["_XEMBED_EMBEDDED_NOTIFY"], 261 xcffib.xproto.Time.CurrentTime, 262 0, 263 self.bar.window.wid, 264 XEMBED_PROTOCOL_VERSION, 265 ] 266 u = xcffib.xproto.ClientMessageData.synthetic(data, "I" * 5) 267 event = xcffib.xproto.ClientMessageEvent.synthetic( 268 format=32, window=icon.wid, type=self.conn.atoms["_XEMBED"], data=u 269 ) 270 self.window.send_event(event) 271 272 offset += step + self.padding 273 274 def finalize(self): 275 base._Widget.finalize(self) 276 atoms = self.conn.atoms 277 self.conn.conn.core.SetSelectionOwner( 278 0, 279 atoms["_NET_SYSTEM_TRAY_S{:d}".format(self.screen)], 280 xcffib.CurrentTime, 281 ) 282 self.hide() 283 284 root = self.qtile.core._root.wid 285 for icon in self.tray_icons: 286 self.conn.conn.core.ReparentWindow(icon.window.wid, root, 0, 0) 287 self.conn.conn.flush() 288 289 del self.qtile.windows_map[self.wid] 290 self.conn.conn.core.DestroyWindow(self.wid) 291 292 
Systray._instances -= 1 293 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libqtile/widget/systray.py b/libqtile/widget/systray.py --- a/libqtile/widget/systray.py +++ b/libqtile/widget/systray.py @@ -143,12 +143,12 @@ def _configure(self, qtile, bar): base._Widget._configure(self, qtile, bar) - if Systray._instances > 0: - raise ConfigError("Only one Systray can be used.") - if self.configured: return + if Systray._instances > 0: + raise ConfigError("Only one Systray can be used.") + self.conn = conn = qtile.core.conn win = conn.create_window(-1, -1, 1, 1) window._Window.__init__(self, window.XWindow(conn, win.wid), qtile) @@ -200,10 +200,10 @@ """ Systray cannot be mirrored as we do not use a Drawer object to render icons. - Return itself so that, when the bar tries to configure it again, a ConfigError - is raised. + Return new, unconfigured instance so that, when the bar tries to configure it + again, a ConfigError is raised. """ - return self + return Systray() def handle_ClientMessage(self, event): # noqa: N802 atoms = self.conn.atoms
{"golden_diff": "diff --git a/libqtile/widget/systray.py b/libqtile/widget/systray.py\n--- a/libqtile/widget/systray.py\n+++ b/libqtile/widget/systray.py\n@@ -143,12 +143,12 @@\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n \n- if Systray._instances > 0:\n- raise ConfigError(\"Only one Systray can be used.\")\n-\n if self.configured:\n return\n \n+ if Systray._instances > 0:\n+ raise ConfigError(\"Only one Systray can be used.\")\n+\n self.conn = conn = qtile.core.conn\n win = conn.create_window(-1, -1, 1, 1)\n window._Window.__init__(self, window.XWindow(conn, win.wid), qtile)\n@@ -200,10 +200,10 @@\n \"\"\"\n Systray cannot be mirrored as we do not use a Drawer object to render icons.\n \n- Return itself so that, when the bar tries to configure it again, a ConfigError\n- is raised.\n+ Return new, unconfigured instance so that, when the bar tries to configure it\n+ again, a ConfigError is raised.\n \"\"\"\n- return self\n+ return Systray()\n \n def handle_ClientMessage(self, event): # noqa: N802\n atoms = self.conn.atoms\n", "issue": "Systray widget crash\n2022-01-27 09:48:40,366 ERROR libqtile bar.py:_configure_widget():L324 Systray widget crashed during _configure with error: ConfigError('Only one Systray can be used.')\r\n[config.txt](https://github.com/qtile/qtile/files/7948239/config.txt)\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2010 Aldo Cortesi\n# Copyright (c) 2010-2011 dequis\n# Copyright (c) 2010, 2012 roger\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2011-2012, 2014 Tycho Andersen\n# Copyright (c) 2012 dmpayton\n# Copyright (c) 2012-2013 Craig Barnes\n# Copyright (c) 2013 hbc\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2014 Sean Vig\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom typing import TYPE_CHECKING\n\nimport xcffib\nfrom xcffib.xproto import ClientMessageData, ClientMessageEvent, EventMask, SetMode\n\nfrom libqtile import bar\nfrom libqtile.backend.x11 import window\nfrom libqtile.confreader import ConfigError\nfrom libqtile.widget import base\n\nif TYPE_CHECKING:\n from typing import List, Optional\n\nXEMBED_PROTOCOL_VERSION = 0\n\n\nclass Icon(window._Window):\n _window_mask = EventMask.StructureNotify | EventMask.PropertyChange | EventMask.Exposure\n\n def __init__(self, win, qtile, systray):\n window._Window.__init__(self, win, qtile)\n self.systray = systray\n # win.get_name() may return None when apps provide a temporary window before the icon window\n # we need something in self.name in order to sort icons so we use the window's WID.\n self.name = win.get_name() or str(win.wid)\n self.update_size()\n\n def __eq__(self, other):\n if not isinstance(other, Icon):\n return False\n\n return self.window.wid == other.window.wid\n\n def update_size(self):\n icon_size = self.systray.icon_size\n self.update_hints()\n\n width = self.hints.get(\"min_width\", icon_size)\n height = self.hints.get(\"min_height\", icon_size)\n\n width = max(width, icon_size)\n height = max(height, icon_size)\n\n if height > icon_size:\n width = width * icon_size // height\n height = icon_size\n\n self.width = width\n self.height = height\n return False\n\n def handle_PropertyNotify(self, e): # noqa: N802\n name = self.qtile.core.conn.atoms.get_name(e.atom)\n if name == \"_XEMBED_INFO\":\n info = self.window.get_property(\"_XEMBED_INFO\", unpack=int)\n if info and info[1]:\n self.systray.bar.draw()\n\n return False\n\n def handle_DestroyNotify(self, event): # noqa: N802\n wid = event.window\n icon = self.qtile.windows_map.pop(wid)\n self.systray.tray_icons.remove(icon)\n self.systray.bar.draw()\n return False\n\n handle_UnmapNotify = handle_DestroyNotify # noqa: N815\n\n\nclass Systray(window._Window, base._Widget):\n \"\"\"\n A widget that manages system tray.\n\n Only one Systray widget is allowed. Adding additional Systray\n widgets will result in a ConfigError.\n\n .. note::\n Icons will not render correctly where the bar/widget is\n drawn with a semi-transparent background. 
Instead, icons\n will be drawn with a transparent background.\n\n If using this widget it is therefore recommended to use\n a fully opaque background colour or a fully transparent\n one.\n \"\"\"\n\n _instances = 0\n\n _window_mask = EventMask.StructureNotify | EventMask.Exposure\n\n orientations = base.ORIENTATION_BOTH\n\n defaults = [\n (\"icon_size\", 20, \"Icon width\"),\n (\"padding\", 5, \"Padding between icons\"),\n ]\n\n def __init__(self, **config):\n base._Widget.__init__(self, bar.CALCULATED, **config)\n self.add_defaults(Systray.defaults)\n self.tray_icons = []\n self.screen = 0\n self._name = config.get(\"name\", \"systray\")\n self._wm_class: Optional[List[str]] = None\n\n def calculate_length(self):\n if self.bar.horizontal:\n length = sum(i.width for i in self.tray_icons)\n else:\n length = sum(i.height for i in self.tray_icons)\n length += self.padding * len(self.tray_icons)\n return length\n\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n\n if Systray._instances > 0:\n raise ConfigError(\"Only one Systray can be used.\")\n\n if self.configured:\n return\n\n self.conn = conn = qtile.core.conn\n win = conn.create_window(-1, -1, 1, 1)\n window._Window.__init__(self, window.XWindow(conn, win.wid), qtile)\n qtile.windows_map[win.wid] = self\n\n # window._Window.__init__ overwrites the widget name so we need to restore it\n self.name = self._name\n\n # Even when we have multiple \"Screen\"s, we are setting up as the system\n # tray on a particular X display, that is the screen we need to\n # reference in the atom\n if qtile.current_screen:\n self.screen = qtile.current_screen.index\n self.bar = bar\n atoms = conn.atoms\n\n # We need tray to tell icons which visual to use.\n # This needs to be the same as the bar/widget.\n # This mainly benefits transparent bars.\n conn.conn.core.ChangeProperty(\n xcffib.xproto.PropMode.Replace,\n win.wid,\n atoms[\"_NET_SYSTEM_TRAY_VISUAL\"],\n xcffib.xproto.Atom.VISUALID,\n 32,\n 1,\n [self.drawer._visual.visual_id],\n )\n\n conn.conn.core.SetSelectionOwner(\n win.wid, atoms[\"_NET_SYSTEM_TRAY_S{:d}\".format(self.screen)], xcffib.CurrentTime\n )\n data = [\n xcffib.CurrentTime,\n atoms[\"_NET_SYSTEM_TRAY_S{:d}\".format(self.screen)],\n win.wid,\n 0,\n 0,\n ]\n union = ClientMessageData.synthetic(data, \"I\" * 5)\n event = ClientMessageEvent.synthetic(\n format=32, window=qtile.core._root.wid, type=atoms[\"MANAGER\"], data=union\n )\n qtile.core._root.send_event(event, mask=EventMask.StructureNotify)\n\n Systray._instances += 1\n\n def create_mirror(self):\n \"\"\"\n Systray cannot be mirrored as we do not use a Drawer object to render icons.\n\n Return itself so that, when the bar tries to configure it again, a ConfigError\n is raised.\n \"\"\"\n return self\n\n def handle_ClientMessage(self, event): # noqa: N802\n atoms = self.conn.atoms\n\n opcode = event.type\n data = event.data.data32\n message = data[1]\n wid = data[2]\n\n parent = self.bar.window.window\n\n if opcode == atoms[\"_NET_SYSTEM_TRAY_OPCODE\"] and message == 0:\n w = window.XWindow(self.conn, wid)\n icon = Icon(w, self.qtile, self)\n if icon not in self.tray_icons:\n self.tray_icons.append(icon)\n self.tray_icons.sort(key=lambda icon: icon.name)\n self.qtile.windows_map[wid] = icon\n\n self.conn.conn.core.ChangeSaveSet(SetMode.Insert, wid)\n self.conn.conn.core.ReparentWindow(wid, parent.wid, 0, 0)\n self.conn.conn.flush()\n\n info = icon.window.get_property(\"_XEMBED_INFO\", unpack=int)\n\n if not info:\n self.bar.draw()\n return False\n\n if 
info[1]:\n self.bar.draw()\n\n return False\n\n def draw(self):\n offset = self.padding\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.length)\n for pos, icon in enumerate(self.tray_icons):\n icon.window.set_attribute(backpixmap=self.drawer.pixmap)\n if self.bar.horizontal:\n xoffset = self.offsetx + offset\n yoffset = self.bar.height // 2 - self.icon_size // 2 + self.offsety\n step = icon.width\n else:\n xoffset = self.bar.width // 2 - self.icon_size // 2 + self.offsetx\n yoffset = self.offsety + offset\n step = icon.height\n\n icon.place(xoffset, yoffset, icon.width, self.icon_size, 0, None)\n if icon.hidden:\n icon.unhide()\n data = [\n self.conn.atoms[\"_XEMBED_EMBEDDED_NOTIFY\"],\n xcffib.xproto.Time.CurrentTime,\n 0,\n self.bar.window.wid,\n XEMBED_PROTOCOL_VERSION,\n ]\n u = xcffib.xproto.ClientMessageData.synthetic(data, \"I\" * 5)\n event = xcffib.xproto.ClientMessageEvent.synthetic(\n format=32, window=icon.wid, type=self.conn.atoms[\"_XEMBED\"], data=u\n )\n self.window.send_event(event)\n\n offset += step + self.padding\n\n def finalize(self):\n base._Widget.finalize(self)\n atoms = self.conn.atoms\n self.conn.conn.core.SetSelectionOwner(\n 0,\n atoms[\"_NET_SYSTEM_TRAY_S{:d}\".format(self.screen)],\n xcffib.CurrentTime,\n )\n self.hide()\n\n root = self.qtile.core._root.wid\n for icon in self.tray_icons:\n self.conn.conn.core.ReparentWindow(icon.window.wid, root, 0, 0)\n self.conn.conn.flush()\n\n del self.qtile.windows_map[self.wid]\n self.conn.conn.core.DestroyWindow(self.wid)\n\n Systray._instances -= 1\n", "path": "libqtile/widget/systray.py"}], "after_files": [{"content": "# Copyright (c) 2010 Aldo Cortesi\n# Copyright (c) 2010-2011 dequis\n# Copyright (c) 2010, 2012 roger\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2011-2012, 2014 Tycho Andersen\n# Copyright (c) 2012 dmpayton\n# Copyright (c) 2012-2013 Craig Barnes\n# Copyright (c) 2013 hbc\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2014 Sean Vig\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom typing import TYPE_CHECKING\n\nimport xcffib\nfrom xcffib.xproto import ClientMessageData, ClientMessageEvent, EventMask, SetMode\n\nfrom libqtile import bar\nfrom libqtile.backend.x11 import window\nfrom libqtile.confreader import ConfigError\nfrom libqtile.widget import base\n\nif TYPE_CHECKING:\n from typing import List, Optional\n\nXEMBED_PROTOCOL_VERSION = 0\n\n\nclass Icon(window._Window):\n _window_mask = EventMask.StructureNotify | EventMask.PropertyChange | EventMask.Exposure\n\n def __init__(self, win, qtile, systray):\n window._Window.__init__(self, win, qtile)\n self.systray = systray\n # win.get_name() may return None when apps provide a temporary window before the icon window\n # we need something in self.name in order to sort icons so we use the window's WID.\n self.name = win.get_name() or str(win.wid)\n self.update_size()\n\n def __eq__(self, other):\n if not isinstance(other, Icon):\n return False\n\n return self.window.wid == other.window.wid\n\n def update_size(self):\n icon_size = self.systray.icon_size\n self.update_hints()\n\n width = self.hints.get(\"min_width\", icon_size)\n height = self.hints.get(\"min_height\", icon_size)\n\n width = max(width, icon_size)\n height = max(height, icon_size)\n\n if height > icon_size:\n width = width * icon_size // height\n height = icon_size\n\n self.width = width\n self.height = height\n return False\n\n def handle_PropertyNotify(self, e): # noqa: N802\n name = self.qtile.core.conn.atoms.get_name(e.atom)\n if name == \"_XEMBED_INFO\":\n info = self.window.get_property(\"_XEMBED_INFO\", unpack=int)\n if info and info[1]:\n self.systray.bar.draw()\n\n return False\n\n def handle_DestroyNotify(self, event): # noqa: N802\n wid = event.window\n icon = self.qtile.windows_map.pop(wid)\n self.systray.tray_icons.remove(icon)\n self.systray.bar.draw()\n return False\n\n handle_UnmapNotify = handle_DestroyNotify # noqa: N815\n\n\nclass Systray(window._Window, base._Widget):\n \"\"\"\n A widget that manages system tray.\n\n Only one Systray widget is allowed. Adding additional Systray\n widgets will result in a ConfigError.\n\n .. note::\n Icons will not render correctly where the bar/widget is\n drawn with a semi-transparent background. 
Instead, icons\n will be drawn with a transparent background.\n\n If using this widget it is therefore recommended to use\n a fully opaque background colour or a fully transparent\n one.\n \"\"\"\n\n _instances = 0\n\n _window_mask = EventMask.StructureNotify | EventMask.Exposure\n\n orientations = base.ORIENTATION_BOTH\n\n defaults = [\n (\"icon_size\", 20, \"Icon width\"),\n (\"padding\", 5, \"Padding between icons\"),\n ]\n\n def __init__(self, **config):\n base._Widget.__init__(self, bar.CALCULATED, **config)\n self.add_defaults(Systray.defaults)\n self.tray_icons = []\n self.screen = 0\n self._name = config.get(\"name\", \"systray\")\n self._wm_class: Optional[List[str]] = None\n\n def calculate_length(self):\n if self.bar.horizontal:\n length = sum(i.width for i in self.tray_icons)\n else:\n length = sum(i.height for i in self.tray_icons)\n length += self.padding * len(self.tray_icons)\n return length\n\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n\n if self.configured:\n return\n\n if Systray._instances > 0:\n raise ConfigError(\"Only one Systray can be used.\")\n\n self.conn = conn = qtile.core.conn\n win = conn.create_window(-1, -1, 1, 1)\n window._Window.__init__(self, window.XWindow(conn, win.wid), qtile)\n qtile.windows_map[win.wid] = self\n\n # window._Window.__init__ overwrites the widget name so we need to restore it\n self.name = self._name\n\n # Even when we have multiple \"Screen\"s, we are setting up as the system\n # tray on a particular X display, that is the screen we need to\n # reference in the atom\n if qtile.current_screen:\n self.screen = qtile.current_screen.index\n self.bar = bar\n atoms = conn.atoms\n\n # We need tray to tell icons which visual to use.\n # This needs to be the same as the bar/widget.\n # This mainly benefits transparent bars.\n conn.conn.core.ChangeProperty(\n xcffib.xproto.PropMode.Replace,\n win.wid,\n atoms[\"_NET_SYSTEM_TRAY_VISUAL\"],\n xcffib.xproto.Atom.VISUALID,\n 32,\n 1,\n [self.drawer._visual.visual_id],\n )\n\n conn.conn.core.SetSelectionOwner(\n win.wid, atoms[\"_NET_SYSTEM_TRAY_S{:d}\".format(self.screen)], xcffib.CurrentTime\n )\n data = [\n xcffib.CurrentTime,\n atoms[\"_NET_SYSTEM_TRAY_S{:d}\".format(self.screen)],\n win.wid,\n 0,\n 0,\n ]\n union = ClientMessageData.synthetic(data, \"I\" * 5)\n event = ClientMessageEvent.synthetic(\n format=32, window=qtile.core._root.wid, type=atoms[\"MANAGER\"], data=union\n )\n qtile.core._root.send_event(event, mask=EventMask.StructureNotify)\n\n Systray._instances += 1\n\n def create_mirror(self):\n \"\"\"\n Systray cannot be mirrored as we do not use a Drawer object to render icons.\n\n Return new, unconfigured instance so that, when the bar tries to configure it\n again, a ConfigError is raised.\n \"\"\"\n return Systray()\n\n def handle_ClientMessage(self, event): # noqa: N802\n atoms = self.conn.atoms\n\n opcode = event.type\n data = event.data.data32\n message = data[1]\n wid = data[2]\n\n parent = self.bar.window.window\n\n if opcode == atoms[\"_NET_SYSTEM_TRAY_OPCODE\"] and message == 0:\n w = window.XWindow(self.conn, wid)\n icon = Icon(w, self.qtile, self)\n if icon not in self.tray_icons:\n self.tray_icons.append(icon)\n self.tray_icons.sort(key=lambda icon: icon.name)\n self.qtile.windows_map[wid] = icon\n\n self.conn.conn.core.ChangeSaveSet(SetMode.Insert, wid)\n self.conn.conn.core.ReparentWindow(wid, parent.wid, 0, 0)\n self.conn.conn.flush()\n\n info = icon.window.get_property(\"_XEMBED_INFO\", unpack=int)\n\n if not info:\n 
self.bar.draw()\n return False\n\n if info[1]:\n self.bar.draw()\n\n return False\n\n def draw(self):\n offset = self.padding\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.length)\n for pos, icon in enumerate(self.tray_icons):\n icon.window.set_attribute(backpixmap=self.drawer.pixmap)\n if self.bar.horizontal:\n xoffset = self.offsetx + offset\n yoffset = self.bar.height // 2 - self.icon_size // 2 + self.offsety\n step = icon.width\n else:\n xoffset = self.bar.width // 2 - self.icon_size // 2 + self.offsetx\n yoffset = self.offsety + offset\n step = icon.height\n\n icon.place(xoffset, yoffset, icon.width, self.icon_size, 0, None)\n if icon.hidden:\n icon.unhide()\n data = [\n self.conn.atoms[\"_XEMBED_EMBEDDED_NOTIFY\"],\n xcffib.xproto.Time.CurrentTime,\n 0,\n self.bar.window.wid,\n XEMBED_PROTOCOL_VERSION,\n ]\n u = xcffib.xproto.ClientMessageData.synthetic(data, \"I\" * 5)\n event = xcffib.xproto.ClientMessageEvent.synthetic(\n format=32, window=icon.wid, type=self.conn.atoms[\"_XEMBED\"], data=u\n )\n self.window.send_event(event)\n\n offset += step + self.padding\n\n def finalize(self):\n base._Widget.finalize(self)\n atoms = self.conn.atoms\n self.conn.conn.core.SetSelectionOwner(\n 0,\n atoms[\"_NET_SYSTEM_TRAY_S{:d}\".format(self.screen)],\n xcffib.CurrentTime,\n )\n self.hide()\n\n root = self.qtile.core._root.wid\n for icon in self.tray_icons:\n self.conn.conn.core.ReparentWindow(icon.window.wid, root, 0, 0)\n self.conn.conn.flush()\n\n del self.qtile.windows_map[self.wid]\n self.conn.conn.core.DestroyWindow(self.wid)\n\n Systray._instances -= 1\n", "path": "libqtile/widget/systray.py"}]}
3,613
337
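The fix in the record above is an ordering change in a singleton widget: the "already configured" early return must run before the instance-count guard, and `create_mirror()` must hand back a fresh, unconfigured object rather than `self`. The stripped-down model below illustrates that control flow only; `SingletonWidget` is a hypothetical stand-in, not the real qtile `Systray` class.

```python
# Sketch only: models the guard ordering from the qtile golden_diff above.
class SingletonWidget:
    _instances = 0

    def __init__(self):
        self.configured = False

    def _configure(self):
        if self.configured:
            # Reconfiguring the same object (e.g. on screen changes) is a no-op.
            return
        if SingletonWidget._instances > 0:
            raise RuntimeError("Only one instance can be used.")
        SingletonWidget._instances += 1
        self.configured = True

    def create_mirror(self):
        # A new, unconfigured instance means a second bar that tries to
        # configure the mirror trips the guard above with a clear error,
        # instead of crashing later on a shared, half-configured object.
        return SingletonWidget()


if __name__ == "__main__":
    w = SingletonWidget()
    w._configure()
    w._configure()                      # no-op: already configured
    try:
        w.create_mirror()._configure()  # second instance is rejected
    except RuntimeError as exc:
        print(exc)                      # "Only one instance can be used."
```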
gh_patches_debug_32051
rasdani/github-patches
git_diff
apache__airflow-18224
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- POST /api/v1/users fails with exception ### Apache Airflow version main (development) ### Operating System From Astronomer’s QA team ### Versions of Apache Airflow Providers _No response_ ### Deployment Astronomer ### Deployment details _No response_ ### What happened When adding a new user, The following exception is emitted: ``` Traceback (most recent call last): File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 2447, in wsgi_app response = self.full_dispatch_request() File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1952, in full_dispatch_request rv = self.handle_user_exception(e) File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1821, in handle_user_exception reraise(exc_type, exc_value, tb) File "/usr/local/lib/python3.9/site-packages/flask/_compat.py", line 39, in reraise raise value File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1950, in full_dispatch_request rv = self.dispatch_request() File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1936, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/decorator.py", line 48, in wrapper response = function(request) File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/uri_parsing.py", line 144, in wrapper response = function(request) File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/validation.py", line 184, in wrapper response = function(request) File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/response.py", line 103, in wrapper response = function(request) File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/parameter.py", line 121, in wrapper return function(**kwargs) File "/usr/local/lib/python3.9/site-packages/airflow/api_connexion/security.py", line 47, in decorated return func(*args, **kwargs) File "/usr/local/lib/python3.9/site-packages/airflow/api_connexion/endpoints/user_endpoint.py", line 105, in post_user user.roles.extend(roles_to_add) AttributeError: 'bool' object has no attribute 'roles' ``` The immediate cause to this exception is F.A.B. returns `False` when it fails to add a new user. The problem, however, is _why_ excactly it failed. This is the payload used: ```json { "username": "username6", "password": "password1", "email": "[email protected]", "first_name": "user2", "last_name": "test1", "roles":[{"name":"Admin"},{"name":"Viewer"}] } ``` This went through validation, therefore we know 1. The POST-ing user has permission to create a new user. 2. The format is correct (including the nested roles). 3. There is not already an existing `username6` in the database. 4. All listed roles exist. (All these are already covered by unit tests.) Further complicating the issue is F.A.B.’s security manager swallows an exception when this happens, and only logs the exception to the server. And we’re having trouble locating that line of log. It’s quite difficult to diagnose further, so I’m posting this hoping someone has better luck reproducing this. I will submit a fix to correct the immediate issue, making the API emit 500 with something like “Failed to create user for unknown reason” to make the failure _slightly_ less confusing. 
### What you expected to happen _No response_ ### How to reproduce _No response_ ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `airflow/api_connexion/endpoints/user_endpoint.py` Content: ``` 1 # Licensed to the Apache Software Foundation (ASF) under one 2 # or more contributor license agreements. See the NOTICE file 3 # distributed with this work for additional information 4 # regarding copyright ownership. The ASF licenses this file 5 # to you under the Apache License, Version 2.0 (the 6 # "License"); you may not use this file except in compliance 7 # with the License. You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, 12 # software distributed under the License is distributed on an 13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 # KIND, either express or implied. See the License for the 15 # specific language governing permissions and limitations 16 # under the License. 17 from flask import current_app, request 18 from flask_appbuilder.security.sqla.models import User 19 from marshmallow import ValidationError 20 from sqlalchemy import func 21 from werkzeug.security import generate_password_hash 22 23 from airflow.api_connexion import security 24 from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound 25 from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters 26 from airflow.api_connexion.schemas.user_schema import ( 27 UserCollection, 28 user_collection_item_schema, 29 user_collection_schema, 30 user_schema, 31 ) 32 from airflow.security import permissions 33 34 35 @security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_USER)]) 36 def get_user(username): 37 """Get a user""" 38 ab_security_manager = current_app.appbuilder.sm 39 user = ab_security_manager.find_user(username=username) 40 if not user: 41 raise NotFound(title="User not found", detail=f"The User with username `{username}` was not found") 42 return user_collection_item_schema.dump(user) 43 44 45 @security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_USER)]) 46 @format_parameters({'limit': check_limit}) 47 def get_users(limit, order_by='id', offset=None): 48 """Get users""" 49 appbuilder = current_app.appbuilder 50 session = appbuilder.get_session 51 total_entries = session.query(func.count(User.id)).scalar() 52 to_replace = {"user_id": "id"} 53 allowed_filter_attrs = [ 54 "user_id", 55 'id', 56 "first_name", 57 "last_name", 58 "user_name", 59 "email", 60 "is_active", 61 "role", 62 ] 63 query = session.query(User) 64 query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs) 65 users = query.offset(offset).limit(limit).all() 66 67 return user_collection_schema.dump(UserCollection(users=users, total_entries=total_entries)) 68 69 70 @security.requires_access([(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_USER)]) 71 def post_user(): 72 """Create a new user""" 73 try: 74 data = user_schema.load(request.json) 75 except ValidationError as e: 76 raise BadRequest(detail=str(e.messages)) 77 78 security_manager = current_app.appbuilder.sm 79 80 user = 
security_manager.find_user(username=data["username"]) 81 if user is not None: 82 detail = f"Username `{user.username}` already exists. Use PATCH to update." 83 raise AlreadyExists(detail=detail) 84 85 roles_to_add = [] 86 missing_role_names = [] 87 for role_data in data.pop("roles", ()): 88 role_name = role_data["name"] 89 role = security_manager.find_role(role_name) 90 if role is None: 91 missing_role_names.append(role_name) 92 else: 93 roles_to_add.append(role) 94 if missing_role_names: 95 detail = f"Unknown roles: {', '.join(repr(n) for n in missing_role_names)}" 96 raise BadRequest(detail=detail) 97 98 if roles_to_add: 99 default_role = roles_to_add.pop() 100 else: # No roles provided, use the F.A.B's default registered user role. 101 default_role = security_manager.find_role(security_manager.auth_user_registration_role) 102 103 user = security_manager.add_user(role=default_role, **data) 104 if roles_to_add: 105 user.roles.extend(roles_to_add) 106 security_manager.update_user(user) 107 return user_schema.dump(user) 108 109 110 @security.requires_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_USER)]) 111 def patch_user(username, update_mask=None): 112 """Update a role""" 113 try: 114 data = user_schema.load(request.json) 115 except ValidationError as e: 116 raise BadRequest(detail=str(e.messages)) 117 118 security_manager = current_app.appbuilder.sm 119 120 user = security_manager.find_user(username=username) 121 if user is None: 122 detail = f"The User with username `{username}` was not found" 123 raise NotFound(title="User not found", detail=detail) 124 125 # Get fields to update. 'username' is always excluded (and it's an error to 126 # include it in update_maek). 127 if update_mask is not None: 128 masked_data = {} 129 missing_mask_names = [] 130 for field in update_mask: 131 field = field.strip() 132 try: 133 masked_data[field] = data[field] 134 except KeyError: 135 missing_mask_names.append(field) 136 if missing_mask_names: 137 detail = f"Unknown update masks: {', '.join(repr(n) for n in missing_mask_names)}" 138 raise BadRequest(detail=detail) 139 if "username" in masked_data: 140 raise BadRequest("Cannot update fields: 'username'") 141 data = masked_data 142 else: 143 data.pop("username", None) 144 145 if "roles" in data: 146 roles_to_update = [] 147 missing_role_names = [] 148 for role_data in data.pop("roles", ()): 149 role_name = role_data["name"] 150 role = security_manager.find_role(role_name) 151 if role is None: 152 missing_role_names.append(role_name) 153 else: 154 roles_to_update.append(role) 155 if missing_role_names: 156 detail = f"Unknown roles: {', '.join(repr(n) for n in missing_role_names)}" 157 raise BadRequest(detail=detail) 158 else: 159 roles_to_update = None # Don't change existing value. 
160 161 if "password" in data: 162 user.password = generate_password_hash(data.pop("password")) 163 if roles_to_update is not None: 164 user.roles = roles_to_update 165 for key, value in data.items(): 166 setattr(user, key, value) 167 security_manager.update_user(user) 168 169 return user_schema.dump(user) 170 171 172 @security.requires_access([(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_USER)]) 173 def delete_user(username): 174 """Delete a user""" 175 security_manager = current_app.appbuilder.sm 176 177 user = security_manager.find_user(username=username) 178 if user is None: 179 detail = f"The User with username `{username}` was not found" 180 raise NotFound(title="User not found", detail=detail) 181 182 user.roles = [] # Clear foreign keys on this user first. 183 security_manager.get_session.delete(user) 184 security_manager.get_session.commit() 185 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/airflow/api_connexion/endpoints/user_endpoint.py b/airflow/api_connexion/endpoints/user_endpoint.py --- a/airflow/api_connexion/endpoints/user_endpoint.py +++ b/airflow/api_connexion/endpoints/user_endpoint.py @@ -21,7 +21,7 @@ from werkzeug.security import generate_password_hash from airflow.api_connexion import security -from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound +from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound, Unknown from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters from airflow.api_connexion.schemas.user_schema import ( UserCollection, @@ -76,10 +76,14 @@ raise BadRequest(detail=str(e.messages)) security_manager = current_app.appbuilder.sm + username = data["username"] + email = data["email"] - user = security_manager.find_user(username=data["username"]) - if user is not None: - detail = f"Username `{user.username}` already exists. Use PATCH to update." + if security_manager.find_user(username=username): + detail = f"Username `{username}` already exists. Use PATCH to update." + raise AlreadyExists(detail=detail) + if security_manager.find_user(email=email): + detail = f"The email `{email}` is already taken." raise AlreadyExists(detail=detail) roles_to_add = [] @@ -101,6 +105,10 @@ default_role = security_manager.find_role(security_manager.auth_user_registration_role) user = security_manager.add_user(role=default_role, **data) + if not user: + detail = f"Failed to add user `{username}`." + return Unknown(detail=detail) + if roles_to_add: user.roles.extend(roles_to_add) security_manager.update_user(user)
{"golden_diff": "diff --git a/airflow/api_connexion/endpoints/user_endpoint.py b/airflow/api_connexion/endpoints/user_endpoint.py\n--- a/airflow/api_connexion/endpoints/user_endpoint.py\n+++ b/airflow/api_connexion/endpoints/user_endpoint.py\n@@ -21,7 +21,7 @@\n from werkzeug.security import generate_password_hash\n \n from airflow.api_connexion import security\n-from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound\n+from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound, Unknown\n from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters\n from airflow.api_connexion.schemas.user_schema import (\n UserCollection,\n@@ -76,10 +76,14 @@\n raise BadRequest(detail=str(e.messages))\n \n security_manager = current_app.appbuilder.sm\n+ username = data[\"username\"]\n+ email = data[\"email\"]\n \n- user = security_manager.find_user(username=data[\"username\"])\n- if user is not None:\n- detail = f\"Username `{user.username}` already exists. Use PATCH to update.\"\n+ if security_manager.find_user(username=username):\n+ detail = f\"Username `{username}` already exists. Use PATCH to update.\"\n+ raise AlreadyExists(detail=detail)\n+ if security_manager.find_user(email=email):\n+ detail = f\"The email `{email}` is already taken.\"\n raise AlreadyExists(detail=detail)\n \n roles_to_add = []\n@@ -101,6 +105,10 @@\n default_role = security_manager.find_role(security_manager.auth_user_registration_role)\n \n user = security_manager.add_user(role=default_role, **data)\n+ if not user:\n+ detail = f\"Failed to add user `{username}`.\"\n+ return Unknown(detail=detail)\n+\n if roles_to_add:\n user.roles.extend(roles_to_add)\n security_manager.update_user(user)\n", "issue": "POST /api/v1/users fails with exception\n### Apache Airflow version\n\nmain (development)\n\n### Operating System\n\nFrom Astronomer\u2019s QA team\n\n### Versions of Apache Airflow Providers\n\n_No response_\n\n### Deployment\n\nAstronomer\n\n### Deployment details\n\n_No response_\n\n### What happened\n\nWhen adding a new user, The following exception is emitted:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.9/site-packages/flask/app.py\", line 2447, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"/usr/local/lib/python3.9/site-packages/flask/app.py\", line 1952, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"/usr/local/lib/python3.9/site-packages/flask/app.py\", line 1821, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/usr/local/lib/python3.9/site-packages/flask/_compat.py\", line 39, in reraise\r\n raise value\r\n File \"/usr/local/lib/python3.9/site-packages/flask/app.py\", line 1950, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/usr/local/lib/python3.9/site-packages/flask/app.py\", line 1936, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/decorator.py\", line 48, in wrapper\r\n response = function(request)\r\n File \"/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/uri_parsing.py\", line 144, in wrapper\r\n response = function(request)\r\n File \"/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/validation.py\", line 184, in wrapper\r\n response = function(request)\r\n File 
\"/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/response.py\", line 103, in wrapper\r\n response = function(request)\r\n File \"/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/parameter.py\", line 121, in wrapper\r\n return function(**kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/airflow/api_connexion/security.py\", line 47, in decorated\r\n return func(*args, **kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/airflow/api_connexion/endpoints/user_endpoint.py\", line 105, in post_user\r\n user.roles.extend(roles_to_add)\r\nAttributeError: 'bool' object has no attribute 'roles'\r\n```\r\n\r\nThe immediate cause to this exception is F.A.B. returns `False` when it fails to add a new user. The problem, however, is _why_ excactly it failed. This is the payload used:\r\n\r\n```json\r\n{\r\n \"username\": \"username6\",\r\n \"password\": \"password1\",\r\n \"email\": \"[email protected]\",\r\n \"first_name\": \"user2\",\r\n \"last_name\": \"test1\",\r\n \"roles\":[{\"name\":\"Admin\"},{\"name\":\"Viewer\"}]\r\n}\r\n```\r\n\r\nThis went through validation, therefore we know\r\n\r\n1. The POST-ing user has permission to create a new user.\r\n2. The format is correct (including the nested roles).\r\n3. There is not already an existing `username6` in the database.\r\n4. All listed roles exist.\r\n\r\n(All these are already covered by unit tests.)\r\n\r\nFurther complicating the issue is F.A.B.\u2019s security manager swallows an exception when this happens, and only logs the exception to the server. And we\u2019re having trouble locating that line of log. It\u2019s quite difficult to diagnose further, so I\u2019m posting this hoping someone has better luck reproducing this.\r\n\r\nI will submit a fix to correct the immediate issue, making the API emit 500 with something like \u201cFailed to create user for unknown reason\u201d to make the failure _slightly_ less confusing.\n\n### What you expected to happen\n\n_No response_\n\n### How to reproduce\n\n_No response_\n\n### Anything else\n\n_No response_\n\n### Are you willing to submit PR?\n\n- [X] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom flask import current_app, request\nfrom flask_appbuilder.security.sqla.models import User\nfrom marshmallow import ValidationError\nfrom sqlalchemy import func\nfrom werkzeug.security import generate_password_hash\n\nfrom airflow.api_connexion import security\nfrom airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound\nfrom airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters\nfrom airflow.api_connexion.schemas.user_schema import (\n UserCollection,\n user_collection_item_schema,\n user_collection_schema,\n user_schema,\n)\nfrom airflow.security import permissions\n\n\[email protected]_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_USER)])\ndef get_user(username):\n \"\"\"Get a user\"\"\"\n ab_security_manager = current_app.appbuilder.sm\n user = ab_security_manager.find_user(username=username)\n if not user:\n raise NotFound(title=\"User not found\", detail=f\"The User with username `{username}` was not found\")\n return user_collection_item_schema.dump(user)\n\n\[email protected]_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_USER)])\n@format_parameters({'limit': check_limit})\ndef get_users(limit, order_by='id', offset=None):\n \"\"\"Get users\"\"\"\n appbuilder = current_app.appbuilder\n session = appbuilder.get_session\n total_entries = session.query(func.count(User.id)).scalar()\n to_replace = {\"user_id\": \"id\"}\n allowed_filter_attrs = [\n \"user_id\",\n 'id',\n \"first_name\",\n \"last_name\",\n \"user_name\",\n \"email\",\n \"is_active\",\n \"role\",\n ]\n query = session.query(User)\n query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)\n users = query.offset(offset).limit(limit).all()\n\n return user_collection_schema.dump(UserCollection(users=users, total_entries=total_entries))\n\n\[email protected]_access([(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_USER)])\ndef post_user():\n \"\"\"Create a new user\"\"\"\n try:\n data = user_schema.load(request.json)\n except ValidationError as e:\n raise BadRequest(detail=str(e.messages))\n\n security_manager = current_app.appbuilder.sm\n\n user = security_manager.find_user(username=data[\"username\"])\n if user is not None:\n detail = f\"Username `{user.username}` already exists. 
Use PATCH to update.\"\n raise AlreadyExists(detail=detail)\n\n roles_to_add = []\n missing_role_names = []\n for role_data in data.pop(\"roles\", ()):\n role_name = role_data[\"name\"]\n role = security_manager.find_role(role_name)\n if role is None:\n missing_role_names.append(role_name)\n else:\n roles_to_add.append(role)\n if missing_role_names:\n detail = f\"Unknown roles: {', '.join(repr(n) for n in missing_role_names)}\"\n raise BadRequest(detail=detail)\n\n if roles_to_add:\n default_role = roles_to_add.pop()\n else: # No roles provided, use the F.A.B's default registered user role.\n default_role = security_manager.find_role(security_manager.auth_user_registration_role)\n\n user = security_manager.add_user(role=default_role, **data)\n if roles_to_add:\n user.roles.extend(roles_to_add)\n security_manager.update_user(user)\n return user_schema.dump(user)\n\n\[email protected]_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_USER)])\ndef patch_user(username, update_mask=None):\n \"\"\"Update a role\"\"\"\n try:\n data = user_schema.load(request.json)\n except ValidationError as e:\n raise BadRequest(detail=str(e.messages))\n\n security_manager = current_app.appbuilder.sm\n\n user = security_manager.find_user(username=username)\n if user is None:\n detail = f\"The User with username `{username}` was not found\"\n raise NotFound(title=\"User not found\", detail=detail)\n\n # Get fields to update. 'username' is always excluded (and it's an error to\n # include it in update_maek).\n if update_mask is not None:\n masked_data = {}\n missing_mask_names = []\n for field in update_mask:\n field = field.strip()\n try:\n masked_data[field] = data[field]\n except KeyError:\n missing_mask_names.append(field)\n if missing_mask_names:\n detail = f\"Unknown update masks: {', '.join(repr(n) for n in missing_mask_names)}\"\n raise BadRequest(detail=detail)\n if \"username\" in masked_data:\n raise BadRequest(\"Cannot update fields: 'username'\")\n data = masked_data\n else:\n data.pop(\"username\", None)\n\n if \"roles\" in data:\n roles_to_update = []\n missing_role_names = []\n for role_data in data.pop(\"roles\", ()):\n role_name = role_data[\"name\"]\n role = security_manager.find_role(role_name)\n if role is None:\n missing_role_names.append(role_name)\n else:\n roles_to_update.append(role)\n if missing_role_names:\n detail = f\"Unknown roles: {', '.join(repr(n) for n in missing_role_names)}\"\n raise BadRequest(detail=detail)\n else:\n roles_to_update = None # Don't change existing value.\n\n if \"password\" in data:\n user.password = generate_password_hash(data.pop(\"password\"))\n if roles_to_update is not None:\n user.roles = roles_to_update\n for key, value in data.items():\n setattr(user, key, value)\n security_manager.update_user(user)\n\n return user_schema.dump(user)\n\n\[email protected]_access([(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_USER)])\ndef delete_user(username):\n \"\"\"Delete a user\"\"\"\n security_manager = current_app.appbuilder.sm\n\n user = security_manager.find_user(username=username)\n if user is None:\n detail = f\"The User with username `{username}` was not found\"\n raise NotFound(title=\"User not found\", detail=detail)\n\n user.roles = [] # Clear foreign keys on this user first.\n security_manager.get_session.delete(user)\n security_manager.get_session.commit()\n", "path": "airflow/api_connexion/endpoints/user_endpoint.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor 
license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom flask import current_app, request\nfrom flask_appbuilder.security.sqla.models import User\nfrom marshmallow import ValidationError\nfrom sqlalchemy import func\nfrom werkzeug.security import generate_password_hash\n\nfrom airflow.api_connexion import security\nfrom airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound, Unknown\nfrom airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters\nfrom airflow.api_connexion.schemas.user_schema import (\n UserCollection,\n user_collection_item_schema,\n user_collection_schema,\n user_schema,\n)\nfrom airflow.security import permissions\n\n\[email protected]_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_USER)])\ndef get_user(username):\n \"\"\"Get a user\"\"\"\n ab_security_manager = current_app.appbuilder.sm\n user = ab_security_manager.find_user(username=username)\n if not user:\n raise NotFound(title=\"User not found\", detail=f\"The User with username `{username}` was not found\")\n return user_collection_item_schema.dump(user)\n\n\[email protected]_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_USER)])\n@format_parameters({'limit': check_limit})\ndef get_users(limit, order_by='id', offset=None):\n \"\"\"Get users\"\"\"\n appbuilder = current_app.appbuilder\n session = appbuilder.get_session\n total_entries = session.query(func.count(User.id)).scalar()\n to_replace = {\"user_id\": \"id\"}\n allowed_filter_attrs = [\n \"user_id\",\n 'id',\n \"first_name\",\n \"last_name\",\n \"user_name\",\n \"email\",\n \"is_active\",\n \"role\",\n ]\n query = session.query(User)\n query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)\n users = query.offset(offset).limit(limit).all()\n\n return user_collection_schema.dump(UserCollection(users=users, total_entries=total_entries))\n\n\[email protected]_access([(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_USER)])\ndef post_user():\n \"\"\"Create a new user\"\"\"\n try:\n data = user_schema.load(request.json)\n except ValidationError as e:\n raise BadRequest(detail=str(e.messages))\n\n security_manager = current_app.appbuilder.sm\n username = data[\"username\"]\n email = data[\"email\"]\n\n if security_manager.find_user(username=username):\n detail = f\"Username `{username}` already exists. 
Use PATCH to update.\"\n raise AlreadyExists(detail=detail)\n if security_manager.find_user(email=email):\n detail = f\"The email `{email}` is already taken.\"\n raise AlreadyExists(detail=detail)\n\n roles_to_add = []\n missing_role_names = []\n for role_data in data.pop(\"roles\", ()):\n role_name = role_data[\"name\"]\n role = security_manager.find_role(role_name)\n if role is None:\n missing_role_names.append(role_name)\n else:\n roles_to_add.append(role)\n if missing_role_names:\n detail = f\"Unknown roles: {', '.join(repr(n) for n in missing_role_names)}\"\n raise BadRequest(detail=detail)\n\n if roles_to_add:\n default_role = roles_to_add.pop()\n else: # No roles provided, use the F.A.B's default registered user role.\n default_role = security_manager.find_role(security_manager.auth_user_registration_role)\n\n user = security_manager.add_user(role=default_role, **data)\n if not user:\n detail = f\"Failed to add user `{username}`.\"\n return Unknown(detail=detail)\n\n if roles_to_add:\n user.roles.extend(roles_to_add)\n security_manager.update_user(user)\n return user_schema.dump(user)\n\n\[email protected]_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_USER)])\ndef patch_user(username, update_mask=None):\n \"\"\"Update a role\"\"\"\n try:\n data = user_schema.load(request.json)\n except ValidationError as e:\n raise BadRequest(detail=str(e.messages))\n\n security_manager = current_app.appbuilder.sm\n\n user = security_manager.find_user(username=username)\n if user is None:\n detail = f\"The User with username `{username}` was not found\"\n raise NotFound(title=\"User not found\", detail=detail)\n\n # Get fields to update. 'username' is always excluded (and it's an error to\n # include it in update_maek).\n if update_mask is not None:\n masked_data = {}\n missing_mask_names = []\n for field in update_mask:\n field = field.strip()\n try:\n masked_data[field] = data[field]\n except KeyError:\n missing_mask_names.append(field)\n if missing_mask_names:\n detail = f\"Unknown update masks: {', '.join(repr(n) for n in missing_mask_names)}\"\n raise BadRequest(detail=detail)\n if \"username\" in masked_data:\n raise BadRequest(\"Cannot update fields: 'username'\")\n data = masked_data\n else:\n data.pop(\"username\", None)\n\n if \"roles\" in data:\n roles_to_update = []\n missing_role_names = []\n for role_data in data.pop(\"roles\", ()):\n role_name = role_data[\"name\"]\n role = security_manager.find_role(role_name)\n if role is None:\n missing_role_names.append(role_name)\n else:\n roles_to_update.append(role)\n if missing_role_names:\n detail = f\"Unknown roles: {', '.join(repr(n) for n in missing_role_names)}\"\n raise BadRequest(detail=detail)\n else:\n roles_to_update = None # Don't change existing value.\n\n if \"password\" in data:\n user.password = generate_password_hash(data.pop(\"password\"))\n if roles_to_update is not None:\n user.roles = roles_to_update\n for key, value in data.items():\n setattr(user, key, value)\n security_manager.update_user(user)\n\n return user_schema.dump(user)\n\n\[email protected]_access([(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_USER)])\ndef delete_user(username):\n \"\"\"Delete a user\"\"\"\n security_manager = current_app.appbuilder.sm\n\n user = security_manager.find_user(username=username)\n if user is None:\n detail = f\"The User with username `{username}` was not found\"\n raise NotFound(title=\"User not found\", detail=detail)\n\n user.roles = [] # Clear foreign keys on this user first.\n 
security_manager.get_session.delete(user)\n security_manager.get_session.commit()\n", "path": "airflow/api_connexion/endpoints/user_endpoint.py"}]}
num_tokens: 3,148
num_tokens_diff: 411

problem_id: gh_patches_debug_30331
source: rasdani/github-patches
task_type: git_diff
in_source_id: e-valuation__EvaP-424
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- UserProfile missing when User is not created during import Users which are not created by an enrollment data import but by logging in for the first time (using Kerberos authentication, so they don't need to have a local account first) don't have a UserProfile. This leads to undefined behavior. UserProfile missing when User is not created during import Users which are not created by an enrollment data import but by logging in for the first time (using Kerberos authentication, so they don't need to have a local account first) don't have a UserProfile. This leads to undefined behavior. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `evap/evaluation/views.py` Content: ``` 1 from django.contrib import messages 2 from django.contrib.auth import login as auth_login 3 from django.shortcuts import redirect, render_to_response 4 from django.template import RequestContext 5 from django.utils.translation import ugettext as _ 6 7 from evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm 8 from evap.evaluation.models import UserProfile, FaqSection, EmailTemplate 9 10 11 def index(request): 12 """Main entry page into EvaP providing all the login options available. THe username/password 13 login is thought to be used for internal users, e.g. by connecting to a LDAP directory. 14 The login key mechanism is meant to be used to include external participants, e.g. visiting 15 students or visiting contributors. 16 """ 17 18 # parse the form data into the respective form 19 submit_type = request.POST.get("submit_type", "no_submit") 20 new_key_form = NewKeyForm(request.POST if submit_type == "new_key" else None) 21 login_key_form = LoginKeyForm(request.POST if submit_type == "login_key" else None) 22 login_username_form = LoginUsernameForm(request, request.POST if submit_type == "login_username" else None) 23 24 # process form data 25 if request.method == 'POST': 26 if new_key_form.is_valid(): 27 # user wants a new login key 28 profile = new_key_form.get_profile() 29 profile.generate_login_key() 30 profile.save() 31 32 EmailTemplate.get_login_key_template().send_to_user(new_key_form.get_user()) 33 34 messages.success(request, _(u"Successfully sent email with new login key.")) 35 elif login_key_form.is_valid(): 36 # user would like to login with a login key and passed key test 37 auth_login(request, login_key_form.get_user()) 38 elif login_username_form.is_valid(): 39 # user would like to login with username and password and passed password test 40 auth_login(request, login_username_form.get_user()) 41 42 # clean up our test cookie 43 if request.session.test_cookie_worked(): 44 request.session.delete_test_cookie() 45 46 # if not logged in by now, render form 47 if not request.user.is_active: 48 # set test cookie to verify whether they work in the next step 49 request.session.set_test_cookie() 50 51 return render_to_response("index.html", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request)) 52 else: 53 # check for redirect variable 54 redirect_to = request.GET.get("next", None) 55 if redirect_to is not None: 56 if redirect_to.startswith("/fsr/"): 57 if request.user.is_staff: 58 return redirect(redirect_to) 59 elif redirect_to.startswith("/contributor/"): 60 if 
UserProfile.get_for_user(request.user).is_contributor: 61 return redirect(redirect_to) 62 else: 63 return redirect(redirect_to) 64 65 # redirect user to appropriate start page 66 if request.user.is_staff: 67 return redirect('evap.fsr.views.index') 68 elif UserProfile.get_for_user(request.user).is_editor_or_delegate: 69 return redirect('evap.contributor.views.index') 70 else: 71 return redirect('evap.student.views.index') 72 73 74 def faq(request): 75 return render_to_response("faq.html", dict(sections=FaqSection.objects.all()), context_instance=RequestContext(request)) 76 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py --- a/evap/evaluation/views.py +++ b/evap/evaluation/views.py @@ -50,6 +50,8 @@ return render_to_response("index.html", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request)) else: + userprofile, _ = UserProfile.objects.get_or_create(user=request.user) + # check for redirect variable redirect_to = request.GET.get("next", None) if redirect_to is not None: @@ -57,7 +59,7 @@ if request.user.is_staff: return redirect(redirect_to) elif redirect_to.startswith("/contributor/"): - if UserProfile.get_for_user(request.user).is_contributor: + if userprofile.is_contributor: return redirect(redirect_to) else: return redirect(redirect_to) @@ -65,7 +67,7 @@ # redirect user to appropriate start page if request.user.is_staff: return redirect('evap.fsr.views.index') - elif UserProfile.get_for_user(request.user).is_editor_or_delegate: + elif userprofile.is_editor_or_delegate: return redirect('evap.contributor.views.index') else: return redirect('evap.student.views.index')
{"golden_diff": "diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py\n--- a/evap/evaluation/views.py\n+++ b/evap/evaluation/views.py\n@@ -50,6 +50,8 @@\n \n return render_to_response(\"index.html\", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))\n else:\n+ userprofile, _ = UserProfile.objects.get_or_create(user=request.user)\n+\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n@@ -57,7 +59,7 @@\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n- if UserProfile.get_for_user(request.user).is_contributor:\n+ if userprofile.is_contributor:\n return redirect(redirect_to)\n else:\n return redirect(redirect_to)\n@@ -65,7 +67,7 @@\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('evap.fsr.views.index')\n- elif UserProfile.get_for_user(request.user).is_editor_or_delegate:\n+ elif userprofile.is_editor_or_delegate:\n return redirect('evap.contributor.views.index')\n else:\n return redirect('evap.student.views.index')\n", "issue": "UserProfile missing when User is not created during import\nUsers which are not created by an enrollment data import but by logging in for the first time (using Kerberos authentication, so they don't need to have a local account first) don't have a UserProfile. This leads to undefined behavior.\n\nUserProfile missing when User is not created during import\nUsers which are not created by an enrollment data import but by logging in for the first time (using Kerberos authentication, so they don't need to have a local account first) don't have a UserProfile. This leads to undefined behavior.\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.contrib.auth import login as auth_login\nfrom django.shortcuts import redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm\nfrom evap.evaluation.models import UserProfile, FaqSection, EmailTemplate\n\n\ndef index(request):\n \"\"\"Main entry page into EvaP providing all the login options available. THe username/password\n login is thought to be used for internal users, e.g. by connecting to a LDAP directory.\n The login key mechanism is meant to be used to include external participants, e.g. 
visiting\n students or visiting contributors.\n \"\"\"\n\n # parse the form data into the respective form\n submit_type = request.POST.get(\"submit_type\", \"no_submit\")\n new_key_form = NewKeyForm(request.POST if submit_type == \"new_key\" else None)\n login_key_form = LoginKeyForm(request.POST if submit_type == \"login_key\" else None)\n login_username_form = LoginUsernameForm(request, request.POST if submit_type == \"login_username\" else None)\n\n # process form data\n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n profile = new_key_form.get_profile()\n profile.generate_login_key()\n profile.save()\n\n EmailTemplate.get_login_key_template().send_to_user(new_key_form.get_user())\n\n messages.success(request, _(u\"Successfully sent email with new login key.\"))\n elif login_key_form.is_valid():\n # user would like to login with a login key and passed key test\n auth_login(request, login_key_form.get_user())\n elif login_username_form.is_valid():\n # user would like to login with username and password and passed password test\n auth_login(request, login_username_form.get_user())\n\n # clean up our test cookie\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n # if not logged in by now, render form\n if not request.user.is_active:\n # set test cookie to verify whether they work in the next step\n request.session.set_test_cookie()\n\n return render_to_response(\"index.html\", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))\n else:\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n if redirect_to.startswith(\"/fsr/\"):\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n if UserProfile.get_for_user(request.user).is_contributor:\n return redirect(redirect_to)\n else:\n return redirect(redirect_to)\n\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('evap.fsr.views.index')\n elif UserProfile.get_for_user(request.user).is_editor_or_delegate:\n return redirect('evap.contributor.views.index')\n else:\n return redirect('evap.student.views.index')\n\n\ndef faq(request):\n return render_to_response(\"faq.html\", dict(sections=FaqSection.objects.all()), context_instance=RequestContext(request))\n", "path": "evap/evaluation/views.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.contrib.auth import login as auth_login\nfrom django.shortcuts import redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm\nfrom evap.evaluation.models import UserProfile, FaqSection, EmailTemplate\n\n\ndef index(request):\n \"\"\"Main entry page into EvaP providing all the login options available. THe username/password\n login is thought to be used for internal users, e.g. by connecting to a LDAP directory.\n The login key mechanism is meant to be used to include external participants, e.g. 
visiting\n students or visiting contributors.\n \"\"\"\n\n # parse the form data into the respective form\n submit_type = request.POST.get(\"submit_type\", \"no_submit\")\n new_key_form = NewKeyForm(request.POST if submit_type == \"new_key\" else None)\n login_key_form = LoginKeyForm(request.POST if submit_type == \"login_key\" else None)\n login_username_form = LoginUsernameForm(request, request.POST if submit_type == \"login_username\" else None)\n\n # process form data\n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n profile = new_key_form.get_profile()\n profile.generate_login_key()\n profile.save()\n\n EmailTemplate.get_login_key_template().send_to_user(new_key_form.get_user())\n\n messages.success(request, _(u\"Successfully sent email with new login key.\"))\n elif login_key_form.is_valid():\n # user would like to login with a login key and passed key test\n auth_login(request, login_key_form.get_user())\n elif login_username_form.is_valid():\n # user would like to login with username and password and passed password test\n auth_login(request, login_username_form.get_user())\n\n # clean up our test cookie\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n # if not logged in by now, render form\n if not request.user.is_active:\n # set test cookie to verify whether they work in the next step\n request.session.set_test_cookie()\n\n return render_to_response(\"index.html\", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))\n else:\n userprofile, _ = UserProfile.objects.get_or_create(user=request.user)\n\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n if redirect_to.startswith(\"/fsr/\"):\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n if userprofile.is_contributor:\n return redirect(redirect_to)\n else:\n return redirect(redirect_to)\n\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('evap.fsr.views.index')\n elif userprofile.is_editor_or_delegate:\n return redirect('evap.contributor.views.index')\n else:\n return redirect('evap.student.views.index')\n\n\ndef faq(request):\n return render_to_response(\"faq.html\", dict(sections=FaqSection.objects.all()), context_instance=RequestContext(request))\n", "path": "evap/evaluation/views.py"}]}
num_tokens: 1,221
num_tokens_diff: 304

problem_id: gh_patches_debug_21308
source: rasdani/github-patches
task_type: git_diff
in_source_id: python-gitlab__python-gitlab-601
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- gitlab --help requires a config file ## Description of the problem, including code/CLI snippet `gitlab --help` reads its config file even when displaying help ## Expected Behavior Print out the help text. ## Actual Behavior Exits with error if no configuration file is found. ## Specifications - python-gitlab version: 1.5.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gitlab/cli.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]> 5 # 6 # This program is free software: you can redistribute it and/or modify 7 # it under the terms of the GNU Lesser General Public License as published by 8 # the Free Software Foundation, either version 3 of the License, or 9 # (at your option) any later version. 10 # 11 # This program is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU Lesser General Public License for more details. 15 # 16 # You should have received a copy of the GNU Lesser General Public License 17 # along with this program. If not, see <http://www.gnu.org/licenses/>. 18 19 from __future__ import print_function 20 import argparse 21 import functools 22 import importlib 23 import re 24 import sys 25 26 import gitlab.config 27 28 camel_re = re.compile('(.)([A-Z])') 29 30 # custom_actions = { 31 # cls: { 32 # action: (mandatory_args, optional_args, in_obj), 33 # }, 34 # } 35 custom_actions = {} 36 37 38 def register_custom_action(cls_names, mandatory=tuple(), optional=tuple()): 39 def wrap(f): 40 @functools.wraps(f) 41 def wrapped_f(*args, **kwargs): 42 return f(*args, **kwargs) 43 44 # in_obj defines whether the method belongs to the obj or the manager 45 in_obj = True 46 classes = cls_names 47 if type(cls_names) != tuple: 48 classes = (cls_names, ) 49 50 for cls_name in classes: 51 final_name = cls_name 52 if cls_name.endswith('Manager'): 53 final_name = cls_name.replace('Manager', '') 54 in_obj = False 55 if final_name not in custom_actions: 56 custom_actions[final_name] = {} 57 58 action = f.__name__.replace('_', '-') 59 custom_actions[final_name][action] = (mandatory, optional, in_obj) 60 61 return wrapped_f 62 return wrap 63 64 65 def die(msg, e=None): 66 if e: 67 msg = "%s (%s)" % (msg, e) 68 sys.stderr.write(msg + "\n") 69 sys.exit(1) 70 71 72 def what_to_cls(what): 73 return "".join([s.capitalize() for s in what.split("-")]) 74 75 76 def cls_to_what(cls): 77 return camel_re.sub(r'\1-\2', cls.__name__).lower() 78 79 80 def _get_base_parser(add_help=True): 81 parser = argparse.ArgumentParser( 82 add_help=add_help, 83 description="GitLab API Command Line Interface") 84 parser.add_argument("--version", help="Display the version.", 85 action="store_true") 86 parser.add_argument("-v", "--verbose", "--fancy", 87 help="Verbose mode (legacy format only)", 88 action="store_true") 89 parser.add_argument("-d", "--debug", 90 help="Debug mode (display HTTP requests)", 91 action="store_true") 92 parser.add_argument("-c", "--config-file", action='append', 93 help=("Configuration file to use. Can be used " 94 "multiple times.")) 95 parser.add_argument("-g", "--gitlab", 96 help=("Which configuration section should " 97 "be used. 
If not defined, the default selection " 98 "will be used."), 99 required=False) 100 parser.add_argument("-o", "--output", 101 help=("Output format (v4 only): json|legacy|yaml"), 102 required=False, 103 choices=['json', 'legacy', 'yaml'], 104 default="legacy") 105 parser.add_argument("-f", "--fields", 106 help=("Fields to display in the output (comma " 107 "separated). Not used with legacy output"), 108 required=False) 109 110 return parser 111 112 113 def _get_parser(cli_module): 114 parser = _get_base_parser() 115 return cli_module.extend_parser(parser) 116 117 118 def _parse_value(v): 119 if isinstance(v, str) and v.startswith('@'): 120 # If the user-provided value starts with @, we try to read the file 121 # path provided after @ as the real value. Exit on any error. 122 try: 123 with open(v[1:]) as fl: 124 return fl.read() 125 except Exception as e: 126 sys.stderr.write("%s\n" % e) 127 sys.exit(1) 128 129 return v 130 131 132 def main(): 133 if "--version" in sys.argv: 134 print(gitlab.__version__) 135 exit(0) 136 137 parser = _get_base_parser(add_help=False) 138 # This first parsing step is used to find the gitlab config to use, and 139 # load the propermodule (v3 or v4) accordingly. At that point we don't have 140 # any subparser setup 141 (options, args) = parser.parse_known_args(sys.argv) 142 143 config = gitlab.config.GitlabConfigParser(options.gitlab, 144 options.config_file) 145 cli_module = importlib.import_module('gitlab.v%s.cli' % config.api_version) 146 147 # Now we build the entire set of subcommands and do the complete parsing 148 parser = _get_parser(cli_module) 149 args = parser.parse_args(sys.argv[1:]) 150 151 config_files = args.config_file 152 gitlab_id = args.gitlab 153 verbose = args.verbose 154 output = args.output 155 fields = [] 156 if args.fields: 157 fields = [x.strip() for x in args.fields.split(',')] 158 debug = args.debug 159 action = args.action 160 what = args.what 161 162 args = args.__dict__ 163 # Remove CLI behavior-related args 164 for item in ('gitlab', 'config_file', 'verbose', 'debug', 'what', 'action', 165 'version', 'output'): 166 args.pop(item) 167 args = {k: _parse_value(v) for k, v in args.items() if v is not None} 168 169 try: 170 gl = gitlab.Gitlab.from_config(gitlab_id, config_files) 171 if gl.private_token or gl.oauth_token: 172 gl.auth() 173 except Exception as e: 174 die(str(e)) 175 176 if debug: 177 gl.enable_debug() 178 179 cli_module.run(gl, what, action, args, verbose, output, fields) 180 181 sys.exit(0) 182 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gitlab/cli.py b/gitlab/cli.py --- a/gitlab/cli.py +++ b/gitlab/cli.py @@ -98,7 +98,7 @@ "will be used."), required=False) parser.add_argument("-o", "--output", - help=("Output format (v4 only): json|legacy|yaml"), + help="Output format (v4 only): json|legacy|yaml", required=False, choices=['json', 'legacy', 'yaml'], default="legacy") @@ -135,6 +135,10 @@ exit(0) parser = _get_base_parser(add_help=False) + if "--help" in sys.argv or "-h" in sys.argv: + parser.print_help() + exit(0) + # This first parsing step is used to find the gitlab config to use, and # load the propermodule (v3 or v4) accordingly. At that point we don't have # any subparser setup
{"golden_diff": "diff --git a/gitlab/cli.py b/gitlab/cli.py\n--- a/gitlab/cli.py\n+++ b/gitlab/cli.py\n@@ -98,7 +98,7 @@\n \"will be used.\"),\n required=False)\n parser.add_argument(\"-o\", \"--output\",\n- help=(\"Output format (v4 only): json|legacy|yaml\"),\n+ help=\"Output format (v4 only): json|legacy|yaml\",\n required=False,\n choices=['json', 'legacy', 'yaml'],\n default=\"legacy\")\n@@ -135,6 +135,10 @@\n exit(0)\n \n parser = _get_base_parser(add_help=False)\n+ if \"--help\" in sys.argv or \"-h\" in sys.argv:\n+ parser.print_help()\n+ exit(0)\n+\n # This first parsing step is used to find the gitlab config to use, and\n # load the propermodule (v3 or v4) accordingly. At that point we don't have\n # any subparser setup\n", "issue": "gitlab --help requires a config file\n## Description of the problem, including code/CLI snippet\r\n\r\n`gitlab --help` reads its config file even when displaying help\r\n\r\n## Expected Behavior\r\n\r\nPrint out the help text.\r\n\r\n## Actual Behavior\r\n\r\nExits with error if no configuration file is found.\r\n\r\n## Specifications\r\n\r\n - python-gitlab version: 1.5.0\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import print_function\nimport argparse\nimport functools\nimport importlib\nimport re\nimport sys\n\nimport gitlab.config\n\ncamel_re = re.compile('(.)([A-Z])')\n\n# custom_actions = {\n# cls: {\n# action: (mandatory_args, optional_args, in_obj),\n# },\n# }\ncustom_actions = {}\n\n\ndef register_custom_action(cls_names, mandatory=tuple(), optional=tuple()):\n def wrap(f):\n @functools.wraps(f)\n def wrapped_f(*args, **kwargs):\n return f(*args, **kwargs)\n\n # in_obj defines whether the method belongs to the obj or the manager\n in_obj = True\n classes = cls_names\n if type(cls_names) != tuple:\n classes = (cls_names, )\n\n for cls_name in classes:\n final_name = cls_name\n if cls_name.endswith('Manager'):\n final_name = cls_name.replace('Manager', '')\n in_obj = False\n if final_name not in custom_actions:\n custom_actions[final_name] = {}\n\n action = f.__name__.replace('_', '-')\n custom_actions[final_name][action] = (mandatory, optional, in_obj)\n\n return wrapped_f\n return wrap\n\n\ndef die(msg, e=None):\n if e:\n msg = \"%s (%s)\" % (msg, e)\n sys.stderr.write(msg + \"\\n\")\n sys.exit(1)\n\n\ndef what_to_cls(what):\n return \"\".join([s.capitalize() for s in what.split(\"-\")])\n\n\ndef cls_to_what(cls):\n return camel_re.sub(r'\\1-\\2', cls.__name__).lower()\n\n\ndef _get_base_parser(add_help=True):\n parser = argparse.ArgumentParser(\n add_help=add_help,\n description=\"GitLab API Command Line Interface\")\n parser.add_argument(\"--version\", help=\"Display the version.\",\n action=\"store_true\")\n parser.add_argument(\"-v\", \"--verbose\", \"--fancy\",\n help=\"Verbose mode (legacy format only)\",\n action=\"store_true\")\n parser.add_argument(\"-d\", \"--debug\",\n help=\"Debug mode (display HTTP requests)\",\n action=\"store_true\")\n parser.add_argument(\"-c\", \"--config-file\", action='append',\n help=(\"Configuration file to use. Can be used \"\n \"multiple times.\"))\n parser.add_argument(\"-g\", \"--gitlab\",\n help=(\"Which configuration section should \"\n \"be used. If not defined, the default selection \"\n \"will be used.\"),\n required=False)\n parser.add_argument(\"-o\", \"--output\",\n help=(\"Output format (v4 only): json|legacy|yaml\"),\n required=False,\n choices=['json', 'legacy', 'yaml'],\n default=\"legacy\")\n parser.add_argument(\"-f\", \"--fields\",\n help=(\"Fields to display in the output (comma \"\n \"separated). Not used with legacy output\"),\n required=False)\n\n return parser\n\n\ndef _get_parser(cli_module):\n parser = _get_base_parser()\n return cli_module.extend_parser(parser)\n\n\ndef _parse_value(v):\n if isinstance(v, str) and v.startswith('@'):\n # If the user-provided value starts with @, we try to read the file\n # path provided after @ as the real value. Exit on any error.\n try:\n with open(v[1:]) as fl:\n return fl.read()\n except Exception as e:\n sys.stderr.write(\"%s\\n\" % e)\n sys.exit(1)\n\n return v\n\n\ndef main():\n if \"--version\" in sys.argv:\n print(gitlab.__version__)\n exit(0)\n\n parser = _get_base_parser(add_help=False)\n # This first parsing step is used to find the gitlab config to use, and\n # load the propermodule (v3 or v4) accordingly. 
At that point we don't have\n # any subparser setup\n (options, args) = parser.parse_known_args(sys.argv)\n\n config = gitlab.config.GitlabConfigParser(options.gitlab,\n options.config_file)\n cli_module = importlib.import_module('gitlab.v%s.cli' % config.api_version)\n\n # Now we build the entire set of subcommands and do the complete parsing\n parser = _get_parser(cli_module)\n args = parser.parse_args(sys.argv[1:])\n\n config_files = args.config_file\n gitlab_id = args.gitlab\n verbose = args.verbose\n output = args.output\n fields = []\n if args.fields:\n fields = [x.strip() for x in args.fields.split(',')]\n debug = args.debug\n action = args.action\n what = args.what\n\n args = args.__dict__\n # Remove CLI behavior-related args\n for item in ('gitlab', 'config_file', 'verbose', 'debug', 'what', 'action',\n 'version', 'output'):\n args.pop(item)\n args = {k: _parse_value(v) for k, v in args.items() if v is not None}\n\n try:\n gl = gitlab.Gitlab.from_config(gitlab_id, config_files)\n if gl.private_token or gl.oauth_token:\n gl.auth()\n except Exception as e:\n die(str(e))\n\n if debug:\n gl.enable_debug()\n\n cli_module.run(gl, what, action, args, verbose, output, fields)\n\n sys.exit(0)\n", "path": "gitlab/cli.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import print_function\nimport argparse\nimport functools\nimport importlib\nimport re\nimport sys\n\nimport gitlab.config\n\ncamel_re = re.compile('(.)([A-Z])')\n\n# custom_actions = {\n# cls: {\n# action: (mandatory_args, optional_args, in_obj),\n# },\n# }\ncustom_actions = {}\n\n\ndef register_custom_action(cls_names, mandatory=tuple(), optional=tuple()):\n def wrap(f):\n @functools.wraps(f)\n def wrapped_f(*args, **kwargs):\n return f(*args, **kwargs)\n\n # in_obj defines whether the method belongs to the obj or the manager\n in_obj = True\n classes = cls_names\n if type(cls_names) != tuple:\n classes = (cls_names, )\n\n for cls_name in classes:\n final_name = cls_name\n if cls_name.endswith('Manager'):\n final_name = cls_name.replace('Manager', '')\n in_obj = False\n if final_name not in custom_actions:\n custom_actions[final_name] = {}\n\n action = f.__name__.replace('_', '-')\n custom_actions[final_name][action] = (mandatory, optional, in_obj)\n\n return wrapped_f\n return wrap\n\n\ndef die(msg, e=None):\n if e:\n msg = \"%s (%s)\" % (msg, e)\n sys.stderr.write(msg + \"\\n\")\n sys.exit(1)\n\n\ndef what_to_cls(what):\n return \"\".join([s.capitalize() for s in what.split(\"-\")])\n\n\ndef cls_to_what(cls):\n return camel_re.sub(r'\\1-\\2', cls.__name__).lower()\n\n\ndef _get_base_parser(add_help=True):\n parser = argparse.ArgumentParser(\n add_help=add_help,\n description=\"GitLab API Command Line Interface\")\n parser.add_argument(\"--version\", help=\"Display the version.\",\n action=\"store_true\")\n parser.add_argument(\"-v\", \"--verbose\", \"--fancy\",\n help=\"Verbose mode (legacy format only)\",\n action=\"store_true\")\n parser.add_argument(\"-d\", \"--debug\",\n help=\"Debug mode (display HTTP requests)\",\n action=\"store_true\")\n parser.add_argument(\"-c\", \"--config-file\", action='append',\n help=(\"Configuration file to use. Can be used \"\n \"multiple times.\"))\n parser.add_argument(\"-g\", \"--gitlab\",\n help=(\"Which configuration section should \"\n \"be used. If not defined, the default selection \"\n \"will be used.\"),\n required=False)\n parser.add_argument(\"-o\", \"--output\",\n help=\"Output format (v4 only): json|legacy|yaml\",\n required=False,\n choices=['json', 'legacy', 'yaml'],\n default=\"legacy\")\n parser.add_argument(\"-f\", \"--fields\",\n help=(\"Fields to display in the output (comma \"\n \"separated). Not used with legacy output\"),\n required=False)\n\n return parser\n\n\ndef _get_parser(cli_module):\n parser = _get_base_parser()\n return cli_module.extend_parser(parser)\n\n\ndef _parse_value(v):\n if isinstance(v, str) and v.startswith('@'):\n # If the user-provided value starts with @, we try to read the file\n # path provided after @ as the real value. Exit on any error.\n try:\n with open(v[1:]) as fl:\n return fl.read()\n except Exception as e:\n sys.stderr.write(\"%s\\n\" % e)\n sys.exit(1)\n\n return v\n\n\ndef main():\n if \"--version\" in sys.argv:\n print(gitlab.__version__)\n exit(0)\n\n parser = _get_base_parser(add_help=False)\n if \"--help\" in sys.argv or \"-h\" in sys.argv:\n parser.print_help()\n exit(0)\n\n # This first parsing step is used to find the gitlab config to use, and\n # load the propermodule (v3 or v4) accordingly. 
At that point we don't have\n # any subparser setup\n (options, args) = parser.parse_known_args(sys.argv)\n\n config = gitlab.config.GitlabConfigParser(options.gitlab,\n options.config_file)\n cli_module = importlib.import_module('gitlab.v%s.cli' % config.api_version)\n\n # Now we build the entire set of subcommands and do the complete parsing\n parser = _get_parser(cli_module)\n args = parser.parse_args(sys.argv[1:])\n\n config_files = args.config_file\n gitlab_id = args.gitlab\n verbose = args.verbose\n output = args.output\n fields = []\n if args.fields:\n fields = [x.strip() for x in args.fields.split(',')]\n debug = args.debug\n action = args.action\n what = args.what\n\n args = args.__dict__\n # Remove CLI behavior-related args\n for item in ('gitlab', 'config_file', 'verbose', 'debug', 'what', 'action',\n 'version', 'output'):\n args.pop(item)\n args = {k: _parse_value(v) for k, v in args.items() if v is not None}\n\n try:\n gl = gitlab.Gitlab.from_config(gitlab_id, config_files)\n if gl.private_token or gl.oauth_token:\n gl.auth()\n except Exception as e:\n die(str(e))\n\n if debug:\n gl.enable_debug()\n\n cli_module.run(gl, what, action, args, verbose, output, fields)\n\n sys.exit(0)\n", "path": "gitlab/cli.py"}]}
num_tokens: 2,145
num_tokens_diff: 225

problem_id: gh_patches_debug_19870
source: rasdani/github-patches
task_type: git_diff
in_source_id: Parsl__parsl-389
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- parsl installed from pip tries to determine its version using git Every time I run parsl I get: ``` kacperk@dxl1: /dpool/kacperk/arxiv $ python scraper_parsl.py fatal: Not a git repository: '/home/kacperk/.local/lib/python3.6/site-packages/.git' ``` and in logs: ``` 2018-07-15 12:54:06 parsl.utils:24 [ERROR] Unable to determine code state Traceback (most recent call last): File "/home/kacperk/.local/lib/python3.6/site-packages/parsl/utils.py", line 19, in get_version head = subprocess.check_output(cmd, env=env).strip().decode('utf-8') File "/home/kacperk/miniconda3/lib/python3.6/subprocess.py", line 336, in check_output **kwargs).stdout File "/home/kacperk/miniconda3/lib/python3.6/subprocess.py", line 418, in run output=stdout, stderr=stderr) subprocess.CalledProcessError: Command '['git', 'rev-parse', '--short', 'HEAD']' returned non-zero exit status 128. ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `parsl/utils.py` Content: ``` 1 import logging 2 import os 3 import shlex 4 import subprocess 5 import threading 6 import time 7 from contextlib import contextmanager 8 from functools import wraps 9 10 import parsl 11 from parsl.version import VERSION 12 13 logger = logging.getLogger(__name__) 14 15 16 def get_version(): 17 version = parsl.__version__ 18 work_tree = os.path.dirname(os.path.dirname(__file__)) 19 git_dir = os.path.join(work_tree, '.git') 20 env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir} 21 try: 22 cmd = shlex.split('git rev-parse --short HEAD') 23 head = subprocess.check_output(cmd, env=env).strip().decode('utf-8') 24 diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env) 25 status = 'dirty' if diff else 'clean' 26 version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status) 27 except Exception as e: 28 pass 29 30 return version 31 32 33 def get_all_checkpoints(rundir="runinfo"): 34 """Finds the checkpoints from all last runs. 35 36 Note that checkpoints are incremental, and this helper will not find 37 previous checkpoints from earlier than the most recent run. It probably 38 should be made to do so. 39 40 Kwargs: 41 - rundir(str) : Path to the runinfo directory 42 43 Returns: 44 - a list suitable for the checkpointFiles parameter of DataFlowKernel 45 constructor 46 47 """ 48 49 if(not(os.path.isdir(rundir))): 50 return [] 51 52 dirs = sorted(os.listdir(rundir)) 53 54 checkpoints = [] 55 56 for runid in dirs: 57 58 checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid)) 59 60 if(os.path.isdir(checkpoint)): 61 checkpoints.append(checkpoint) 62 63 return checkpoints 64 65 66 def get_last_checkpoint(rundir="runinfo"): 67 """Finds the checkpoint from the last run, if one exists. 68 69 Note that checkpoints are incremental, and this helper will not find 70 previous checkpoints from earlier than the most recent run. It probably 71 should be made to do so. 
72 73 Kwargs: 74 - rundir(str) : Path to the runinfo directory 75 76 Returns: 77 - a list suitable for checkpointFiles parameter of DataFlowKernel 78 constructor, with 0 or 1 elements 79 80 """ 81 82 if(not(os.path.isdir(rundir))): 83 return [] 84 85 dirs = sorted(os.listdir(rundir)) 86 87 if(len(dirs) == 0): 88 return [] 89 90 last_runid = dirs[-1] 91 last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid)) 92 93 if(not(os.path.isdir(last_checkpoint))): 94 return [] 95 96 return [last_checkpoint] 97 98 99 def timeout(seconds=None): 100 def decorator(func, *args, **kwargs): 101 @wraps(func) 102 def wrapper(*args, **kwargs): 103 t = threading.Thread(target=func, args=args, kwargs=kwargs) 104 t.start() 105 result = t.join(seconds) 106 if t.is_alive(): 107 raise RuntimeError('timed out in {}'.format(func)) 108 return result 109 return wrapper 110 return decorator 111 112 113 @contextmanager 114 def wait_for_file(path, seconds=10): 115 for i in range(0, int(seconds * 100)): 116 time.sleep(seconds / 100.) 117 if os.path.exists(path): 118 break 119 yield 120 121 122 @contextmanager 123 def time_limited_open(path, mode, seconds=1): 124 wait_for_file(path, seconds) 125 126 f = open(path, mode) 127 yield f 128 f.close() 129 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/parsl/utils.py b/parsl/utils.py --- a/parsl/utils.py +++ b/parsl/utils.py @@ -17,15 +17,16 @@ version = parsl.__version__ work_tree = os.path.dirname(os.path.dirname(__file__)) git_dir = os.path.join(work_tree, '.git') - env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir} - try: - cmd = shlex.split('git rev-parse --short HEAD') - head = subprocess.check_output(cmd, env=env).strip().decode('utf-8') - diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env) - status = 'dirty' if diff else 'clean' - version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status) - except Exception as e: - pass + if os.path.exists(git_dir): + env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir} + try: + cmd = shlex.split('git rev-parse --short HEAD') + head = subprocess.check_output(cmd, env=env).strip().decode('utf-8') + diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env) + status = 'dirty' if diff else 'clean' + version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status) + except Exception as e: + pass return version
{"golden_diff": "diff --git a/parsl/utils.py b/parsl/utils.py\n--- a/parsl/utils.py\n+++ b/parsl/utils.py\n@@ -17,15 +17,16 @@\n version = parsl.__version__\n work_tree = os.path.dirname(os.path.dirname(__file__))\n git_dir = os.path.join(work_tree, '.git')\n- env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n- try:\n- cmd = shlex.split('git rev-parse --short HEAD')\n- head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n- diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n- status = 'dirty' if diff else 'clean'\n- version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n- except Exception as e:\n- pass\n+ if os.path.exists(git_dir):\n+ env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n+ try:\n+ cmd = shlex.split('git rev-parse --short HEAD')\n+ head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n+ diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n+ status = 'dirty' if diff else 'clean'\n+ version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n+ except Exception as e:\n+ pass\n \n return version\n", "issue": "parsl installed from pip tries to determine its version using git\nEvery time I run parsl I get:\r\n\r\n```\r\nkacperk@dxl1: /dpool/kacperk/arxiv $ python scraper_parsl.py\r\nfatal: Not a git repository: '/home/kacperk/.local/lib/python3.6/site-packages/.git'\r\n```\r\n\r\nand in logs:\r\n\r\n```\r\n2018-07-15 12:54:06 parsl.utils:24 [ERROR] Unable to determine code state\r\nTraceback (most recent call last):\r\n File \"/home/kacperk/.local/lib/python3.6/site-packages/parsl/utils.py\", line 19, in get_version\r\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\r\n File \"/home/kacperk/miniconda3/lib/python3.6/subprocess.py\", line 336, in check_output\r\n **kwargs).stdout\r\n File \"/home/kacperk/miniconda3/lib/python3.6/subprocess.py\", line 418, in run\r\n output=stdout, stderr=stderr)\r\nsubprocess.CalledProcessError: Command '['git', 'rev-parse', '--short', 'HEAD']' returned non-zero exit status 128.\r\n```\n", "before_files": [{"content": "import logging\nimport os\nimport shlex\nimport subprocess\nimport threading\nimport time\nfrom contextlib import contextmanager\nfrom functools import wraps\n\nimport parsl\nfrom parsl.version import VERSION\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_version():\n version = parsl.__version__\n work_tree = os.path.dirname(os.path.dirname(__file__))\n git_dir = os.path.join(work_tree, '.git')\n env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n try:\n cmd = shlex.split('git rev-parse --short HEAD')\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n status = 'dirty' if diff else 'clean'\n version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n except Exception as e:\n pass\n\n return version\n\n\ndef get_all_checkpoints(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoints from all last runs.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. 
It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for the checkpointFiles parameter of DataFlowKernel\n constructor\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n checkpoints = []\n\n for runid in dirs:\n\n checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))\n\n if(os.path.isdir(checkpoint)):\n checkpoints.append(checkpoint)\n\n return checkpoints\n\n\ndef get_last_checkpoint(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoint from the last run, if one exists.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for checkpointFiles parameter of DataFlowKernel\n constructor, with 0 or 1 elements\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n if(len(dirs) == 0):\n return []\n\n last_runid = dirs[-1]\n last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))\n\n if(not(os.path.isdir(last_checkpoint))):\n return []\n\n return [last_checkpoint]\n\n\ndef timeout(seconds=None):\n def decorator(func, *args, **kwargs):\n @wraps(func)\n def wrapper(*args, **kwargs):\n t = threading.Thread(target=func, args=args, kwargs=kwargs)\n t.start()\n result = t.join(seconds)\n if t.is_alive():\n raise RuntimeError('timed out in {}'.format(func))\n return result\n return wrapper\n return decorator\n\n\n@contextmanager\ndef wait_for_file(path, seconds=10):\n for i in range(0, int(seconds * 100)):\n time.sleep(seconds / 100.)\n if os.path.exists(path):\n break\n yield\n\n\n@contextmanager\ndef time_limited_open(path, mode, seconds=1):\n wait_for_file(path, seconds)\n\n f = open(path, mode)\n yield f\n f.close()\n", "path": "parsl/utils.py"}], "after_files": [{"content": "import logging\nimport os\nimport shlex\nimport subprocess\nimport threading\nimport time\nfrom contextlib import contextmanager\nfrom functools import wraps\n\nimport parsl\nfrom parsl.version import VERSION\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_version():\n version = parsl.__version__\n work_tree = os.path.dirname(os.path.dirname(__file__))\n git_dir = os.path.join(work_tree, '.git')\n if os.path.exists(git_dir):\n env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n try:\n cmd = shlex.split('git rev-parse --short HEAD')\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n status = 'dirty' if diff else 'clean'\n version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n except Exception as e:\n pass\n\n return version\n\n\ndef get_all_checkpoints(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoints from all last runs.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. 
It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for the checkpointFiles parameter of DataFlowKernel\n constructor\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n checkpoints = []\n\n for runid in dirs:\n\n checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))\n\n if(os.path.isdir(checkpoint)):\n checkpoints.append(checkpoint)\n\n return checkpoints\n\n\ndef get_last_checkpoint(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoint from the last run, if one exists.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for checkpointFiles parameter of DataFlowKernel\n constructor, with 0 or 1 elements\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n if(len(dirs) == 0):\n return []\n\n last_runid = dirs[-1]\n last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))\n\n if(not(os.path.isdir(last_checkpoint))):\n return []\n\n return [last_checkpoint]\n\n\ndef timeout(seconds=None):\n def decorator(func, *args, **kwargs):\n @wraps(func)\n def wrapper(*args, **kwargs):\n t = threading.Thread(target=func, args=args, kwargs=kwargs)\n t.start()\n result = t.join(seconds)\n if t.is_alive():\n raise RuntimeError('timed out in {}'.format(func))\n return result\n return wrapper\n return decorator\n\n\n@contextmanager\ndef wait_for_file(path, seconds=10):\n for i in range(0, int(seconds * 100)):\n time.sleep(seconds / 100.)\n if os.path.exists(path):\n break\n yield\n\n\n@contextmanager\ndef time_limited_open(path, mode, seconds=1):\n wait_for_file(path, seconds)\n\n f = open(path, mode)\n yield f\n f.close()\n", "path": "parsl/utils.py"}]}
num_tokens: 1,615
num_tokens_diff: 342

problem_id: gh_patches_debug_13403
source: rasdani/github-patches
task_type: git_diff
in_source_id: chainer__chainer-267
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Maybe there is a bug in chainer.optimizer.weight_decay(decay). In the [optimizer.py](https://github.com/pfnet/chainer/blob/30807b4f9f40177f592099663b86a6bb3eb1f867/chainer/optimizer.py) file, `weight_decay` is implemented as following: ``` python def weight_decay(self, decay): for p, g, _ in self.tuples: g -= decay * p ``` However, the gradient is decayed, not the weight. So if we update the weights `param -= lr * grad`, because the grad becomes smaller, the param (weight) becomes larger, which is not we want. As far as I know the regularized gradient decent is `p = p * (1-decay) - lr * grad`, so I think the weight should be decayed not the gradient. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `chainer/optimizer.py` Content: ``` 1 import math 2 3 import numpy 4 5 from chainer import cuda 6 7 8 # TODO(delta2323): Make it public function and move it to common directory. 9 10 11 def _sqnorm(x): 12 if isinstance(x, cuda.GPUArray): 13 with cuda.using_device(x): 14 return float(cuda.gpuarray.dot(x, x).get()) 15 x = x.ravel() 16 return float(x.dot(x)) 17 18 19 class Optimizer(object): 20 21 """Base class of all numerical optimizers. 22 23 Optimizer is set up with references to parameters and gradients, and 24 then on every call of :meth:`update`, it updates parameters based on 25 corresponding gradients. Optimizer implementations must override 26 :meth:`update_one` method, which updates one parameter array using the 27 corresponding gradient array. 28 29 Optimizer can optionally use state for each parameter/gradient pair. It is 30 initialized by :meth:`init_state` method at set up. 31 32 Attributes: 33 t (int): Number of update steps. It can be used in :meth:`update_one` 34 implementation, where :attr:`t` is incremented beforehand. 35 36 """ 37 38 def setup(self, params_grads): 39 """Prepares states for all given parameter/gradient pairs. 40 41 Args: 42 params_grads: Tuple (pair) of two tuples. The first element is a 43 tuple of parameter arrays, and the second is a tuple of 44 corresponding gradient arrays. 45 Return value of :meth:`FunctionSet.collect_parameters` method 46 can be used. 47 48 """ 49 self.t = 0 50 self.tuples = [] 51 for p, g in zip(*params_grads): 52 with cuda.using_device(p): 53 state = self.init_state(p, g) 54 self.tuples.append((p, g, state)) 55 56 def init_state(self, param, grad): 57 """Returns the initial state for given parameter and gradient. 58 59 Default implementation delegates the procedure to 60 :meth:`init_state_cpu` or :meth:`init_state_gpu` depending on the type 61 of ``param``. 62 63 Args: 64 param: Parameter array. 65 grad: Gradient array corresponding to ``param``. 66 67 Returns: 68 Initial state value. 69 70 .. warning:: 71 72 Note that, on every call of :meth:`update_one`, the state value 73 is passed by value and then the method updates its content, so 74 the state must be a reference. Especiallly, one cannot use a 75 value of built-in numeric type. If the state is one scalar 76 value, it is recommended to use scalar array, i.e. 77 :class:`~numpy.ndarray` with shape ``()``. 78 79 """ 80 if isinstance(param, cuda.GPUArray): 81 return self.init_state_gpu(param, grad) 82 return self.init_state_cpu(param, grad) 83 84 def init_state_cpu(self, param, grad): 85 """Returns the initial state for given parameter and gradient on GPU. 
86 87 Args: 88 param (~numpy.ndarray): Parameter array. 89 grad (~numpy.ndarray): Gradient array. 90 91 Returns: 92 Initial state value. 93 94 .. seealso:: :meth:`init_state`, :meth:`init_state_gpu` 95 96 """ 97 return None 98 99 def init_state_gpu(self, param, grad): 100 """Returns the initial state for given parameter and gradient on CPU. 101 102 Args: 103 param (~pycuda.gpuarray.GPUArray): Parameter array. 104 grad (~pycuda.gpuarray.GPUArray): Gradient array. 105 106 Returns: 107 Initial state value. 108 109 .. seealso:: :meth:`init_state`, :meth:`init_state_gpu` 110 111 """ 112 return None 113 114 def zero_grads(self): 115 """Fills all gradient arrays by zeros. 116 117 This method should be call before backprop takes place, since 118 gradients are accumulated on backprop. 119 120 """ 121 for _, g, _ in self.tuples: 122 if isinstance(g, cuda.GPUArray): 123 with cuda.using_device(g): 124 g.fill(0) 125 else: 126 g.fill(0) 127 128 def compute_grads_norm(self): 129 """Computes the norm of whole gradients. 130 131 Returns: 132 float: L2 norm of whole gradients, i.e. square root of sum of 133 square of all gradient elements. 134 135 .. warning:: 136 137 This method returns a CPU-computed value, which means that this 138 method synchronizes between CPU and GPU if at least one of the 139 gradients reside on the GPU. 140 141 """ 142 # TODO(beam2d): Make it asynchronous to CPU when gradients exist on GPU 143 sqnorm = 0 144 for _, g, _ in self.tuples: 145 sqnorm += _sqnorm(g) 146 return math.sqrt(sqnorm) 147 148 def clip_grads(self, maxnorm): 149 """Clips the norm of whole gradients up to given threshold. 150 151 Args: 152 maxnorm (float): Threshold of gradient L2 norm. 153 154 .. seealso:: 155 156 :meth:`compute_grads_norm` 157 It uses this method to compute the gradient norm to be clipped. 158 159 """ 160 norm = self.compute_grads_norm() 161 if norm > maxnorm: 162 ratio = maxnorm / norm 163 for _, g, _ in self.tuples: 164 with cuda.using_device(g): 165 g *= ratio 166 167 def weight_decay(self, decay): 168 """Applies weight decay to the parameter/gradient pairs. 169 170 Args: 171 decay (float): Coefficient of weight decay 172 173 """ 174 for p, g, _ in self.tuples: 175 if isinstance(p, cuda.GPUArray): 176 with cuda.using_device(p): 177 cuda.elementwise('float* g, const float* p, float decay', 178 'g[i] -= decay * p[i]', 179 'weight_decay')(g, p, decay) 180 else: 181 g -= decay * p 182 183 def accumulate_grads(self, grads): 184 """Accumulates gradients from other source. 185 186 This method just adds given gradient arrays to gradients that this 187 optimizer holds. It is typically used in data-parallel optimization, 188 where gradients for different shards are computed in parallel and 189 aggregated by this method. This method correctly treats multiple GPU 190 devices. 191 192 Args: 193 grads (Iterable): Iterable of gradient arrays to be accumulated. 194 195 """ 196 for (_, g_dst, _), g_src in zip(self.tuples, grads): 197 if isinstance(g_dst, numpy.ndarray): 198 g_dst += cuda.to_cpu(g_src) 199 continue 200 201 with cuda.using_device(g_dst): 202 if (isinstance(g_src, cuda.GPUArray) and 203 g_dst.gpudata.device != g_src.gpudata.device): 204 g_dst += cuda.copy(g_src, out_device=g_src.gpudata.device) 205 else: 206 g_dst += cuda.to_gpu(g_src) 207 208 def update(self): 209 """Updates all parameters and states using corresponding gradients. 210 211 This method iteratively calls :meth:`update_one` for each parameter/ 212 gradient/state tuple. Beforehand, :attr:`t` attribute is incremented. 
213 214 """ 215 self.t += 1 216 for p, g, s in self.tuples: 217 with cuda.using_device(p): 218 self.update_one(p, g, s) 219 220 def update_one(self, param, grad, state): 221 """Updates a parameter array and its state using given gradient. 222 223 The default implementation delegates the procedure to 224 :meth:`update_one_cpu` or :meth:`update_one_gpu` depending on the type 225 of the parameter array. Optimizer implmentation must override these 226 type-specific methods or this :meth:`update_one` method directly. 227 228 Args: 229 param: Parameter array. 230 grad: Gradient array. 231 state: State value. 232 233 .. seealso:: :meth:`update_one_cpu`, :meth:`update_one_gpu` 234 235 """ 236 if isinstance(param, cuda.GPUArray): 237 self.update_one_gpu(param, grad, state) 238 else: 239 self.update_one_cpu(param, grad, state) 240 241 def update_one_cpu(self, param, grad, state): 242 """Updates a parameter array and its state using given gradient on CPU. 243 244 Args: 245 param (~numpy.ndarray): Parameter array. 246 grad (~numpy.ndarray): Gradient array. 247 state: State value. 248 249 .. seealso:: :meth:`update_one`, :meth:`update_one_gpu` 250 251 """ 252 raise NotImplementedError() 253 254 def update_one_gpu(self, param, grad, state): 255 """Updates a parameter array and its state using given gradient on GPU. 256 257 Args: 258 param (~pycuda.gpuarray.GPUArray): Parameter array. 259 grad (~pycuda.gpuarray.GPUArray): Gradient array. 260 state: State value. 261 262 .. seealso:: :meth:`update_one`, :meth:`update_one_cpu` 263 264 """ 265 raise NotImplementedError() 266 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/chainer/optimizer.py b/chainer/optimizer.py --- a/chainer/optimizer.py +++ b/chainer/optimizer.py @@ -175,10 +175,10 @@ if isinstance(p, cuda.GPUArray): with cuda.using_device(p): cuda.elementwise('float* g, const float* p, float decay', - 'g[i] -= decay * p[i]', + 'g[i] += decay * p[i]', 'weight_decay')(g, p, decay) else: - g -= decay * p + g += decay * p def accumulate_grads(self, grads): """Accumulates gradients from other source.
{"golden_diff": "diff --git a/chainer/optimizer.py b/chainer/optimizer.py\n--- a/chainer/optimizer.py\n+++ b/chainer/optimizer.py\n@@ -175,10 +175,10 @@\n if isinstance(p, cuda.GPUArray):\n with cuda.using_device(p):\n cuda.elementwise('float* g, const float* p, float decay',\n- 'g[i] -= decay * p[i]',\n+ 'g[i] += decay * p[i]',\n 'weight_decay')(g, p, decay)\n else:\n- g -= decay * p\n+ g += decay * p\n \n def accumulate_grads(self, grads):\n \"\"\"Accumulates gradients from other source.\n", "issue": "Maybe there is a bug in chainer.optimizer.weight_decay(decay).\nIn the [optimizer.py](https://github.com/pfnet/chainer/blob/30807b4f9f40177f592099663b86a6bb3eb1f867/chainer/optimizer.py) file, `weight_decay` is implemented as following:\n\n``` python\n def weight_decay(self, decay):\n for p, g, _ in self.tuples:\n g -= decay * p\n```\n\nHowever, the gradient is decayed, not the weight. So if we update the weights `param -= lr * grad`, because the grad becomes smaller, the param (weight) becomes larger, which is not we want. \n\nAs far as I know the regularized gradient decent is `p = p * (1-decay) - lr * grad`, so I think the weight should be decayed not the gradient.\n\n", "before_files": [{"content": "import math\n\nimport numpy\n\nfrom chainer import cuda\n\n\n# TODO(delta2323): Make it public function and move it to common directory.\n\n\ndef _sqnorm(x):\n if isinstance(x, cuda.GPUArray):\n with cuda.using_device(x):\n return float(cuda.gpuarray.dot(x, x).get())\n x = x.ravel()\n return float(x.dot(x))\n\n\nclass Optimizer(object):\n\n \"\"\"Base class of all numerical optimizers.\n\n Optimizer is set up with references to parameters and gradients, and\n then on every call of :meth:`update`, it updates parameters based on\n corresponding gradients. Optimizer implementations must override\n :meth:`update_one` method, which updates one parameter array using the\n corresponding gradient array.\n\n Optimizer can optionally use state for each parameter/gradient pair. It is\n initialized by :meth:`init_state` method at set up.\n\n Attributes:\n t (int): Number of update steps. It can be used in :meth:`update_one`\n implementation, where :attr:`t` is incremented beforehand.\n\n \"\"\"\n\n def setup(self, params_grads):\n \"\"\"Prepares states for all given parameter/gradient pairs.\n\n Args:\n params_grads: Tuple (pair) of two tuples. The first element is a\n tuple of parameter arrays, and the second is a tuple of\n corresponding gradient arrays.\n Return value of :meth:`FunctionSet.collect_parameters` method\n can be used.\n\n \"\"\"\n self.t = 0\n self.tuples = []\n for p, g in zip(*params_grads):\n with cuda.using_device(p):\n state = self.init_state(p, g)\n self.tuples.append((p, g, state))\n\n def init_state(self, param, grad):\n \"\"\"Returns the initial state for given parameter and gradient.\n\n Default implementation delegates the procedure to\n :meth:`init_state_cpu` or :meth:`init_state_gpu` depending on the type\n of ``param``.\n\n Args:\n param: Parameter array.\n grad: Gradient array corresponding to ``param``.\n\n Returns:\n Initial state value.\n\n .. warning::\n\n Note that, on every call of :meth:`update_one`, the state value\n is passed by value and then the method updates its content, so\n the state must be a reference. Especiallly, one cannot use a\n value of built-in numeric type. 
If the state is one scalar\n value, it is recommended to use scalar array, i.e.\n :class:`~numpy.ndarray` with shape ``()``.\n\n \"\"\"\n if isinstance(param, cuda.GPUArray):\n return self.init_state_gpu(param, grad)\n return self.init_state_cpu(param, grad)\n\n def init_state_cpu(self, param, grad):\n \"\"\"Returns the initial state for given parameter and gradient on GPU.\n\n Args:\n param (~numpy.ndarray): Parameter array.\n grad (~numpy.ndarray): Gradient array.\n\n Returns:\n Initial state value.\n\n .. seealso:: :meth:`init_state`, :meth:`init_state_gpu`\n\n \"\"\"\n return None\n\n def init_state_gpu(self, param, grad):\n \"\"\"Returns the initial state for given parameter and gradient on CPU.\n\n Args:\n param (~pycuda.gpuarray.GPUArray): Parameter array.\n grad (~pycuda.gpuarray.GPUArray): Gradient array.\n\n Returns:\n Initial state value.\n\n .. seealso:: :meth:`init_state`, :meth:`init_state_gpu`\n\n \"\"\"\n return None\n\n def zero_grads(self):\n \"\"\"Fills all gradient arrays by zeros.\n\n This method should be call before backprop takes place, since\n gradients are accumulated on backprop.\n\n \"\"\"\n for _, g, _ in self.tuples:\n if isinstance(g, cuda.GPUArray):\n with cuda.using_device(g):\n g.fill(0)\n else:\n g.fill(0)\n\n def compute_grads_norm(self):\n \"\"\"Computes the norm of whole gradients.\n\n Returns:\n float: L2 norm of whole gradients, i.e. square root of sum of\n square of all gradient elements.\n\n .. warning::\n\n This method returns a CPU-computed value, which means that this\n method synchronizes between CPU and GPU if at least one of the\n gradients reside on the GPU.\n\n \"\"\"\n # TODO(beam2d): Make it asynchronous to CPU when gradients exist on GPU\n sqnorm = 0\n for _, g, _ in self.tuples:\n sqnorm += _sqnorm(g)\n return math.sqrt(sqnorm)\n\n def clip_grads(self, maxnorm):\n \"\"\"Clips the norm of whole gradients up to given threshold.\n\n Args:\n maxnorm (float): Threshold of gradient L2 norm.\n\n .. seealso::\n\n :meth:`compute_grads_norm`\n It uses this method to compute the gradient norm to be clipped.\n\n \"\"\"\n norm = self.compute_grads_norm()\n if norm > maxnorm:\n ratio = maxnorm / norm\n for _, g, _ in self.tuples:\n with cuda.using_device(g):\n g *= ratio\n\n def weight_decay(self, decay):\n \"\"\"Applies weight decay to the parameter/gradient pairs.\n\n Args:\n decay (float): Coefficient of weight decay\n\n \"\"\"\n for p, g, _ in self.tuples:\n if isinstance(p, cuda.GPUArray):\n with cuda.using_device(p):\n cuda.elementwise('float* g, const float* p, float decay',\n 'g[i] -= decay * p[i]',\n 'weight_decay')(g, p, decay)\n else:\n g -= decay * p\n\n def accumulate_grads(self, grads):\n \"\"\"Accumulates gradients from other source.\n\n This method just adds given gradient arrays to gradients that this\n optimizer holds. It is typically used in data-parallel optimization,\n where gradients for different shards are computed in parallel and\n aggregated by this method. 
This method correctly treats multiple GPU\n devices.\n\n Args:\n grads (Iterable): Iterable of gradient arrays to be accumulated.\n\n \"\"\"\n for (_, g_dst, _), g_src in zip(self.tuples, grads):\n if isinstance(g_dst, numpy.ndarray):\n g_dst += cuda.to_cpu(g_src)\n continue\n\n with cuda.using_device(g_dst):\n if (isinstance(g_src, cuda.GPUArray) and\n g_dst.gpudata.device != g_src.gpudata.device):\n g_dst += cuda.copy(g_src, out_device=g_src.gpudata.device)\n else:\n g_dst += cuda.to_gpu(g_src)\n\n def update(self):\n \"\"\"Updates all parameters and states using corresponding gradients.\n\n This method iteratively calls :meth:`update_one` for each parameter/\n gradient/state tuple. Beforehand, :attr:`t` attribute is incremented.\n\n \"\"\"\n self.t += 1\n for p, g, s in self.tuples:\n with cuda.using_device(p):\n self.update_one(p, g, s)\n\n def update_one(self, param, grad, state):\n \"\"\"Updates a parameter array and its state using given gradient.\n\n The default implementation delegates the procedure to\n :meth:`update_one_cpu` or :meth:`update_one_gpu` depending on the type\n of the parameter array. Optimizer implmentation must override these\n type-specific methods or this :meth:`update_one` method directly.\n\n Args:\n param: Parameter array.\n grad: Gradient array.\n state: State value.\n\n .. seealso:: :meth:`update_one_cpu`, :meth:`update_one_gpu`\n\n \"\"\"\n if isinstance(param, cuda.GPUArray):\n self.update_one_gpu(param, grad, state)\n else:\n self.update_one_cpu(param, grad, state)\n\n def update_one_cpu(self, param, grad, state):\n \"\"\"Updates a parameter array and its state using given gradient on CPU.\n\n Args:\n param (~numpy.ndarray): Parameter array.\n grad (~numpy.ndarray): Gradient array.\n state: State value.\n\n .. seealso:: :meth:`update_one`, :meth:`update_one_gpu`\n\n \"\"\"\n raise NotImplementedError()\n\n def update_one_gpu(self, param, grad, state):\n \"\"\"Updates a parameter array and its state using given gradient on GPU.\n\n Args:\n param (~pycuda.gpuarray.GPUArray): Parameter array.\n grad (~pycuda.gpuarray.GPUArray): Gradient array.\n state: State value.\n\n .. seealso:: :meth:`update_one`, :meth:`update_one_cpu`\n\n \"\"\"\n raise NotImplementedError()\n", "path": "chainer/optimizer.py"}], "after_files": [{"content": "import math\n\nimport numpy\n\nfrom chainer import cuda\n\n\n# TODO(delta2323): Make it public function and move it to common directory.\n\n\ndef _sqnorm(x):\n if isinstance(x, cuda.GPUArray):\n with cuda.using_device(x):\n return float(cuda.gpuarray.dot(x, x).get())\n x = x.ravel()\n return float(x.dot(x))\n\n\nclass Optimizer(object):\n\n \"\"\"Base class of all numerical optimizers.\n\n Optimizer is set up with references to parameters and gradients, and\n then on every call of :meth:`update`, it updates parameters based on\n corresponding gradients. Optimizer implementations must override\n :meth:`update_one` method, which updates one parameter array using the\n corresponding gradient array.\n\n Optimizer can optionally use state for each parameter/gradient pair. It is\n initialized by :meth:`init_state` method at set up.\n\n Attributes:\n t (int): Number of update steps. It can be used in :meth:`update_one`\n implementation, where :attr:`t` is incremented beforehand.\n\n \"\"\"\n\n def setup(self, params_grads):\n \"\"\"Prepares states for all given parameter/gradient pairs.\n\n Args:\n params_grads: Tuple (pair) of two tuples. 
The first element is a\n tuple of parameter arrays, and the second is a tuple of\n corresponding gradient arrays.\n Return value of :meth:`FunctionSet.collect_parameters` method\n can be used.\n\n \"\"\"\n self.t = 0\n self.tuples = []\n for p, g in zip(*params_grads):\n with cuda.using_device(p):\n state = self.init_state(p, g)\n self.tuples.append((p, g, state))\n\n def init_state(self, param, grad):\n \"\"\"Returns the initial state for given parameter and gradient.\n\n Default implementation delegates the procedure to\n :meth:`init_state_cpu` or :meth:`init_state_gpu` depending on the type\n of ``param``.\n\n Args:\n param: Parameter array.\n grad: Gradient array corresponding to ``param``.\n\n Returns:\n Initial state value.\n\n .. warning::\n\n Note that, on every call of :meth:`update_one`, the state value\n is passed by value and then the method updates its content, so\n the state must be a reference. Especiallly, one cannot use a\n value of built-in numeric type. If the state is one scalar\n value, it is recommended to use scalar array, i.e.\n :class:`~numpy.ndarray` with shape ``()``.\n\n \"\"\"\n if isinstance(param, cuda.GPUArray):\n return self.init_state_gpu(param, grad)\n return self.init_state_cpu(param, grad)\n\n def init_state_cpu(self, param, grad):\n \"\"\"Returns the initial state for given parameter and gradient on GPU.\n\n Args:\n param (~numpy.ndarray): Parameter array.\n grad (~numpy.ndarray): Gradient array.\n\n Returns:\n Initial state value.\n\n .. seealso:: :meth:`init_state`, :meth:`init_state_gpu`\n\n \"\"\"\n return None\n\n def init_state_gpu(self, param, grad):\n \"\"\"Returns the initial state for given parameter and gradient on CPU.\n\n Args:\n param (~pycuda.gpuarray.GPUArray): Parameter array.\n grad (~pycuda.gpuarray.GPUArray): Gradient array.\n\n Returns:\n Initial state value.\n\n .. seealso:: :meth:`init_state`, :meth:`init_state_gpu`\n\n \"\"\"\n return None\n\n def zero_grads(self):\n \"\"\"Fills all gradient arrays by zeros.\n\n This method should be call before backprop takes place, since\n gradients are accumulated on backprop.\n\n \"\"\"\n for _, g, _ in self.tuples:\n if isinstance(g, cuda.GPUArray):\n with cuda.using_device(g):\n g.fill(0)\n else:\n g.fill(0)\n\n def compute_grads_norm(self):\n \"\"\"Computes the norm of whole gradients.\n\n Returns:\n float: L2 norm of whole gradients, i.e. square root of sum of\n square of all gradient elements.\n\n .. warning::\n\n This method returns a CPU-computed value, which means that this\n method synchronizes between CPU and GPU if at least one of the\n gradients reside on the GPU.\n\n \"\"\"\n # TODO(beam2d): Make it asynchronous to CPU when gradients exist on GPU\n sqnorm = 0\n for _, g, _ in self.tuples:\n sqnorm += _sqnorm(g)\n return math.sqrt(sqnorm)\n\n def clip_grads(self, maxnorm):\n \"\"\"Clips the norm of whole gradients up to given threshold.\n\n Args:\n maxnorm (float): Threshold of gradient L2 norm.\n\n .. 
seealso::\n\n :meth:`compute_grads_norm`\n It uses this method to compute the gradient norm to be clipped.\n\n \"\"\"\n norm = self.compute_grads_norm()\n if norm > maxnorm:\n ratio = maxnorm / norm\n for _, g, _ in self.tuples:\n with cuda.using_device(g):\n g *= ratio\n\n def weight_decay(self, decay):\n \"\"\"Applies weight decay to the parameter/gradient pairs.\n\n Args:\n decay (float): Coefficient of weight decay\n\n \"\"\"\n for p, g, _ in self.tuples:\n if isinstance(p, cuda.GPUArray):\n with cuda.using_device(p):\n cuda.elementwise('float* g, const float* p, float decay',\n 'g[i] += decay * p[i]',\n 'weight_decay')(g, p, decay)\n else:\n g += decay * p\n\n def accumulate_grads(self, grads):\n \"\"\"Accumulates gradients from other source.\n\n This method just adds given gradient arrays to gradients that this\n optimizer holds. It is typically used in data-parallel optimization,\n where gradients for different shards are computed in parallel and\n aggregated by this method. This method correctly treats multiple GPU\n devices.\n\n Args:\n grads (Iterable): Iterable of gradient arrays to be accumulated.\n\n \"\"\"\n for (_, g_dst, _), g_src in zip(self.tuples, grads):\n if isinstance(g_dst, numpy.ndarray):\n g_dst += cuda.to_cpu(g_src)\n continue\n\n with cuda.using_device(g_dst):\n if (isinstance(g_src, cuda.GPUArray) and\n g_dst.gpudata.device != g_src.gpudata.device):\n g_dst += cuda.copy(g_src, out_device=g_src.gpudata.device)\n else:\n g_dst += cuda.to_gpu(g_src)\n\n def update(self):\n \"\"\"Updates all parameters and states using corresponding gradients.\n\n This method iteratively calls :meth:`update_one` for each parameter/\n gradient/state tuple. Beforehand, :attr:`t` attribute is incremented.\n\n \"\"\"\n self.t += 1\n for p, g, s in self.tuples:\n with cuda.using_device(p):\n self.update_one(p, g, s)\n\n def update_one(self, param, grad, state):\n \"\"\"Updates a parameter array and its state using given gradient.\n\n The default implementation delegates the procedure to\n :meth:`update_one_cpu` or :meth:`update_one_gpu` depending on the type\n of the parameter array. Optimizer implmentation must override these\n type-specific methods or this :meth:`update_one` method directly.\n\n Args:\n param: Parameter array.\n grad: Gradient array.\n state: State value.\n\n .. seealso:: :meth:`update_one_cpu`, :meth:`update_one_gpu`\n\n \"\"\"\n if isinstance(param, cuda.GPUArray):\n self.update_one_gpu(param, grad, state)\n else:\n self.update_one_cpu(param, grad, state)\n\n def update_one_cpu(self, param, grad, state):\n \"\"\"Updates a parameter array and its state using given gradient on CPU.\n\n Args:\n param (~numpy.ndarray): Parameter array.\n grad (~numpy.ndarray): Gradient array.\n state: State value.\n\n .. seealso:: :meth:`update_one`, :meth:`update_one_gpu`\n\n \"\"\"\n raise NotImplementedError()\n\n def update_one_gpu(self, param, grad, state):\n \"\"\"Updates a parameter array and its state using given gradient on GPU.\n\n Args:\n param (~pycuda.gpuarray.GPUArray): Parameter array.\n grad (~pycuda.gpuarray.GPUArray): Gradient array.\n state: State value.\n\n .. seealso:: :meth:`update_one`, :meth:`update_one_cpu`\n\n \"\"\"\n raise NotImplementedError()\n", "path": "chainer/optimizer.py"}]}
3,062
156
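The fix in the `chainer__chainer-267` record above hinges on a sign convention: L2 weight decay is applied by adding `decay * p` to the gradient, so that the subsequent SGD step `p -= lr * g` shrinks the parameter. A minimal NumPy sketch of that arithmetic follows; the array values and hyper-parameters are invented for illustration and are not part of the record.

```python
import numpy as np

rng = np.random.default_rng(0)
p = rng.normal(size=5)     # parameter
g = rng.normal(size=5)     # gradient of the data loss w.r.t. p
lr, decay = 0.1, 0.01

# Patched behaviour: fold the L2 penalty into the gradient, then do plain SGD.
g_decayed = g + decay * p
p_new = p - lr * g_decayed

# Equivalent closed form: the weight is shrunk, matching the intent of the issue.
assert np.allclose(p_new, p * (1 - lr * decay) - lr * g)
```

With the original `g -= decay * p`, the same SGD step would instead give `p * (1 + lr * decay) - lr * g`, i.e. the weights grow, which is exactly the behaviour the issue reports.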
gh_patches_debug_28583
rasdani/github-patches
git_diff
pytorch__ignite-48
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Provide basic update + inference functions See #37 for discussion on this. We should provide something for simple supervised training cc @jasonkriss --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ignite/trainer/trainer.py` Content: ``` 1 from __future__ import division 2 3 import logging 4 import time 5 from collections import Iterable 6 7 from enum import Enum 8 from ignite.trainer.history import History 9 10 __all__ = ["TrainingEvents", "Trainer"] 11 12 13 class TrainingEvents(Enum): 14 EPOCH_STARTED = "epoch_started" 15 EPOCH_COMPLETED = "epoch_completed" 16 TRAINING_EPOCH_STARTED = "training_epoch_started" 17 TRAINING_EPOCH_COMPLETED = "training_epoch_completed" 18 VALIDATION_STARTING = "validation_starting" 19 VALIDATION_COMPLETED = "validation_completed" 20 TRAINING_STARTED = "training_started" 21 TRAINING_COMPLETED = "training_completed" 22 TRAINING_ITERATION_STARTED = "training_iteration_started" 23 TRAINING_ITERATION_COMPLETED = "training_iteration_completed" 24 VALIDATION_ITERATION_STARTED = "validation_iteration_started" 25 VALIDATION_ITERATION_COMPLETED = "validation_iteration_completed" 26 EXCEPTION_RAISED = "exception_raised" 27 28 29 def _to_hours_mins_secs(time_taken): 30 mins, secs = divmod(time_taken, 60) 31 hours, mins = divmod(mins, 60) 32 return hours, mins, secs 33 34 35 class Trainer(object): 36 """ 37 Generic trainer class. 38 39 Training update and validation functions receive batches of data and return values which will 40 be stored in the `training_history` and `validation_history`. The trainer defines multiple 41 events in `TrainingEvents` for which the user can attach event handlers to. The events get 42 passed the trainer, so they can access the training/validation history 43 44 45 Parameters 46 ---------- 47 training_update_function : callable 48 Update function receiving the current training batch in each iteration 49 50 validation_inference_function : callable 51 Function receiving data and performing a feed forward without update 52 """ 53 54 def __init__(self, training_update_function, validation_inference_function=None): 55 56 self._logger = self._get_logger() 57 self._training_update_function = training_update_function 58 self._validation_inference_function = validation_inference_function 59 self._event_handlers = {} 60 61 self.training_history = History() 62 self.validation_history = History() 63 self.current_iteration = 0 64 self.current_validation_iteration = 0 65 self.current_epoch = 0 66 self.max_epochs = 0 67 self.should_terminate = False 68 69 def _get_logger(self): 70 logger = logging.getLogger(__name__ + "." 
+ self.__class__.__name__) 71 logger.addHandler(logging.NullHandler()) 72 return logger 73 74 def add_event_handler(self, event_name, handler, *args, **kwargs): 75 """ 76 Add an event handler to be executed when the specified event is fired 77 78 Parameters 79 ---------- 80 event_name: enum 81 event from ignite.trainer.TrainingEvents to attach the 82 handler to 83 handler: Callable 84 the callable event handler that should be invoked 85 args: 86 optional args to be passed to `handler` 87 kwargs: 88 optional keyword args to be passed to `handler` 89 90 Returns 91 ------- 92 None 93 """ 94 if event_name not in TrainingEvents.__members__.values(): 95 self._logger.error("attempt to add event handler to non-existent event %s ", 96 event_name) 97 raise ValueError("Event {} not a valid training event".format(event_name)) 98 99 if event_name not in self._event_handlers.keys(): 100 self._event_handlers[event_name] = [] 101 102 self._event_handlers[event_name].append((handler, args, kwargs)) 103 self._logger.debug("added handler for event % ", event_name) 104 105 def _fire_event(self, event_name): 106 if event_name in self._event_handlers.keys(): 107 self._logger.debug("firing handlers for event %s ", event_name) 108 for func, args, kwargs in self._event_handlers[event_name]: 109 func(self, *args, **kwargs) 110 111 def _train_one_epoch(self, training_data): 112 self._fire_event(TrainingEvents.TRAINING_EPOCH_STARTED) 113 start_time = time.time() 114 115 self.epoch_losses = [] 116 for _, batch in enumerate(training_data, 1): 117 self._fire_event(TrainingEvents.TRAINING_ITERATION_STARTED) 118 119 training_step_result = self._training_update_function(batch) 120 if training_step_result is not None: 121 self.training_history.append(training_step_result) 122 123 self.current_iteration += 1 124 125 self._fire_event(TrainingEvents.TRAINING_ITERATION_COMPLETED) 126 if self.should_terminate: 127 return 128 129 time_taken = time.time() - start_time 130 hours, mins, secs = _to_hours_mins_secs(time_taken) 131 self._logger.info("Epoch[%s] Complete. Time taken: %02d:%02d:%02d", self.current_epoch, hours, 132 mins, secs) 133 134 self._fire_event(TrainingEvents.TRAINING_EPOCH_COMPLETED) 135 136 def validate(self, validation_data): 137 """ Evaluates the validation set""" 138 if self._validation_inference_function is None: 139 raise ValueError("Trainer must have a validation_inference_function in order to validate") 140 141 self.current_validation_iteration = 0 142 self._fire_event(TrainingEvents.VALIDATION_STARTING) 143 start_time = time.time() 144 145 for _, batch in enumerate(validation_data, 1): 146 self._fire_event(TrainingEvents.VALIDATION_ITERATION_STARTED) 147 validation_step_result = self._validation_inference_function(batch) 148 if validation_step_result is not None: 149 self.validation_history.append(validation_step_result) 150 151 self.current_validation_iteration += 1 152 self._fire_event(TrainingEvents.VALIDATION_ITERATION_COMPLETED) 153 if self.should_terminate: 154 break 155 156 time_taken = time.time() - start_time 157 hours, mins, secs = _to_hours_mins_secs(time_taken) 158 self._logger.info("Validation Complete. Time taken: %02d:%02d:%02d", hours, mins, secs) 159 160 self._fire_event(TrainingEvents.VALIDATION_COMPLETED) 161 162 def terminate(self): 163 """ 164 Sends terminate signal to trainer, so that training terminates after the current iteration 165 """ 166 self._logger.info("Terminate signaled to trainer. 
" + 167 "Training will stop after current iteration is finished") 168 self.should_terminate = True 169 170 def run(self, training_data, max_epochs=1): 171 """ 172 Train the model, evaluate the validation set and update best parameters if the validation loss 173 improves. 174 In the event that the validation set is not run (or doesn't exist), the training loss is used 175 to update the best parameters. 176 177 Parameters 178 ---------- 179 training_data : Iterable 180 Collection of training batches allowing repeated iteration (e.g., list or DataLoader) 181 max_epochs: int, optional 182 max epochs to train for [default=1] 183 184 Returns 185 ------- 186 None 187 """ 188 189 try: 190 self._logger.info("Training starting with max_epochs={}".format(max_epochs)) 191 192 self.max_epochs = max_epochs 193 194 start_time = time.time() 195 196 self._fire_event(TrainingEvents.TRAINING_STARTED) 197 while self.current_epoch < max_epochs and not self.should_terminate: 198 self._fire_event(TrainingEvents.EPOCH_STARTED) 199 self._train_one_epoch(training_data) 200 if self.should_terminate: 201 break 202 203 self._fire_event(TrainingEvents.EPOCH_COMPLETED) 204 self.current_epoch += 1 205 206 self._fire_event(TrainingEvents.TRAINING_COMPLETED) 207 time_taken = time.time() - start_time 208 mins, secs = divmod(time_taken, 60) 209 hours, mins = divmod(mins, 60) 210 self._logger.info("Training complete. Time taken %02d:%02d:%02d" % (hours, mins, secs)) 211 except BaseException as e: 212 self._logger.error("Training is terminating due to exception: %s", str(e)) 213 self._fire_event(TrainingEvents.EXCEPTION_RAISED) 214 raise e 215 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ignite/trainer/trainer.py b/ignite/trainer/trainer.py --- a/ignite/trainer/trainer.py +++ b/ignite/trainer/trainer.py @@ -5,9 +5,10 @@ from collections import Iterable from enum import Enum +from torch.autograd import Variable from ignite.trainer.history import History -__all__ = ["TrainingEvents", "Trainer"] +__all__ = ["TrainingEvents", "Trainer", "create_supervised"] class TrainingEvents(Enum): @@ -212,3 +213,41 @@ self._logger.error("Training is terminating due to exception: %s", str(e)) self._fire_event(TrainingEvents.EXCEPTION_RAISED) raise e + + +def create_supervised(model, optimizer, loss_fn, cuda=False): + """ + Factory function for creating a trainer for supervised models + + Args: + model (torch.nn.Module): the model to train + optimizer (torch.optim.Optimizer): the optimizer to use + loss_fn (torch.nn loss function): the loss function to use + cuda (bool, optional): whether or not to transfer batch to GPU (default: False) + + Returns: + Trainer: a trainer instance with supervised update and inference functions + """ + def _prepare_batch(batch, volatile=False): + x, y = batch + if cuda: + x, y = x.cuda(), y.cuda() + return Variable(x, volatile=volatile), Variable(y, volatile=volatile) + + def _update(batch): + model.train() + optimizer.zero_grad() + x, y = _prepare_batch(batch) + y_pred = model(x) + loss = loss_fn(y_pred, y) + loss.backward() + optimizer.step() + return loss.data.cpu()[0] + + def _inference(batch): + model.eval() + x, y = _prepare_batch(batch, volatile=True) + y_pred = model(x) + return y_pred.data.cpu(), y.data.cpu() + + return Trainer(_update, _inference)
{"golden_diff": "diff --git a/ignite/trainer/trainer.py b/ignite/trainer/trainer.py\n--- a/ignite/trainer/trainer.py\n+++ b/ignite/trainer/trainer.py\n@@ -5,9 +5,10 @@\n from collections import Iterable\n \n from enum import Enum\n+from torch.autograd import Variable\n from ignite.trainer.history import History\n \n-__all__ = [\"TrainingEvents\", \"Trainer\"]\n+__all__ = [\"TrainingEvents\", \"Trainer\", \"create_supervised\"]\n \n \n class TrainingEvents(Enum):\n@@ -212,3 +213,41 @@\n self._logger.error(\"Training is terminating due to exception: %s\", str(e))\n self._fire_event(TrainingEvents.EXCEPTION_RAISED)\n raise e\n+\n+\n+def create_supervised(model, optimizer, loss_fn, cuda=False):\n+ \"\"\"\n+ Factory function for creating a trainer for supervised models\n+\n+ Args:\n+ model (torch.nn.Module): the model to train\n+ optimizer (torch.optim.Optimizer): the optimizer to use\n+ loss_fn (torch.nn loss function): the loss function to use\n+ cuda (bool, optional): whether or not to transfer batch to GPU (default: False)\n+\n+ Returns:\n+ Trainer: a trainer instance with supervised update and inference functions\n+ \"\"\"\n+ def _prepare_batch(batch, volatile=False):\n+ x, y = batch\n+ if cuda:\n+ x, y = x.cuda(), y.cuda()\n+ return Variable(x, volatile=volatile), Variable(y, volatile=volatile)\n+\n+ def _update(batch):\n+ model.train()\n+ optimizer.zero_grad()\n+ x, y = _prepare_batch(batch)\n+ y_pred = model(x)\n+ loss = loss_fn(y_pred, y)\n+ loss.backward()\n+ optimizer.step()\n+ return loss.data.cpu()[0]\n+\n+ def _inference(batch):\n+ model.eval()\n+ x, y = _prepare_batch(batch, volatile=True)\n+ y_pred = model(x)\n+ return y_pred.data.cpu(), y.data.cpu()\n+\n+ return Trainer(_update, _inference)\n", "issue": "Provide basic update + inference functions\nSee #37 for discussion on this. We should provide something for simple supervised training\r\n\r\ncc @jasonkriss\n", "before_files": [{"content": "from __future__ import division\n\nimport logging\nimport time\nfrom collections import Iterable\n\nfrom enum import Enum\nfrom ignite.trainer.history import History\n\n__all__ = [\"TrainingEvents\", \"Trainer\"]\n\n\nclass TrainingEvents(Enum):\n EPOCH_STARTED = \"epoch_started\"\n EPOCH_COMPLETED = \"epoch_completed\"\n TRAINING_EPOCH_STARTED = \"training_epoch_started\"\n TRAINING_EPOCH_COMPLETED = \"training_epoch_completed\"\n VALIDATION_STARTING = \"validation_starting\"\n VALIDATION_COMPLETED = \"validation_completed\"\n TRAINING_STARTED = \"training_started\"\n TRAINING_COMPLETED = \"training_completed\"\n TRAINING_ITERATION_STARTED = \"training_iteration_started\"\n TRAINING_ITERATION_COMPLETED = \"training_iteration_completed\"\n VALIDATION_ITERATION_STARTED = \"validation_iteration_started\"\n VALIDATION_ITERATION_COMPLETED = \"validation_iteration_completed\"\n EXCEPTION_RAISED = \"exception_raised\"\n\n\ndef _to_hours_mins_secs(time_taken):\n mins, secs = divmod(time_taken, 60)\n hours, mins = divmod(mins, 60)\n return hours, mins, secs\n\n\nclass Trainer(object):\n \"\"\"\n Generic trainer class.\n\n Training update and validation functions receive batches of data and return values which will\n be stored in the `training_history` and `validation_history`. The trainer defines multiple\n events in `TrainingEvents` for which the user can attach event handlers to. 
The events get\n passed the trainer, so they can access the training/validation history\n\n\n Parameters\n ----------\n training_update_function : callable\n Update function receiving the current training batch in each iteration\n\n validation_inference_function : callable\n Function receiving data and performing a feed forward without update\n \"\"\"\n\n def __init__(self, training_update_function, validation_inference_function=None):\n\n self._logger = self._get_logger()\n self._training_update_function = training_update_function\n self._validation_inference_function = validation_inference_function\n self._event_handlers = {}\n\n self.training_history = History()\n self.validation_history = History()\n self.current_iteration = 0\n self.current_validation_iteration = 0\n self.current_epoch = 0\n self.max_epochs = 0\n self.should_terminate = False\n\n def _get_logger(self):\n logger = logging.getLogger(__name__ + \".\" + self.__class__.__name__)\n logger.addHandler(logging.NullHandler())\n return logger\n\n def add_event_handler(self, event_name, handler, *args, **kwargs):\n \"\"\"\n Add an event handler to be executed when the specified event is fired\n\n Parameters\n ----------\n event_name: enum\n event from ignite.trainer.TrainingEvents to attach the\n handler to\n handler: Callable\n the callable event handler that should be invoked\n args:\n optional args to be passed to `handler`\n kwargs:\n optional keyword args to be passed to `handler`\n\n Returns\n -------\n None\n \"\"\"\n if event_name not in TrainingEvents.__members__.values():\n self._logger.error(\"attempt to add event handler to non-existent event %s \",\n event_name)\n raise ValueError(\"Event {} not a valid training event\".format(event_name))\n\n if event_name not in self._event_handlers.keys():\n self._event_handlers[event_name] = []\n\n self._event_handlers[event_name].append((handler, args, kwargs))\n self._logger.debug(\"added handler for event % \", event_name)\n\n def _fire_event(self, event_name):\n if event_name in self._event_handlers.keys():\n self._logger.debug(\"firing handlers for event %s \", event_name)\n for func, args, kwargs in self._event_handlers[event_name]:\n func(self, *args, **kwargs)\n\n def _train_one_epoch(self, training_data):\n self._fire_event(TrainingEvents.TRAINING_EPOCH_STARTED)\n start_time = time.time()\n\n self.epoch_losses = []\n for _, batch in enumerate(training_data, 1):\n self._fire_event(TrainingEvents.TRAINING_ITERATION_STARTED)\n\n training_step_result = self._training_update_function(batch)\n if training_step_result is not None:\n self.training_history.append(training_step_result)\n\n self.current_iteration += 1\n\n self._fire_event(TrainingEvents.TRAINING_ITERATION_COMPLETED)\n if self.should_terminate:\n return\n\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n self._logger.info(\"Epoch[%s] Complete. 
Time taken: %02d:%02d:%02d\", self.current_epoch, hours,\n mins, secs)\n\n self._fire_event(TrainingEvents.TRAINING_EPOCH_COMPLETED)\n\n def validate(self, validation_data):\n \"\"\" Evaluates the validation set\"\"\"\n if self._validation_inference_function is None:\n raise ValueError(\"Trainer must have a validation_inference_function in order to validate\")\n\n self.current_validation_iteration = 0\n self._fire_event(TrainingEvents.VALIDATION_STARTING)\n start_time = time.time()\n\n for _, batch in enumerate(validation_data, 1):\n self._fire_event(TrainingEvents.VALIDATION_ITERATION_STARTED)\n validation_step_result = self._validation_inference_function(batch)\n if validation_step_result is not None:\n self.validation_history.append(validation_step_result)\n\n self.current_validation_iteration += 1\n self._fire_event(TrainingEvents.VALIDATION_ITERATION_COMPLETED)\n if self.should_terminate:\n break\n\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n self._logger.info(\"Validation Complete. Time taken: %02d:%02d:%02d\", hours, mins, secs)\n\n self._fire_event(TrainingEvents.VALIDATION_COMPLETED)\n\n def terminate(self):\n \"\"\"\n Sends terminate signal to trainer, so that training terminates after the current iteration\n \"\"\"\n self._logger.info(\"Terminate signaled to trainer. \" +\n \"Training will stop after current iteration is finished\")\n self.should_terminate = True\n\n def run(self, training_data, max_epochs=1):\n \"\"\"\n Train the model, evaluate the validation set and update best parameters if the validation loss\n improves.\n In the event that the validation set is not run (or doesn't exist), the training loss is used\n to update the best parameters.\n\n Parameters\n ----------\n training_data : Iterable\n Collection of training batches allowing repeated iteration (e.g., list or DataLoader)\n max_epochs: int, optional\n max epochs to train for [default=1]\n\n Returns\n -------\n None\n \"\"\"\n\n try:\n self._logger.info(\"Training starting with max_epochs={}\".format(max_epochs))\n\n self.max_epochs = max_epochs\n\n start_time = time.time()\n\n self._fire_event(TrainingEvents.TRAINING_STARTED)\n while self.current_epoch < max_epochs and not self.should_terminate:\n self._fire_event(TrainingEvents.EPOCH_STARTED)\n self._train_one_epoch(training_data)\n if self.should_terminate:\n break\n\n self._fire_event(TrainingEvents.EPOCH_COMPLETED)\n self.current_epoch += 1\n\n self._fire_event(TrainingEvents.TRAINING_COMPLETED)\n time_taken = time.time() - start_time\n mins, secs = divmod(time_taken, 60)\n hours, mins = divmod(mins, 60)\n self._logger.info(\"Training complete. 
Time taken %02d:%02d:%02d\" % (hours, mins, secs))\n except BaseException as e:\n self._logger.error(\"Training is terminating due to exception: %s\", str(e))\n self._fire_event(TrainingEvents.EXCEPTION_RAISED)\n raise e\n", "path": "ignite/trainer/trainer.py"}], "after_files": [{"content": "from __future__ import division\n\nimport logging\nimport time\nfrom collections import Iterable\n\nfrom enum import Enum\nfrom torch.autograd import Variable\nfrom ignite.trainer.history import History\n\n__all__ = [\"TrainingEvents\", \"Trainer\", \"create_supervised\"]\n\n\nclass TrainingEvents(Enum):\n EPOCH_STARTED = \"epoch_started\"\n EPOCH_COMPLETED = \"epoch_completed\"\n TRAINING_EPOCH_STARTED = \"training_epoch_started\"\n TRAINING_EPOCH_COMPLETED = \"training_epoch_completed\"\n VALIDATION_STARTING = \"validation_starting\"\n VALIDATION_COMPLETED = \"validation_completed\"\n TRAINING_STARTED = \"training_started\"\n TRAINING_COMPLETED = \"training_completed\"\n TRAINING_ITERATION_STARTED = \"training_iteration_started\"\n TRAINING_ITERATION_COMPLETED = \"training_iteration_completed\"\n VALIDATION_ITERATION_STARTED = \"validation_iteration_started\"\n VALIDATION_ITERATION_COMPLETED = \"validation_iteration_completed\"\n EXCEPTION_RAISED = \"exception_raised\"\n\n\ndef _to_hours_mins_secs(time_taken):\n mins, secs = divmod(time_taken, 60)\n hours, mins = divmod(mins, 60)\n return hours, mins, secs\n\n\nclass Trainer(object):\n \"\"\"\n Generic trainer class.\n\n Training update and validation functions receive batches of data and return values which will\n be stored in the `training_history` and `validation_history`. The trainer defines multiple\n events in `TrainingEvents` for which the user can attach event handlers to. The events get\n passed the trainer, so they can access the training/validation history\n\n\n Parameters\n ----------\n training_update_function : callable\n Update function receiving the current training batch in each iteration\n\n validation_inference_function : callable\n Function receiving data and performing a feed forward without update\n \"\"\"\n\n def __init__(self, training_update_function, validation_inference_function=None):\n\n self._logger = self._get_logger()\n self._training_update_function = training_update_function\n self._validation_inference_function = validation_inference_function\n self._event_handlers = {}\n\n self.training_history = History()\n self.validation_history = History()\n self.current_iteration = 0\n self.current_validation_iteration = 0\n self.current_epoch = 0\n self.max_epochs = 0\n self.should_terminate = False\n\n def _get_logger(self):\n logger = logging.getLogger(__name__ + \".\" + self.__class__.__name__)\n logger.addHandler(logging.NullHandler())\n return logger\n\n def add_event_handler(self, event_name, handler, *args, **kwargs):\n \"\"\"\n Add an event handler to be executed when the specified event is fired\n\n Parameters\n ----------\n event_name: enum\n event from ignite.trainer.TrainingEvents to attach the\n handler to\n handler: Callable\n the callable event handler that should be invoked\n args:\n optional args to be passed to `handler`\n kwargs:\n optional keyword args to be passed to `handler`\n\n Returns\n -------\n None\n \"\"\"\n if event_name not in TrainingEvents.__members__.values():\n self._logger.error(\"attempt to add event handler to non-existent event %s \",\n event_name)\n raise ValueError(\"Event {} not a valid training event\".format(event_name))\n\n if event_name not in self._event_handlers.keys():\n 
self._event_handlers[event_name] = []\n\n self._event_handlers[event_name].append((handler, args, kwargs))\n self._logger.debug(\"added handler for event % \", event_name)\n\n def _fire_event(self, event_name):\n if event_name in self._event_handlers.keys():\n self._logger.debug(\"firing handlers for event %s \", event_name)\n for func, args, kwargs in self._event_handlers[event_name]:\n func(self, *args, **kwargs)\n\n def _train_one_epoch(self, training_data):\n self._fire_event(TrainingEvents.TRAINING_EPOCH_STARTED)\n start_time = time.time()\n\n self.epoch_losses = []\n for _, batch in enumerate(training_data, 1):\n self._fire_event(TrainingEvents.TRAINING_ITERATION_STARTED)\n\n training_step_result = self._training_update_function(batch)\n if training_step_result is not None:\n self.training_history.append(training_step_result)\n\n self.current_iteration += 1\n\n self._fire_event(TrainingEvents.TRAINING_ITERATION_COMPLETED)\n if self.should_terminate:\n return\n\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n self._logger.info(\"Epoch[%s] Complete. Time taken: %02d:%02d:%02d\", self.current_epoch, hours,\n mins, secs)\n\n self._fire_event(TrainingEvents.TRAINING_EPOCH_COMPLETED)\n\n def validate(self, validation_data):\n \"\"\" Evaluates the validation set\"\"\"\n if self._validation_inference_function is None:\n raise ValueError(\"Trainer must have a validation_inference_function in order to validate\")\n\n self.current_validation_iteration = 0\n self._fire_event(TrainingEvents.VALIDATION_STARTING)\n start_time = time.time()\n\n for _, batch in enumerate(validation_data, 1):\n self._fire_event(TrainingEvents.VALIDATION_ITERATION_STARTED)\n validation_step_result = self._validation_inference_function(batch)\n if validation_step_result is not None:\n self.validation_history.append(validation_step_result)\n\n self.current_validation_iteration += 1\n self._fire_event(TrainingEvents.VALIDATION_ITERATION_COMPLETED)\n if self.should_terminate:\n break\n\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n self._logger.info(\"Validation Complete. Time taken: %02d:%02d:%02d\", hours, mins, secs)\n\n self._fire_event(TrainingEvents.VALIDATION_COMPLETED)\n\n def terminate(self):\n \"\"\"\n Sends terminate signal to trainer, so that training terminates after the current iteration\n \"\"\"\n self._logger.info(\"Terminate signaled to trainer. 
\" +\n \"Training will stop after current iteration is finished\")\n self.should_terminate = True\n\n def run(self, training_data, max_epochs=1):\n \"\"\"\n Train the model, evaluate the validation set and update best parameters if the validation loss\n improves.\n In the event that the validation set is not run (or doesn't exist), the training loss is used\n to update the best parameters.\n\n Parameters\n ----------\n training_data : Iterable\n Collection of training batches allowing repeated iteration (e.g., list or DataLoader)\n max_epochs: int, optional\n max epochs to train for [default=1]\n\n Returns\n -------\n None\n \"\"\"\n\n try:\n self._logger.info(\"Training starting with max_epochs={}\".format(max_epochs))\n\n self.max_epochs = max_epochs\n\n start_time = time.time()\n\n self._fire_event(TrainingEvents.TRAINING_STARTED)\n while self.current_epoch < max_epochs and not self.should_terminate:\n self._fire_event(TrainingEvents.EPOCH_STARTED)\n self._train_one_epoch(training_data)\n if self.should_terminate:\n break\n\n self._fire_event(TrainingEvents.EPOCH_COMPLETED)\n self.current_epoch += 1\n\n self._fire_event(TrainingEvents.TRAINING_COMPLETED)\n time_taken = time.time() - start_time\n mins, secs = divmod(time_taken, 60)\n hours, mins = divmod(mins, 60)\n self._logger.info(\"Training complete. Time taken %02d:%02d:%02d\" % (hours, mins, secs))\n except BaseException as e:\n self._logger.error(\"Training is terminating due to exception: %s\", str(e))\n self._fire_event(TrainingEvents.EXCEPTION_RAISED)\n raise e\n\n\ndef create_supervised(model, optimizer, loss_fn, cuda=False):\n \"\"\"\n Factory function for creating a trainer for supervised models\n\n Args:\n model (torch.nn.Module): the model to train\n optimizer (torch.optim.Optimizer): the optimizer to use\n loss_fn (torch.nn loss function): the loss function to use\n cuda (bool, optional): whether or not to transfer batch to GPU (default: False)\n\n Returns:\n Trainer: a trainer instance with supervised update and inference functions\n \"\"\"\n def _prepare_batch(batch, volatile=False):\n x, y = batch\n if cuda:\n x, y = x.cuda(), y.cuda()\n return Variable(x, volatile=volatile), Variable(y, volatile=volatile)\n\n def _update(batch):\n model.train()\n optimizer.zero_grad()\n x, y = _prepare_batch(batch)\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n return loss.data.cpu()[0]\n\n def _inference(batch):\n model.eval()\n x, y = _prepare_batch(batch, volatile=True)\n y_pred = model(x)\n return y_pred.data.cpu(), y.data.cpu()\n\n return Trainer(_update, _inference)\n", "path": "ignite/trainer/trainer.py"}]}
2,532
470
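A hypothetical usage sketch for the `create_supervised` factory introduced by the golden diff in the `pytorch__ignite-48` record above. The model, optimizer and loaders below are made-up placeholders, and the sketch targets the 2017-era API shown in the record (which still relies on `torch.autograd.Variable`), not the current ignite package.

```python
import torch.nn as nn
import torch.optim as optim
from ignite.trainer.trainer import create_supervised

model = nn.Linear(10, 2)
optimizer = optim.SGD(model.parameters(), lr=0.01)
trainer = create_supervised(model, optimizer, nn.CrossEntropyLoss(), cuda=False)

# train_loader / val_loader would be ordinary iterables of (x, y) batch tuples.
# trainer.run(train_loader, max_epochs=5)
# trainer.validate(val_loader)
# Per-batch losses accumulate in trainer.training_history, and (y_pred, y)
# pairs in trainer.validation_history, as defined in the Trainer class above.
```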
gh_patches_debug_8615
rasdani/github-patches
git_diff
secdev__scapy-373
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- unknown.version in egg.info and in the banner Hi, I'm porting 2.3.3 to OpenBSD. I had two issues: 1. after the install with setuptools, the .egg-info generated is called 'lib/python2.7/site-packages/scapy-unknown.version-py2.7.egg-info' I patched setup.py to hardcode the version then it worked ``` --- setup.py.orig Tue Oct 18 10:44:43 2016 +++ setup.py Mon Oct 31 17:19:45 2016 @@ -47,7 +47,7 @@ if os.name == "nt": setup( name='scapy', - version=__import__('scapy').VERSION, + version='2.3.3', packages=[ 'scapy', 'scapy/arch', ``` I now have lib/python2.7/site-packages/scapy-2.3.3-py2.7.egg-info 2. running scapy it says "Welcome to Scapy (unknown.version)" even with the setup.py change. I went through scapy-2.3.3/scapy/main.py but I didn't find anything suspicious. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scapy/__init__.py` Content: ``` 1 ## This file is part of Scapy 2 ## See http://www.secdev.org/projects/scapy for more informations 3 ## Copyright (C) Philippe Biondi <[email protected]> 4 ## This program is published under a GPLv2 license 5 6 """ 7 Scapy: create, send, sniff, dissect and manipulate network packets. 8 9 Usable either from an interactive console or as a Python library. 10 http://www.secdev.org/projects/scapy 11 """ 12 13 import os 14 import re 15 import subprocess 16 17 18 _SCAPY_PKG_DIR = os.path.dirname(__file__) 19 20 def _version_from_git_describe(): 21 """ 22 Read the version from ``git describe``. It returns the latest tag with an 23 optional suffix if the current directory is not exactly on the tag. 24 25 Example:: 26 27 $ git describe --always 28 v2.3.2-346-g164a52c075c8 29 30 The tag prefix (``v``) and the git commit sha1 (``-g164a52c075c8``) are 31 removed if present. 32 33 If the current directory is not exactly on the tag, a ``.devN`` suffix is 34 appended where N is the number of commits made after the last tag. 35 36 Example:: 37 38 >>> _version_from_git_describe() 39 '2.3.2.dev346' 40 """ 41 p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR, 42 stdout=subprocess.PIPE, stderr=subprocess.PIPE) 43 44 out, err = p.communicate() 45 46 if p.returncode == 0: 47 tag = out.strip() 48 match = re.match(r'^v?(.+?)-(\d+)-g[a-f0-9]+$', tag) 49 if match: 50 # remove the 'v' prefix and add a '.devN' suffix 51 return '%s.dev%s' % (match.group(1), match.group(2)) 52 else: 53 # just remove the 'v' prefix 54 return re.sub(r'^v', '', tag) 55 else: 56 raise subprocess.CalledProcessError(p.returncode, err) 57 58 def _version(): 59 version_file = os.path.join(_SCAPY_PKG_DIR, 'VERSION') 60 try: 61 tag = _version_from_git_describe() 62 # successfully read the tag from git, write it in VERSION for 63 # installation and/or archive generation. 64 with open(version_file, 'w') as f: 65 f.write(tag) 66 return tag 67 except: 68 # failed to read the tag from git, try to read it from a VERSION file 69 try: 70 with open(version_file, 'r') as f: 71 tag = f.read() 72 return tag 73 except: 74 return 'unknown.version' 75 76 VERSION = _version() 77 78 if __name__ == "__main__": 79 from scapy.main import interact 80 interact() 81 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scapy/__init__.py b/scapy/__init__.py --- a/scapy/__init__.py +++ b/scapy/__init__.py @@ -71,7 +71,17 @@ tag = f.read() return tag except: - return 'unknown.version' + # Rely on git archive "export-subst" git attribute. + # See 'man gitattributes' for more details. + git_archive_id = '$Format:%h %d$' + sha1 = git_archive_id.strip().split()[0] + match = re.search(r'tag:(\S+)', git_archive_id) + if match: + return match.group(1) + elif sha1: + return sha1 + else: + return 'unknown.version' VERSION = _version()
{"golden_diff": "diff --git a/scapy/__init__.py b/scapy/__init__.py\n--- a/scapy/__init__.py\n+++ b/scapy/__init__.py\n@@ -71,7 +71,17 @@\n tag = f.read()\n return tag\n except:\n- return 'unknown.version'\n+ # Rely on git archive \"export-subst\" git attribute.\n+ # See 'man gitattributes' for more details.\n+ git_archive_id = '$Format:%h %d$'\n+ sha1 = git_archive_id.strip().split()[0]\n+ match = re.search(r'tag:(\\S+)', git_archive_id)\n+ if match:\n+ return match.group(1)\n+ elif sha1:\n+ return sha1\n+ else:\n+ return 'unknown.version'\n \n VERSION = _version()\n", "issue": "unknown.version in egg.info and in the banner\nHi,\r\n\r\nI'm porting 2.3.3 to OpenBSD. I had two issues:\r\n1. after the install with setuptools, the .egg-info generated is called 'lib/python2.7/site-packages/scapy-unknown.version-py2.7.egg-info'\r\n\r\nI patched setup.py to hardcode the version then it worked\r\n```\r\n--- setup.py.orig Tue Oct 18 10:44:43 2016\r\n+++ setup.py Mon Oct 31 17:19:45 2016\r\n@@ -47,7 +47,7 @@ if os.name == \"nt\":\r\n \r\n setup(\r\n name='scapy',\r\n- version=__import__('scapy').VERSION,\r\n+ version='2.3.3',\r\n packages=[\r\n 'scapy',\r\n 'scapy/arch',\r\n\r\n```\r\nI now have lib/python2.7/site-packages/scapy-2.3.3-py2.7.egg-info\r\n\r\n2. running scapy it says \"Welcome to Scapy (unknown.version)\" even with the setup.py change. I went through scapy-2.3.3/scapy/main.py but I didn't find anything suspicious.\n", "before_files": [{"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## This program is published under a GPLv2 license\n\n\"\"\"\nScapy: create, send, sniff, dissect and manipulate network packets.\n\nUsable either from an interactive console or as a Python library.\nhttp://www.secdev.org/projects/scapy\n\"\"\"\n\nimport os\nimport re\nimport subprocess\n\n\n_SCAPY_PKG_DIR = os.path.dirname(__file__)\n\ndef _version_from_git_describe():\n \"\"\"\n Read the version from ``git describe``. 
It returns the latest tag with an\n optional suffix if the current directory is not exactly on the tag.\n\n Example::\n\n $ git describe --always\n v2.3.2-346-g164a52c075c8\n\n The tag prefix (``v``) and the git commit sha1 (``-g164a52c075c8``) are\n removed if present.\n\n If the current directory is not exactly on the tag, a ``.devN`` suffix is\n appended where N is the number of commits made after the last tag.\n\n Example::\n\n >>> _version_from_git_describe()\n '2.3.2.dev346'\n \"\"\"\n p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n out, err = p.communicate()\n\n if p.returncode == 0:\n tag = out.strip()\n match = re.match(r'^v?(.+?)-(\\d+)-g[a-f0-9]+$', tag)\n if match:\n # remove the 'v' prefix and add a '.devN' suffix\n return '%s.dev%s' % (match.group(1), match.group(2))\n else:\n # just remove the 'v' prefix\n return re.sub(r'^v', '', tag)\n else:\n raise subprocess.CalledProcessError(p.returncode, err)\n\ndef _version():\n version_file = os.path.join(_SCAPY_PKG_DIR, 'VERSION')\n try:\n tag = _version_from_git_describe()\n # successfully read the tag from git, write it in VERSION for\n # installation and/or archive generation.\n with open(version_file, 'w') as f:\n f.write(tag)\n return tag\n except:\n # failed to read the tag from git, try to read it from a VERSION file\n try:\n with open(version_file, 'r') as f:\n tag = f.read()\n return tag\n except:\n return 'unknown.version'\n\nVERSION = _version()\n\nif __name__ == \"__main__\":\n from scapy.main import interact\n interact()\n", "path": "scapy/__init__.py"}], "after_files": [{"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## This program is published under a GPLv2 license\n\n\"\"\"\nScapy: create, send, sniff, dissect and manipulate network packets.\n\nUsable either from an interactive console or as a Python library.\nhttp://www.secdev.org/projects/scapy\n\"\"\"\n\nimport os\nimport re\nimport subprocess\n\n\n_SCAPY_PKG_DIR = os.path.dirname(__file__)\n\ndef _version_from_git_describe():\n \"\"\"\n Read the version from ``git describe``. 
It returns the latest tag with an\n optional suffix if the current directory is not exactly on the tag.\n\n Example::\n\n $ git describe --always\n v2.3.2-346-g164a52c075c8\n\n The tag prefix (``v``) and the git commit sha1 (``-g164a52c075c8``) are\n removed if present.\n\n If the current directory is not exactly on the tag, a ``.devN`` suffix is\n appended where N is the number of commits made after the last tag.\n\n Example::\n\n >>> _version_from_git_describe()\n '2.3.2.dev346'\n \"\"\"\n p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n out, err = p.communicate()\n\n if p.returncode == 0:\n tag = out.strip()\n match = re.match(r'^v?(.+?)-(\\d+)-g[a-f0-9]+$', tag)\n if match:\n # remove the 'v' prefix and add a '.devN' suffix\n return '%s.dev%s' % (match.group(1), match.group(2))\n else:\n # just remove the 'v' prefix\n return re.sub(r'^v', '', tag)\n else:\n raise subprocess.CalledProcessError(p.returncode, err)\n\ndef _version():\n version_file = os.path.join(_SCAPY_PKG_DIR, 'VERSION')\n try:\n tag = _version_from_git_describe()\n # successfully read the tag from git, write it in VERSION for\n # installation and/or archive generation.\n with open(version_file, 'w') as f:\n f.write(tag)\n return tag\n except:\n # failed to read the tag from git, try to read it from a VERSION file\n try:\n with open(version_file, 'r') as f:\n tag = f.read()\n return tag\n except:\n # Rely on git archive \"export-subst\" git attribute.\n # See 'man gitattributes' for more details.\n git_archive_id = '$Format:%h %d$'\n sha1 = git_archive_id.strip().split()[0]\n match = re.search(r'tag:(\\S+)', git_archive_id)\n if match:\n return match.group(1)\n elif sha1:\n return sha1\n else:\n return 'unknown.version'\n\nVERSION = _version()\n\nif __name__ == \"__main__\":\n from scapy.main import interact\n interact()\n", "path": "scapy/__init__.py"}]}
1,313
186
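The fix in the record above leans on git's `export-subst` attribute: when the file is exported with `git archive`, the `$Format:%h %d$` placeholder is rewritten to the short commit hash and the ref names, so a tag can be recovered even without a `.git` directory or a VERSION file. Below is a minimal, self-contained sketch of that fallback; the function name and the slightly stricter tag regex are illustrative choices, not part of the patch itself.

```python
import re

def version_from_archive(git_archive_id="$Format:%h %d$"):
    """Hypothetical helper: parse a version out of an export-subst placeholder.

    After `git archive` (with `__init__.py export-subst` in .gitattributes) the
    default argument would look like "164a52c (HEAD, tag: v2.3.3)"; in a plain
    checkout the literal "$Format:...$" string survives untouched.
    """
    if git_archive_id.startswith("$Format"):
        return None                      # not an archive export
    sha1 = git_archive_id.strip().split()[0]
    match = re.search(r"tag:\s*v?([^,)\s]+)", git_archive_id)
    if match:
        return match.group(1)            # prefer the tag name over the sha1
    return sha1 or None

print(version_from_archive("164a52c (HEAD, tag: v2.3.3)"))  # -> 2.3.3
print(version_from_archive())                               # -> None (working tree)
```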
gh_patches_debug_24772
rasdani/github-patches
git_diff
Flexget__Flexget-548
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [add] jinja split: Adds split into jinja filters Sorry about this, I found the solution --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `flexget/plugins/input/trakt_emit.py` Content: ``` 1 from __future__ import unicode_literals, division, absolute_import 2 import hashlib 3 import logging 4 from urlparse import urljoin 5 6 from requests import RequestException 7 8 from flexget import plugin 9 from flexget.entry import Entry 10 from flexget.event import event 11 from flexget.utils import json 12 from flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url 13 14 log = logging.getLogger('trakt_emit') 15 16 17 class TraktEmit(object): 18 """ 19 Creates an entry for the latest or the next item in your watched or collected 20 episodes in your trakt account. 21 22 Syntax: 23 24 trakt_emit: 25 username: <value> 26 position: <last|next> 27 context: <collect|collected|watch|watched> 28 list: <value> 29 30 Options username, password and api_key are required. 31 32 """ 33 34 schema = { 35 'type': 'object', 36 'properties': { 37 'username': {'type': 'string'}, 38 'password': {'type': 'string'}, 39 'position': {'type': 'string', 'enum': ['last', 'next'], 'default': 'next'}, 40 'context': {'type': 'string', 'enum': ['watched', 'collected'], 'default': 'watched'}, 41 'list': {'type': 'string'} 42 }, 43 'required': ['username'], 44 'additionalProperties': False 45 } 46 47 def on_task_input(self, task, config): 48 session = get_session(config['username'], config.get('password')) 49 listed_series = {} 50 if config.get('list'): 51 url = urljoin(API_URL, 'users/%s/' % config['username']) 52 if config['list'] in ['collection', 'watchlist', 'watched']: 53 url = urljoin(url, '%s/shows' % config['list']) 54 else: 55 url = urljoin(url, 'lists/%s/items' % make_list_slug(config['list'])) 56 try: 57 data = session.get(url).json() 58 except RequestException as e: 59 raise plugin.PluginError('Unable to get trakt list `%s`: %s' % (config['list'], e)) 60 if not data: 61 log.warning('The list "%s" is empty.' 
% config['list']) 62 return 63 for item in data: 64 if item['show'] is not None: 65 if not item['show']['title']: 66 # Seems we can get entries with a blank show title sometimes 67 log.warning('Found trakt list show with no series name.') 68 continue 69 trakt_id = item['show']['ids']['trakt'] 70 listed_series[trakt_id] = { 71 'series_name': item['show']['title'], 72 'trakt_id': trakt_id, 73 'tvdb_id': item['show']['ids']['tvdb']} 74 context = config['context'] 75 if context == 'collected': 76 context = 'collection' 77 entries = [] 78 for trakt_id, fields in listed_series.iteritems(): 79 url = get_api_url('shows', trakt_id, 'progress', context) 80 try: 81 data = session.get(url).json() 82 except RequestException as e: 83 raise plugin.PluginError('TODO: error message') 84 if config['position'] == 'next' and data.get('next_episode'): 85 # If the next episode is already in the trakt database, we'll get it here 86 eps = data['next_episode']['season'] 87 epn = data['next_episode']['number'] 88 else: 89 # If we need last ep, or next_episode was not provided, search for last ep 90 for seas in reversed(data['seasons']): 91 # Find the first season with collected/watched episodes 92 if seas['completed'] > 0: 93 eps = seas['number'] 94 # Pick the highest collected/watched episode 95 epn = max(item['number'] for item in seas['episodes'] if item['completed']) 96 # If we are in next episode mode, we have to increment this number 97 if config['position'] == 'next': 98 if seas['completed'] >= seas['aired']: 99 # TODO: next_episode doesn't count unaired episodes right now, this will skip to next 100 # season too early when there are episodes left to air this season. 101 eps += 1 102 epn = 1 103 else: 104 epn += 1 105 break 106 if eps and epn: 107 entry = self.make_entry(fields, eps, epn) 108 entries.append(entry) 109 return entries 110 111 def make_entry(self, fields, season, episode): 112 entry = Entry() 113 entry.update(fields) 114 entry['series_season'] = season 115 entry['series_episode'] = episode 116 entry['series_id_type'] = 'ep' 117 entry['series_id'] = 'S%02dE%02d' % (season, episode) 118 entry['title'] = entry['series_name'] + ' ' + entry['series_id'] 119 entry['url'] = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (fields['trakt_id'], season, episode) 120 return entry 121 122 123 @event('plugin.register') 124 def register_plugin(): 125 plugin.register(TraktEmit, 'trakt_emit', api_ver=2) 126 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flexget/plugins/input/trakt_emit.py b/flexget/plugins/input/trakt_emit.py
--- a/flexget/plugins/input/trakt_emit.py
+++ b/flexget/plugins/input/trakt_emit.py
@@ -1,5 +1,4 @@
 from __future__ import unicode_literals, division, absolute_import
-import hashlib
 import logging
 from urlparse import urljoin
 
@@ -8,7 +7,6 @@
 from flexget import plugin
 from flexget.entry import Entry
 from flexget.event import event
-from flexget.utils import json
 from flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url
 
 log = logging.getLogger('trakt_emit')
@@ -103,6 +101,12 @@
                             else:
                                 epn += 1
                         break
+                else:
+                    if config['position'] == 'next':
+                        eps = epn = 1
+                    else:
+                        # There were no watched/collected episodes, nothing to emit in 'last' mode
+                        continue
             if eps and epn:
                 entry = self.make_entry(fields, eps, epn)
                 entries.append(entry)
{"golden_diff": "diff --git a/flexget/plugins/input/trakt_emit.py b/flexget/plugins/input/trakt_emit.py\n--- a/flexget/plugins/input/trakt_emit.py\n+++ b/flexget/plugins/input/trakt_emit.py\n@@ -1,5 +1,4 @@\n from __future__ import unicode_literals, division, absolute_import\n-import hashlib\n import logging\n from urlparse import urljoin\n \n@@ -8,7 +7,6 @@\n from flexget import plugin\n from flexget.entry import Entry\n from flexget.event import event\n-from flexget.utils import json\n from flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url\n \n log = logging.getLogger('trakt_emit')\n@@ -103,6 +101,12 @@\n else:\n epn += 1\n break\n+ else:\n+ if config['position'] == 'next':\n+ eps = epn = 1\n+ else:\n+ # There were no watched/collected episodes, nothing to emit in 'last' mode\n+ continue\n if eps and epn:\n entry = self.make_entry(fields, eps, epn)\n entries.append(entry)\n", "issue": "[add] jinja split: Adds split into jinja filters\nSorry about this, I found the solution\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nimport hashlib\nimport logging\nfrom urlparse import urljoin\n\nfrom requests import RequestException\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.utils import json\nfrom flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url\n\nlog = logging.getLogger('trakt_emit')\n\n\nclass TraktEmit(object):\n \"\"\"\n Creates an entry for the latest or the next item in your watched or collected\n episodes in your trakt account.\n\n Syntax:\n\n trakt_emit:\n username: <value>\n position: <last|next>\n context: <collect|collected|watch|watched>\n list: <value>\n\n Options username, password and api_key are required.\n\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'position': {'type': 'string', 'enum': ['last', 'next'], 'default': 'next'},\n 'context': {'type': 'string', 'enum': ['watched', 'collected'], 'default': 'watched'},\n 'list': {'type': 'string'}\n },\n 'required': ['username'],\n 'additionalProperties': False\n }\n\n def on_task_input(self, task, config):\n session = get_session(config['username'], config.get('password'))\n listed_series = {}\n if config.get('list'):\n url = urljoin(API_URL, 'users/%s/' % config['username'])\n if config['list'] in ['collection', 'watchlist', 'watched']:\n url = urljoin(url, '%s/shows' % config['list'])\n else:\n url = urljoin(url, 'lists/%s/items' % make_list_slug(config['list']))\n try:\n data = session.get(url).json()\n except RequestException as e:\n raise plugin.PluginError('Unable to get trakt list `%s`: %s' % (config['list'], e))\n if not data:\n log.warning('The list \"%s\" is empty.' 
% config['list'])\n return\n for item in data:\n if item['show'] is not None:\n if not item['show']['title']:\n # Seems we can get entries with a blank show title sometimes\n log.warning('Found trakt list show with no series name.')\n continue\n trakt_id = item['show']['ids']['trakt']\n listed_series[trakt_id] = {\n 'series_name': item['show']['title'],\n 'trakt_id': trakt_id,\n 'tvdb_id': item['show']['ids']['tvdb']}\n context = config['context']\n if context == 'collected':\n context = 'collection'\n entries = []\n for trakt_id, fields in listed_series.iteritems():\n url = get_api_url('shows', trakt_id, 'progress', context)\n try:\n data = session.get(url).json()\n except RequestException as e:\n raise plugin.PluginError('TODO: error message')\n if config['position'] == 'next' and data.get('next_episode'):\n # If the next episode is already in the trakt database, we'll get it here\n eps = data['next_episode']['season']\n epn = data['next_episode']['number']\n else:\n # If we need last ep, or next_episode was not provided, search for last ep\n for seas in reversed(data['seasons']):\n # Find the first season with collected/watched episodes\n if seas['completed'] > 0:\n eps = seas['number']\n # Pick the highest collected/watched episode\n epn = max(item['number'] for item in seas['episodes'] if item['completed'])\n # If we are in next episode mode, we have to increment this number\n if config['position'] == 'next':\n if seas['completed'] >= seas['aired']:\n # TODO: next_episode doesn't count unaired episodes right now, this will skip to next\n # season too early when there are episodes left to air this season.\n eps += 1\n epn = 1\n else:\n epn += 1\n break\n if eps and epn:\n entry = self.make_entry(fields, eps, epn)\n entries.append(entry)\n return entries\n\n def make_entry(self, fields, season, episode):\n entry = Entry()\n entry.update(fields)\n entry['series_season'] = season\n entry['series_episode'] = episode\n entry['series_id_type'] = 'ep'\n entry['series_id'] = 'S%02dE%02d' % (season, episode)\n entry['title'] = entry['series_name'] + ' ' + entry['series_id']\n entry['url'] = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (fields['trakt_id'], season, episode)\n return entry\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(TraktEmit, 'trakt_emit', api_ver=2)\n", "path": "flexget/plugins/input/trakt_emit.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nimport logging\nfrom urlparse import urljoin\n\nfrom requests import RequestException\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url\n\nlog = logging.getLogger('trakt_emit')\n\n\nclass TraktEmit(object):\n \"\"\"\n Creates an entry for the latest or the next item in your watched or collected\n episodes in your trakt account.\n\n Syntax:\n\n trakt_emit:\n username: <value>\n position: <last|next>\n context: <collect|collected|watch|watched>\n list: <value>\n\n Options username, password and api_key are required.\n\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'position': {'type': 'string', 'enum': ['last', 'next'], 'default': 'next'},\n 'context': {'type': 'string', 'enum': ['watched', 'collected'], 'default': 'watched'},\n 'list': {'type': 'string'}\n },\n 'required': ['username'],\n 'additionalProperties': False\n }\n\n def 
on_task_input(self, task, config):\n session = get_session(config['username'], config.get('password'))\n listed_series = {}\n if config.get('list'):\n url = urljoin(API_URL, 'users/%s/' % config['username'])\n if config['list'] in ['collection', 'watchlist', 'watched']:\n url = urljoin(url, '%s/shows' % config['list'])\n else:\n url = urljoin(url, 'lists/%s/items' % make_list_slug(config['list']))\n try:\n data = session.get(url).json()\n except RequestException as e:\n raise plugin.PluginError('Unable to get trakt list `%s`: %s' % (config['list'], e))\n if not data:\n log.warning('The list \"%s\" is empty.' % config['list'])\n return\n for item in data:\n if item['show'] is not None:\n if not item['show']['title']:\n # Seems we can get entries with a blank show title sometimes\n log.warning('Found trakt list show with no series name.')\n continue\n trakt_id = item['show']['ids']['trakt']\n listed_series[trakt_id] = {\n 'series_name': item['show']['title'],\n 'trakt_id': trakt_id,\n 'tvdb_id': item['show']['ids']['tvdb']}\n context = config['context']\n if context == 'collected':\n context = 'collection'\n entries = []\n for trakt_id, fields in listed_series.iteritems():\n url = get_api_url('shows', trakt_id, 'progress', context)\n try:\n data = session.get(url).json()\n except RequestException as e:\n raise plugin.PluginError('TODO: error message')\n if config['position'] == 'next' and data.get('next_episode'):\n # If the next episode is already in the trakt database, we'll get it here\n eps = data['next_episode']['season']\n epn = data['next_episode']['number']\n else:\n # If we need last ep, or next_episode was not provided, search for last ep\n for seas in reversed(data['seasons']):\n # Find the first season with collected/watched episodes\n if seas['completed'] > 0:\n eps = seas['number']\n # Pick the highest collected/watched episode\n epn = max(item['number'] for item in seas['episodes'] if item['completed'])\n # If we are in next episode mode, we have to increment this number\n if config['position'] == 'next':\n if seas['completed'] >= seas['aired']:\n # TODO: next_episode doesn't count unaired episodes right now, this will skip to next\n # season too early when there are episodes left to air this season.\n eps += 1\n epn = 1\n else:\n epn += 1\n break\n else:\n if config['position'] == 'next':\n eps = epn = 1\n else:\n # There were no watched/collected episodes, nothing to emit in 'last' mode\n continue\n if eps and epn:\n entry = self.make_entry(fields, eps, epn)\n entries.append(entry)\n return entries\n\n def make_entry(self, fields, season, episode):\n entry = Entry()\n entry.update(fields)\n entry['series_season'] = season\n entry['series_episode'] = episode\n entry['series_id_type'] = 'ep'\n entry['series_id'] = 'S%02dE%02d' % (season, episode)\n entry['title'] = entry['series_name'] + ' ' + entry['series_id']\n entry['url'] = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (fields['trakt_id'], season, episode)\n return entry\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(TraktEmit, 'trakt_emit', api_ver=2)\n", "path": "flexget/plugins/input/trakt_emit.py"}]}
1,697
257
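The block added by the diff above is a `for`/`else`: the `else` suite runs only when the loop finishes without `break`, which here means no season had any watched or collected episodes. A trimmed sketch of that control flow, with the field names simplified from the trakt payload used in the record, is:

```python
def last_progress(seasons):
    """Simplified stand-in for the fixed loop in trakt_emit."""
    for seas in reversed(seasons):
        if seas["completed"] > 0:
            eps, epn = seas["number"], seas["completed"]
            break
    else:
        # Runs only if the loop finished without hitting `break`,
        # i.e. there was no progress to report at all.
        eps, epn = 1, 1
    return eps, epn

print(last_progress([]))                               # (1, 1)
print(last_progress([{"number": 3, "completed": 5}]))  # (3, 5)
```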
gh_patches_debug_918
rasdani/github-patches
git_diff
vas3k__vas3k.club-260
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Сломался check_PR экшн на новые пуллреквесты Вот здесь все пошло не так после пары изменений в requirements и докерфайлах: https://github.com/vas3k/vas3k.club/blob/master/.github/workflows/CI.yml Из-за этого все новые пуллреквесты красненькие и мержить их приходится только суровой админской рукой. Надо бы переосмыслить этот CI как-нибудь. У кого есть идеи? По сути мне важны линтеры и чтобы докер с новым кодом успешно поднимался. Остального пока нет. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `utils/images.py` Content: ``` 1 import io 2 import logging 3 import os 4 from urllib.parse import urlparse 5 6 import requests 7 from PIL import Image 8 from django.conf import settings 9 10 log = logging.getLogger(__name__) 11 12 13 def upload_image_bytes( 14 filename, data, resize=(192, 192), convert_to=None, quality=None 15 ): 16 if not data: 17 return None 18 19 if resize: 20 try: 21 image = Image.open(data) 22 except Exception as ex: 23 log.warning(f"Bad image data: {ex}") 24 return None 25 26 image.thumbnail(resize) 27 saved_image = io.BytesIO() 28 saved_image.name = filename 29 30 try: 31 image.save(saved_image) 32 except OSError: 33 log.warning(f"Error saving image data: {ex}") 34 return None 35 36 data = saved_image.getvalue() 37 38 upload_params = { 39 "code": settings.MEDIA_UPLOAD_CODE 40 } 41 42 if convert_to: 43 upload_params["convert_to"] = convert_to 44 45 if quality: 46 upload_params["quality"] = quality 47 48 try: 49 uploaded = requests.post( 50 url=settings.MEDIA_UPLOAD_URL, 51 params=upload_params, 52 files={"media": (filename, data)}, 53 ) 54 except requests.exceptions.RequestException as ex: 55 log.error(f"Image upload error: {ex}") 56 return None 57 58 if 200 <= uploaded.status_code <= 299: 59 try: 60 response_data = uploaded.json() 61 except Exception as ex: 62 log.error(f"Image upload error: {ex} ({uploaded.content})") 63 return None 64 65 return response_data["uploaded"][0] 66 67 return None 68 69 70 def upload_image_from_url(url, resize=(192, 192), convert_to="jpg", quality=90): 71 if settings.DEBUG or not settings.MEDIA_UPLOAD_URL or not settings.MEDIA_UPLOAD_CODE: 72 return url 73 74 if not url: 75 return None 76 77 image_name = os.path.basename(urlparse(url).path) 78 if "." not in image_name: 79 image_name += ".jpg" 80 81 try: 82 image_data = io.BytesIO(requests.get(url).content) 83 except requests.exceptions.RequestException: 84 return None 85 86 return upload_image_bytes(image_name, image_data, resize=resize, convert_to=convert_to, quality=quality) 87 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/utils/images.py b/utils/images.py
--- a/utils/images.py
+++ b/utils/images.py
@@ -29,7 +29,7 @@
 
         try:
             image.save(saved_image)
-        except OSError:
+        except OSError as ex:
            log.warning(f"Error saving image data: {ex}")
             return None
{"golden_diff": "diff --git a/utils/images.py b/utils/images.py\n--- a/utils/images.py\n+++ b/utils/images.py\n@@ -29,7 +29,7 @@\n \n try:\n image.save(saved_image)\n- except OSError:\n+ except OSError as ex:\n log.warning(f\"Error saving image data: {ex}\")\n return None\n", "issue": "\u0421\u043b\u043e\u043c\u0430\u043b\u0441\u044f check_PR \u044d\u043a\u0448\u043d \u043d\u0430 \u043d\u043e\u0432\u044b\u0435 \u043f\u0443\u043b\u043b\u0440\u0435\u043a\u0432\u0435\u0441\u0442\u044b\n\u0412\u043e\u0442 \u0437\u0434\u0435\u0441\u044c \u0432\u0441\u0435 \u043f\u043e\u0448\u043b\u043e \u043d\u0435 \u0442\u0430\u043a \u043f\u043e\u0441\u043b\u0435 \u043f\u0430\u0440\u044b \u0438\u0437\u043c\u0435\u043d\u0435\u043d\u0438\u0439 \u0432 requirements \u0438 \u0434\u043e\u043a\u0435\u0440\u0444\u0430\u0439\u043b\u0430\u0445: https://github.com/vas3k/vas3k.club/blob/master/.github/workflows/CI.yml\r\n\r\n\u0418\u0437-\u0437\u0430 \u044d\u0442\u043e\u0433\u043e \u0432\u0441\u0435 \u043d\u043e\u0432\u044b\u0435 \u043f\u0443\u043b\u043b\u0440\u0435\u043a\u0432\u0435\u0441\u0442\u044b \u043a\u0440\u0430\u0441\u043d\u0435\u043d\u044c\u043a\u0438\u0435 \u0438 \u043c\u0435\u0440\u0436\u0438\u0442\u044c \u0438\u0445 \u043f\u0440\u0438\u0445\u043e\u0434\u0438\u0442\u0441\u044f \u0442\u043e\u043b\u044c\u043a\u043e \u0441\u0443\u0440\u043e\u0432\u043e\u0439 \u0430\u0434\u043c\u0438\u043d\u0441\u043a\u043e\u0439 \u0440\u0443\u043a\u043e\u0439. \u041d\u0430\u0434\u043e \u0431\u044b \u043f\u0435\u0440\u0435\u043e\u0441\u043c\u044b\u0441\u043b\u0438\u0442\u044c \u044d\u0442\u043e\u0442 CI \u043a\u0430\u043a-\u043d\u0438\u0431\u0443\u0434\u044c. \u0423 \u043a\u043e\u0433\u043e \u0435\u0441\u0442\u044c \u0438\u0434\u0435\u0438?\r\n\r\n\u041f\u043e \u0441\u0443\u0442\u0438 \u043c\u043d\u0435 \u0432\u0430\u0436\u043d\u044b \u043b\u0438\u043d\u0442\u0435\u0440\u044b \u0438 \u0447\u0442\u043e\u0431\u044b \u0434\u043e\u043a\u0435\u0440 \u0441 \u043d\u043e\u0432\u044b\u043c \u043a\u043e\u0434\u043e\u043c \u0443\u0441\u043f\u0435\u0448\u043d\u043e \u043f\u043e\u0434\u043d\u0438\u043c\u0430\u043b\u0441\u044f. 
\u041e\u0441\u0442\u0430\u043b\u044c\u043d\u043e\u0433\u043e \u043f\u043e\u043a\u0430 \u043d\u0435\u0442.\n", "before_files": [{"content": "import io\nimport logging\nimport os\nfrom urllib.parse import urlparse\n\nimport requests\nfrom PIL import Image\nfrom django.conf import settings\n\nlog = logging.getLogger(__name__)\n\n\ndef upload_image_bytes(\n filename, data, resize=(192, 192), convert_to=None, quality=None\n):\n if not data:\n return None\n\n if resize:\n try:\n image = Image.open(data)\n except Exception as ex:\n log.warning(f\"Bad image data: {ex}\")\n return None\n\n image.thumbnail(resize)\n saved_image = io.BytesIO()\n saved_image.name = filename\n\n try:\n image.save(saved_image)\n except OSError:\n log.warning(f\"Error saving image data: {ex}\")\n return None\n\n data = saved_image.getvalue()\n\n upload_params = {\n \"code\": settings.MEDIA_UPLOAD_CODE\n }\n\n if convert_to:\n upload_params[\"convert_to\"] = convert_to\n\n if quality:\n upload_params[\"quality\"] = quality\n\n try:\n uploaded = requests.post(\n url=settings.MEDIA_UPLOAD_URL,\n params=upload_params,\n files={\"media\": (filename, data)},\n )\n except requests.exceptions.RequestException as ex:\n log.error(f\"Image upload error: {ex}\")\n return None\n\n if 200 <= uploaded.status_code <= 299:\n try:\n response_data = uploaded.json()\n except Exception as ex:\n log.error(f\"Image upload error: {ex} ({uploaded.content})\")\n return None\n\n return response_data[\"uploaded\"][0]\n\n return None\n\n\ndef upload_image_from_url(url, resize=(192, 192), convert_to=\"jpg\", quality=90):\n if settings.DEBUG or not settings.MEDIA_UPLOAD_URL or not settings.MEDIA_UPLOAD_CODE:\n return url\n\n if not url:\n return None\n\n image_name = os.path.basename(urlparse(url).path)\n if \".\" not in image_name:\n image_name += \".jpg\"\n\n try:\n image_data = io.BytesIO(requests.get(url).content)\n except requests.exceptions.RequestException:\n return None\n\n return upload_image_bytes(image_name, image_data, resize=resize, convert_to=convert_to, quality=quality)\n", "path": "utils/images.py"}], "after_files": [{"content": "import io\nimport logging\nimport os\nfrom urllib.parse import urlparse\n\nimport requests\nfrom PIL import Image\nfrom django.conf import settings\n\nlog = logging.getLogger(__name__)\n\n\ndef upload_image_bytes(\n filename, data, resize=(192, 192), convert_to=None, quality=None\n):\n if not data:\n return None\n\n if resize:\n try:\n image = Image.open(data)\n except Exception as ex:\n log.warning(f\"Bad image data: {ex}\")\n return None\n\n image.thumbnail(resize)\n saved_image = io.BytesIO()\n saved_image.name = filename\n\n try:\n image.save(saved_image)\n except OSError as ex:\n log.warning(f\"Error saving image data: {ex}\")\n return None\n\n data = saved_image.getvalue()\n\n upload_params = {\n \"code\": settings.MEDIA_UPLOAD_CODE\n }\n\n if convert_to:\n upload_params[\"convert_to\"] = convert_to\n\n if quality:\n upload_params[\"quality\"] = quality\n\n try:\n uploaded = requests.post(\n url=settings.MEDIA_UPLOAD_URL,\n params=upload_params,\n files={\"media\": (filename, data)},\n )\n except requests.exceptions.RequestException as ex:\n log.error(f\"Image upload error: {ex}\")\n return None\n\n if 200 <= uploaded.status_code <= 299:\n try:\n response_data = uploaded.json()\n except Exception as ex:\n log.error(f\"Image upload error: {ex} ({uploaded.content})\")\n return None\n\n return response_data[\"uploaded\"][0]\n\n return None\n\n\ndef upload_image_from_url(url, resize=(192, 192), 
convert_to=\"jpg\", quality=90):\n if settings.DEBUG or not settings.MEDIA_UPLOAD_URL or not settings.MEDIA_UPLOAD_CODE:\n return url\n\n if not url:\n return None\n\n image_name = os.path.basename(urlparse(url).path)\n if \".\" not in image_name:\n image_name += \".jpg\"\n\n try:\n image_data = io.BytesIO(requests.get(url).content)\n except requests.exceptions.RequestException:\n return None\n\n return upload_image_bytes(image_name, image_data, resize=resize, convert_to=convert_to, quality=quality)\n", "path": "utils/images.py"}]}
1,075
75
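The small-looking change above (`except OSError:` to `except OSError as ex:`) matters because Python 3 deletes the name bound by `except ... as ex` when its handler exits, so the second handler in `upload_image_bytes` referenced an unbound `ex` and raised `NameError` instead of logging. A standalone illustration follows; the class and function names here are hypothetical, not from the project.

```python
def save_thumbnail(image, buffer):
    try:
        image.save(buffer)
    except OSError as ex:                         # must (re)bind `ex` here...
        print(f"Error saving image data: {ex}")   # ...or this line raises NameError
        return None
    return buffer


class _BrokenImage:
    def save(self, buffer):
        raise OSError("disk full")


print(save_thumbnail(_BrokenImage(), bytearray()))  # logs the error, prints None
```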
gh_patches_debug_3052
rasdani/github-patches
git_diff
Lightning-Universe__lightning-flash-1486
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The type of `n_gram` is mislabeled as bool, which should be int type. ## 🐛 Bug In Translation Task: The type of `n_gram` is mislabeled as bool, which should be int type. ### To Reproduce <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> ``` flash translation from_hf_datasets --help ``` The error raised: ``` translation: error: Configuration check failed :: Parser key "model.n_gram": Expected a <class 'bool'> but got "4" ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `flash/text/seq2seq/translation/model.py` Content: ``` 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from typing import Any, Dict, Optional, Union 15 16 from torchmetrics import BLEUScore 17 18 from flash.core.utilities.imports import _TM_GREATER_EQUAL_0_7_0 19 from flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE 20 from flash.text.seq2seq.core.model import Seq2SeqTask 21 22 23 class TranslationTask(Seq2SeqTask): 24 """The ``TranslationTask`` is a :class:`~flash.Task` for Seq2Seq text translation. For more details, see 25 :ref:`translation`. 26 27 You can change the backbone to any translation model from `HuggingFace/transformers 28 <https://huggingface.co/models?filter=pytorch&pipeline_tag=translation>`__ using the ``backbone`` argument. 29 30 Args: 31 backbone: backbone model to use for the task. 32 max_source_length: The maximum length to pad / truncate input sequences to. 33 max_target_length: The maximum length to pad / truncate target sequences to. 34 padding: The type of padding to apply. One of: "longest" or ``True``, "max_length", "do_not_pad" or 35 ``False``. 36 loss_fn: Loss function for training. 37 optimizer: Optimizer to use for training. 38 lr_scheduler: The LR scheduler to use during training. 39 metrics: Metrics to compute for training and evaluation. Defauls to calculating the BLEU metric. 40 Changing this argument currently has no effect. 41 learning_rate: Learning rate to use for training, defaults to `1e-5` 42 num_beams: Number of beams to use in validation when generating predictions. Defaults to `4` 43 n_gram: Maximum n_grams to use in metric calculation. Defaults to `4` 44 smooth: Apply smoothing in BLEU calculation. 
Defaults to `True` 45 enable_ort: Enable Torch ONNX Runtime Optimization: https://onnxruntime.ai/docs/#onnx-runtime-for-training 46 """ 47 48 def __init__( 49 self, 50 backbone: str = "t5-small", 51 tokenizer_kwargs: Optional[Dict[str, Any]] = None, 52 max_source_length: int = 128, 53 max_target_length: int = 128, 54 padding: Union[str, bool] = "max_length", 55 loss_fn: LOSS_FN_TYPE = None, 56 optimizer: OPTIMIZER_TYPE = "Adam", 57 lr_scheduler: LR_SCHEDULER_TYPE = None, 58 metrics: METRICS_TYPE = None, 59 learning_rate: Optional[float] = None, 60 num_beams: Optional[int] = 4, 61 n_gram: bool = 4, 62 smooth: bool = True, 63 enable_ort: bool = False, 64 ): 65 self.save_hyperparameters() 66 super().__init__( 67 backbone=backbone, 68 tokenizer_kwargs=tokenizer_kwargs, 69 max_source_length=max_source_length, 70 max_target_length=max_target_length, 71 padding=padding, 72 loss_fn=loss_fn, 73 optimizer=optimizer, 74 lr_scheduler=lr_scheduler, 75 metrics=metrics, 76 learning_rate=learning_rate, 77 num_beams=num_beams, 78 enable_ort=enable_ort, 79 ) 80 self.bleu = BLEUScore( 81 n_gram=n_gram, 82 smooth=smooth, 83 ) 84 85 @property 86 def task(self) -> str: 87 return "translation" 88 89 def compute_metrics(self, generated_tokens, batch, prefix): 90 reference_corpus = self.decode(batch["labels"]) 91 # wrap targets in list as score expects a list of potential references 92 reference_corpus = [[reference] for reference in reference_corpus] 93 94 translate_corpus = self.decode(generated_tokens) 95 translate_corpus = [line for line in translate_corpus] 96 97 if _TM_GREATER_EQUAL_0_7_0: 98 result = self.bleu(translate_corpus, reference_corpus) 99 else: 100 result = self.bleu(reference_corpus, translate_corpus) 101 self.log(f"{prefix}_bleu_score", result, on_step=False, on_epoch=True, prog_bar=True) 102 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flash/text/seq2seq/translation/model.py b/flash/text/seq2seq/translation/model.py
--- a/flash/text/seq2seq/translation/model.py
+++ b/flash/text/seq2seq/translation/model.py
@@ -58,7 +58,7 @@
         metrics: METRICS_TYPE = None,
         learning_rate: Optional[float] = None,
         num_beams: Optional[int] = 4,
-        n_gram: bool = 4,
+        n_gram: int = 4,
         smooth: bool = True,
         enable_ort: bool = False,
     ):
{"golden_diff": "diff --git a/flash/text/seq2seq/translation/model.py b/flash/text/seq2seq/translation/model.py\n--- a/flash/text/seq2seq/translation/model.py\n+++ b/flash/text/seq2seq/translation/model.py\n@@ -58,7 +58,7 @@\n metrics: METRICS_TYPE = None,\n learning_rate: Optional[float] = None,\n num_beams: Optional[int] = 4,\n- n_gram: bool = 4,\n+ n_gram: int = 4,\n smooth: bool = True,\n enable_ort: bool = False,\n ):\n", "issue": "The type of `n_gram` is mislabeled as bool, which should be int type.\n## \ud83d\udc1b Bug\r\n\r\nIn Translation Task:\r\nThe type of `n_gram` is mislabeled as bool, which should be int type.\r\n\r\n### To Reproduce\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n```\r\nflash translation from_hf_datasets --help\r\n```\r\nThe error raised:\r\n```\r\ntranslation: error: Configuration check failed :: Parser key \"model.n_gram\": Expected a <class 'bool'> but got \"4\"\r\n```\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional, Union\n\nfrom torchmetrics import BLEUScore\n\nfrom flash.core.utilities.imports import _TM_GREATER_EQUAL_0_7_0\nfrom flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE\nfrom flash.text.seq2seq.core.model import Seq2SeqTask\n\n\nclass TranslationTask(Seq2SeqTask):\n \"\"\"The ``TranslationTask`` is a :class:`~flash.Task` for Seq2Seq text translation. For more details, see\n :ref:`translation`.\n\n You can change the backbone to any translation model from `HuggingFace/transformers\n <https://huggingface.co/models?filter=pytorch&pipeline_tag=translation>`__ using the ``backbone`` argument.\n\n Args:\n backbone: backbone model to use for the task.\n max_source_length: The maximum length to pad / truncate input sequences to.\n max_target_length: The maximum length to pad / truncate target sequences to.\n padding: The type of padding to apply. One of: \"longest\" or ``True``, \"max_length\", \"do_not_pad\" or\n ``False``.\n loss_fn: Loss function for training.\n optimizer: Optimizer to use for training.\n lr_scheduler: The LR scheduler to use during training.\n metrics: Metrics to compute for training and evaluation. Defauls to calculating the BLEU metric.\n Changing this argument currently has no effect.\n learning_rate: Learning rate to use for training, defaults to `1e-5`\n num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`\n n_gram: Maximum n_grams to use in metric calculation. Defaults to `4`\n smooth: Apply smoothing in BLEU calculation. 
Defaults to `True`\n enable_ort: Enable Torch ONNX Runtime Optimization: https://onnxruntime.ai/docs/#onnx-runtime-for-training\n \"\"\"\n\n def __init__(\n self,\n backbone: str = \"t5-small\",\n tokenizer_kwargs: Optional[Dict[str, Any]] = None,\n max_source_length: int = 128,\n max_target_length: int = 128,\n padding: Union[str, bool] = \"max_length\",\n loss_fn: LOSS_FN_TYPE = None,\n optimizer: OPTIMIZER_TYPE = \"Adam\",\n lr_scheduler: LR_SCHEDULER_TYPE = None,\n metrics: METRICS_TYPE = None,\n learning_rate: Optional[float] = None,\n num_beams: Optional[int] = 4,\n n_gram: bool = 4,\n smooth: bool = True,\n enable_ort: bool = False,\n ):\n self.save_hyperparameters()\n super().__init__(\n backbone=backbone,\n tokenizer_kwargs=tokenizer_kwargs,\n max_source_length=max_source_length,\n max_target_length=max_target_length,\n padding=padding,\n loss_fn=loss_fn,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n metrics=metrics,\n learning_rate=learning_rate,\n num_beams=num_beams,\n enable_ort=enable_ort,\n )\n self.bleu = BLEUScore(\n n_gram=n_gram,\n smooth=smooth,\n )\n\n @property\n def task(self) -> str:\n return \"translation\"\n\n def compute_metrics(self, generated_tokens, batch, prefix):\n reference_corpus = self.decode(batch[\"labels\"])\n # wrap targets in list as score expects a list of potential references\n reference_corpus = [[reference] for reference in reference_corpus]\n\n translate_corpus = self.decode(generated_tokens)\n translate_corpus = [line for line in translate_corpus]\n\n if _TM_GREATER_EQUAL_0_7_0:\n result = self.bleu(translate_corpus, reference_corpus)\n else:\n result = self.bleu(reference_corpus, translate_corpus)\n self.log(f\"{prefix}_bleu_score\", result, on_step=False, on_epoch=True, prog_bar=True)\n", "path": "flash/text/seq2seq/translation/model.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional, Union\n\nfrom torchmetrics import BLEUScore\n\nfrom flash.core.utilities.imports import _TM_GREATER_EQUAL_0_7_0\nfrom flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE\nfrom flash.text.seq2seq.core.model import Seq2SeqTask\n\n\nclass TranslationTask(Seq2SeqTask):\n \"\"\"The ``TranslationTask`` is a :class:`~flash.Task` for Seq2Seq text translation. For more details, see\n :ref:`translation`.\n\n You can change the backbone to any translation model from `HuggingFace/transformers\n <https://huggingface.co/models?filter=pytorch&pipeline_tag=translation>`__ using the ``backbone`` argument.\n\n Args:\n backbone: backbone model to use for the task.\n max_source_length: The maximum length to pad / truncate input sequences to.\n max_target_length: The maximum length to pad / truncate target sequences to.\n padding: The type of padding to apply. 
One of: \"longest\" or ``True``, \"max_length\", \"do_not_pad\" or\n ``False``.\n loss_fn: Loss function for training.\n optimizer: Optimizer to use for training.\n lr_scheduler: The LR scheduler to use during training.\n metrics: Metrics to compute for training and evaluation. Defauls to calculating the BLEU metric.\n Changing this argument currently has no effect.\n learning_rate: Learning rate to use for training, defaults to `1e-5`\n num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`\n n_gram: Maximum n_grams to use in metric calculation. Defaults to `4`\n smooth: Apply smoothing in BLEU calculation. Defaults to `True`\n enable_ort: Enable Torch ONNX Runtime Optimization: https://onnxruntime.ai/docs/#onnx-runtime-for-training\n \"\"\"\n\n def __init__(\n self,\n backbone: str = \"t5-small\",\n tokenizer_kwargs: Optional[Dict[str, Any]] = None,\n max_source_length: int = 128,\n max_target_length: int = 128,\n padding: Union[str, bool] = \"max_length\",\n loss_fn: LOSS_FN_TYPE = None,\n optimizer: OPTIMIZER_TYPE = \"Adam\",\n lr_scheduler: LR_SCHEDULER_TYPE = None,\n metrics: METRICS_TYPE = None,\n learning_rate: Optional[float] = None,\n num_beams: Optional[int] = 4,\n n_gram: int = 4,\n smooth: bool = True,\n enable_ort: bool = False,\n ):\n self.save_hyperparameters()\n super().__init__(\n backbone=backbone,\n tokenizer_kwargs=tokenizer_kwargs,\n max_source_length=max_source_length,\n max_target_length=max_target_length,\n padding=padding,\n loss_fn=loss_fn,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n metrics=metrics,\n learning_rate=learning_rate,\n num_beams=num_beams,\n enable_ort=enable_ort,\n )\n self.bleu = BLEUScore(\n n_gram=n_gram,\n smooth=smooth,\n )\n\n @property\n def task(self) -> str:\n return \"translation\"\n\n def compute_metrics(self, generated_tokens, batch, prefix):\n reference_corpus = self.decode(batch[\"labels\"])\n # wrap targets in list as score expects a list of potential references\n reference_corpus = [[reference] for reference in reference_corpus]\n\n translate_corpus = self.decode(generated_tokens)\n translate_corpus = [line for line in translate_corpus]\n\n if _TM_GREATER_EQUAL_0_7_0:\n result = self.bleu(translate_corpus, reference_corpus)\n else:\n result = self.bleu(reference_corpus, translate_corpus)\n self.log(f\"{prefix}_bleu_score\", result, on_step=False, on_epoch=True, prog_bar=True)\n", "path": "flash/text/seq2seq/translation/model.py"}]}
1,581
139
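The patch above only changes a type hint, but it fixes a real failure because the Flash CLI builds its argument parser from the constructor signature, so `n_gram: bool = 4` makes the parser reject `4` as a non-boolean. The following toy stand-in for that annotation-driven coercion is an assumption-laden simplification, not the actual jsonargparse implementation, but it shows the mechanism behind the reported error message:

```python
import inspect

def coerce_kwargs(func, raw):
    """Convert raw string values using the type hints of `func`'s parameters."""
    params = inspect.signature(func).parameters
    out = {}
    for name, value in raw.items():
        ann = params[name].annotation
        if ann is bool:
            if value not in ("true", "false"):
                raise TypeError(f'Expected a {ann} but got "{value}"')
            out[name] = value == "true"
        else:
            out[name] = ann(value)
    return out

def task(n_gram: int = 4, smooth: bool = True):
    return n_gram, smooth

print(coerce_kwargs(task, {"n_gram": "4", "smooth": "true"}))  # {'n_gram': 4, 'smooth': True}
```

With the old `n_gram: bool` annotation, the same call would raise `Expected a <class 'bool'> but got "4"`, mirroring the issue report.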
gh_patches_debug_28402
rasdani/github-patches
git_diff
dask__distributed-416
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Distributed.joblib code fails to affect sklearn The comments in [this stackoverflow answer](http://stackoverflow.com/questions/38601026/easy-way-to-use-parallel-options-of-scikit-learn-functions-on-hpc/38814491#38814491) raise concerns about the effectiveness of `distributed.joblib` to parallelize vanilla sklearn code. It appears that sklearn ships with its own version of Joblib, which the plugin registration in `distributed.joblib` does not affect. It would be good to test sklearn functionality and, if necessary, add plugin registration to `sklearn.externals.joblib` in the same way we do to normal `joblib`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `distributed/joblib.py` Content: ``` 1 from __future__ import print_function, division, absolute_import 2 3 from joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin 4 from joblib.parallel import register_parallel_backend 5 from tornado import gen 6 7 from .executor import Executor, _wait 8 9 10 class DistributedBackend(ParallelBackendBase, AutoBatchingMixin): 11 MIN_IDEAL_BATCH_DURATION = 0.2 12 MAX_IDEAL_BATCH_DURATION = 1.0 13 14 def __init__(self, scheduler_host='127.0.0.1:8786', loop=None): 15 self.executor = Executor(scheduler_host, loop=loop) 16 self.futures = set() 17 18 def configure(self, n_jobs=1, parallel=None, **backend_args): 19 return self.effective_n_jobs(n_jobs) 20 21 def effective_n_jobs(self, n_jobs=1): 22 return sum(self.executor.ncores().values()) 23 24 def apply_async(self, func, *args, **kwargs): 25 callback = kwargs.pop('callback', None) 26 kwargs['pure'] = False 27 future = self.executor.submit(func, *args, **kwargs) 28 self.futures.add(future) 29 30 @gen.coroutine 31 def callback_wrapper(): 32 result = yield _wait([future]) 33 self.futures.remove(future) 34 callback(result) # gets called in separate thread 35 36 self.executor.loop.add_callback(callback_wrapper) 37 38 future.get = future.result # monkey patch to achieve AsyncResult API 39 return future 40 41 def abort_everything(self, ensure_ready=True): 42 # Tell the executor to cancel any task submitted via this instance 43 # as joblib.Parallel will never access those results. 44 self.executor.cancel(self.futures) 45 self.futures.clear() 46 47 48 register_parallel_backend('distributed', DistributedBackend) 49 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/distributed/joblib.py b/distributed/joblib.py
--- a/distributed/joblib.py
+++ b/distributed/joblib.py
@@ -1,10 +1,36 @@
 from __future__ import print_function, division, absolute_import
 
-from joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin
-from joblib.parallel import register_parallel_backend
+from distutils.version import LooseVersion
+
 from tornado import gen
 
 from .executor import Executor, _wait
+from .utils import ignoring
+
+
+# A user could have installed joblib, sklearn, both, or neither. Further, only
+# joblib >= 0.10.0 supports backends, so we also need to check for that. This
+# bit of logic is to ensure that we create and register the backend for all
+# viable installations of joblib.
+joblib = sk_joblib = None
+with ignoring(ImportError):
+    import joblib
+    if LooseVersion(joblib.__version__) < '0.10.0':
+        joblib = None
+with ignoring(ImportError):
+    import sklearn.externals.joblib as sk_joblib
+    if LooseVersion(sk_joblib.__version__) < '0.10.0':
+        sk_joblib = None
+
+if joblib:
+    from joblib._parallel_backends import (ParallelBackendBase,
+                                           AutoBatchingMixin)
+elif sk_joblib:
+    from sklearn.externals.joblib._parallel_backends import (
+        ParallelBackendBase, AutoBatchingMixin)
+else:
+    raise RuntimeError("Joblib backend requires either `joblib` >= '0.10.0' "
+                       " or `sklearn` > '0.17.1'. Please install or upgrade")
 
 
 class DistributedBackend(ParallelBackendBase, AutoBatchingMixin):
@@ -45,4 +71,8 @@
         self.futures.clear()
 
 
-register_parallel_backend('distributed', DistributedBackend)
+# Register the backend with any available versions of joblib
+if joblib:
+    joblib.register_parallel_backend('distributed', DistributedBackend)
+if sk_joblib:
+    sk_joblib.register_parallel_backend('distributed', DistributedBackend)
{"golden_diff": "diff --git a/distributed/joblib.py b/distributed/joblib.py\n--- a/distributed/joblib.py\n+++ b/distributed/joblib.py\n@@ -1,10 +1,36 @@\n from __future__ import print_function, division, absolute_import\n \n-from joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin\n-from joblib.parallel import register_parallel_backend\n+from distutils.version import LooseVersion\n+\n from tornado import gen\n \n from .executor import Executor, _wait\n+from .utils import ignoring\n+\n+\n+# A user could have installed joblib, sklearn, both, or neither. Further, only\n+# joblib >= 0.10.0 supports backends, so we also need to check for that. This\n+# bit of logic is to ensure that we create and register the backend for all\n+# viable installations of joblib.\n+joblib = sk_joblib = None\n+with ignoring(ImportError):\n+ import joblib\n+ if LooseVersion(joblib.__version__) < '0.10.0':\n+ joblib = None\n+with ignoring(ImportError):\n+ import sklearn.externals.joblib as sk_joblib\n+ if LooseVersion(sk_joblib.__version__) < '0.10.0':\n+ sk_joblib = None\n+\n+if joblib:\n+ from joblib._parallel_backends import (ParallelBackendBase,\n+ AutoBatchingMixin)\n+elif sk_joblib:\n+ from sklearn.externals.joblib._parallel_backends import (\n+ ParallelBackendBase, AutoBatchingMixin)\n+else:\n+ raise RuntimeError(\"Joblib backend requires either `joblib` >= '0.10.0' \"\n+ \" or `sklearn` > '0.17.1'. Please install or upgrade\")\n \n \n class DistributedBackend(ParallelBackendBase, AutoBatchingMixin):\n@@ -45,4 +71,8 @@\n self.futures.clear()\n \n \n-register_parallel_backend('distributed', DistributedBackend)\n+# Register the backend with any available versions of joblib\n+if joblib:\n+ joblib.register_parallel_backend('distributed', DistributedBackend)\n+if sk_joblib:\n+ sk_joblib.register_parallel_backend('distributed', DistributedBackend)\n", "issue": "Distributed.joblib code fails to affect sklearn\nThe comments in [this stackoverflow answer](http://stackoverflow.com/questions/38601026/easy-way-to-use-parallel-options-of-scikit-learn-functions-on-hpc/38814491#38814491) raise concerns about the effectiveness of `distributed.joblib` to parallelize vanilla sklearn code. 
It appears that sklearn ships with its own version of Joblib, which the plugin registration in `distributed.joblib` does not affect.\n\nIt would be good to test sklearn functionality and, if necessary, add plugin registration to `sklearn.externals.joblib` in the same way we do to normal `joblib`.\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin\nfrom joblib.parallel import register_parallel_backend\nfrom tornado import gen\n\nfrom .executor import Executor, _wait\n\n\nclass DistributedBackend(ParallelBackendBase, AutoBatchingMixin):\n MIN_IDEAL_BATCH_DURATION = 0.2\n MAX_IDEAL_BATCH_DURATION = 1.0\n\n def __init__(self, scheduler_host='127.0.0.1:8786', loop=None):\n self.executor = Executor(scheduler_host, loop=loop)\n self.futures = set()\n\n def configure(self, n_jobs=1, parallel=None, **backend_args):\n return self.effective_n_jobs(n_jobs)\n\n def effective_n_jobs(self, n_jobs=1):\n return sum(self.executor.ncores().values())\n\n def apply_async(self, func, *args, **kwargs):\n callback = kwargs.pop('callback', None)\n kwargs['pure'] = False\n future = self.executor.submit(func, *args, **kwargs)\n self.futures.add(future)\n\n @gen.coroutine\n def callback_wrapper():\n result = yield _wait([future])\n self.futures.remove(future)\n callback(result) # gets called in separate thread\n\n self.executor.loop.add_callback(callback_wrapper)\n\n future.get = future.result # monkey patch to achieve AsyncResult API\n return future\n\n def abort_everything(self, ensure_ready=True):\n # Tell the executor to cancel any task submitted via this instance\n # as joblib.Parallel will never access those results.\n self.executor.cancel(self.futures)\n self.futures.clear()\n\n\nregister_parallel_backend('distributed', DistributedBackend)\n", "path": "distributed/joblib.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom distutils.version import LooseVersion\n\nfrom tornado import gen\n\nfrom .executor import Executor, _wait\nfrom .utils import ignoring\n\n\n# A user could have installed joblib, sklearn, both, or neither. Further, only\n# joblib >= 0.10.0 supports backends, so we also need to check for that. This\n# bit of logic is to ensure that we create and register the backend for all\n# viable installations of joblib.\njoblib = sk_joblib = None\nwith ignoring(ImportError):\n import joblib\n if LooseVersion(joblib.__version__) < '0.10.0':\n joblib = None\nwith ignoring(ImportError):\n import sklearn.externals.joblib as sk_joblib\n if LooseVersion(sk_joblib.__version__) < '0.10.0':\n sk_joblib = None\n\nif joblib:\n from joblib._parallel_backends import (ParallelBackendBase,\n AutoBatchingMixin)\nelif sk_joblib:\n from sklearn.externals.joblib._parallel_backends import (\n ParallelBackendBase, AutoBatchingMixin)\nelse:\n raise RuntimeError(\"Joblib backend requires either `joblib` >= '0.10.0' \"\n \" or `sklearn` > '0.17.1'. 
Please install or upgrade\")\n\n\nclass DistributedBackend(ParallelBackendBase, AutoBatchingMixin):\n MIN_IDEAL_BATCH_DURATION = 0.2\n MAX_IDEAL_BATCH_DURATION = 1.0\n\n def __init__(self, scheduler_host='127.0.0.1:8786', loop=None):\n self.executor = Executor(scheduler_host, loop=loop)\n self.futures = set()\n\n def configure(self, n_jobs=1, parallel=None, **backend_args):\n return self.effective_n_jobs(n_jobs)\n\n def effective_n_jobs(self, n_jobs=1):\n return sum(self.executor.ncores().values())\n\n def apply_async(self, func, *args, **kwargs):\n callback = kwargs.pop('callback', None)\n kwargs['pure'] = False\n future = self.executor.submit(func, *args, **kwargs)\n self.futures.add(future)\n\n @gen.coroutine\n def callback_wrapper():\n result = yield _wait([future])\n self.futures.remove(future)\n callback(result) # gets called in separate thread\n\n self.executor.loop.add_callback(callback_wrapper)\n\n future.get = future.result # monkey patch to achieve AsyncResult API\n return future\n\n def abort_everything(self, ensure_ready=True):\n # Tell the executor to cancel any task submitted via this instance\n # as joblib.Parallel will never access those results.\n self.executor.cancel(self.futures)\n self.futures.clear()\n\n\n# Register the backend with any available versions of joblib\nif joblib:\n joblib.register_parallel_backend('distributed', DistributedBackend)\nif sk_joblib:\n sk_joblib.register_parallel_backend('distributed', DistributedBackend)\n", "path": "distributed/joblib.py"}]}
887
483
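The fix captured in the record above reduces to one reusable pattern: import whichever copies of joblib are actually installed, skip any that do not expose the backend-registration hook, and register the same backend with each of them. A minimal, hedged sketch of that pattern follows; the helper name and the bare `hasattr` guard are illustrative simplifications, not the project's real code, which additionally enforces a `0.10.0` version floor.

```python
# Hedged sketch of conditional backend registration across joblib copies.
# register_backend_everywhere() is an invented helper; the real patch does the
# equivalent at import time for the 'distributed' backend.
from contextlib import suppress

joblib = sk_joblib = None
with suppress(ImportError):
    import joblib
with suppress(ImportError):
    import sklearn.externals.joblib as sk_joblib  # removed in modern scikit-learn


def register_backend_everywhere(name, factory):
    """Register `factory` under `name` with every importable joblib module."""
    for module in (joblib, sk_joblib):
        if module is not None and hasattr(module, "register_parallel_backend"):
            module.register_parallel_backend(name, factory)
```

The record's own patch then performs the equivalent of `register_backend_everywhere('distributed', DistributedBackend)` for each module it managed to import.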
gh_patches_debug_60682
rasdani/github-patches
git_diff
OCHA-DAP__hdx-ckan-2076
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Login page: change spacing on left panel The spacing in the left panel is odd. Change to something like the below: ![image](https://cloud.githubusercontent.com/assets/1654485/5451121/6d3fa152-850d-11e4-87dd-4485c2f767da.png) Note, this will stay in backlog for now as we may want to revise this page to align with the Frog design. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ckanext-hdx_theme/ckanext/hdx_theme/version.py` Content: ``` 1 hdx_version = 'v0.5.13' 2 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py --- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py @@ -1 +1 @@ -hdx_version = 'v0.5.13' +hdx_version = 'v0.5.15'
{"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.5.13'\n+hdx_version = 'v0.5.15'\n", "issue": "Login page: change spacing on left panel \nThe spacing in the left panel is odd. Change to something like the below: \n\n![image](https://cloud.githubusercontent.com/assets/1654485/5451121/6d3fa152-850d-11e4-87dd-4485c2f767da.png)\n\nNote, this will stay in backlog for now as we may want to revise this page to align with the Frog design.\n\n", "before_files": [{"content": "hdx_version = 'v0.5.13'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}], "after_files": [{"content": "hdx_version = 'v0.5.15'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]}
396
108
gh_patches_debug_10301
rasdani/github-patches
git_diff
cisagov__manage.get.gov-1157
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update /availability API to pull from new registry ### Issue Description The [current API](https://github.com/cisagov/getgov/blob/784cc0f618e056c262512d688e8e4316dd25c9e4/src/api/views.py#L14) consists of a second-hand pull of canonical data that the .gov program [publishes to GitHub](https://github.com/cisagov/dotgov-data/blob/main/current-full.csv). Change this implementation so that queries poll the new registry/Whois/RDAP. ### Acceptance Criteria - [x] The checkDomain method is used to check that a domain is in the registry or not and - [x] The check is used at /availability - [x] Tests are implemented and/or updated - [ ] In the new domain application flow, where a user is checking if a domain is available for use, it should now use this /availability endpoint (if not already) and tests should be updated to check that /availability and epp is being triggered on this page as well. ### Additional Context (optional) Once done, the backend work of #476 and frontend work of #561 can be completed. _Consider add-on to defend the endpoint from bot spam._ ### Implementation Notes Epp has a check command that can be run with just the name of a given domain. This should be used as the method for checking the desired domain at this endpoint. ### Issue Links Blocking #476 and #561 Blocked by #1028 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/api/views.py` Content: ``` 1 """Internal API views""" 2 from django.apps import apps 3 from django.views.decorators.http import require_http_methods 4 from django.http import JsonResponse 5 6 from django.contrib.auth.decorators import login_required 7 8 import requests 9 10 from cachetools.func import ttl_cache 11 12 13 DOMAIN_FILE_URL = ( 14 "https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv" 15 ) 16 17 18 DOMAIN_API_MESSAGES = { 19 "required": "Enter the .gov domain you want. Don’t include “www” or “.gov.”" 20 " For example, if you want www.city.gov, you would enter “city”" 21 " (without the quotes).", 22 "extra_dots": "Enter the .gov domain you want without any periods.", 23 "unavailable": "That domain isn’t available. Try entering another one." 24 " Contact us if you need help coming up with a domain.", 25 "invalid": "Enter a domain using only letters," 26 " numbers, or hyphens (though we don't recommend using hyphens).", 27 "success": "That domain is available!", 28 } 29 30 31 # this file doesn't change that often, nor is it that big, so cache the result 32 # in memory for ten minutes 33 @ttl_cache(ttl=600) 34 def _domains(): 35 """Return a list of the current .gov domains. 36 37 Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain, 38 lowercase everything and return the list. 39 """ 40 DraftDomain = apps.get_model("registrar.DraftDomain") 41 # 5 second timeout 42 file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text 43 domains = set() 44 # skip the first line 45 for line in file_contents.splitlines()[1:]: 46 # get the domain before the first comma 47 domain = line.split(",", 1)[0] 48 # sanity-check the string we got from the file here 49 if DraftDomain.string_could_be_domain(domain): 50 # lowercase everything when we put it in domains 51 domains.add(domain.lower()) 52 return domains 53 54 55 def in_domains(domain): 56 """Return true if the given domain is in the domains list. 
57 58 The given domain is lowercased to match against the domains list. If the 59 given domain doesn't end with .gov, ".gov" is added when looking for 60 a match. 61 """ 62 domain = domain.lower() 63 if domain.endswith(".gov"): 64 return domain.lower() in _domains() 65 else: 66 # domain search string doesn't end with .gov, add it on here 67 return (domain + ".gov") in _domains() 68 69 70 @require_http_methods(["GET"]) 71 @login_required 72 def available(request, domain=""): 73 """Is a given domain available or not. 74 75 Response is a JSON dictionary with the key "available" and value true or 76 false. 77 """ 78 DraftDomain = apps.get_model("registrar.DraftDomain") 79 # validate that the given domain could be a domain name and fail early if 80 # not. 81 if not ( 82 DraftDomain.string_could_be_domain(domain) 83 or DraftDomain.string_could_be_domain(domain + ".gov") 84 ): 85 return JsonResponse( 86 {"available": False, "message": DOMAIN_API_MESSAGES["invalid"]} 87 ) 88 # a domain is available if it is NOT in the list of current domains 89 if in_domains(domain): 90 return JsonResponse( 91 {"available": False, "message": DOMAIN_API_MESSAGES["unavailable"]} 92 ) 93 else: 94 return JsonResponse( 95 {"available": True, "message": DOMAIN_API_MESSAGES["success"]} 96 ) 97 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/api/views.py b/src/api/views.py --- a/src/api/views.py +++ b/src/api/views.py @@ -59,12 +59,12 @@ given domain doesn't end with .gov, ".gov" is added when looking for a match. """ - domain = domain.lower() + Domain = apps.get_model("registrar.Domain") if domain.endswith(".gov"): - return domain.lower() in _domains() + return Domain.available(domain) else: # domain search string doesn't end with .gov, add it on here - return (domain + ".gov") in _domains() + return Domain.available(domain + ".gov") @require_http_methods(["GET"])
{"golden_diff": "diff --git a/src/api/views.py b/src/api/views.py\n--- a/src/api/views.py\n+++ b/src/api/views.py\n@@ -59,12 +59,12 @@\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match.\n \"\"\"\n- domain = domain.lower()\n+ Domain = apps.get_model(\"registrar.Domain\")\n if domain.endswith(\".gov\"):\n- return domain.lower() in _domains()\n+ return Domain.available(domain)\n else:\n # domain search string doesn't end with .gov, add it on here\n- return (domain + \".gov\") in _domains()\n+ return Domain.available(domain + \".gov\")\n \n \n @require_http_methods([\"GET\"])\n", "issue": "Update /availability API to pull from new registry\n### Issue Description\r\n\r\nThe [current API](https://github.com/cisagov/getgov/blob/784cc0f618e056c262512d688e8e4316dd25c9e4/src/api/views.py#L14) consists of a second-hand pull of canonical data that the .gov program [publishes to GitHub](https://github.com/cisagov/dotgov-data/blob/main/current-full.csv). Change this implementation so that queries poll the new registry/Whois/RDAP.\r\n\r\n### Acceptance Criteria\r\n\r\n- [x] The checkDomain method is used to check that a domain is in the registry or not\r\n\r\nand\r\n\r\n- [x] The check is used at /availability \r\n- [x] Tests are implemented and/or updated\r\n- [ ] In the new domain application flow, where a user is checking if a domain is available for use, it should now use this /availability endpoint (if not already) and tests should be updated to check that /availability and epp is being triggered on this page as well. \r\n\r\n### Additional Context (optional)\r\n\r\nOnce done, the backend work of #476 and frontend work of #561 can be completed.\r\n\r\n_Consider add-on to defend the endpoint from bot spam._\r\n\r\n### Implementation Notes\r\n\r\nEpp has a check command that can be run with just the name of a given domain. This should be used as the method for checking the desired domain at this endpoint.\r\n\r\n### Issue Links\r\n\r\nBlocking #476 and #561\r\nBlocked by #1028 \n", "before_files": [{"content": "\"\"\"Internal API views\"\"\"\nfrom django.apps import apps\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import JsonResponse\n\nfrom django.contrib.auth.decorators import login_required\n\nimport requests\n\nfrom cachetools.func import ttl_cache\n\n\nDOMAIN_FILE_URL = (\n \"https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv\"\n)\n\n\nDOMAIN_API_MESSAGES = {\n \"required\": \"Enter the .gov domain you want. Don\u2019t include \u201cwww\u201d or \u201c.gov.\u201d\"\n \" For example, if you want www.city.gov, you would enter \u201ccity\u201d\"\n \" (without the quotes).\",\n \"extra_dots\": \"Enter the .gov domain you want without any periods.\",\n \"unavailable\": \"That domain isn\u2019t available. 
Try entering another one.\"\n \" Contact us if you need help coming up with a domain.\",\n \"invalid\": \"Enter a domain using only letters,\"\n \" numbers, or hyphens (though we don't recommend using hyphens).\",\n \"success\": \"That domain is available!\",\n}\n\n\n# this file doesn't change that often, nor is it that big, so cache the result\n# in memory for ten minutes\n@ttl_cache(ttl=600)\ndef _domains():\n \"\"\"Return a list of the current .gov domains.\n\n Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,\n lowercase everything and return the list.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # 5 second timeout\n file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text\n domains = set()\n # skip the first line\n for line in file_contents.splitlines()[1:]:\n # get the domain before the first comma\n domain = line.split(\",\", 1)[0]\n # sanity-check the string we got from the file here\n if DraftDomain.string_could_be_domain(domain):\n # lowercase everything when we put it in domains\n domains.add(domain.lower())\n return domains\n\n\ndef in_domains(domain):\n \"\"\"Return true if the given domain is in the domains list.\n\n The given domain is lowercased to match against the domains list. If the\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match.\n \"\"\"\n domain = domain.lower()\n if domain.endswith(\".gov\"):\n return domain.lower() in _domains()\n else:\n # domain search string doesn't end with .gov, add it on here\n return (domain + \".gov\") in _domains()\n\n\n@require_http_methods([\"GET\"])\n@login_required\ndef available(request, domain=\"\"):\n \"\"\"Is a given domain available or not.\n\n Response is a JSON dictionary with the key \"available\" and value true or\n false.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # validate that the given domain could be a domain name and fail early if\n # not.\n if not (\n DraftDomain.string_could_be_domain(domain)\n or DraftDomain.string_could_be_domain(domain + \".gov\")\n ):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"invalid\"]}\n )\n # a domain is available if it is NOT in the list of current domains\n if in_domains(domain):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"unavailable\"]}\n )\n else:\n return JsonResponse(\n {\"available\": True, \"message\": DOMAIN_API_MESSAGES[\"success\"]}\n )\n", "path": "src/api/views.py"}], "after_files": [{"content": "\"\"\"Internal API views\"\"\"\nfrom django.apps import apps\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import JsonResponse\n\nfrom django.contrib.auth.decorators import login_required\n\nimport requests\n\nfrom cachetools.func import ttl_cache\n\n\nDOMAIN_FILE_URL = (\n \"https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv\"\n)\n\n\nDOMAIN_API_MESSAGES = {\n \"required\": \"Enter the .gov domain you want. Don\u2019t include \u201cwww\u201d or \u201c.gov.\u201d\"\n \" For example, if you want www.city.gov, you would enter \u201ccity\u201d\"\n \" (without the quotes).\",\n \"extra_dots\": \"Enter the .gov domain you want without any periods.\",\n \"unavailable\": \"That domain isn\u2019t available. 
Try entering another one.\"\n \" Contact us if you need help coming up with a domain.\",\n \"invalid\": \"Enter a domain using only letters,\"\n \" numbers, or hyphens (though we don't recommend using hyphens).\",\n \"success\": \"That domain is available!\",\n}\n\n\n# this file doesn't change that often, nor is it that big, so cache the result\n# in memory for ten minutes\n@ttl_cache(ttl=600)\ndef _domains():\n \"\"\"Return a list of the current .gov domains.\n\n Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,\n lowercase everything and return the list.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # 5 second timeout\n file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text\n domains = set()\n # skip the first line\n for line in file_contents.splitlines()[1:]:\n # get the domain before the first comma\n domain = line.split(\",\", 1)[0]\n # sanity-check the string we got from the file here\n if DraftDomain.string_could_be_domain(domain):\n # lowercase everything when we put it in domains\n domains.add(domain.lower())\n return domains\n\n\ndef in_domains(domain):\n \"\"\"Return true if the given domain is in the domains list.\n\n The given domain is lowercased to match against the domains list. If the\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match.\n \"\"\"\n Domain = apps.get_model(\"registrar.Domain\")\n if domain.endswith(\".gov\"):\n return Domain.available(domain)\n else:\n # domain search string doesn't end with .gov, add it on here\n return Domain.available(domain + \".gov\")\n\n\n@require_http_methods([\"GET\"])\n@login_required\ndef available(request, domain=\"\"):\n \"\"\"Is a given domain available or not.\n\n Response is a JSON dictionary with the key \"available\" and value true or\n false.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # validate that the given domain could be a domain name and fail early if\n # not.\n if not (\n DraftDomain.string_could_be_domain(domain)\n or DraftDomain.string_could_be_domain(domain + \".gov\")\n ):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"invalid\"]}\n )\n # a domain is available if it is NOT in the list of current domains\n if in_domains(domain):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"unavailable\"]}\n )\n else:\n return JsonResponse(\n {\"available\": True, \"message\": DOMAIN_API_MESSAGES[\"success\"]}\n )\n", "path": "src/api/views.py"}]}
1560
161
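A compact way to read the diff in the record above: the availability check stops being a set-membership test against a cached CSV and becomes a call into the registry-backed `Domain` model. The stub below sketches only that shape in plain Python; `registry_has()` and the sample domain set are invented stand-ins for the registrar's real EPP-backed check, not its actual API.

```python
# Hedged sketch: in_domains() delegating to a registry lookup rather than a
# cached CSV. registry_has() and _FAKE_REGISTRY are invented for illustration.
_FAKE_REGISTRY = {"city.gov", "example.gov"}


def registry_has(name: str) -> bool:
    """Stand-in for an EPP 'check' round-trip against the .gov registry."""
    return name.lower() in _FAKE_REGISTRY


def in_domains(domain: str) -> bool:
    """Return True if the requested name (with or without .gov) is taken."""
    name = domain if domain.endswith(".gov") else domain + ".gov"
    return registry_has(name)


assert in_domains("city") is True
assert in_domains("newtown") is False
```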
gh_patches_debug_27706
rasdani/github-patches
git_diff
digitalfabrik__integreat-cms-416
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix capabilities bug At the moment the button "Create Page" disapears for every role except the superuser. ![image](https://user-images.githubusercontent.com/15608678/84593767-1dde2000-ae4e-11ea-858b-be5d7bf4dded.png) Fix capabilities bug At the moment the button "Create Page" disapears for every role except the superuser. ![image](https://user-images.githubusercontent.com/15608678/84593767-1dde2000-ae4e-11ea-858b-be5d7bf4dded.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cms/views/pages/page_view.py` Content: ``` 1 """ 2 3 Returns: 4 [type]: [description] 5 """ 6 import logging 7 8 from django.contrib import messages 9 from django.contrib.auth.decorators import login_required 10 from django.contrib.auth.mixins import PermissionRequiredMixin 11 from django.core.exceptions import PermissionDenied 12 from django.shortcuts import render, redirect 13 from django.utils.decorators import method_decorator 14 from django.utils.translation import ugettext as _ 15 from django.views.generic import TemplateView 16 17 from ...constants import status 18 from ...decorators import region_permission_required 19 from ...forms.pages import PageForm, PageTranslationForm 20 from ...models import Page, PageTranslation, Region, Language 21 22 logger = logging.getLogger(__name__) 23 24 25 @method_decorator(login_required, name='dispatch') 26 @method_decorator(region_permission_required, name='dispatch') 27 class PageView(PermissionRequiredMixin, TemplateView): 28 permission_required = 'cms.view_pages' 29 raise_exception = True 30 31 template_name = 'pages/page_form.html' 32 base_context = { 33 'current_menu_item': 'pages', 34 'PUBLIC': status.PUBLIC 35 } 36 37 def get(self, request, *args, **kwargs): 38 39 region = Region.objects.get(slug=kwargs.get('region_slug')) 40 41 language = Language.objects.get(code=kwargs.get('language_code')) 42 43 # get page and translation objects if they exist 44 page = Page.objects.filter(id=kwargs.get('page_id')).first() 45 page_translation = PageTranslation.objects.filter( 46 page=page, 47 language=language, 48 ).first() 49 50 # Make form disabled if user has no permission to edit the page 51 if not request.user.has_perm('cms.edit_page', page): 52 disabled = True 53 messages.warning(request, _("You don't have the permission to edit this page.")) 54 elif page and page.archived: 55 disabled = True 56 messages.warning(request, _("You cannot edit this page because it is archived.")) 57 else: 58 disabled = False 59 60 page_form = PageForm( 61 instance=page, 62 region=region, 63 language=language, 64 disabled=disabled 65 ) 66 page_translation_form = PageTranslationForm( 67 instance=page_translation, 68 disabled=disabled 69 ) 70 71 return render(request, self.template_name, { 72 **self.base_context, 73 'page_form': page_form, 74 'page_translation_form': page_translation_form, 75 'page': page, 76 'language': language, 77 # Languages for tab view 78 'languages': region.languages if page else [language], 79 }) 80 81 # pylint: disable=too-many-branches,unused-argument 82 def post(self, request, *args, **kwargs): 83 84 region = Region.objects.get(slug=kwargs.get('region_slug')) 85 language = Language.objects.get(code=kwargs.get('language_code')) 86 87 page_instance = Page.objects.filter(id=kwargs.get('page_id')).first() 88 page_translation_instance = 
PageTranslation.objects.filter( 89 page=page_instance, 90 language=language, 91 ).first() 92 93 if not request.user.has_perm('cms.edit_page', page_instance): 94 raise PermissionDenied 95 96 page_form = PageForm( 97 request.POST, 98 instance=page_instance, 99 region=region, 100 language=language, 101 ) 102 page_translation_form = PageTranslationForm( 103 request.POST, 104 instance=page_translation_instance, 105 region=region, 106 language=language, 107 ) 108 109 if page_translation_form.data.get('public') and 'public' in page_translation_form.changed_data: 110 if not request.user.has_perm('cms.publish_page', page_instance): 111 raise PermissionDenied 112 113 # TODO: error handling 114 if not page_form.is_valid() or not page_translation_form.is_valid(): 115 messages.error(request, _('Errors have occurred.')) 116 return render(request, self.template_name, { 117 **self.base_context, 118 'page_form': page_form, 119 'page_translation_form': page_translation_form, 120 'page': page_instance, 121 'language': language, 122 # Languages for tab view 123 'languages': region.languages if page_instance else [language], 124 }) 125 126 if not page_form.has_changed() and not page_translation_form.has_changed(): 127 messages.info(request, _('No changes detected.')) 128 return render(request, self.template_name, { 129 **self.base_context, 130 'page_form': page_form, 131 'page_translation_form': page_translation_form, 132 'page': page_instance, 133 'language': language, 134 # Languages for tab view 135 'languages': region.languages if page_instance else [language], 136 }) 137 138 page = page_form.save() 139 page_translation = page_translation_form.save( 140 page=page, 141 user=request.user, 142 ) 143 144 published = page_translation.status == status.PUBLIC 145 if not page_instance: 146 if published: 147 messages.success(request, _('Page was successfully created and published.')) 148 else: 149 messages.success(request, _('Page was successfully created.')) 150 elif not page_translation_instance: 151 if published: 152 messages.success(request, _('Translation was successfully created and published.')) 153 else: 154 messages.success(request, _('Translation was successfully created.')) 155 else: 156 if published: 157 messages.success(request, _('Translation was successfully published.')) 158 else: 159 messages.success(request, _('Translation was successfully saved.')) 160 161 return redirect('edit_page', **{ 162 'page_id': page.id, 163 'region_slug': region.slug, 164 'language_code': language.code, 165 }) 166 ``` Path: `src/cms/rules.py` Content: ``` 1 from rules import add_perm, predicate 2 3 4 # Predicates 5 6 @predicate 7 def is_page_editor(user, page): 8 if not page: 9 return False 10 return user in page.editors.all() 11 12 @predicate 13 def is_page_publisher(user, page): 14 if not page: 15 return False 16 return user in page.publishers.all() 17 18 @predicate 19 # pylint: disable=unused-argument 20 def can_edit_all_pages(user, page): 21 return user.has_perm('edit_pages') 22 23 @predicate 24 # pylint: disable=unused-argument 25 def can_publish_all_pages(user, page): 26 return user.has_perm('publish_pages') 27 28 29 # Permissions 30 31 add_perm('cms.edit_page', can_edit_all_pages | is_page_editor | can_publish_all_pages | is_page_publisher) 32 add_perm('cms.publish_page', can_publish_all_pages | is_page_publisher) 33 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cms/rules.py b/src/cms/rules.py --- a/src/cms/rules.py +++ b/src/cms/rules.py @@ -18,12 +18,12 @@ @predicate # pylint: disable=unused-argument def can_edit_all_pages(user, page): - return user.has_perm('edit_pages') + return user.has_perm('cms.edit_pages') @predicate # pylint: disable=unused-argument def can_publish_all_pages(user, page): - return user.has_perm('publish_pages') + return user.has_perm('cms.publish_pages') # Permissions diff --git a/src/cms/views/pages/page_view.py b/src/cms/views/pages/page_view.py --- a/src/cms/views/pages/page_view.py +++ b/src/cms/views/pages/page_view.py @@ -48,14 +48,17 @@ ).first() # Make form disabled if user has no permission to edit the page - if not request.user.has_perm('cms.edit_page', page): - disabled = True - messages.warning(request, _("You don't have the permission to edit this page.")) - elif page and page.archived: - disabled = True - messages.warning(request, _("You cannot edit this page because it is archived.")) + disabled = False + if page: + if page.archived: + disabled = True + messages.warning(request, _("You cannot edit this page because it is archived.")) + elif not request.user.has_perm('cms.edit_page', page): + disabled = True + messages.warning(request, _("You don't have the permission to edit this page, but you can propose changes and submit them for review instead.")) else: - disabled = False + if not request.user.has_perm('cms.edit_pages'): + raise PermissionDenied page_form = PageForm( instance=page,
{"golden_diff": "diff --git a/src/cms/rules.py b/src/cms/rules.py\n--- a/src/cms/rules.py\n+++ b/src/cms/rules.py\n@@ -18,12 +18,12 @@\n @predicate\n # pylint: disable=unused-argument\n def can_edit_all_pages(user, page):\n- return user.has_perm('edit_pages')\n+ return user.has_perm('cms.edit_pages')\n \n @predicate\n # pylint: disable=unused-argument\n def can_publish_all_pages(user, page):\n- return user.has_perm('publish_pages')\n+ return user.has_perm('cms.publish_pages')\n \n \n # Permissions\ndiff --git a/src/cms/views/pages/page_view.py b/src/cms/views/pages/page_view.py\n--- a/src/cms/views/pages/page_view.py\n+++ b/src/cms/views/pages/page_view.py\n@@ -48,14 +48,17 @@\n ).first()\n \n # Make form disabled if user has no permission to edit the page\n- if not request.user.has_perm('cms.edit_page', page):\n- disabled = True\n- messages.warning(request, _(\"You don't have the permission to edit this page.\"))\n- elif page and page.archived:\n- disabled = True\n- messages.warning(request, _(\"You cannot edit this page because it is archived.\"))\n+ disabled = False\n+ if page:\n+ if page.archived:\n+ disabled = True\n+ messages.warning(request, _(\"You cannot edit this page because it is archived.\"))\n+ elif not request.user.has_perm('cms.edit_page', page):\n+ disabled = True\n+ messages.warning(request, _(\"You don't have the permission to edit this page, but you can propose changes and submit them for review instead.\"))\n else:\n- disabled = False\n+ if not request.user.has_perm('cms.edit_pages'):\n+ raise PermissionDenied\n \n page_form = PageForm(\n instance=page,\n", "issue": "Fix capabilities bug\nAt the moment the button \"Create Page\" disapears for every role except the superuser.\r\n![image](https://user-images.githubusercontent.com/15608678/84593767-1dde2000-ae4e-11ea-858b-be5d7bf4dded.png)\r\n\nFix capabilities bug\nAt the moment the button \"Create Page\" disapears for every role except the superuser.\r\n![image](https://user-images.githubusercontent.com/15608678/84593767-1dde2000-ae4e-11ea-858b-be5d7bf4dded.png)\r\n\n", "before_files": [{"content": "\"\"\"\n\nReturns:\n [type]: [description]\n\"\"\"\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import status\nfrom ...decorators import region_permission_required\nfrom ...forms.pages import PageForm, PageTranslationForm\nfrom ...models import Page, PageTranslation, Region, Language\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(login_required, name='dispatch')\n@method_decorator(region_permission_required, name='dispatch')\nclass PageView(PermissionRequiredMixin, TemplateView):\n permission_required = 'cms.view_pages'\n raise_exception = True\n\n template_name = 'pages/page_form.html'\n base_context = {\n 'current_menu_item': 'pages',\n 'PUBLIC': status.PUBLIC\n }\n\n def get(self, request, *args, **kwargs):\n\n region = Region.objects.get(slug=kwargs.get('region_slug'))\n\n language = Language.objects.get(code=kwargs.get('language_code'))\n\n # get page and translation objects if they exist\n page = Page.objects.filter(id=kwargs.get('page_id')).first()\n page_translation = PageTranslation.objects.filter(\n 
page=page,\n language=language,\n ).first()\n\n # Make form disabled if user has no permission to edit the page\n if not request.user.has_perm('cms.edit_page', page):\n disabled = True\n messages.warning(request, _(\"You don't have the permission to edit this page.\"))\n elif page and page.archived:\n disabled = True\n messages.warning(request, _(\"You cannot edit this page because it is archived.\"))\n else:\n disabled = False\n\n page_form = PageForm(\n instance=page,\n region=region,\n language=language,\n disabled=disabled\n )\n page_translation_form = PageTranslationForm(\n instance=page_translation,\n disabled=disabled\n )\n\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page else [language],\n })\n\n # pylint: disable=too-many-branches,unused-argument\n def post(self, request, *args, **kwargs):\n\n region = Region.objects.get(slug=kwargs.get('region_slug'))\n language = Language.objects.get(code=kwargs.get('language_code'))\n\n page_instance = Page.objects.filter(id=kwargs.get('page_id')).first()\n page_translation_instance = PageTranslation.objects.filter(\n page=page_instance,\n language=language,\n ).first()\n\n if not request.user.has_perm('cms.edit_page', page_instance):\n raise PermissionDenied\n\n page_form = PageForm(\n request.POST,\n instance=page_instance,\n region=region,\n language=language,\n )\n page_translation_form = PageTranslationForm(\n request.POST,\n instance=page_translation_instance,\n region=region,\n language=language,\n )\n\n if page_translation_form.data.get('public') and 'public' in page_translation_form.changed_data:\n if not request.user.has_perm('cms.publish_page', page_instance):\n raise PermissionDenied\n\n # TODO: error handling\n if not page_form.is_valid() or not page_translation_form.is_valid():\n messages.error(request, _('Errors have occurred.'))\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page_instance,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page_instance else [language],\n })\n\n if not page_form.has_changed() and not page_translation_form.has_changed():\n messages.info(request, _('No changes detected.'))\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page_instance,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page_instance else [language],\n })\n\n page = page_form.save()\n page_translation = page_translation_form.save(\n page=page,\n user=request.user,\n )\n\n published = page_translation.status == status.PUBLIC\n if not page_instance:\n if published:\n messages.success(request, _('Page was successfully created and published.'))\n else:\n messages.success(request, _('Page was successfully created.'))\n elif not page_translation_instance:\n if published:\n messages.success(request, _('Translation was successfully created and published.'))\n else:\n messages.success(request, _('Translation was successfully created.'))\n else:\n if published:\n messages.success(request, _('Translation was successfully published.'))\n else:\n messages.success(request, _('Translation was successfully saved.'))\n\n return redirect('edit_page', **{\n 'page_id': 
page.id,\n 'region_slug': region.slug,\n 'language_code': language.code,\n })\n", "path": "src/cms/views/pages/page_view.py"}, {"content": "from rules import add_perm, predicate\n\n\n# Predicates\n\n@predicate\ndef is_page_editor(user, page):\n if not page:\n return False\n return user in page.editors.all()\n\n@predicate\ndef is_page_publisher(user, page):\n if not page:\n return False\n return user in page.publishers.all()\n\n@predicate\n# pylint: disable=unused-argument\ndef can_edit_all_pages(user, page):\n return user.has_perm('edit_pages')\n\n@predicate\n# pylint: disable=unused-argument\ndef can_publish_all_pages(user, page):\n return user.has_perm('publish_pages')\n\n\n# Permissions\n\nadd_perm('cms.edit_page', can_edit_all_pages | is_page_editor | can_publish_all_pages | is_page_publisher)\nadd_perm('cms.publish_page', can_publish_all_pages | is_page_publisher)\n", "path": "src/cms/rules.py"}], "after_files": [{"content": "\"\"\"\n\nReturns:\n [type]: [description]\n\"\"\"\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import status\nfrom ...decorators import region_permission_required\nfrom ...forms.pages import PageForm, PageTranslationForm\nfrom ...models import Page, PageTranslation, Region, Language\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(login_required, name='dispatch')\n@method_decorator(region_permission_required, name='dispatch')\nclass PageView(PermissionRequiredMixin, TemplateView):\n permission_required = 'cms.view_pages'\n raise_exception = True\n\n template_name = 'pages/page_form.html'\n base_context = {\n 'current_menu_item': 'pages',\n 'PUBLIC': status.PUBLIC\n }\n\n def get(self, request, *args, **kwargs):\n\n region = Region.objects.get(slug=kwargs.get('region_slug'))\n\n language = Language.objects.get(code=kwargs.get('language_code'))\n\n # get page and translation objects if they exist\n page = Page.objects.filter(id=kwargs.get('page_id')).first()\n page_translation = PageTranslation.objects.filter(\n page=page,\n language=language,\n ).first()\n\n # Make form disabled if user has no permission to edit the page\n disabled = False\n if page:\n if page.archived:\n disabled = True\n messages.warning(request, _(\"You cannot edit this page because it is archived.\"))\n elif not request.user.has_perm('cms.edit_page', page):\n disabled = True\n messages.warning(request, _(\"You don't have the permission to edit this page, but you can propose changes and submit them for review instead.\"))\n else:\n if not request.user.has_perm('cms.edit_pages'):\n raise PermissionDenied\n\n page_form = PageForm(\n instance=page,\n region=region,\n language=language,\n disabled=disabled\n )\n page_translation_form = PageTranslationForm(\n instance=page_translation,\n disabled=disabled\n )\n\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page else [language],\n })\n\n # pylint: disable=too-many-branches,unused-argument\n def post(self, request, *args, **kwargs):\n\n 
region = Region.objects.get(slug=kwargs.get('region_slug'))\n language = Language.objects.get(code=kwargs.get('language_code'))\n\n page_instance = Page.objects.filter(id=kwargs.get('page_id')).first()\n page_translation_instance = PageTranslation.objects.filter(\n page=page_instance,\n language=language,\n ).first()\n\n if not request.user.has_perm('cms.edit_page', page_instance):\n raise PermissionDenied\n\n page_form = PageForm(\n request.POST,\n instance=page_instance,\n region=region,\n language=language,\n )\n page_translation_form = PageTranslationForm(\n request.POST,\n instance=page_translation_instance,\n region=region,\n language=language,\n )\n\n if page_translation_form.data.get('public') and 'public' in page_translation_form.changed_data:\n if not request.user.has_perm('cms.publish_page', page_instance):\n raise PermissionDenied\n\n # TODO: error handling\n if not page_form.is_valid() or not page_translation_form.is_valid():\n messages.error(request, _('Errors have occurred.'))\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page_instance,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page_instance else [language],\n })\n\n if not page_form.has_changed() and not page_translation_form.has_changed():\n messages.info(request, _('No changes detected.'))\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page_instance,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page_instance else [language],\n })\n\n page = page_form.save()\n page_translation = page_translation_form.save(\n page=page,\n user=request.user,\n )\n\n published = page_translation.status == status.PUBLIC\n if not page_instance:\n if published:\n messages.success(request, _('Page was successfully created and published.'))\n else:\n messages.success(request, _('Page was successfully created.'))\n elif not page_translation_instance:\n if published:\n messages.success(request, _('Translation was successfully created and published.'))\n else:\n messages.success(request, _('Translation was successfully created.'))\n else:\n if published:\n messages.success(request, _('Translation was successfully published.'))\n else:\n messages.success(request, _('Translation was successfully saved.'))\n\n return redirect('edit_page', **{\n 'page_id': page.id,\n 'region_slug': region.slug,\n 'language_code': language.code,\n })\n", "path": "src/cms/views/pages/page_view.py"}, {"content": "from rules import add_perm, predicate\n\n\n# Predicates\n\n@predicate\ndef is_page_editor(user, page):\n if not page:\n return False\n return user in page.editors.all()\n\n@predicate\ndef is_page_publisher(user, page):\n if not page:\n return False\n return user in page.publishers.all()\n\n@predicate\n# pylint: disable=unused-argument\ndef can_edit_all_pages(user, page):\n return user.has_perm('cms.edit_pages')\n\n@predicate\n# pylint: disable=unused-argument\ndef can_publish_all_pages(user, page):\n return user.has_perm('cms.publish_pages')\n\n\n# Permissions\n\nadd_perm('cms.edit_page', can_edit_all_pages | is_page_editor | can_publish_all_pages | is_page_publisher)\nadd_perm('cms.publish_page', can_publish_all_pages | is_page_publisher)\n", "path": "src/cms/rules.py"}]}
2207
412
gh_patches_debug_24069
rasdani/github-patches
git_diff
Qiskit__qiskit-2700
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Timeslots should have informative representation <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. --> ### What is the expected enhancement? Currently `Interval`, `Timeslot` and `TimeslotCollection` have the standard python object representation. This should be replaced with an informative implementation of `__repr__`. As `TimeslotCollection` contains `Timeslot`s and `Timeslot`s contain `Interval`s the hierarchal nature of these data structure should be used to derive representations. ### Example A possible representation might look like Interval: `(t0, tf)` eg. `(0, 10)` Timeslot: Is an interval on a channel `(channel, repr(interval))`: `(DriveChannel(0), (0, 10))` TimeslotCollection: Is a dictionary of timeslots. Ie. it groups together intervals on channels. `{channel1: [repr(interval1_c1), repr(interval2_c1), ...]), channel2: [repr(interval1_c2), repr(interval2_c2), ...]}` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `qiskit/pulse/timeslots.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # This code is part of Qiskit. 4 # 5 # (C) Copyright IBM 2017, 2019. 6 # 7 # This code is licensed under the Apache License, Version 2.0. You may 8 # obtain a copy of this license in the LICENSE.txt file in the root directory 9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 10 # 11 # Any modifications or derivative works of this code must retain this 12 # copyright notice, and modified files need to carry a notice indicating 13 # that they have been altered from the originals. 14 15 """ 16 Timeslots for channels. 17 """ 18 from collections import defaultdict 19 import itertools 20 from typing import List, Tuple 21 22 from .channels import Channel 23 from .exceptions import PulseError 24 25 26 # pylint: disable=missing-return-doc 27 28 29 class Interval: 30 """Time interval.""" 31 32 def __init__(self, begin: int, end: int): 33 """Create an interval = (begin, end)) 34 35 Args: 36 begin: begin time of this interval 37 end: end time of this interval 38 39 Raises: 40 PulseError: when invalid time or duration is specified 41 """ 42 if begin < 0: 43 raise PulseError("Cannot create Interval with negative begin time") 44 if end < 0: 45 raise PulseError("Cannot create Interval with negative end time") 46 self._begin = begin 47 self._end = end 48 49 @property 50 def begin(self): 51 """Begin time of this interval.""" 52 return self._begin 53 54 @property 55 def end(self): 56 """End time of this interval.""" 57 return self._end 58 59 @property 60 def duration(self): 61 """Duration of this interval.""" 62 return self._end - self._begin 63 64 def has_overlap(self, interval: 'Interval') -> bool: 65 """Check if self has overlap with `interval`. 
66 67 Args: 68 interval: interval to be examined 69 70 Returns: 71 bool: True if self has overlap with `interval` otherwise False 72 """ 73 if self.begin < interval.end and interval.begin < self.end: 74 return True 75 return False 76 77 def shift(self, time: int) -> 'Interval': 78 """Return a new interval shifted by `time` from self 79 80 Args: 81 time: time to be shifted 82 83 Returns: 84 Interval: interval shifted by `time` 85 """ 86 return Interval(self._begin + time, self._end + time) 87 88 def __eq__(self, other): 89 """Two intervals are the same if they have the same begin and end. 90 91 Args: 92 other (Interval): other Interval 93 94 Returns: 95 bool: are self and other equal. 96 """ 97 if self._begin == other._begin and self._end == other._end: 98 return True 99 return False 100 101 102 class Timeslot: 103 """Named tuple of (Interval, Channel).""" 104 105 def __init__(self, interval: Interval, channel: Channel): 106 self._interval = interval 107 self._channel = channel 108 109 @property 110 def interval(self): 111 """Interval of this time slot.""" 112 return self._interval 113 114 @property 115 def channel(self): 116 """Channel of this time slot.""" 117 return self._channel 118 119 def shift(self, time: int) -> 'Timeslot': 120 """Return a new Timeslot shifted by `time`. 121 122 Args: 123 time: time to be shifted 124 """ 125 return Timeslot(self.interval.shift(time), self.channel) 126 127 def __eq__(self, other) -> bool: 128 """Two time-slots are the same if they have the same interval and channel. 129 130 Args: 131 other (Timeslot): other Timeslot 132 """ 133 if self.interval == other.interval and self.channel == other.channel: 134 return True 135 return False 136 137 138 class TimeslotCollection: 139 """Collection of `Timeslot`s.""" 140 141 def __init__(self, *timeslots: List[Timeslot]): 142 """Create a new time-slot collection. 143 144 Args: 145 *timeslots: list of time slots 146 Raises: 147 PulseError: when overlapped time slots are specified 148 """ 149 self._table = defaultdict(list) 150 151 for slot in timeslots: 152 for interval in self._table[slot.channel]: 153 if slot.interval.has_overlap(interval): 154 raise PulseError("Cannot create TimeslotCollection from overlapped timeslots") 155 self._table[slot.channel].append(slot.interval) 156 157 self._timeslots = tuple(timeslots) 158 159 @property 160 def timeslots(self) -> Tuple[Timeslot]: 161 """`Timeslot`s in collection.""" 162 return self._timeslots 163 164 @property 165 def channels(self) -> Tuple[Timeslot]: 166 """Channels within the timeslot collection.""" 167 return tuple(self._table.keys()) 168 169 @property 170 def start_time(self) -> int: 171 """Return earliest start time in this collection.""" 172 return self.ch_start_time(*self.channels) 173 174 @property 175 def stop_time(self) -> int: 176 """Return maximum time of timeslots over all channels.""" 177 return self.ch_stop_time(*self.channels) 178 179 @property 180 def duration(self) -> int: 181 """Return maximum duration of timeslots over all channels.""" 182 return self.stop_time 183 184 def ch_start_time(self, *channels: List[Channel]) -> int: 185 """Return earliest start time in this collection. 186 187 Args: 188 *channels: Channels over which to obtain start_time. 
189 """ 190 intervals = list(itertools.chain(*(self._table[chan] for chan in channels 191 if chan in self._table))) 192 if intervals: 193 return min(interval.begin for interval in intervals) 194 return 0 195 196 def ch_stop_time(self, *channels: List[Channel]) -> int: 197 """Return maximum time of timeslots over all channels. 198 199 Args: 200 *channels: Channels over which to obtain stop time. 201 """ 202 intervals = list(itertools.chain(*(self._table[chan] for chan in channels 203 if chan in self._table))) 204 if intervals: 205 return max(interval.end for interval in intervals) 206 return 0 207 208 def ch_duration(self, *channels: List[Channel]) -> int: 209 """Return maximum duration of timeslots over all channels. 210 211 Args: 212 *channels: Channels over which to obtain the duration. 213 """ 214 return self.ch_stop_time(*channels) 215 216 def is_mergeable_with(self, timeslots: 'TimeslotCollection') -> bool: 217 """Return if self is mergeable with `timeslots`. 218 219 Args: 220 timeslots: TimeslotCollection to be checked 221 """ 222 for slot in timeslots.timeslots: 223 if slot.channel in self.channels: 224 for interval in self._table[slot.channel]: 225 if slot.interval.has_overlap(interval): 226 return False 227 return True 228 229 def merged(self, timeslots: 'TimeslotCollection') -> 'TimeslotCollection': 230 """Return a new TimeslotCollection merged with a specified `timeslots` 231 232 Args: 233 timeslots: TimeslotCollection to be merged 234 """ 235 slots = [Timeslot(slot.interval, slot.channel) for slot in self.timeslots] 236 slots.extend([Timeslot(slot.interval, slot.channel) for slot in timeslots.timeslots]) 237 return TimeslotCollection(*slots) 238 239 def shift(self, time: int) -> 'TimeslotCollection': 240 """Return a new TimeslotCollection shifted by `time`. 241 242 Args: 243 time: time to be shifted by 244 """ 245 slots = [Timeslot(slot.interval.shift(time), slot.channel) for slot in self.timeslots] 246 return TimeslotCollection(*slots) 247 248 def __eq__(self, other) -> bool: 249 """Two time-slot collections are the same if they have the same time-slots. 250 251 Args: 252 other (TimeslotCollection): other TimeslotCollection 253 """ 254 if self.timeslots == other.timeslots: 255 return True 256 return False 257 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/qiskit/pulse/timeslots.py b/qiskit/pulse/timeslots.py --- a/qiskit/pulse/timeslots.py +++ b/qiskit/pulse/timeslots.py @@ -98,6 +98,10 @@ return True return False + def __repr__(self): + """Return a readable representation of Interval Object""" + return "{}({}, {})".format(self.__class__.__name__, self.begin, self.end) + class Timeslot: """Named tuple of (Interval, Channel).""" @@ -134,6 +138,12 @@ return True return False + def __repr__(self): + """Return a readable representation of Timeslot Object""" + return "{}({}, {})".format(self.__class__.__name__, + self.channel, + (self.interval.begin, self.interval.end)) + class TimeslotCollection: """Collection of `Timeslot`s.""" @@ -254,3 +264,10 @@ if self.timeslots == other.timeslots: return True return False + + def __repr__(self): + """Return a readable representation of TimeslotCollection Object""" + rep = dict() + for key, val in self._table.items(): + rep[key] = [(interval.begin, interval.end) for interval in val] + return self.__class__.__name__ + str(rep)
{"golden_diff": "diff --git a/qiskit/pulse/timeslots.py b/qiskit/pulse/timeslots.py\n--- a/qiskit/pulse/timeslots.py\n+++ b/qiskit/pulse/timeslots.py\n@@ -98,6 +98,10 @@\n return True\n return False\n \n+ def __repr__(self):\n+ \"\"\"Return a readable representation of Interval Object\"\"\"\n+ return \"{}({}, {})\".format(self.__class__.__name__, self.begin, self.end)\n+\n \n class Timeslot:\n \"\"\"Named tuple of (Interval, Channel).\"\"\"\n@@ -134,6 +138,12 @@\n return True\n return False\n \n+ def __repr__(self):\n+ \"\"\"Return a readable representation of Timeslot Object\"\"\"\n+ return \"{}({}, {})\".format(self.__class__.__name__,\n+ self.channel,\n+ (self.interval.begin, self.interval.end))\n+\n \n class TimeslotCollection:\n \"\"\"Collection of `Timeslot`s.\"\"\"\n@@ -254,3 +264,10 @@\n if self.timeslots == other.timeslots:\n return True\n return False\n+\n+ def __repr__(self):\n+ \"\"\"Return a readable representation of TimeslotCollection Object\"\"\"\n+ rep = dict()\n+ for key, val in self._table.items():\n+ rep[key] = [(interval.begin, interval.end) for interval in val]\n+ return self.__class__.__name__ + str(rep)\n", "issue": "Timeslots should have informative representation\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\nCurrently `Interval`, `Timeslot` and `TimeslotCollection` have the standard python object representation. This should be replaced with an informative implementation of `__repr__`. As `TimeslotCollection` contains `Timeslot`s and `Timeslot`s contain `Interval`s the hierarchal nature of these data structure should be used to derive representations.\r\n\r\n### Example\r\nA possible representation might look like\r\n\r\nInterval:\r\n`(t0, tf)` eg. `(0, 10)`\r\n\r\nTimeslot: Is an interval on a channel\r\n`(channel, repr(interval))`: `(DriveChannel(0), (0, 10))`\r\n\r\nTimeslotCollection: Is a dictionary of timeslots. Ie. it groups together intervals on channels.\r\n\r\n`{channel1: [repr(interval1_c1), repr(interval2_c1), ...]), channel2: [repr(interval1_c2), repr(interval2_c2), ...]}`\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nTimeslots for channels.\n\"\"\"\nfrom collections import defaultdict\nimport itertools\nfrom typing import List, Tuple\n\nfrom .channels import Channel\nfrom .exceptions import PulseError\n\n\n# pylint: disable=missing-return-doc\n\n\nclass Interval:\n \"\"\"Time interval.\"\"\"\n\n def __init__(self, begin: int, end: int):\n \"\"\"Create an interval = (begin, end))\n\n Args:\n begin: begin time of this interval\n end: end time of this interval\n\n Raises:\n PulseError: when invalid time or duration is specified\n \"\"\"\n if begin < 0:\n raise PulseError(\"Cannot create Interval with negative begin time\")\n if end < 0:\n raise PulseError(\"Cannot create Interval with negative end time\")\n self._begin = begin\n self._end = end\n\n @property\n def begin(self):\n \"\"\"Begin time of this interval.\"\"\"\n return self._begin\n\n @property\n def end(self):\n \"\"\"End time of this interval.\"\"\"\n return self._end\n\n @property\n def duration(self):\n \"\"\"Duration of this interval.\"\"\"\n return self._end - self._begin\n\n def has_overlap(self, interval: 'Interval') -> bool:\n \"\"\"Check if self has overlap with `interval`.\n\n Args:\n interval: interval to be examined\n\n Returns:\n bool: True if self has overlap with `interval` otherwise False\n \"\"\"\n if self.begin < interval.end and interval.begin < self.end:\n return True\n return False\n\n def shift(self, time: int) -> 'Interval':\n \"\"\"Return a new interval shifted by `time` from self\n\n Args:\n time: time to be shifted\n\n Returns:\n Interval: interval shifted by `time`\n \"\"\"\n return Interval(self._begin + time, self._end + time)\n\n def __eq__(self, other):\n \"\"\"Two intervals are the same if they have the same begin and end.\n\n Args:\n other (Interval): other Interval\n\n Returns:\n bool: are self and other equal.\n \"\"\"\n if self._begin == other._begin and self._end == other._end:\n return True\n return False\n\n\nclass Timeslot:\n \"\"\"Named tuple of (Interval, Channel).\"\"\"\n\n def __init__(self, interval: Interval, channel: Channel):\n self._interval = interval\n self._channel = channel\n\n @property\n def interval(self):\n \"\"\"Interval of this time slot.\"\"\"\n return self._interval\n\n @property\n def channel(self):\n \"\"\"Channel of this time slot.\"\"\"\n return self._channel\n\n def shift(self, time: int) -> 'Timeslot':\n \"\"\"Return a new Timeslot shifted by `time`.\n\n Args:\n time: time to be shifted\n \"\"\"\n return Timeslot(self.interval.shift(time), self.channel)\n\n def __eq__(self, other) -> bool:\n \"\"\"Two time-slots are the same if they have the same interval and channel.\n\n Args:\n other (Timeslot): other Timeslot\n \"\"\"\n if self.interval == other.interval and self.channel == other.channel:\n return True\n return False\n\n\nclass TimeslotCollection:\n \"\"\"Collection of `Timeslot`s.\"\"\"\n\n def __init__(self, *timeslots: List[Timeslot]):\n \"\"\"Create a new time-slot collection.\n\n Args:\n *timeslots: list of time slots\n Raises:\n PulseError: when overlapped time slots are specified\n \"\"\"\n self._table = defaultdict(list)\n\n for slot in timeslots:\n for interval in self._table[slot.channel]:\n if 
slot.interval.has_overlap(interval):\n raise PulseError(\"Cannot create TimeslotCollection from overlapped timeslots\")\n self._table[slot.channel].append(slot.interval)\n\n self._timeslots = tuple(timeslots)\n\n @property\n def timeslots(self) -> Tuple[Timeslot]:\n \"\"\"`Timeslot`s in collection.\"\"\"\n return self._timeslots\n\n @property\n def channels(self) -> Tuple[Timeslot]:\n \"\"\"Channels within the timeslot collection.\"\"\"\n return tuple(self._table.keys())\n\n @property\n def start_time(self) -> int:\n \"\"\"Return earliest start time in this collection.\"\"\"\n return self.ch_start_time(*self.channels)\n\n @property\n def stop_time(self) -> int:\n \"\"\"Return maximum time of timeslots over all channels.\"\"\"\n return self.ch_stop_time(*self.channels)\n\n @property\n def duration(self) -> int:\n \"\"\"Return maximum duration of timeslots over all channels.\"\"\"\n return self.stop_time\n\n def ch_start_time(self, *channels: List[Channel]) -> int:\n \"\"\"Return earliest start time in this collection.\n\n Args:\n *channels: Channels over which to obtain start_time.\n \"\"\"\n intervals = list(itertools.chain(*(self._table[chan] for chan in channels\n if chan in self._table)))\n if intervals:\n return min(interval.begin for interval in intervals)\n return 0\n\n def ch_stop_time(self, *channels: List[Channel]) -> int:\n \"\"\"Return maximum time of timeslots over all channels.\n\n Args:\n *channels: Channels over which to obtain stop time.\n \"\"\"\n intervals = list(itertools.chain(*(self._table[chan] for chan in channels\n if chan in self._table)))\n if intervals:\n return max(interval.end for interval in intervals)\n return 0\n\n def ch_duration(self, *channels: List[Channel]) -> int:\n \"\"\"Return maximum duration of timeslots over all channels.\n\n Args:\n *channels: Channels over which to obtain the duration.\n \"\"\"\n return self.ch_stop_time(*channels)\n\n def is_mergeable_with(self, timeslots: 'TimeslotCollection') -> bool:\n \"\"\"Return if self is mergeable with `timeslots`.\n\n Args:\n timeslots: TimeslotCollection to be checked\n \"\"\"\n for slot in timeslots.timeslots:\n if slot.channel in self.channels:\n for interval in self._table[slot.channel]:\n if slot.interval.has_overlap(interval):\n return False\n return True\n\n def merged(self, timeslots: 'TimeslotCollection') -> 'TimeslotCollection':\n \"\"\"Return a new TimeslotCollection merged with a specified `timeslots`\n\n Args:\n timeslots: TimeslotCollection to be merged\n \"\"\"\n slots = [Timeslot(slot.interval, slot.channel) for slot in self.timeslots]\n slots.extend([Timeslot(slot.interval, slot.channel) for slot in timeslots.timeslots])\n return TimeslotCollection(*slots)\n\n def shift(self, time: int) -> 'TimeslotCollection':\n \"\"\"Return a new TimeslotCollection shifted by `time`.\n\n Args:\n time: time to be shifted by\n \"\"\"\n slots = [Timeslot(slot.interval.shift(time), slot.channel) for slot in self.timeslots]\n return TimeslotCollection(*slots)\n\n def __eq__(self, other) -> bool:\n \"\"\"Two time-slot collections are the same if they have the same time-slots.\n\n Args:\n other (TimeslotCollection): other TimeslotCollection\n \"\"\"\n if self.timeslots == other.timeslots:\n return True\n return False\n", "path": "qiskit/pulse/timeslots.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nTimeslots for channels.\n\"\"\"\nfrom collections import defaultdict\nimport itertools\nfrom typing import List, Tuple\n\nfrom .channels import Channel\nfrom .exceptions import PulseError\n\n\n# pylint: disable=missing-return-doc\n\n\nclass Interval:\n \"\"\"Time interval.\"\"\"\n\n def __init__(self, begin: int, end: int):\n \"\"\"Create an interval = (begin, end))\n\n Args:\n begin: begin time of this interval\n end: end time of this interval\n\n Raises:\n PulseError: when invalid time or duration is specified\n \"\"\"\n if begin < 0:\n raise PulseError(\"Cannot create Interval with negative begin time\")\n if end < 0:\n raise PulseError(\"Cannot create Interval with negative end time\")\n self._begin = begin\n self._end = end\n\n @property\n def begin(self):\n \"\"\"Begin time of this interval.\"\"\"\n return self._begin\n\n @property\n def end(self):\n \"\"\"End time of this interval.\"\"\"\n return self._end\n\n @property\n def duration(self):\n \"\"\"Duration of this interval.\"\"\"\n return self._end - self._begin\n\n def has_overlap(self, interval: 'Interval') -> bool:\n \"\"\"Check if self has overlap with `interval`.\n\n Args:\n interval: interval to be examined\n\n Returns:\n bool: True if self has overlap with `interval` otherwise False\n \"\"\"\n if self.begin < interval.end and interval.begin < self.end:\n return True\n return False\n\n def shift(self, time: int) -> 'Interval':\n \"\"\"Return a new interval shifted by `time` from self\n\n Args:\n time: time to be shifted\n\n Returns:\n Interval: interval shifted by `time`\n \"\"\"\n return Interval(self._begin + time, self._end + time)\n\n def __eq__(self, other):\n \"\"\"Two intervals are the same if they have the same begin and end.\n\n Args:\n other (Interval): other Interval\n\n Returns:\n bool: are self and other equal.\n \"\"\"\n if self._begin == other._begin and self._end == other._end:\n return True\n return False\n\n def __repr__(self):\n \"\"\"Return a readable representation of Interval Object\"\"\"\n return \"{}({}, {})\".format(self.__class__.__name__, self.begin, self.end)\n\n\nclass Timeslot:\n \"\"\"Named tuple of (Interval, Channel).\"\"\"\n\n def __init__(self, interval: Interval, channel: Channel):\n self._interval = interval\n self._channel = channel\n\n @property\n def interval(self):\n \"\"\"Interval of this time slot.\"\"\"\n return self._interval\n\n @property\n def channel(self):\n \"\"\"Channel of this time slot.\"\"\"\n return self._channel\n\n def shift(self, time: int) -> 'Timeslot':\n \"\"\"Return a new Timeslot shifted by `time`.\n\n Args:\n time: time to be shifted\n \"\"\"\n return Timeslot(self.interval.shift(time), self.channel)\n\n def __eq__(self, other) -> bool:\n \"\"\"Two time-slots are the same if they have the same interval and channel.\n\n Args:\n other (Timeslot): other Timeslot\n \"\"\"\n if self.interval == other.interval and self.channel == other.channel:\n return True\n return False\n\n def __repr__(self):\n \"\"\"Return a readable representation of Timeslot Object\"\"\"\n return \"{}({}, {})\".format(self.__class__.__name__,\n self.channel,\n (self.interval.begin, self.interval.end))\n\n\nclass 
TimeslotCollection:\n \"\"\"Collection of `Timeslot`s.\"\"\"\n\n def __init__(self, *timeslots: List[Timeslot]):\n \"\"\"Create a new time-slot collection.\n\n Args:\n *timeslots: list of time slots\n Raises:\n PulseError: when overlapped time slots are specified\n \"\"\"\n self._table = defaultdict(list)\n\n for slot in timeslots:\n for interval in self._table[slot.channel]:\n if slot.interval.has_overlap(interval):\n raise PulseError(\"Cannot create TimeslotCollection from overlapped timeslots\")\n self._table[slot.channel].append(slot.interval)\n\n self._timeslots = tuple(timeslots)\n\n @property\n def timeslots(self) -> Tuple[Timeslot]:\n \"\"\"`Timeslot`s in collection.\"\"\"\n return self._timeslots\n\n @property\n def channels(self) -> Tuple[Timeslot]:\n \"\"\"Channels within the timeslot collection.\"\"\"\n return tuple(self._table.keys())\n\n @property\n def start_time(self) -> int:\n \"\"\"Return earliest start time in this collection.\"\"\"\n return self.ch_start_time(*self.channels)\n\n @property\n def stop_time(self) -> int:\n \"\"\"Return maximum time of timeslots over all channels.\"\"\"\n return self.ch_stop_time(*self.channels)\n\n @property\n def duration(self) -> int:\n \"\"\"Return maximum duration of timeslots over all channels.\"\"\"\n return self.stop_time\n\n def ch_start_time(self, *channels: List[Channel]) -> int:\n \"\"\"Return earliest start time in this collection.\n\n Args:\n *channels: Channels over which to obtain start_time.\n \"\"\"\n intervals = list(itertools.chain(*(self._table[chan] for chan in channels\n if chan in self._table)))\n if intervals:\n return min(interval.begin for interval in intervals)\n return 0\n\n def ch_stop_time(self, *channels: List[Channel]) -> int:\n \"\"\"Return maximum time of timeslots over all channels.\n\n Args:\n *channels: Channels over which to obtain stop time.\n \"\"\"\n intervals = list(itertools.chain(*(self._table[chan] for chan in channels\n if chan in self._table)))\n if intervals:\n return max(interval.end for interval in intervals)\n return 0\n\n def ch_duration(self, *channels: List[Channel]) -> int:\n \"\"\"Return maximum duration of timeslots over all channels.\n\n Args:\n *channels: Channels over which to obtain the duration.\n \"\"\"\n return self.ch_stop_time(*channels)\n\n def is_mergeable_with(self, timeslots: 'TimeslotCollection') -> bool:\n \"\"\"Return if self is mergeable with `timeslots`.\n\n Args:\n timeslots: TimeslotCollection to be checked\n \"\"\"\n for slot in timeslots.timeslots:\n if slot.channel in self.channels:\n for interval in self._table[slot.channel]:\n if slot.interval.has_overlap(interval):\n return False\n return True\n\n def merged(self, timeslots: 'TimeslotCollection') -> 'TimeslotCollection':\n \"\"\"Return a new TimeslotCollection merged with a specified `timeslots`\n\n Args:\n timeslots: TimeslotCollection to be merged\n \"\"\"\n slots = [Timeslot(slot.interval, slot.channel) for slot in self.timeslots]\n slots.extend([Timeslot(slot.interval, slot.channel) for slot in timeslots.timeslots])\n return TimeslotCollection(*slots)\n\n def shift(self, time: int) -> 'TimeslotCollection':\n \"\"\"Return a new TimeslotCollection shifted by `time`.\n\n Args:\n time: time to be shifted by\n \"\"\"\n slots = [Timeslot(slot.interval.shift(time), slot.channel) for slot in self.timeslots]\n return TimeslotCollection(*slots)\n\n def __eq__(self, other) -> bool:\n \"\"\"Two time-slot collections are the same if they have the same time-slots.\n\n Args:\n other (TimeslotCollection): other 
TimeslotCollection\n \"\"\"\n if self.timeslots == other.timeslots:\n return True\n return False\n\n def __repr__(self):\n \"\"\"Return a readable representation of TimeslotCollection Object\"\"\"\n rep = dict()\n for key, val in self._table.items():\n rep[key] = [(interval.begin, interval.end) for interval in val]\n return self.__class__.__name__ + str(rep)\n", "path": "qiskit/pulse/timeslots.py"}]}
2,894
318
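A minimal usage sketch of the `__repr__` methods added by the golden diff in the record above. The import paths (`qiskit.pulse.timeslots`, `qiskit.pulse.channels`) are assumed from the Qiskit version this record targets, the exact channel text depends on `DriveChannel.__repr__`, and this module has since been removed from newer Qiskit releases, so treat the whole block as an illustration rather than current API:

```python
# Usage sketch only: exercises the reprs added by the golden diff above.
# Assumes the old qiskit.pulse.timeslots module from this record's era.
from qiskit.pulse.channels import DriveChannel
from qiskit.pulse.timeslots import Interval, Timeslot, TimeslotCollection

interval = Interval(0, 10)
print(repr(interval))        # Interval(0, 10)

slot = Timeslot(interval, DriveChannel(0))
print(repr(slot))            # Timeslot(DriveChannel(0), (0, 10))

slots = TimeslotCollection(slot, Timeslot(Interval(10, 20), DriveChannel(0)))
print(repr(slots))           # TimeslotCollection{DriveChannel(0): [(0, 10), (10, 20)]}
```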
gh_patches_debug_10105
rasdani/github-patches
git_diff
pulp__pulpcore-2566
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Implementation of does_batch in DeclarativeContent is wrong **Describe the bug** `does_batch` is supposed to decide, whether content can be waited on, or must be evaluated asap. It produces `False` unnecessarily often, leading to performance issues. **Additional context** This is suspected to at least one of the reasons for: https://github.com/pulp/pulp_deb/issues/466 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pulpcore/plugin/stages/models.py` Content: ``` 1 from gettext import gettext as _ 2 3 import asyncio 4 5 from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS 6 from pulpcore.plugin.models import Artifact 7 8 9 class DeclarativeArtifact: 10 """ 11 Relates an :class:`~pulpcore.plugin.models.Artifact`, how to download it, and its 12 `relative_path` used later during publishing. 13 14 This is used by the Stages API stages to determine if an 15 :class:`~pulpcore.plugin.models.Artifact` is already present and ensure Pulp can download it in 16 the future. The `artifact` can be either saved or unsaved. If unsaved, the `artifact` attributes 17 may be incomplete because not all digest information can be computed until the 18 :class:`~pulpcore.plugin.models.Artifact` is downloaded. 19 20 Attributes: 21 artifact (:class:`~pulpcore.plugin.models.Artifact`): An 22 :class:`~pulpcore.plugin.models.Artifact` either saved or unsaved. If unsaved, it 23 may have partial digest information attached to it. 24 url (str): the url to fetch the :class:`~pulpcore.plugin.models.Artifact` from. 25 relative_path (str): the relative_path this :class:`~pulpcore.plugin.models.Artifact` 26 should be published at for any Publication. 27 remote (:class:`~pulpcore.plugin.models.Remote`): The remote used to fetch this 28 :class:`~pulpcore.plugin.models.Artifact`. 29 extra_data (dict): A dictionary available for additional data to be stored in. 30 deferred_download (bool): Whether this artifact should be downloaded and saved 31 in the artifact stages. Defaults to `False`. See :ref:`on-demand-support`. 32 33 Raises: 34 ValueError: If `artifact`, `url`, or `relative_path` are not specified. If `remote` is not 35 specified and `artifact` doesn't have a file. 36 """ 37 38 __slots__ = ("artifact", "url", "relative_path", "remote", "extra_data", "deferred_download") 39 40 def __init__( 41 self, 42 artifact=None, 43 url=None, 44 relative_path=None, 45 remote=None, 46 extra_data=None, 47 deferred_download=False, 48 ): 49 if not url: 50 raise ValueError(_("DeclarativeArtifact must have a 'url'")) 51 if not relative_path: 52 raise ValueError(_("DeclarativeArtifact must have a 'relative_path'")) 53 if not artifact: 54 raise ValueError(_("DeclarativeArtifact must have a 'artifact'")) 55 if not remote and not artifact.file: 56 raise ValueError( 57 _( 58 "DeclarativeArtifact must have a 'remote' if the Artifact doesn't " 59 "have a file backing it." 60 ) 61 ) 62 self.artifact = artifact 63 self.url = url 64 self.relative_path = relative_path 65 self.remote = remote 66 self.extra_data = extra_data or {} 67 self.deferred_download = deferred_download 68 69 async def download(self): 70 """ 71 Download content and update the associated Artifact. 72 73 Returns: 74 Returns the :class:`~pulpcore.plugin.download.DownloadResult` of the Artifact. 
75 """ 76 expected_digests = {} 77 validation_kwargs = {} 78 for digest_name in ALL_KNOWN_CONTENT_CHECKSUMS: 79 digest_value = getattr(self.artifact, digest_name) 80 if digest_value: 81 expected_digests[digest_name] = digest_value 82 if expected_digests: 83 validation_kwargs["expected_digests"] = expected_digests 84 if self.artifact.size: 85 expected_size = self.artifact.size 86 validation_kwargs["expected_size"] = expected_size 87 downloader = self.remote.get_downloader(url=self.url, **validation_kwargs) 88 # Custom downloaders may need extra information to complete the request. 89 download_result = await downloader.run(extra_data=self.extra_data) 90 self.artifact = Artifact(**download_result.artifact_attributes, file=download_result.path) 91 return download_result 92 93 94 class DeclarativeContent: 95 """ 96 Relates a Content unit and zero or more :class:`~pulpcore.plugin.stages.DeclarativeArtifact` 97 objects. 98 99 This is used by the Stages API stages to determine if a Content unit is already present and 100 ensure all of its associated :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects are 101 related correctly. The `content` can be either saved or unsaved depending on where in the Stages 102 API pipeline this is used. 103 104 Attributes: 105 content (subclass of :class:`~pulpcore.plugin.models.Content`): A Content unit, possibly 106 unsaved 107 d_artifacts (list): A list of zero or more 108 :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects associated with `content`. 109 extra_data (dict): A dictionary available for additional data to be stored in. 110 111 Raises: 112 ValueError: If `content` is not specified. 113 """ 114 115 __slots__ = ( 116 "content", 117 "d_artifacts", 118 "extra_data", 119 "_future", 120 "_thaw_queue_event", 121 "_resolved", 122 ) 123 124 def __init__(self, content=None, d_artifacts=None, extra_data=None): 125 if not content: 126 raise ValueError(_("DeclarativeContent must have a 'content'")) 127 self.content = content 128 self.d_artifacts = d_artifacts or [] 129 self.extra_data = extra_data or {} 130 self._future = None 131 self._thaw_queue_event = None 132 self._resolved = False 133 134 @property 135 def does_batch(self): 136 """Whether this content is being awaited on and must therefore not wait forever in batches. 137 When overwritten in subclasses, a `True` value must never be turned into `False`. 138 """ 139 return not self._resolved and self._future is None 140 141 async def resolution(self): 142 """Coroutine that waits for the content to be saved to database. 143 Returns the content unit.""" 144 if self._resolved: 145 # Already resolved ~> shortcut 146 return self.content 147 if self._future is None: 148 # We do not yet have a future 149 self._future = asyncio.get_event_loop().create_future() 150 if self._thaw_queue_event: 151 # We have a future now but are still stuck in a queue 152 self._thaw_queue_event.set() 153 # Now we wait 154 return await self._future 155 156 def resolve(self): 157 """Resolve this content unit and notify any waiting tasks.""" 158 self._resolved = True 159 if self._future: 160 self._future.set_result(self.content) 161 self._future = None 162 163 def __str__(self): 164 return str(self.content.__class__.__name__) 165 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pulpcore/plugin/stages/models.py b/pulpcore/plugin/stages/models.py --- a/pulpcore/plugin/stages/models.py +++ b/pulpcore/plugin/stages/models.py @@ -136,7 +136,7 @@ """Whether this content is being awaited on and must therefore not wait forever in batches. When overwritten in subclasses, a `True` value must never be turned into `False`. """ - return not self._resolved and self._future is None + return self._resolved or self._future is None async def resolution(self): """Coroutine that waits for the content to be saved to database.
{"golden_diff": "diff --git a/pulpcore/plugin/stages/models.py b/pulpcore/plugin/stages/models.py\n--- a/pulpcore/plugin/stages/models.py\n+++ b/pulpcore/plugin/stages/models.py\n@@ -136,7 +136,7 @@\n \"\"\"Whether this content is being awaited on and must therefore not wait forever in batches.\n When overwritten in subclasses, a `True` value must never be turned into `False`.\n \"\"\"\n- return not self._resolved and self._future is None\n+ return self._resolved or self._future is None\n \n async def resolution(self):\n \"\"\"Coroutine that waits for the content to be saved to database.\n", "issue": "Implementation of does_batch in DeclarativeContent is wrong\n**Describe the bug**\r\n`does_batch` is supposed to decide, whether content can be waited on, or must be evaluated asap. It produces `False` unnecessarily often, leading to performance issues.\r\n\r\n**Additional context**\r\nThis is suspected to at least one of the reasons for: https://github.com/pulp/pulp_deb/issues/466\n", "before_files": [{"content": "from gettext import gettext as _\n\nimport asyncio\n\nfrom pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS\nfrom pulpcore.plugin.models import Artifact\n\n\nclass DeclarativeArtifact:\n \"\"\"\n Relates an :class:`~pulpcore.plugin.models.Artifact`, how to download it, and its\n `relative_path` used later during publishing.\n\n This is used by the Stages API stages to determine if an\n :class:`~pulpcore.plugin.models.Artifact` is already present and ensure Pulp can download it in\n the future. The `artifact` can be either saved or unsaved. If unsaved, the `artifact` attributes\n may be incomplete because not all digest information can be computed until the\n :class:`~pulpcore.plugin.models.Artifact` is downloaded.\n\n Attributes:\n artifact (:class:`~pulpcore.plugin.models.Artifact`): An\n :class:`~pulpcore.plugin.models.Artifact` either saved or unsaved. If unsaved, it\n may have partial digest information attached to it.\n url (str): the url to fetch the :class:`~pulpcore.plugin.models.Artifact` from.\n relative_path (str): the relative_path this :class:`~pulpcore.plugin.models.Artifact`\n should be published at for any Publication.\n remote (:class:`~pulpcore.plugin.models.Remote`): The remote used to fetch this\n :class:`~pulpcore.plugin.models.Artifact`.\n extra_data (dict): A dictionary available for additional data to be stored in.\n deferred_download (bool): Whether this artifact should be downloaded and saved\n in the artifact stages. Defaults to `False`. See :ref:`on-demand-support`.\n\n Raises:\n ValueError: If `artifact`, `url`, or `relative_path` are not specified. 
If `remote` is not\n specified and `artifact` doesn't have a file.\n \"\"\"\n\n __slots__ = (\"artifact\", \"url\", \"relative_path\", \"remote\", \"extra_data\", \"deferred_download\")\n\n def __init__(\n self,\n artifact=None,\n url=None,\n relative_path=None,\n remote=None,\n extra_data=None,\n deferred_download=False,\n ):\n if not url:\n raise ValueError(_(\"DeclarativeArtifact must have a 'url'\"))\n if not relative_path:\n raise ValueError(_(\"DeclarativeArtifact must have a 'relative_path'\"))\n if not artifact:\n raise ValueError(_(\"DeclarativeArtifact must have a 'artifact'\"))\n if not remote and not artifact.file:\n raise ValueError(\n _(\n \"DeclarativeArtifact must have a 'remote' if the Artifact doesn't \"\n \"have a file backing it.\"\n )\n )\n self.artifact = artifact\n self.url = url\n self.relative_path = relative_path\n self.remote = remote\n self.extra_data = extra_data or {}\n self.deferred_download = deferred_download\n\n async def download(self):\n \"\"\"\n Download content and update the associated Artifact.\n\n Returns:\n Returns the :class:`~pulpcore.plugin.download.DownloadResult` of the Artifact.\n \"\"\"\n expected_digests = {}\n validation_kwargs = {}\n for digest_name in ALL_KNOWN_CONTENT_CHECKSUMS:\n digest_value = getattr(self.artifact, digest_name)\n if digest_value:\n expected_digests[digest_name] = digest_value\n if expected_digests:\n validation_kwargs[\"expected_digests\"] = expected_digests\n if self.artifact.size:\n expected_size = self.artifact.size\n validation_kwargs[\"expected_size\"] = expected_size\n downloader = self.remote.get_downloader(url=self.url, **validation_kwargs)\n # Custom downloaders may need extra information to complete the request.\n download_result = await downloader.run(extra_data=self.extra_data)\n self.artifact = Artifact(**download_result.artifact_attributes, file=download_result.path)\n return download_result\n\n\nclass DeclarativeContent:\n \"\"\"\n Relates a Content unit and zero or more :class:`~pulpcore.plugin.stages.DeclarativeArtifact`\n objects.\n\n This is used by the Stages API stages to determine if a Content unit is already present and\n ensure all of its associated :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects are\n related correctly. 
The `content` can be either saved or unsaved depending on where in the Stages\n API pipeline this is used.\n\n Attributes:\n content (subclass of :class:`~pulpcore.plugin.models.Content`): A Content unit, possibly\n unsaved\n d_artifacts (list): A list of zero or more\n :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects associated with `content`.\n extra_data (dict): A dictionary available for additional data to be stored in.\n\n Raises:\n ValueError: If `content` is not specified.\n \"\"\"\n\n __slots__ = (\n \"content\",\n \"d_artifacts\",\n \"extra_data\",\n \"_future\",\n \"_thaw_queue_event\",\n \"_resolved\",\n )\n\n def __init__(self, content=None, d_artifacts=None, extra_data=None):\n if not content:\n raise ValueError(_(\"DeclarativeContent must have a 'content'\"))\n self.content = content\n self.d_artifacts = d_artifacts or []\n self.extra_data = extra_data or {}\n self._future = None\n self._thaw_queue_event = None\n self._resolved = False\n\n @property\n def does_batch(self):\n \"\"\"Whether this content is being awaited on and must therefore not wait forever in batches.\n When overwritten in subclasses, a `True` value must never be turned into `False`.\n \"\"\"\n return not self._resolved and self._future is None\n\n async def resolution(self):\n \"\"\"Coroutine that waits for the content to be saved to database.\n Returns the content unit.\"\"\"\n if self._resolved:\n # Already resolved ~> shortcut\n return self.content\n if self._future is None:\n # We do not yet have a future\n self._future = asyncio.get_event_loop().create_future()\n if self._thaw_queue_event:\n # We have a future now but are still stuck in a queue\n self._thaw_queue_event.set()\n # Now we wait\n return await self._future\n\n def resolve(self):\n \"\"\"Resolve this content unit and notify any waiting tasks.\"\"\"\n self._resolved = True\n if self._future:\n self._future.set_result(self.content)\n self._future = None\n\n def __str__(self):\n return str(self.content.__class__.__name__)\n", "path": "pulpcore/plugin/stages/models.py"}], "after_files": [{"content": "from gettext import gettext as _\n\nimport asyncio\n\nfrom pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS\nfrom pulpcore.plugin.models import Artifact\n\n\nclass DeclarativeArtifact:\n \"\"\"\n Relates an :class:`~pulpcore.plugin.models.Artifact`, how to download it, and its\n `relative_path` used later during publishing.\n\n This is used by the Stages API stages to determine if an\n :class:`~pulpcore.plugin.models.Artifact` is already present and ensure Pulp can download it in\n the future. The `artifact` can be either saved or unsaved. If unsaved, the `artifact` attributes\n may be incomplete because not all digest information can be computed until the\n :class:`~pulpcore.plugin.models.Artifact` is downloaded.\n\n Attributes:\n artifact (:class:`~pulpcore.plugin.models.Artifact`): An\n :class:`~pulpcore.plugin.models.Artifact` either saved or unsaved. 
If unsaved, it\n may have partial digest information attached to it.\n url (str): the url to fetch the :class:`~pulpcore.plugin.models.Artifact` from.\n relative_path (str): the relative_path this :class:`~pulpcore.plugin.models.Artifact`\n should be published at for any Publication.\n remote (:class:`~pulpcore.plugin.models.Remote`): The remote used to fetch this\n :class:`~pulpcore.plugin.models.Artifact`.\n extra_data (dict): A dictionary available for additional data to be stored in.\n deferred_download (bool): Whether this artifact should be downloaded and saved\n in the artifact stages. Defaults to `False`. See :ref:`on-demand-support`.\n\n Raises:\n ValueError: If `artifact`, `url`, or `relative_path` are not specified. If `remote` is not\n specified and `artifact` doesn't have a file.\n \"\"\"\n\n __slots__ = (\"artifact\", \"url\", \"relative_path\", \"remote\", \"extra_data\", \"deferred_download\")\n\n def __init__(\n self,\n artifact=None,\n url=None,\n relative_path=None,\n remote=None,\n extra_data=None,\n deferred_download=False,\n ):\n if not url:\n raise ValueError(_(\"DeclarativeArtifact must have a 'url'\"))\n if not relative_path:\n raise ValueError(_(\"DeclarativeArtifact must have a 'relative_path'\"))\n if not artifact:\n raise ValueError(_(\"DeclarativeArtifact must have a 'artifact'\"))\n if not remote and not artifact.file:\n raise ValueError(\n _(\n \"DeclarativeArtifact must have a 'remote' if the Artifact doesn't \"\n \"have a file backing it.\"\n )\n )\n self.artifact = artifact\n self.url = url\n self.relative_path = relative_path\n self.remote = remote\n self.extra_data = extra_data or {}\n self.deferred_download = deferred_download\n\n async def download(self):\n \"\"\"\n Download content and update the associated Artifact.\n\n Returns:\n Returns the :class:`~pulpcore.plugin.download.DownloadResult` of the Artifact.\n \"\"\"\n expected_digests = {}\n validation_kwargs = {}\n for digest_name in ALL_KNOWN_CONTENT_CHECKSUMS:\n digest_value = getattr(self.artifact, digest_name)\n if digest_value:\n expected_digests[digest_name] = digest_value\n if expected_digests:\n validation_kwargs[\"expected_digests\"] = expected_digests\n if self.artifact.size:\n expected_size = self.artifact.size\n validation_kwargs[\"expected_size\"] = expected_size\n downloader = self.remote.get_downloader(url=self.url, **validation_kwargs)\n # Custom downloaders may need extra information to complete the request.\n download_result = await downloader.run(extra_data=self.extra_data)\n self.artifact = Artifact(**download_result.artifact_attributes, file=download_result.path)\n return download_result\n\n\nclass DeclarativeContent:\n \"\"\"\n Relates a Content unit and zero or more :class:`~pulpcore.plugin.stages.DeclarativeArtifact`\n objects.\n\n This is used by the Stages API stages to determine if a Content unit is already present and\n ensure all of its associated :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects are\n related correctly. 
The `content` can be either saved or unsaved depending on where in the Stages\n API pipeline this is used.\n\n Attributes:\n content (subclass of :class:`~pulpcore.plugin.models.Content`): A Content unit, possibly\n unsaved\n d_artifacts (list): A list of zero or more\n :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects associated with `content`.\n extra_data (dict): A dictionary available for additional data to be stored in.\n\n Raises:\n ValueError: If `content` is not specified.\n \"\"\"\n\n __slots__ = (\n \"content\",\n \"d_artifacts\",\n \"extra_data\",\n \"_future\",\n \"_thaw_queue_event\",\n \"_resolved\",\n )\n\n def __init__(self, content=None, d_artifacts=None, extra_data=None):\n if not content:\n raise ValueError(_(\"DeclarativeContent must have a 'content'\"))\n self.content = content\n self.d_artifacts = d_artifacts or []\n self.extra_data = extra_data or {}\n self._future = None\n self._thaw_queue_event = None\n self._resolved = False\n\n @property\n def does_batch(self):\n \"\"\"Whether this content is being awaited on and must therefore not wait forever in batches.\n When overwritten in subclasses, a `True` value must never be turned into `False`.\n \"\"\"\n return self._resolved or self._future is None\n\n async def resolution(self):\n \"\"\"Coroutine that waits for the content to be saved to database.\n Returns the content unit.\"\"\"\n if self._resolved:\n # Already resolved ~> shortcut\n return self.content\n if self._future is None:\n # We do not yet have a future\n self._future = asyncio.get_event_loop().create_future()\n if self._thaw_queue_event:\n # We have a future now but are still stuck in a queue\n self._thaw_queue_event.set()\n # Now we wait\n return await self._future\n\n def resolve(self):\n \"\"\"Resolve this content unit and notify any waiting tasks.\"\"\"\n self._resolved = True\n if self._future:\n self._future.set_result(self.content)\n self._future = None\n\n def __str__(self):\n return str(self.content.__class__.__name__)\n", "path": "pulpcore/plugin/stages/models.py"}]}
2,158
144
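The one-line change in this record is easiest to see as a truth table. The sketch below is a standalone restatement of the predicate, not pulpcore code: `does_batch` should be `True` whenever the unit may safely keep waiting in a batch, i.e. when it is already resolved or when nothing awaits it yet, and the old expression wrongly returned `False` for already-resolved content, forcing the needless early flushes the issue describes.

```python
# Standalone sketch of the does_batch predicate before and after the fix.
def does_batch_old(resolved: bool, has_future: bool) -> bool:
    # Buggy: resolved units are reported as "must not wait in a batch".
    return not resolved and not has_future

def does_batch_new(resolved: bool, has_future: bool) -> bool:
    # Fixed: only an unresolved unit that someone awaits must bypass batching.
    return resolved or not has_future

for resolved in (False, True):
    for has_future in (False, True):
        print(f"resolved={resolved!s:5} has_future={has_future!s:5} "
              f"old={does_batch_old(resolved, has_future)!s:5} "
              f"new={does_batch_new(resolved, has_future)}")
# Only the resolved=True rows change: they now batch (True) instead of being
# treated as urgent (False), which is the performance problem described above.
```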
gh_patches_debug_7477
rasdani/github-patches
git_diff
zigpy__zha-device-handlers-680
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Device Support Request] LIVARNO LUX/LIDL Led Panel 60x60 (Tuya TS0502A) [LIDL Service website](https://www.lidl-service.com/cps/rde/xchg/SID-3771F4F2-8A18D468/lsp/hs.xsl/product.html?id=5027306530&title=Smart+LED+Light+Panel&count=1) **Describe the solution you'd like** - [x] power control - [x] brightness control - [x] CCT control - [x] remove color control ![grafik](https://user-images.githubusercontent.com/21227087/101215714-9778fd00-367e-11eb-957c-a1f86d50946e.png) Maybee because the board is also color capable ![IMG_20201204_123040](https://user-images.githubusercontent.com/21227087/101216036-1e2dda00-367f-11eb-9663-c2623fec6553.jpg) **Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. Be sure to add the entire content of the log panel after pairing the device to a code block below this line.** [Device signature and pairing log at my dev VM](https://pastebin.com/ifAkAXaF) **Additional context** https://zigbee.blakadder.com/Tuya_TS0502A.html Touchlink resetable --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `zhaquirks/lidl/cct.py` Content: ``` 1 """Quirk for LIDL CCT bulb.""" 2 from zigpy.profiles import zha 3 from zigpy.quirks import CustomCluster, CustomDevice 4 from zigpy.zcl.clusters.general import ( 5 Basic, 6 GreenPowerProxy, 7 Groups, 8 Identify, 9 LevelControl, 10 OnOff, 11 Ota, 12 Scenes, 13 Time, 14 ) 15 from zigpy.zcl.clusters.lighting import Color 16 from zigpy.zcl.clusters.lightlink import LightLink 17 18 from zhaquirks.const import ( 19 DEVICE_TYPE, 20 ENDPOINTS, 21 INPUT_CLUSTERS, 22 MODELS_INFO, 23 OUTPUT_CLUSTERS, 24 PROFILE_ID, 25 ) 26 27 28 class LidlCCTColorCluster(CustomCluster, Color): 29 """Lidl CCT Lighting custom cluster.""" 30 31 # Remove RGB color wheel for CCT Lighting: only expose color temperature 32 # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant) 33 _CONSTANT_ATTRIBUTES = {0x400A: 16} 34 35 36 class CCTLight(CustomDevice): 37 """Lidl CCT Lighting device.""" 38 39 signature = { 40 MODELS_INFO: [("_TZ3000_49qchf10", "TS0502A"), ("_TZ3000_oborybow", "TS0502A")], 41 ENDPOINTS: { 42 1: { 43 # <SimpleDescriptor endpoint=1 profile=260 device_type=268 44 # device_version=1 45 # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096] 46 # output_clusters=[10, 25] 47 PROFILE_ID: zha.PROFILE_ID, 48 DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT, 49 INPUT_CLUSTERS: [ 50 Basic.cluster_id, 51 Identify.cluster_id, 52 Groups.cluster_id, 53 Scenes.cluster_id, 54 OnOff.cluster_id, 55 LevelControl.cluster_id, 56 Color.cluster_id, 57 LightLink.cluster_id, 58 ], 59 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id], 60 }, 61 242: { 62 # <SimpleDescriptor endpoint=242 profile=41440 device_type=97 63 # device_version=0 64 # input_clusters=[] 65 # output_clusters=[33] 66 PROFILE_ID: 41440, 67 DEVICE_TYPE: 97, 68 INPUT_CLUSTERS: [], 69 OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id], 70 }, 71 }, 72 } 73 74 replacement = { 75 ENDPOINTS: { 76 1: { 77 PROFILE_ID: zha.PROFILE_ID, 78 DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT, 79 INPUT_CLUSTERS: [ 80 Basic.cluster_id, 81 Identify.cluster_id, 82 Groups.cluster_id, 83 Scenes.cluster_id, 84 OnOff.cluster_id, 85 LevelControl.cluster_id, 86 LidlCCTColorCluster, 87 LightLink.cluster_id, 88 ], 89 OUTPUT_CLUSTERS: 
[Time.cluster_id, Ota.cluster_id], 90 }, 91 242: { 92 PROFILE_ID: 41440, 93 DEVICE_TYPE: 97, 94 INPUT_CLUSTERS: [], 95 OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id], 96 }, 97 } 98 } 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/zhaquirks/lidl/cct.py b/zhaquirks/lidl/cct.py --- a/zhaquirks/lidl/cct.py +++ b/zhaquirks/lidl/cct.py @@ -37,7 +37,12 @@ """Lidl CCT Lighting device.""" signature = { - MODELS_INFO: [("_TZ3000_49qchf10", "TS0502A"), ("_TZ3000_oborybow", "TS0502A")], + MODELS_INFO: [ + ("_TZ3000_49qchf10", "TS0502A"), + ("_TZ3000_oborybow", "TS0502A"), + ("_TZ3000_9evm3otq", "TS0502A"), + ("_TZ3000_rylaozuc", "TS0502A"), + ], ENDPOINTS: { 1: { # <SimpleDescriptor endpoint=1 profile=260 device_type=268
{"golden_diff": "diff --git a/zhaquirks/lidl/cct.py b/zhaquirks/lidl/cct.py\n--- a/zhaquirks/lidl/cct.py\n+++ b/zhaquirks/lidl/cct.py\n@@ -37,7 +37,12 @@\n \"\"\"Lidl CCT Lighting device.\"\"\"\n \n signature = {\n- MODELS_INFO: [(\"_TZ3000_49qchf10\", \"TS0502A\"), (\"_TZ3000_oborybow\", \"TS0502A\")],\n+ MODELS_INFO: [\n+ (\"_TZ3000_49qchf10\", \"TS0502A\"),\n+ (\"_TZ3000_oborybow\", \"TS0502A\"),\n+ (\"_TZ3000_9evm3otq\", \"TS0502A\"),\n+ (\"_TZ3000_rylaozuc\", \"TS0502A\"),\n+ ],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n", "issue": "[Device Support Request] LIVARNO LUX/LIDL Led Panel 60x60 (Tuya TS0502A)\n[LIDL Service website](https://www.lidl-service.com/cps/rde/xchg/SID-3771F4F2-8A18D468/lsp/hs.xsl/product.html?id=5027306530&title=Smart+LED+Light+Panel&count=1)\r\n\r\n**Describe the solution you'd like**\r\n- [x] power control\r\n- [x] brightness control\r\n- [x] CCT control\r\n- [x] remove color control\r\n\r\n![grafik](https://user-images.githubusercontent.com/21227087/101215714-9778fd00-367e-11eb-957c-a1f86d50946e.png)\r\n\r\nMaybee because the board is also color capable\r\n![IMG_20201204_123040](https://user-images.githubusercontent.com/21227087/101216036-1e2dda00-367f-11eb-9663-c2623fec6553.jpg)\r\n\r\n\r\n**Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. Be sure to add the entire content of the log panel after pairing the device to a code block below this line.**\r\n[Device signature and pairing log at my dev VM](https://pastebin.com/ifAkAXaF)\r\n\r\n\r\n**Additional context**\r\nhttps://zigbee.blakadder.com/Tuya_TS0502A.html\r\nTouchlink resetable\n", "before_files": [{"content": "\"\"\"Quirk for LIDL CCT bulb.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nfrom zigpy.zcl.clusters.general import (\n Basic,\n GreenPowerProxy,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n Time,\n)\nfrom zigpy.zcl.clusters.lighting import Color\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n\nclass LidlCCTColorCluster(CustomCluster, Color):\n \"\"\"Lidl CCT Lighting custom cluster.\"\"\"\n\n # Remove RGB color wheel for CCT Lighting: only expose color temperature\n # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)\n _CONSTANT_ATTRIBUTES = {0x400A: 16}\n\n\nclass CCTLight(CustomDevice):\n \"\"\"Lidl CCT Lighting device.\"\"\"\n\n signature = {\n MODELS_INFO: [(\"_TZ3000_49qchf10\", \"TS0502A\"), (\"_TZ3000_oborybow\", \"TS0502A\")],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n # device_version=1\n # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]\n # output_clusters=[10, 25]\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Color.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n # <SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # device_version=0\n # input_clusters=[]\n # output_clusters=[33]\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n\n replacement = {\n 
ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n LidlCCTColorCluster,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n }\n }\n", "path": "zhaquirks/lidl/cct.py"}], "after_files": [{"content": "\"\"\"Quirk for LIDL CCT bulb.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nfrom zigpy.zcl.clusters.general import (\n Basic,\n GreenPowerProxy,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n Time,\n)\nfrom zigpy.zcl.clusters.lighting import Color\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n\nclass LidlCCTColorCluster(CustomCluster, Color):\n \"\"\"Lidl CCT Lighting custom cluster.\"\"\"\n\n # Remove RGB color wheel for CCT Lighting: only expose color temperature\n # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)\n _CONSTANT_ATTRIBUTES = {0x400A: 16}\n\n\nclass CCTLight(CustomDevice):\n \"\"\"Lidl CCT Lighting device.\"\"\"\n\n signature = {\n MODELS_INFO: [\n (\"_TZ3000_49qchf10\", \"TS0502A\"),\n (\"_TZ3000_oborybow\", \"TS0502A\"),\n (\"_TZ3000_9evm3otq\", \"TS0502A\"),\n (\"_TZ3000_rylaozuc\", \"TS0502A\"),\n ],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n # device_version=1\n # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]\n # output_clusters=[10, 25]\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Color.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n # <SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # device_version=0\n # input_clusters=[]\n # output_clusters=[33]\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n LidlCCTColorCluster,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n }\n }\n", "path": "zhaquirks/lidl/cct.py"}]}
1,563
260
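Beyond the extra Tuya model strings, the technically interesting part of this quirk is the `_CONSTANT_ATTRIBUTES = {0x400A: 16}` override quoted in the record: 0x400A is the ZCL Color Control `ColorCapabilities` attribute, and 16 sets only the colour-temperature bit, which is what hides the RGB wheel on this CCT-only panel. The bit layout below follows the ZCL specification rather than anything in the record itself, so it is an assumption:

```python
# Sketch decoding the ColorCapabilities (0x400A) value forced by the quirk.
# Bit positions per the ZCL Color Control cluster spec (assumed, not quoted
# from the record above).
HUE_SATURATION    = 0b00001  # bit 0
ENHANCED_HUE      = 0b00010  # bit 1
COLOR_LOOP        = 0b00100  # bit 2
XY_ATTRIBUTES     = 0b01000  # bit 3
COLOR_TEMPERATURE = 0b10000  # bit 4

forced_value = 16
assert forced_value == COLOR_TEMPERATURE                     # only CCT advertised
assert not forced_value & (HUE_SATURATION | XY_ATTRIBUTES)   # no RGB controls exposed
```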
gh_patches_debug_30584
rasdani/github-patches
git_diff
opendatacube__datacube-core-1469
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- python 3.8/3.9 doesn't support entry_points with either group or select As title. https://github.com/opendatacube/datacube-core/blob/5fed4a688c85c198c5d25f2570b0c92ef91dfc04/datacube/drivers/driver_cache.py#L50 Ref: https://docs.python.org/3/library/importlib.metadata.html >Compatibility Note: The “selectable” entry points were introduced in importlib_metadata 3.6 and Python 3.10. Prior to those changes, entry_points accepted no parameters and always returned a dictionary of entry points, keyed by group. For compatibility, if no parameters are passed to entry_points, a SelectableGroups object is returned, implementing that dict interface. In the future, calling entry_points with no parameters will return an EntryPoints object. Users should rely on the selection interface to retrieve entry points by group. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `datacube/drivers/driver_cache.py` Content: ``` 1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information 2 # 3 # Copyright (c) 2015-2023 ODC Contributors 4 # SPDX-License-Identifier: Apache-2.0 5 import logging 6 from typing import Dict, Any, Tuple, Iterable 7 8 _LOG = logging.getLogger(__name__) 9 10 11 def load_drivers(group: str) -> Dict[str, Any]: 12 """ 13 Load available drivers for a given group name. 14 15 Gracefully handles: 16 17 - Driver module not able to be imported 18 - Driver init function throwing an exception or returning None 19 20 By having driver entry_points pointing to a function, we defer loading the driver 21 module or running any code until required. 22 23 :param group: Name of the entry point group e.g. 
"datacube.plugins.io.read" 24 25 :returns: Dictionary String -> Driver Object 26 """ 27 28 def safe_load(ep): 29 # pylint: disable=broad-except,bare-except 30 try: 31 driver_init = ep.load() 32 except Exception as e: 33 _LOG.warning('Failed to resolve driver %s::%s', group, ep.name) 34 _LOG.warning('Error was: %s', repr(e)) 35 return None 36 37 try: 38 driver = driver_init() 39 except Exception: 40 _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name) 41 return None 42 43 if driver is None: 44 _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name) 45 46 return driver 47 48 def resolve_all(group: str) -> Iterable[Tuple[str, Any]]: 49 from importlib.metadata import entry_points 50 for ep in entry_points(group=group): 51 driver = safe_load(ep) 52 if driver is not None: 53 yield (ep.name, driver) 54 55 return dict((name, driver) for name, driver in resolve_all(group)) 56 ``` Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 from setuptools import setup, find_packages 4 5 tests_require = [ 6 'hypothesis', 7 'pycodestyle', 8 'pylint', 9 'pytest', 10 'pytest-cov', 11 'pytest-timeout', 12 'pytest-httpserver', 13 'moto', 14 ] 15 doc_require = [ 16 'Sphinx', 17 'sphinx_autodoc_typehints', # Propagate mypy info into docs 18 'sphinx-click', 19 'recommonmark', 20 'autodocsumm', 21 'beautifulsoup4', 22 'nbsphinx', 23 'pydata-sphinx-theme==0.9.0', 24 ] 25 26 extras_require = { 27 'performance': ['ciso8601', 'bottleneck'], 28 'distributed': ['distributed', 'dask[distributed]'], 29 'doc': doc_require, 30 's3': ['boto3', 'botocore'], 31 'test': tests_require, 32 'cf': ['compliance-checker>=4.0.0'], 33 } 34 35 extras_require['dev'] = sorted(set(sum([extras_require[k] for k in [ 36 'test', 37 'doc', 38 'performance', 39 's3', 40 'distributed', 41 ]], []))) 42 43 # An 'all' option, following ipython naming conventions. 
44 extras_require['all'] = sorted(set(sum(extras_require.values(), []))) 45 46 extra_plugins = dict(read=[], write=[], index=[]) 47 48 setup( 49 name='datacube', 50 python_requires='>=3.8.0', 51 52 url='https://github.com/opendatacube/datacube-core', 53 author='Open Data Cube', 54 maintainer='Open Data Cube', 55 maintainer_email='', 56 description='An analysis environment for satellite and other earth observation data', 57 long_description=open('README.rst').read(), 58 long_description_content_type='text/x-rst', 59 license='Apache License 2.0', 60 classifiers=[ 61 "Development Status :: 4 - Beta", 62 "Intended Audience :: Developers", 63 "Intended Audience :: Science/Research", 64 "License :: OSI Approved :: Apache Software License", 65 "Natural Language :: English", 66 "Operating System :: MacOS :: MacOS X", 67 "Operating System :: POSIX", 68 "Operating System :: POSIX :: BSD", 69 "Operating System :: POSIX :: Linux", 70 "Operating System :: Microsoft :: Windows", 71 "Programming Language :: Python", 72 "Programming Language :: Python :: 3", 73 "Programming Language :: Python :: 3.8", 74 "Programming Language :: Python :: 3.9", 75 "Programming Language :: Python :: 3.10", 76 "Topic :: Scientific/Engineering :: GIS", 77 "Topic :: Scientific/Engineering :: Information Analysis", 78 ], 79 80 packages=find_packages( 81 exclude=('tests', 'tests.*', 82 'integration_tests', 'integration_tests.*') 83 ), 84 package_data={ 85 '': ['*.yaml', '*/*.yaml'], 86 'datacube': ['py.typed'], 87 }, 88 scripts=[], 89 install_requires=[ 90 'affine', 91 'attrs>=18.1', 92 'pyproj>=2.5', 93 'shapely>=2.0', 94 'cachetools', 95 'click>=5.0', 96 'cloudpickle>=0.4', 97 'dask[array]', 98 'distributed', 99 'jsonschema', 100 'netcdf4', 101 'numpy', 102 'psycopg2', 103 'lark', 104 'pandas', 105 'python-dateutil', 106 'pyyaml', 107 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1 108 'ruamel.yaml', 109 'sqlalchemy>=1.4,<2.0', # GeoAlchemy2 requires >=1.4. SqlAlchemy2 *may* work but has not been tested yet. 
110 'GeoAlchemy2', 111 'toolz', 112 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost 113 'packaging', 114 'deprecat', 115 ], 116 extras_require=extras_require, 117 tests_require=tests_require, 118 119 entry_points={ 120 'console_scripts': [ 121 'datacube = datacube.scripts.cli_app:cli', 122 'datacube-search = datacube.scripts.search_tool:cli', 123 'datacube-worker = datacube.execution.worker:main', 124 ], 125 'datacube.plugins.io.read': [ 126 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init', 127 *extra_plugins['read'], 128 ], 129 'datacube.plugins.io.write': [ 130 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init', 131 *extra_plugins['write'], 132 ], 133 'datacube.plugins.index': [ 134 'default = datacube.index.postgres.index:index_driver_init', 135 'null = datacube.index.null.index:index_driver_init', 136 'memory = datacube.index.memory.index:index_driver_init', 137 'postgis = datacube.index.postgis.index:index_driver_init', 138 *extra_plugins['index'], 139 ], 140 }, 141 ) 142 ``` Path: `docs/click_utils.py` Content: ``` 1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information 2 # 3 # Copyright (c) 2015-2023 ODC Contributors 4 # SPDX-License-Identifier: Apache-2.0 5 from importlib.metadata import entry_points 6 from docutils.nodes import literal_block, section, title, make_id 7 from sphinx.domains import Domain 8 from docutils.parsers.rst import Directive 9 import importlib 10 11 import click 12 13 14 class ClickHelpDirective(Directive): 15 has_content = True 16 required_arguments = 1 17 18 def run(self): 19 root_cmd = self.arguments[0] 20 21 env = self.state.document.settings.env 22 23 group = find_script_callable_from_env(root_cmd, env) 24 25 return [generate_help_text(group, [root_cmd])] 26 27 28 def find_script_callable_from_env(name, env): 29 commands = env.config.click_utils_commands 30 31 module, function_name = commands[name].split(':') 32 module = importlib.import_module(module) 33 return getattr(module, function_name) 34 35 36 def find_script_callable(name): 37 return list(entry_points( 38 group='console_scripts', name=name))[0].load() 39 40 41 def generate_help_text(command, prefix): 42 ctx = click.Context(command) 43 help_opts = command.get_help_option(ctx).opts 44 full_cmd = ' '.join(prefix) 45 block = section(None, 46 title(None, full_cmd), 47 ids=[make_id(full_cmd)], names=[full_cmd]) 48 if help_opts: 49 h = "$ {} {}\n".format(full_cmd, help_opts[0]) + command.get_help(ctx) 50 block.append(literal_block(None, h, language='console')) 51 52 if isinstance(command, click.core.MultiCommand): 53 for c in command.list_commands(ctx): 54 c = command.resolve_command(ctx, [c])[1] 55 block.append(generate_help_text(c, prefix+[c.name])) 56 57 return block 58 59 60 def make_block(command, opt, content): 61 h = "$ {} {}\n".format(command, opt) + content 62 return section(None, 63 title(None, command), 64 literal_block(None, h, language='console'), 65 ids=[make_id(command)], names=[command]) 66 67 68 class DatacubeDomain(Domain): 69 name = 'datacube' 70 label = 'Data Cube' 71 directives = { 72 'click-help': ClickHelpDirective, 73 } 74 75 76 def setup(app): 77 app.add_config_value('click_utils_commands', {}, 'html') 78 79 app.add_domain(DatacubeDomain) 80 return { 81 'parallel_read_safe': False, 82 'parallel_write_safe': False, 83 } 84 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/datacube/drivers/driver_cache.py b/datacube/drivers/driver_cache.py --- a/datacube/drivers/driver_cache.py +++ b/datacube/drivers/driver_cache.py @@ -46,7 +46,10 @@ return driver def resolve_all(group: str) -> Iterable[Tuple[str, Any]]: - from importlib.metadata import entry_points + try: + from importlib_metadata import entry_points + except ModuleNotFoundError: + from importlib.metadata import entry_points for ep in entry_points(group=group): driver = safe_load(ep) if driver is not None: diff --git a/docs/click_utils.py b/docs/click_utils.py --- a/docs/click_utils.py +++ b/docs/click_utils.py @@ -2,7 +2,6 @@ # # Copyright (c) 2015-2023 ODC Contributors # SPDX-License-Identifier: Apache-2.0 -from importlib.metadata import entry_points from docutils.nodes import literal_block, section, title, make_id from sphinx.domains import Domain from docutils.parsers.rst import Directive @@ -34,6 +33,10 @@ def find_script_callable(name): + try: + from importlib_metadata import entry_points + except ModuleNotFoundError: + from importlib.metadata import entry_points return list(entry_points( group='console_scripts', name=name))[0].load() diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -112,6 +112,7 @@ 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost 'packaging', 'deprecat', + 'importlib_metadata>3.5;python_version<"3.10"', ], extras_require=extras_require, tests_require=tests_require,
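The diff above works around the `entry_points()` API gap by preferring the `importlib_metadata` backport (pinned to >3.5 in setup.py) and only falling back to the stdlib module, whose `group=`/`name=` selection exists from Python 3.10 onward. A generalized sketch of that pattern follows; `iter_group` is an illustrative helper name, not anything from the datacube codebase:

```python
# Version-tolerant entry-point lookup, mirroring the fallback in the diff above.
# On Python 3.8/3.9 the importlib_metadata backport must be installed (which is
# what the setup.py hunk guarantees); on 3.10+ the stdlib module suffices.
try:
    from importlib_metadata import entry_points
except ModuleNotFoundError:
    from importlib.metadata import entry_points


def iter_group(group: str):
    """Yield (name, loaded object) for each entry point in *group*."""
    for ep in entry_points(group=group):
        yield ep.name, ep.load()


if __name__ == "__main__":
    # List registered console scripts without loading them.
    print([ep.name for ep in entry_points(group="console_scripts")])
```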
{"golden_diff": "diff --git a/datacube/drivers/driver_cache.py b/datacube/drivers/driver_cache.py\n--- a/datacube/drivers/driver_cache.py\n+++ b/datacube/drivers/driver_cache.py\n@@ -46,7 +46,10 @@\n return driver\n \n def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:\n- from importlib.metadata import entry_points\n+ try:\n+ from importlib_metadata import entry_points\n+ except ModuleNotFoundError:\n+ from importlib.metadata import entry_points\n for ep in entry_points(group=group):\n driver = safe_load(ep)\n if driver is not None:\ndiff --git a/docs/click_utils.py b/docs/click_utils.py\n--- a/docs/click_utils.py\n+++ b/docs/click_utils.py\n@@ -2,7 +2,6 @@\n #\n # Copyright (c) 2015-2023 ODC Contributors\n # SPDX-License-Identifier: Apache-2.0\n-from importlib.metadata import entry_points\n from docutils.nodes import literal_block, section, title, make_id\n from sphinx.domains import Domain\n from docutils.parsers.rst import Directive\n@@ -34,6 +33,10 @@\n \n \n def find_script_callable(name):\n+ try:\n+ from importlib_metadata import entry_points\n+ except ModuleNotFoundError:\n+ from importlib.metadata import entry_points\n return list(entry_points(\n group='console_scripts', name=name))[0].load()\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -112,6 +112,7 @@\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n 'packaging',\n 'deprecat',\n+ 'importlib_metadata>3.5;python_version<\"3.10\"',\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n", "issue": "python 3.8/3.9 doesn't support entry_points with either group or select\nAs title.\r\n\r\nhttps://github.com/opendatacube/datacube-core/blob/5fed4a688c85c198c5d25f2570b0c92ef91dfc04/datacube/drivers/driver_cache.py#L50\r\n\r\nRef: https://docs.python.org/3/library/importlib.metadata.html\r\n>Compatibility Note: The \u201cselectable\u201d entry points were introduced in importlib_metadata 3.6 and Python 3.10. Prior to those changes, entry_points accepted no parameters and always returned a dictionary of entry points, keyed by group. For compatibility, if no parameters are passed to entry_points, a SelectableGroups object is returned, implementing that dict interface. In the future, calling entry_points with no parameters will return an EntryPoints object. Users should rely on the selection interface to retrieve entry points by group.\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2023 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nimport logging\nfrom typing import Dict, Any, Tuple, Iterable\n\n_LOG = logging.getLogger(__name__)\n\n\ndef load_drivers(group: str) -> Dict[str, Any]:\n \"\"\"\n Load available drivers for a given group name.\n\n Gracefully handles:\n\n - Driver module not able to be imported\n - Driver init function throwing an exception or returning None\n\n By having driver entry_points pointing to a function, we defer loading the driver\n module or running any code until required.\n\n :param group: Name of the entry point group e.g. 
\"datacube.plugins.io.read\"\n\n :returns: Dictionary String -> Driver Object\n \"\"\"\n\n def safe_load(ep):\n # pylint: disable=broad-except,bare-except\n try:\n driver_init = ep.load()\n except Exception as e:\n _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)\n _LOG.warning('Error was: %s', repr(e))\n return None\n\n try:\n driver = driver_init()\n except Exception:\n _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)\n return None\n\n if driver is None:\n _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)\n\n return driver\n\n def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:\n from importlib.metadata import entry_points\n for ep in entry_points(group=group):\n driver = safe_load(ep)\n if driver is not None:\n yield (ep.name, driver)\n\n return dict((name, driver) for name, driver in resolve_all(group))\n", "path": "datacube/drivers/driver_cache.py"}, {"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'hypothesis',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\ndoc_require = [\n 'Sphinx',\n 'sphinx_autodoc_typehints', # Propagate mypy info into docs\n 'sphinx-click',\n 'recommonmark',\n 'autodocsumm',\n 'beautifulsoup4',\n 'nbsphinx',\n 'pydata-sphinx-theme==0.9.0',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': doc_require,\n 's3': ['boto3', 'botocore'],\n 'test': tests_require,\n 'cf': ['compliance-checker>=4.0.0'],\n}\n\nextras_require['dev'] = sorted(set(sum([extras_require[k] for k in [\n 'test',\n 'doc',\n 'performance',\n 's3',\n 'distributed',\n]], [])))\n\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.8.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n 'datacube': ['py.typed'],\n },\n scripts=[],\n install_requires=[\n 'affine',\n 'attrs>=18.1',\n 'pyproj>=2.5',\n 'shapely>=2.0',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 
'numpy',\n 'psycopg2',\n 'lark',\n 'pandas',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1\n 'ruamel.yaml',\n 'sqlalchemy>=1.4,<2.0', # GeoAlchemy2 requires >=1.4. SqlAlchemy2 *may* work but has not been tested yet.\n 'GeoAlchemy2',\n 'toolz',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n 'packaging',\n 'deprecat',\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-worker = datacube.execution.worker:main',\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.postgres.index:index_driver_init',\n 'null = datacube.index.null.index:index_driver_init',\n 'memory = datacube.index.memory.index:index_driver_init',\n 'postgis = datacube.index.postgis.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}, {"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2023 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nfrom importlib.metadata import entry_points\nfrom docutils.nodes import literal_block, section, title, make_id\nfrom sphinx.domains import Domain\nfrom docutils.parsers.rst import Directive\nimport importlib\n\nimport click\n\n\nclass ClickHelpDirective(Directive):\n has_content = True\n required_arguments = 1\n\n def run(self):\n root_cmd = self.arguments[0]\n\n env = self.state.document.settings.env\n\n group = find_script_callable_from_env(root_cmd, env)\n\n return [generate_help_text(group, [root_cmd])]\n\n\ndef find_script_callable_from_env(name, env):\n commands = env.config.click_utils_commands\n\n module, function_name = commands[name].split(':')\n module = importlib.import_module(module)\n return getattr(module, function_name)\n\n\ndef find_script_callable(name):\n return list(entry_points(\n group='console_scripts', name=name))[0].load()\n\n\ndef generate_help_text(command, prefix):\n ctx = click.Context(command)\n help_opts = command.get_help_option(ctx).opts\n full_cmd = ' '.join(prefix)\n block = section(None,\n title(None, full_cmd),\n ids=[make_id(full_cmd)], names=[full_cmd])\n if help_opts:\n h = \"$ {} {}\\n\".format(full_cmd, help_opts[0]) + command.get_help(ctx)\n block.append(literal_block(None, h, language='console'))\n\n if isinstance(command, click.core.MultiCommand):\n for c in command.list_commands(ctx):\n c = command.resolve_command(ctx, [c])[1]\n block.append(generate_help_text(c, prefix+[c.name]))\n\n return block\n\n\ndef make_block(command, opt, content):\n h = \"$ {} {}\\n\".format(command, opt) + content\n return section(None,\n title(None, command),\n literal_block(None, h, language='console'),\n ids=[make_id(command)], names=[command])\n\n\nclass DatacubeDomain(Domain):\n name = 'datacube'\n label = 'Data Cube'\n directives = {\n 'click-help': ClickHelpDirective,\n }\n\n\ndef setup(app):\n app.add_config_value('click_utils_commands', {}, 'html')\n\n app.add_domain(DatacubeDomain)\n return {\n 'parallel_read_safe': False,\n 'parallel_write_safe': False,\n }\n", "path": "docs/click_utils.py"}], "after_files": [{"content": "# This file 
is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2023 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nimport logging\nfrom typing import Dict, Any, Tuple, Iterable\n\n_LOG = logging.getLogger(__name__)\n\n\ndef load_drivers(group: str) -> Dict[str, Any]:\n \"\"\"\n Load available drivers for a given group name.\n\n Gracefully handles:\n\n - Driver module not able to be imported\n - Driver init function throwing an exception or returning None\n\n By having driver entry_points pointing to a function, we defer loading the driver\n module or running any code until required.\n\n :param group: Name of the entry point group e.g. \"datacube.plugins.io.read\"\n\n :returns: Dictionary String -> Driver Object\n \"\"\"\n\n def safe_load(ep):\n # pylint: disable=broad-except,bare-except\n try:\n driver_init = ep.load()\n except Exception as e:\n _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)\n _LOG.warning('Error was: %s', repr(e))\n return None\n\n try:\n driver = driver_init()\n except Exception:\n _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)\n return None\n\n if driver is None:\n _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)\n\n return driver\n\n def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:\n try:\n from importlib_metadata import entry_points\n except ModuleNotFoundError:\n from importlib.metadata import entry_points\n for ep in entry_points(group=group):\n driver = safe_load(ep)\n if driver is not None:\n yield (ep.name, driver)\n\n return dict((name, driver) for name, driver in resolve_all(group))\n", "path": "datacube/drivers/driver_cache.py"}, {"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'hypothesis',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\ndoc_require = [\n 'Sphinx',\n 'sphinx_autodoc_typehints', # Propagate mypy info into docs\n 'sphinx-click',\n 'recommonmark',\n 'autodocsumm',\n 'beautifulsoup4',\n 'nbsphinx',\n 'pydata-sphinx-theme==0.9.0',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': doc_require,\n 's3': ['boto3', 'botocore'],\n 'test': tests_require,\n 'cf': ['compliance-checker>=4.0.0'],\n}\n\nextras_require['dev'] = sorted(set(sum([extras_require[k] for k in [\n 'test',\n 'doc',\n 'performance',\n 's3',\n 'distributed',\n]], [])))\n\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.8.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n 
\"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n 'datacube': ['py.typed'],\n },\n scripts=[],\n install_requires=[\n 'affine',\n 'attrs>=18.1',\n 'pyproj>=2.5',\n 'shapely>=2.0',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark',\n 'pandas',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1\n 'ruamel.yaml',\n 'sqlalchemy>=1.4,<2.0', # GeoAlchemy2 requires >=1.4. SqlAlchemy2 *may* work but has not been tested yet.\n 'GeoAlchemy2',\n 'toolz',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n 'packaging',\n 'deprecat',\n 'importlib_metadata>3.5;python_version<\"3.10\"',\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-worker = datacube.execution.worker:main',\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.postgres.index:index_driver_init',\n 'null = datacube.index.null.index:index_driver_init',\n 'memory = datacube.index.memory.index:index_driver_init',\n 'postgis = datacube.index.postgis.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}, {"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2023 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nfrom docutils.nodes import literal_block, section, title, make_id\nfrom sphinx.domains import Domain\nfrom docutils.parsers.rst import Directive\nimport importlib\n\nimport click\n\n\nclass ClickHelpDirective(Directive):\n has_content = True\n required_arguments = 1\n\n def run(self):\n root_cmd = self.arguments[0]\n\n env = self.state.document.settings.env\n\n group = find_script_callable_from_env(root_cmd, env)\n\n return [generate_help_text(group, [root_cmd])]\n\n\ndef find_script_callable_from_env(name, env):\n commands = env.config.click_utils_commands\n\n module, function_name = commands[name].split(':')\n module = importlib.import_module(module)\n return getattr(module, function_name)\n\n\ndef find_script_callable(name):\n try:\n from importlib_metadata import entry_points\n except ModuleNotFoundError:\n from importlib.metadata import entry_points\n return list(entry_points(\n group='console_scripts', name=name))[0].load()\n\n\ndef generate_help_text(command, prefix):\n ctx = click.Context(command)\n help_opts = command.get_help_option(ctx).opts\n full_cmd = ' '.join(prefix)\n block = section(None,\n title(None, full_cmd),\n ids=[make_id(full_cmd)], names=[full_cmd])\n if help_opts:\n h = \"$ {} {}\\n\".format(full_cmd, help_opts[0]) + 
command.get_help(ctx)\n block.append(literal_block(None, h, language='console'))\n\n if isinstance(command, click.core.MultiCommand):\n for c in command.list_commands(ctx):\n c = command.resolve_command(ctx, [c])[1]\n block.append(generate_help_text(c, prefix+[c.name]))\n\n return block\n\n\ndef make_block(command, opt, content):\n h = \"$ {} {}\\n\".format(command, opt) + content\n return section(None,\n title(None, command),\n literal_block(None, h, language='console'),\n ids=[make_id(command)], names=[command])\n\n\nclass DatacubeDomain(Domain):\n name = 'datacube'\n label = 'Data Cube'\n directives = {\n 'click-help': ClickHelpDirective,\n }\n\n\ndef setup(app):\n app.add_config_value('click_utils_commands', {}, 'html')\n\n app.add_domain(DatacubeDomain)\n return {\n 'parallel_read_safe': False,\n 'parallel_write_safe': False,\n }\n", "path": "docs/click_utils.py"}]}
3,157
427
gh_patches_debug_57081
rasdani/github-patches
git_diff
SeldonIO__MLServer-945
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- MLServer is incompatible with latest release of FastAPI MLServer is incompatible with [latest release of FastAPI](https://github.com/tiangolo/fastapi/releases/tag/0.89.0), and installing any version of MLServer will result in the following error, temp workaround added in this [pull request](https://github.com/SeldonIO/MLServer/pull/934) however, I think this needs a more in-depth root-cause analysis. ``` 2023-01-09 02:11:59,296 [mlserver] INFO - Using asyncio event-loop policy: uvloop 2023-01-09 02:11:59,301 [mlserver] WARNING - Model name 'node-1' is different than model's folder name '25-mlserver-example-single'. Traceback (most recent call last): File "/home/cc/miniconda3/envs/central-1/bin/mlserver", line 8, in <module> sys.exit(main()) File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py", line 79, in main root() File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1130, in __call__ return self.main(*args, **kwargs) File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1055, in main rv = self.invoke(ctx) File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1657, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1404, in invoke return ctx.invoke(self.callback, **ctx.params) File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 760, in invoke return __callback(*args, **kwargs) File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py", line 20, in wrapper return asyncio.run(f(*args, **kwargs)) File "/home/cc/miniconda3/envs/central-1/lib/python3.9/asyncio/runners.py", line 44, in run return loop.run_until_complete(main) File "uvloop/loop.pyx", line 1517, in uvloop.loop.Loop.run_until_complete File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py", line 43, in start server = MLServer(settings) File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/server.py", line 71, in __init__ self._rest_server = RESTServer( File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/server.py", line 26, in __init__ self._app = create_app( File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/app.py", line 43, in create_app APIRoute( File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/routing.py", line 400, in __init__ self.response_field = create_response_field( File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/utils.py", line 90, in create_response_field raise fastapi.exceptions.FastAPIError( fastapi.exceptions.FastAPIError: Invalid args for response field! Hint: check that <class 'starlette.responses.Response'> is a valid pydantic field type ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import os 2 3 from typing import Dict 4 from setuptools import setup, find_packages 5 6 ROOT_PATH = os.path.dirname(__file__) 7 PKG_NAME = "mlserver" 8 PKG_PATH = os.path.join(ROOT_PATH, PKG_NAME) 9 10 11 def _load_version() -> str: 12 version = "" 13 version_path = os.path.join(PKG_PATH, "version.py") 14 with open(version_path) as fp: 15 version_module: Dict[str, str] = {} 16 exec(fp.read(), version_module) 17 version = version_module["__version__"] 18 19 return version 20 21 22 def _load_description() -> str: 23 readme_path = os.path.join(ROOT_PATH, "README.md") 24 with open(readme_path) as fp: 25 return fp.read() 26 27 28 env_marker_cpython = ( 29 "sys_platform != 'win32'" 30 " and (sys_platform != 'cygwin'" 31 " and platform_python_implementation != 'PyPy')" 32 ) 33 34 setup( 35 name=PKG_NAME, 36 version=_load_version(), 37 url="https://github.com/SeldonIO/MLServer.git", 38 author="Seldon Technologies Ltd.", 39 author_email="[email protected]", 40 description="ML server", 41 packages=find_packages(exclude=["tests", "tests.*"]), 42 install_requires=[ 43 "click", 44 "fastapi<=0.88.0", 45 "python-dotenv", 46 "grpcio", 47 "importlib-metadata;python_version<'3.8'", 48 "numpy", 49 "pandas", 50 "protobuf", 51 "uvicorn", 52 "starlette_exporter", 53 "py-grpc-prometheus", 54 "uvloop;" + env_marker_cpython, 55 "aiokafka", 56 "tritonclient[http]>=2.24", 57 "aiofiles", 58 "orjson", 59 ], 60 entry_points={"console_scripts": ["mlserver=mlserver.cli:main"]}, 61 long_description=_load_description(), 62 long_description_content_type="text/markdown", 63 license="Apache 2.0", 64 ) 65 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -41,7 +41,8 @@ packages=find_packages(exclude=["tests", "tests.*"]), install_requires=[ "click", - "fastapi<=0.88.0", + # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861 + "fastapi<=0.89.1, !=0.89.0", "python-dotenv", "grpcio", "importlib-metadata;python_version<'3.8'",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,7 +41,8 @@\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n- \"fastapi<=0.88.0\",\n+ # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861\n+ \"fastapi<=0.89.1, !=0.89.0\",\n \"python-dotenv\",\n \"grpcio\",\n \"importlib-metadata;python_version<'3.8'\",\n", "issue": "MLServer is incompatible with latest release of FastAPI\nMLServer is incompatible with [latest release of FastAPI](https://github.com/tiangolo/fastapi/releases/tag/0.89.0), and installing any version of MLServer will result in the following error, temp workaround added in this [pull request](https://github.com/SeldonIO/MLServer/pull/934) however, I think this needs a more in-depth root-cause analysis.\r\n```\r\n2023-01-09 02:11:59,296 [mlserver] INFO - Using asyncio event-loop policy: uvloop\r\n2023-01-09 02:11:59,301 [mlserver] WARNING - Model name 'node-1' is different than model's folder name '25-mlserver-example-single'.\r\nTraceback (most recent call last):\r\n File \"/home/cc/miniconda3/envs/central-1/bin/mlserver\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py\", line 79, in main\r\n root()\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1130, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1055, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1657, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1404, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 760, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py\", line 20, in wrapper\r\n return asyncio.run(f(*args, **kwargs))\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/asyncio/runners.py\", line 44, in run\r\n return loop.run_until_complete(main)\r\n File \"uvloop/loop.pyx\", line 1517, in uvloop.loop.Loop.run_until_complete\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py\", line 43, in start\r\n server = MLServer(settings)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/server.py\", line 71, in __init__\r\n self._rest_server = RESTServer(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/server.py\", line 26, in __init__\r\n self._app = create_app(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/app.py\", line 43, in create_app\r\n APIRoute(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/routing.py\", line 400, in __init__\r\n self.response_field = create_response_field(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/utils.py\", line 90, in create_response_field\r\n raise fastapi.exceptions.FastAPIError(\r\nfastapi.exceptions.FastAPIError: Invalid args for response field! 
Hint: check that <class 'starlette.responses.Response'> is a valid pydantic field type\r\n```\n", "before_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n \"fastapi<=0.88.0\",\n \"python-dotenv\",\n \"grpcio\",\n \"importlib-metadata;python_version<'3.8'\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n \"starlette_exporter\",\n \"py-grpc-prometheus\",\n \"uvloop;\" + env_marker_cpython,\n \"aiokafka\",\n \"tritonclient[http]>=2.24\",\n \"aiofiles\",\n \"orjson\",\n ],\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861\n \"fastapi<=0.89.1, !=0.89.0\",\n \"python-dotenv\",\n \"grpcio\",\n \"importlib-metadata;python_version<'3.8'\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n \"starlette_exporter\",\n \"py-grpc-prometheus\",\n \"uvloop;\" + env_marker_cpython,\n \"aiokafka\",\n \"tritonclient[http]>=2.24\",\n \"aiofiles\",\n \"orjson\",\n ],\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}]}
1,737
138
gh_patches_debug_22708
rasdani/github-patches
git_diff
Project-MONAI__MONAI-6639
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- np.product is deprecated. Use np.prod instead. **Describe the bug** ``` monai/networks/nets/regressor.py:146:32: error: Module has no attribute "product" [attr-defined] monai/networks/nets/varautoencoder.py:123:27: error: Module has no attribute "product" [attr-defined] ``` np.product is deprecated. Use np.prod instead. https://github.com/numpy/numpy/pull/23314 np.product is deprecated. Use np.prod instead. **Describe the bug** ``` monai/networks/nets/regressor.py:146:32: error: Module has no attribute "product" [attr-defined] monai/networks/nets/varautoencoder.py:123:27: error: Module has no attribute "product" [attr-defined] ``` np.product is deprecated. Use np.prod instead. https://github.com/numpy/numpy/pull/23314 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `monai/networks/nets/varautoencoder.py` Content: ``` 1 # Copyright (c) MONAI Consortium 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # Unless required by applicable law or agreed to in writing, software 7 # distributed under the License is distributed on an "AS IS" BASIS, 8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 # See the License for the specific language governing permissions and 10 # limitations under the License. 11 12 from __future__ import annotations 13 14 from collections.abc import Sequence 15 16 import numpy as np 17 import torch 18 import torch.nn as nn 19 from torch.nn import functional as F 20 21 from monai.networks.layers.convutils import calculate_out_shape, same_padding 22 from monai.networks.layers.factories import Act, Norm 23 from monai.networks.nets import AutoEncoder 24 25 __all__ = ["VarAutoEncoder"] 26 27 28 class VarAutoEncoder(AutoEncoder): 29 """ 30 Variational Autoencoder based on the paper - https://arxiv.org/abs/1312.6114 31 32 Args: 33 spatial_dims: number of spatial dimensions. 34 in_shape: shape of input data starting with channel dimension. 35 out_channels: number of output channels. 36 latent_size: size of the latent variable. 37 channels: sequence of channels. Top block first. The length of `channels` should be no less than 2. 38 strides: sequence of convolution strides. The length of `stride` should equal to `len(channels) - 1`. 39 kernel_size: convolution kernel size, the value(s) should be odd. If sequence, 40 its length should equal to dimensions. Defaults to 3. 41 up_kernel_size: upsampling convolution kernel size, the value(s) should be odd. If sequence, 42 its length should equal to dimensions. Defaults to 3. 43 num_res_units: number of residual units. Defaults to 0. 44 inter_channels: sequence of channels defining the blocks in the intermediate layer between encode and decode. 45 inter_dilations: defines the dilation value for each block of the intermediate layer. Defaults to 1. 46 num_inter_units: number of residual units for each block of the intermediate layer. Defaults to 0. 47 act: activation type and arguments. Defaults to PReLU. 48 norm: feature normalization type and arguments. Defaults to instance norm. 49 dropout: dropout ratio. Defaults to no dropout. 50 bias: whether to have a bias term in convolution blocks. Defaults to True. 
51 According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_, 52 if a conv layer is directly followed by a batch norm layer, bias should be False. 53 use_sigmoid: whether to use the sigmoid function on final output. Defaults to True. 54 55 Examples:: 56 57 from monai.networks.nets import VarAutoEncoder 58 59 # 3 layer network accepting images with dimensions (1, 32, 32) and using a latent vector with 2 values 60 model = VarAutoEncoder( 61 spatial_dims=2, 62 in_shape=(32, 32), # image spatial shape 63 out_channels=1, 64 latent_size=2, 65 channels=(16, 32, 64), 66 strides=(1, 2, 2), 67 ) 68 69 see also: 70 - Variational autoencoder network with MedNIST Dataset 71 https://github.com/Project-MONAI/tutorials/blob/master/modules/varautoencoder_mednist.ipynb 72 """ 73 74 def __init__( 75 self, 76 spatial_dims: int, 77 in_shape: Sequence[int], 78 out_channels: int, 79 latent_size: int, 80 channels: Sequence[int], 81 strides: Sequence[int], 82 kernel_size: Sequence[int] | int = 3, 83 up_kernel_size: Sequence[int] | int = 3, 84 num_res_units: int = 0, 85 inter_channels: list | None = None, 86 inter_dilations: list | None = None, 87 num_inter_units: int = 2, 88 act: tuple | str | None = Act.PRELU, 89 norm: tuple | str = Norm.INSTANCE, 90 dropout: tuple | str | float | None = None, 91 bias: bool = True, 92 use_sigmoid: bool = True, 93 ) -> None: 94 self.in_channels, *self.in_shape = in_shape 95 self.use_sigmoid = use_sigmoid 96 97 self.latent_size = latent_size 98 self.final_size = np.asarray(self.in_shape, dtype=int) 99 100 super().__init__( 101 spatial_dims, 102 self.in_channels, 103 out_channels, 104 channels, 105 strides, 106 kernel_size, 107 up_kernel_size, 108 num_res_units, 109 inter_channels, 110 inter_dilations, 111 num_inter_units, 112 act, 113 norm, 114 dropout, 115 bias, 116 ) 117 118 padding = same_padding(self.kernel_size) 119 120 for s in strides: 121 self.final_size = calculate_out_shape(self.final_size, self.kernel_size, s, padding) # type: ignore 122 123 linear_size = int(np.product(self.final_size)) * self.encoded_channels 124 self.mu = nn.Linear(linear_size, self.latent_size) 125 self.logvar = nn.Linear(linear_size, self.latent_size) 126 self.decodeL = nn.Linear(self.latent_size, linear_size) 127 128 def encode_forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: 129 x = self.encode(x) 130 x = self.intermediate(x) 131 x = x.view(x.shape[0], -1) 132 mu = self.mu(x) 133 logvar = self.logvar(x) 134 return mu, logvar 135 136 def decode_forward(self, z: torch.Tensor, use_sigmoid: bool = True) -> torch.Tensor: 137 x = F.relu(self.decodeL(z)) 138 x = x.view(x.shape[0], self.channels[-1], *self.final_size) 139 x = self.decode(x) 140 if use_sigmoid: 141 x = torch.sigmoid(x) 142 return x 143 144 def reparameterize(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor: 145 std = torch.exp(0.5 * logvar) 146 147 if self.training: # multiply random noise with std only during training 148 std = torch.randn_like(std).mul(std) 149 150 return std.add_(mu) 151 152 def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: 153 mu, logvar = self.encode_forward(x) 154 z = self.reparameterize(mu, logvar) 155 return self.decode_forward(z, self.use_sigmoid), mu, logvar, z 156 ``` Path: `monai/networks/nets/regressor.py` Content: ``` 1 # Copyright (c) MONAI Consortium 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the 
License. 4 # You may obtain a copy of the License at 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # Unless required by applicable law or agreed to in writing, software 7 # distributed under the License is distributed on an "AS IS" BASIS, 8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 # See the License for the specific language governing permissions and 10 # limitations under the License. 11 12 from __future__ import annotations 13 14 from collections.abc import Sequence 15 16 import numpy as np 17 import torch 18 import torch.nn as nn 19 20 from monai.networks.blocks import Convolution, ResidualUnit 21 from monai.networks.layers.convutils import calculate_out_shape, same_padding 22 from monai.networks.layers.factories import Act, Norm 23 from monai.networks.layers.simplelayers import Reshape 24 from monai.utils import ensure_tuple, ensure_tuple_rep 25 26 __all__ = ["Regressor"] 27 28 29 class Regressor(nn.Module): 30 """ 31 This defines a network for relating large-sized input tensors to small output tensors, ie. regressing large 32 values to a prediction. An output of a single dimension can be used as value regression or multi-label 33 classification prediction, an output of a single value can be used as a discriminator or critic prediction. 34 35 The network is constructed as a sequence of layers, either :py:class:`monai.networks.blocks.Convolution` or 36 :py:class:`monai.networks.blocks.ResidualUnit`, with a final fully-connected layer resizing the output from the 37 blocks to the final size. Each block is defined with a stride value typically used to downsample the input using 38 strided convolutions. In this way each block progressively condenses information from the input into a deep 39 representation the final fully-connected layer relates to a final result. 40 41 Args: 42 in_shape: tuple of integers stating the dimension of the input tensor (minus batch dimension) 43 out_shape: tuple of integers stating the dimension of the final output tensor (minus batch dimension) 44 channels: tuple of integers stating the output channels of each convolutional layer 45 strides: tuple of integers stating the stride (downscale factor) of each convolutional layer 46 kernel_size: integer or tuple of integers stating size of convolutional kernels 47 num_res_units: integer stating number of convolutions in residual units, 0 means no residual units 48 act: name or type defining activation layers 49 norm: name or type defining normalization layers 50 dropout: optional float value in range [0, 1] stating dropout probability for layers, None for no dropout 51 bias: boolean stating if convolution layers should have a bias component 52 53 Examples:: 54 55 # infers a 2-value result (eg. 
a 2D cartesian coordinate) from a 64x64 image 56 net = Regressor((1, 64, 64), (2,), (2, 4, 8), (2, 2, 2)) 57 58 """ 59 60 def __init__( 61 self, 62 in_shape: Sequence[int], 63 out_shape: Sequence[int], 64 channels: Sequence[int], 65 strides: Sequence[int], 66 kernel_size: Sequence[int] | int = 3, 67 num_res_units: int = 2, 68 act=Act.PRELU, 69 norm=Norm.INSTANCE, 70 dropout: float | None = None, 71 bias: bool = True, 72 ) -> None: 73 super().__init__() 74 75 self.in_channels, *self.in_shape = ensure_tuple(in_shape) 76 self.dimensions = len(self.in_shape) 77 self.channels = ensure_tuple(channels) 78 self.strides = ensure_tuple(strides) 79 self.out_shape = ensure_tuple(out_shape) 80 self.kernel_size = ensure_tuple_rep(kernel_size, self.dimensions) 81 self.num_res_units = num_res_units 82 self.act = act 83 self.norm = norm 84 self.dropout = dropout 85 self.bias = bias 86 self.net = nn.Sequential() 87 88 echannel = self.in_channels 89 90 padding = same_padding(kernel_size) 91 92 self.final_size = np.asarray(self.in_shape, dtype=int) 93 self.reshape = Reshape(*self.out_shape) 94 95 # encode stage 96 for i, (c, s) in enumerate(zip(self.channels, self.strides)): 97 layer = self._get_layer(echannel, c, s, i == len(channels) - 1) 98 echannel = c # use the output channel number as the input for the next loop 99 self.net.add_module("layer_%i" % i, layer) 100 self.final_size = calculate_out_shape(self.final_size, kernel_size, s, padding) # type: ignore 101 102 self.final = self._get_final_layer((echannel,) + self.final_size) 103 104 def _get_layer( 105 self, in_channels: int, out_channels: int, strides: int, is_last: bool 106 ) -> ResidualUnit | Convolution: 107 """ 108 Returns a layer accepting inputs with `in_channels` number of channels and producing outputs of `out_channels` 109 number of channels. The `strides` indicates downsampling factor, ie. convolutional stride. If `is_last` 110 is True this is the final layer and is not expected to include activation and normalization layers. 111 """ 112 113 layer: ResidualUnit | Convolution 114 115 if self.num_res_units > 0: 116 layer = ResidualUnit( 117 subunits=self.num_res_units, 118 last_conv_only=is_last, 119 spatial_dims=self.dimensions, 120 in_channels=in_channels, 121 out_channels=out_channels, 122 strides=strides, 123 kernel_size=self.kernel_size, 124 act=self.act, 125 norm=self.norm, 126 dropout=self.dropout, 127 bias=self.bias, 128 ) 129 else: 130 layer = Convolution( 131 conv_only=is_last, 132 spatial_dims=self.dimensions, 133 in_channels=in_channels, 134 out_channels=out_channels, 135 strides=strides, 136 kernel_size=self.kernel_size, 137 act=self.act, 138 norm=self.norm, 139 dropout=self.dropout, 140 bias=self.bias, 141 ) 142 143 return layer 144 145 def _get_final_layer(self, in_shape: Sequence[int]): 146 linear = nn.Linear(int(np.product(in_shape)), int(np.product(self.out_shape))) 147 return nn.Sequential(nn.Flatten(), linear) 148 149 def forward(self, x: torch.Tensor) -> torch.Tensor: 150 x = self.net(x) 151 x = self.final(x) 152 x = self.reshape(x) 153 return x 154 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/monai/networks/nets/regressor.py b/monai/networks/nets/regressor.py --- a/monai/networks/nets/regressor.py +++ b/monai/networks/nets/regressor.py @@ -143,7 +143,7 @@ return layer def _get_final_layer(self, in_shape: Sequence[int]): - linear = nn.Linear(int(np.product(in_shape)), int(np.product(self.out_shape))) + linear = nn.Linear(int(np.prod(in_shape)), int(np.prod(self.out_shape))) return nn.Sequential(nn.Flatten(), linear) def forward(self, x: torch.Tensor) -> torch.Tensor: diff --git a/monai/networks/nets/varautoencoder.py b/monai/networks/nets/varautoencoder.py --- a/monai/networks/nets/varautoencoder.py +++ b/monai/networks/nets/varautoencoder.py @@ -120,7 +120,7 @@ for s in strides: self.final_size = calculate_out_shape(self.final_size, self.kernel_size, s, padding) # type: ignore - linear_size = int(np.product(self.final_size)) * self.encoded_channels + linear_size = int(np.prod(self.final_size)) * self.encoded_channels self.mu = nn.Linear(linear_size, self.latent_size) self.logvar = nn.Linear(linear_size, self.latent_size) self.decodeL = nn.Linear(self.latent_size, linear_size)
{"golden_diff": "diff --git a/monai/networks/nets/regressor.py b/monai/networks/nets/regressor.py\n--- a/monai/networks/nets/regressor.py\n+++ b/monai/networks/nets/regressor.py\n@@ -143,7 +143,7 @@\n return layer\n \n def _get_final_layer(self, in_shape: Sequence[int]):\n- linear = nn.Linear(int(np.product(in_shape)), int(np.product(self.out_shape)))\n+ linear = nn.Linear(int(np.prod(in_shape)), int(np.prod(self.out_shape)))\n return nn.Sequential(nn.Flatten(), linear)\n \n def forward(self, x: torch.Tensor) -> torch.Tensor:\ndiff --git a/monai/networks/nets/varautoencoder.py b/monai/networks/nets/varautoencoder.py\n--- a/monai/networks/nets/varautoencoder.py\n+++ b/monai/networks/nets/varautoencoder.py\n@@ -120,7 +120,7 @@\n for s in strides:\n self.final_size = calculate_out_shape(self.final_size, self.kernel_size, s, padding) # type: ignore\n \n- linear_size = int(np.product(self.final_size)) * self.encoded_channels\n+ linear_size = int(np.prod(self.final_size)) * self.encoded_channels\n self.mu = nn.Linear(linear_size, self.latent_size)\n self.logvar = nn.Linear(linear_size, self.latent_size)\n self.decodeL = nn.Linear(self.latent_size, linear_size)\n", "issue": "np.product is deprecated. Use np.prod instead.\n**Describe the bug**\r\n```\r\nmonai/networks/nets/regressor.py:146:32: error: Module has no attribute \"product\" [attr-defined]\r\nmonai/networks/nets/varautoencoder.py:123:27: error: Module has no attribute \"product\" [attr-defined]\r\n```\r\nnp.product is deprecated. Use np.prod instead. https://github.com/numpy/numpy/pull/23314\nnp.product is deprecated. Use np.prod instead.\n**Describe the bug**\r\n```\r\nmonai/networks/nets/regressor.py:146:32: error: Module has no attribute \"product\" [attr-defined]\r\nmonai/networks/nets/varautoencoder.py:123:27: error: Module has no attribute \"product\" [attr-defined]\r\n```\r\nnp.product is deprecated. Use np.prod instead. https://github.com/numpy/numpy/pull/23314\n", "before_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom collections.abc import Sequence\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nfrom monai.networks.layers.convutils import calculate_out_shape, same_padding\nfrom monai.networks.layers.factories import Act, Norm\nfrom monai.networks.nets import AutoEncoder\n\n__all__ = [\"VarAutoEncoder\"]\n\n\nclass VarAutoEncoder(AutoEncoder):\n \"\"\"\n Variational Autoencoder based on the paper - https://arxiv.org/abs/1312.6114\n\n Args:\n spatial_dims: number of spatial dimensions.\n in_shape: shape of input data starting with channel dimension.\n out_channels: number of output channels.\n latent_size: size of the latent variable.\n channels: sequence of channels. Top block first. The length of `channels` should be no less than 2.\n strides: sequence of convolution strides. 
The length of `stride` should equal to `len(channels) - 1`.\n kernel_size: convolution kernel size, the value(s) should be odd. If sequence,\n its length should equal to dimensions. Defaults to 3.\n up_kernel_size: upsampling convolution kernel size, the value(s) should be odd. If sequence,\n its length should equal to dimensions. Defaults to 3.\n num_res_units: number of residual units. Defaults to 0.\n inter_channels: sequence of channels defining the blocks in the intermediate layer between encode and decode.\n inter_dilations: defines the dilation value for each block of the intermediate layer. Defaults to 1.\n num_inter_units: number of residual units for each block of the intermediate layer. Defaults to 0.\n act: activation type and arguments. Defaults to PReLU.\n norm: feature normalization type and arguments. Defaults to instance norm.\n dropout: dropout ratio. Defaults to no dropout.\n bias: whether to have a bias term in convolution blocks. Defaults to True.\n According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,\n if a conv layer is directly followed by a batch norm layer, bias should be False.\n use_sigmoid: whether to use the sigmoid function on final output. Defaults to True.\n\n Examples::\n\n from monai.networks.nets import VarAutoEncoder\n\n # 3 layer network accepting images with dimensions (1, 32, 32) and using a latent vector with 2 values\n model = VarAutoEncoder(\n spatial_dims=2,\n in_shape=(32, 32), # image spatial shape\n out_channels=1,\n latent_size=2,\n channels=(16, 32, 64),\n strides=(1, 2, 2),\n )\n\n see also:\n - Variational autoencoder network with MedNIST Dataset\n https://github.com/Project-MONAI/tutorials/blob/master/modules/varautoencoder_mednist.ipynb\n \"\"\"\n\n def __init__(\n self,\n spatial_dims: int,\n in_shape: Sequence[int],\n out_channels: int,\n latent_size: int,\n channels: Sequence[int],\n strides: Sequence[int],\n kernel_size: Sequence[int] | int = 3,\n up_kernel_size: Sequence[int] | int = 3,\n num_res_units: int = 0,\n inter_channels: list | None = None,\n inter_dilations: list | None = None,\n num_inter_units: int = 2,\n act: tuple | str | None = Act.PRELU,\n norm: tuple | str = Norm.INSTANCE,\n dropout: tuple | str | float | None = None,\n bias: bool = True,\n use_sigmoid: bool = True,\n ) -> None:\n self.in_channels, *self.in_shape = in_shape\n self.use_sigmoid = use_sigmoid\n\n self.latent_size = latent_size\n self.final_size = np.asarray(self.in_shape, dtype=int)\n\n super().__init__(\n spatial_dims,\n self.in_channels,\n out_channels,\n channels,\n strides,\n kernel_size,\n up_kernel_size,\n num_res_units,\n inter_channels,\n inter_dilations,\n num_inter_units,\n act,\n norm,\n dropout,\n bias,\n )\n\n padding = same_padding(self.kernel_size)\n\n for s in strides:\n self.final_size = calculate_out_shape(self.final_size, self.kernel_size, s, padding) # type: ignore\n\n linear_size = int(np.product(self.final_size)) * self.encoded_channels\n self.mu = nn.Linear(linear_size, self.latent_size)\n self.logvar = nn.Linear(linear_size, self.latent_size)\n self.decodeL = nn.Linear(self.latent_size, linear_size)\n\n def encode_forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n x = self.encode(x)\n x = self.intermediate(x)\n x = x.view(x.shape[0], -1)\n mu = self.mu(x)\n logvar = self.logvar(x)\n return mu, logvar\n\n def decode_forward(self, z: torch.Tensor, use_sigmoid: bool = True) -> torch.Tensor:\n x = F.relu(self.decodeL(z))\n x = x.view(x.shape[0], 
self.channels[-1], *self.final_size)\n x = self.decode(x)\n if use_sigmoid:\n x = torch.sigmoid(x)\n return x\n\n def reparameterize(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:\n std = torch.exp(0.5 * logvar)\n\n if self.training: # multiply random noise with std only during training\n std = torch.randn_like(std).mul(std)\n\n return std.add_(mu)\n\n def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n mu, logvar = self.encode_forward(x)\n z = self.reparameterize(mu, logvar)\n return self.decode_forward(z, self.use_sigmoid), mu, logvar, z\n", "path": "monai/networks/nets/varautoencoder.py"}, {"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom collections.abc import Sequence\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom monai.networks.blocks import Convolution, ResidualUnit\nfrom monai.networks.layers.convutils import calculate_out_shape, same_padding\nfrom monai.networks.layers.factories import Act, Norm\nfrom monai.networks.layers.simplelayers import Reshape\nfrom monai.utils import ensure_tuple, ensure_tuple_rep\n\n__all__ = [\"Regressor\"]\n\n\nclass Regressor(nn.Module):\n \"\"\"\n This defines a network for relating large-sized input tensors to small output tensors, ie. regressing large\n values to a prediction. An output of a single dimension can be used as value regression or multi-label\n classification prediction, an output of a single value can be used as a discriminator or critic prediction.\n\n The network is constructed as a sequence of layers, either :py:class:`monai.networks.blocks.Convolution` or\n :py:class:`monai.networks.blocks.ResidualUnit`, with a final fully-connected layer resizing the output from the\n blocks to the final size. Each block is defined with a stride value typically used to downsample the input using\n strided convolutions. In this way each block progressively condenses information from the input into a deep\n representation the final fully-connected layer relates to a final result.\n\n Args:\n in_shape: tuple of integers stating the dimension of the input tensor (minus batch dimension)\n out_shape: tuple of integers stating the dimension of the final output tensor (minus batch dimension)\n channels: tuple of integers stating the output channels of each convolutional layer\n strides: tuple of integers stating the stride (downscale factor) of each convolutional layer\n kernel_size: integer or tuple of integers stating size of convolutional kernels\n num_res_units: integer stating number of convolutions in residual units, 0 means no residual units\n act: name or type defining activation layers\n norm: name or type defining normalization layers\n dropout: optional float value in range [0, 1] stating dropout probability for layers, None for no dropout\n bias: boolean stating if convolution layers should have a bias component\n\n Examples::\n\n # infers a 2-value result (eg. 
a 2D cartesian coordinate) from a 64x64 image\n net = Regressor((1, 64, 64), (2,), (2, 4, 8), (2, 2, 2))\n\n \"\"\"\n\n def __init__(\n self,\n in_shape: Sequence[int],\n out_shape: Sequence[int],\n channels: Sequence[int],\n strides: Sequence[int],\n kernel_size: Sequence[int] | int = 3,\n num_res_units: int = 2,\n act=Act.PRELU,\n norm=Norm.INSTANCE,\n dropout: float | None = None,\n bias: bool = True,\n ) -> None:\n super().__init__()\n\n self.in_channels, *self.in_shape = ensure_tuple(in_shape)\n self.dimensions = len(self.in_shape)\n self.channels = ensure_tuple(channels)\n self.strides = ensure_tuple(strides)\n self.out_shape = ensure_tuple(out_shape)\n self.kernel_size = ensure_tuple_rep(kernel_size, self.dimensions)\n self.num_res_units = num_res_units\n self.act = act\n self.norm = norm\n self.dropout = dropout\n self.bias = bias\n self.net = nn.Sequential()\n\n echannel = self.in_channels\n\n padding = same_padding(kernel_size)\n\n self.final_size = np.asarray(self.in_shape, dtype=int)\n self.reshape = Reshape(*self.out_shape)\n\n # encode stage\n for i, (c, s) in enumerate(zip(self.channels, self.strides)):\n layer = self._get_layer(echannel, c, s, i == len(channels) - 1)\n echannel = c # use the output channel number as the input for the next loop\n self.net.add_module(\"layer_%i\" % i, layer)\n self.final_size = calculate_out_shape(self.final_size, kernel_size, s, padding) # type: ignore\n\n self.final = self._get_final_layer((echannel,) + self.final_size)\n\n def _get_layer(\n self, in_channels: int, out_channels: int, strides: int, is_last: bool\n ) -> ResidualUnit | Convolution:\n \"\"\"\n Returns a layer accepting inputs with `in_channels` number of channels and producing outputs of `out_channels`\n number of channels. The `strides` indicates downsampling factor, ie. convolutional stride. 
If `is_last`\n is True this is the final layer and is not expected to include activation and normalization layers.\n \"\"\"\n\n layer: ResidualUnit | Convolution\n\n if self.num_res_units > 0:\n layer = ResidualUnit(\n subunits=self.num_res_units,\n last_conv_only=is_last,\n spatial_dims=self.dimensions,\n in_channels=in_channels,\n out_channels=out_channels,\n strides=strides,\n kernel_size=self.kernel_size,\n act=self.act,\n norm=self.norm,\n dropout=self.dropout,\n bias=self.bias,\n )\n else:\n layer = Convolution(\n conv_only=is_last,\n spatial_dims=self.dimensions,\n in_channels=in_channels,\n out_channels=out_channels,\n strides=strides,\n kernel_size=self.kernel_size,\n act=self.act,\n norm=self.norm,\n dropout=self.dropout,\n bias=self.bias,\n )\n\n return layer\n\n def _get_final_layer(self, in_shape: Sequence[int]):\n linear = nn.Linear(int(np.product(in_shape)), int(np.product(self.out_shape)))\n return nn.Sequential(nn.Flatten(), linear)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.net(x)\n x = self.final(x)\n x = self.reshape(x)\n return x\n", "path": "monai/networks/nets/regressor.py"}], "after_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom collections.abc import Sequence\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nfrom monai.networks.layers.convutils import calculate_out_shape, same_padding\nfrom monai.networks.layers.factories import Act, Norm\nfrom monai.networks.nets import AutoEncoder\n\n__all__ = [\"VarAutoEncoder\"]\n\n\nclass VarAutoEncoder(AutoEncoder):\n \"\"\"\n Variational Autoencoder based on the paper - https://arxiv.org/abs/1312.6114\n\n Args:\n spatial_dims: number of spatial dimensions.\n in_shape: shape of input data starting with channel dimension.\n out_channels: number of output channels.\n latent_size: size of the latent variable.\n channels: sequence of channels. Top block first. The length of `channels` should be no less than 2.\n strides: sequence of convolution strides. The length of `stride` should equal to `len(channels) - 1`.\n kernel_size: convolution kernel size, the value(s) should be odd. If sequence,\n its length should equal to dimensions. Defaults to 3.\n up_kernel_size: upsampling convolution kernel size, the value(s) should be odd. If sequence,\n its length should equal to dimensions. Defaults to 3.\n num_res_units: number of residual units. Defaults to 0.\n inter_channels: sequence of channels defining the blocks in the intermediate layer between encode and decode.\n inter_dilations: defines the dilation value for each block of the intermediate layer. Defaults to 1.\n num_inter_units: number of residual units for each block of the intermediate layer. Defaults to 0.\n act: activation type and arguments. Defaults to PReLU.\n norm: feature normalization type and arguments. Defaults to instance norm.\n dropout: dropout ratio. 
Defaults to no dropout.\n bias: whether to have a bias term in convolution blocks. Defaults to True.\n According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,\n if a conv layer is directly followed by a batch norm layer, bias should be False.\n use_sigmoid: whether to use the sigmoid function on final output. Defaults to True.\n\n Examples::\n\n from monai.networks.nets import VarAutoEncoder\n\n # 3 layer network accepting images with dimensions (1, 32, 32) and using a latent vector with 2 values\n model = VarAutoEncoder(\n spatial_dims=2,\n in_shape=(32, 32), # image spatial shape\n out_channels=1,\n latent_size=2,\n channels=(16, 32, 64),\n strides=(1, 2, 2),\n )\n\n see also:\n - Variational autoencoder network with MedNIST Dataset\n https://github.com/Project-MONAI/tutorials/blob/master/modules/varautoencoder_mednist.ipynb\n \"\"\"\n\n def __init__(\n self,\n spatial_dims: int,\n in_shape: Sequence[int],\n out_channels: int,\n latent_size: int,\n channels: Sequence[int],\n strides: Sequence[int],\n kernel_size: Sequence[int] | int = 3,\n up_kernel_size: Sequence[int] | int = 3,\n num_res_units: int = 0,\n inter_channels: list | None = None,\n inter_dilations: list | None = None,\n num_inter_units: int = 2,\n act: tuple | str | None = Act.PRELU,\n norm: tuple | str = Norm.INSTANCE,\n dropout: tuple | str | float | None = None,\n bias: bool = True,\n use_sigmoid: bool = True,\n ) -> None:\n self.in_channels, *self.in_shape = in_shape\n self.use_sigmoid = use_sigmoid\n\n self.latent_size = latent_size\n self.final_size = np.asarray(self.in_shape, dtype=int)\n\n super().__init__(\n spatial_dims,\n self.in_channels,\n out_channels,\n channels,\n strides,\n kernel_size,\n up_kernel_size,\n num_res_units,\n inter_channels,\n inter_dilations,\n num_inter_units,\n act,\n norm,\n dropout,\n bias,\n )\n\n padding = same_padding(self.kernel_size)\n\n for s in strides:\n self.final_size = calculate_out_shape(self.final_size, self.kernel_size, s, padding) # type: ignore\n\n linear_size = int(np.prod(self.final_size)) * self.encoded_channels\n self.mu = nn.Linear(linear_size, self.latent_size)\n self.logvar = nn.Linear(linear_size, self.latent_size)\n self.decodeL = nn.Linear(self.latent_size, linear_size)\n\n def encode_forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n x = self.encode(x)\n x = self.intermediate(x)\n x = x.view(x.shape[0], -1)\n mu = self.mu(x)\n logvar = self.logvar(x)\n return mu, logvar\n\n def decode_forward(self, z: torch.Tensor, use_sigmoid: bool = True) -> torch.Tensor:\n x = F.relu(self.decodeL(z))\n x = x.view(x.shape[0], self.channels[-1], *self.final_size)\n x = self.decode(x)\n if use_sigmoid:\n x = torch.sigmoid(x)\n return x\n\n def reparameterize(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:\n std = torch.exp(0.5 * logvar)\n\n if self.training: # multiply random noise with std only during training\n std = torch.randn_like(std).mul(std)\n\n return std.add_(mu)\n\n def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n mu, logvar = self.encode_forward(x)\n z = self.reparameterize(mu, logvar)\n return self.decode_forward(z, self.use_sigmoid), mu, logvar, z\n", "path": "monai/networks/nets/varautoencoder.py"}, {"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# 
http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom collections.abc import Sequence\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom monai.networks.blocks import Convolution, ResidualUnit\nfrom monai.networks.layers.convutils import calculate_out_shape, same_padding\nfrom monai.networks.layers.factories import Act, Norm\nfrom monai.networks.layers.simplelayers import Reshape\nfrom monai.utils import ensure_tuple, ensure_tuple_rep\n\n__all__ = [\"Regressor\"]\n\n\nclass Regressor(nn.Module):\n \"\"\"\n This defines a network for relating large-sized input tensors to small output tensors, ie. regressing large\n values to a prediction. An output of a single dimension can be used as value regression or multi-label\n classification prediction, an output of a single value can be used as a discriminator or critic prediction.\n\n The network is constructed as a sequence of layers, either :py:class:`monai.networks.blocks.Convolution` or\n :py:class:`monai.networks.blocks.ResidualUnit`, with a final fully-connected layer resizing the output from the\n blocks to the final size. Each block is defined with a stride value typically used to downsample the input using\n strided convolutions. In this way each block progressively condenses information from the input into a deep\n representation the final fully-connected layer relates to a final result.\n\n Args:\n in_shape: tuple of integers stating the dimension of the input tensor (minus batch dimension)\n out_shape: tuple of integers stating the dimension of the final output tensor (minus batch dimension)\n channels: tuple of integers stating the output channels of each convolutional layer\n strides: tuple of integers stating the stride (downscale factor) of each convolutional layer\n kernel_size: integer or tuple of integers stating size of convolutional kernels\n num_res_units: integer stating number of convolutions in residual units, 0 means no residual units\n act: name or type defining activation layers\n norm: name or type defining normalization layers\n dropout: optional float value in range [0, 1] stating dropout probability for layers, None for no dropout\n bias: boolean stating if convolution layers should have a bias component\n\n Examples::\n\n # infers a 2-value result (eg. 
a 2D cartesian coordinate) from a 64x64 image\n net = Regressor((1, 64, 64), (2,), (2, 4, 8), (2, 2, 2))\n\n \"\"\"\n\n def __init__(\n self,\n in_shape: Sequence[int],\n out_shape: Sequence[int],\n channels: Sequence[int],\n strides: Sequence[int],\n kernel_size: Sequence[int] | int = 3,\n num_res_units: int = 2,\n act=Act.PRELU,\n norm=Norm.INSTANCE,\n dropout: float | None = None,\n bias: bool = True,\n ) -> None:\n super().__init__()\n\n self.in_channels, *self.in_shape = ensure_tuple(in_shape)\n self.dimensions = len(self.in_shape)\n self.channels = ensure_tuple(channels)\n self.strides = ensure_tuple(strides)\n self.out_shape = ensure_tuple(out_shape)\n self.kernel_size = ensure_tuple_rep(kernel_size, self.dimensions)\n self.num_res_units = num_res_units\n self.act = act\n self.norm = norm\n self.dropout = dropout\n self.bias = bias\n self.net = nn.Sequential()\n\n echannel = self.in_channels\n\n padding = same_padding(kernel_size)\n\n self.final_size = np.asarray(self.in_shape, dtype=int)\n self.reshape = Reshape(*self.out_shape)\n\n # encode stage\n for i, (c, s) in enumerate(zip(self.channels, self.strides)):\n layer = self._get_layer(echannel, c, s, i == len(channels) - 1)\n echannel = c # use the output channel number as the input for the next loop\n self.net.add_module(\"layer_%i\" % i, layer)\n self.final_size = calculate_out_shape(self.final_size, kernel_size, s, padding) # type: ignore\n\n self.final = self._get_final_layer((echannel,) + self.final_size)\n\n def _get_layer(\n self, in_channels: int, out_channels: int, strides: int, is_last: bool\n ) -> ResidualUnit | Convolution:\n \"\"\"\n Returns a layer accepting inputs with `in_channels` number of channels and producing outputs of `out_channels`\n number of channels. The `strides` indicates downsampling factor, ie. convolutional stride. If `is_last`\n is True this is the final layer and is not expected to include activation and normalization layers.\n \"\"\"\n\n layer: ResidualUnit | Convolution\n\n if self.num_res_units > 0:\n layer = ResidualUnit(\n subunits=self.num_res_units,\n last_conv_only=is_last,\n spatial_dims=self.dimensions,\n in_channels=in_channels,\n out_channels=out_channels,\n strides=strides,\n kernel_size=self.kernel_size,\n act=self.act,\n norm=self.norm,\n dropout=self.dropout,\n bias=self.bias,\n )\n else:\n layer = Convolution(\n conv_only=is_last,\n spatial_dims=self.dimensions,\n in_channels=in_channels,\n out_channels=out_channels,\n strides=strides,\n kernel_size=self.kernel_size,\n act=self.act,\n norm=self.norm,\n dropout=self.dropout,\n bias=self.bias,\n )\n\n return layer\n\n def _get_final_layer(self, in_shape: Sequence[int]):\n linear = nn.Linear(int(np.prod(in_shape)), int(np.prod(self.out_shape)))\n return nn.Sequential(nn.Flatten(), linear)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.net(x)\n x = self.final(x)\n x = self.reshape(x)\n return x\n", "path": "monai/networks/nets/regressor.py"}]}
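The MONAI patch recorded in this row swaps `np.product` for `np.prod` when sizing the Regressor's final linear layer. As a minimal illustration (the shapes below are hypothetical, not taken from the dataset), `numpy.prod` is the supported spelling, while `numpy.product` is a deprecated alias that recent NumPy releases drop entirely:

```python
import numpy as np

# Hypothetical shapes standing in for (echannel,) + final_size and out_shape.
in_shape = (8, 4, 4)
out_shape = (2,)

linear_in = int(np.prod(in_shape))    # 128 -- works on all NumPy versions
linear_out = int(np.prod(out_shape))  # 2
# int(np.product(in_shape)) gives the same value on old NumPy, but the alias is
# deprecated and removed in newer releases, which is what the patch addresses.
print(linear_in, linear_out)
```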
4,095
335
gh_patches_debug_6962
rasdani/github-patches
git_diff
magenta__magenta-1701
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error training latest version of MusicVAE #### Steps to reproduce the issue - Create new magenta environment as well as cloning magenta repo: `curl https://raw.githubusercontent.com/tensorflow/magenta/master/magenta/tools/magenta-install.sh > /tmp/magenta-install.sh` `bash /tmp/magenta-install.sh` `git clone https://github.com/tensorflow/magenta.git` - Training simple model: `music_vae_train --config=cat-mel_2bar_small --run_dir=/tmp/cat-mel_2bar_small/ --mode=train --examples_path=data/notesequences.tfrecord` #### Expected results: - Training loss and global_step print on console. #### Actual output: - `line 123, in _get_input_tensors iterator = dataset.make_one_shot_iterator() AttributeError: 'PrefetchDataset' object has no attribute 'make_one_shot_iterator'` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `magenta/models/music_vae/music_vae_train.py` Content: ``` 1 # Copyright 2020 The Magenta Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 # Lint as: python3 16 """MusicVAE training script.""" 17 18 from __future__ import absolute_import 19 from __future__ import division 20 from __future__ import print_function 21 22 import os 23 24 from magenta.models.music_vae import configs 25 from magenta.models.music_vae import data 26 import tensorflow.compat.v1 as tf 27 from tensorflow.contrib import training as contrib_training 28 29 flags = tf.app.flags 30 FLAGS = flags.FLAGS 31 32 flags.DEFINE_string( 33 'master', '', 34 'The TensorFlow master to use.') 35 flags.DEFINE_string( 36 'examples_path', None, 37 'Path to a TFRecord file of NoteSequence examples. Overrides the config.') 38 flags.DEFINE_string( 39 'tfds_name', None, 40 'TensorFlow Datasets dataset name to use. Overrides the config.') 41 flags.DEFINE_string( 42 'run_dir', None, 43 'Path where checkpoints and summary events will be located during ' 44 'training and evaluation. 
Separate subdirectories `train` and `eval` ' 45 'will be created within this directory.') 46 flags.DEFINE_integer( 47 'num_steps', 200000, 48 'Number of training steps or `None` for infinite.') 49 flags.DEFINE_integer( 50 'eval_num_batches', None, 51 'Number of batches to use during evaluation or `None` for all batches ' 52 'in the data source.') 53 flags.DEFINE_integer( 54 'checkpoints_to_keep', 100, 55 'Maximum number of checkpoints to keep in `train` mode or 0 for infinite.') 56 flags.DEFINE_integer( 57 'keep_checkpoint_every_n_hours', 1, 58 'In addition to checkpoints_to_keep, keep a checkpoint every N hours.') 59 flags.DEFINE_string( 60 'mode', 'train', 61 'Which mode to use (`train` or `eval`).') 62 flags.DEFINE_string( 63 'config', '', 64 'The name of the config to use.') 65 flags.DEFINE_string( 66 'hparams', '', 67 'A comma-separated list of `name=value` hyperparameter values to merge ' 68 'with those in the config.') 69 flags.DEFINE_bool( 70 'cache_dataset', True, 71 'Whether to cache the dataset in memory for improved training speed. May ' 72 'cause memory errors for very large datasets.') 73 flags.DEFINE_integer( 74 'task', 0, 75 'The task number for this worker.') 76 flags.DEFINE_integer( 77 'num_ps_tasks', 0, 78 'The number of parameter server tasks.') 79 flags.DEFINE_integer( 80 'num_sync_workers', 0, 81 'The number of synchronized workers.') 82 flags.DEFINE_integer( 83 'num_data_threads', 4, 84 'The number of data preprocessing threads.') 85 flags.DEFINE_string( 86 'eval_dir_suffix', '', 87 'Suffix to add to eval output directory.') 88 flags.DEFINE_string( 89 'log', 'INFO', 90 'The threshold for what messages will be logged: ' 91 'DEBUG, INFO, WARN, ERROR, or FATAL.') 92 93 94 # Should not be called from within the graph to avoid redundant summaries. 95 def _trial_summary(hparams, examples_path, output_dir): 96 """Writes a tensorboard text summary of the trial.""" 97 98 examples_path_summary = tf.summary.text( 99 'examples_path', tf.constant(examples_path, name='examples_path'), 100 collections=[]) 101 102 hparams_dict = hparams.values() 103 104 # Create a markdown table from hparams. 
105 header = '| Key | Value |\n| :--- | :--- |\n' 106 keys = sorted(hparams_dict.keys()) 107 lines = ['| %s | %s |' % (key, str(hparams_dict[key])) for key in keys] 108 hparams_table = header + '\n'.join(lines) + '\n' 109 110 hparam_summary = tf.summary.text( 111 'hparams', tf.constant(hparams_table, name='hparams'), collections=[]) 112 113 with tf.Session() as sess: 114 writer = tf.summary.FileWriter(output_dir, graph=sess.graph) 115 writer.add_summary(examples_path_summary.eval()) 116 writer.add_summary(hparam_summary.eval()) 117 writer.close() 118 119 120 def _get_input_tensors(dataset, config): 121 """Get input tensors from dataset.""" 122 batch_size = config.hparams.batch_size 123 iterator = dataset.make_one_shot_iterator() 124 (input_sequence, output_sequence, control_sequence, 125 sequence_length) = iterator.get_next() 126 input_sequence.set_shape( 127 [batch_size, None, config.data_converter.input_depth]) 128 output_sequence.set_shape( 129 [batch_size, None, config.data_converter.output_depth]) 130 if not config.data_converter.control_depth: 131 control_sequence = None 132 else: 133 control_sequence.set_shape( 134 [batch_size, None, config.data_converter.control_depth]) 135 sequence_length.set_shape([batch_size] + sequence_length.shape[1:].as_list()) 136 137 return { 138 'input_sequence': input_sequence, 139 'output_sequence': output_sequence, 140 'control_sequence': control_sequence, 141 'sequence_length': sequence_length 142 } 143 144 145 def train(train_dir, 146 config, 147 dataset_fn, 148 checkpoints_to_keep=5, 149 keep_checkpoint_every_n_hours=1, 150 num_steps=None, 151 master='', 152 num_sync_workers=0, 153 num_ps_tasks=0, 154 task=0): 155 """Train loop.""" 156 tf.gfile.MakeDirs(train_dir) 157 is_chief = (task == 0) 158 if is_chief: 159 _trial_summary( 160 config.hparams, config.train_examples_path or config.tfds_name, 161 train_dir) 162 with tf.Graph().as_default(): 163 with tf.device(tf.train.replica_device_setter( 164 num_ps_tasks, merge_devices=True)): 165 166 model = config.model 167 model.build(config.hparams, 168 config.data_converter.output_depth, 169 is_training=True) 170 171 optimizer = model.train(**_get_input_tensors(dataset_fn(), config)) 172 173 hooks = [] 174 if num_sync_workers: 175 optimizer = tf.train.SyncReplicasOptimizer( 176 optimizer, 177 num_sync_workers) 178 hooks.append(optimizer.make_session_run_hook(is_chief)) 179 180 grads, var_list = list(zip(*optimizer.compute_gradients(model.loss))) 181 global_norm = tf.global_norm(grads) 182 tf.summary.scalar('global_norm', global_norm) 183 184 if config.hparams.clip_mode == 'value': 185 g = config.hparams.grad_clip 186 clipped_grads = [tf.clip_by_value(grad, -g, g) for grad in grads] 187 elif config.hparams.clip_mode == 'global_norm': 188 clipped_grads = tf.cond( 189 global_norm < config.hparams.grad_norm_clip_to_zero, 190 lambda: tf.clip_by_global_norm( # pylint:disable=g-long-lambda 191 grads, config.hparams.grad_clip, use_norm=global_norm)[0], 192 lambda: [tf.zeros(tf.shape(g)) for g in grads]) 193 else: 194 raise ValueError( 195 'Unknown clip_mode: {}'.format(config.hparams.clip_mode)) 196 train_op = optimizer.apply_gradients( 197 list(zip(clipped_grads, var_list)), 198 global_step=model.global_step, 199 name='train_step') 200 201 logging_dict = {'global_step': model.global_step, 202 'loss': model.loss} 203 204 hooks.append(tf.train.LoggingTensorHook(logging_dict, every_n_iter=100)) 205 if num_steps: 206 hooks.append(tf.train.StopAtStepHook(last_step=num_steps)) 207 208 scaffold = tf.train.Scaffold( 209 
saver=tf.train.Saver( 210 max_to_keep=checkpoints_to_keep, 211 keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)) 212 contrib_training.train( 213 train_op=train_op, 214 logdir=train_dir, 215 scaffold=scaffold, 216 hooks=hooks, 217 save_checkpoint_secs=60, 218 master=master, 219 is_chief=is_chief) 220 221 222 def evaluate(train_dir, 223 eval_dir, 224 config, 225 dataset_fn, 226 num_batches, 227 master=''): 228 """Evaluate the model repeatedly.""" 229 tf.gfile.MakeDirs(eval_dir) 230 231 _trial_summary( 232 config.hparams, config.eval_examples_path or config.tfds_name, eval_dir) 233 with tf.Graph().as_default(): 234 model = config.model 235 model.build(config.hparams, 236 config.data_converter.output_depth, 237 is_training=False) 238 239 eval_op = model.eval( 240 **_get_input_tensors(dataset_fn().take(num_batches), config)) 241 242 hooks = [ 243 contrib_training.StopAfterNEvalsHook(num_batches), 244 contrib_training.SummaryAtEndHook(eval_dir) 245 ] 246 contrib_training.evaluate_repeatedly( 247 train_dir, 248 eval_ops=eval_op, 249 hooks=hooks, 250 eval_interval_secs=60, 251 master=master) 252 253 254 def run(config_map, 255 tf_file_reader=tf.data.TFRecordDataset, 256 file_reader=tf.python_io.tf_record_iterator): 257 """Load model params, save config file and start trainer. 258 259 Args: 260 config_map: Dictionary mapping configuration name to Config object. 261 tf_file_reader: The tf.data.Dataset class to use for reading files. 262 file_reader: The Python reader to use for reading files. 263 264 Raises: 265 ValueError: if required flags are missing or invalid. 266 """ 267 if not FLAGS.run_dir: 268 raise ValueError('Invalid run directory: %s' % FLAGS.run_dir) 269 run_dir = os.path.expanduser(FLAGS.run_dir) 270 train_dir = os.path.join(run_dir, 'train') 271 272 if FLAGS.mode not in ['train', 'eval']: 273 raise ValueError('Invalid mode: %s' % FLAGS.mode) 274 275 if FLAGS.config not in config_map: 276 raise ValueError('Invalid config: %s' % FLAGS.config) 277 config = config_map[FLAGS.config] 278 if FLAGS.hparams: 279 config.hparams.parse(FLAGS.hparams) 280 config_update_map = {} 281 if FLAGS.examples_path: 282 config_update_map['%s_examples_path' % FLAGS.mode] = os.path.expanduser( 283 FLAGS.examples_path) 284 if FLAGS.tfds_name: 285 if FLAGS.examples_path: 286 raise ValueError( 287 'At most one of --examples_path and --tfds_name can be set.') 288 config_update_map['tfds_name'] = FLAGS.tfds_name 289 config_update_map['eval_examples_path'] = None 290 config_update_map['train_examples_path'] = None 291 config = configs.update_config(config, config_update_map) 292 if FLAGS.num_sync_workers: 293 config.hparams.batch_size //= FLAGS.num_sync_workers 294 295 if FLAGS.mode == 'train': 296 is_training = True 297 elif FLAGS.mode == 'eval': 298 is_training = False 299 else: 300 raise ValueError('Invalid mode: {}'.format(FLAGS.mode)) 301 302 def dataset_fn(): 303 return data.get_dataset( 304 config, 305 tf_file_reader=tf_file_reader, 306 num_threads=FLAGS.num_data_threads, 307 is_training=is_training, 308 cache_dataset=FLAGS.cache_dataset) 309 310 if is_training: 311 train( 312 train_dir, 313 config=config, 314 dataset_fn=dataset_fn, 315 checkpoints_to_keep=FLAGS.checkpoints_to_keep, 316 keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours, 317 num_steps=FLAGS.num_steps, 318 master=FLAGS.master, 319 num_sync_workers=FLAGS.num_sync_workers, 320 num_ps_tasks=FLAGS.num_ps_tasks, 321 task=FLAGS.task) 322 else: 323 num_batches = FLAGS.eval_num_batches or data.count_examples( 324 
config.eval_examples_path, 325 config.tfds_name, 326 config.data_converter, 327 file_reader) // config.hparams.batch_size 328 eval_dir = os.path.join(run_dir, 'eval' + FLAGS.eval_dir_suffix) 329 evaluate( 330 train_dir, 331 eval_dir, 332 config=config, 333 dataset_fn=dataset_fn, 334 num_batches=num_batches, 335 master=FLAGS.master) 336 337 338 def main(unused_argv): 339 tf.logging.set_verbosity(FLAGS.log) 340 run(configs.CONFIG_MAP) 341 342 343 def console_entry_point(): 344 tf.app.run(main) 345 346 347 if __name__ == '__main__': 348 console_entry_point() 349 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/magenta/models/music_vae/music_vae_train.py b/magenta/models/music_vae/music_vae_train.py --- a/magenta/models/music_vae/music_vae_train.py +++ b/magenta/models/music_vae/music_vae_train.py @@ -120,7 +120,7 @@ def _get_input_tensors(dataset, config): """Get input tensors from dataset.""" batch_size = config.hparams.batch_size - iterator = dataset.make_one_shot_iterator() + iterator = tf.data.make_one_shot_iterator(dataset) (input_sequence, output_sequence, control_sequence, sequence_length) = iterator.get_next() input_sequence.set_shape(
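As a rough sketch of why this one-line change resolves the reported `AttributeError` (the toy dataset below is illustrative and not part of the MusicVAE pipeline): v2-style dataset objects such as `PrefetchDataset` no longer expose `make_one_shot_iterator()` as a method, but the module-level helper in the v1 compatibility API accepts either dataset flavor.

```python
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # graph mode, matching the training script's usage

# Illustrative stand-in for data.get_dataset(): a small batched, prefetched dataset.
dataset = tf.data.Dataset.from_tensor_slices(list(range(8))).batch(4).prefetch(1)

# On v2-style datasets the .make_one_shot_iterator() method is missing; the
# module-level call below is the form the patch switches to, and it works for
# both the old and new dataset classes.
iterator = tf.data.make_one_shot_iterator(dataset)
batch = iterator.get_next()

with tf.Session() as sess:
    print(sess.run(batch))  # first batch: [0 1 2 3]
```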
{"golden_diff": "diff --git a/magenta/models/music_vae/music_vae_train.py b/magenta/models/music_vae/music_vae_train.py\n--- a/magenta/models/music_vae/music_vae_train.py\n+++ b/magenta/models/music_vae/music_vae_train.py\n@@ -120,7 +120,7 @@\n def _get_input_tensors(dataset, config):\n \"\"\"Get input tensors from dataset.\"\"\"\n batch_size = config.hparams.batch_size\n- iterator = dataset.make_one_shot_iterator()\n+ iterator = tf.data.make_one_shot_iterator(dataset)\n (input_sequence, output_sequence, control_sequence,\n sequence_length) = iterator.get_next()\n input_sequence.set_shape(\n", "issue": "Error training latest version of MusicVAE\n#### Steps to reproduce the issue\r\n\r\n- Create new magenta environment as well as cloning magenta repo:\r\n`curl https://raw.githubusercontent.com/tensorflow/magenta/master/magenta/tools/magenta-install.sh > /tmp/magenta-install.sh`\r\n`bash /tmp/magenta-install.sh`\r\n`git clone https://github.com/tensorflow/magenta.git`\r\n\r\n- Training simple model:\r\n`music_vae_train --config=cat-mel_2bar_small --run_dir=/tmp/cat-mel_2bar_small/ --mode=train --examples_path=data/notesequences.tfrecord`\r\n\r\n#### Expected results:\r\n\r\n- Training loss and global_step print on console.\r\n\r\n#### Actual output:\r\n\r\n- `line 123, in _get_input_tensors\r\n iterator = dataset.make_one_shot_iterator()\r\nAttributeError: 'PrefetchDataset' object has no attribute 'make_one_shot_iterator'`\n", "before_files": [{"content": "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"MusicVAE training script.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom magenta.models.music_vae import configs\nfrom magenta.models.music_vae import data\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.contrib import training as contrib_training\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'master', '',\n 'The TensorFlow master to use.')\nflags.DEFINE_string(\n 'examples_path', None,\n 'Path to a TFRecord file of NoteSequence examples. Overrides the config.')\nflags.DEFINE_string(\n 'tfds_name', None,\n 'TensorFlow Datasets dataset name to use. Overrides the config.')\nflags.DEFINE_string(\n 'run_dir', None,\n 'Path where checkpoints and summary events will be located during '\n 'training and evaluation. 
Separate subdirectories `train` and `eval` '\n 'will be created within this directory.')\nflags.DEFINE_integer(\n 'num_steps', 200000,\n 'Number of training steps or `None` for infinite.')\nflags.DEFINE_integer(\n 'eval_num_batches', None,\n 'Number of batches to use during evaluation or `None` for all batches '\n 'in the data source.')\nflags.DEFINE_integer(\n 'checkpoints_to_keep', 100,\n 'Maximum number of checkpoints to keep in `train` mode or 0 for infinite.')\nflags.DEFINE_integer(\n 'keep_checkpoint_every_n_hours', 1,\n 'In addition to checkpoints_to_keep, keep a checkpoint every N hours.')\nflags.DEFINE_string(\n 'mode', 'train',\n 'Which mode to use (`train` or `eval`).')\nflags.DEFINE_string(\n 'config', '',\n 'The name of the config to use.')\nflags.DEFINE_string(\n 'hparams', '',\n 'A comma-separated list of `name=value` hyperparameter values to merge '\n 'with those in the config.')\nflags.DEFINE_bool(\n 'cache_dataset', True,\n 'Whether to cache the dataset in memory for improved training speed. May '\n 'cause memory errors for very large datasets.')\nflags.DEFINE_integer(\n 'task', 0,\n 'The task number for this worker.')\nflags.DEFINE_integer(\n 'num_ps_tasks', 0,\n 'The number of parameter server tasks.')\nflags.DEFINE_integer(\n 'num_sync_workers', 0,\n 'The number of synchronized workers.')\nflags.DEFINE_integer(\n 'num_data_threads', 4,\n 'The number of data preprocessing threads.')\nflags.DEFINE_string(\n 'eval_dir_suffix', '',\n 'Suffix to add to eval output directory.')\nflags.DEFINE_string(\n 'log', 'INFO',\n 'The threshold for what messages will be logged: '\n 'DEBUG, INFO, WARN, ERROR, or FATAL.')\n\n\n# Should not be called from within the graph to avoid redundant summaries.\ndef _trial_summary(hparams, examples_path, output_dir):\n \"\"\"Writes a tensorboard text summary of the trial.\"\"\"\n\n examples_path_summary = tf.summary.text(\n 'examples_path', tf.constant(examples_path, name='examples_path'),\n collections=[])\n\n hparams_dict = hparams.values()\n\n # Create a markdown table from hparams.\n header = '| Key | Value |\\n| :--- | :--- |\\n'\n keys = sorted(hparams_dict.keys())\n lines = ['| %s | %s |' % (key, str(hparams_dict[key])) for key in keys]\n hparams_table = header + '\\n'.join(lines) + '\\n'\n\n hparam_summary = tf.summary.text(\n 'hparams', tf.constant(hparams_table, name='hparams'), collections=[])\n\n with tf.Session() as sess:\n writer = tf.summary.FileWriter(output_dir, graph=sess.graph)\n writer.add_summary(examples_path_summary.eval())\n writer.add_summary(hparam_summary.eval())\n writer.close()\n\n\ndef _get_input_tensors(dataset, config):\n \"\"\"Get input tensors from dataset.\"\"\"\n batch_size = config.hparams.batch_size\n iterator = dataset.make_one_shot_iterator()\n (input_sequence, output_sequence, control_sequence,\n sequence_length) = iterator.get_next()\n input_sequence.set_shape(\n [batch_size, None, config.data_converter.input_depth])\n output_sequence.set_shape(\n [batch_size, None, config.data_converter.output_depth])\n if not config.data_converter.control_depth:\n control_sequence = None\n else:\n control_sequence.set_shape(\n [batch_size, None, config.data_converter.control_depth])\n sequence_length.set_shape([batch_size] + sequence_length.shape[1:].as_list())\n\n return {\n 'input_sequence': input_sequence,\n 'output_sequence': output_sequence,\n 'control_sequence': control_sequence,\n 'sequence_length': sequence_length\n }\n\n\ndef train(train_dir,\n config,\n dataset_fn,\n checkpoints_to_keep=5,\n 
keep_checkpoint_every_n_hours=1,\n num_steps=None,\n master='',\n num_sync_workers=0,\n num_ps_tasks=0,\n task=0):\n \"\"\"Train loop.\"\"\"\n tf.gfile.MakeDirs(train_dir)\n is_chief = (task == 0)\n if is_chief:\n _trial_summary(\n config.hparams, config.train_examples_path or config.tfds_name,\n train_dir)\n with tf.Graph().as_default():\n with tf.device(tf.train.replica_device_setter(\n num_ps_tasks, merge_devices=True)):\n\n model = config.model\n model.build(config.hparams,\n config.data_converter.output_depth,\n is_training=True)\n\n optimizer = model.train(**_get_input_tensors(dataset_fn(), config))\n\n hooks = []\n if num_sync_workers:\n optimizer = tf.train.SyncReplicasOptimizer(\n optimizer,\n num_sync_workers)\n hooks.append(optimizer.make_session_run_hook(is_chief))\n\n grads, var_list = list(zip(*optimizer.compute_gradients(model.loss)))\n global_norm = tf.global_norm(grads)\n tf.summary.scalar('global_norm', global_norm)\n\n if config.hparams.clip_mode == 'value':\n g = config.hparams.grad_clip\n clipped_grads = [tf.clip_by_value(grad, -g, g) for grad in grads]\n elif config.hparams.clip_mode == 'global_norm':\n clipped_grads = tf.cond(\n global_norm < config.hparams.grad_norm_clip_to_zero,\n lambda: tf.clip_by_global_norm( # pylint:disable=g-long-lambda\n grads, config.hparams.grad_clip, use_norm=global_norm)[0],\n lambda: [tf.zeros(tf.shape(g)) for g in grads])\n else:\n raise ValueError(\n 'Unknown clip_mode: {}'.format(config.hparams.clip_mode))\n train_op = optimizer.apply_gradients(\n list(zip(clipped_grads, var_list)),\n global_step=model.global_step,\n name='train_step')\n\n logging_dict = {'global_step': model.global_step,\n 'loss': model.loss}\n\n hooks.append(tf.train.LoggingTensorHook(logging_dict, every_n_iter=100))\n if num_steps:\n hooks.append(tf.train.StopAtStepHook(last_step=num_steps))\n\n scaffold = tf.train.Scaffold(\n saver=tf.train.Saver(\n max_to_keep=checkpoints_to_keep,\n keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours))\n contrib_training.train(\n train_op=train_op,\n logdir=train_dir,\n scaffold=scaffold,\n hooks=hooks,\n save_checkpoint_secs=60,\n master=master,\n is_chief=is_chief)\n\n\ndef evaluate(train_dir,\n eval_dir,\n config,\n dataset_fn,\n num_batches,\n master=''):\n \"\"\"Evaluate the model repeatedly.\"\"\"\n tf.gfile.MakeDirs(eval_dir)\n\n _trial_summary(\n config.hparams, config.eval_examples_path or config.tfds_name, eval_dir)\n with tf.Graph().as_default():\n model = config.model\n model.build(config.hparams,\n config.data_converter.output_depth,\n is_training=False)\n\n eval_op = model.eval(\n **_get_input_tensors(dataset_fn().take(num_batches), config))\n\n hooks = [\n contrib_training.StopAfterNEvalsHook(num_batches),\n contrib_training.SummaryAtEndHook(eval_dir)\n ]\n contrib_training.evaluate_repeatedly(\n train_dir,\n eval_ops=eval_op,\n hooks=hooks,\n eval_interval_secs=60,\n master=master)\n\n\ndef run(config_map,\n tf_file_reader=tf.data.TFRecordDataset,\n file_reader=tf.python_io.tf_record_iterator):\n \"\"\"Load model params, save config file and start trainer.\n\n Args:\n config_map: Dictionary mapping configuration name to Config object.\n tf_file_reader: The tf.data.Dataset class to use for reading files.\n file_reader: The Python reader to use for reading files.\n\n Raises:\n ValueError: if required flags are missing or invalid.\n \"\"\"\n if not FLAGS.run_dir:\n raise ValueError('Invalid run directory: %s' % FLAGS.run_dir)\n run_dir = os.path.expanduser(FLAGS.run_dir)\n train_dir = os.path.join(run_dir, 
'train')\n\n if FLAGS.mode not in ['train', 'eval']:\n raise ValueError('Invalid mode: %s' % FLAGS.mode)\n\n if FLAGS.config not in config_map:\n raise ValueError('Invalid config: %s' % FLAGS.config)\n config = config_map[FLAGS.config]\n if FLAGS.hparams:\n config.hparams.parse(FLAGS.hparams)\n config_update_map = {}\n if FLAGS.examples_path:\n config_update_map['%s_examples_path' % FLAGS.mode] = os.path.expanduser(\n FLAGS.examples_path)\n if FLAGS.tfds_name:\n if FLAGS.examples_path:\n raise ValueError(\n 'At most one of --examples_path and --tfds_name can be set.')\n config_update_map['tfds_name'] = FLAGS.tfds_name\n config_update_map['eval_examples_path'] = None\n config_update_map['train_examples_path'] = None\n config = configs.update_config(config, config_update_map)\n if FLAGS.num_sync_workers:\n config.hparams.batch_size //= FLAGS.num_sync_workers\n\n if FLAGS.mode == 'train':\n is_training = True\n elif FLAGS.mode == 'eval':\n is_training = False\n else:\n raise ValueError('Invalid mode: {}'.format(FLAGS.mode))\n\n def dataset_fn():\n return data.get_dataset(\n config,\n tf_file_reader=tf_file_reader,\n num_threads=FLAGS.num_data_threads,\n is_training=is_training,\n cache_dataset=FLAGS.cache_dataset)\n\n if is_training:\n train(\n train_dir,\n config=config,\n dataset_fn=dataset_fn,\n checkpoints_to_keep=FLAGS.checkpoints_to_keep,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,\n num_steps=FLAGS.num_steps,\n master=FLAGS.master,\n num_sync_workers=FLAGS.num_sync_workers,\n num_ps_tasks=FLAGS.num_ps_tasks,\n task=FLAGS.task)\n else:\n num_batches = FLAGS.eval_num_batches or data.count_examples(\n config.eval_examples_path,\n config.tfds_name,\n config.data_converter,\n file_reader) // config.hparams.batch_size\n eval_dir = os.path.join(run_dir, 'eval' + FLAGS.eval_dir_suffix)\n evaluate(\n train_dir,\n eval_dir,\n config=config,\n dataset_fn=dataset_fn,\n num_batches=num_batches,\n master=FLAGS.master)\n\n\ndef main(unused_argv):\n tf.logging.set_verbosity(FLAGS.log)\n run(configs.CONFIG_MAP)\n\n\ndef console_entry_point():\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n", "path": "magenta/models/music_vae/music_vae_train.py"}], "after_files": [{"content": "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"MusicVAE training script.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom magenta.models.music_vae import configs\nfrom magenta.models.music_vae import data\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.contrib import training as contrib_training\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'master', '',\n 'The TensorFlow master to use.')\nflags.DEFINE_string(\n 'examples_path', None,\n 'Path to a TFRecord file of NoteSequence examples. 
Overrides the config.')\nflags.DEFINE_string(\n 'tfds_name', None,\n 'TensorFlow Datasets dataset name to use. Overrides the config.')\nflags.DEFINE_string(\n 'run_dir', None,\n 'Path where checkpoints and summary events will be located during '\n 'training and evaluation. Separate subdirectories `train` and `eval` '\n 'will be created within this directory.')\nflags.DEFINE_integer(\n 'num_steps', 200000,\n 'Number of training steps or `None` for infinite.')\nflags.DEFINE_integer(\n 'eval_num_batches', None,\n 'Number of batches to use during evaluation or `None` for all batches '\n 'in the data source.')\nflags.DEFINE_integer(\n 'checkpoints_to_keep', 100,\n 'Maximum number of checkpoints to keep in `train` mode or 0 for infinite.')\nflags.DEFINE_integer(\n 'keep_checkpoint_every_n_hours', 1,\n 'In addition to checkpoints_to_keep, keep a checkpoint every N hours.')\nflags.DEFINE_string(\n 'mode', 'train',\n 'Which mode to use (`train` or `eval`).')\nflags.DEFINE_string(\n 'config', '',\n 'The name of the config to use.')\nflags.DEFINE_string(\n 'hparams', '',\n 'A comma-separated list of `name=value` hyperparameter values to merge '\n 'with those in the config.')\nflags.DEFINE_bool(\n 'cache_dataset', True,\n 'Whether to cache the dataset in memory for improved training speed. May '\n 'cause memory errors for very large datasets.')\nflags.DEFINE_integer(\n 'task', 0,\n 'The task number for this worker.')\nflags.DEFINE_integer(\n 'num_ps_tasks', 0,\n 'The number of parameter server tasks.')\nflags.DEFINE_integer(\n 'num_sync_workers', 0,\n 'The number of synchronized workers.')\nflags.DEFINE_integer(\n 'num_data_threads', 4,\n 'The number of data preprocessing threads.')\nflags.DEFINE_string(\n 'eval_dir_suffix', '',\n 'Suffix to add to eval output directory.')\nflags.DEFINE_string(\n 'log', 'INFO',\n 'The threshold for what messages will be logged: '\n 'DEBUG, INFO, WARN, ERROR, or FATAL.')\n\n\n# Should not be called from within the graph to avoid redundant summaries.\ndef _trial_summary(hparams, examples_path, output_dir):\n \"\"\"Writes a tensorboard text summary of the trial.\"\"\"\n\n examples_path_summary = tf.summary.text(\n 'examples_path', tf.constant(examples_path, name='examples_path'),\n collections=[])\n\n hparams_dict = hparams.values()\n\n # Create a markdown table from hparams.\n header = '| Key | Value |\\n| :--- | :--- |\\n'\n keys = sorted(hparams_dict.keys())\n lines = ['| %s | %s |' % (key, str(hparams_dict[key])) for key in keys]\n hparams_table = header + '\\n'.join(lines) + '\\n'\n\n hparam_summary = tf.summary.text(\n 'hparams', tf.constant(hparams_table, name='hparams'), collections=[])\n\n with tf.Session() as sess:\n writer = tf.summary.FileWriter(output_dir, graph=sess.graph)\n writer.add_summary(examples_path_summary.eval())\n writer.add_summary(hparam_summary.eval())\n writer.close()\n\n\ndef _get_input_tensors(dataset, config):\n \"\"\"Get input tensors from dataset.\"\"\"\n batch_size = config.hparams.batch_size\n iterator = tf.data.make_one_shot_iterator(dataset)\n (input_sequence, output_sequence, control_sequence,\n sequence_length) = iterator.get_next()\n input_sequence.set_shape(\n [batch_size, None, config.data_converter.input_depth])\n output_sequence.set_shape(\n [batch_size, None, config.data_converter.output_depth])\n if not config.data_converter.control_depth:\n control_sequence = None\n else:\n control_sequence.set_shape(\n [batch_size, None, config.data_converter.control_depth])\n sequence_length.set_shape([batch_size] + 
sequence_length.shape[1:].as_list())\n\n return {\n 'input_sequence': input_sequence,\n 'output_sequence': output_sequence,\n 'control_sequence': control_sequence,\n 'sequence_length': sequence_length\n }\n\n\ndef train(train_dir,\n config,\n dataset_fn,\n checkpoints_to_keep=5,\n keep_checkpoint_every_n_hours=1,\n num_steps=None,\n master='',\n num_sync_workers=0,\n num_ps_tasks=0,\n task=0):\n \"\"\"Train loop.\"\"\"\n tf.gfile.MakeDirs(train_dir)\n is_chief = (task == 0)\n if is_chief:\n _trial_summary(\n config.hparams, config.train_examples_path or config.tfds_name,\n train_dir)\n with tf.Graph().as_default():\n with tf.device(tf.train.replica_device_setter(\n num_ps_tasks, merge_devices=True)):\n\n model = config.model\n model.build(config.hparams,\n config.data_converter.output_depth,\n is_training=True)\n\n optimizer = model.train(**_get_input_tensors(dataset_fn(), config))\n\n hooks = []\n if num_sync_workers:\n optimizer = tf.train.SyncReplicasOptimizer(\n optimizer,\n num_sync_workers)\n hooks.append(optimizer.make_session_run_hook(is_chief))\n\n grads, var_list = list(zip(*optimizer.compute_gradients(model.loss)))\n global_norm = tf.global_norm(grads)\n tf.summary.scalar('global_norm', global_norm)\n\n if config.hparams.clip_mode == 'value':\n g = config.hparams.grad_clip\n clipped_grads = [tf.clip_by_value(grad, -g, g) for grad in grads]\n elif config.hparams.clip_mode == 'global_norm':\n clipped_grads = tf.cond(\n global_norm < config.hparams.grad_norm_clip_to_zero,\n lambda: tf.clip_by_global_norm( # pylint:disable=g-long-lambda\n grads, config.hparams.grad_clip, use_norm=global_norm)[0],\n lambda: [tf.zeros(tf.shape(g)) for g in grads])\n else:\n raise ValueError(\n 'Unknown clip_mode: {}'.format(config.hparams.clip_mode))\n train_op = optimizer.apply_gradients(\n list(zip(clipped_grads, var_list)),\n global_step=model.global_step,\n name='train_step')\n\n logging_dict = {'global_step': model.global_step,\n 'loss': model.loss}\n\n hooks.append(tf.train.LoggingTensorHook(logging_dict, every_n_iter=100))\n if num_steps:\n hooks.append(tf.train.StopAtStepHook(last_step=num_steps))\n\n scaffold = tf.train.Scaffold(\n saver=tf.train.Saver(\n max_to_keep=checkpoints_to_keep,\n keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours))\n contrib_training.train(\n train_op=train_op,\n logdir=train_dir,\n scaffold=scaffold,\n hooks=hooks,\n save_checkpoint_secs=60,\n master=master,\n is_chief=is_chief)\n\n\ndef evaluate(train_dir,\n eval_dir,\n config,\n dataset_fn,\n num_batches,\n master=''):\n \"\"\"Evaluate the model repeatedly.\"\"\"\n tf.gfile.MakeDirs(eval_dir)\n\n _trial_summary(\n config.hparams, config.eval_examples_path or config.tfds_name, eval_dir)\n with tf.Graph().as_default():\n model = config.model\n model.build(config.hparams,\n config.data_converter.output_depth,\n is_training=False)\n\n eval_op = model.eval(\n **_get_input_tensors(dataset_fn().take(num_batches), config))\n\n hooks = [\n contrib_training.StopAfterNEvalsHook(num_batches),\n contrib_training.SummaryAtEndHook(eval_dir)\n ]\n contrib_training.evaluate_repeatedly(\n train_dir,\n eval_ops=eval_op,\n hooks=hooks,\n eval_interval_secs=60,\n master=master)\n\n\ndef run(config_map,\n tf_file_reader=tf.data.TFRecordDataset,\n file_reader=tf.python_io.tf_record_iterator):\n \"\"\"Load model params, save config file and start trainer.\n\n Args:\n config_map: Dictionary mapping configuration name to Config object.\n tf_file_reader: The tf.data.Dataset class to use for reading files.\n file_reader: The Python 
reader to use for reading files.\n\n Raises:\n ValueError: if required flags are missing or invalid.\n \"\"\"\n if not FLAGS.run_dir:\n raise ValueError('Invalid run directory: %s' % FLAGS.run_dir)\n run_dir = os.path.expanduser(FLAGS.run_dir)\n train_dir = os.path.join(run_dir, 'train')\n\n if FLAGS.mode not in ['train', 'eval']:\n raise ValueError('Invalid mode: %s' % FLAGS.mode)\n\n if FLAGS.config not in config_map:\n raise ValueError('Invalid config: %s' % FLAGS.config)\n config = config_map[FLAGS.config]\n if FLAGS.hparams:\n config.hparams.parse(FLAGS.hparams)\n config_update_map = {}\n if FLAGS.examples_path:\n config_update_map['%s_examples_path' % FLAGS.mode] = os.path.expanduser(\n FLAGS.examples_path)\n if FLAGS.tfds_name:\n if FLAGS.examples_path:\n raise ValueError(\n 'At most one of --examples_path and --tfds_name can be set.')\n config_update_map['tfds_name'] = FLAGS.tfds_name\n config_update_map['eval_examples_path'] = None\n config_update_map['train_examples_path'] = None\n config = configs.update_config(config, config_update_map)\n if FLAGS.num_sync_workers:\n config.hparams.batch_size //= FLAGS.num_sync_workers\n\n if FLAGS.mode == 'train':\n is_training = True\n elif FLAGS.mode == 'eval':\n is_training = False\n else:\n raise ValueError('Invalid mode: {}'.format(FLAGS.mode))\n\n def dataset_fn():\n return data.get_dataset(\n config,\n tf_file_reader=tf_file_reader,\n num_threads=FLAGS.num_data_threads,\n is_training=is_training,\n cache_dataset=FLAGS.cache_dataset)\n\n if is_training:\n train(\n train_dir,\n config=config,\n dataset_fn=dataset_fn,\n checkpoints_to_keep=FLAGS.checkpoints_to_keep,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,\n num_steps=FLAGS.num_steps,\n master=FLAGS.master,\n num_sync_workers=FLAGS.num_sync_workers,\n num_ps_tasks=FLAGS.num_ps_tasks,\n task=FLAGS.task)\n else:\n num_batches = FLAGS.eval_num_batches or data.count_examples(\n config.eval_examples_path,\n config.tfds_name,\n config.data_converter,\n file_reader) // config.hparams.batch_size\n eval_dir = os.path.join(run_dir, 'eval' + FLAGS.eval_dir_suffix)\n evaluate(\n train_dir,\n eval_dir,\n config=config,\n dataset_fn=dataset_fn,\n num_batches=num_batches,\n master=FLAGS.master)\n\n\ndef main(unused_argv):\n tf.logging.set_verbosity(FLAGS.log)\n run(configs.CONFIG_MAP)\n\n\ndef console_entry_point():\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n", "path": "magenta/models/music_vae/music_vae_train.py"}]}
4,030
145
gh_patches_debug_19706
rasdani/github-patches
git_diff
buildbot__buildbot-2332
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Scheduled weekly dependency update for week 28 ## Updates Here's a list of all the updates bundled in this pull request. I've added some links to make it easier for you to find all the information you need. <table align="center"> <tr> <td><b>boto</b></td> <td align="center">2.47.0</td> <td align="center">&raquo;</td> <td align="center">2.48.0</td> <td> <a href="https://pypi.python.org/pypi/boto">PyPI</a> | <a href="https://pyup.io/changelogs/boto/">Changelog</a> | <a href="https://github.com/boto/boto/">Repo</a> </td> <tr> <td><b>botocore</b></td> <td align="center">1.5.78</td> <td align="center">&raquo;</td> <td align="center">1.5.80</td> <td> <a href="https://pypi.python.org/pypi/botocore">PyPI</a> | <a href="https://pyup.io/changelogs/botocore/">Changelog</a> | <a href="https://github.com/boto/botocore">Repo</a> </td> <tr> <td><b>Pillow</b></td> <td align="center">4.2.0</td> <td align="center">&raquo;</td> <td align="center">4.2.1</td> <td> <a href="https://pypi.python.org/pypi/pillow">PyPI</a> | <a href="https://pyup.io/changelogs/pillow/">Changelog</a> | <a href="https://python-pillow.org">Homepage</a> </td> <tr> <td><b>pycparser</b></td> <td align="center">2.17</td> <td align="center">&raquo;</td> <td align="center">2.18</td> <td> <a href="https://pypi.python.org/pypi/pycparser">PyPI</a> | <a href="https://pyup.io/changelogs/pycparser/">Changelog</a> | <a href="https://github.com/eliben/pycparser">Repo</a> </td> <tr> <td><b>buildbot-www</b></td> <td align="center">0.9.9.post1</td> <td align="center">&raquo;</td> <td align="center">0.9.9.post2</td> <td> <a href="https://pypi.python.org/pypi/buildbot-www">PyPI</a> | <a href="http://buildbot.net/">Homepage</a> </td> </tr> </table> ## Changelogs ### boto 2.47.0 -> 2.48.0 >### 2.48.0 >============ >:date: 2017/06/29 >This release addresses a few S3 related bugs as well as a bug with the recent >endpoint heuristics feature. >Changes >------- >* Fix generate_url() AttributeError when using anonymous connections (:issue:`3734`, :sha:`83481807`) >* Use RegionInfo by default with heuristics (:issue:`3737`, :sha:`0a9b1140`) >* Allow specifying s3 host from boto config file. (:issue:`3738`, :sha:`dcfc7512`) ### botocore 1.5.78 -> 1.5.80 >### 1.5.80 >====== >* api-change:``kinesis``: Update kinesis client to latest version >* api-change:``kms``: Update kms client to latest version >* api-change:``ds``: Update ds client to latest version >* api-change:``ssm``: Update ssm client to latest version >### 1.5.79 >====== >* api-change:``cloudwatch``: Update cloudwatch client to latest version >* api-change:``route53``: Update route53 client to latest version ### Pillow 4.2.0 -> 4.2.1 >### 4.2.1 >------------------ >- CI: Fix version specification and test on CI for PyPy/Windows 2608 > [wiredfool] ### pycparser 2.17 -> 2.18 >### 2.18 > - PR 161 &amp; 184: Update bundled PLY version to 3.10 > - PR 158: Add support for the __int128 type. > - PR 169: Handle more tricky TYPEID in declarators. > - PR 178: Add columns to the coord of each node That's it for now! Happy merging! 🤖 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `master/buildbot/data/forceschedulers.py` Content: ``` 1 # This file is part of Buildbot. 
Buildbot is free software: you can 2 # redistribute it and/or modify it under the terms of the GNU General Public 3 # License as published by the Free Software Foundation, version 2. 4 # 5 # This program is distributed in the hope that it will be useful, but WITHOUT 6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more 8 # details. 9 # 10 # You should have received a copy of the GNU General Public License along with 11 # this program; if not, write to the Free Software Foundation, Inc., 51 12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 13 # 14 # Copyright Buildbot Team Members 15 16 from twisted.internet import defer 17 18 from buildbot.data import base 19 from buildbot.data import types 20 from buildbot.schedulers import forcesched 21 from buildbot.www.rest import JSONRPC_CODES 22 from buildbot.www.rest import BadJsonRpc2 23 24 25 def forceScheduler2Data(sched): 26 ret = dict(all_fields=[], 27 name=unicode(sched.name), 28 button_name=unicode(sched.buttonName), 29 label=unicode(sched.label), 30 builder_names=map(unicode, sched.builderNames)) 31 ret["all_fields"] = [field.getSpec() for field in sched.all_fields] 32 return ret 33 34 35 class ForceSchedulerEndpoint(base.Endpoint): 36 37 isCollection = False 38 pathPatterns = """ 39 /forceschedulers/i:schedulername 40 """ 41 42 def findForceScheduler(self, schedulername): 43 # eventually this may be db backed. This is why the API is async 44 for sched in self.master.allSchedulers(): 45 if sched.name == schedulername and isinstance(sched, forcesched.ForceScheduler): 46 return defer.succeed(sched) 47 48 @defer.inlineCallbacks 49 def get(self, resultSpec, kwargs): 50 sched = yield self.findForceScheduler(kwargs['schedulername']) 51 if sched is not None: 52 defer.returnValue(forceScheduler2Data(sched)) 53 54 @defer.inlineCallbacks 55 def control(self, action, args, kwargs): 56 if action == "force": 57 sched = yield self.findForceScheduler(kwargs['schedulername']) 58 if "owner" not in args: 59 args['owner'] = "user" 60 try: 61 res = yield sched.force(**args) 62 defer.returnValue(res) 63 except forcesched.CollectedValidationError as e: 64 raise BadJsonRpc2(e.errors, JSONRPC_CODES["invalid_params"]) 65 defer.returnValue(None) 66 67 68 class ForceSchedulersEndpoint(base.Endpoint): 69 70 isCollection = True 71 pathPatterns = """ 72 /forceschedulers 73 /builders/:builderid/forceschedulers 74 """ 75 rootLinkName = 'forceschedulers' 76 77 @defer.inlineCallbacks 78 def get(self, resultSpec, kwargs): 79 l = [] 80 builderid = kwargs.get('builderid', None) 81 if builderid is not None: 82 bdict = yield self.master.db.builders.getBuilder(builderid) 83 for sched in self.master.allSchedulers(): 84 if isinstance(sched, forcesched.ForceScheduler): 85 if builderid is not None and bdict['name'] not in sched.builderNames: 86 continue 87 l.append(forceScheduler2Data(sched)) 88 defer.returnValue(l) 89 90 91 class ForceScheduler(base.ResourceType): 92 93 name = "forcescheduler" 94 plural = "forceschedulers" 95 endpoints = [ForceSchedulerEndpoint, ForceSchedulersEndpoint] 96 keyFields = [] 97 98 class EntityType(types.Entity): 99 name = types.Identifier(20) 100 button_name = types.String() 101 label = types.String() 102 builder_names = types.List(of=types.Identifier(20)) 103 all_fields = types.List(of=types.JsonObject()) 104 entityType = EntityType(name) 105 ``` Path: `master/buildbot/data/builders.py` Content: ``` 1 # This file is part of Buildbot. 
Buildbot is free software: you can 2 # redistribute it and/or modify it under the terms of the GNU General Public 3 # License as published by the Free Software Foundation, version 2. 4 # 5 # This program is distributed in the hope that it will be useful, but WITHOUT 6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more 8 # details. 9 # 10 # You should have received a copy of the GNU General Public License along with 11 # this program; if not, write to the Free Software Foundation, Inc., 51 12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 13 # 14 # Copyright Buildbot Team Members 15 16 from twisted.internet import defer 17 18 from buildbot.data import base 19 from buildbot.data import types 20 21 22 class BuilderEndpoint(base.Endpoint): 23 24 isCollection = False 25 pathPatterns = """ 26 /builders/n:builderid 27 /masters/n:masterid/builders/n:builderid 28 """ 29 30 @defer.inlineCallbacks 31 def get(self, resultSpec, kwargs): 32 builderid = kwargs['builderid'] 33 bdict = yield self.master.db.builders.getBuilder(builderid) 34 if not bdict: 35 defer.returnValue(None) 36 return 37 if 'masterid' in kwargs: 38 if kwargs['masterid'] not in bdict['masterids']: 39 defer.returnValue(None) 40 return 41 defer.returnValue( 42 dict(builderid=builderid, 43 name=bdict['name'], 44 masterids=bdict['masterids'], 45 description=bdict['description'], 46 tags=bdict['tags'])) 47 48 49 class BuildersEndpoint(base.Endpoint): 50 51 isCollection = True 52 rootLinkName = 'builders' 53 pathPatterns = """ 54 /builders 55 /masters/n:masterid/builders 56 """ 57 58 @defer.inlineCallbacks 59 def get(self, resultSpec, kwargs): 60 bdicts = yield self.master.db.builders.getBuilders( 61 masterid=kwargs.get('masterid', None)) 62 defer.returnValue([ 63 dict(builderid=bd['id'], 64 name=bd['name'], 65 masterids=bd['masterids'], 66 description=bd['description'], 67 tags=bd['tags']) 68 for bd in bdicts]) 69 70 71 class Builder(base.ResourceType): 72 73 name = "builder" 74 plural = "builders" 75 endpoints = [BuilderEndpoint, BuildersEndpoint] 76 keyFields = ['builderid'] 77 78 class EntityType(types.Entity): 79 builderid = types.Integer() 80 name = types.Identifier(20) 81 masterids = types.List(of=types.Integer()) 82 description = types.NoneOk(types.String()) 83 tags = types.List(of=types.String()) 84 entityType = EntityType(name) 85 86 def __init__(self, master): 87 base.ResourceType.__init__(self, master) 88 89 @base.updateMethod 90 def findBuilderId(self, name): 91 return self.master.db.builders.findBuilderId(name) 92 93 @base.updateMethod 94 def updateBuilderInfo(self, builderid, description, tags): 95 return self.master.db.builders.updateBuilderInfo(builderid, description, tags) 96 97 @base.updateMethod 98 @defer.inlineCallbacks 99 def updateBuilderList(self, masterid, builderNames): 100 # get the "current" list of builders for this master, so we know what 101 # changes to make. Race conditions here aren't a great worry, as this 102 # is the only master inserting or deleting these records. 
103 builders = yield self.master.db.builders.getBuilders(masterid=masterid) 104 105 # figure out what to remove and remove it 106 builderNames_set = set(builderNames) 107 for bldr in builders: 108 if bldr['name'] not in builderNames_set: 109 builderid = bldr['id'] 110 yield self.master.db.builders.removeBuilderMaster( 111 masterid=masterid, builderid=builderid) 112 self.master.mq.produce(('builders', str(builderid), 'stopped'), 113 dict(builderid=builderid, masterid=masterid, 114 name=bldr['name'])) 115 else: 116 builderNames_set.remove(bldr['name']) 117 118 # now whatever's left in builderNames_set is new 119 for name in builderNames_set: 120 builderid = yield self.master.db.builders.findBuilderId(name) 121 yield self.master.db.builders.addBuilderMaster( 122 masterid=masterid, builderid=builderid) 123 self.master.mq.produce(('builders', str(builderid), 'started'), 124 dict(builderid=builderid, masterid=masterid, name=name)) 125 126 @defer.inlineCallbacks 127 def _masterDeactivated(self, masterid): 128 # called from the masters rtype to indicate that the given master is 129 # deactivated 130 yield self.updateBuilderList(masterid, []) 131 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/master/buildbot/data/builders.py b/master/buildbot/data/builders.py --- a/master/buildbot/data/builders.py +++ b/master/buildbot/data/builders.py @@ -77,7 +77,7 @@ class EntityType(types.Entity): builderid = types.Integer() - name = types.Identifier(20) + name = types.Identifier(50) masterids = types.List(of=types.Integer()) description = types.NoneOk(types.String()) tags = types.List(of=types.String()) diff --git a/master/buildbot/data/forceschedulers.py b/master/buildbot/data/forceschedulers.py --- a/master/buildbot/data/forceschedulers.py +++ b/master/buildbot/data/forceschedulers.py @@ -96,9 +96,9 @@ keyFields = [] class EntityType(types.Entity): - name = types.Identifier(20) + name = types.Identifier(50) button_name = types.String() label = types.String() - builder_names = types.List(of=types.Identifier(20)) + builder_names = types.List(of=types.Identifier(50)) all_fields = types.List(of=types.JsonObject()) entityType = EntityType(name)
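The golden diff above does one thing: it widens the `types.Identifier` length bound for builder names from 20 to 50 characters. As a rough sketch of why that matters, the snippet below models a length-capped identifier check; the regex and helper are assumptions for illustration, not Buildbot's actual `Identifier` implementation.

```python
import re

# Hypothetical stand-in for buildbot.data.types.Identifier: it only models
# "identifier-shaped string with a maximum length", nothing more.
IDENT_RE = re.compile(r"^[A-Za-z_][A-Za-z0-9._-]*$")

def accepts(value: str, max_length: int) -> bool:
    """Return True if value looks like an identifier and fits within max_length."""
    return bool(IDENT_RE.match(value)) and len(value) <= max_length

name = "nightly-integration-tests-postgres"  # 34 characters, a plausible builder name
print(accepts(name, 20))  # False: rejected under Identifier(20)
print(accepts(name, 50))  # True: accepted after the bump to Identifier(50)
```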
{"golden_diff": "diff --git a/master/buildbot/data/builders.py b/master/buildbot/data/builders.py\n--- a/master/buildbot/data/builders.py\n+++ b/master/buildbot/data/builders.py\n@@ -77,7 +77,7 @@\n \n class EntityType(types.Entity):\n builderid = types.Integer()\n- name = types.Identifier(20)\n+ name = types.Identifier(50)\n masterids = types.List(of=types.Integer())\n description = types.NoneOk(types.String())\n tags = types.List(of=types.String())\ndiff --git a/master/buildbot/data/forceschedulers.py b/master/buildbot/data/forceschedulers.py\n--- a/master/buildbot/data/forceschedulers.py\n+++ b/master/buildbot/data/forceschedulers.py\n@@ -96,9 +96,9 @@\n keyFields = []\n \n class EntityType(types.Entity):\n- name = types.Identifier(20)\n+ name = types.Identifier(50)\n button_name = types.String()\n label = types.String()\n- builder_names = types.List(of=types.Identifier(20))\n+ builder_names = types.List(of=types.Identifier(50))\n all_fields = types.List(of=types.JsonObject())\n entityType = EntityType(name)\n", "issue": "Scheduled weekly dependency update for week 28\n\n\n\n## Updates\nHere's a list of all the updates bundled in this pull request. I've added some links to make it easier for you to find all the information you need.\n<table align=\"center\">\n\n<tr>\n<td><b>boto</b></td>\n<td align=\"center\">2.47.0</td>\n<td align=\"center\">&raquo;</td>\n<td align=\"center\">2.48.0</td>\n<td>\n <a href=\"https://pypi.python.org/pypi/boto\">PyPI</a> | <a href=\"https://pyup.io/changelogs/boto/\">Changelog</a> | <a href=\"https://github.com/boto/boto/\">Repo</a> \n\n</td>\n\n<tr>\n<td><b>botocore</b></td>\n<td align=\"center\">1.5.78</td>\n<td align=\"center\">&raquo;</td>\n<td align=\"center\">1.5.80</td>\n<td>\n <a href=\"https://pypi.python.org/pypi/botocore\">PyPI</a> | <a href=\"https://pyup.io/changelogs/botocore/\">Changelog</a> | <a href=\"https://github.com/boto/botocore\">Repo</a> \n\n</td>\n\n<tr>\n<td><b>Pillow</b></td>\n<td align=\"center\">4.2.0</td>\n<td align=\"center\">&raquo;</td>\n<td align=\"center\">4.2.1</td>\n<td>\n <a href=\"https://pypi.python.org/pypi/pillow\">PyPI</a> | <a href=\"https://pyup.io/changelogs/pillow/\">Changelog</a> | <a href=\"https://python-pillow.org\">Homepage</a> \n\n</td>\n\n<tr>\n<td><b>pycparser</b></td>\n<td align=\"center\">2.17</td>\n<td align=\"center\">&raquo;</td>\n<td align=\"center\">2.18</td>\n<td>\n <a href=\"https://pypi.python.org/pypi/pycparser\">PyPI</a> | <a href=\"https://pyup.io/changelogs/pycparser/\">Changelog</a> | <a href=\"https://github.com/eliben/pycparser\">Repo</a> \n\n</td>\n\n<tr>\n<td><b>buildbot-www</b></td>\n<td align=\"center\">0.9.9.post1</td>\n<td align=\"center\">&raquo;</td>\n<td align=\"center\">0.9.9.post2</td>\n<td>\n <a href=\"https://pypi.python.org/pypi/buildbot-www\">PyPI</a> | <a href=\"http://buildbot.net/\">Homepage</a> \n\n</td>\n\n</tr>\n</table>\n\n\n\n## Changelogs\n\n\n### boto 2.47.0 -> 2.48.0\n\n>### 2.48.0\n\n>============\n\n>:date: 2017/06/29\n\n>This release addresses a few S3 related bugs as well as a bug with the recent\n>endpoint heuristics feature.\n\n>Changes\n>-------\n>* Fix generate_url() AttributeError when using anonymous connections (:issue:`3734`, :sha:`83481807`)\n>* Use RegionInfo by default with heuristics (:issue:`3737`, :sha:`0a9b1140`)\n>* Allow specifying s3 host from boto config file. 
(:issue:`3738`, :sha:`dcfc7512`)\n\n\n\n\n\n\n\n\n\n\n### botocore 1.5.78 -> 1.5.80\n\n>### 1.5.80\n\n>======\n\n>* api-change:``kinesis``: Update kinesis client to latest version\n>* api-change:``kms``: Update kms client to latest version\n>* api-change:``ds``: Update ds client to latest version\n>* api-change:``ssm``: Update ssm client to latest version\n\n\n\n\n>### 1.5.79\n\n>======\n\n>* api-change:``cloudwatch``: Update cloudwatch client to latest version\n>* api-change:``route53``: Update route53 client to latest version\n\n\n\n\n\n\n\n\n### Pillow 4.2.0 -> 4.2.1\n\n>### 4.2.1\n\n>------------------\n\n>- CI: Fix version specification and test on CI for PyPy/Windows 2608\n> [wiredfool]\n\n\n\n\n\n\n\n### pycparser 2.17 -> 2.18\n\n>### 2.18\n\n\n> - PR 161 &amp; 184: Update bundled PLY version to 3.10\n> - PR 158: Add support for the __int128 type.\n> - PR 169: Handle more tricky TYPEID in declarators.\n> - PR 178: Add columns to the coord of each node\n\n\n\n\n\n\n\n\n\n\nThat's it for now!\n\nHappy merging! \ud83e\udd16\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom twisted.internet import defer\n\nfrom buildbot.data import base\nfrom buildbot.data import types\nfrom buildbot.schedulers import forcesched\nfrom buildbot.www.rest import JSONRPC_CODES\nfrom buildbot.www.rest import BadJsonRpc2\n\n\ndef forceScheduler2Data(sched):\n ret = dict(all_fields=[],\n name=unicode(sched.name),\n button_name=unicode(sched.buttonName),\n label=unicode(sched.label),\n builder_names=map(unicode, sched.builderNames))\n ret[\"all_fields\"] = [field.getSpec() for field in sched.all_fields]\n return ret\n\n\nclass ForceSchedulerEndpoint(base.Endpoint):\n\n isCollection = False\n pathPatterns = \"\"\"\n /forceschedulers/i:schedulername\n \"\"\"\n\n def findForceScheduler(self, schedulername):\n # eventually this may be db backed. 
This is why the API is async\n for sched in self.master.allSchedulers():\n if sched.name == schedulername and isinstance(sched, forcesched.ForceScheduler):\n return defer.succeed(sched)\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n sched = yield self.findForceScheduler(kwargs['schedulername'])\n if sched is not None:\n defer.returnValue(forceScheduler2Data(sched))\n\n @defer.inlineCallbacks\n def control(self, action, args, kwargs):\n if action == \"force\":\n sched = yield self.findForceScheduler(kwargs['schedulername'])\n if \"owner\" not in args:\n args['owner'] = \"user\"\n try:\n res = yield sched.force(**args)\n defer.returnValue(res)\n except forcesched.CollectedValidationError as e:\n raise BadJsonRpc2(e.errors, JSONRPC_CODES[\"invalid_params\"])\n defer.returnValue(None)\n\n\nclass ForceSchedulersEndpoint(base.Endpoint):\n\n isCollection = True\n pathPatterns = \"\"\"\n /forceschedulers\n /builders/:builderid/forceschedulers\n \"\"\"\n rootLinkName = 'forceschedulers'\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n l = []\n builderid = kwargs.get('builderid', None)\n if builderid is not None:\n bdict = yield self.master.db.builders.getBuilder(builderid)\n for sched in self.master.allSchedulers():\n if isinstance(sched, forcesched.ForceScheduler):\n if builderid is not None and bdict['name'] not in sched.builderNames:\n continue\n l.append(forceScheduler2Data(sched))\n defer.returnValue(l)\n\n\nclass ForceScheduler(base.ResourceType):\n\n name = \"forcescheduler\"\n plural = \"forceschedulers\"\n endpoints = [ForceSchedulerEndpoint, ForceSchedulersEndpoint]\n keyFields = []\n\n class EntityType(types.Entity):\n name = types.Identifier(20)\n button_name = types.String()\n label = types.String()\n builder_names = types.List(of=types.Identifier(20))\n all_fields = types.List(of=types.JsonObject())\n entityType = EntityType(name)\n", "path": "master/buildbot/data/forceschedulers.py"}, {"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom twisted.internet import defer\n\nfrom buildbot.data import base\nfrom buildbot.data import types\n\n\nclass BuilderEndpoint(base.Endpoint):\n\n isCollection = False\n pathPatterns = \"\"\"\n /builders/n:builderid\n /masters/n:masterid/builders/n:builderid\n \"\"\"\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n builderid = kwargs['builderid']\n bdict = yield self.master.db.builders.getBuilder(builderid)\n if not bdict:\n defer.returnValue(None)\n return\n if 'masterid' in kwargs:\n if kwargs['masterid'] not in bdict['masterids']:\n defer.returnValue(None)\n return\n defer.returnValue(\n dict(builderid=builderid,\n name=bdict['name'],\n masterids=bdict['masterids'],\n description=bdict['description'],\n tags=bdict['tags']))\n\n\nclass BuildersEndpoint(base.Endpoint):\n\n isCollection = True\n rootLinkName = 'builders'\n pathPatterns = \"\"\"\n /builders\n /masters/n:masterid/builders\n \"\"\"\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n bdicts = yield self.master.db.builders.getBuilders(\n masterid=kwargs.get('masterid', None))\n defer.returnValue([\n dict(builderid=bd['id'],\n name=bd['name'],\n masterids=bd['masterids'],\n description=bd['description'],\n tags=bd['tags'])\n for bd in bdicts])\n\n\nclass Builder(base.ResourceType):\n\n name = \"builder\"\n plural = \"builders\"\n endpoints = [BuilderEndpoint, BuildersEndpoint]\n keyFields = ['builderid']\n\n class EntityType(types.Entity):\n builderid = types.Integer()\n name = types.Identifier(20)\n masterids = types.List(of=types.Integer())\n description = types.NoneOk(types.String())\n tags = types.List(of=types.String())\n entityType = EntityType(name)\n\n def __init__(self, master):\n base.ResourceType.__init__(self, master)\n\n @base.updateMethod\n def findBuilderId(self, name):\n return self.master.db.builders.findBuilderId(name)\n\n @base.updateMethod\n def updateBuilderInfo(self, builderid, description, tags):\n return self.master.db.builders.updateBuilderInfo(builderid, description, tags)\n\n @base.updateMethod\n @defer.inlineCallbacks\n def updateBuilderList(self, masterid, builderNames):\n # get the \"current\" list of builders for this master, so we know what\n # changes to make. 
Race conditions here aren't a great worry, as this\n # is the only master inserting or deleting these records.\n builders = yield self.master.db.builders.getBuilders(masterid=masterid)\n\n # figure out what to remove and remove it\n builderNames_set = set(builderNames)\n for bldr in builders:\n if bldr['name'] not in builderNames_set:\n builderid = bldr['id']\n yield self.master.db.builders.removeBuilderMaster(\n masterid=masterid, builderid=builderid)\n self.master.mq.produce(('builders', str(builderid), 'stopped'),\n dict(builderid=builderid, masterid=masterid,\n name=bldr['name']))\n else:\n builderNames_set.remove(bldr['name'])\n\n # now whatever's left in builderNames_set is new\n for name in builderNames_set:\n builderid = yield self.master.db.builders.findBuilderId(name)\n yield self.master.db.builders.addBuilderMaster(\n masterid=masterid, builderid=builderid)\n self.master.mq.produce(('builders', str(builderid), 'started'),\n dict(builderid=builderid, masterid=masterid, name=name))\n\n @defer.inlineCallbacks\n def _masterDeactivated(self, masterid):\n # called from the masters rtype to indicate that the given master is\n # deactivated\n yield self.updateBuilderList(masterid, [])\n", "path": "master/buildbot/data/builders.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom twisted.internet import defer\n\nfrom buildbot.data import base\nfrom buildbot.data import types\nfrom buildbot.schedulers import forcesched\nfrom buildbot.www.rest import JSONRPC_CODES\nfrom buildbot.www.rest import BadJsonRpc2\n\n\ndef forceScheduler2Data(sched):\n ret = dict(all_fields=[],\n name=unicode(sched.name),\n button_name=unicode(sched.buttonName),\n label=unicode(sched.label),\n builder_names=map(unicode, sched.builderNames))\n ret[\"all_fields\"] = [field.getSpec() for field in sched.all_fields]\n return ret\n\n\nclass ForceSchedulerEndpoint(base.Endpoint):\n\n isCollection = False\n pathPatterns = \"\"\"\n /forceschedulers/i:schedulername\n \"\"\"\n\n def findForceScheduler(self, schedulername):\n # eventually this may be db backed. 
This is why the API is async\n for sched in self.master.allSchedulers():\n if sched.name == schedulername and isinstance(sched, forcesched.ForceScheduler):\n return defer.succeed(sched)\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n sched = yield self.findForceScheduler(kwargs['schedulername'])\n if sched is not None:\n defer.returnValue(forceScheduler2Data(sched))\n\n @defer.inlineCallbacks\n def control(self, action, args, kwargs):\n if action == \"force\":\n sched = yield self.findForceScheduler(kwargs['schedulername'])\n if \"owner\" not in args:\n args['owner'] = \"user\"\n try:\n res = yield sched.force(**args)\n defer.returnValue(res)\n except forcesched.CollectedValidationError as e:\n raise BadJsonRpc2(e.errors, JSONRPC_CODES[\"invalid_params\"])\n defer.returnValue(None)\n\n\nclass ForceSchedulersEndpoint(base.Endpoint):\n\n isCollection = True\n pathPatterns = \"\"\"\n /forceschedulers\n /builders/:builderid/forceschedulers\n \"\"\"\n rootLinkName = 'forceschedulers'\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n l = []\n builderid = kwargs.get('builderid', None)\n if builderid is not None:\n bdict = yield self.master.db.builders.getBuilder(builderid)\n for sched in self.master.allSchedulers():\n if isinstance(sched, forcesched.ForceScheduler):\n if builderid is not None and bdict['name'] not in sched.builderNames:\n continue\n l.append(forceScheduler2Data(sched))\n defer.returnValue(l)\n\n\nclass ForceScheduler(base.ResourceType):\n\n name = \"forcescheduler\"\n plural = \"forceschedulers\"\n endpoints = [ForceSchedulerEndpoint, ForceSchedulersEndpoint]\n keyFields = []\n\n class EntityType(types.Entity):\n name = types.Identifier(50)\n button_name = types.String()\n label = types.String()\n builder_names = types.List(of=types.Identifier(50))\n all_fields = types.List(of=types.JsonObject())\n entityType = EntityType(name)\n", "path": "master/buildbot/data/forceschedulers.py"}, {"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom twisted.internet import defer\n\nfrom buildbot.data import base\nfrom buildbot.data import types\n\n\nclass BuilderEndpoint(base.Endpoint):\n\n isCollection = False\n pathPatterns = \"\"\"\n /builders/n:builderid\n /masters/n:masterid/builders/n:builderid\n \"\"\"\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n builderid = kwargs['builderid']\n bdict = yield self.master.db.builders.getBuilder(builderid)\n if not bdict:\n defer.returnValue(None)\n return\n if 'masterid' in kwargs:\n if kwargs['masterid'] not in bdict['masterids']:\n defer.returnValue(None)\n return\n defer.returnValue(\n dict(builderid=builderid,\n name=bdict['name'],\n masterids=bdict['masterids'],\n description=bdict['description'],\n tags=bdict['tags']))\n\n\nclass BuildersEndpoint(base.Endpoint):\n\n isCollection = True\n rootLinkName = 'builders'\n pathPatterns = \"\"\"\n /builders\n /masters/n:masterid/builders\n \"\"\"\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n bdicts = yield self.master.db.builders.getBuilders(\n masterid=kwargs.get('masterid', None))\n defer.returnValue([\n dict(builderid=bd['id'],\n name=bd['name'],\n masterids=bd['masterids'],\n description=bd['description'],\n tags=bd['tags'])\n for bd in bdicts])\n\n\nclass Builder(base.ResourceType):\n\n name = \"builder\"\n plural = \"builders\"\n endpoints = [BuilderEndpoint, BuildersEndpoint]\n keyFields = ['builderid']\n\n class EntityType(types.Entity):\n builderid = types.Integer()\n name = types.Identifier(50)\n masterids = types.List(of=types.Integer())\n description = types.NoneOk(types.String())\n tags = types.List(of=types.String())\n entityType = EntityType(name)\n\n def __init__(self, master):\n base.ResourceType.__init__(self, master)\n\n @base.updateMethod\n def findBuilderId(self, name):\n return self.master.db.builders.findBuilderId(name)\n\n @base.updateMethod\n def updateBuilderInfo(self, builderid, description, tags):\n return self.master.db.builders.updateBuilderInfo(builderid, description, tags)\n\n @base.updateMethod\n @defer.inlineCallbacks\n def updateBuilderList(self, masterid, builderNames):\n # get the \"current\" list of builders for this master, so we know what\n # changes to make. 
Race conditions here aren't a great worry, as this\n # is the only master inserting or deleting these records.\n builders = yield self.master.db.builders.getBuilders(masterid=masterid)\n\n # figure out what to remove and remove it\n builderNames_set = set(builderNames)\n for bldr in builders:\n if bldr['name'] not in builderNames_set:\n builderid = bldr['id']\n yield self.master.db.builders.removeBuilderMaster(\n masterid=masterid, builderid=builderid)\n self.master.mq.produce(('builders', str(builderid), 'stopped'),\n dict(builderid=builderid, masterid=masterid,\n name=bldr['name']))\n else:\n builderNames_set.remove(bldr['name'])\n\n # now whatever's left in builderNames_set is new\n for name in builderNames_set:\n builderid = yield self.master.db.builders.findBuilderId(name)\n yield self.master.db.builders.addBuilderMaster(\n masterid=masterid, builderid=builderid)\n self.master.mq.produce(('builders', str(builderid), 'started'),\n dict(builderid=builderid, masterid=masterid, name=name))\n\n @defer.inlineCallbacks\n def _masterDeactivated(self, masterid):\n # called from the masters rtype to indicate that the given master is\n # deactivated\n yield self.updateBuilderList(masterid, [])\n", "path": "master/buildbot/data/builders.py"}]}
3734
265
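Setting the type widths aside, the heart of `updateBuilderList` in the listing above is a set reconciliation: drop the builders this master no longer declares and register the new ones. Stripped of the database and message-queue calls, the core logic is roughly the following sketch (simplified, not Buildbot code):

```python
def reconcile(current: list[str], desired: list[str]) -> tuple[list[str], list[str]]:
    """Return (to_remove, to_add) given current and desired builder names."""
    current_set, desired_set = set(current), set(desired)
    return sorted(current_set - desired_set), sorted(desired_set - current_set)

to_remove, to_add = reconcile(["docs", "slow-tests"], ["docs", "lint"])
print(to_remove)  # ['slow-tests'] -> removeBuilderMaster + 'stopped' event
print(to_add)     # ['lint']       -> findBuilderId/addBuilderMaster + 'started' event
```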
gh_patches_debug_39102
rasdani/github-patches
git_diff
kedro-org__kedro-3680
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Provide better error message when you try to use a project command from outside a project > **Note**. This ticket does not handle the question of what happens if you're in a subdirectory of the project. That is done in #1831 . This ticket is just about what happens if you're outside the project altogether (e.g. the level above the project). When a kedro project command (e.g. `kedro run`) is run from outside a project then currently you get the very uninformative message `run command is not found`. We should change this so that it gives a much clearer message that you are outside a kedro project and should `cd` into the project before executing the command. This has been started in #1720 @noklam . You could either continue working on that PR or start a new one. Relevant modifications to make: * since we're not interested in iterating upwards through parent directories here, the existing upwards iteration using `find_project_root` is no longer relevant. That leaves open the question of whether and how we might suggest what the correct kedro project directory is. It's definitely a bad idea to iterate _downwards_ through directory structure arbitrarily deep (could take a long time). We could go one directory deep so long as it's not too expensive an operation (maybe need to check there aren't too many folders in the current directory first? Needs some quick time profiling to see). Or just not make any suggestion as to the right `cd` command. * if we're totally outside a kedro project, a helpful error message is great but: * is there some better way to check if the command is a project one without overriding `get_command`, e.g. check if it's in `self.project_groups`? * I don't think this formatting is "on brand" yet since we haven't moved the rest of the CLI to rich yet. Let's just do it as `click.secho` for now. Doing rich styling for CLI messages is a separate issue for another time (e.g. we might move to https://github.com/ewels/rich-click) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kedro/framework/cli/__init__.py` Content: ``` 1 """``kedro.framework.cli`` implements commands available from Kedro's CLI. 2 """ 3 4 from .cli import main 5 from .utils import command_with_verbosity, load_entry_points 6 7 __all__ = ["main", "command_with_verbosity", "load_entry_points"] 8 ``` Path: `kedro/framework/cli/cli.py` Content: ``` 1 """kedro is a CLI for managing Kedro projects. 2 3 This module implements commands available from the kedro CLI. 
4 """ 5 from __future__ import annotations 6 7 import importlib 8 import sys 9 from collections import defaultdict 10 from pathlib import Path 11 from typing import Any, Sequence 12 13 import click 14 15 from kedro import __version__ as version 16 from kedro.framework.cli.catalog import catalog_cli 17 from kedro.framework.cli.hooks import get_cli_hook_manager 18 from kedro.framework.cli.jupyter import jupyter_cli 19 from kedro.framework.cli.micropkg import micropkg_cli 20 from kedro.framework.cli.pipeline import pipeline_cli 21 from kedro.framework.cli.project import project_group 22 from kedro.framework.cli.registry import registry_cli 23 from kedro.framework.cli.starters import create_cli 24 from kedro.framework.cli.utils import ( 25 CONTEXT_SETTINGS, 26 ENTRY_POINT_GROUPS, 27 CommandCollection, 28 KedroCliError, 29 _get_entry_points, 30 load_entry_points, 31 ) 32 from kedro.framework.project import LOGGING # noqa: F401 33 from kedro.framework.startup import _is_project, bootstrap_project 34 35 LOGO = rf""" 36 _ _ 37 | | _____ __| |_ __ ___ 38 | |/ / _ \/ _` | '__/ _ \ 39 | < __/ (_| | | | (_) | 40 |_|\_\___|\__,_|_| \___/ 41 v{version} 42 """ 43 44 45 @click.group(context_settings=CONTEXT_SETTINGS, name="Kedro") 46 @click.version_option(version, "--version", "-V", help="Show version and exit") 47 def cli() -> None: # pragma: no cover 48 """Kedro is a CLI for creating and using Kedro projects. For more 49 information, type ``kedro info``. 50 51 """ 52 pass 53 54 55 @cli.command() 56 def info() -> None: 57 """Get more information about kedro.""" 58 click.secho(LOGO, fg="green") 59 click.echo( 60 "Kedro is a Python framework for\n" 61 "creating reproducible, maintainable\n" 62 "and modular data science code." 63 ) 64 65 plugin_versions = {} 66 plugin_entry_points = defaultdict(set) 67 for plugin_entry_point in ENTRY_POINT_GROUPS: 68 for entry_point in _get_entry_points(plugin_entry_point): 69 module_name = entry_point.module.split(".")[0] 70 plugin_versions[module_name] = entry_point.dist.version 71 plugin_entry_points[module_name].add(plugin_entry_point) 72 73 click.echo() 74 if plugin_versions: 75 click.echo("Installed plugins:") 76 for plugin_name, plugin_version in sorted(plugin_versions.items()): 77 entrypoints_str = ",".join(sorted(plugin_entry_points[plugin_name])) 78 click.echo( 79 f"{plugin_name}: {plugin_version} (entry points:{entrypoints_str})" 80 ) 81 else: 82 click.echo("No plugins installed") 83 84 85 def _init_plugins() -> None: 86 init_hooks = load_entry_points("init") 87 for init_hook in init_hooks: 88 init_hook() 89 90 91 class KedroCLI(CommandCollection): 92 """A CommandCollection class to encapsulate the KedroCLI command 93 loading. 94 """ 95 96 def __init__(self, project_path: Path): 97 self._metadata = None # running in package mode 98 if _is_project(project_path): 99 self._metadata = bootstrap_project(project_path) 100 self._cli_hook_manager = get_cli_hook_manager() 101 102 super().__init__( 103 ("Global commands", self.global_groups), 104 ("Project specific commands", self.project_groups), 105 ) 106 107 def main( 108 self, 109 args: Any | None = None, 110 prog_name: Any | None = None, 111 complete_var: Any | None = None, 112 standalone_mode: bool = True, 113 **extra: Any, 114 ) -> Any: 115 if self._metadata: 116 extra.update(obj=self._metadata) 117 118 # This is how click's internals parse sys.argv, which include the command, 119 # subcommand, arguments and options. click doesn't store this information anywhere 120 # so we have to re-do it. 
121 args = sys.argv[1:] if args is None else list(args) 122 self._cli_hook_manager.hook.before_command_run( 123 project_metadata=self._metadata, command_args=args 124 ) 125 126 try: 127 super().main( 128 args=args, 129 prog_name=prog_name, 130 complete_var=complete_var, 131 standalone_mode=standalone_mode, 132 **extra, 133 ) 134 # click.core.main() method exits by default, we capture this and then 135 # exit as originally intended 136 except SystemExit as exc: 137 self._cli_hook_manager.hook.after_command_run( 138 project_metadata=self._metadata, command_args=args, exit_code=exc.code 139 ) 140 sys.exit(exc.code) 141 142 @property 143 def global_groups(self) -> Sequence[click.MultiCommand]: 144 """Property which loads all global command groups from plugins and 145 combines them with the built-in ones (eventually overriding the 146 built-in ones if they are redefined by plugins). 147 """ 148 return [cli, create_cli, *load_entry_points("global")] 149 150 @property 151 def project_groups(self) -> Sequence[click.MultiCommand]: 152 """Property which loads all project command groups from the 153 project and the plugins, then combines them with the built-in ones. 154 Built-in commands can be overridden by plugins, which can be 155 overridden by a custom project cli.py. 156 See https://kedro.readthedocs.io/en/stable/extend_kedro/common_use_cases.html#use-case-3-how-to-add-or-modify-cli-commands 157 on how to add this. 158 """ 159 if not self._metadata: 160 return [] 161 162 built_in = [ 163 catalog_cli, 164 jupyter_cli, 165 pipeline_cli, 166 micropkg_cli, 167 project_group, 168 registry_cli, 169 ] 170 171 plugins = load_entry_points("project") 172 173 try: 174 project_cli = importlib.import_module(f"{self._metadata.package_name}.cli") 175 # fail gracefully if cli.py does not exist 176 except ModuleNotFoundError: 177 # return only built-in commands and commands from plugins 178 # (plugins can override built-in commands) 179 return [*built_in, *plugins] 180 181 # fail badly if cli.py exists, but has no `cli` in it 182 if not hasattr(project_cli, "cli"): 183 raise KedroCliError( 184 f"Cannot load commands from {self._metadata.package_name}.cli" 185 ) 186 user_defined = project_cli.cli 187 # return built-in commands, plugin commands and user defined commands 188 # (overriding happens as follows built-in < plugins < cli.py) 189 return [*built_in, *plugins, user_defined] 190 191 192 def main() -> None: # pragma: no cover 193 """Main entry point. Look for a ``cli.py``, and, if found, add its 194 commands to `kedro`'s before invoking the CLI. 195 """ 196 _init_plugins() 197 cli_collection = KedroCLI(project_path=Path.cwd()) 198 cli_collection() 199 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kedro/framework/cli/__init__.py b/kedro/framework/cli/__init__.py --- a/kedro/framework/cli/__init__.py +++ b/kedro/framework/cli/__init__.py @@ -1,7 +1,11 @@ """``kedro.framework.cli`` implements commands available from Kedro's CLI. """ -from .cli import main -from .utils import command_with_verbosity, load_entry_points +# The constant need to be defined first otherwise it causes circular depdencies +ORANGE = (255, 175, 0) +BRIGHT_BLACK = (128, 128, 128) + +from .cli import main # noqa: E402 +from .utils import command_with_verbosity, load_entry_points # noqa: E402 __all__ = ["main", "command_with_verbosity", "load_entry_points"] diff --git a/kedro/framework/cli/cli.py b/kedro/framework/cli/cli.py --- a/kedro/framework/cli/cli.py +++ b/kedro/framework/cli/cli.py @@ -6,6 +6,7 @@ import importlib import sys +import traceback from collections import defaultdict from pathlib import Path from typing import Any, Sequence @@ -13,6 +14,7 @@ import click from kedro import __version__ as version +from kedro.framework.cli import BRIGHT_BLACK, ORANGE from kedro.framework.cli.catalog import catalog_cli from kedro.framework.cli.hooks import get_cli_hook_manager from kedro.framework.cli.jupyter import jupyter_cli @@ -133,10 +135,40 @@ ) # click.core.main() method exits by default, we capture this and then # exit as originally intended + except SystemExit as exc: self._cli_hook_manager.hook.after_command_run( project_metadata=self._metadata, command_args=args, exit_code=exc.code ) + # When CLI is run outside of a project, project_groups are not registered + catch_exception = "click.exceptions.UsageError: No such command" + # click convert exception handles to error message + if catch_exception in traceback.format_exc() and not self.project_groups: + warn = click.style( + "\nKedro project not found in this directory. ", + fg=ORANGE, + bold=True, + ) + result = ( + click.style("Project specific commands such as ") + + click.style("'run' ", fg="cyan") + + "or " + + click.style("'jupyter' ", fg="cyan") + + "are only available within a project directory." + ) + message = warn + result + hint = ( + click.style( + "\nHint: Kedro is looking for a file called ", fg=BRIGHT_BLACK + ) + + click.style("'pyproject.toml", fg="magenta") + + click.style( + ", is one present in your current working directory?", + fg=BRIGHT_BLACK, + ) + ) + click.echo(message) + click.echo(hint) sys.exit(exc.code) @property
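The patch builds the new warning out of `click.style` fragments coloured with RGB tuples (which requires click 8 or newer). Pulled out of `KedroCLI.main` into a standalone function, the message construction looks roughly like this; the strings and colours are taken from the diff above, lightly tidied, and the function name is an invention for the sketch.

```python
import click

# Colour constants mirrored from the patch (kedro.framework.cli).
ORANGE = (255, 175, 0)
BRIGHT_BLACK = (128, 128, 128)

def project_not_found_message() -> str:
    """Compose the warning shown when a project-only command runs outside a project."""
    warn = click.style(
        "\nKedro project not found in this directory. ", fg=ORANGE, bold=True
    )
    body = (
        click.style("Project specific commands such as ")
        + click.style("'run' ", fg="cyan")
        + "or "
        + click.style("'jupyter' ", fg="cyan")
        + "are only available within a project directory."
    )
    hint = (
        click.style("\nHint: Kedro is looking for a file called ", fg=BRIGHT_BLACK)
        + click.style("'pyproject.toml'", fg="magenta")
        + click.style(", is one present in your current working directory?", fg=BRIGHT_BLACK)
    )
    return warn + body + hint

if __name__ == "__main__":
    click.echo(project_not_found_message())
```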
{"golden_diff": "diff --git a/kedro/framework/cli/__init__.py b/kedro/framework/cli/__init__.py\n--- a/kedro/framework/cli/__init__.py\n+++ b/kedro/framework/cli/__init__.py\n@@ -1,7 +1,11 @@\n \"\"\"``kedro.framework.cli`` implements commands available from Kedro's CLI.\n \"\"\"\n \n-from .cli import main\n-from .utils import command_with_verbosity, load_entry_points\n+# The constant need to be defined first otherwise it causes circular depdencies\n+ORANGE = (255, 175, 0)\n+BRIGHT_BLACK = (128, 128, 128)\n+\n+from .cli import main # noqa: E402\n+from .utils import command_with_verbosity, load_entry_points # noqa: E402\n \n __all__ = [\"main\", \"command_with_verbosity\", \"load_entry_points\"]\ndiff --git a/kedro/framework/cli/cli.py b/kedro/framework/cli/cli.py\n--- a/kedro/framework/cli/cli.py\n+++ b/kedro/framework/cli/cli.py\n@@ -6,6 +6,7 @@\n \n import importlib\n import sys\n+import traceback\n from collections import defaultdict\n from pathlib import Path\n from typing import Any, Sequence\n@@ -13,6 +14,7 @@\n import click\n \n from kedro import __version__ as version\n+from kedro.framework.cli import BRIGHT_BLACK, ORANGE\n from kedro.framework.cli.catalog import catalog_cli\n from kedro.framework.cli.hooks import get_cli_hook_manager\n from kedro.framework.cli.jupyter import jupyter_cli\n@@ -133,10 +135,40 @@\n )\n # click.core.main() method exits by default, we capture this and then\n # exit as originally intended\n+\n except SystemExit as exc:\n self._cli_hook_manager.hook.after_command_run(\n project_metadata=self._metadata, command_args=args, exit_code=exc.code\n )\n+ # When CLI is run outside of a project, project_groups are not registered\n+ catch_exception = \"click.exceptions.UsageError: No such command\"\n+ # click convert exception handles to error message\n+ if catch_exception in traceback.format_exc() and not self.project_groups:\n+ warn = click.style(\n+ \"\\nKedro project not found in this directory. \",\n+ fg=ORANGE,\n+ bold=True,\n+ )\n+ result = (\n+ click.style(\"Project specific commands such as \")\n+ + click.style(\"'run' \", fg=\"cyan\")\n+ + \"or \"\n+ + click.style(\"'jupyter' \", fg=\"cyan\")\n+ + \"are only available within a project directory.\"\n+ )\n+ message = warn + result\n+ hint = (\n+ click.style(\n+ \"\\nHint: Kedro is looking for a file called \", fg=BRIGHT_BLACK\n+ )\n+ + click.style(\"'pyproject.toml\", fg=\"magenta\")\n+ + click.style(\n+ \", is one present in your current working directory?\",\n+ fg=BRIGHT_BLACK,\n+ )\n+ )\n+ click.echo(message)\n+ click.echo(hint)\n sys.exit(exc.code)\n \n @property\n", "issue": "Provide better error message when you try to use a project command from outside a project\n> **Note**. This ticket does not handle the question of what happens if you're in a subdirectory of the project. That is done in #1831 . This ticket is just about what happens if you're outside the project altogether (e.g. the level above the project).\r\n\r\nWhen a kedro project command (e.g. `kedro run`) is run from outside a project then currently you get the very uninformative message `run command is not found`. We should change this so that it gives a much clearer message that you are outside a kedro project and should `cd` into the project before executing the command.\r\n\r\nThis has been started in #1720 @noklam . You could either continue working on that PR or start a new one. 
Relevant modifications to make:\r\n* since we're not interested in iterating upwards through parent directories here, the existing upwards iteration using `find_project_root` is no longer relevant. That leaves open the question of whether and how we might suggest what the correct kedro project directory is. It's definitely a bad idea to iterate _downwards_ through directory structure arbitrarily deep (could take a long time). We could go one directory deep so long as it's not too expensive an operation (maybe need to check there aren't too many folders in the current directory first? Needs some quick time profiling to see). Or just not make any suggestion as to the right `cd` command. \r\n* if we're totally outside a kedro project, a helpful error message is great but:\r\n * is there some better way to check if the command is a project one without overriding `get_command`, e.g. check if it's in `self.project_groups`?\r\n * I don't think this formatting is \"on brand\" yet since we haven't moved the rest of the CLI to rich yet. Let's just do it as `click.secho` for now. Doing rich styling for CLI messages is a separate issue for another time (e.g. we might move to https://github.com/ewels/rich-click)\n", "before_files": [{"content": "\"\"\"``kedro.framework.cli`` implements commands available from Kedro's CLI.\n\"\"\"\n\nfrom .cli import main\nfrom .utils import command_with_verbosity, load_entry_points\n\n__all__ = [\"main\", \"command_with_verbosity\", \"load_entry_points\"]\n", "path": "kedro/framework/cli/__init__.py"}, {"content": "\"\"\"kedro is a CLI for managing Kedro projects.\n\nThis module implements commands available from the kedro CLI.\n\"\"\"\nfrom __future__ import annotations\n\nimport importlib\nimport sys\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Any, Sequence\n\nimport click\n\nfrom kedro import __version__ as version\nfrom kedro.framework.cli.catalog import catalog_cli\nfrom kedro.framework.cli.hooks import get_cli_hook_manager\nfrom kedro.framework.cli.jupyter import jupyter_cli\nfrom kedro.framework.cli.micropkg import micropkg_cli\nfrom kedro.framework.cli.pipeline import pipeline_cli\nfrom kedro.framework.cli.project import project_group\nfrom kedro.framework.cli.registry import registry_cli\nfrom kedro.framework.cli.starters import create_cli\nfrom kedro.framework.cli.utils import (\n CONTEXT_SETTINGS,\n ENTRY_POINT_GROUPS,\n CommandCollection,\n KedroCliError,\n _get_entry_points,\n load_entry_points,\n)\nfrom kedro.framework.project import LOGGING # noqa: F401\nfrom kedro.framework.startup import _is_project, bootstrap_project\n\nLOGO = rf\"\"\"\n _ _\n| | _____ __| |_ __ ___\n| |/ / _ \\/ _` | '__/ _ \\\n| < __/ (_| | | | (_) |\n|_|\\_\\___|\\__,_|_| \\___/\nv{version}\n\"\"\"\n\n\[email protected](context_settings=CONTEXT_SETTINGS, name=\"Kedro\")\[email protected]_option(version, \"--version\", \"-V\", help=\"Show version and exit\")\ndef cli() -> None: # pragma: no cover\n \"\"\"Kedro is a CLI for creating and using Kedro projects. 
For more\n information, type ``kedro info``.\n\n \"\"\"\n pass\n\n\[email protected]()\ndef info() -> None:\n \"\"\"Get more information about kedro.\"\"\"\n click.secho(LOGO, fg=\"green\")\n click.echo(\n \"Kedro is a Python framework for\\n\"\n \"creating reproducible, maintainable\\n\"\n \"and modular data science code.\"\n )\n\n plugin_versions = {}\n plugin_entry_points = defaultdict(set)\n for plugin_entry_point in ENTRY_POINT_GROUPS:\n for entry_point in _get_entry_points(plugin_entry_point):\n module_name = entry_point.module.split(\".\")[0]\n plugin_versions[module_name] = entry_point.dist.version\n plugin_entry_points[module_name].add(plugin_entry_point)\n\n click.echo()\n if plugin_versions:\n click.echo(\"Installed plugins:\")\n for plugin_name, plugin_version in sorted(plugin_versions.items()):\n entrypoints_str = \",\".join(sorted(plugin_entry_points[plugin_name]))\n click.echo(\n f\"{plugin_name}: {plugin_version} (entry points:{entrypoints_str})\"\n )\n else:\n click.echo(\"No plugins installed\")\n\n\ndef _init_plugins() -> None:\n init_hooks = load_entry_points(\"init\")\n for init_hook in init_hooks:\n init_hook()\n\n\nclass KedroCLI(CommandCollection):\n \"\"\"A CommandCollection class to encapsulate the KedroCLI command\n loading.\n \"\"\"\n\n def __init__(self, project_path: Path):\n self._metadata = None # running in package mode\n if _is_project(project_path):\n self._metadata = bootstrap_project(project_path)\n self._cli_hook_manager = get_cli_hook_manager()\n\n super().__init__(\n (\"Global commands\", self.global_groups),\n (\"Project specific commands\", self.project_groups),\n )\n\n def main(\n self,\n args: Any | None = None,\n prog_name: Any | None = None,\n complete_var: Any | None = None,\n standalone_mode: bool = True,\n **extra: Any,\n ) -> Any:\n if self._metadata:\n extra.update(obj=self._metadata)\n\n # This is how click's internals parse sys.argv, which include the command,\n # subcommand, arguments and options. 
click doesn't store this information anywhere\n # so we have to re-do it.\n args = sys.argv[1:] if args is None else list(args)\n self._cli_hook_manager.hook.before_command_run(\n project_metadata=self._metadata, command_args=args\n )\n\n try:\n super().main(\n args=args,\n prog_name=prog_name,\n complete_var=complete_var,\n standalone_mode=standalone_mode,\n **extra,\n )\n # click.core.main() method exits by default, we capture this and then\n # exit as originally intended\n except SystemExit as exc:\n self._cli_hook_manager.hook.after_command_run(\n project_metadata=self._metadata, command_args=args, exit_code=exc.code\n )\n sys.exit(exc.code)\n\n @property\n def global_groups(self) -> Sequence[click.MultiCommand]:\n \"\"\"Property which loads all global command groups from plugins and\n combines them with the built-in ones (eventually overriding the\n built-in ones if they are redefined by plugins).\n \"\"\"\n return [cli, create_cli, *load_entry_points(\"global\")]\n\n @property\n def project_groups(self) -> Sequence[click.MultiCommand]:\n \"\"\"Property which loads all project command groups from the\n project and the plugins, then combines them with the built-in ones.\n Built-in commands can be overridden by plugins, which can be\n overridden by a custom project cli.py.\n See https://kedro.readthedocs.io/en/stable/extend_kedro/common_use_cases.html#use-case-3-how-to-add-or-modify-cli-commands\n on how to add this.\n \"\"\"\n if not self._metadata:\n return []\n\n built_in = [\n catalog_cli,\n jupyter_cli,\n pipeline_cli,\n micropkg_cli,\n project_group,\n registry_cli,\n ]\n\n plugins = load_entry_points(\"project\")\n\n try:\n project_cli = importlib.import_module(f\"{self._metadata.package_name}.cli\")\n # fail gracefully if cli.py does not exist\n except ModuleNotFoundError:\n # return only built-in commands and commands from plugins\n # (plugins can override built-in commands)\n return [*built_in, *plugins]\n\n # fail badly if cli.py exists, but has no `cli` in it\n if not hasattr(project_cli, \"cli\"):\n raise KedroCliError(\n f\"Cannot load commands from {self._metadata.package_name}.cli\"\n )\n user_defined = project_cli.cli\n # return built-in commands, plugin commands and user defined commands\n # (overriding happens as follows built-in < plugins < cli.py)\n return [*built_in, *plugins, user_defined]\n\n\ndef main() -> None: # pragma: no cover\n \"\"\"Main entry point. 
Look for a ``cli.py``, and, if found, add its\n commands to `kedro`'s before invoking the CLI.\n \"\"\"\n _init_plugins()\n cli_collection = KedroCLI(project_path=Path.cwd())\n cli_collection()\n", "path": "kedro/framework/cli/cli.py"}], "after_files": [{"content": "\"\"\"``kedro.framework.cli`` implements commands available from Kedro's CLI.\n\"\"\"\n\n# The constant need to be defined first otherwise it causes circular depdencies\nORANGE = (255, 175, 0)\nBRIGHT_BLACK = (128, 128, 128)\n\nfrom .cli import main # noqa: E402\nfrom .utils import command_with_verbosity, load_entry_points # noqa: E402\n\n__all__ = [\"main\", \"command_with_verbosity\", \"load_entry_points\"]\n", "path": "kedro/framework/cli/__init__.py"}, {"content": "\"\"\"kedro is a CLI for managing Kedro projects.\n\nThis module implements commands available from the kedro CLI.\n\"\"\"\nfrom __future__ import annotations\n\nimport importlib\nimport sys\nimport traceback\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Any, Sequence\n\nimport click\n\nfrom kedro import __version__ as version\nfrom kedro.framework.cli import BRIGHT_BLACK, ORANGE\nfrom kedro.framework.cli.catalog import catalog_cli\nfrom kedro.framework.cli.hooks import get_cli_hook_manager\nfrom kedro.framework.cli.jupyter import jupyter_cli\nfrom kedro.framework.cli.micropkg import micropkg_cli\nfrom kedro.framework.cli.pipeline import pipeline_cli\nfrom kedro.framework.cli.project import project_group\nfrom kedro.framework.cli.registry import registry_cli\nfrom kedro.framework.cli.starters import create_cli\nfrom kedro.framework.cli.utils import (\n CONTEXT_SETTINGS,\n ENTRY_POINT_GROUPS,\n CommandCollection,\n KedroCliError,\n _get_entry_points,\n load_entry_points,\n)\nfrom kedro.framework.project import LOGGING # noqa: F401\nfrom kedro.framework.startup import _is_project, bootstrap_project\n\nLOGO = rf\"\"\"\n _ _\n| | _____ __| |_ __ ___\n| |/ / _ \\/ _` | '__/ _ \\\n| < __/ (_| | | | (_) |\n|_|\\_\\___|\\__,_|_| \\___/\nv{version}\n\"\"\"\n\n\[email protected](context_settings=CONTEXT_SETTINGS, name=\"Kedro\")\[email protected]_option(version, \"--version\", \"-V\", help=\"Show version and exit\")\ndef cli() -> None: # pragma: no cover\n \"\"\"Kedro is a CLI for creating and using Kedro projects. 
For more\n information, type ``kedro info``.\n\n \"\"\"\n pass\n\n\[email protected]()\ndef info() -> None:\n \"\"\"Get more information about kedro.\"\"\"\n click.secho(LOGO, fg=\"green\")\n click.echo(\n \"Kedro is a Python framework for\\n\"\n \"creating reproducible, maintainable\\n\"\n \"and modular data science code.\"\n )\n\n plugin_versions = {}\n plugin_entry_points = defaultdict(set)\n for plugin_entry_point in ENTRY_POINT_GROUPS:\n for entry_point in _get_entry_points(plugin_entry_point):\n module_name = entry_point.module.split(\".\")[0]\n plugin_versions[module_name] = entry_point.dist.version\n plugin_entry_points[module_name].add(plugin_entry_point)\n\n click.echo()\n if plugin_versions:\n click.echo(\"Installed plugins:\")\n for plugin_name, plugin_version in sorted(plugin_versions.items()):\n entrypoints_str = \",\".join(sorted(plugin_entry_points[plugin_name]))\n click.echo(\n f\"{plugin_name}: {plugin_version} (entry points:{entrypoints_str})\"\n )\n else:\n click.echo(\"No plugins installed\")\n\n\ndef _init_plugins() -> None:\n init_hooks = load_entry_points(\"init\")\n for init_hook in init_hooks:\n init_hook()\n\n\nclass KedroCLI(CommandCollection):\n \"\"\"A CommandCollection class to encapsulate the KedroCLI command\n loading.\n \"\"\"\n\n def __init__(self, project_path: Path):\n self._metadata = None # running in package mode\n if _is_project(project_path):\n self._metadata = bootstrap_project(project_path)\n self._cli_hook_manager = get_cli_hook_manager()\n\n super().__init__(\n (\"Global commands\", self.global_groups),\n (\"Project specific commands\", self.project_groups),\n )\n\n def main(\n self,\n args: Any | None = None,\n prog_name: Any | None = None,\n complete_var: Any | None = None,\n standalone_mode: bool = True,\n **extra: Any,\n ) -> Any:\n if self._metadata:\n extra.update(obj=self._metadata)\n\n # This is how click's internals parse sys.argv, which include the command,\n # subcommand, arguments and options. click doesn't store this information anywhere\n # so we have to re-do it.\n args = sys.argv[1:] if args is None else list(args)\n self._cli_hook_manager.hook.before_command_run(\n project_metadata=self._metadata, command_args=args\n )\n\n try:\n super().main(\n args=args,\n prog_name=prog_name,\n complete_var=complete_var,\n standalone_mode=standalone_mode,\n **extra,\n )\n # click.core.main() method exits by default, we capture this and then\n # exit as originally intended\n\n except SystemExit as exc:\n self._cli_hook_manager.hook.after_command_run(\n project_metadata=self._metadata, command_args=args, exit_code=exc.code\n )\n # When CLI is run outside of a project, project_groups are not registered\n catch_exception = \"click.exceptions.UsageError: No such command\"\n # click convert exception handles to error message\n if catch_exception in traceback.format_exc() and not self.project_groups:\n warn = click.style(\n \"\\nKedro project not found in this directory. 
\",\n fg=ORANGE,\n bold=True,\n )\n result = (\n click.style(\"Project specific commands such as \")\n + click.style(\"'run' \", fg=\"cyan\")\n + \"or \"\n + click.style(\"'jupyter' \", fg=\"cyan\")\n + \"are only available within a project directory.\"\n )\n message = warn + result\n hint = (\n click.style(\n \"\\nHint: Kedro is looking for a file called \", fg=BRIGHT_BLACK\n )\n + click.style(\"'pyproject.toml\", fg=\"magenta\")\n + click.style(\n \", is one present in your current working directory?\",\n fg=BRIGHT_BLACK,\n )\n )\n click.echo(message)\n click.echo(hint)\n sys.exit(exc.code)\n\n @property\n def global_groups(self) -> Sequence[click.MultiCommand]:\n \"\"\"Property which loads all global command groups from plugins and\n combines them with the built-in ones (eventually overriding the\n built-in ones if they are redefined by plugins).\n \"\"\"\n return [cli, create_cli, *load_entry_points(\"global\")]\n\n @property\n def project_groups(self) -> Sequence[click.MultiCommand]:\n \"\"\"Property which loads all project command groups from the\n project and the plugins, then combines them with the built-in ones.\n Built-in commands can be overridden by plugins, which can be\n overridden by a custom project cli.py.\n See https://kedro.readthedocs.io/en/stable/extend_kedro/common_use_cases.html#use-case-3-how-to-add-or-modify-cli-commands\n on how to add this.\n \"\"\"\n if not self._metadata:\n return []\n\n built_in = [\n catalog_cli,\n jupyter_cli,\n pipeline_cli,\n micropkg_cli,\n project_group,\n registry_cli,\n ]\n\n plugins = load_entry_points(\"project\")\n\n try:\n project_cli = importlib.import_module(f\"{self._metadata.package_name}.cli\")\n # fail gracefully if cli.py does not exist\n except ModuleNotFoundError:\n # return only built-in commands and commands from plugins\n # (plugins can override built-in commands)\n return [*built_in, *plugins]\n\n # fail badly if cli.py exists, but has no `cli` in it\n if not hasattr(project_cli, \"cli\"):\n raise KedroCliError(\n f\"Cannot load commands from {self._metadata.package_name}.cli\"\n )\n user_defined = project_cli.cli\n # return built-in commands, plugin commands and user defined commands\n # (overriding happens as follows built-in < plugins < cli.py)\n return [*built_in, *plugins, user_defined]\n\n\ndef main() -> None: # pragma: no cover\n \"\"\"Main entry point. Look for a ``cli.py``, and, if found, add its\n commands to `kedro`'s before invoking the CLI.\n \"\"\"\n _init_plugins()\n cli_collection = KedroCLI(project_path=Path.cwd())\n cli_collection()\n", "path": "kedro/framework/cli/cli.py"}]}
2785
713
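The hint steers the user toward `pyproject.toml`, which is also what `kedro.framework.startup._is_project` (imported in the `cli.py` listing above) consults when `KedroCLI` decides whether to register project commands. A simplified stand-in for that check is sketched below; the `[tool.kedro]` marker is an assumption about the real implementation rather than a guarantee.

```python
from pathlib import Path

def looks_like_kedro_project(project_path: Path) -> bool:
    """Heuristic sketch of _is_project(): a Kedro project directory carries a
    pyproject.toml that declares a [tool.kedro] table."""
    metadata_file = project_path / "pyproject.toml"
    if not metadata_file.is_file():
        return False
    try:
        return "[tool.kedro]" in metadata_file.read_text(encoding="utf-8")
    except OSError:
        return False

print(looks_like_kedro_project(Path.cwd()))  # True only inside a project directory
```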
gh_patches_debug_25531
rasdani/github-patches
git_diff
scikit-hep__awkward-2387
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ak.with_field is has an overzealous check for excluding RecordArrays ### Version of Awkward Array HEAD ### Description and code to reproduce One should be able to add new fields to a `RecordArray` with no fields: ```python >>> no_fields = ak.Array([{}, {}, {}, {}, {}]) >>> no_fields["new_field"] = ak.Array([1, 2, 3, 4, 5]) ``` but there's an error: ``` ValueError: no tuples or records in array; cannot add a new field This error occurred while calling ak.Array.__setitem__( self = <Array [{}, {}, {}, {}, {}] type='5 * {}'> field_name = 'new_field' field_value = <Array [1, 2, 3, 4, 5] type='5 * int64'> ) ``` This happens because https://github.com/scikit-hep/awkward/blob/be60b5330f7f872fd01b34c44d2686eeafa9bbfc/src/awkward/operations/ak_with_field.py#L86-L87 is protecting the subsequent code from having to check for the case that `base` doesn't even have a `RecordArray`, but `base.fields == []` isn't the way to do it: you can also get that from a `RecordArray` with no fields. Instead, that guard needs to check for the existence of a `RecordArray` some other way. Perhaps the following? ```python try: ak._do.recursively_apply(no_fields.layout, lambda x, **kwargs: None, allow_records=False) except ValueError: pass else: raise ValueError("no tuples or records in array; cannot add a new field") from None ``` I'm not entirely happy about repurposing an anti-guard for one function as a guard for another. Also, it should complain if any of the branches of a `UnionArray` are not `RecordArrays`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/awkward/operations/ak_with_field.py` Content: ``` 1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE 2 __all__ = ("with_field",) 3 import copy 4 5 import awkward as ak 6 from awkward._behavior import behavior_of 7 from awkward._layout import wrap_layout 8 from awkward._nplikes.numpylike import NumpyMetadata 9 from awkward._regularize import is_non_string_like_sequence 10 11 np = NumpyMetadata.instance() 12 13 14 def with_field(array, what, where=None, *, highlevel=True, behavior=None): 15 """ 16 Args: 17 array: Array-like data (anything #ak.to_layout recognizes). 18 what: Array-like data (anything #ak.to_layout recognizes) to add as a new field. 19 where (None or str or non-empy sequence of str): If None, the new field 20 has no name (can be accessed as an integer slot number in a 21 string); If str, the name of the new field. If a sequence, it is 22 interpreted as a path where to add the field in a nested record. 23 highlevel (bool): If True, return an #ak.Array; otherwise, return 24 a low-level #ak.contents.Content subclass. 25 behavior (None or dict): Custom #ak.behavior for the output array, if 26 high-level. 27 28 Returns an #ak.Array or #ak.Record (or low-level equivalent, if 29 `highlevel=False`) with a new field attached. This function does not 30 change the array in-place. 31 32 See #ak.Array.__setitem__ and #ak.Record.__setitem__ for a variant that 33 changes the high-level object in-place. (These methods internally use 34 #ak.with_field, so performance is not a factor in choosing one over the 35 other.) 
36 """ 37 with ak._errors.OperationErrorContext( 38 "ak.with_field", 39 { 40 "array": array, 41 "what": what, 42 "where": where, 43 "highlevel": highlevel, 44 "behavior": behavior, 45 }, 46 ): 47 return _impl(array, what, where, highlevel, behavior) 48 49 50 def _impl(base, what, where, highlevel, behavior): 51 if not ( 52 where is None 53 or isinstance(where, str) 54 or ( 55 is_non_string_like_sequence(where) 56 and all(isinstance(x, str) for x in where) 57 ) 58 ): 59 raise TypeError( 60 "New fields may only be assigned by field name(s) " 61 "or as a new integer slot by passing None for 'where'" 62 ) 63 64 if is_non_string_like_sequence(where) and len(where) > 1: 65 return _impl( 66 base, 67 _impl( 68 base[where[0]], 69 what, 70 where[1:], 71 highlevel, 72 behavior, 73 ), 74 where[0], 75 highlevel, 76 behavior, 77 ) 78 else: 79 # If we have an iterable here, pull out the only ti 80 if is_non_string_like_sequence(where): 81 where = where[0] 82 83 behavior = behavior_of(base, what, behavior=behavior) 84 base = ak.operations.to_layout(base, allow_record=True, allow_other=False) 85 86 if len(base.fields) == 0: 87 raise ValueError("no tuples or records in array; cannot add a new field") 88 89 what = ak.operations.to_layout(what, allow_record=True, allow_other=True) 90 91 keys = copy.copy(base.fields) 92 if where in base.fields: 93 keys.remove(where) 94 95 def action(inputs, **kwargs): 96 base, what = inputs 97 backend = base.backend 98 99 if isinstance(base, ak.contents.RecordArray): 100 if what is None: 101 what = ak.contents.IndexedOptionArray( 102 ak.index.Index64( 103 backend.index_nplike.full(len(base), -1, dtype=np.int64), 104 nplike=backend.index_nplike, 105 ), 106 ak.contents.EmptyArray(), 107 ) 108 elif not isinstance(what, ak.contents.Content): 109 what = ak.contents.NumpyArray( 110 backend.nplike.repeat(what, len(base)) 111 ) 112 if base.is_tuple: 113 # Preserve tuple-ness 114 if where is None: 115 fields = None 116 # Otherwise the tuple becomes a record 117 else: 118 fields = [*keys, where] 119 # Records with `where=None` will create a tuple-like key 120 elif where is None: 121 fields = [*keys, str(len(keys))] 122 else: 123 fields = [*keys, where] 124 out = ak.contents.RecordArray( 125 [base[k] for k in keys] + [what], 126 fields, 127 parameters=base.parameters, 128 ) 129 return (out,) 130 else: 131 return None 132 133 out = ak._broadcasting.broadcast_and_apply( 134 [base, what], 135 action, 136 behavior, 137 right_broadcast=False, 138 ) 139 140 assert isinstance(out, tuple) and len(out) == 1 141 142 return wrap_layout(out[0], behavior, highlevel) 143 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/awkward/operations/ak_with_field.py b/src/awkward/operations/ak_with_field.py --- a/src/awkward/operations/ak_with_field.py +++ b/src/awkward/operations/ak_with_field.py @@ -82,16 +82,33 @@ behavior = behavior_of(base, what, behavior=behavior) base = ak.operations.to_layout(base, allow_record=True, allow_other=False) - - if len(base.fields) == 0: - raise ValueError("no tuples or records in array; cannot add a new field") - what = ak.operations.to_layout(what, allow_record=True, allow_other=True) keys = copy.copy(base.fields) if where in base.fields: keys.remove(where) + def purelist_is_record(layout): + result = False + + def action_is_record(input, **kwargs): + nonlocal result + + if input.is_record: + result = True + return input + elif input.is_union: + result = all(purelist_is_record(x) for x in input.contents) + return input + else: + return None + + ak._do.recursively_apply(layout, action_is_record, return_array=False) + return result + + if not purelist_is_record(base): + raise ValueError("no tuples or records in array; cannot add a new field") + def action(inputs, **kwargs): base, what = inputs backend = base.backend
{"golden_diff": "diff --git a/src/awkward/operations/ak_with_field.py b/src/awkward/operations/ak_with_field.py\n--- a/src/awkward/operations/ak_with_field.py\n+++ b/src/awkward/operations/ak_with_field.py\n@@ -82,16 +82,33 @@\n \n behavior = behavior_of(base, what, behavior=behavior)\n base = ak.operations.to_layout(base, allow_record=True, allow_other=False)\n-\n- if len(base.fields) == 0:\n- raise ValueError(\"no tuples or records in array; cannot add a new field\")\n-\n what = ak.operations.to_layout(what, allow_record=True, allow_other=True)\n \n keys = copy.copy(base.fields)\n if where in base.fields:\n keys.remove(where)\n \n+ def purelist_is_record(layout):\n+ result = False\n+\n+ def action_is_record(input, **kwargs):\n+ nonlocal result\n+\n+ if input.is_record:\n+ result = True\n+ return input\n+ elif input.is_union:\n+ result = all(purelist_is_record(x) for x in input.contents)\n+ return input\n+ else:\n+ return None\n+\n+ ak._do.recursively_apply(layout, action_is_record, return_array=False)\n+ return result\n+\n+ if not purelist_is_record(base):\n+ raise ValueError(\"no tuples or records in array; cannot add a new field\")\n+\n def action(inputs, **kwargs):\n base, what = inputs\n backend = base.backend\n", "issue": "ak.with_field is has an overzealous check for excluding RecordArrays\n### Version of Awkward Array\n\nHEAD\n\n### Description and code to reproduce\n\nOne should be able to add new fields to a `RecordArray` with no fields:\r\n\r\n```python\r\n>>> no_fields = ak.Array([{}, {}, {}, {}, {}])\r\n>>> no_fields[\"new_field\"] = ak.Array([1, 2, 3, 4, 5])\r\n```\r\n\r\nbut there's an error:\r\n\r\n```\r\nValueError: no tuples or records in array; cannot add a new field\r\n\r\nThis error occurred while calling\r\n\r\n ak.Array.__setitem__(\r\n self = <Array [{}, {}, {}, {}, {}] type='5 * {}'>\r\n field_name = 'new_field'\r\n field_value = <Array [1, 2, 3, 4, 5] type='5 * int64'>\r\n )\r\n```\r\n\r\nThis happens because\r\n\r\nhttps://github.com/scikit-hep/awkward/blob/be60b5330f7f872fd01b34c44d2686eeafa9bbfc/src/awkward/operations/ak_with_field.py#L86-L87\r\n\r\nis protecting the subsequent code from having to check for the case that `base` doesn't even have a `RecordArray`, but `base.fields == []` isn't the way to do it: you can also get that from a `RecordArray` with no fields.\r\n\r\nInstead, that guard needs to check for the existence of a `RecordArray` some other way. Perhaps the following?\r\n\r\n```python\r\ntry:\r\n ak._do.recursively_apply(no_fields.layout, lambda x, **kwargs: None, allow_records=False)\r\nexcept ValueError:\r\n pass\r\nelse:\r\n raise ValueError(\"no tuples or records in array; cannot add a new field\") from None\r\n```\r\n\r\nI'm not entirely happy about repurposing an anti-guard for one function as a guard for another. 
Also, it should complain if any of the branches of a `UnionArray` are not `RecordArrays`.\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n__all__ = (\"with_field\",)\nimport copy\n\nimport awkward as ak\nfrom awkward._behavior import behavior_of\nfrom awkward._layout import wrap_layout\nfrom awkward._nplikes.numpylike import NumpyMetadata\nfrom awkward._regularize import is_non_string_like_sequence\n\nnp = NumpyMetadata.instance()\n\n\ndef with_field(array, what, where=None, *, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Array-like data (anything #ak.to_layout recognizes).\n what: Array-like data (anything #ak.to_layout recognizes) to add as a new field.\n where (None or str or non-empy sequence of str): If None, the new field\n has no name (can be accessed as an integer slot number in a\n string); If str, the name of the new field. If a sequence, it is\n interpreted as a path where to add the field in a nested record.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Returns an #ak.Array or #ak.Record (or low-level equivalent, if\n `highlevel=False`) with a new field attached. This function does not\n change the array in-place.\n\n See #ak.Array.__setitem__ and #ak.Record.__setitem__ for a variant that\n changes the high-level object in-place. (These methods internally use\n #ak.with_field, so performance is not a factor in choosing one over the\n other.)\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.with_field\",\n {\n \"array\": array,\n \"what\": what,\n \"where\": where,\n \"highlevel\": highlevel,\n \"behavior\": behavior,\n },\n ):\n return _impl(array, what, where, highlevel, behavior)\n\n\ndef _impl(base, what, where, highlevel, behavior):\n if not (\n where is None\n or isinstance(where, str)\n or (\n is_non_string_like_sequence(where)\n and all(isinstance(x, str) for x in where)\n )\n ):\n raise TypeError(\n \"New fields may only be assigned by field name(s) \"\n \"or as a new integer slot by passing None for 'where'\"\n )\n\n if is_non_string_like_sequence(where) and len(where) > 1:\n return _impl(\n base,\n _impl(\n base[where[0]],\n what,\n where[1:],\n highlevel,\n behavior,\n ),\n where[0],\n highlevel,\n behavior,\n )\n else:\n # If we have an iterable here, pull out the only ti\n if is_non_string_like_sequence(where):\n where = where[0]\n\n behavior = behavior_of(base, what, behavior=behavior)\n base = ak.operations.to_layout(base, allow_record=True, allow_other=False)\n\n if len(base.fields) == 0:\n raise ValueError(\"no tuples or records in array; cannot add a new field\")\n\n what = ak.operations.to_layout(what, allow_record=True, allow_other=True)\n\n keys = copy.copy(base.fields)\n if where in base.fields:\n keys.remove(where)\n\n def action(inputs, **kwargs):\n base, what = inputs\n backend = base.backend\n\n if isinstance(base, ak.contents.RecordArray):\n if what is None:\n what = ak.contents.IndexedOptionArray(\n ak.index.Index64(\n backend.index_nplike.full(len(base), -1, dtype=np.int64),\n nplike=backend.index_nplike,\n ),\n ak.contents.EmptyArray(),\n )\n elif not isinstance(what, ak.contents.Content):\n what = ak.contents.NumpyArray(\n backend.nplike.repeat(what, len(base))\n )\n if base.is_tuple:\n # Preserve tuple-ness\n if where is None:\n fields = None\n # Otherwise the tuple becomes a record\n else:\n 
fields = [*keys, where]\n # Records with `where=None` will create a tuple-like key\n elif where is None:\n fields = [*keys, str(len(keys))]\n else:\n fields = [*keys, where]\n out = ak.contents.RecordArray(\n [base[k] for k in keys] + [what],\n fields,\n parameters=base.parameters,\n )\n return (out,)\n else:\n return None\n\n out = ak._broadcasting.broadcast_and_apply(\n [base, what],\n action,\n behavior,\n right_broadcast=False,\n )\n\n assert isinstance(out, tuple) and len(out) == 1\n\n return wrap_layout(out[0], behavior, highlevel)\n", "path": "src/awkward/operations/ak_with_field.py"}], "after_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n__all__ = (\"with_field\",)\nimport copy\n\nimport awkward as ak\nfrom awkward._behavior import behavior_of\nfrom awkward._layout import wrap_layout\nfrom awkward._nplikes.numpylike import NumpyMetadata\nfrom awkward._regularize import is_non_string_like_sequence\n\nnp = NumpyMetadata.instance()\n\n\ndef with_field(array, what, where=None, *, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Array-like data (anything #ak.to_layout recognizes).\n what: Array-like data (anything #ak.to_layout recognizes) to add as a new field.\n where (None or str or non-empy sequence of str): If None, the new field\n has no name (can be accessed as an integer slot number in a\n string); If str, the name of the new field. If a sequence, it is\n interpreted as a path where to add the field in a nested record.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Returns an #ak.Array or #ak.Record (or low-level equivalent, if\n `highlevel=False`) with a new field attached. This function does not\n change the array in-place.\n\n See #ak.Array.__setitem__ and #ak.Record.__setitem__ for a variant that\n changes the high-level object in-place. 
(These methods internally use\n #ak.with_field, so performance is not a factor in choosing one over the\n other.)\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.with_field\",\n {\n \"array\": array,\n \"what\": what,\n \"where\": where,\n \"highlevel\": highlevel,\n \"behavior\": behavior,\n },\n ):\n return _impl(array, what, where, highlevel, behavior)\n\n\ndef _impl(base, what, where, highlevel, behavior):\n if not (\n where is None\n or isinstance(where, str)\n or (\n is_non_string_like_sequence(where)\n and all(isinstance(x, str) for x in where)\n )\n ):\n raise TypeError(\n \"New fields may only be assigned by field name(s) \"\n \"or as a new integer slot by passing None for 'where'\"\n )\n\n if is_non_string_like_sequence(where) and len(where) > 1:\n return _impl(\n base,\n _impl(\n base[where[0]],\n what,\n where[1:],\n highlevel,\n behavior,\n ),\n where[0],\n highlevel,\n behavior,\n )\n else:\n # If we have an iterable here, pull out the only ti\n if is_non_string_like_sequence(where):\n where = where[0]\n\n behavior = behavior_of(base, what, behavior=behavior)\n base = ak.operations.to_layout(base, allow_record=True, allow_other=False)\n what = ak.operations.to_layout(what, allow_record=True, allow_other=True)\n\n keys = copy.copy(base.fields)\n if where in base.fields:\n keys.remove(where)\n\n def purelist_is_record(layout):\n result = False\n\n def action_is_record(input, **kwargs):\n nonlocal result\n\n if input.is_record:\n result = True\n return input\n elif input.is_union:\n result = all(purelist_is_record(x) for x in input.contents)\n return input\n else:\n return None\n\n ak._do.recursively_apply(layout, action_is_record, return_array=False)\n return result\n\n if not purelist_is_record(base):\n raise ValueError(\"no tuples or records in array; cannot add a new field\")\n\n def action(inputs, **kwargs):\n base, what = inputs\n backend = base.backend\n\n if isinstance(base, ak.contents.RecordArray):\n if what is None:\n what = ak.contents.IndexedOptionArray(\n ak.index.Index64(\n backend.index_nplike.full(len(base), -1, dtype=np.int64),\n nplike=backend.index_nplike,\n ),\n ak.contents.EmptyArray(),\n )\n elif not isinstance(what, ak.contents.Content):\n what = ak.contents.NumpyArray(\n backend.nplike.repeat(what, len(base))\n )\n if base.is_tuple:\n # Preserve tuple-ness\n if where is None:\n fields = None\n # Otherwise the tuple becomes a record\n else:\n fields = [*keys, where]\n # Records with `where=None` will create a tuple-like key\n elif where is None:\n fields = [*keys, str(len(keys))]\n else:\n fields = [*keys, where]\n out = ak.contents.RecordArray(\n [base[k] for k in keys] + [what],\n fields,\n parameters=base.parameters,\n )\n return (out,)\n else:\n return None\n\n out = ak._broadcasting.broadcast_and_apply(\n [base, what],\n action,\n behavior,\n right_broadcast=False,\n )\n\n assert isinstance(out, tuple) and len(out) == 1\n\n return wrap_layout(out[0], behavior, highlevel)\n", "path": "src/awkward/operations/ak_with_field.py"}]}
2,095
338
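For the ak.with_field record that ends above: a minimal, hypothetical sketch of the record-detection idea used by its patch (the helper name `contains_record` is illustrative, and it leans on awkward's internal `ak._do.recursively_apply` exactly as the patch does), showing that a RecordArray with zero fields still counts as a record:

```python
import awkward as ak

def contains_record(layout) -> bool:
    # Walk the layout; flag any RecordArray, and treat a UnionArray as a
    # record only when every branch is one (mirroring the patched guard).
    found = False

    def visit(node, **kwargs):
        nonlocal found
        if node.is_record:
            found = True
            return node          # stop descending into this node
        if node.is_union:
            found = all(contains_record(branch) for branch in node.contents)
            return node
        return None              # keep recursing

    ak._do.recursively_apply(layout, visit, return_array=False)
    return found

no_fields = ak.Array([{}, {}, {}, {}, {}])
print(contains_record(no_fields.layout))            # True: fieldless records qualify
print(contains_record(ak.Array([1, 2, 3]).layout))  # False: no records anywhere
```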
gh_patches_debug_33253
rasdani/github-patches
git_diff
angr__angr-3184
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- An error occurred while analysis the CFG of the mips program. An error occurs when I use angr to analyze the CFG of the mips program (net-cgi). The error message is shown below. > File "/home/user/PycharmProjects/pythonProject/venv/lib/python3.8/site-packages/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py", line 106, in _resolve gp_used = self._is_gp_used_on_slice(project, b) File "/home/user/PycharmProjects/pythonProject/venv/lib/python3.8/site-packages/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py", line 183, in _is_gp_used_on_slice stmt = block.vex.statements[block_stmt_idx] IndexError: list index out of range The code is shown below. ``` import angr proj = angr.Project(net-cgi, auto_load_libs=False) cfg = proj.analyses.CFG() ``` The net-cgi can be found in [net-cgi.zip](https://github.com/angr/angr/files/8160732/net-cgi.zip) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py` Content: ``` 1 from typing import Dict, TYPE_CHECKING 2 import logging 3 4 import pyvex 5 import archinfo 6 7 8 from .... import options, BP_BEFORE 9 from ....blade import Blade 10 from ....annocfg import AnnotatedCFG 11 from ....exploration_techniques import Slicecutor 12 13 from .resolver import IndirectJumpResolver 14 15 if TYPE_CHECKING: 16 from angr.block import Block 17 18 19 l = logging.getLogger(name=__name__) 20 21 22 class OverwriteTmpValueCallback: 23 """ 24 Overwrites temporary values during resolution 25 """ 26 def __init__(self, gp_value): 27 self.gp_value = gp_value 28 29 def overwrite_tmp_value(self, state): 30 state.inspect.tmp_write_expr = state.solver.BVV(self.gp_value, state.arch.bits) 31 32 33 class MipsElfFastResolver(IndirectJumpResolver): 34 """ 35 Indirect Jump Resolver for MIPs 36 """ 37 def __init__(self, project): 38 super().__init__(project, timeless=True) 39 40 def filter(self, cfg, addr, func_addr, block, jumpkind): 41 if not isinstance(self.project.arch, (archinfo.ArchMIPS32, archinfo.ArchMIPS64, )): 42 return False 43 return True 44 45 def resolve(self, cfg, addr, func_addr, block, jumpkind): 46 """ 47 Wrapper for _resolve that slowly increments the max_depth used by Blade for finding sources 48 until we can resolve the addr or we reach the default max_depth 49 50 :param cfg: A CFG instance. 51 :param int addr: IRSB address. 52 :param int func_addr: The function address. 53 :param pyvex.IRSB block: The IRSB. 54 :param str jumpkind: The jumpkind. 55 :return: If it was resolved and targets alongside it 56 :rtype: tuple 57 """ 58 for max_level in range(2, 4): 59 resolved, resolved_targets = self._resolve(cfg, addr, func_addr, block, jumpkind, max_level=max_level) 60 if resolved: 61 return resolved, resolved_targets 62 return False, [] 63 64 def _resolve(self, cfg, addr, func_addr, block, jumpkind, max_level): 65 """ 66 Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp. 67 68 :param cfg: A CFG instance. 69 :param int addr: IRSB address. 70 :param int func_addr: The function address. 71 :param pyvex.IRSB block: The IRSB. 72 :param str jumpkind: The jumpkind. 
73 :param int max_level: maximum level for Blade to resolve when looking for sources 74 :return: If it was resolved and targets alongside it 75 :rtype: tuple 76 """ 77 78 project = self.project 79 80 b = Blade(cfg.graph, addr, -1, cfg=cfg, project=project, ignore_sp=True, ignore_bp=True, 81 ignored_regs=('gp',), cross_insn_opt=False, stop_at_calls=True, max_level=max_level 82 ) 83 84 sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0] 85 if not sources: 86 return False, [] 87 88 source = sources[0] 89 source_addr = source[0] 90 annotated_cfg = AnnotatedCFG(project, None, detect_loops=False) 91 annotated_cfg.from_digraph(b.slice) 92 93 state = project.factory.blank_state(addr=source_addr, mode="fastpath", 94 remove_options=options.refs, 95 # suppress unconstrained stack reads for `gp` 96 add_options={ 97 options.SYMBOL_FILL_UNCONSTRAINED_REGISTERS, 98 options.SYMBOL_FILL_UNCONSTRAINED_MEMORY, 99 options.NO_CROSS_INSN_OPT, 100 }, 101 ) 102 state.regs._t9 = func_addr 103 func = cfg.kb.functions.function(addr=func_addr) 104 105 # see if gp is used on this slice at all 106 gp_used = self._is_gp_used_on_slice(project, b) 107 108 gp_value = None 109 if gp_used: 110 if 'gp' not in func.info: 111 # this might a special case: gp is only used once in this function, and it can be initialized right 112 # before its use site. 113 # however, it should have been determined in CFGFast 114 # cannot determine the value of gp. quit 115 pass 116 else: 117 gp_value = func.info['gp'] 118 119 if gp_value is None: 120 l.warning('Failed to determine value of register gp for function %#x.', func.addr) 121 return False, [] 122 123 # Special handling for cases where `gp` is stored on the stack 124 gp_offset = project.arch.registers['gp'][0] 125 self._set_gp_load_callback(state, b, project, gp_offset, gp_value) 126 state.regs._gp = gp_value 127 128 simgr = self.project.factory.simulation_manager(state) 129 simgr.use_technique(Slicecutor(annotated_cfg, force_sat=True)) 130 simgr.run() 131 132 if simgr.cut: 133 # pick the successor that is cut right after executing `addr` 134 try: 135 target_state = next(iter(cut for cut in simgr.cut if cut.history.addr == addr)) 136 except StopIteration: 137 l.info("Indirect jump at %#x cannot be resolved by %s.", addr, repr(self)) 138 return False, [ ] 139 target = target_state.addr 140 141 if self._is_target_valid(cfg, target) and target != func_addr: 142 l.debug("Indirect jump at %#x is resolved to target %#x.", addr, target) 143 return True, [ target ] 144 145 l.info("Indirect jump at %#x is resolved to target %#x, which seems to be invalid.", addr, target) 146 return False, [ ] 147 148 l.info("Indirect jump at %#x cannot be resolved by %s.", addr, repr(self)) 149 return False, [ ] 150 151 @staticmethod 152 def _set_gp_load_callback(state, blade, project, gp_offset, gp_value): 153 tmps = {} 154 for block_addr_in_slice in set(slice_node[0] for slice_node in blade.slice.nodes()): 155 for stmt in project.factory.block(block_addr_in_slice, cross_insn_opt=False).vex.statements: 156 if isinstance(stmt, pyvex.IRStmt.WrTmp) and isinstance(stmt.data, pyvex.IRExpr.Load): 157 # Load from memory to a tmp - assuming it's loading from the stack 158 tmps[stmt.tmp] = 'stack' 159 elif isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == gp_offset: 160 if isinstance(stmt.data, pyvex.IRExpr.RdTmp): 161 tmp_offset = stmt.data.tmp # pylint:disable=cell-var-from-loop 162 if tmps.get(tmp_offset, None) == 'stack': 163 # found the load from stack 164 # we must make sure value of that 
temporary variable equals to the correct gp value 165 state.inspect.make_breakpoint('tmp_write', when=BP_BEFORE, 166 condition=lambda s, bbl_addr_=block_addr_in_slice, 167 tmp_offset_=tmp_offset: 168 s.scratch.bbl_addr == bbl_addr_ and s.inspect.tmp_write_num == tmp_offset_, 169 action=OverwriteTmpValueCallback( 170 gp_value).overwrite_tmp_value 171 ) 172 break 173 174 @staticmethod 175 def _is_gp_used_on_slice(project, b: Blade) -> bool: 176 gp_offset = project.arch.registers['gp'][0] 177 blocks_on_slice: Dict[int, 'Block'] = { } 178 for block_addr, block_stmt_idx in b.slice.nodes(): 179 if block_addr not in blocks_on_slice: 180 blocks_on_slice[block_addr] = project.factory.block(block_addr, cross_insn_opt=False) 181 block = blocks_on_slice[block_addr] 182 stmt = block.vex.statements[block_stmt_idx] 183 if isinstance(stmt, pyvex.IRStmt.WrTmp) \ 184 and isinstance(stmt.data, pyvex.IRExpr.Get) \ 185 and stmt.data.offset == gp_offset: 186 gp_used = True 187 break 188 else: 189 gp_used = False 190 191 return gp_used 192 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py b/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py --- a/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py +++ b/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py @@ -9,7 +9,7 @@ from ....blade import Blade from ....annocfg import AnnotatedCFG from ....exploration_techniques import Slicecutor - +from ....utils.constants import DEFAULT_STATEMENT from .resolver import IndirectJumpResolver if TYPE_CHECKING: @@ -61,7 +61,7 @@ return resolved, resolved_targets return False, [] - def _resolve(self, cfg, addr, func_addr, block, jumpkind, max_level): + def _resolve(self, cfg, addr, func_addr, block, jumpkind, max_level): # pylint:disable=unused-argument """ Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp. @@ -179,12 +179,17 @@ if block_addr not in blocks_on_slice: blocks_on_slice[block_addr] = project.factory.block(block_addr, cross_insn_opt=False) block = blocks_on_slice[block_addr] - stmt = block.vex.statements[block_stmt_idx] - if isinstance(stmt, pyvex.IRStmt.WrTmp) \ - and isinstance(stmt.data, pyvex.IRExpr.Get) \ - and stmt.data.offset == gp_offset: - gp_used = True - break + if block_stmt_idx == DEFAULT_STATEMENT: + if isinstance(block.vex.next, pyvex.IRExpr.Get) and block.vex.next.offset == gp_offset: + gp_used = True + break + else: + stmt = block.vex.statements[block_stmt_idx] + if isinstance(stmt, pyvex.IRStmt.WrTmp) \ + and isinstance(stmt.data, pyvex.IRExpr.Get) \ + and stmt.data.offset == gp_offset: + gp_used = True + break else: gp_used = False
{"golden_diff": "diff --git a/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py b/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py\n--- a/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py\n+++ b/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py\n@@ -9,7 +9,7 @@\n from ....blade import Blade\n from ....annocfg import AnnotatedCFG\n from ....exploration_techniques import Slicecutor\n-\n+from ....utils.constants import DEFAULT_STATEMENT\n from .resolver import IndirectJumpResolver\n \n if TYPE_CHECKING:\n@@ -61,7 +61,7 @@\n return resolved, resolved_targets\n return False, []\n \n- def _resolve(self, cfg, addr, func_addr, block, jumpkind, max_level):\n+ def _resolve(self, cfg, addr, func_addr, block, jumpkind, max_level): # pylint:disable=unused-argument\n \"\"\"\n Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp.\n \n@@ -179,12 +179,17 @@\n if block_addr not in blocks_on_slice:\n blocks_on_slice[block_addr] = project.factory.block(block_addr, cross_insn_opt=False)\n block = blocks_on_slice[block_addr]\n- stmt = block.vex.statements[block_stmt_idx]\n- if isinstance(stmt, pyvex.IRStmt.WrTmp) \\\n- and isinstance(stmt.data, pyvex.IRExpr.Get) \\\n- and stmt.data.offset == gp_offset:\n- gp_used = True\n- break\n+ if block_stmt_idx == DEFAULT_STATEMENT:\n+ if isinstance(block.vex.next, pyvex.IRExpr.Get) and block.vex.next.offset == gp_offset:\n+ gp_used = True\n+ break\n+ else:\n+ stmt = block.vex.statements[block_stmt_idx]\n+ if isinstance(stmt, pyvex.IRStmt.WrTmp) \\\n+ and isinstance(stmt.data, pyvex.IRExpr.Get) \\\n+ and stmt.data.offset == gp_offset:\n+ gp_used = True\n+ break\n else:\n gp_used = False\n", "issue": "An error occurred while analysis the CFG of the mips program.\nAn error occurs when I use angr to analyze the CFG of the mips program (net-cgi). \r\nThe error message is shown below.\r\n\r\n> File \"/home/user/PycharmProjects/pythonProject/venv/lib/python3.8/site-packages/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py\", line 106, in _resolve\r\n gp_used = self._is_gp_used_on_slice(project, b)\r\n File \"/home/user/PycharmProjects/pythonProject/venv/lib/python3.8/site-packages/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py\", line 183, in _is_gp_used_on_slice\r\n stmt = block.vex.statements[block_stmt_idx]\r\nIndexError: list index out of range\r\n\r\nThe code is shown below.\r\n```\r\nimport angr\r\nproj = angr.Project(net-cgi, auto_load_libs=False)\r\ncfg = proj.analyses.CFG()\r\n```\r\n\r\nThe net-cgi can be found in [net-cgi.zip](https://github.com/angr/angr/files/8160732/net-cgi.zip)\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Dict, TYPE_CHECKING\nimport logging\n\nimport pyvex\nimport archinfo\n\n\nfrom .... 
import options, BP_BEFORE\nfrom ....blade import Blade\nfrom ....annocfg import AnnotatedCFG\nfrom ....exploration_techniques import Slicecutor\n\nfrom .resolver import IndirectJumpResolver\n\nif TYPE_CHECKING:\n from angr.block import Block\n\n\nl = logging.getLogger(name=__name__)\n\n\nclass OverwriteTmpValueCallback:\n \"\"\"\n Overwrites temporary values during resolution\n \"\"\"\n def __init__(self, gp_value):\n self.gp_value = gp_value\n\n def overwrite_tmp_value(self, state):\n state.inspect.tmp_write_expr = state.solver.BVV(self.gp_value, state.arch.bits)\n\n\nclass MipsElfFastResolver(IndirectJumpResolver):\n \"\"\"\n Indirect Jump Resolver for MIPs\n \"\"\"\n def __init__(self, project):\n super().__init__(project, timeless=True)\n\n def filter(self, cfg, addr, func_addr, block, jumpkind):\n if not isinstance(self.project.arch, (archinfo.ArchMIPS32, archinfo.ArchMIPS64, )):\n return False\n return True\n\n def resolve(self, cfg, addr, func_addr, block, jumpkind):\n \"\"\"\n Wrapper for _resolve that slowly increments the max_depth used by Blade for finding sources\n until we can resolve the addr or we reach the default max_depth\n\n :param cfg: A CFG instance.\n :param int addr: IRSB address.\n :param int func_addr: The function address.\n :param pyvex.IRSB block: The IRSB.\n :param str jumpkind: The jumpkind.\n :return: If it was resolved and targets alongside it\n :rtype: tuple\n \"\"\"\n for max_level in range(2, 4):\n resolved, resolved_targets = self._resolve(cfg, addr, func_addr, block, jumpkind, max_level=max_level)\n if resolved:\n return resolved, resolved_targets\n return False, []\n\n def _resolve(self, cfg, addr, func_addr, block, jumpkind, max_level):\n \"\"\"\n Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp.\n\n :param cfg: A CFG instance.\n :param int addr: IRSB address.\n :param int func_addr: The function address.\n :param pyvex.IRSB block: The IRSB.\n :param str jumpkind: The jumpkind.\n :param int max_level: maximum level for Blade to resolve when looking for sources\n :return: If it was resolved and targets alongside it\n :rtype: tuple\n \"\"\"\n\n project = self.project\n\n b = Blade(cfg.graph, addr, -1, cfg=cfg, project=project, ignore_sp=True, ignore_bp=True,\n ignored_regs=('gp',), cross_insn_opt=False, stop_at_calls=True, max_level=max_level\n )\n\n sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0]\n if not sources:\n return False, []\n\n source = sources[0]\n source_addr = source[0]\n annotated_cfg = AnnotatedCFG(project, None, detect_loops=False)\n annotated_cfg.from_digraph(b.slice)\n\n state = project.factory.blank_state(addr=source_addr, mode=\"fastpath\",\n remove_options=options.refs,\n # suppress unconstrained stack reads for `gp`\n add_options={\n options.SYMBOL_FILL_UNCONSTRAINED_REGISTERS,\n options.SYMBOL_FILL_UNCONSTRAINED_MEMORY,\n options.NO_CROSS_INSN_OPT,\n },\n )\n state.regs._t9 = func_addr\n func = cfg.kb.functions.function(addr=func_addr)\n\n # see if gp is used on this slice at all\n gp_used = self._is_gp_used_on_slice(project, b)\n\n gp_value = None\n if gp_used:\n if 'gp' not in func.info:\n # this might a special case: gp is only used once in this function, and it can be initialized right\n # before its use site.\n # however, it should have been determined in CFGFast\n # cannot determine the value of gp. 
quit\n pass\n else:\n gp_value = func.info['gp']\n\n if gp_value is None:\n l.warning('Failed to determine value of register gp for function %#x.', func.addr)\n return False, []\n\n # Special handling for cases where `gp` is stored on the stack\n gp_offset = project.arch.registers['gp'][0]\n self._set_gp_load_callback(state, b, project, gp_offset, gp_value)\n state.regs._gp = gp_value\n\n simgr = self.project.factory.simulation_manager(state)\n simgr.use_technique(Slicecutor(annotated_cfg, force_sat=True))\n simgr.run()\n\n if simgr.cut:\n # pick the successor that is cut right after executing `addr`\n try:\n target_state = next(iter(cut for cut in simgr.cut if cut.history.addr == addr))\n except StopIteration:\n l.info(\"Indirect jump at %#x cannot be resolved by %s.\", addr, repr(self))\n return False, [ ]\n target = target_state.addr\n\n if self._is_target_valid(cfg, target) and target != func_addr:\n l.debug(\"Indirect jump at %#x is resolved to target %#x.\", addr, target)\n return True, [ target ]\n\n l.info(\"Indirect jump at %#x is resolved to target %#x, which seems to be invalid.\", addr, target)\n return False, [ ]\n\n l.info(\"Indirect jump at %#x cannot be resolved by %s.\", addr, repr(self))\n return False, [ ]\n\n @staticmethod\n def _set_gp_load_callback(state, blade, project, gp_offset, gp_value):\n tmps = {}\n for block_addr_in_slice in set(slice_node[0] for slice_node in blade.slice.nodes()):\n for stmt in project.factory.block(block_addr_in_slice, cross_insn_opt=False).vex.statements:\n if isinstance(stmt, pyvex.IRStmt.WrTmp) and isinstance(stmt.data, pyvex.IRExpr.Load):\n # Load from memory to a tmp - assuming it's loading from the stack\n tmps[stmt.tmp] = 'stack'\n elif isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == gp_offset:\n if isinstance(stmt.data, pyvex.IRExpr.RdTmp):\n tmp_offset = stmt.data.tmp # pylint:disable=cell-var-from-loop\n if tmps.get(tmp_offset, None) == 'stack':\n # found the load from stack\n # we must make sure value of that temporary variable equals to the correct gp value\n state.inspect.make_breakpoint('tmp_write', when=BP_BEFORE,\n condition=lambda s, bbl_addr_=block_addr_in_slice,\n tmp_offset_=tmp_offset:\n s.scratch.bbl_addr == bbl_addr_ and s.inspect.tmp_write_num == tmp_offset_,\n action=OverwriteTmpValueCallback(\n gp_value).overwrite_tmp_value\n )\n break\n\n @staticmethod\n def _is_gp_used_on_slice(project, b: Blade) -> bool:\n gp_offset = project.arch.registers['gp'][0]\n blocks_on_slice: Dict[int, 'Block'] = { }\n for block_addr, block_stmt_idx in b.slice.nodes():\n if block_addr not in blocks_on_slice:\n blocks_on_slice[block_addr] = project.factory.block(block_addr, cross_insn_opt=False)\n block = blocks_on_slice[block_addr]\n stmt = block.vex.statements[block_stmt_idx]\n if isinstance(stmt, pyvex.IRStmt.WrTmp) \\\n and isinstance(stmt.data, pyvex.IRExpr.Get) \\\n and stmt.data.offset == gp_offset:\n gp_used = True\n break\n else:\n gp_used = False\n\n return gp_used\n", "path": "angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py"}], "after_files": [{"content": "from typing import Dict, TYPE_CHECKING\nimport logging\n\nimport pyvex\nimport archinfo\n\n\nfrom .... 
import options, BP_BEFORE\nfrom ....blade import Blade\nfrom ....annocfg import AnnotatedCFG\nfrom ....exploration_techniques import Slicecutor\nfrom ....utils.constants import DEFAULT_STATEMENT\nfrom .resolver import IndirectJumpResolver\n\nif TYPE_CHECKING:\n from angr.block import Block\n\n\nl = logging.getLogger(name=__name__)\n\n\nclass OverwriteTmpValueCallback:\n \"\"\"\n Overwrites temporary values during resolution\n \"\"\"\n def __init__(self, gp_value):\n self.gp_value = gp_value\n\n def overwrite_tmp_value(self, state):\n state.inspect.tmp_write_expr = state.solver.BVV(self.gp_value, state.arch.bits)\n\n\nclass MipsElfFastResolver(IndirectJumpResolver):\n \"\"\"\n Indirect Jump Resolver for MIPs\n \"\"\"\n def __init__(self, project):\n super().__init__(project, timeless=True)\n\n def filter(self, cfg, addr, func_addr, block, jumpkind):\n if not isinstance(self.project.arch, (archinfo.ArchMIPS32, archinfo.ArchMIPS64, )):\n return False\n return True\n\n def resolve(self, cfg, addr, func_addr, block, jumpkind):\n \"\"\"\n Wrapper for _resolve that slowly increments the max_depth used by Blade for finding sources\n until we can resolve the addr or we reach the default max_depth\n\n :param cfg: A CFG instance.\n :param int addr: IRSB address.\n :param int func_addr: The function address.\n :param pyvex.IRSB block: The IRSB.\n :param str jumpkind: The jumpkind.\n :return: If it was resolved and targets alongside it\n :rtype: tuple\n \"\"\"\n for max_level in range(2, 4):\n resolved, resolved_targets = self._resolve(cfg, addr, func_addr, block, jumpkind, max_level=max_level)\n if resolved:\n return resolved, resolved_targets\n return False, []\n\n def _resolve(self, cfg, addr, func_addr, block, jumpkind, max_level): # pylint:disable=unused-argument\n \"\"\"\n Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp.\n\n :param cfg: A CFG instance.\n :param int addr: IRSB address.\n :param int func_addr: The function address.\n :param pyvex.IRSB block: The IRSB.\n :param str jumpkind: The jumpkind.\n :param int max_level: maximum level for Blade to resolve when looking for sources\n :return: If it was resolved and targets alongside it\n :rtype: tuple\n \"\"\"\n\n project = self.project\n\n b = Blade(cfg.graph, addr, -1, cfg=cfg, project=project, ignore_sp=True, ignore_bp=True,\n ignored_regs=('gp',), cross_insn_opt=False, stop_at_calls=True, max_level=max_level\n )\n\n sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0]\n if not sources:\n return False, []\n\n source = sources[0]\n source_addr = source[0]\n annotated_cfg = AnnotatedCFG(project, None, detect_loops=False)\n annotated_cfg.from_digraph(b.slice)\n\n state = project.factory.blank_state(addr=source_addr, mode=\"fastpath\",\n remove_options=options.refs,\n # suppress unconstrained stack reads for `gp`\n add_options={\n options.SYMBOL_FILL_UNCONSTRAINED_REGISTERS,\n options.SYMBOL_FILL_UNCONSTRAINED_MEMORY,\n options.NO_CROSS_INSN_OPT,\n },\n )\n state.regs._t9 = func_addr\n func = cfg.kb.functions.function(addr=func_addr)\n\n # see if gp is used on this slice at all\n gp_used = self._is_gp_used_on_slice(project, b)\n\n gp_value = None\n if gp_used:\n if 'gp' not in func.info:\n # this might a special case: gp is only used once in this function, and it can be initialized right\n # before its use site.\n # however, it should have been determined in CFGFast\n # cannot determine the value of gp. 
quit\n pass\n else:\n gp_value = func.info['gp']\n\n if gp_value is None:\n l.warning('Failed to determine value of register gp for function %#x.', func.addr)\n return False, []\n\n # Special handling for cases where `gp` is stored on the stack\n gp_offset = project.arch.registers['gp'][0]\n self._set_gp_load_callback(state, b, project, gp_offset, gp_value)\n state.regs._gp = gp_value\n\n simgr = self.project.factory.simulation_manager(state)\n simgr.use_technique(Slicecutor(annotated_cfg, force_sat=True))\n simgr.run()\n\n if simgr.cut:\n # pick the successor that is cut right after executing `addr`\n try:\n target_state = next(iter(cut for cut in simgr.cut if cut.history.addr == addr))\n except StopIteration:\n l.info(\"Indirect jump at %#x cannot be resolved by %s.\", addr, repr(self))\n return False, [ ]\n target = target_state.addr\n\n if self._is_target_valid(cfg, target) and target != func_addr:\n l.debug(\"Indirect jump at %#x is resolved to target %#x.\", addr, target)\n return True, [ target ]\n\n l.info(\"Indirect jump at %#x is resolved to target %#x, which seems to be invalid.\", addr, target)\n return False, [ ]\n\n l.info(\"Indirect jump at %#x cannot be resolved by %s.\", addr, repr(self))\n return False, [ ]\n\n @staticmethod\n def _set_gp_load_callback(state, blade, project, gp_offset, gp_value):\n tmps = {}\n for block_addr_in_slice in set(slice_node[0] for slice_node in blade.slice.nodes()):\n for stmt in project.factory.block(block_addr_in_slice, cross_insn_opt=False).vex.statements:\n if isinstance(stmt, pyvex.IRStmt.WrTmp) and isinstance(stmt.data, pyvex.IRExpr.Load):\n # Load from memory to a tmp - assuming it's loading from the stack\n tmps[stmt.tmp] = 'stack'\n elif isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == gp_offset:\n if isinstance(stmt.data, pyvex.IRExpr.RdTmp):\n tmp_offset = stmt.data.tmp # pylint:disable=cell-var-from-loop\n if tmps.get(tmp_offset, None) == 'stack':\n # found the load from stack\n # we must make sure value of that temporary variable equals to the correct gp value\n state.inspect.make_breakpoint('tmp_write', when=BP_BEFORE,\n condition=lambda s, bbl_addr_=block_addr_in_slice,\n tmp_offset_=tmp_offset:\n s.scratch.bbl_addr == bbl_addr_ and s.inspect.tmp_write_num == tmp_offset_,\n action=OverwriteTmpValueCallback(\n gp_value).overwrite_tmp_value\n )\n break\n\n @staticmethod\n def _is_gp_used_on_slice(project, b: Blade) -> bool:\n gp_offset = project.arch.registers['gp'][0]\n blocks_on_slice: Dict[int, 'Block'] = { }\n for block_addr, block_stmt_idx in b.slice.nodes():\n if block_addr not in blocks_on_slice:\n blocks_on_slice[block_addr] = project.factory.block(block_addr, cross_insn_opt=False)\n block = blocks_on_slice[block_addr]\n if block_stmt_idx == DEFAULT_STATEMENT:\n if isinstance(block.vex.next, pyvex.IRExpr.Get) and block.vex.next.offset == gp_offset:\n gp_used = True\n break\n else:\n stmt = block.vex.statements[block_stmt_idx]\n if isinstance(stmt, pyvex.IRStmt.WrTmp) \\\n and isinstance(stmt.data, pyvex.IRExpr.Get) \\\n and stmt.data.offset == gp_offset:\n gp_used = True\n break\n else:\n gp_used = False\n\n return gp_used\n", "path": "angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py"}]}
2,761
510
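For the angr record above, the crux is that a slice node whose statement index equals the `DEFAULT_STATEMENT` sentinel refers to the IRSB's `next` expression rather than an entry of `block.vex.statements`. A small illustrative restatement of the patched lookup (the function name and argument order are hypothetical, not angr API):

```python
import pyvex
from angr.utils.constants import DEFAULT_STATEMENT

def node_reads_gp(block, block_stmt_idx, gp_offset) -> bool:
    """True if the slice node (block, statement index) reads the gp register."""
    if block_stmt_idx == DEFAULT_STATEMENT:
        # Sentinel index: the node stands for the IRSB's `next` expression, so
        # indexing block.vex.statements here is what raised the IndexError.
        nxt = block.vex.next
        return isinstance(nxt, pyvex.IRExpr.Get) and nxt.offset == gp_offset
    stmt = block.vex.statements[block_stmt_idx]
    return (
        isinstance(stmt, pyvex.IRStmt.WrTmp)
        and isinstance(stmt.data, pyvex.IRExpr.Get)
        and stmt.data.offset == gp_offset
    )
```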
gh_patches_debug_40195
rasdani/github-patches
git_diff
scikit-hep__pyhf-1183
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- use strategy 0 for user-provided gradients in minuit # Description since we have exact gradient we can disable the checks minuit does cc @alexander-held --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pyhf/optimize/opt_minuit.py` Content: ``` 1 """Minuit Optimizer Class.""" 2 from .. import default_backend, exceptions 3 from .mixins import OptimizerMixin 4 import scipy 5 import iminuit 6 7 8 class minuit_optimizer(OptimizerMixin): 9 """ 10 Optimizer that uses iminuit.Minuit.migrad. 11 """ 12 13 __slots__ = ['name', 'errordef', 'steps'] 14 15 def __init__(self, *args, **kwargs): 16 """ 17 Create MINUIT Optimizer. 18 19 .. note:: 20 21 ``errordef`` should be 1.0 for a least-squares cost function and 0.5 22 for negative log-likelihood function. See page 37 of 23 http://hep.fi.infn.it/minuit.pdf. This parameter is sometimes 24 called ``UP`` in the ``MINUIT`` docs. 25 26 27 Args: 28 errordef (:obj:`float`): See minuit docs. Default is 1.0. 29 steps (:obj:`int`): Number of steps for the bounds. Default is 1000. 30 """ 31 self.name = 'minuit' 32 self.errordef = kwargs.pop('errordef', 1) 33 self.steps = kwargs.pop('steps', 1000) 34 super().__init__(*args, **kwargs) 35 36 def _get_minimizer( 37 self, objective_and_grad, init_pars, init_bounds, fixed_vals=None, do_grad=False 38 ): 39 40 step_sizes = [(b[1] - b[0]) / float(self.steps) for b in init_bounds] 41 fixed_vals = fixed_vals or [] 42 # Minuit wants True/False for each parameter 43 fixed_bools = [False] * len(init_pars) 44 for index, val in fixed_vals: 45 fixed_bools[index] = True 46 init_pars[index] = val 47 step_sizes[index] = 0.0 48 49 # Minuit requires jac=callable 50 if do_grad: 51 wrapped_objective = lambda pars: objective_and_grad(pars)[0] 52 jac = lambda pars: objective_and_grad(pars)[1] 53 else: 54 wrapped_objective = objective_and_grad 55 jac = None 56 57 kwargs = dict( 58 fcn=wrapped_objective, 59 grad=jac, 60 start=init_pars, 61 error=step_sizes, 62 limit=init_bounds, 63 fix=fixed_bools, 64 print_level=self.verbose, 65 errordef=self.errordef, 66 ) 67 return iminuit.Minuit.from_array_func(**kwargs) 68 69 def _minimize( 70 self, 71 minimizer, 72 func, 73 x0, 74 do_grad=False, 75 bounds=None, 76 fixed_vals=None, 77 return_uncertainties=False, 78 options={}, 79 ): 80 81 """ 82 Same signature as :func:`scipy.optimize.minimize`. 83 84 Note: an additional `minuit` is injected into the fitresult to get the 85 underlying minimizer. 86 87 Minimizer Options: 88 maxiter (:obj:`int`): maximum number of iterations. Default is 100000. 89 return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off. 90 91 Returns: 92 fitresult (scipy.optimize.OptimizeResult): the fit result 93 """ 94 maxiter = options.pop('maxiter', self.maxiter) 95 return_uncertainties = options.pop('return_uncertainties', False) 96 if options: 97 raise exceptions.Unsupported( 98 f"Unsupported options were passed in: {list(options.keys())}." 99 ) 100 101 minimizer.migrad(ncall=maxiter) 102 # Following lines below come from: 103 # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125 104 message = "Optimization terminated successfully." 105 if not minimizer.valid: 106 message = "Optimization failed." 
107 fmin = minimizer.fmin 108 if fmin.has_reached_call_limit: 109 message += " Call limit was reached." 110 if fmin.is_above_max_edm: 111 message += " Estimated distance to minimum too large." 112 113 n = len(x0) 114 hess_inv = default_backend.ones((n, n)) 115 if minimizer.valid: 116 hess_inv = minimizer.np_covariance() 117 118 unc = None 119 if return_uncertainties: 120 unc = minimizer.np_errors() 121 122 return scipy.optimize.OptimizeResult( 123 x=minimizer.np_values(), 124 unc=unc, 125 success=minimizer.valid, 126 fun=minimizer.fval, 127 hess_inv=hess_inv, 128 message=message, 129 nfev=minimizer.ncalls, 130 njev=minimizer.ngrads, 131 minuit=minimizer, 132 ) 133 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py --- a/src/pyhf/optimize/opt_minuit.py +++ b/src/pyhf/optimize/opt_minuit.py @@ -10,7 +10,7 @@ Optimizer that uses iminuit.Minuit.migrad. """ - __slots__ = ['name', 'errordef', 'steps'] + __slots__ = ['name', 'errordef', 'steps', 'strategy'] def __init__(self, *args, **kwargs): """ @@ -27,10 +27,12 @@ Args: errordef (:obj:`float`): See minuit docs. Default is 1.0. steps (:obj:`int`): Number of steps for the bounds. Default is 1000. + strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is None. """ self.name = 'minuit' self.errordef = kwargs.pop('errordef', 1) self.steps = kwargs.pop('steps', 1000) + self.strategy = kwargs.pop('strategy', None) super().__init__(*args, **kwargs) def _get_minimizer( @@ -87,17 +89,24 @@ Minimizer Options: maxiter (:obj:`int`): maximum number of iterations. Default is 100000. return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off. + strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is to configure in response to `do_grad`. Returns: fitresult (scipy.optimize.OptimizeResult): the fit result """ maxiter = options.pop('maxiter', self.maxiter) return_uncertainties = options.pop('return_uncertainties', False) + # 0: Fast, user-provided gradient + # 1: Default, no user-provided gradient + strategy = options.pop( + 'strategy', self.strategy if self.strategy else not do_grad + ) if options: raise exceptions.Unsupported( f"Unsupported options were passed in: {list(options.keys())}." ) + minimizer.strategy = strategy minimizer.migrad(ncall=maxiter) # Following lines below come from: # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125 @@ -113,6 +122,8 @@ n = len(x0) hess_inv = default_backend.ones((n, n)) if minimizer.valid: + # Extra call to hesse() after migrad() is always needed for good error estimates. If you pass a user-provided gradient to MINUIT, convergence is faster. + minimizer.hesse() hess_inv = minimizer.np_covariance() unc = None
{"golden_diff": "diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py\n--- a/src/pyhf/optimize/opt_minuit.py\n+++ b/src/pyhf/optimize/opt_minuit.py\n@@ -10,7 +10,7 @@\n Optimizer that uses iminuit.Minuit.migrad.\n \"\"\"\n \n- __slots__ = ['name', 'errordef', 'steps']\n+ __slots__ = ['name', 'errordef', 'steps', 'strategy']\n \n def __init__(self, *args, **kwargs):\n \"\"\"\n@@ -27,10 +27,12 @@\n Args:\n errordef (:obj:`float`): See minuit docs. Default is 1.0.\n steps (:obj:`int`): Number of steps for the bounds. Default is 1000.\n+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is None.\n \"\"\"\n self.name = 'minuit'\n self.errordef = kwargs.pop('errordef', 1)\n self.steps = kwargs.pop('steps', 1000)\n+ self.strategy = kwargs.pop('strategy', None)\n super().__init__(*args, **kwargs)\n \n def _get_minimizer(\n@@ -87,17 +89,24 @@\n Minimizer Options:\n maxiter (:obj:`int`): maximum number of iterations. Default is 100000.\n return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.\n+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is to configure in response to `do_grad`.\n \n Returns:\n fitresult (scipy.optimize.OptimizeResult): the fit result\n \"\"\"\n maxiter = options.pop('maxiter', self.maxiter)\n return_uncertainties = options.pop('return_uncertainties', False)\n+ # 0: Fast, user-provided gradient\n+ # 1: Default, no user-provided gradient\n+ strategy = options.pop(\n+ 'strategy', self.strategy if self.strategy else not do_grad\n+ )\n if options:\n raise exceptions.Unsupported(\n f\"Unsupported options were passed in: {list(options.keys())}.\"\n )\n \n+ minimizer.strategy = strategy\n minimizer.migrad(ncall=maxiter)\n # Following lines below come from:\n # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125\n@@ -113,6 +122,8 @@\n n = len(x0)\n hess_inv = default_backend.ones((n, n))\n if minimizer.valid:\n+ # Extra call to hesse() after migrad() is always needed for good error estimates. If you pass a user-provided gradient to MINUIT, convergence is faster.\n+ minimizer.hesse()\n hess_inv = minimizer.np_covariance()\n \n unc = None\n", "issue": "use strategy 0 for user-provided gradients in minuit\n# Description\r\n\r\nsince we have exact gradient we can disable the checks minuit does\r\n\r\ncc @alexander-held \n", "before_files": [{"content": "\"\"\"Minuit Optimizer Class.\"\"\"\nfrom .. import default_backend, exceptions\nfrom .mixins import OptimizerMixin\nimport scipy\nimport iminuit\n\n\nclass minuit_optimizer(OptimizerMixin):\n \"\"\"\n Optimizer that uses iminuit.Minuit.migrad.\n \"\"\"\n\n __slots__ = ['name', 'errordef', 'steps']\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create MINUIT Optimizer.\n\n .. note::\n\n ``errordef`` should be 1.0 for a least-squares cost function and 0.5\n for negative log-likelihood function. See page 37 of\n http://hep.fi.infn.it/minuit.pdf. This parameter is sometimes\n called ``UP`` in the ``MINUIT`` docs.\n\n\n Args:\n errordef (:obj:`float`): See minuit docs. Default is 1.0.\n steps (:obj:`int`): Number of steps for the bounds. 
Default is 1000.\n \"\"\"\n self.name = 'minuit'\n self.errordef = kwargs.pop('errordef', 1)\n self.steps = kwargs.pop('steps', 1000)\n super().__init__(*args, **kwargs)\n\n def _get_minimizer(\n self, objective_and_grad, init_pars, init_bounds, fixed_vals=None, do_grad=False\n ):\n\n step_sizes = [(b[1] - b[0]) / float(self.steps) for b in init_bounds]\n fixed_vals = fixed_vals or []\n # Minuit wants True/False for each parameter\n fixed_bools = [False] * len(init_pars)\n for index, val in fixed_vals:\n fixed_bools[index] = True\n init_pars[index] = val\n step_sizes[index] = 0.0\n\n # Minuit requires jac=callable\n if do_grad:\n wrapped_objective = lambda pars: objective_and_grad(pars)[0]\n jac = lambda pars: objective_and_grad(pars)[1]\n else:\n wrapped_objective = objective_and_grad\n jac = None\n\n kwargs = dict(\n fcn=wrapped_objective,\n grad=jac,\n start=init_pars,\n error=step_sizes,\n limit=init_bounds,\n fix=fixed_bools,\n print_level=self.verbose,\n errordef=self.errordef,\n )\n return iminuit.Minuit.from_array_func(**kwargs)\n\n def _minimize(\n self,\n minimizer,\n func,\n x0,\n do_grad=False,\n bounds=None,\n fixed_vals=None,\n return_uncertainties=False,\n options={},\n ):\n\n \"\"\"\n Same signature as :func:`scipy.optimize.minimize`.\n\n Note: an additional `minuit` is injected into the fitresult to get the\n underlying minimizer.\n\n Minimizer Options:\n maxiter (:obj:`int`): maximum number of iterations. Default is 100000.\n return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.\n\n Returns:\n fitresult (scipy.optimize.OptimizeResult): the fit result\n \"\"\"\n maxiter = options.pop('maxiter', self.maxiter)\n return_uncertainties = options.pop('return_uncertainties', False)\n if options:\n raise exceptions.Unsupported(\n f\"Unsupported options were passed in: {list(options.keys())}.\"\n )\n\n minimizer.migrad(ncall=maxiter)\n # Following lines below come from:\n # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125\n message = \"Optimization terminated successfully.\"\n if not minimizer.valid:\n message = \"Optimization failed.\"\n fmin = minimizer.fmin\n if fmin.has_reached_call_limit:\n message += \" Call limit was reached.\"\n if fmin.is_above_max_edm:\n message += \" Estimated distance to minimum too large.\"\n\n n = len(x0)\n hess_inv = default_backend.ones((n, n))\n if minimizer.valid:\n hess_inv = minimizer.np_covariance()\n\n unc = None\n if return_uncertainties:\n unc = minimizer.np_errors()\n\n return scipy.optimize.OptimizeResult(\n x=minimizer.np_values(),\n unc=unc,\n success=minimizer.valid,\n fun=minimizer.fval,\n hess_inv=hess_inv,\n message=message,\n nfev=minimizer.ncalls,\n njev=minimizer.ngrads,\n minuit=minimizer,\n )\n", "path": "src/pyhf/optimize/opt_minuit.py"}], "after_files": [{"content": "\"\"\"Minuit Optimizer Class.\"\"\"\nfrom .. import default_backend, exceptions\nfrom .mixins import OptimizerMixin\nimport scipy\nimport iminuit\n\n\nclass minuit_optimizer(OptimizerMixin):\n \"\"\"\n Optimizer that uses iminuit.Minuit.migrad.\n \"\"\"\n\n __slots__ = ['name', 'errordef', 'steps', 'strategy']\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create MINUIT Optimizer.\n\n .. note::\n\n ``errordef`` should be 1.0 for a least-squares cost function and 0.5\n for negative log-likelihood function. See page 37 of\n http://hep.fi.infn.it/minuit.pdf. 
This parameter is sometimes\n called ``UP`` in the ``MINUIT`` docs.\n\n\n Args:\n errordef (:obj:`float`): See minuit docs. Default is 1.0.\n steps (:obj:`int`): Number of steps for the bounds. Default is 1000.\n strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is None.\n \"\"\"\n self.name = 'minuit'\n self.errordef = kwargs.pop('errordef', 1)\n self.steps = kwargs.pop('steps', 1000)\n self.strategy = kwargs.pop('strategy', None)\n super().__init__(*args, **kwargs)\n\n def _get_minimizer(\n self, objective_and_grad, init_pars, init_bounds, fixed_vals=None, do_grad=False\n ):\n\n step_sizes = [(b[1] - b[0]) / float(self.steps) for b in init_bounds]\n fixed_vals = fixed_vals or []\n # Minuit wants True/False for each parameter\n fixed_bools = [False] * len(init_pars)\n for index, val in fixed_vals:\n fixed_bools[index] = True\n init_pars[index] = val\n step_sizes[index] = 0.0\n\n # Minuit requires jac=callable\n if do_grad:\n wrapped_objective = lambda pars: objective_and_grad(pars)[0]\n jac = lambda pars: objective_and_grad(pars)[1]\n else:\n wrapped_objective = objective_and_grad\n jac = None\n\n kwargs = dict(\n fcn=wrapped_objective,\n grad=jac,\n start=init_pars,\n error=step_sizes,\n limit=init_bounds,\n fix=fixed_bools,\n print_level=self.verbose,\n errordef=self.errordef,\n )\n return iminuit.Minuit.from_array_func(**kwargs)\n\n def _minimize(\n self,\n minimizer,\n func,\n x0,\n do_grad=False,\n bounds=None,\n fixed_vals=None,\n return_uncertainties=False,\n options={},\n ):\n\n \"\"\"\n Same signature as :func:`scipy.optimize.minimize`.\n\n Note: an additional `minuit` is injected into the fitresult to get the\n underlying minimizer.\n\n Minimizer Options:\n maxiter (:obj:`int`): maximum number of iterations. Default is 100000.\n return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.\n strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is to configure in response to `do_grad`.\n\n Returns:\n fitresult (scipy.optimize.OptimizeResult): the fit result\n \"\"\"\n maxiter = options.pop('maxiter', self.maxiter)\n return_uncertainties = options.pop('return_uncertainties', False)\n # 0: Fast, user-provided gradient\n # 1: Default, no user-provided gradient\n strategy = options.pop(\n 'strategy', self.strategy if self.strategy else not do_grad\n )\n if options:\n raise exceptions.Unsupported(\n f\"Unsupported options were passed in: {list(options.keys())}.\"\n )\n\n minimizer.strategy = strategy\n minimizer.migrad(ncall=maxiter)\n # Following lines below come from:\n # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125\n message = \"Optimization terminated successfully.\"\n if not minimizer.valid:\n message = \"Optimization failed.\"\n fmin = minimizer.fmin\n if fmin.has_reached_call_limit:\n message += \" Call limit was reached.\"\n if fmin.is_above_max_edm:\n message += \" Estimated distance to minimum too large.\"\n\n n = len(x0)\n hess_inv = default_backend.ones((n, n))\n if minimizer.valid:\n # Extra call to hesse() after migrad() is always needed for good error estimates. 
If you pass a user-provided gradient to MINUIT, convergence is faster.\n minimizer.hesse()\n hess_inv = minimizer.np_covariance()\n\n unc = None\n if return_uncertainties:\n unc = minimizer.np_errors()\n\n return scipy.optimize.OptimizeResult(\n x=minimizer.np_values(),\n unc=unc,\n success=minimizer.valid,\n fun=minimizer.fval,\n hess_inv=hess_inv,\n message=message,\n nfev=minimizer.ncalls,\n njev=minimizer.ngrads,\n minuit=minimizer,\n )\n", "path": "src/pyhf/optimize/opt_minuit.py"}]}
1,643
706
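For the pyhf record above, a hedged usage sketch (assuming a pyhf build that carries this patch; the toy model numbers are made up): the new `strategy` knob can be pinned explicitly, while leaving it unset makes `_minimize` fall back to `not do_grad`, i.e. MINUIT strategy 0 whenever the backend supplies exact gradients.

```python
import pyhf

# Pin strategy 0 explicitly; omitting `strategy` lets the patched optimizer
# choose `not do_grad` on its own (0 with autodiff backends, 1 otherwise).
pyhf.set_backend("numpy", pyhf.optimize.minuit_optimizer(strategy=0))

model = pyhf.simplemodels.hepdata_like(
    signal_data=[12.0], bkg_data=[50.0], bkg_uncerts=[3.0]
)
data = [51.0] + model.config.auxdata
print(pyhf.infer.mle.fit(data, model))
```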
gh_patches_debug_26193
rasdani/github-patches
git_diff
python-discord__site-1165
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support rescheduling of offensive messages When a message trips the filter on the bot it is removed after a period of time. During this period it is a record in the database. When this deletion date is reached the bot will attempt to remove the message from Discord and remove the record from the offensive message table. We currently handle for the message being not found (deleted) but if another error occurs resulting in the message not being deleted we still continue to delete the database record, causing the message to be left around. We should allow the bot to perform a PATCH request to the deleted message endpoint to update the delete time and reschedule if something has failed (for example, a hiccup on Discord's end). However, we must also bear in mind that permanent rescheduling could potentially leave lingering records in our database if a case is discovered where a message which cannot be deleted is rescheduled repetitively. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pydis_site/apps/api/viewsets/bot/offensive_message.py` Content: ``` 1 from rest_framework.mixins import ( 2 CreateModelMixin, 3 DestroyModelMixin, 4 ListModelMixin 5 ) 6 from rest_framework.viewsets import GenericViewSet 7 8 from pydis_site.apps.api.models.bot.offensive_message import OffensiveMessage 9 from pydis_site.apps.api.serializers import OffensiveMessageSerializer 10 11 12 class OffensiveMessageViewSet( 13 CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet 14 ): 15 """ 16 View providing CRUD access to offensive messages. 17 18 ## Routes 19 ### GET /bot/offensive-messages 20 Returns all offensive messages in the database. 21 22 #### Response format 23 >>> [ 24 ... { 25 ... 'id': '631953598091100200', 26 ... 'channel_id': '291284109232308226', 27 ... 'delete_date': '2019-11-01T21:51:15.545000Z' 28 ... }, 29 ... ... 30 ... ] 31 32 #### Status codes 33 - 200: returned on success 34 35 ### POST /bot/offensive-messages 36 Create a new offensive message object. 37 38 #### Request body 39 >>> { 40 ... 'id': int, 41 ... 'channel_id': int, 42 ... 'delete_date': datetime.datetime # ISO-8601-formatted date 43 ... } 44 45 #### Status codes 46 - 201: returned on success 47 - 400: if the body format is invalid 48 49 ### DELETE /bot/offensive-messages/<id:int> 50 Delete the offensive message object with the given `id`. 51 52 #### Status codes 53 - 204: returned on success 54 - 404: if a offensive message object with the given `id` does not exist 55 56 ## Authentication 57 Requires an API token. 58 """ 59 60 serializer_class = OffensiveMessageSerializer 61 queryset = OffensiveMessage.objects.all() 62 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pydis_site/apps/api/viewsets/bot/offensive_message.py b/pydis_site/apps/api/viewsets/bot/offensive_message.py --- a/pydis_site/apps/api/viewsets/bot/offensive_message.py +++ b/pydis_site/apps/api/viewsets/bot/offensive_message.py @@ -1,6 +1,7 @@ from rest_framework.mixins import ( CreateModelMixin, DestroyModelMixin, + UpdateModelMixin, ListModelMixin ) from rest_framework.viewsets import GenericViewSet @@ -10,7 +11,7 @@ class OffensiveMessageViewSet( - CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet + CreateModelMixin, ListModelMixin, UpdateModelMixin, DestroyModelMixin, GenericViewSet ): """ View providing CRUD access to offensive messages. @@ -46,6 +47,16 @@ - 201: returned on success - 400: if the body format is invalid + ### PATCH /bot/offensive-messages/<id:int> + Perform a partial update of the offensive message with the given `id`. + Intended to allow rescheduling the deletion date in case the bot's attempt + to delete the message failed due to another error than the message already + being deleted. + + #### Status codes + - 200: returned on success + - 404: if a offensive message object with the given `id` does not exist + ### DELETE /bot/offensive-messages/<id:int> Delete the offensive message object with the given `id`.
{"golden_diff": "diff --git a/pydis_site/apps/api/viewsets/bot/offensive_message.py b/pydis_site/apps/api/viewsets/bot/offensive_message.py\n--- a/pydis_site/apps/api/viewsets/bot/offensive_message.py\n+++ b/pydis_site/apps/api/viewsets/bot/offensive_message.py\n@@ -1,6 +1,7 @@\n from rest_framework.mixins import (\n CreateModelMixin,\n DestroyModelMixin,\n+ UpdateModelMixin,\n ListModelMixin\n )\n from rest_framework.viewsets import GenericViewSet\n@@ -10,7 +11,7 @@\n \n \n class OffensiveMessageViewSet(\n- CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet\n+ CreateModelMixin, ListModelMixin, UpdateModelMixin, DestroyModelMixin, GenericViewSet\n ):\n \"\"\"\n View providing CRUD access to offensive messages.\n@@ -46,6 +47,16 @@\n - 201: returned on success\n - 400: if the body format is invalid\n \n+ ### PATCH /bot/offensive-messages/<id:int>\n+ Perform a partial update of the offensive message with the given `id`.\n+ Intended to allow rescheduling the deletion date in case the bot's attempt\n+ to delete the message failed due to another error than the message already\n+ being deleted.\n+\n+ #### Status codes\n+ - 200: returned on success\n+ - 404: if a offensive message object with the given `id` does not exist\n+\n ### DELETE /bot/offensive-messages/<id:int>\n Delete the offensive message object with the given `id`.\n", "issue": "Support rescheduling of offensive messages\nWhen a message trips the filter on the bot it is removed after a period of time. During this period it is a record in the database.\r\n\r\nWhen this deletion date is reached the bot will attempt to remove the message from Discord and remove the record from the offensive message table. We currently handle for the message being not found (deleted) but if another error occurs resulting in the message not being deleted we still continue to delete the database record, causing the message to be left around.\r\n\r\nWe should allow the bot to perform a PATCH request to the deleted message endpoint to update the delete time and reschedule if something has failed (for example, a hiccup on Discord's end).\r\n\r\nHowever, we must also bear in mind that permanent rescheduling could potentially leave lingering records in our database if a case is discovered where a message which cannot be deleted is rescheduled repetitively.\n", "before_files": [{"content": "from rest_framework.mixins import (\n CreateModelMixin,\n DestroyModelMixin,\n ListModelMixin\n)\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom pydis_site.apps.api.models.bot.offensive_message import OffensiveMessage\nfrom pydis_site.apps.api.serializers import OffensiveMessageSerializer\n\n\nclass OffensiveMessageViewSet(\n CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet\n):\n \"\"\"\n View providing CRUD access to offensive messages.\n\n ## Routes\n ### GET /bot/offensive-messages\n Returns all offensive messages in the database.\n\n #### Response format\n >>> [\n ... {\n ... 'id': '631953598091100200',\n ... 'channel_id': '291284109232308226',\n ... 'delete_date': '2019-11-01T21:51:15.545000Z'\n ... },\n ... ...\n ... ]\n\n #### Status codes\n - 200: returned on success\n\n ### POST /bot/offensive-messages\n Create a new offensive message object.\n\n #### Request body\n >>> {\n ... 'id': int,\n ... 'channel_id': int,\n ... 'delete_date': datetime.datetime # ISO-8601-formatted date\n ... 
}\n\n #### Status codes\n - 201: returned on success\n - 400: if the body format is invalid\n\n ### DELETE /bot/offensive-messages/<id:int>\n Delete the offensive message object with the given `id`.\n\n #### Status codes\n - 204: returned on success\n - 404: if a offensive message object with the given `id` does not exist\n\n ## Authentication\n Requires an API token.\n \"\"\"\n\n serializer_class = OffensiveMessageSerializer\n queryset = OffensiveMessage.objects.all()\n", "path": "pydis_site/apps/api/viewsets/bot/offensive_message.py"}], "after_files": [{"content": "from rest_framework.mixins import (\n CreateModelMixin,\n DestroyModelMixin,\n UpdateModelMixin,\n ListModelMixin\n)\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom pydis_site.apps.api.models.bot.offensive_message import OffensiveMessage\nfrom pydis_site.apps.api.serializers import OffensiveMessageSerializer\n\n\nclass OffensiveMessageViewSet(\n CreateModelMixin, ListModelMixin, UpdateModelMixin, DestroyModelMixin, GenericViewSet\n):\n \"\"\"\n View providing CRUD access to offensive messages.\n\n ## Routes\n ### GET /bot/offensive-messages\n Returns all offensive messages in the database.\n\n #### Response format\n >>> [\n ... {\n ... 'id': '631953598091100200',\n ... 'channel_id': '291284109232308226',\n ... 'delete_date': '2019-11-01T21:51:15.545000Z'\n ... },\n ... ...\n ... ]\n\n #### Status codes\n - 200: returned on success\n\n ### POST /bot/offensive-messages\n Create a new offensive message object.\n\n #### Request body\n >>> {\n ... 'id': int,\n ... 'channel_id': int,\n ... 'delete_date': datetime.datetime # ISO-8601-formatted date\n ... }\n\n #### Status codes\n - 201: returned on success\n - 400: if the body format is invalid\n\n ### PATCH /bot/offensive-messages/<id:int>\n Perform a partial update of the offensive message with the given `id`.\n Intended to allow rescheduling the deletion date in case the bot's attempt\n to delete the message failed due to another error than the message already\n being deleted.\n\n #### Status codes\n - 200: returned on success\n - 404: if a offensive message object with the given `id` does not exist\n\n ### DELETE /bot/offensive-messages/<id:int>\n Delete the offensive message object with the given `id`.\n\n #### Status codes\n - 204: returned on success\n - 404: if a offensive message object with the given `id` does not exist\n\n ## Authentication\n Requires an API token.\n \"\"\"\n\n serializer_class = OffensiveMessageSerializer\n queryset = OffensiveMessage.objects.all()\n", "path": "pydis_site/apps/api/viewsets/bot/offensive_message.py"}]}
1,006
353
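Aside on the row above (illustrative sketch, not part of the dataset row): the whole effect of the golden_diff is to mix `UpdateModelMixin` into the viewset so that `PATCH /bot/offensive-messages/<id>` becomes available for rescheduling. A minimal client-side sketch of that call follows; the API root, the auth header format and the new timestamp are placeholders, only the URL path and the `delete_date` field name come from the row itself.

```python
# Hypothetical reschedule call -- the host and token scheme are assumptions,
# not taken from this row.
import datetime
import requests

API_ROOT = "https://example.invalid/bot"     # placeholder host
MESSAGE_ID = 631953598091100200              # example id from the row's docstring
new_delete_date = (
    datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(minutes=5)
).isoformat()

response = requests.patch(
    f"{API_ROOT}/offensive-messages/{MESSAGE_ID}",
    json={"delete_date": new_delete_date},
    headers={"Authorization": "Token <api-token>"},  # placeholder auth scheme
)
response.raise_for_status()
```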
gh_patches_debug_40983
rasdani/github-patches
git_diff
microsoft__torchgeo-644
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug: missing "not" operator to set ignore_zeros in segmentation trainer I believe there's a bug in the translation from ignore_zeros to ignore_index in the [\_\_init__ of SemanticSegmentationTask](https://github.com/microsoft/torchgeo/blob/c933694ddc9a121af9415389949aefcc84760472/torchgeo/trainers/segmentation.py#L89). `self.ignore_zeros = None if kwargs["ignore_zeros"] else 0` should be: `self.ignore_zeros = None if kwargs["ignore_zeros"] is None else 0` as is done [when define loss function](https://github.com/microsoft/torchgeo/blob/c933694ddc9a121af9415389949aefcc84760472/torchgeo/trainers/segmentation.py#L57) `self.ignore_zeros` is later used to define the "self.ignore_index" of the self.train_metrics. For sake of clarity, I'd suggest renaming it to `self.ignore_index` directly in the \_\_init__. Therefore, this ``` ... self.ignore_zeros = None if kwargs["ignore_zeros"] else 0 self.config_task() self.train_metrics = MetricCollection( [ Accuracy( num_classes=self.hparams["num_classes"], ignore_index=self.ignore_zeros, ... ``` would become ``` ... self.ignore_index = None if kwargs["ignore_zeros"] is None else 0 self.config_task() self.train_metrics = MetricCollection( [ Accuracy( num_classes=self.hparams["num_classes"], ignore_index=self.ignore_index, ... ``` If you'd like, I can definitely push a PR for to fix this bug if the solution seems ok to all. Let me know. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchgeo/trainers/segmentation.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 
3 4 """Segmentation tasks.""" 5 6 from typing import Any, Dict, cast 7 8 import segmentation_models_pytorch as smp 9 import torch 10 import torch.nn as nn 11 from pytorch_lightning.core.lightning import LightningModule 12 from torch import Tensor 13 from torch.optim.lr_scheduler import ReduceLROnPlateau 14 from torch.utils.data import DataLoader 15 from torchmetrics import Accuracy, JaccardIndex, MetricCollection 16 17 from ..datasets.utils import unbind_samples 18 from ..models import FCN 19 20 # https://github.com/pytorch/pytorch/issues/60979 21 # https://github.com/pytorch/pytorch/pull/61045 22 DataLoader.__module__ = "torch.utils.data" 23 24 25 class SemanticSegmentationTask(LightningModule): 26 """LightningModule for semantic segmentation of images.""" 27 28 def config_task(self) -> None: 29 """Configures the task based on kwargs parameters passed to the constructor.""" 30 if self.hyperparams["segmentation_model"] == "unet": 31 self.model = smp.Unet( 32 encoder_name=self.hyperparams["encoder_name"], 33 encoder_weights=self.hyperparams["encoder_weights"], 34 in_channels=self.hyperparams["in_channels"], 35 classes=self.hyperparams["num_classes"], 36 ) 37 elif self.hyperparams["segmentation_model"] == "deeplabv3+": 38 self.model = smp.DeepLabV3Plus( 39 encoder_name=self.hyperparams["encoder_name"], 40 encoder_weights=self.hyperparams["encoder_weights"], 41 in_channels=self.hyperparams["in_channels"], 42 classes=self.hyperparams["num_classes"], 43 ) 44 elif self.hyperparams["segmentation_model"] == "fcn": 45 self.model = FCN( 46 in_channels=self.hyperparams["in_channels"], 47 classes=self.hyperparams["num_classes"], 48 num_filters=self.hyperparams["num_filters"], 49 ) 50 else: 51 raise ValueError( 52 f"Model type '{self.hyperparams['segmentation_model']}' is not valid." 53 ) 54 55 if self.hyperparams["loss"] == "ce": 56 self.loss = nn.CrossEntropyLoss( 57 ignore_index=-1000 if self.ignore_zeros is None else 0 58 ) 59 elif self.hyperparams["loss"] == "jaccard": 60 self.loss = smp.losses.JaccardLoss( 61 mode="multiclass", classes=self.hyperparams["num_classes"] 62 ) 63 elif self.hyperparams["loss"] == "focal": 64 self.loss = smp.losses.FocalLoss( 65 "multiclass", ignore_index=self.ignore_zeros, normalized=True 66 ) 67 else: 68 raise ValueError(f"Loss type '{self.hyperparams['loss']}' is not valid.") 69 70 def __init__(self, **kwargs: Any) -> None: 71 """Initialize the LightningModule with a model and loss function. 
72 73 Keyword Args: 74 segmentation_model: Name of the segmentation model type to use 75 encoder_name: Name of the encoder model backbone to use 76 encoder_weights: None or "imagenet" to use imagenet pretrained weights in 77 the encoder model 78 in_channels: Number of channels in input image 79 num_classes: Number of semantic classes to predict 80 loss: Name of the loss function 81 ignore_zeros: Whether to ignore the "0" class value in the loss and metrics 82 83 Raises: 84 ValueError: if kwargs arguments are invalid 85 """ 86 super().__init__() 87 88 # Creates `self.hparams` from kwargs 89 self.save_hyperparameters() # type: ignore[operator] 90 self.hyperparams = cast(Dict[str, Any], self.hparams) 91 92 self.ignore_zeros = None if kwargs["ignore_zeros"] else 0 93 94 self.config_task() 95 96 self.train_metrics = MetricCollection( 97 [ 98 Accuracy( 99 num_classes=self.hyperparams["num_classes"], 100 ignore_index=self.ignore_zeros, 101 mdmc_average="global", 102 ), 103 JaccardIndex( 104 num_classes=self.hyperparams["num_classes"], 105 ignore_index=self.ignore_zeros, 106 ), 107 ], 108 prefix="train_", 109 ) 110 self.val_metrics = self.train_metrics.clone(prefix="val_") 111 self.test_metrics = self.train_metrics.clone(prefix="test_") 112 113 def forward(self, *args: Any, **kwargs: Any) -> Any: 114 """Forward pass of the model. 115 116 Args: 117 x: tensor of data to run through the model 118 119 Returns: 120 output from the model 121 """ 122 return self.model(*args, **kwargs) 123 124 def training_step(self, *args: Any, **kwargs: Any) -> Tensor: 125 """Compute and return the training loss. 126 127 Args: 128 batch: the output of your DataLoader 129 130 Returns: 131 training loss 132 """ 133 batch = args[0] 134 x = batch["image"] 135 y = batch["mask"] 136 y_hat = self.forward(x) 137 y_hat_hard = y_hat.argmax(dim=1) 138 139 loss = self.loss(y_hat, y) 140 141 # by default, the train step logs every `log_every_n_steps` steps where 142 # `log_every_n_steps` is a parameter to the `Trainer` object 143 self.log("train_loss", loss, on_step=True, on_epoch=False) 144 self.train_metrics(y_hat_hard, y) 145 146 return cast(Tensor, loss) 147 148 def training_epoch_end(self, outputs: Any) -> None: 149 """Logs epoch level training metrics. 150 151 Args: 152 outputs: list of items returned by training_step 153 """ 154 self.log_dict(self.train_metrics.compute()) 155 self.train_metrics.reset() 156 157 def validation_step(self, *args: Any, **kwargs: Any) -> None: 158 """Compute validation loss and log example predictions. 159 160 Args: 161 batch: the output of your DataLoader 162 batch_idx: the index of this batch 163 """ 164 batch = args[0] 165 batch_idx = args[1] 166 x = batch["image"] 167 y = batch["mask"] 168 y_hat = self.forward(x) 169 y_hat_hard = y_hat.argmax(dim=1) 170 171 loss = self.loss(y_hat, y) 172 173 self.log("val_loss", loss, on_step=False, on_epoch=True) 174 self.val_metrics(y_hat_hard, y) 175 176 if batch_idx < 10: 177 try: 178 datamodule = self.trainer.datamodule # type: ignore[union-attr] 179 batch["prediction"] = y_hat_hard 180 for key in ["image", "mask", "prediction"]: 181 batch[key] = batch[key].cpu() 182 sample = unbind_samples(batch)[0] 183 fig = datamodule.plot(sample) 184 summary_writer = self.logger.experiment # type: ignore[union-attr] 185 summary_writer.add_figure( 186 f"image/{batch_idx}", fig, global_step=self.global_step 187 ) 188 except AttributeError: 189 pass 190 191 def validation_epoch_end(self, outputs: Any) -> None: 192 """Logs epoch level validation metrics. 
193 194 Args: 195 outputs: list of items returned by validation_step 196 """ 197 self.log_dict(self.val_metrics.compute()) 198 self.val_metrics.reset() 199 200 def test_step(self, *args: Any, **kwargs: Any) -> None: 201 """Compute test loss. 202 203 Args: 204 batch: the output of your DataLoader 205 """ 206 batch = args[0] 207 x = batch["image"] 208 y = batch["mask"] 209 y_hat = self.forward(x) 210 y_hat_hard = y_hat.argmax(dim=1) 211 212 loss = self.loss(y_hat, y) 213 214 # by default, the test and validation steps only log per *epoch* 215 self.log("test_loss", loss, on_step=False, on_epoch=True) 216 self.test_metrics(y_hat_hard, y) 217 218 def test_epoch_end(self, outputs: Any) -> None: 219 """Logs epoch level test metrics. 220 221 Args: 222 outputs: list of items returned by test_step 223 """ 224 self.log_dict(self.test_metrics.compute()) 225 self.test_metrics.reset() 226 227 def configure_optimizers(self) -> Dict[str, Any]: 228 """Initialize the optimizer and learning rate scheduler. 229 230 Returns: 231 a "lr dict" according to the pytorch lightning documentation -- 232 https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers 233 """ 234 optimizer = torch.optim.Adam( 235 self.model.parameters(), lr=self.hyperparams["learning_rate"] 236 ) 237 return { 238 "optimizer": optimizer, 239 "lr_scheduler": { 240 "scheduler": ReduceLROnPlateau( 241 optimizer, 242 patience=self.hyperparams["learning_rate_schedule_patience"], 243 ), 244 "monitor": "val_loss", 245 }, 246 } 247 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchgeo/trainers/segmentation.py b/torchgeo/trainers/segmentation.py --- a/torchgeo/trainers/segmentation.py +++ b/torchgeo/trainers/segmentation.py @@ -3,6 +3,7 @@ """Segmentation tasks.""" +import warnings from typing import Any, Dict, cast import segmentation_models_pytorch as smp @@ -53,16 +54,15 @@ ) if self.hyperparams["loss"] == "ce": - self.loss = nn.CrossEntropyLoss( - ignore_index=-1000 if self.ignore_zeros is None else 0 - ) + ignore_value = -1000 if self.ignore_index is None else self.ignore_index + self.loss = nn.CrossEntropyLoss(ignore_index=ignore_value) elif self.hyperparams["loss"] == "jaccard": self.loss = smp.losses.JaccardLoss( mode="multiclass", classes=self.hyperparams["num_classes"] ) elif self.hyperparams["loss"] == "focal": self.loss = smp.losses.FocalLoss( - "multiclass", ignore_index=self.ignore_zeros, normalized=True + "multiclass", ignore_index=self.ignore_index, normalized=True ) else: raise ValueError(f"Loss type '{self.hyperparams['loss']}' is not valid.") @@ -78,7 +78,7 @@ in_channels: Number of channels in input image num_classes: Number of semantic classes to predict loss: Name of the loss function - ignore_zeros: Whether to ignore the "0" class value in the loss and metrics + ignore_index: Optional integer class index to ignore in the loss and metrics Raises: ValueError: if kwargs arguments are invalid @@ -89,20 +89,26 @@ self.save_hyperparameters() # type: ignore[operator] self.hyperparams = cast(Dict[str, Any], self.hparams) - self.ignore_zeros = None if kwargs["ignore_zeros"] else 0 - + if not isinstance(kwargs["ignore_index"], (int, type(None))): + raise ValueError("ignore_index must be an int or None") + if (kwargs["ignore_index"] is not None) and (kwargs["loss"] == "jaccard"): + warnings.warn( + "ignore_index has no effect on training when loss='jaccard'", + UserWarning, + ) + self.ignore_index = kwargs["ignore_index"] self.config_task() self.train_metrics = MetricCollection( [ Accuracy( num_classes=self.hyperparams["num_classes"], - ignore_index=self.ignore_zeros, + ignore_index=self.ignore_index, mdmc_average="global", ), JaccardIndex( num_classes=self.hyperparams["num_classes"], - ignore_index=self.ignore_zeros, + ignore_index=self.ignore_index, ), ], prefix="train_",
{"golden_diff": "diff --git a/torchgeo/trainers/segmentation.py b/torchgeo/trainers/segmentation.py\n--- a/torchgeo/trainers/segmentation.py\n+++ b/torchgeo/trainers/segmentation.py\n@@ -3,6 +3,7 @@\n \n \"\"\"Segmentation tasks.\"\"\"\n \n+import warnings\n from typing import Any, Dict, cast\n \n import segmentation_models_pytorch as smp\n@@ -53,16 +54,15 @@\n )\n \n if self.hyperparams[\"loss\"] == \"ce\":\n- self.loss = nn.CrossEntropyLoss(\n- ignore_index=-1000 if self.ignore_zeros is None else 0\n- )\n+ ignore_value = -1000 if self.ignore_index is None else self.ignore_index\n+ self.loss = nn.CrossEntropyLoss(ignore_index=ignore_value)\n elif self.hyperparams[\"loss\"] == \"jaccard\":\n self.loss = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hyperparams[\"num_classes\"]\n )\n elif self.hyperparams[\"loss\"] == \"focal\":\n self.loss = smp.losses.FocalLoss(\n- \"multiclass\", ignore_index=self.ignore_zeros, normalized=True\n+ \"multiclass\", ignore_index=self.ignore_index, normalized=True\n )\n else:\n raise ValueError(f\"Loss type '{self.hyperparams['loss']}' is not valid.\")\n@@ -78,7 +78,7 @@\n in_channels: Number of channels in input image\n num_classes: Number of semantic classes to predict\n loss: Name of the loss function\n- ignore_zeros: Whether to ignore the \"0\" class value in the loss and metrics\n+ ignore_index: Optional integer class index to ignore in the loss and metrics\n \n Raises:\n ValueError: if kwargs arguments are invalid\n@@ -89,20 +89,26 @@\n self.save_hyperparameters() # type: ignore[operator]\n self.hyperparams = cast(Dict[str, Any], self.hparams)\n \n- self.ignore_zeros = None if kwargs[\"ignore_zeros\"] else 0\n-\n+ if not isinstance(kwargs[\"ignore_index\"], (int, type(None))):\n+ raise ValueError(\"ignore_index must be an int or None\")\n+ if (kwargs[\"ignore_index\"] is not None) and (kwargs[\"loss\"] == \"jaccard\"):\n+ warnings.warn(\n+ \"ignore_index has no effect on training when loss='jaccard'\",\n+ UserWarning,\n+ )\n+ self.ignore_index = kwargs[\"ignore_index\"]\n self.config_task()\n \n self.train_metrics = MetricCollection(\n [\n Accuracy(\n num_classes=self.hyperparams[\"num_classes\"],\n- ignore_index=self.ignore_zeros,\n+ ignore_index=self.ignore_index,\n mdmc_average=\"global\",\n ),\n JaccardIndex(\n num_classes=self.hyperparams[\"num_classes\"],\n- ignore_index=self.ignore_zeros,\n+ ignore_index=self.ignore_index,\n ),\n ],\n prefix=\"train_\",\n", "issue": "Bug: missing \"not\" operator to set ignore_zeros in segmentation trainer\nI believe there's a bug in the translation from ignore_zeros to ignore_index in the [\\_\\_init__ of SemanticSegmentationTask](https://github.com/microsoft/torchgeo/blob/c933694ddc9a121af9415389949aefcc84760472/torchgeo/trainers/segmentation.py#L89).\r\n`self.ignore_zeros = None if kwargs[\"ignore_zeros\"] else 0` should be:\r\n`self.ignore_zeros = None if kwargs[\"ignore_zeros\"] is None else 0` as is done [when define loss function](https://github.com/microsoft/torchgeo/blob/c933694ddc9a121af9415389949aefcc84760472/torchgeo/trainers/segmentation.py#L57)\r\n\r\n`self.ignore_zeros` is later used to define the \"self.ignore_index\" of the self.train_metrics. For sake of clarity, I'd suggest renaming it to `self.ignore_index` directly in the \\_\\_init__. 
\r\n\r\nTherefore, this\r\n```\r\n...\r\n self.ignore_zeros = None if kwargs[\"ignore_zeros\"] else 0\r\n\r\n self.config_task()\r\n\r\n self.train_metrics = MetricCollection(\r\n [\r\n Accuracy(\r\n num_classes=self.hparams[\"num_classes\"],\r\n ignore_index=self.ignore_zeros,\r\n...\r\n```\r\nwould become\r\n```\r\n...\r\n self.ignore_index = None if kwargs[\"ignore_zeros\"] is None else 0\r\n\r\n self.config_task()\r\n\r\n self.train_metrics = MetricCollection(\r\n [\r\n Accuracy(\r\n num_classes=self.hparams[\"num_classes\"],\r\n ignore_index=self.ignore_index,\r\n...\r\n```\r\nIf you'd like, I can definitely push a PR for to fix this bug if the solution seems ok to all. Let me know.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Segmentation tasks.\"\"\"\n\nfrom typing import Any, Dict, cast\n\nimport segmentation_models_pytorch as smp\nimport torch\nimport torch.nn as nn\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom torch import Tensor\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.data import DataLoader\nfrom torchmetrics import Accuracy, JaccardIndex, MetricCollection\n\nfrom ..datasets.utils import unbind_samples\nfrom ..models import FCN\n\n# https://github.com/pytorch/pytorch/issues/60979\n# https://github.com/pytorch/pytorch/pull/61045\nDataLoader.__module__ = \"torch.utils.data\"\n\n\nclass SemanticSegmentationTask(LightningModule):\n \"\"\"LightningModule for semantic segmentation of images.\"\"\"\n\n def config_task(self) -> None:\n \"\"\"Configures the task based on kwargs parameters passed to the constructor.\"\"\"\n if self.hyperparams[\"segmentation_model\"] == \"unet\":\n self.model = smp.Unet(\n encoder_name=self.hyperparams[\"encoder_name\"],\n encoder_weights=self.hyperparams[\"encoder_weights\"],\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"segmentation_model\"] == \"deeplabv3+\":\n self.model = smp.DeepLabV3Plus(\n encoder_name=self.hyperparams[\"encoder_name\"],\n encoder_weights=self.hyperparams[\"encoder_weights\"],\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"segmentation_model\"] == \"fcn\":\n self.model = FCN(\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n num_filters=self.hyperparams[\"num_filters\"],\n )\n else:\n raise ValueError(\n f\"Model type '{self.hyperparams['segmentation_model']}' is not valid.\"\n )\n\n if self.hyperparams[\"loss\"] == \"ce\":\n self.loss = nn.CrossEntropyLoss(\n ignore_index=-1000 if self.ignore_zeros is None else 0\n )\n elif self.hyperparams[\"loss\"] == \"jaccard\":\n self.loss = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hyperparams[\"num_classes\"]\n )\n elif self.hyperparams[\"loss\"] == \"focal\":\n self.loss = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=self.ignore_zeros, normalized=True\n )\n else:\n raise ValueError(f\"Loss type '{self.hyperparams['loss']}' is not valid.\")\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\"Initialize the LightningModule with a model and loss function.\n\n Keyword Args:\n segmentation_model: Name of the segmentation model type to use\n encoder_name: Name of the encoder model backbone to use\n encoder_weights: None or \"imagenet\" to use imagenet pretrained weights in\n the encoder model\n in_channels: Number of 
channels in input image\n num_classes: Number of semantic classes to predict\n loss: Name of the loss function\n ignore_zeros: Whether to ignore the \"0\" class value in the loss and metrics\n\n Raises:\n ValueError: if kwargs arguments are invalid\n \"\"\"\n super().__init__()\n\n # Creates `self.hparams` from kwargs\n self.save_hyperparameters() # type: ignore[operator]\n self.hyperparams = cast(Dict[str, Any], self.hparams)\n\n self.ignore_zeros = None if kwargs[\"ignore_zeros\"] else 0\n\n self.config_task()\n\n self.train_metrics = MetricCollection(\n [\n Accuracy(\n num_classes=self.hyperparams[\"num_classes\"],\n ignore_index=self.ignore_zeros,\n mdmc_average=\"global\",\n ),\n JaccardIndex(\n num_classes=self.hyperparams[\"num_classes\"],\n ignore_index=self.ignore_zeros,\n ),\n ],\n prefix=\"train_\",\n )\n self.val_metrics = self.train_metrics.clone(prefix=\"val_\")\n self.test_metrics = self.train_metrics.clone(prefix=\"test_\")\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Forward pass of the model.\n\n Args:\n x: tensor of data to run through the model\n\n Returns:\n output from the model\n \"\"\"\n return self.model(*args, **kwargs)\n\n def training_step(self, *args: Any, **kwargs: Any) -> Tensor:\n \"\"\"Compute and return the training loss.\n\n Args:\n batch: the output of your DataLoader\n\n Returns:\n training loss\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self.forward(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n # by default, the train step logs every `log_every_n_steps` steps where\n # `log_every_n_steps` is a parameter to the `Trainer` object\n self.log(\"train_loss\", loss, on_step=True, on_epoch=False)\n self.train_metrics(y_hat_hard, y)\n\n return cast(Tensor, loss)\n\n def training_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level training metrics.\n\n Args:\n outputs: list of items returned by training_step\n \"\"\"\n self.log_dict(self.train_metrics.compute())\n self.train_metrics.reset()\n\n def validation_step(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Compute validation loss and log example predictions.\n\n Args:\n batch: the output of your DataLoader\n batch_idx: the index of this batch\n \"\"\"\n batch = args[0]\n batch_idx = args[1]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self.forward(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n self.log(\"val_loss\", loss, on_step=False, on_epoch=True)\n self.val_metrics(y_hat_hard, y)\n\n if batch_idx < 10:\n try:\n datamodule = self.trainer.datamodule # type: ignore[union-attr]\n batch[\"prediction\"] = y_hat_hard\n for key in [\"image\", \"mask\", \"prediction\"]:\n batch[key] = batch[key].cpu()\n sample = unbind_samples(batch)[0]\n fig = datamodule.plot(sample)\n summary_writer = self.logger.experiment # type: ignore[union-attr]\n summary_writer.add_figure(\n f\"image/{batch_idx}\", fig, global_step=self.global_step\n )\n except AttributeError:\n pass\n\n def validation_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level validation metrics.\n\n Args:\n outputs: list of items returned by validation_step\n \"\"\"\n self.log_dict(self.val_metrics.compute())\n self.val_metrics.reset()\n\n def test_step(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Compute test loss.\n\n Args:\n batch: the output of your DataLoader\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self.forward(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = 
self.loss(y_hat, y)\n\n # by default, the test and validation steps only log per *epoch*\n self.log(\"test_loss\", loss, on_step=False, on_epoch=True)\n self.test_metrics(y_hat_hard, y)\n\n def test_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level test metrics.\n\n Args:\n outputs: list of items returned by test_step\n \"\"\"\n self.log_dict(self.test_metrics.compute())\n self.test_metrics.reset()\n\n def configure_optimizers(self) -> Dict[str, Any]:\n \"\"\"Initialize the optimizer and learning rate scheduler.\n\n Returns:\n a \"lr dict\" according to the pytorch lightning documentation --\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers\n \"\"\"\n optimizer = torch.optim.Adam(\n self.model.parameters(), lr=self.hyperparams[\"learning_rate\"]\n )\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": ReduceLROnPlateau(\n optimizer,\n patience=self.hyperparams[\"learning_rate_schedule_patience\"],\n ),\n \"monitor\": \"val_loss\",\n },\n }\n", "path": "torchgeo/trainers/segmentation.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Segmentation tasks.\"\"\"\n\nimport warnings\nfrom typing import Any, Dict, cast\n\nimport segmentation_models_pytorch as smp\nimport torch\nimport torch.nn as nn\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom torch import Tensor\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.data import DataLoader\nfrom torchmetrics import Accuracy, JaccardIndex, MetricCollection\n\nfrom ..datasets.utils import unbind_samples\nfrom ..models import FCN\n\n# https://github.com/pytorch/pytorch/issues/60979\n# https://github.com/pytorch/pytorch/pull/61045\nDataLoader.__module__ = \"torch.utils.data\"\n\n\nclass SemanticSegmentationTask(LightningModule):\n \"\"\"LightningModule for semantic segmentation of images.\"\"\"\n\n def config_task(self) -> None:\n \"\"\"Configures the task based on kwargs parameters passed to the constructor.\"\"\"\n if self.hyperparams[\"segmentation_model\"] == \"unet\":\n self.model = smp.Unet(\n encoder_name=self.hyperparams[\"encoder_name\"],\n encoder_weights=self.hyperparams[\"encoder_weights\"],\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"segmentation_model\"] == \"deeplabv3+\":\n self.model = smp.DeepLabV3Plus(\n encoder_name=self.hyperparams[\"encoder_name\"],\n encoder_weights=self.hyperparams[\"encoder_weights\"],\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"segmentation_model\"] == \"fcn\":\n self.model = FCN(\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n num_filters=self.hyperparams[\"num_filters\"],\n )\n else:\n raise ValueError(\n f\"Model type '{self.hyperparams['segmentation_model']}' is not valid.\"\n )\n\n if self.hyperparams[\"loss\"] == \"ce\":\n ignore_value = -1000 if self.ignore_index is None else self.ignore_index\n self.loss = nn.CrossEntropyLoss(ignore_index=ignore_value)\n elif self.hyperparams[\"loss\"] == \"jaccard\":\n self.loss = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hyperparams[\"num_classes\"]\n )\n elif self.hyperparams[\"loss\"] == \"focal\":\n self.loss = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=self.ignore_index, normalized=True\n )\n else:\n raise 
ValueError(f\"Loss type '{self.hyperparams['loss']}' is not valid.\")\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\"Initialize the LightningModule with a model and loss function.\n\n Keyword Args:\n segmentation_model: Name of the segmentation model type to use\n encoder_name: Name of the encoder model backbone to use\n encoder_weights: None or \"imagenet\" to use imagenet pretrained weights in\n the encoder model\n in_channels: Number of channels in input image\n num_classes: Number of semantic classes to predict\n loss: Name of the loss function\n ignore_index: Optional integer class index to ignore in the loss and metrics\n\n Raises:\n ValueError: if kwargs arguments are invalid\n \"\"\"\n super().__init__()\n\n # Creates `self.hparams` from kwargs\n self.save_hyperparameters() # type: ignore[operator]\n self.hyperparams = cast(Dict[str, Any], self.hparams)\n\n if not isinstance(kwargs[\"ignore_index\"], (int, type(None))):\n raise ValueError(\"ignore_index must be an int or None\")\n if (kwargs[\"ignore_index\"] is not None) and (kwargs[\"loss\"] == \"jaccard\"):\n warnings.warn(\n \"ignore_index has no effect on training when loss='jaccard'\",\n UserWarning,\n )\n self.ignore_index = kwargs[\"ignore_index\"]\n self.config_task()\n\n self.train_metrics = MetricCollection(\n [\n Accuracy(\n num_classes=self.hyperparams[\"num_classes\"],\n ignore_index=self.ignore_index,\n mdmc_average=\"global\",\n ),\n JaccardIndex(\n num_classes=self.hyperparams[\"num_classes\"],\n ignore_index=self.ignore_index,\n ),\n ],\n prefix=\"train_\",\n )\n self.val_metrics = self.train_metrics.clone(prefix=\"val_\")\n self.test_metrics = self.train_metrics.clone(prefix=\"test_\")\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Forward pass of the model.\n\n Args:\n x: tensor of data to run through the model\n\n Returns:\n output from the model\n \"\"\"\n return self.model(*args, **kwargs)\n\n def training_step(self, *args: Any, **kwargs: Any) -> Tensor:\n \"\"\"Compute and return the training loss.\n\n Args:\n batch: the output of your DataLoader\n\n Returns:\n training loss\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self.forward(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n # by default, the train step logs every `log_every_n_steps` steps where\n # `log_every_n_steps` is a parameter to the `Trainer` object\n self.log(\"train_loss\", loss, on_step=True, on_epoch=False)\n self.train_metrics(y_hat_hard, y)\n\n return cast(Tensor, loss)\n\n def training_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level training metrics.\n\n Args:\n outputs: list of items returned by training_step\n \"\"\"\n self.log_dict(self.train_metrics.compute())\n self.train_metrics.reset()\n\n def validation_step(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Compute validation loss and log example predictions.\n\n Args:\n batch: the output of your DataLoader\n batch_idx: the index of this batch\n \"\"\"\n batch = args[0]\n batch_idx = args[1]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self.forward(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n self.log(\"val_loss\", loss, on_step=False, on_epoch=True)\n self.val_metrics(y_hat_hard, y)\n\n if batch_idx < 10:\n try:\n datamodule = self.trainer.datamodule # type: ignore[union-attr]\n batch[\"prediction\"] = y_hat_hard\n for key in [\"image\", \"mask\", \"prediction\"]:\n batch[key] = batch[key].cpu()\n sample = unbind_samples(batch)[0]\n fig = 
datamodule.plot(sample)\n summary_writer = self.logger.experiment # type: ignore[union-attr]\n summary_writer.add_figure(\n f\"image/{batch_idx}\", fig, global_step=self.global_step\n )\n except AttributeError:\n pass\n\n def validation_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level validation metrics.\n\n Args:\n outputs: list of items returned by validation_step\n \"\"\"\n self.log_dict(self.val_metrics.compute())\n self.val_metrics.reset()\n\n def test_step(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Compute test loss.\n\n Args:\n batch: the output of your DataLoader\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self.forward(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n # by default, the test and validation steps only log per *epoch*\n self.log(\"test_loss\", loss, on_step=False, on_epoch=True)\n self.test_metrics(y_hat_hard, y)\n\n def test_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level test metrics.\n\n Args:\n outputs: list of items returned by test_step\n \"\"\"\n self.log_dict(self.test_metrics.compute())\n self.test_metrics.reset()\n\n def configure_optimizers(self) -> Dict[str, Any]:\n \"\"\"Initialize the optimizer and learning rate scheduler.\n\n Returns:\n a \"lr dict\" according to the pytorch lightning documentation --\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers\n \"\"\"\n optimizer = torch.optim.Adam(\n self.model.parameters(), lr=self.hyperparams[\"learning_rate\"]\n )\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": ReduceLROnPlateau(\n optimizer,\n patience=self.hyperparams[\"learning_rate_schedule_patience\"],\n ),\n \"monitor\": \"val_loss\",\n },\n }\n", "path": "torchgeo/trainers/segmentation.py"}]}
3,198
660
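Aside on the row above (standalone illustration, no torchgeo needed): the bug report boils down to the difference between a truthiness test and an explicit `is None` test on the `ignore_zeros` keyword. The short loop below shows exactly where the two expressions disagree.

```python
for ignore_zeros in (None, False, True):
    buggy = None if ignore_zeros else 0             # expression in the original __init__
    intended = None if ignore_zeros is None else 0  # comparison the issue proposes
    print(ignore_zeros, buggy, intended)

# prints:
#   None 0 None
#   False 0 0
#   True None 0
# The two expressions disagree for True and for None -- the inversion the issue
# reports (passing ignore_zeros=True ends up not ignoring class 0 in the metrics).
```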
gh_patches_debug_15880
rasdani/github-patches
git_diff
learningequality__kolibri-1266
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cannot play MP3 files in Kolibri ## Summary MP3 files are inside the `.kolibri\content\storage` folder and I can play them in the Windows Media Player, but do not load from Kolibri. ## System information - Version: KolibriSetup-0.2.0.dev3.exe - Operating system: Windows 8.1 - Browser: Firefox ## Screenshots ![mp32](https://cloud.githubusercontent.com/assets/1457929/23808849/3645b420-0580-11e7-9628-e0e01cac6073.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kolibri/logger/api.py` Content: ``` 1 from django.db.models.query import F 2 from kolibri.auth.api import KolibriAuthPermissions, KolibriAuthPermissionsFilter 3 from kolibri.auth.filters import HierarchyRelationsFilter 4 from kolibri.content.api import OptionalPageNumberPagination 5 from rest_framework import filters, viewsets 6 7 from .models import AttemptLog, ContentRatingLog, ContentSessionLog, ContentSummaryLog, ExamAttemptLog, ExamLog, MasteryLog, UserSessionLog 8 from .serializers import ( 9 AttemptLogSerializer, ContentRatingLogSerializer, ContentSessionLogSerializer, ContentSummaryLogSerializer, ExamAttemptLogSerializer, 10 ExamLogSerializer, MasteryLogSerializer, UserSessionLogSerializer 11 ) 12 13 14 class BaseLogFilter(filters.FilterSet): 15 facility = filters.django_filters.MethodFilter() 16 classroom = filters.django_filters.MethodFilter() 17 learner_group = filters.django_filters.MethodFilter() 18 19 # Only device owner (superuser) can filter by facilities 20 def filter_facility(self, queryset, value): 21 return queryset.filter(user__facility_id=value) 22 23 def filter_classroom(self, queryset, value): 24 return HierarchyRelationsFilter(queryset).filter_by_hierarchy( 25 ancestor_collection=value, 26 target_user=F("user"), 27 ) 28 29 def filter_learner_group(self, queryset, value): 30 return HierarchyRelationsFilter(queryset).filter_by_hierarchy( 31 ancestor_collection=value, 32 target_user=F("user"), 33 ) 34 35 36 class ContentSessionLogFilter(BaseLogFilter): 37 38 class Meta: 39 model = ContentSessionLog 40 fields = ['user_id', 'content_id'] 41 42 43 class ContentSessionLogViewSet(viewsets.ModelViewSet): 44 permission_classes = (KolibriAuthPermissions,) 45 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend) 46 queryset = ContentSessionLog.objects.all() 47 serializer_class = ContentSessionLogSerializer 48 pagination_class = OptionalPageNumberPagination 49 filter_class = ContentSessionLogFilter 50 51 52 class ContentSummaryLogFilter(BaseLogFilter): 53 54 class Meta: 55 model = ContentSummaryLog 56 fields = ['user_id', 'content_id'] 57 58 59 class ContentSummaryLogViewSet(viewsets.ModelViewSet): 60 permission_classes = (KolibriAuthPermissions,) 61 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend) 62 queryset = ContentSummaryLog.objects.all() 63 serializer_class = ContentSummaryLogSerializer 64 pagination_class = OptionalPageNumberPagination 65 filter_class = ContentSummaryLogFilter 66 67 68 class ContentRatingLogFilter(BaseLogFilter): 69 70 class Meta: 71 model = ContentRatingLog 72 fields = ['user_id', 'content_id'] 73 74 75 class ContentRatingLogViewSet(viewsets.ModelViewSet): 76 permission_classes = (KolibriAuthPermissions,) 77 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend) 78 queryset = ContentRatingLog.objects.all() 79 
serializer_class = ContentRatingLogSerializer 80 pagination_class = OptionalPageNumberPagination 81 filter_class = ContentRatingLogFilter 82 83 84 class UserSessionLogFilter(BaseLogFilter): 85 86 class Meta: 87 model = UserSessionLog 88 fields = ['user_id'] 89 90 91 class UserSessionLogViewSet(viewsets.ModelViewSet): 92 permission_classes = (KolibriAuthPermissions,) 93 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend) 94 queryset = UserSessionLog.objects.all() 95 serializer_class = UserSessionLogSerializer 96 pagination_class = OptionalPageNumberPagination 97 filter_class = UserSessionLogFilter 98 99 100 class MasteryFilter(filters.FilterSet): 101 102 class Meta: 103 model = MasteryLog 104 fields = ['summarylog'] 105 106 class MasteryLogViewSet(viewsets.ModelViewSet): 107 permission_classes = (KolibriAuthPermissions,) 108 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend) 109 queryset = MasteryLog.objects.all() 110 serializer_class = MasteryLogSerializer 111 pagination_class = OptionalPageNumberPagination 112 filter_class = MasteryFilter 113 114 class AttemptFilter(filters.FilterSet): 115 content = filters.django_filters.MethodFilter() 116 user = filters.django_filters.MethodFilter() 117 118 def filter_content(self, queryset, value): 119 return queryset.filter(masterylog__summarylog__content_id=value) 120 121 def filter_user(self, queryset, value): 122 return queryset.filter(masterylog__summarylog__user_id=value) 123 124 class Meta: 125 model = AttemptLog 126 fields = ['masterylog', 'complete'] 127 128 class AttemptLogViewSet(viewsets.ModelViewSet): 129 permission_classes = (KolibriAuthPermissions,) 130 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend, filters.OrderingFilter) 131 queryset = AttemptLog.objects.all() 132 serializer_class = AttemptLogSerializer 133 pagination_class = OptionalPageNumberPagination 134 filter_class = AttemptFilter 135 ordering_fields = ('end_timestamp',) 136 ordering = ('end_timestamp',) 137 138 139 class ExamAttemptFilter(filters.FilterSet): 140 exam = filters.django_filters.MethodFilter() 141 user = filters.django_filters.MethodFilter() 142 143 def filter_exam(self, queryset, value): 144 return queryset.filter(examlog__exam=value) 145 146 def filter_user(self, queryset, value): 147 return queryset.filter(examlog__user=value) 148 149 class Meta: 150 model = ExamAttemptLog 151 fields = ['examlog', 'exam', 'user'] 152 153 class ExamAttemptLogViewSet(viewsets.ModelViewSet): 154 permission_classes = (KolibriAuthPermissions,) 155 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend, filters.OrderingFilter) 156 queryset = ExamAttemptLog.objects.all() 157 serializer_class = ExamAttemptLogSerializer 158 pagination_class = OptionalPageNumberPagination 159 filter_class = ExamAttemptFilter 160 161 class ExamLogFilter(BaseLogFilter): 162 163 class Meta: 164 model = ExamLog 165 fields = ['user', 'exam'] 166 167 class ExamLogViewSet(viewsets.ModelViewSet): 168 permission_classes = (KolibriAuthPermissions,) 169 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend) 170 queryset = ExamLog.objects.all() 171 serializer_class = ExamLogSerializer 172 pagination_class = OptionalPageNumberPagination 173 filter_class = ExamLogFilter 174 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kolibri/logger/api.py b/kolibri/logger/api.py --- a/kolibri/logger/api.py +++ b/kolibri/logger/api.py @@ -113,17 +113,13 @@ class AttemptFilter(filters.FilterSet): content = filters.django_filters.MethodFilter() - user = filters.django_filters.MethodFilter() def filter_content(self, queryset, value): return queryset.filter(masterylog__summarylog__content_id=value) - def filter_user(self, queryset, value): - return queryset.filter(masterylog__summarylog__user_id=value) - class Meta: model = AttemptLog - fields = ['masterylog', 'complete'] + fields = ['masterylog', 'complete', 'user', 'content'] class AttemptLogViewSet(viewsets.ModelViewSet): permission_classes = (KolibriAuthPermissions,)
{"golden_diff": "diff --git a/kolibri/logger/api.py b/kolibri/logger/api.py\n--- a/kolibri/logger/api.py\n+++ b/kolibri/logger/api.py\n@@ -113,17 +113,13 @@\n \n class AttemptFilter(filters.FilterSet):\n content = filters.django_filters.MethodFilter()\n- user = filters.django_filters.MethodFilter()\n \n def filter_content(self, queryset, value):\n return queryset.filter(masterylog__summarylog__content_id=value)\n \n- def filter_user(self, queryset, value):\n- return queryset.filter(masterylog__summarylog__user_id=value)\n-\n class Meta:\n model = AttemptLog\n- fields = ['masterylog', 'complete']\n+ fields = ['masterylog', 'complete', 'user', 'content']\n \n class AttemptLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n", "issue": "Cannot play MP3 files in Kolibri\n## Summary\r\n\r\nMP3 files are inside the `.kolibri\\content\\storage` folder and I can play them in the Windows Media Player, but do not load from Kolibri.\r\n\r\n## System information\r\n\r\n - Version: KolibriSetup-0.2.0.dev3.exe\r\n - Operating system: Windows 8.1\r\n - Browser: Firefox\r\n\r\n## Screenshots\r\n\r\n![mp32](https://cloud.githubusercontent.com/assets/1457929/23808849/3645b420-0580-11e7-9628-e0e01cac6073.png)\r\n\n", "before_files": [{"content": "from django.db.models.query import F\nfrom kolibri.auth.api import KolibriAuthPermissions, KolibriAuthPermissionsFilter\nfrom kolibri.auth.filters import HierarchyRelationsFilter\nfrom kolibri.content.api import OptionalPageNumberPagination\nfrom rest_framework import filters, viewsets\n\nfrom .models import AttemptLog, ContentRatingLog, ContentSessionLog, ContentSummaryLog, ExamAttemptLog, ExamLog, MasteryLog, UserSessionLog\nfrom .serializers import (\n AttemptLogSerializer, ContentRatingLogSerializer, ContentSessionLogSerializer, ContentSummaryLogSerializer, ExamAttemptLogSerializer,\n ExamLogSerializer, MasteryLogSerializer, UserSessionLogSerializer\n)\n\n\nclass BaseLogFilter(filters.FilterSet):\n facility = filters.django_filters.MethodFilter()\n classroom = filters.django_filters.MethodFilter()\n learner_group = filters.django_filters.MethodFilter()\n\n # Only device owner (superuser) can filter by facilities\n def filter_facility(self, queryset, value):\n return queryset.filter(user__facility_id=value)\n\n def filter_classroom(self, queryset, value):\n return HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n ancestor_collection=value,\n target_user=F(\"user\"),\n )\n\n def filter_learner_group(self, queryset, value):\n return HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n ancestor_collection=value,\n target_user=F(\"user\"),\n )\n\n\nclass ContentSessionLogFilter(BaseLogFilter):\n\n class Meta:\n model = ContentSessionLog\n fields = ['user_id', 'content_id']\n\n\nclass ContentSessionLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = ContentSessionLog.objects.all()\n serializer_class = ContentSessionLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ContentSessionLogFilter\n\n\nclass ContentSummaryLogFilter(BaseLogFilter):\n\n class Meta:\n model = ContentSummaryLog\n fields = ['user_id', 'content_id']\n\n\nclass ContentSummaryLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = ContentSummaryLog.objects.all()\n serializer_class = 
ContentSummaryLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ContentSummaryLogFilter\n\n\nclass ContentRatingLogFilter(BaseLogFilter):\n\n class Meta:\n model = ContentRatingLog\n fields = ['user_id', 'content_id']\n\n\nclass ContentRatingLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = ContentRatingLog.objects.all()\n serializer_class = ContentRatingLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ContentRatingLogFilter\n\n\nclass UserSessionLogFilter(BaseLogFilter):\n\n class Meta:\n model = UserSessionLog\n fields = ['user_id']\n\n\nclass UserSessionLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = UserSessionLog.objects.all()\n serializer_class = UserSessionLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = UserSessionLogFilter\n\n\nclass MasteryFilter(filters.FilterSet):\n\n class Meta:\n model = MasteryLog\n fields = ['summarylog']\n\nclass MasteryLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = MasteryLog.objects.all()\n serializer_class = MasteryLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = MasteryFilter\n\nclass AttemptFilter(filters.FilterSet):\n content = filters.django_filters.MethodFilter()\n user = filters.django_filters.MethodFilter()\n\n def filter_content(self, queryset, value):\n return queryset.filter(masterylog__summarylog__content_id=value)\n\n def filter_user(self, queryset, value):\n return queryset.filter(masterylog__summarylog__user_id=value)\n\n class Meta:\n model = AttemptLog\n fields = ['masterylog', 'complete']\n\nclass AttemptLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend, filters.OrderingFilter)\n queryset = AttemptLog.objects.all()\n serializer_class = AttemptLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = AttemptFilter\n ordering_fields = ('end_timestamp',)\n ordering = ('end_timestamp',)\n\n\nclass ExamAttemptFilter(filters.FilterSet):\n exam = filters.django_filters.MethodFilter()\n user = filters.django_filters.MethodFilter()\n\n def filter_exam(self, queryset, value):\n return queryset.filter(examlog__exam=value)\n\n def filter_user(self, queryset, value):\n return queryset.filter(examlog__user=value)\n\n class Meta:\n model = ExamAttemptLog\n fields = ['examlog', 'exam', 'user']\n\nclass ExamAttemptLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend, filters.OrderingFilter)\n queryset = ExamAttemptLog.objects.all()\n serializer_class = ExamAttemptLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ExamAttemptFilter\n\nclass ExamLogFilter(BaseLogFilter):\n\n class Meta:\n model = ExamLog\n fields = ['user', 'exam']\n\nclass ExamLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = ExamLog.objects.all()\n serializer_class = ExamLogSerializer\n 
pagination_class = OptionalPageNumberPagination\n filter_class = ExamLogFilter\n", "path": "kolibri/logger/api.py"}], "after_files": [{"content": "from django.db.models.query import F\nfrom kolibri.auth.api import KolibriAuthPermissions, KolibriAuthPermissionsFilter\nfrom kolibri.auth.filters import HierarchyRelationsFilter\nfrom kolibri.content.api import OptionalPageNumberPagination\nfrom rest_framework import filters, viewsets\n\nfrom .models import AttemptLog, ContentRatingLog, ContentSessionLog, ContentSummaryLog, ExamAttemptLog, ExamLog, MasteryLog, UserSessionLog\nfrom .serializers import (\n AttemptLogSerializer, ContentRatingLogSerializer, ContentSessionLogSerializer, ContentSummaryLogSerializer, ExamAttemptLogSerializer,\n ExamLogSerializer, MasteryLogSerializer, UserSessionLogSerializer\n)\n\n\nclass BaseLogFilter(filters.FilterSet):\n facility = filters.django_filters.MethodFilter()\n classroom = filters.django_filters.MethodFilter()\n learner_group = filters.django_filters.MethodFilter()\n\n # Only device owner (superuser) can filter by facilities\n def filter_facility(self, queryset, value):\n return queryset.filter(user__facility_id=value)\n\n def filter_classroom(self, queryset, value):\n return HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n ancestor_collection=value,\n target_user=F(\"user\"),\n )\n\n def filter_learner_group(self, queryset, value):\n return HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n ancestor_collection=value,\n target_user=F(\"user\"),\n )\n\n\nclass ContentSessionLogFilter(BaseLogFilter):\n\n class Meta:\n model = ContentSessionLog\n fields = ['user_id', 'content_id']\n\n\nclass ContentSessionLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = ContentSessionLog.objects.all()\n serializer_class = ContentSessionLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ContentSessionLogFilter\n\n\nclass ContentSummaryLogFilter(BaseLogFilter):\n\n class Meta:\n model = ContentSummaryLog\n fields = ['user_id', 'content_id']\n\n\nclass ContentSummaryLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = ContentSummaryLog.objects.all()\n serializer_class = ContentSummaryLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ContentSummaryLogFilter\n\n\nclass ContentRatingLogFilter(BaseLogFilter):\n\n class Meta:\n model = ContentRatingLog\n fields = ['user_id', 'content_id']\n\n\nclass ContentRatingLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = ContentRatingLog.objects.all()\n serializer_class = ContentRatingLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ContentRatingLogFilter\n\n\nclass UserSessionLogFilter(BaseLogFilter):\n\n class Meta:\n model = UserSessionLog\n fields = ['user_id']\n\n\nclass UserSessionLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = UserSessionLog.objects.all()\n serializer_class = UserSessionLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = UserSessionLogFilter\n\n\nclass 
MasteryFilter(filters.FilterSet):\n\n class Meta:\n model = MasteryLog\n fields = ['summarylog']\n\nclass MasteryLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = MasteryLog.objects.all()\n serializer_class = MasteryLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = MasteryFilter\n\nclass AttemptFilter(filters.FilterSet):\n content = filters.django_filters.MethodFilter()\n\n def filter_content(self, queryset, value):\n return queryset.filter(masterylog__summarylog__content_id=value)\n\n class Meta:\n model = AttemptLog\n fields = ['masterylog', 'complete', 'user', 'content']\n\nclass AttemptLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend, filters.OrderingFilter)\n queryset = AttemptLog.objects.all()\n serializer_class = AttemptLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = AttemptFilter\n ordering_fields = ('end_timestamp',)\n ordering = ('end_timestamp',)\n\n\nclass ExamAttemptFilter(filters.FilterSet):\n exam = filters.django_filters.MethodFilter()\n user = filters.django_filters.MethodFilter()\n\n def filter_exam(self, queryset, value):\n return queryset.filter(examlog__exam=value)\n\n def filter_user(self, queryset, value):\n return queryset.filter(examlog__user=value)\n\n class Meta:\n model = ExamAttemptLog\n fields = ['examlog', 'exam', 'user']\n\nclass ExamAttemptLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend, filters.OrderingFilter)\n queryset = ExamAttemptLog.objects.all()\n serializer_class = ExamAttemptLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ExamAttemptFilter\n\nclass ExamLogFilter(BaseLogFilter):\n\n class Meta:\n model = ExamLog\n fields = ['user', 'exam']\n\nclass ExamLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = ExamLog.objects.all()\n serializer_class = ExamLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ExamLogFilter\n", "path": "kolibri/logger/api.py"}]}
2,135
197
gh_patches_debug_17868
rasdani/github-patches
git_diff
openai__gym-1497
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- EzPickle used incorrectly on some environments Some environments use `EzPickle` class incorrectly and thus the environment serialization does not result in an expected copy of the environment. An example of such class is the `HandReach-v0`: https://github.com/openai/gym/blob/a488c2fe2fbd4408ba9e193985ec2d86033cab6d/gym/envs/robotics/hand/reach.py#L66 I think the correct usage would be to move the call in the beginning of the `__init__`-method and call ```python utils.EzPickle.__init__( distance_threshold=distance_threshold, n_substeps=n_substeps, relative_control=relative_control, initial_qpos=initial_qpos, reward_type=reward_type, ) ``` or alternatively ```python utils.EzPickle.__init__(**locals()) ``` Here's a simple script to verify the issue: ```python import pickle import gym def main(): env1 = gym.envs.make('HandReach-v0', distance_threshold=1e-6) env2 = pickle.loads(pickle.dumps(env1)) assert env1.distance_threshold == env2.distance_threshold, ( env1.distance_threshold, env2.distance_threshold) if __name__ == '__main__': main() ``` Running it results in the following: ``` $ python ./tests/test_hand_copy.py Traceback (most recent call last): File "./tests/test_hand_copy.py", line 15, in <module> main() File "./tests/test_hand_copy.py", line 11, in main env1.distance_threshold, env2.distance_threshold) AssertionError: (1e-06, 0.01) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gym/envs/robotics/hand/reach.py` Content: ``` 1 import os 2 import numpy as np 3 4 from gym import utils 5 from gym.envs.robotics import hand_env 6 from gym.envs.robotics.utils import robot_get_obs 7 8 9 FINGERTIP_SITE_NAMES = [ 10 'robot0:S_fftip', 11 'robot0:S_mftip', 12 'robot0:S_rftip', 13 'robot0:S_lftip', 14 'robot0:S_thtip', 15 ] 16 17 18 DEFAULT_INITIAL_QPOS = { 19 'robot0:WRJ1': -0.16514339750464327, 20 'robot0:WRJ0': -0.31973286565062153, 21 'robot0:FFJ3': 0.14340512546557435, 22 'robot0:FFJ2': 0.32028208333591573, 23 'robot0:FFJ1': 0.7126053607727917, 24 'robot0:FFJ0': 0.6705281001412586, 25 'robot0:MFJ3': 0.000246444303701037, 26 'robot0:MFJ2': 0.3152655251085491, 27 'robot0:MFJ1': 0.7659800313729842, 28 'robot0:MFJ0': 0.7323156897425923, 29 'robot0:RFJ3': 0.00038520700007378114, 30 'robot0:RFJ2': 0.36743546201985233, 31 'robot0:RFJ1': 0.7119514095008576, 32 'robot0:RFJ0': 0.6699446327514138, 33 'robot0:LFJ4': 0.0525442258033891, 34 'robot0:LFJ3': -0.13615534724474673, 35 'robot0:LFJ2': 0.39872030433433003, 36 'robot0:LFJ1': 0.7415570009679252, 37 'robot0:LFJ0': 0.704096378652974, 38 'robot0:THJ4': 0.003673823825070126, 39 'robot0:THJ3': 0.5506291436028695, 40 'robot0:THJ2': -0.014515151997119306, 41 'robot0:THJ1': -0.0015229223564485414, 42 'robot0:THJ0': -0.7894883021600622, 43 } 44 45 46 # Ensure we get the path separator correct on windows 47 MODEL_XML_PATH = os.path.join('hand', 'reach.xml') 48 49 50 def goal_distance(goal_a, goal_b): 51 assert goal_a.shape == goal_b.shape 52 return np.linalg.norm(goal_a - goal_b, axis=-1) 53 54 55 class HandReachEnv(hand_env.HandEnv, utils.EzPickle): 56 def __init__( 57 self, distance_threshold=0.01, n_substeps=20, relative_control=False, 58 initial_qpos=DEFAULT_INITIAL_QPOS, reward_type='sparse', 59 ): 60 self.distance_threshold = distance_threshold 61 self.reward_type = reward_type 62 63 hand_env.HandEnv.__init__( 64 self, MODEL_XML_PATH, 
n_substeps=n_substeps, initial_qpos=initial_qpos, 65 relative_control=relative_control) 66 utils.EzPickle.__init__(self) 67 68 def _get_achieved_goal(self): 69 goal = [self.sim.data.get_site_xpos(name) for name in FINGERTIP_SITE_NAMES] 70 return np.array(goal).flatten() 71 72 # GoalEnv methods 73 # ---------------------------- 74 75 def compute_reward(self, achieved_goal, goal, info): 76 d = goal_distance(achieved_goal, goal) 77 if self.reward_type == 'sparse': 78 return -(d > self.distance_threshold).astype(np.float32) 79 else: 80 return -d 81 82 # RobotEnv methods 83 # ---------------------------- 84 85 def _env_setup(self, initial_qpos): 86 for name, value in initial_qpos.items(): 87 self.sim.data.set_joint_qpos(name, value) 88 self.sim.forward() 89 90 self.initial_goal = self._get_achieved_goal().copy() 91 self.palm_xpos = self.sim.data.body_xpos[self.sim.model.body_name2id('robot0:palm')].copy() 92 93 def _get_obs(self): 94 robot_qpos, robot_qvel = robot_get_obs(self.sim) 95 achieved_goal = self._get_achieved_goal().ravel() 96 observation = np.concatenate([robot_qpos, robot_qvel, achieved_goal]) 97 return { 98 'observation': observation.copy(), 99 'achieved_goal': achieved_goal.copy(), 100 'desired_goal': self.goal.copy(), 101 } 102 103 def _sample_goal(self): 104 thumb_name = 'robot0:S_thtip' 105 finger_names = [name for name in FINGERTIP_SITE_NAMES if name != thumb_name] 106 finger_name = self.np_random.choice(finger_names) 107 108 thumb_idx = FINGERTIP_SITE_NAMES.index(thumb_name) 109 finger_idx = FINGERTIP_SITE_NAMES.index(finger_name) 110 assert thumb_idx != finger_idx 111 112 # Pick a meeting point above the hand. 113 meeting_pos = self.palm_xpos + np.array([0.0, -0.09, 0.05]) 114 meeting_pos += self.np_random.normal(scale=0.005, size=meeting_pos.shape) 115 116 # Slightly move meeting goal towards the respective finger to avoid that they 117 # overlap. 118 goal = self.initial_goal.copy().reshape(-1, 3) 119 for idx in [thumb_idx, finger_idx]: 120 offset_direction = (meeting_pos - goal[idx]) 121 offset_direction /= np.linalg.norm(offset_direction) 122 goal[idx] = meeting_pos - 0.005 * offset_direction 123 124 if self.np_random.uniform() < 0.1: 125 # With some probability, ask all fingers to move back to the origin. 126 # This avoids that the thumb constantly stays near the goal position already. 127 goal = self.initial_goal.copy() 128 return goal.flatten() 129 130 def _is_success(self, achieved_goal, desired_goal): 131 d = goal_distance(achieved_goal, desired_goal) 132 return (d < self.distance_threshold).astype(np.float32) 133 134 def _render_callback(self): 135 # Visualize targets. 136 sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy() 137 goal = self.goal.reshape(5, 3) 138 for finger_idx in range(5): 139 site_name = 'target{}'.format(finger_idx) 140 site_id = self.sim.model.site_name2id(site_name) 141 self.sim.model.site_pos[site_id] = goal[finger_idx] - sites_offset[site_id] 142 143 # Visualize finger positions. 144 achieved_goal = self._get_achieved_goal().reshape(5, 3) 145 for finger_idx in range(5): 146 site_name = 'finger{}'.format(finger_idx) 147 site_id = self.sim.model.site_name2id(site_name) 148 self.sim.model.site_pos[site_id] = achieved_goal[finger_idx] - sites_offset[site_id] 149 self.sim.forward() 150 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gym/envs/robotics/hand/reach.py b/gym/envs/robotics/hand/reach.py --- a/gym/envs/robotics/hand/reach.py +++ b/gym/envs/robotics/hand/reach.py @@ -57,13 +57,13 @@ self, distance_threshold=0.01, n_substeps=20, relative_control=False, initial_qpos=DEFAULT_INITIAL_QPOS, reward_type='sparse', ): + utils.EzPickle.__init__(**locals()) self.distance_threshold = distance_threshold self.reward_type = reward_type hand_env.HandEnv.__init__( self, MODEL_XML_PATH, n_substeps=n_substeps, initial_qpos=initial_qpos, relative_control=relative_control) - utils.EzPickle.__init__(self) def _get_achieved_goal(self): goal = [self.sim.data.get_site_xpos(name) for name in FINGERTIP_SITE_NAMES]
{"golden_diff": "diff --git a/gym/envs/robotics/hand/reach.py b/gym/envs/robotics/hand/reach.py\n--- a/gym/envs/robotics/hand/reach.py\n+++ b/gym/envs/robotics/hand/reach.py\n@@ -57,13 +57,13 @@\n self, distance_threshold=0.01, n_substeps=20, relative_control=False,\n initial_qpos=DEFAULT_INITIAL_QPOS, reward_type='sparse',\n ):\n+ utils.EzPickle.__init__(**locals())\n self.distance_threshold = distance_threshold\n self.reward_type = reward_type\n \n hand_env.HandEnv.__init__(\n self, MODEL_XML_PATH, n_substeps=n_substeps, initial_qpos=initial_qpos,\n relative_control=relative_control)\n- utils.EzPickle.__init__(self)\n \n def _get_achieved_goal(self):\n goal = [self.sim.data.get_site_xpos(name) for name in FINGERTIP_SITE_NAMES]\n", "issue": "EzPickle used incorrectly on some environments\nSome environments use `EzPickle` class incorrectly and thus the environment serialization does not result in an expected copy of the environment. An example of such class is the `HandReach-v0`: https://github.com/openai/gym/blob/a488c2fe2fbd4408ba9e193985ec2d86033cab6d/gym/envs/robotics/hand/reach.py#L66\r\n\r\nI think the correct usage would be to move the call in the beginning of the `__init__`-method and call\r\n```python\r\nutils.EzPickle.__init__(\r\n distance_threshold=distance_threshold,\r\n n_substeps=n_substeps,\r\n relative_control=relative_control,\r\n initial_qpos=initial_qpos,\r\n reward_type=reward_type,\r\n)\r\n```\r\nor alternatively\r\n```python\r\nutils.EzPickle.__init__(**locals())\r\n```\r\nHere's a simple script to verify the issue:\r\n```python\r\nimport pickle\r\n\r\nimport gym\r\n\r\n\r\ndef main():\r\n env1 = gym.envs.make('HandReach-v0', distance_threshold=1e-6)\r\n env2 = pickle.loads(pickle.dumps(env1))\r\n\r\n assert env1.distance_threshold == env2.distance_threshold, (\r\n env1.distance_threshold, env2.distance_threshold)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n```\r\n\r\nRunning it results in the following:\r\n```\r\n$ python ./tests/test_hand_copy.py\r\nTraceback (most recent call last):\r\n File \"./tests/test_hand_copy.py\", line 15, in <module>\r\n main()\r\n File \"./tests/test_hand_copy.py\", line 11, in main\r\n env1.distance_threshold, env2.distance_threshold)\r\nAssertionError: (1e-06, 0.01)\r\n```\r\n\n", "before_files": [{"content": "import os\nimport numpy as np\n\nfrom gym import utils\nfrom gym.envs.robotics import hand_env\nfrom gym.envs.robotics.utils import robot_get_obs\n\n\nFINGERTIP_SITE_NAMES = [\n 'robot0:S_fftip',\n 'robot0:S_mftip',\n 'robot0:S_rftip',\n 'robot0:S_lftip',\n 'robot0:S_thtip',\n]\n\n\nDEFAULT_INITIAL_QPOS = {\n 'robot0:WRJ1': -0.16514339750464327,\n 'robot0:WRJ0': -0.31973286565062153,\n 'robot0:FFJ3': 0.14340512546557435,\n 'robot0:FFJ2': 0.32028208333591573,\n 'robot0:FFJ1': 0.7126053607727917,\n 'robot0:FFJ0': 0.6705281001412586,\n 'robot0:MFJ3': 0.000246444303701037,\n 'robot0:MFJ2': 0.3152655251085491,\n 'robot0:MFJ1': 0.7659800313729842,\n 'robot0:MFJ0': 0.7323156897425923,\n 'robot0:RFJ3': 0.00038520700007378114,\n 'robot0:RFJ2': 0.36743546201985233,\n 'robot0:RFJ1': 0.7119514095008576,\n 'robot0:RFJ0': 0.6699446327514138,\n 'robot0:LFJ4': 0.0525442258033891,\n 'robot0:LFJ3': -0.13615534724474673,\n 'robot0:LFJ2': 0.39872030433433003,\n 'robot0:LFJ1': 0.7415570009679252,\n 'robot0:LFJ0': 0.704096378652974,\n 'robot0:THJ4': 0.003673823825070126,\n 'robot0:THJ3': 0.5506291436028695,\n 'robot0:THJ2': -0.014515151997119306,\n 'robot0:THJ1': -0.0015229223564485414,\n 'robot0:THJ0': -0.7894883021600622,\n}\n\n\n# Ensure we 
get the path separator correct on windows\nMODEL_XML_PATH = os.path.join('hand', 'reach.xml')\n\n\ndef goal_distance(goal_a, goal_b):\n assert goal_a.shape == goal_b.shape\n return np.linalg.norm(goal_a - goal_b, axis=-1)\n\n\nclass HandReachEnv(hand_env.HandEnv, utils.EzPickle):\n def __init__(\n self, distance_threshold=0.01, n_substeps=20, relative_control=False,\n initial_qpos=DEFAULT_INITIAL_QPOS, reward_type='sparse',\n ):\n self.distance_threshold = distance_threshold\n self.reward_type = reward_type\n\n hand_env.HandEnv.__init__(\n self, MODEL_XML_PATH, n_substeps=n_substeps, initial_qpos=initial_qpos,\n relative_control=relative_control)\n utils.EzPickle.__init__(self)\n\n def _get_achieved_goal(self):\n goal = [self.sim.data.get_site_xpos(name) for name in FINGERTIP_SITE_NAMES]\n return np.array(goal).flatten()\n\n # GoalEnv methods\n # ----------------------------\n\n def compute_reward(self, achieved_goal, goal, info):\n d = goal_distance(achieved_goal, goal)\n if self.reward_type == 'sparse':\n return -(d > self.distance_threshold).astype(np.float32)\n else:\n return -d\n\n # RobotEnv methods\n # ----------------------------\n\n def _env_setup(self, initial_qpos):\n for name, value in initial_qpos.items():\n self.sim.data.set_joint_qpos(name, value)\n self.sim.forward()\n\n self.initial_goal = self._get_achieved_goal().copy()\n self.palm_xpos = self.sim.data.body_xpos[self.sim.model.body_name2id('robot0:palm')].copy()\n\n def _get_obs(self):\n robot_qpos, robot_qvel = robot_get_obs(self.sim)\n achieved_goal = self._get_achieved_goal().ravel()\n observation = np.concatenate([robot_qpos, robot_qvel, achieved_goal])\n return {\n 'observation': observation.copy(),\n 'achieved_goal': achieved_goal.copy(),\n 'desired_goal': self.goal.copy(),\n }\n\n def _sample_goal(self):\n thumb_name = 'robot0:S_thtip'\n finger_names = [name for name in FINGERTIP_SITE_NAMES if name != thumb_name]\n finger_name = self.np_random.choice(finger_names)\n\n thumb_idx = FINGERTIP_SITE_NAMES.index(thumb_name)\n finger_idx = FINGERTIP_SITE_NAMES.index(finger_name)\n assert thumb_idx != finger_idx\n\n # Pick a meeting point above the hand.\n meeting_pos = self.palm_xpos + np.array([0.0, -0.09, 0.05])\n meeting_pos += self.np_random.normal(scale=0.005, size=meeting_pos.shape)\n\n # Slightly move meeting goal towards the respective finger to avoid that they\n # overlap.\n goal = self.initial_goal.copy().reshape(-1, 3)\n for idx in [thumb_idx, finger_idx]:\n offset_direction = (meeting_pos - goal[idx])\n offset_direction /= np.linalg.norm(offset_direction)\n goal[idx] = meeting_pos - 0.005 * offset_direction\n\n if self.np_random.uniform() < 0.1:\n # With some probability, ask all fingers to move back to the origin.\n # This avoids that the thumb constantly stays near the goal position already.\n goal = self.initial_goal.copy()\n return goal.flatten()\n\n def _is_success(self, achieved_goal, desired_goal):\n d = goal_distance(achieved_goal, desired_goal)\n return (d < self.distance_threshold).astype(np.float32)\n\n def _render_callback(self):\n # Visualize targets.\n sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()\n goal = self.goal.reshape(5, 3)\n for finger_idx in range(5):\n site_name = 'target{}'.format(finger_idx)\n site_id = self.sim.model.site_name2id(site_name)\n self.sim.model.site_pos[site_id] = goal[finger_idx] - sites_offset[site_id]\n\n # Visualize finger positions.\n achieved_goal = self._get_achieved_goal().reshape(5, 3)\n for finger_idx in range(5):\n site_name = 
'finger{}'.format(finger_idx)\n site_id = self.sim.model.site_name2id(site_name)\n self.sim.model.site_pos[site_id] = achieved_goal[finger_idx] - sites_offset[site_id]\n self.sim.forward()\n", "path": "gym/envs/robotics/hand/reach.py"}], "after_files": [{"content": "import os\nimport numpy as np\n\nfrom gym import utils\nfrom gym.envs.robotics import hand_env\nfrom gym.envs.robotics.utils import robot_get_obs\n\n\nFINGERTIP_SITE_NAMES = [\n 'robot0:S_fftip',\n 'robot0:S_mftip',\n 'robot0:S_rftip',\n 'robot0:S_lftip',\n 'robot0:S_thtip',\n]\n\n\nDEFAULT_INITIAL_QPOS = {\n 'robot0:WRJ1': -0.16514339750464327,\n 'robot0:WRJ0': -0.31973286565062153,\n 'robot0:FFJ3': 0.14340512546557435,\n 'robot0:FFJ2': 0.32028208333591573,\n 'robot0:FFJ1': 0.7126053607727917,\n 'robot0:FFJ0': 0.6705281001412586,\n 'robot0:MFJ3': 0.000246444303701037,\n 'robot0:MFJ2': 0.3152655251085491,\n 'robot0:MFJ1': 0.7659800313729842,\n 'robot0:MFJ0': 0.7323156897425923,\n 'robot0:RFJ3': 0.00038520700007378114,\n 'robot0:RFJ2': 0.36743546201985233,\n 'robot0:RFJ1': 0.7119514095008576,\n 'robot0:RFJ0': 0.6699446327514138,\n 'robot0:LFJ4': 0.0525442258033891,\n 'robot0:LFJ3': -0.13615534724474673,\n 'robot0:LFJ2': 0.39872030433433003,\n 'robot0:LFJ1': 0.7415570009679252,\n 'robot0:LFJ0': 0.704096378652974,\n 'robot0:THJ4': 0.003673823825070126,\n 'robot0:THJ3': 0.5506291436028695,\n 'robot0:THJ2': -0.014515151997119306,\n 'robot0:THJ1': -0.0015229223564485414,\n 'robot0:THJ0': -0.7894883021600622,\n}\n\n\n# Ensure we get the path separator correct on windows\nMODEL_XML_PATH = os.path.join('hand', 'reach.xml')\n\n\ndef goal_distance(goal_a, goal_b):\n assert goal_a.shape == goal_b.shape\n return np.linalg.norm(goal_a - goal_b, axis=-1)\n\n\nclass HandReachEnv(hand_env.HandEnv, utils.EzPickle):\n def __init__(\n self, distance_threshold=0.01, n_substeps=20, relative_control=False,\n initial_qpos=DEFAULT_INITIAL_QPOS, reward_type='sparse',\n ):\n utils.EzPickle.__init__(**locals())\n self.distance_threshold = distance_threshold\n self.reward_type = reward_type\n\n hand_env.HandEnv.__init__(\n self, MODEL_XML_PATH, n_substeps=n_substeps, initial_qpos=initial_qpos,\n relative_control=relative_control)\n\n def _get_achieved_goal(self):\n goal = [self.sim.data.get_site_xpos(name) for name in FINGERTIP_SITE_NAMES]\n return np.array(goal).flatten()\n\n # GoalEnv methods\n # ----------------------------\n\n def compute_reward(self, achieved_goal, goal, info):\n d = goal_distance(achieved_goal, goal)\n if self.reward_type == 'sparse':\n return -(d > self.distance_threshold).astype(np.float32)\n else:\n return -d\n\n # RobotEnv methods\n # ----------------------------\n\n def _env_setup(self, initial_qpos):\n for name, value in initial_qpos.items():\n self.sim.data.set_joint_qpos(name, value)\n self.sim.forward()\n\n self.initial_goal = self._get_achieved_goal().copy()\n self.palm_xpos = self.sim.data.body_xpos[self.sim.model.body_name2id('robot0:palm')].copy()\n\n def _get_obs(self):\n robot_qpos, robot_qvel = robot_get_obs(self.sim)\n achieved_goal = self._get_achieved_goal().ravel()\n observation = np.concatenate([robot_qpos, robot_qvel, achieved_goal])\n return {\n 'observation': observation.copy(),\n 'achieved_goal': achieved_goal.copy(),\n 'desired_goal': self.goal.copy(),\n }\n\n def _sample_goal(self):\n thumb_name = 'robot0:S_thtip'\n finger_names = [name for name in FINGERTIP_SITE_NAMES if name != thumb_name]\n finger_name = self.np_random.choice(finger_names)\n\n thumb_idx = FINGERTIP_SITE_NAMES.index(thumb_name)\n 
finger_idx = FINGERTIP_SITE_NAMES.index(finger_name)\n assert thumb_idx != finger_idx\n\n # Pick a meeting point above the hand.\n meeting_pos = self.palm_xpos + np.array([0.0, -0.09, 0.05])\n meeting_pos += self.np_random.normal(scale=0.005, size=meeting_pos.shape)\n\n # Slightly move meeting goal towards the respective finger to avoid that they\n # overlap.\n goal = self.initial_goal.copy().reshape(-1, 3)\n for idx in [thumb_idx, finger_idx]:\n offset_direction = (meeting_pos - goal[idx])\n offset_direction /= np.linalg.norm(offset_direction)\n goal[idx] = meeting_pos - 0.005 * offset_direction\n\n if self.np_random.uniform() < 0.1:\n # With some probability, ask all fingers to move back to the origin.\n # This avoids that the thumb constantly stays near the goal position already.\n goal = self.initial_goal.copy()\n return goal.flatten()\n\n def _is_success(self, achieved_goal, desired_goal):\n d = goal_distance(achieved_goal, desired_goal)\n return (d < self.distance_threshold).astype(np.float32)\n\n def _render_callback(self):\n # Visualize targets.\n sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()\n goal = self.goal.reshape(5, 3)\n for finger_idx in range(5):\n site_name = 'target{}'.format(finger_idx)\n site_id = self.sim.model.site_name2id(site_name)\n self.sim.model.site_pos[site_id] = goal[finger_idx] - sites_offset[site_id]\n\n # Visualize finger positions.\n achieved_goal = self._get_achieved_goal().reshape(5, 3)\n for finger_idx in range(5):\n site_name = 'finger{}'.format(finger_idx)\n site_id = self.sim.model.site_name2id(site_name)\n self.sim.model.site_pos[site_id] = achieved_goal[finger_idx] - sites_offset[site_id]\n self.sim.forward()\n", "path": "gym/envs/robotics/hand/reach.py"}]}
2,836
225
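The golden patch in the `openai__gym-1497` record above works because `gym.utils.EzPickle` stores the constructor arguments it is given and replays them when the environment is unpickled; calling it with no arguments at the end of `__init__` is what silently dropped the `distance_threshold` override. The sketch below imitates that mechanism so it runs with the standard library alone: `MiniEzPickle` and `HandReachLikeEnv` are made-up stand-ins written for illustration, and the real `gym.utils.EzPickle` may differ in implementation details.

```python
# Minimal, self-contained sketch of the EzPickle pattern fixed above.
# MiniEzPickle is an illustrative stand-in, not gym.utils.EzPickle itself.
import pickle


class MiniEzPickle:
    """Remember constructor arguments so unpickling rebuilds an equivalent object."""

    def __init__(self, *args, **kwargs):
        self._ezpickle_args = args
        self._ezpickle_kwargs = kwargs

    def __getstate__(self):
        # Only the constructor arguments are pickled, not the live state.
        return {"args": self._ezpickle_args, "kwargs": self._ezpickle_kwargs}

    def __setstate__(self, state):
        # Rebuild a fresh instance with the saved arguments and adopt its attributes.
        rebuilt = type(self)(*state["args"], **state["kwargs"])
        self.__dict__.update(rebuilt.__dict__)


class HandReachLikeEnv(MiniEzPickle):
    """Toy environment showing the corrected call order from the patch."""

    def __init__(self, distance_threshold=0.01, reward_type="sparse"):
        # The fix: hand the *actual* keyword arguments to the pickling helper first,
        # instead of calling it with no arguments at the end of __init__.
        MiniEzPickle.__init__(
            self, distance_threshold=distance_threshold, reward_type=reward_type
        )
        self.distance_threshold = distance_threshold
        self.reward_type = reward_type


env1 = HandReachLikeEnv(distance_threshold=1e-6)
env2 = pickle.loads(pickle.dumps(env1))
assert env2.distance_threshold == env1.distance_threshold  # round-trip keeps the override
```

Passing the keywords explicitly, as above, matches the issue's first suggestion; the `**locals()` form used in the golden diff is the terser equivalent when the argument list is long.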
gh_patches_debug_16293
rasdani/github-patches
git_diff
nf-core__tools-1261
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Linting does not recognize README Nextflow minimum version mention in Quick Start When running `nf-core lint` with a readme file that has the following in it: ``` ## Quick Start 1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`) ``` A warning is triggered: ``` readme: README did not have a Nextflow minimum version mentioned in Quick Start section. ``` This warning should not be triggering as the minimum nextflow version is in the readme file. Link to code location https://github.com/nf-core/tools/blob/01291016652284bfba23a900399fa0155906a7c5/nf_core/lint/readme.py#L65-L66 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nf_core/lint/readme.py` Content: ``` 1 #!/usr/bin/env python 2 3 import os 4 import re 5 6 7 def readme(self): 8 """Repository ``README.md`` tests 9 10 The ``README.md`` files for a project are very important and must meet some requirements: 11 12 * Nextflow badge 13 14 * If no Nextflow badge is found, a warning is given 15 * If a badge is found but the version doesn't match the minimum version in the config file, the test fails 16 * Example badge code: 17 18 .. code-block:: md 19 20 [![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A50.27.6-brightgreen.svg)](https://www.nextflow.io/) 21 22 * Bioconda badge 23 24 * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required 25 * Required badge code: 26 27 .. code-block:: md 28 29 [![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/) 30 31 .. note:: These badges are a markdown image ``![alt-text](<image URL>)`` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write. 32 """ 33 passed = [] 34 warned = [] 35 failed = [] 36 37 with open(os.path.join(self.wf_path, "README.md"), "r") as fh: 38 content = fh.read() 39 40 # Check that there is a readme badge showing the minimum required version of Nextflow 41 # [![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.04.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/) 42 # and that it has the correct version 43 nf_badge_re = r"\[!\[Nextflow\]\(https://img\.shields\.io/badge/nextflow%20DSL2-%E2%89%A5([\d\.]+)-23aa62\.svg\?labelColor=000000\)\]\(https://www\.nextflow\.io/\)" 44 match = re.search(nf_badge_re, content) 45 if match: 46 nf_badge_version = match.group(1).strip("'\"") 47 try: 48 assert nf_badge_version == self.minNextflowVersion 49 except (AssertionError, KeyError): 50 failed.append( 51 "README Nextflow minimum version badge does not match config. Badge: `{}`, Config: `{}`".format( 52 nf_badge_version, self.minNextflowVersion 53 ) 54 ) 55 else: 56 passed.append( 57 "README Nextflow minimum version badge matched config. Badge: `{}`, Config: `{}`".format( 58 nf_badge_version, self.minNextflowVersion 59 ) 60 ) 61 else: 62 warned.append("README did not have a Nextflow minimum version badge.") 63 64 # Check that the minimum version mentioned in the quick start section is consistent 65 # Looking for: "1. 
Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)" 66 nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://nf-co.re/usage/installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)" 67 match = re.search(nf_version_re, content) 68 if match: 69 nf_quickstart_version = match.group(1) 70 try: 71 assert nf_quickstart_version == self.minNextflowVersion 72 except (AssertionError, KeyError): 73 failed.append( 74 f"README Nextflow minimium version in Quick Start section does not match config. README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`" 75 ) 76 else: 77 passed.append( 78 f"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`" 79 ) 80 else: 81 warned.append("README did not have a Nextflow minimum version mentioned in Quick Start section.") 82 83 return {"passed": passed, "warned": warned, "failed": failed} 84 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nf_core/lint/readme.py b/nf_core/lint/readme.py --- a/nf_core/lint/readme.py +++ b/nf_core/lint/readme.py @@ -62,8 +62,8 @@ warned.append("README did not have a Nextflow minimum version badge.") # Check that the minimum version mentioned in the quick start section is consistent - # Looking for: "1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)" - nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://nf-co.re/usage/installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)" + # Looking for: "1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)" + nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://www.nextflow.io/docs/latest/getstarted.html#installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)" match = re.search(nf_version_re, content) if match: nf_quickstart_version = match.group(1)
{"golden_diff": "diff --git a/nf_core/lint/readme.py b/nf_core/lint/readme.py\n--- a/nf_core/lint/readme.py\n+++ b/nf_core/lint/readme.py\n@@ -62,8 +62,8 @@\n warned.append(\"README did not have a Nextflow minimum version badge.\")\n \n # Check that the minimum version mentioned in the quick start section is consistent\n- # Looking for: \"1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)\"\n- nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://nf-co.re/usage/installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n+ # Looking for: \"1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)\"\n+ nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://www.nextflow.io/docs/latest/getstarted.html#installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n match = re.search(nf_version_re, content)\n if match:\n nf_quickstart_version = match.group(1)\n", "issue": "Linting does not recognize README Nextflow minimum version mention in Quick Start\nWhen running `nf-core lint` with a readme file that has the following in it:\r\n\r\n```\r\n## Quick Start\r\n\r\n1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)\r\n```\r\n\r\nA warning is triggered:\r\n\r\n```\r\nreadme: README did not have a Nextflow minimum version mentioned in Quick Start section.\r\n```\r\n\r\nThis warning should not be triggering as the minimum nextflow version is in the readme file.\r\n\r\nLink to code location\r\n\r\nhttps://github.com/nf-core/tools/blob/01291016652284bfba23a900399fa0155906a7c5/nf_core/lint/readme.py#L65-L66\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport re\n\n\ndef readme(self):\n \"\"\"Repository ``README.md`` tests\n\n The ``README.md`` files for a project are very important and must meet some requirements:\n\n * Nextflow badge\n\n * If no Nextflow badge is found, a warning is given\n * If a badge is found but the version doesn't match the minimum version in the config file, the test fails\n * Example badge code:\n\n .. code-block:: md\n\n [![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A50.27.6-brightgreen.svg)](https://www.nextflow.io/)\n\n * Bioconda badge\n\n * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required\n * Required badge code:\n\n .. code-block:: md\n\n [![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/)\n\n .. 
note:: These badges are a markdown image ``![alt-text](<image URL>)`` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write.\n \"\"\"\n passed = []\n warned = []\n failed = []\n\n with open(os.path.join(self.wf_path, \"README.md\"), \"r\") as fh:\n content = fh.read()\n\n # Check that there is a readme badge showing the minimum required version of Nextflow\n # [![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.04.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\n # and that it has the correct version\n nf_badge_re = r\"\\[!\\[Nextflow\\]\\(https://img\\.shields\\.io/badge/nextflow%20DSL2-%E2%89%A5([\\d\\.]+)-23aa62\\.svg\\?labelColor=000000\\)\\]\\(https://www\\.nextflow\\.io/\\)\"\n match = re.search(nf_badge_re, content)\n if match:\n nf_badge_version = match.group(1).strip(\"'\\\"\")\n try:\n assert nf_badge_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n \"README Nextflow minimum version badge does not match config. Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n passed.append(\n \"README Nextflow minimum version badge matched config. Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version badge.\")\n\n # Check that the minimum version mentioned in the quick start section is consistent\n # Looking for: \"1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)\"\n nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://nf-co.re/usage/installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n match = re.search(nf_version_re, content)\n if match:\n nf_quickstart_version = match.group(1)\n try:\n assert nf_quickstart_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n f\"README Nextflow minimium version in Quick Start section does not match config. README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`\"\n )\n else:\n passed.append(\n f\"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`\"\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version mentioned in Quick Start section.\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed}\n", "path": "nf_core/lint/readme.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport re\n\n\ndef readme(self):\n \"\"\"Repository ``README.md`` tests\n\n The ``README.md`` files for a project are very important and must meet some requirements:\n\n * Nextflow badge\n\n * If no Nextflow badge is found, a warning is given\n * If a badge is found but the version doesn't match the minimum version in the config file, the test fails\n * Example badge code:\n\n .. code-block:: md\n\n [![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A50.27.6-brightgreen.svg)](https://www.nextflow.io/)\n\n * Bioconda badge\n\n * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required\n * Required badge code:\n\n .. code-block:: md\n\n [![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/)\n\n .. 
note:: These badges are a markdown image ``![alt-text](<image URL>)`` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write.\n \"\"\"\n passed = []\n warned = []\n failed = []\n\n with open(os.path.join(self.wf_path, \"README.md\"), \"r\") as fh:\n content = fh.read()\n\n # Check that there is a readme badge showing the minimum required version of Nextflow\n # [![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.04.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\n # and that it has the correct version\n nf_badge_re = r\"\\[!\\[Nextflow\\]\\(https://img\\.shields\\.io/badge/nextflow%20DSL2-%E2%89%A5([\\d\\.]+)-23aa62\\.svg\\?labelColor=000000\\)\\]\\(https://www\\.nextflow\\.io/\\)\"\n match = re.search(nf_badge_re, content)\n if match:\n nf_badge_version = match.group(1).strip(\"'\\\"\")\n try:\n assert nf_badge_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n \"README Nextflow minimum version badge does not match config. Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n passed.append(\n \"README Nextflow minimum version badge matched config. Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version badge.\")\n\n # Check that the minimum version mentioned in the quick start section is consistent\n # Looking for: \"1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)\"\n nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://www.nextflow.io/docs/latest/getstarted.html#installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n match = re.search(nf_version_re, content)\n if match:\n nf_quickstart_version = match.group(1)\n try:\n assert nf_quickstart_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n f\"README Nextflow minimium version in Quick Start section does not match config. README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`\"\n )\n else:\n passed.append(\n f\"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`\"\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version mentioned in Quick Start section.\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed}\n", "path": "nf_core/lint/readme.py"}]}
1,545
291
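The fix in the `nf-core__tools-1261` record above is a one-line swap of the URL inside `nf_version_re`, so the lint check recognises the Quick Start line quoted in the issue. The snippet below is a stand-alone check of that updated pattern using only the standard library; it exercises the regex against the reported README line and does not run `nf-core lint` itself.

```python
# Standalone check of the corrected Quick Start regex from the golden diff above.
import re

# Pattern as it appears after the patch (URL now points at the nextflow.io docs).
nf_version_re = (
    r"1\.\s*Install\s*\[`Nextflow`\]"
    r"\(https://www.nextflow.io/docs/latest/getstarted.html#installation\)"
    r"\s*\(`>=(\d*\.\d*\.\d*)`\)"
)

# The README line from the issue report that previously triggered the warning.
readme_line = (
    "1. Install [`Nextflow`]"
    "(https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)"
)

match = re.search(nf_version_re, readme_line)
assert match is not None, "pattern should now recognise the Quick Start line"
print("minimum Nextflow version found:", match.group(1))  # -> 21.04.0
```

As in the upstream pattern, the dots in the URL are left unescaped; `.` still matches a literal dot, so the check accepts the reported line, just marginally more loosely than a fully escaped URL would.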
gh_patches_debug_50355
rasdani/github-patches
git_diff
pypi__warehouse-6747
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add Brazilian Portugese to localization footer https://hosted.weblate.org/projects/pypa/warehouse/#translations says that we're now at 100% translated for Brazilian Portugese. Therefore, let's insert the footer of available locales/translations per #6624, and add Brazilian Portugese. @yeraydiazdiaz @nlhkabu can either of you do this? Thanks. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `warehouse/i18n/__init__.py` Content: ``` 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 from babel.core import Locale 14 from pyramid.i18n import TranslationStringFactory, default_locale_negotiator 15 from pyramid.threadlocal import get_current_request 16 17 KNOWN_LOCALES = {"en": "English"} 18 19 LOCALE_ATTR = "_LOCALE_" 20 21 _translation_factory = TranslationStringFactory("messages") 22 23 24 class LazyString: 25 def __init__(self, fn, *args, **kwargs): 26 self.fn = fn 27 self.args = args 28 self.mapping = kwargs.get("mapping", {}) 29 self.kwargs = kwargs 30 31 def __json__(self, request): 32 return str(self) 33 34 def __mod__(self, new_mapping): 35 mapping = self.mapping.copy() 36 mapping.update(new_mapping) 37 return LazyString(self.fn, *self.args, mapping=new_mapping, **self.kwargs) 38 39 def __str__(self): 40 return self.fn(*self.args, **self.kwargs) 41 42 43 def _locale(request): 44 """ 45 Computes a babel.core:Locale() object for this request. 46 """ 47 return Locale.parse(request.locale_name, sep="_") 48 49 50 def _negotiate_locale(request): 51 locale_name = getattr(request, LOCALE_ATTR, None) 52 if locale_name is not None: 53 return locale_name 54 55 locale_name = request.params.get(LOCALE_ATTR) 56 if locale_name is not None: 57 return locale_name 58 59 locale_name = request.cookies.get(LOCALE_ATTR) 60 if locale_name is not None: 61 return locale_name 62 63 if not request.accept_language: 64 return default_locale_negotiator(request) 65 66 return request.accept_language.best_match( 67 tuple(KNOWN_LOCALES.keys()), default_match=default_locale_negotiator(request) 68 ) 69 70 71 def localize(message, **kwargs): 72 def _localize(message, **kwargs): 73 request = get_current_request() 74 return request.localizer.translate(_translation_factory(message, **kwargs)) 75 76 return LazyString(_localize, message, **kwargs) 77 78 79 def includeme(config): 80 # Add the request attributes 81 config.add_request_method(_locale, name="locale", reify=True) 82 83 # Register our translation directory. 
84 config.add_translation_dirs("warehouse:locale/") 85 86 config.set_locale_negotiator(_negotiate_locale) 87 88 # Register our i18n/l10n filters for Jinja2 89 filters = config.get_settings().setdefault("jinja2.filters", {}) 90 filters.setdefault("format_date", "warehouse.i18n.filters:format_date") 91 filters.setdefault("format_datetime", "warehouse.i18n.filters:format_datetime") 92 filters.setdefault( 93 "format_rfc822_datetime", "warehouse.i18n.filters:format_rfc822_datetime" 94 ) 95 filters.setdefault("format_number", "warehouse.i18n.filters:format_number") 96 97 jglobals = config.get_settings().setdefault("jinja2.globals", {}) 98 jglobals.setdefault("KNOWN_LOCALES", "warehouse.i18n:KNOWN_LOCALES") 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py --- a/warehouse/i18n/__init__.py +++ b/warehouse/i18n/__init__.py @@ -14,7 +14,7 @@ from pyramid.i18n import TranslationStringFactory, default_locale_negotiator from pyramid.threadlocal import get_current_request -KNOWN_LOCALES = {"en": "English"} +KNOWN_LOCALES = {"en": "English", "pt_BR": "Portuguese (Brazil)"} LOCALE_ATTR = "_LOCALE_"
{"golden_diff": "diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py\n--- a/warehouse/i18n/__init__.py\n+++ b/warehouse/i18n/__init__.py\n@@ -14,7 +14,7 @@\n from pyramid.i18n import TranslationStringFactory, default_locale_negotiator\n from pyramid.threadlocal import get_current_request\n \n-KNOWN_LOCALES = {\"en\": \"English\"}\n+KNOWN_LOCALES = {\"en\": \"English\", \"pt_BR\": \"Portuguese (Brazil)\"}\n \n LOCALE_ATTR = \"_LOCALE_\"\n", "issue": "Add Brazilian Portugese to localization footer\nhttps://hosted.weblate.org/projects/pypa/warehouse/#translations says that we're now at 100% translated for Brazilian Portugese. Therefore, let's insert the footer of available locales/translations per #6624, and add Brazilian Portugese.\r\n\r\n@yeraydiazdiaz @nlhkabu can either of you do this? Thanks.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom babel.core import Locale\nfrom pyramid.i18n import TranslationStringFactory, default_locale_negotiator\nfrom pyramid.threadlocal import get_current_request\n\nKNOWN_LOCALES = {\"en\": \"English\"}\n\nLOCALE_ATTR = \"_LOCALE_\"\n\n_translation_factory = TranslationStringFactory(\"messages\")\n\n\nclass LazyString:\n def __init__(self, fn, *args, **kwargs):\n self.fn = fn\n self.args = args\n self.mapping = kwargs.get(\"mapping\", {})\n self.kwargs = kwargs\n\n def __json__(self, request):\n return str(self)\n\n def __mod__(self, new_mapping):\n mapping = self.mapping.copy()\n mapping.update(new_mapping)\n return LazyString(self.fn, *self.args, mapping=new_mapping, **self.kwargs)\n\n def __str__(self):\n return self.fn(*self.args, **self.kwargs)\n\n\ndef _locale(request):\n \"\"\"\n Computes a babel.core:Locale() object for this request.\n \"\"\"\n return Locale.parse(request.locale_name, sep=\"_\")\n\n\ndef _negotiate_locale(request):\n locale_name = getattr(request, LOCALE_ATTR, None)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.params.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.cookies.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n if not request.accept_language:\n return default_locale_negotiator(request)\n\n return request.accept_language.best_match(\n tuple(KNOWN_LOCALES.keys()), default_match=default_locale_negotiator(request)\n )\n\n\ndef localize(message, **kwargs):\n def _localize(message, **kwargs):\n request = get_current_request()\n return request.localizer.translate(_translation_factory(message, **kwargs))\n\n return LazyString(_localize, message, **kwargs)\n\n\ndef includeme(config):\n # Add the request attributes\n config.add_request_method(_locale, name=\"locale\", reify=True)\n\n # Register our translation directory.\n config.add_translation_dirs(\"warehouse:locale/\")\n\n config.set_locale_negotiator(_negotiate_locale)\n\n # Register our i18n/l10n filters for Jinja2\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"format_date\", 
\"warehouse.i18n.filters:format_date\")\n filters.setdefault(\"format_datetime\", \"warehouse.i18n.filters:format_datetime\")\n filters.setdefault(\n \"format_rfc822_datetime\", \"warehouse.i18n.filters:format_rfc822_datetime\"\n )\n filters.setdefault(\"format_number\", \"warehouse.i18n.filters:format_number\")\n\n jglobals = config.get_settings().setdefault(\"jinja2.globals\", {})\n jglobals.setdefault(\"KNOWN_LOCALES\", \"warehouse.i18n:KNOWN_LOCALES\")\n", "path": "warehouse/i18n/__init__.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom babel.core import Locale\nfrom pyramid.i18n import TranslationStringFactory, default_locale_negotiator\nfrom pyramid.threadlocal import get_current_request\n\nKNOWN_LOCALES = {\"en\": \"English\", \"pt_BR\": \"Portuguese (Brazil)\"}\n\nLOCALE_ATTR = \"_LOCALE_\"\n\n_translation_factory = TranslationStringFactory(\"messages\")\n\n\nclass LazyString:\n def __init__(self, fn, *args, **kwargs):\n self.fn = fn\n self.args = args\n self.mapping = kwargs.get(\"mapping\", {})\n self.kwargs = kwargs\n\n def __json__(self, request):\n return str(self)\n\n def __mod__(self, new_mapping):\n mapping = self.mapping.copy()\n mapping.update(new_mapping)\n return LazyString(self.fn, *self.args, mapping=new_mapping, **self.kwargs)\n\n def __str__(self):\n return self.fn(*self.args, **self.kwargs)\n\n\ndef _locale(request):\n \"\"\"\n Computes a babel.core:Locale() object for this request.\n \"\"\"\n return Locale.parse(request.locale_name, sep=\"_\")\n\n\ndef _negotiate_locale(request):\n locale_name = getattr(request, LOCALE_ATTR, None)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.params.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.cookies.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n if not request.accept_language:\n return default_locale_negotiator(request)\n\n return request.accept_language.best_match(\n tuple(KNOWN_LOCALES.keys()), default_match=default_locale_negotiator(request)\n )\n\n\ndef localize(message, **kwargs):\n def _localize(message, **kwargs):\n request = get_current_request()\n return request.localizer.translate(_translation_factory(message, **kwargs))\n\n return LazyString(_localize, message, **kwargs)\n\n\ndef includeme(config):\n # Add the request attributes\n config.add_request_method(_locale, name=\"locale\", reify=True)\n\n # Register our translation directory.\n config.add_translation_dirs(\"warehouse:locale/\")\n\n config.set_locale_negotiator(_negotiate_locale)\n\n # Register our i18n/l10n filters for Jinja2\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"format_date\", \"warehouse.i18n.filters:format_date\")\n filters.setdefault(\"format_datetime\", \"warehouse.i18n.filters:format_datetime\")\n filters.setdefault(\n \"format_rfc822_datetime\", \"warehouse.i18n.filters:format_rfc822_datetime\"\n )\n filters.setdefault(\"format_number\", 
\"warehouse.i18n.filters:format_number\")\n\n jglobals = config.get_settings().setdefault(\"jinja2.globals\", {})\n jglobals.setdefault(\"KNOWN_LOCALES\", \"warehouse.i18n:KNOWN_LOCALES\")\n", "path": "warehouse/i18n/__init__.py"}]}
1,299
137
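The whole change in the `pypi__warehouse-6747` record above is one dictionary entry, because both the locale negotiation (`accept_language.best_match(tuple(KNOWN_LOCALES.keys()), ...)`) and the `KNOWN_LOCALES` Jinja2 global that presumably feeds the footer read from the same mapping. The snippet below is an illustrative stand-alone check of that mapping rather than warehouse code; it needs the `babel` package, which the patched module already imports.

```python
# Stand-alone sanity check of the KNOWN_LOCALES entry added in the diff above.
# Requires the `babel` package (already imported by warehouse/i18n/__init__.py).
from babel.core import Locale

KNOWN_LOCALES = {
    "en": "English",
    "pt_BR": "Portuguese (Brazil)",  # entry added for the newly completed translation
}

for code, label in KNOWN_LOCALES.items():
    # Cross-check the hand-written label against Babel's CLDR display name.
    display = Locale.parse(code, sep="_").get_display_name("en")
    print(f"{code}: listed as {label!r}, Babel renders it as {display!r}")
```

If Babel's CLDR data agrees, the display name printed for `pt_BR` should match the label added in the diff.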
gh_patches_debug_24788
rasdani/github-patches
git_diff
opsdroid__opsdroid-1435
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Telegram connector can not handle the File event type <!-- Before you post an issue or if you are unsure about something join our matrix channel https://riot.im/app/#/room/#opsdroid-general:matrix.org and ask away! We are more than happy to help you. --> # Description Telegram connector does not handle File events. ## Steps to Reproduce ``` @match_regex(r'Please, send me a pdf file') async def sendDocument(self, message): path= '/file.pdf' with open(path, 'rb') as file: fileEvent = File(file_bytes= file, mimetype= 'application/pdf', target= {'id': message.target}) await message.respond( fileEvent ) ``` ## Expected Functionality The connector should handle the File event type. ## Experienced Functionality You get the following error > `TypeError: Connector <class 'opsdroid.connector.telegram.ConnectorTelegram'> can not handle the 'File' event type.` ## Versions - **Opsdroid version:** v0.17.1 - **Python version:** 3.7 - **OS/Docker version:** Fedora 31 ## Configuration File Not relevant, I think ## Additional Details The solution should be quite similar to [Adds Image support for Telegram #929](https://github.com/opsdroid/opsdroid/pull/929) <!-- Love opsdroid? Please consider supporting our collective: +👉 https://opencollective.com/opsdroid/donate --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opsdroid/connector/telegram/__init__.py` Content: ``` 1 """A connector for Telegram.""" 2 import asyncio 3 import logging 4 import aiohttp 5 from voluptuous import Required 6 7 from opsdroid.connector import Connector, register_event 8 from opsdroid.events import Message, Image 9 10 11 _LOGGER = logging.getLogger(__name__) 12 CONFIG_SCHEMA = { 13 Required("token"): str, 14 "update-interval": float, 15 "default-user": str, 16 "whitelisted-users": list, 17 } 18 19 20 class ConnectorTelegram(Connector): 21 """A connector the the char service Telegram.""" 22 23 def __init__(self, config, opsdroid=None): 24 """Create the connector. 25 26 Args: 27 config (dict): configuration settings from the 28 file config.yaml. 29 opsdroid (OpsDroid): An instance of opsdroid.core. 30 31 """ 32 _LOGGER.debug(_("Loaded Telegram Connector")) 33 super().__init__(config, opsdroid=opsdroid) 34 self.name = "telegram" 35 self.opsdroid = opsdroid 36 self.latest_update = None 37 self.listening = True 38 self.default_user = config.get("default-user", None) 39 self.default_target = self.default_user 40 self.whitelisted_users = config.get("whitelisted-users", None) 41 self.update_interval = config.get("update-interval", 1) 42 self.session = None 43 self._closing = asyncio.Event() 44 self.loop = asyncio.get_event_loop() 45 46 try: 47 self.token = config["token"] 48 except (KeyError, AttributeError): 49 _LOGGER.error( 50 _( 51 "Unable to login: Access token is missing. Telegram connector will be unavailable." 52 ) 53 ) 54 55 @staticmethod 56 def get_user(response): 57 """Get user from response. 58 59 The API response is different depending on how 60 the bot is set up and where the message is coming 61 from. This method was created to keep if/else 62 statements to a minium on _parse_message. 63 64 Args: 65 response (dict): Response returned by aiohttp.ClientSession. 
66 67 """ 68 user = None 69 user_id = None 70 71 if "username" in response["message"]["from"]: 72 user = response["message"]["from"]["username"] 73 74 elif "first_name" in response["message"]["from"]: 75 user = response["message"]["from"]["first_name"] 76 user_id = response["message"]["from"]["id"] 77 78 return user, user_id 79 80 def handle_user_permission(self, response, user): 81 """Handle user permissions. 82 83 This will check if the user that tried to talk with 84 the bot is allowed to do so. It will also work with 85 userid to improve security. 86 87 """ 88 user_id = response["message"]["from"]["id"] 89 90 if ( 91 not self.whitelisted_users 92 or user in self.whitelisted_users 93 or user_id in self.whitelisted_users 94 ): 95 return True 96 97 return False 98 99 def build_url(self, method): 100 """Build the url to connect to the API. 101 102 Args: 103 method (string): API call end point. 104 105 Return: 106 String that represents the full API url. 107 108 """ 109 return "https://api.telegram.org/bot{}/{}".format(self.token, method) 110 111 async def delete_webhook(self): 112 """Delete Telegram webhook. 113 114 The Telegram api will thrown an 409 error when an webhook is 115 active and a call to getUpdates is made. This method will 116 try to request the deletion of the webhook to make the getUpdate 117 request possible. 118 119 """ 120 _LOGGER.debug(_("Sending deleteWebhook request to Telegram...")) 121 resp = await self.session.get(self.build_url("deleteWebhook")) 122 123 if resp.status == 200: 124 _LOGGER.debug(_("Telegram webhook deleted successfully.")) 125 else: 126 _LOGGER.debug(_("Unable to delete webhook.")) 127 128 async def connect(self): 129 """Connect to Telegram. 130 131 This method is not an authorization call. It basically 132 checks if the API token was provided and makes an API 133 call to Telegram and evaluates the status of the call. 134 135 """ 136 137 _LOGGER.debug(_("Connecting to Telegram.")) 138 self.session = aiohttp.ClientSession() 139 140 resp = await self.session.get(self.build_url("getMe")) 141 142 if resp.status != 200: 143 _LOGGER.error(_("Unable to connect.")) 144 _LOGGER.error(_("Telegram error %s, %s."), resp.status, resp.text) 145 else: 146 json = await resp.json() 147 _LOGGER.debug(json) 148 _LOGGER.debug(_("Connected to Telegram as %s."), json["result"]["username"]) 149 150 async def _parse_message(self, response): 151 """Handle logic to parse a received message. 152 153 Since everyone can send a private message to any user/bot 154 in Telegram, this method allows to set a list of whitelisted 155 users that can interact with the bot. If any other user tries 156 to interact with the bot the command is not parsed and instead 157 the bot will inform that user that he is not allowed to talk 158 with the bot. 159 160 We also set self.latest_update to +1 in order to get the next 161 available message (or an empty {} if no message has been received 162 yet) with the method self._get_messages(). 163 164 Args: 165 response (dict): Response returned by aiohttp.ClientSession. 
166 167 """ 168 for result in response["result"]: 169 _LOGGER.debug(result) 170 171 if result.get("edited_message", None): 172 result["message"] = result.pop("edited_message") 173 if result.get("channel_post", None) or result.get( 174 "edited_channel_post", None 175 ): 176 self.latest_update = result["update_id"] + 1 177 _LOGGER.debug( 178 _("Channel message parsing not supported " "- Ignoring message.") 179 ) 180 elif "message" in result and "text" in result["message"]: 181 user, user_id = self.get_user(result) 182 message = Message( 183 text=result["message"]["text"], 184 user=user, 185 user_id=user_id, 186 target=result["message"]["chat"]["id"], 187 connector=self, 188 ) 189 190 if self.handle_user_permission(result, user): 191 await self.opsdroid.parse(message) 192 else: 193 message.text = ( 194 "Sorry, you're not allowed " "to speak with this bot." 195 ) 196 await self.send(message) 197 self.latest_update = result["update_id"] + 1 198 elif ( 199 "message" in result 200 and "sticker" in result["message"] 201 and "emoji" in result["message"]["sticker"] 202 ): 203 self.latest_update = result["update_id"] + 1 204 _LOGGER.debug( 205 _("Emoji message parsing not supported - Ignoring message.") 206 ) 207 else: 208 _LOGGER.error(_("Unable to parse the message.")) 209 210 async def _get_messages(self): 211 """Connect to the Telegram API. 212 213 Uses an aiohttp ClientSession to connect to Telegram API 214 and get the latest messages from the chat service. 215 216 The data["offset"] is used to consume every new message, the API 217 returns an int - "update_id" value. In order to get the next 218 message this value needs to be increased by 1 the next time 219 the API is called. If no new messages exists the API will just 220 return an empty {}. 221 222 """ 223 data = {} 224 if self.latest_update is not None: 225 data["offset"] = self.latest_update 226 227 await asyncio.sleep(self.update_interval) 228 resp = await self.session.get(self.build_url("getUpdates"), params=data) 229 230 if resp.status == 409: 231 _LOGGER.info( 232 _( 233 "Can't get updates because previous webhook is still active. Will try to delete webhook." 234 ) 235 ) 236 await self.delete_webhook() 237 238 if resp.status != 200: 239 _LOGGER.error(_("Telegram error %s, %s."), resp.status, resp.text) 240 self.listening = False 241 else: 242 json = await resp.json() 243 244 await self._parse_message(json) 245 246 async def get_messages_loop(self): 247 """Listen for and parse new messages. 248 249 The bot will always listen to all opened chat windows, 250 as long as opsdroid is running. Since anyone can start 251 a new chat with the bot is recommended that a list of 252 users to be whitelisted be provided in config.yaml. 253 254 The method will sleep asynchronously at the end of 255 every loop. The time can either be specified in the 256 config.yaml with the param update-interval - this 257 defaults to 1 second. 258 259 """ 260 while self.listening: 261 await self._get_messages() 262 263 async def listen(self): 264 """Listen method of the connector. 265 266 Every connector has to implement the listen method. When an 267 infinite loop is running, it becomes hard to cancel this task. 268 So we are creating a task and set it on a variable so we can 269 cancel the task. 270 271 """ 272 message_getter = self.loop.create_task(await self.get_messages_loop()) 273 await self._closing.wait() 274 message_getter.cancel() 275 276 @register_event(Message) 277 async def send_message(self, message): 278 """Respond with a message. 
279 280 Args: 281 message (object): An instance of Message. 282 283 """ 284 _LOGGER.debug( 285 _("Responding with: '%s' at target: '%s'"), message.text, message.target 286 ) 287 288 data = dict() 289 data["chat_id"] = message.target 290 data["text"] = message.text 291 resp = await self.session.post(self.build_url("sendMessage"), data=data) 292 if resp.status == 200: 293 _LOGGER.debug(_("Successfully responded.")) 294 else: 295 _LOGGER.error(_("Unable to respond.")) 296 297 @register_event(Image) 298 async def send_image(self, file_event): 299 """Send Image to Telegram. 300 301 Gets the chat id from the channel and then 302 sends the bytes of the image as multipart/form-data. 303 304 """ 305 data = aiohttp.FormData() 306 data.add_field( 307 "chat_id", str(file_event.target["id"]), content_type="multipart/form-data" 308 ) 309 data.add_field( 310 "photo", 311 await file_event.get_file_bytes(), 312 content_type="multipart/form-data", 313 ) 314 315 resp = await self.session.post(self.build_url("sendPhoto"), data=data) 316 if resp.status == 200: 317 _LOGGER.debug(_("Sent %s image successfully."), file_event.name) 318 else: 319 _LOGGER.debug(_("Unable to send image - Status Code %s."), resp.status) 320 321 async def disconnect(self): 322 """Disconnect from Telegram. 323 324 Stops the infinite loop found in self._listen(), closes 325 aiohttp session. 326 327 """ 328 self.listening = False 329 self._closing.set() 330 await self.session.close() 331 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opsdroid/connector/telegram/__init__.py b/opsdroid/connector/telegram/__init__.py --- a/opsdroid/connector/telegram/__init__.py +++ b/opsdroid/connector/telegram/__init__.py @@ -5,7 +5,7 @@ from voluptuous import Required from opsdroid.connector import Connector, register_event -from opsdroid.events import Message, Image +from opsdroid.events import Message, Image, File _LOGGER = logging.getLogger(__name__) @@ -318,6 +318,30 @@ else: _LOGGER.debug(_("Unable to send image - Status Code %s."), resp.status) + @register_event(File) + async def send_file(self, file_event): + """Send File to Telegram. + + Gets the chat id from the channel and then + sends the bytes of the file as multipart/form-data. + + """ + data = aiohttp.FormData() + data.add_field( + "chat_id", str(file_event.target["id"]), content_type="multipart/form-data" + ) + data.add_field( + "document", + await file_event.get_file_bytes(), + content_type="multipart/form-data", + ) + + resp = await self.session.post(self.build_url("sendDocument"), data=data) + if resp.status == 200: + _LOGGER.debug(_("Sent %s file successfully."), file_event.name) + else: + _LOGGER.debug(_("Unable to send file - Status Code %s."), resp.status) + async def disconnect(self): """Disconnect from Telegram.
{"golden_diff": "diff --git a/opsdroid/connector/telegram/__init__.py b/opsdroid/connector/telegram/__init__.py\n--- a/opsdroid/connector/telegram/__init__.py\n+++ b/opsdroid/connector/telegram/__init__.py\n@@ -5,7 +5,7 @@\n from voluptuous import Required\n \n from opsdroid.connector import Connector, register_event\n-from opsdroid.events import Message, Image\n+from opsdroid.events import Message, Image, File\n \n \n _LOGGER = logging.getLogger(__name__)\n@@ -318,6 +318,30 @@\n else:\n _LOGGER.debug(_(\"Unable to send image - Status Code %s.\"), resp.status)\n \n+ @register_event(File)\n+ async def send_file(self, file_event):\n+ \"\"\"Send File to Telegram.\n+\n+ Gets the chat id from the channel and then\n+ sends the bytes of the file as multipart/form-data.\n+\n+ \"\"\"\n+ data = aiohttp.FormData()\n+ data.add_field(\n+ \"chat_id\", str(file_event.target[\"id\"]), content_type=\"multipart/form-data\"\n+ )\n+ data.add_field(\n+ \"document\",\n+ await file_event.get_file_bytes(),\n+ content_type=\"multipart/form-data\",\n+ )\n+\n+ resp = await self.session.post(self.build_url(\"sendDocument\"), data=data)\n+ if resp.status == 200:\n+ _LOGGER.debug(_(\"Sent %s file successfully.\"), file_event.name)\n+ else:\n+ _LOGGER.debug(_(\"Unable to send file - Status Code %s.\"), resp.status)\n+\n async def disconnect(self):\n \"\"\"Disconnect from Telegram.\n", "issue": "Telegram connector can not handle the File event type\n<!-- Before you post an issue or if you are unsure about something join our matrix channel https://riot.im/app/#/room/#opsdroid-general:matrix.org and ask away! We are more than happy to help you. -->\r\n# Description\r\nTelegram connector does not handle File events.\r\n\r\n## Steps to Reproduce\r\n```\r\n@match_regex(r'Please, send me a pdf file')\r\n async def sendDocument(self, message):\r\n path= '/file.pdf' \r\n\r\n with open(path, 'rb') as file:\r\n fileEvent = File(file_bytes= file, mimetype= 'application/pdf', target= {'id': message.target})\r\n await message.respond( fileEvent )\r\n\r\n```\r\n\r\n## Expected Functionality\r\nThe connector should handle the File event type.\r\n\r\n\r\n## Experienced Functionality\r\nYou get the following error\r\n\r\n> `TypeError: Connector <class 'opsdroid.connector.telegram.ConnectorTelegram'> can not handle the 'File' event type.`\r\n\r\n## Versions\r\n- **Opsdroid version:** v0.17.1\r\n- **Python version:** 3.7\r\n- **OS/Docker version:** Fedora 31\r\n\r\n## Configuration File\r\nNot relevant, I think\r\n\r\n## Additional Details\r\nThe solution should be quite similar to [Adds Image support for Telegram #929](https://github.com/opsdroid/opsdroid/pull/929)\r\n\r\n\r\n<!-- Love opsdroid? 
Please consider supporting our collective:\r\n +\ud83d\udc49 https://opencollective.com/opsdroid/donate -->\r\n\n", "before_files": [{"content": "\"\"\"A connector for Telegram.\"\"\"\nimport asyncio\nimport logging\nimport aiohttp\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Image\n\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n \"update-interval\": float,\n \"default-user\": str,\n \"whitelisted-users\": list,\n}\n\n\nclass ConnectorTelegram(Connector):\n \"\"\"A connector the the char service Telegram.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\n\n Args:\n config (dict): configuration settings from the\n file config.yaml.\n opsdroid (OpsDroid): An instance of opsdroid.core.\n\n \"\"\"\n _LOGGER.debug(_(\"Loaded Telegram Connector\"))\n super().__init__(config, opsdroid=opsdroid)\n self.name = \"telegram\"\n self.opsdroid = opsdroid\n self.latest_update = None\n self.listening = True\n self.default_user = config.get(\"default-user\", None)\n self.default_target = self.default_user\n self.whitelisted_users = config.get(\"whitelisted-users\", None)\n self.update_interval = config.get(\"update-interval\", 1)\n self.session = None\n self._closing = asyncio.Event()\n self.loop = asyncio.get_event_loop()\n\n try:\n self.token = config[\"token\"]\n except (KeyError, AttributeError):\n _LOGGER.error(\n _(\n \"Unable to login: Access token is missing. Telegram connector will be unavailable.\"\n )\n )\n\n @staticmethod\n def get_user(response):\n \"\"\"Get user from response.\n\n The API response is different depending on how\n the bot is set up and where the message is coming\n from. This method was created to keep if/else\n statements to a minium on _parse_message.\n\n Args:\n response (dict): Response returned by aiohttp.ClientSession.\n\n \"\"\"\n user = None\n user_id = None\n\n if \"username\" in response[\"message\"][\"from\"]:\n user = response[\"message\"][\"from\"][\"username\"]\n\n elif \"first_name\" in response[\"message\"][\"from\"]:\n user = response[\"message\"][\"from\"][\"first_name\"]\n user_id = response[\"message\"][\"from\"][\"id\"]\n\n return user, user_id\n\n def handle_user_permission(self, response, user):\n \"\"\"Handle user permissions.\n\n This will check if the user that tried to talk with\n the bot is allowed to do so. It will also work with\n userid to improve security.\n\n \"\"\"\n user_id = response[\"message\"][\"from\"][\"id\"]\n\n if (\n not self.whitelisted_users\n or user in self.whitelisted_users\n or user_id in self.whitelisted_users\n ):\n return True\n\n return False\n\n def build_url(self, method):\n \"\"\"Build the url to connect to the API.\n\n Args:\n method (string): API call end point.\n\n Return:\n String that represents the full API url.\n\n \"\"\"\n return \"https://api.telegram.org/bot{}/{}\".format(self.token, method)\n\n async def delete_webhook(self):\n \"\"\"Delete Telegram webhook.\n\n The Telegram api will thrown an 409 error when an webhook is\n active and a call to getUpdates is made. 
This method will\n try to request the deletion of the webhook to make the getUpdate\n request possible.\n\n \"\"\"\n _LOGGER.debug(_(\"Sending deleteWebhook request to Telegram...\"))\n resp = await self.session.get(self.build_url(\"deleteWebhook\"))\n\n if resp.status == 200:\n _LOGGER.debug(_(\"Telegram webhook deleted successfully.\"))\n else:\n _LOGGER.debug(_(\"Unable to delete webhook.\"))\n\n async def connect(self):\n \"\"\"Connect to Telegram.\n\n This method is not an authorization call. It basically\n checks if the API token was provided and makes an API\n call to Telegram and evaluates the status of the call.\n\n \"\"\"\n\n _LOGGER.debug(_(\"Connecting to Telegram.\"))\n self.session = aiohttp.ClientSession()\n\n resp = await self.session.get(self.build_url(\"getMe\"))\n\n if resp.status != 200:\n _LOGGER.error(_(\"Unable to connect.\"))\n _LOGGER.error(_(\"Telegram error %s, %s.\"), resp.status, resp.text)\n else:\n json = await resp.json()\n _LOGGER.debug(json)\n _LOGGER.debug(_(\"Connected to Telegram as %s.\"), json[\"result\"][\"username\"])\n\n async def _parse_message(self, response):\n \"\"\"Handle logic to parse a received message.\n\n Since everyone can send a private message to any user/bot\n in Telegram, this method allows to set a list of whitelisted\n users that can interact with the bot. If any other user tries\n to interact with the bot the command is not parsed and instead\n the bot will inform that user that he is not allowed to talk\n with the bot.\n\n We also set self.latest_update to +1 in order to get the next\n available message (or an empty {} if no message has been received\n yet) with the method self._get_messages().\n\n Args:\n response (dict): Response returned by aiohttp.ClientSession.\n\n \"\"\"\n for result in response[\"result\"]:\n _LOGGER.debug(result)\n\n if result.get(\"edited_message\", None):\n result[\"message\"] = result.pop(\"edited_message\")\n if result.get(\"channel_post\", None) or result.get(\n \"edited_channel_post\", None\n ):\n self.latest_update = result[\"update_id\"] + 1\n _LOGGER.debug(\n _(\"Channel message parsing not supported \" \"- Ignoring message.\")\n )\n elif \"message\" in result and \"text\" in result[\"message\"]:\n user, user_id = self.get_user(result)\n message = Message(\n text=result[\"message\"][\"text\"],\n user=user,\n user_id=user_id,\n target=result[\"message\"][\"chat\"][\"id\"],\n connector=self,\n )\n\n if self.handle_user_permission(result, user):\n await self.opsdroid.parse(message)\n else:\n message.text = (\n \"Sorry, you're not allowed \" \"to speak with this bot.\"\n )\n await self.send(message)\n self.latest_update = result[\"update_id\"] + 1\n elif (\n \"message\" in result\n and \"sticker\" in result[\"message\"]\n and \"emoji\" in result[\"message\"][\"sticker\"]\n ):\n self.latest_update = result[\"update_id\"] + 1\n _LOGGER.debug(\n _(\"Emoji message parsing not supported - Ignoring message.\")\n )\n else:\n _LOGGER.error(_(\"Unable to parse the message.\"))\n\n async def _get_messages(self):\n \"\"\"Connect to the Telegram API.\n\n Uses an aiohttp ClientSession to connect to Telegram API\n and get the latest messages from the chat service.\n\n The data[\"offset\"] is used to consume every new message, the API\n returns an int - \"update_id\" value. In order to get the next\n message this value needs to be increased by 1 the next time\n the API is called. 
If no new messages exists the API will just\n return an empty {}.\n\n \"\"\"\n data = {}\n if self.latest_update is not None:\n data[\"offset\"] = self.latest_update\n\n await asyncio.sleep(self.update_interval)\n resp = await self.session.get(self.build_url(\"getUpdates\"), params=data)\n\n if resp.status == 409:\n _LOGGER.info(\n _(\n \"Can't get updates because previous webhook is still active. Will try to delete webhook.\"\n )\n )\n await self.delete_webhook()\n\n if resp.status != 200:\n _LOGGER.error(_(\"Telegram error %s, %s.\"), resp.status, resp.text)\n self.listening = False\n else:\n json = await resp.json()\n\n await self._parse_message(json)\n\n async def get_messages_loop(self):\n \"\"\"Listen for and parse new messages.\n\n The bot will always listen to all opened chat windows,\n as long as opsdroid is running. Since anyone can start\n a new chat with the bot is recommended that a list of\n users to be whitelisted be provided in config.yaml.\n\n The method will sleep asynchronously at the end of\n every loop. The time can either be specified in the\n config.yaml with the param update-interval - this\n defaults to 1 second.\n\n \"\"\"\n while self.listening:\n await self._get_messages()\n\n async def listen(self):\n \"\"\"Listen method of the connector.\n\n Every connector has to implement the listen method. When an\n infinite loop is running, it becomes hard to cancel this task.\n So we are creating a task and set it on a variable so we can\n cancel the task.\n\n \"\"\"\n message_getter = self.loop.create_task(await self.get_messages_loop())\n await self._closing.wait()\n message_getter.cancel()\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\n\n Args:\n message (object): An instance of Message.\n\n \"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' at target: '%s'\"), message.text, message.target\n )\n\n data = dict()\n data[\"chat_id\"] = message.target\n data[\"text\"] = message.text\n resp = await self.session.post(self.build_url(\"sendMessage\"), data=data)\n if resp.status == 200:\n _LOGGER.debug(_(\"Successfully responded.\"))\n else:\n _LOGGER.error(_(\"Unable to respond.\"))\n\n @register_event(Image)\n async def send_image(self, file_event):\n \"\"\"Send Image to Telegram.\n\n Gets the chat id from the channel and then\n sends the bytes of the image as multipart/form-data.\n\n \"\"\"\n data = aiohttp.FormData()\n data.add_field(\n \"chat_id\", str(file_event.target[\"id\"]), content_type=\"multipart/form-data\"\n )\n data.add_field(\n \"photo\",\n await file_event.get_file_bytes(),\n content_type=\"multipart/form-data\",\n )\n\n resp = await self.session.post(self.build_url(\"sendPhoto\"), data=data)\n if resp.status == 200:\n _LOGGER.debug(_(\"Sent %s image successfully.\"), file_event.name)\n else:\n _LOGGER.debug(_(\"Unable to send image - Status Code %s.\"), resp.status)\n\n async def disconnect(self):\n \"\"\"Disconnect from Telegram.\n\n Stops the infinite loop found in self._listen(), closes\n aiohttp session.\n\n \"\"\"\n self.listening = False\n self._closing.set()\n await self.session.close()\n", "path": "opsdroid/connector/telegram/__init__.py"}], "after_files": [{"content": "\"\"\"A connector for Telegram.\"\"\"\nimport asyncio\nimport logging\nimport aiohttp\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Image, File\n\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n 
\"update-interval\": float,\n \"default-user\": str,\n \"whitelisted-users\": list,\n}\n\n\nclass ConnectorTelegram(Connector):\n \"\"\"A connector the the char service Telegram.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\n\n Args:\n config (dict): configuration settings from the\n file config.yaml.\n opsdroid (OpsDroid): An instance of opsdroid.core.\n\n \"\"\"\n _LOGGER.debug(_(\"Loaded Telegram Connector\"))\n super().__init__(config, opsdroid=opsdroid)\n self.name = \"telegram\"\n self.opsdroid = opsdroid\n self.latest_update = None\n self.listening = True\n self.default_user = config.get(\"default-user\", None)\n self.default_target = self.default_user\n self.whitelisted_users = config.get(\"whitelisted-users\", None)\n self.update_interval = config.get(\"update-interval\", 1)\n self.session = None\n self._closing = asyncio.Event()\n self.loop = asyncio.get_event_loop()\n\n try:\n self.token = config[\"token\"]\n except (KeyError, AttributeError):\n _LOGGER.error(\n _(\n \"Unable to login: Access token is missing. Telegram connector will be unavailable.\"\n )\n )\n\n @staticmethod\n def get_user(response):\n \"\"\"Get user from response.\n\n The API response is different depending on how\n the bot is set up and where the message is coming\n from. This method was created to keep if/else\n statements to a minium on _parse_message.\n\n Args:\n response (dict): Response returned by aiohttp.ClientSession.\n\n \"\"\"\n user = None\n user_id = None\n\n if \"username\" in response[\"message\"][\"from\"]:\n user = response[\"message\"][\"from\"][\"username\"]\n\n elif \"first_name\" in response[\"message\"][\"from\"]:\n user = response[\"message\"][\"from\"][\"first_name\"]\n user_id = response[\"message\"][\"from\"][\"id\"]\n\n return user, user_id\n\n def handle_user_permission(self, response, user):\n \"\"\"Handle user permissions.\n\n This will check if the user that tried to talk with\n the bot is allowed to do so. It will also work with\n userid to improve security.\n\n \"\"\"\n user_id = response[\"message\"][\"from\"][\"id\"]\n\n if (\n not self.whitelisted_users\n or user in self.whitelisted_users\n or user_id in self.whitelisted_users\n ):\n return True\n\n return False\n\n def build_url(self, method):\n \"\"\"Build the url to connect to the API.\n\n Args:\n method (string): API call end point.\n\n Return:\n String that represents the full API url.\n\n \"\"\"\n return \"https://api.telegram.org/bot{}/{}\".format(self.token, method)\n\n async def delete_webhook(self):\n \"\"\"Delete Telegram webhook.\n\n The Telegram api will thrown an 409 error when an webhook is\n active and a call to getUpdates is made. This method will\n try to request the deletion of the webhook to make the getUpdate\n request possible.\n\n \"\"\"\n _LOGGER.debug(_(\"Sending deleteWebhook request to Telegram...\"))\n resp = await self.session.get(self.build_url(\"deleteWebhook\"))\n\n if resp.status == 200:\n _LOGGER.debug(_(\"Telegram webhook deleted successfully.\"))\n else:\n _LOGGER.debug(_(\"Unable to delete webhook.\"))\n\n async def connect(self):\n \"\"\"Connect to Telegram.\n\n This method is not an authorization call. 
It basically\n checks if the API token was provided and makes an API\n call to Telegram and evaluates the status of the call.\n\n \"\"\"\n\n _LOGGER.debug(_(\"Connecting to Telegram.\"))\n self.session = aiohttp.ClientSession()\n\n resp = await self.session.get(self.build_url(\"getMe\"))\n\n if resp.status != 200:\n _LOGGER.error(_(\"Unable to connect.\"))\n _LOGGER.error(_(\"Telegram error %s, %s.\"), resp.status, resp.text)\n else:\n json = await resp.json()\n _LOGGER.debug(json)\n _LOGGER.debug(_(\"Connected to Telegram as %s.\"), json[\"result\"][\"username\"])\n\n async def _parse_message(self, response):\n \"\"\"Handle logic to parse a received message.\n\n Since everyone can send a private message to any user/bot\n in Telegram, this method allows to set a list of whitelisted\n users that can interact with the bot. If any other user tries\n to interact with the bot the command is not parsed and instead\n the bot will inform that user that he is not allowed to talk\n with the bot.\n\n We also set self.latest_update to +1 in order to get the next\n available message (or an empty {} if no message has been received\n yet) with the method self._get_messages().\n\n Args:\n response (dict): Response returned by aiohttp.ClientSession.\n\n \"\"\"\n for result in response[\"result\"]:\n _LOGGER.debug(result)\n\n if result.get(\"edited_message\", None):\n result[\"message\"] = result.pop(\"edited_message\")\n if result.get(\"channel_post\", None) or result.get(\n \"edited_channel_post\", None\n ):\n self.latest_update = result[\"update_id\"] + 1\n _LOGGER.debug(\n _(\"Channel message parsing not supported \" \"- Ignoring message.\")\n )\n elif \"message\" in result and \"text\" in result[\"message\"]:\n user, user_id = self.get_user(result)\n message = Message(\n text=result[\"message\"][\"text\"],\n user=user,\n user_id=user_id,\n target=result[\"message\"][\"chat\"][\"id\"],\n connector=self,\n )\n\n if self.handle_user_permission(result, user):\n await self.opsdroid.parse(message)\n else:\n message.text = (\n \"Sorry, you're not allowed \" \"to speak with this bot.\"\n )\n await self.send(message)\n self.latest_update = result[\"update_id\"] + 1\n elif (\n \"message\" in result\n and \"sticker\" in result[\"message\"]\n and \"emoji\" in result[\"message\"][\"sticker\"]\n ):\n self.latest_update = result[\"update_id\"] + 1\n _LOGGER.debug(\n _(\"Emoji message parsing not supported - Ignoring message.\")\n )\n else:\n _LOGGER.error(_(\"Unable to parse the message.\"))\n\n async def _get_messages(self):\n \"\"\"Connect to the Telegram API.\n\n Uses an aiohttp ClientSession to connect to Telegram API\n and get the latest messages from the chat service.\n\n The data[\"offset\"] is used to consume every new message, the API\n returns an int - \"update_id\" value. In order to get the next\n message this value needs to be increased by 1 the next time\n the API is called. If no new messages exists the API will just\n return an empty {}.\n\n \"\"\"\n data = {}\n if self.latest_update is not None:\n data[\"offset\"] = self.latest_update\n\n await asyncio.sleep(self.update_interval)\n resp = await self.session.get(self.build_url(\"getUpdates\"), params=data)\n\n if resp.status == 409:\n _LOGGER.info(\n _(\n \"Can't get updates because previous webhook is still active. 
Will try to delete webhook.\"\n )\n )\n await self.delete_webhook()\n\n if resp.status != 200:\n _LOGGER.error(_(\"Telegram error %s, %s.\"), resp.status, resp.text)\n self.listening = False\n else:\n json = await resp.json()\n\n await self._parse_message(json)\n\n async def get_messages_loop(self):\n \"\"\"Listen for and parse new messages.\n\n The bot will always listen to all opened chat windows,\n as long as opsdroid is running. Since anyone can start\n a new chat with the bot is recommended that a list of\n users to be whitelisted be provided in config.yaml.\n\n The method will sleep asynchronously at the end of\n every loop. The time can either be specified in the\n config.yaml with the param update-interval - this\n defaults to 1 second.\n\n \"\"\"\n while self.listening:\n await self._get_messages()\n\n async def listen(self):\n \"\"\"Listen method of the connector.\n\n Every connector has to implement the listen method. When an\n infinite loop is running, it becomes hard to cancel this task.\n So we are creating a task and set it on a variable so we can\n cancel the task.\n\n \"\"\"\n message_getter = self.loop.create_task(await self.get_messages_loop())\n await self._closing.wait()\n message_getter.cancel()\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\n\n Args:\n message (object): An instance of Message.\n\n \"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' at target: '%s'\"), message.text, message.target\n )\n\n data = dict()\n data[\"chat_id\"] = message.target\n data[\"text\"] = message.text\n resp = await self.session.post(self.build_url(\"sendMessage\"), data=data)\n if resp.status == 200:\n _LOGGER.debug(_(\"Successfully responded.\"))\n else:\n _LOGGER.error(_(\"Unable to respond.\"))\n\n @register_event(Image)\n async def send_image(self, file_event):\n \"\"\"Send Image to Telegram.\n\n Gets the chat id from the channel and then\n sends the bytes of the image as multipart/form-data.\n\n \"\"\"\n data = aiohttp.FormData()\n data.add_field(\n \"chat_id\", str(file_event.target[\"id\"]), content_type=\"multipart/form-data\"\n )\n data.add_field(\n \"photo\",\n await file_event.get_file_bytes(),\n content_type=\"multipart/form-data\",\n )\n\n resp = await self.session.post(self.build_url(\"sendPhoto\"), data=data)\n if resp.status == 200:\n _LOGGER.debug(_(\"Sent %s image successfully.\"), file_event.name)\n else:\n _LOGGER.debug(_(\"Unable to send image - Status Code %s.\"), resp.status)\n\n @register_event(File)\n async def send_file(self, file_event):\n \"\"\"Send File to Telegram.\n\n Gets the chat id from the channel and then\n sends the bytes of the file as multipart/form-data.\n\n \"\"\"\n data = aiohttp.FormData()\n data.add_field(\n \"chat_id\", str(file_event.target[\"id\"]), content_type=\"multipart/form-data\"\n )\n data.add_field(\n \"document\",\n await file_event.get_file_bytes(),\n content_type=\"multipart/form-data\",\n )\n\n resp = await self.session.post(self.build_url(\"sendDocument\"), data=data)\n if resp.status == 200:\n _LOGGER.debug(_(\"Sent %s file successfully.\"), file_event.name)\n else:\n _LOGGER.debug(_(\"Unable to send file - Status Code %s.\"), resp.status)\n\n async def disconnect(self):\n \"\"\"Disconnect from Telegram.\n\n Stops the infinite loop found in self._listen(), closes\n aiohttp session.\n\n \"\"\"\n self.listening = False\n self._closing.set()\n await self.session.close()\n", "path": "opsdroid/connector/telegram/__init__.py"}]}
3,824
364
gh_patches_debug_31430
rasdani/github-patches
git_diff
chaoss__augur-2541
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Issue data cntrb_id null for closed issues Cntrb_id is stated to be "The ID of the person that closed the issue" and even for closed issues I am seeing nulls --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `augur/tasks/github/events/tasks.py` Content: ``` 1 import time 2 import logging 3 import traceback 4 5 from augur.tasks.init.celery_app import celery_app as celery 6 from augur.tasks.init.celery_app import AugurCoreRepoCollectionTask 7 from augur.application.db.data_parse import * 8 from augur.tasks.github.util.github_paginator import GithubPaginator, hit_api 9 from augur.tasks.github.util.github_task_session import GithubTaskManifest 10 from augur.application.db.session import DatabaseSession 11 from augur.tasks.github.util.util import get_owner_repo 12 from augur.tasks.util.worker_util import remove_duplicate_dicts 13 from augur.application.db.models import PullRequest, Message, PullRequestReview, PullRequestLabel, PullRequestReviewer, PullRequestEvent, PullRequestMeta, PullRequestAssignee, PullRequestReviewMessageRef, Issue, IssueEvent, IssueLabel, IssueAssignee, PullRequestMessageRef, IssueMessageRef, Contributor, Repo 14 from augur.application.db.util import execute_session_query 15 16 platform_id = 1 17 18 @celery.task(base=AugurCoreRepoCollectionTask) 19 def collect_events(repo_git: str): 20 21 logger = logging.getLogger(collect_events.__name__) 22 23 with GithubTaskManifest(logger) as manifest: 24 25 augur_db = manifest.augur_db 26 27 try: 28 29 query = augur_db.session.query(Repo).filter(Repo.repo_git == repo_git) 30 repo_obj = execute_session_query(query, 'one') 31 repo_id = repo_obj.repo_id 32 33 owner, repo = get_owner_repo(repo_git) 34 35 logger.info(f"Collecting Github events for {owner}/{repo}") 36 37 url = f"https://api.github.com/repos/{owner}/{repo}/issues/events" 38 39 event_data = retrieve_all_event_data(repo_git, logger, manifest.key_auth) 40 41 if event_data: 42 43 process_events(event_data, f"{owner}/{repo}: Event task", repo_id, logger, manifest.augur_db) 44 45 else: 46 logger.info(f"{owner}/{repo} has no events") 47 except Exception as e: 48 logger.error(f"Could not collect events for {repo_git}\n Reason: {e} \n Traceback: {''.join(traceback.format_exception(None, e, e.__traceback__))}") 49 50 51 def retrieve_all_event_data(repo_git: str, logger, key_auth): 52 53 owner, repo = get_owner_repo(repo_git) 54 55 logger.info(f"Collecting Github events for {owner}/{repo}") 56 57 url = f"https://api.github.com/repos/{owner}/{repo}/issues/events" 58 59 # returns an iterable of all issues at this url (this essentially means you can treat the issues variable as a list of the issues) 60 events = GithubPaginator(url, key_auth, logger) 61 62 63 num_pages = events.get_num_pages() 64 all_data = [] 65 for page_data, page in events.iter_pages(): 66 67 if page_data is None: 68 return all_data 69 70 elif len(page_data) == 0: 71 logger.debug(f"{repo.capitalize()} Events Page {page} contains no data...returning") 72 logger.info(f"Events Page {page} of {num_pages}") 73 return all_data 74 75 logger.info(f"{repo} Events Page {page} of {num_pages}") 76 77 all_data += page_data 78 79 return all_data 80 81 def process_events(events, task_name, repo_id, logger, augur_db): 82 83 tool_source = "Github events task" 84 tool_version = "2.0" 85 data_source = "Github API" 86 87 
pr_event_dicts = [] 88 issue_event_dicts = [] 89 contributors = [] 90 91 92 # create mapping from issue url to issue id of current issues 93 issue_url_to_id_map = {} 94 issues = augur_db.session.query(Issue).filter(Issue.repo_id == repo_id).all() 95 for issue in issues: 96 issue_url_to_id_map[issue.issue_url] = issue.issue_id 97 98 # create mapping from pr url to pr id of current pull requests 99 pr_url_to_id_map = {} 100 prs = augur_db.session.query(PullRequest).filter(PullRequest.repo_id == repo_id).all() 101 for pr in prs: 102 pr_url_to_id_map[pr.pr_url] = pr.pull_request_id 103 104 not_mapable_event_count = 0 105 event_len = len(events) 106 for event in events: 107 108 event, contributor = process_github_event_contributors(logger, event, tool_source, tool_version, data_source) 109 110 # event_mapping_data is the pr or issue data needed to relate the event to an issue or pr 111 event_mapping_data = event["issue"] 112 113 if event_mapping_data is None: 114 not_mapable_event_count += 1 115 continue 116 117 pull_request = event_mapping_data.get('pull_request', None) 118 if pull_request: 119 pr_url = pull_request["url"] 120 121 try: 122 pull_request_id = pr_url_to_id_map[pr_url] 123 124 # query = augur_db.session.query(PullRequest).filter(PullRequest.pr_url == pr_url) 125 # related_pr = execute_session_query(query, 'one') 126 except KeyError: 127 logger.info(f"{task_name}: Could not find related pr") 128 logger.info(f"{task_name}: We were searching for: {pr_url}") 129 logger.info(f"{task_name}: Skipping") 130 continue 131 132 pr_event_dicts.append( 133 extract_pr_event_data(event, pull_request_id, platform_id, repo_id, 134 tool_source, tool_version, data_source) 135 ) 136 137 else: 138 issue_url = event_mapping_data["url"] 139 140 try: 141 issue_id = issue_url_to_id_map[issue_url] 142 # query = augur_db.session.query(Issue).filter(Issue.issue_url == issue_url) 143 # related_issue = execute_session_query(query, 'one') 144 except KeyError: 145 logger.info(f"{task_name}: Could not find related pr") 146 logger.info(f"{task_name}: We were searching for: {issue_url}") 147 logger.info(f"{task_name}: Skipping") 148 continue 149 150 issue_event_dicts.append( 151 extract_issue_event_data(event, issue_id, platform_id, repo_id, 152 tool_source, tool_version, data_source) 153 ) 154 155 # add contributor to list after porcessing the event, 156 # so if it fails processing for some reason the contributor is not inserted 157 # NOTE: contributor is none when there is no contributor data on the event 158 if contributor: 159 contributors.append(contributor) 160 161 # remove contributors that were found in the data more than once 162 contributors = remove_duplicate_dicts(contributors) 163 164 augur_db.insert_data(contributors, Contributor, ["cntrb_id"]) 165 166 issue_events_len = len(issue_event_dicts) 167 pr_events_len = len(pr_event_dicts) 168 if event_len != (issue_events_len + pr_events_len): 169 170 unassigned_events = event_len - issue_events_len - pr_events_len 171 172 logger.error(f"{task_name}: {event_len} events were processed, but {pr_events_len} pr events were found and related to a pr, and {issue_events_len} issue events were found and related to an issue. {not_mapable_event_count} events were not related to a pr or issue due to the api returning insufficient data. For some reason {unassigned_events} events were not able to be processed even when the api returned sufficient data. 
This is usually because pull requests or issues have not been collected, and the events are skipped because they cannot be related to a pr or issue") 173 174 logger.info(f"{task_name}: Inserting {len(pr_event_dicts)} pr events and {len(issue_event_dicts)} issue events") 175 176 # TODO: Could replace this with "id" but it isn't stored on the table for some reason 177 pr_event_natural_keys = ["node_id"] 178 augur_db.insert_data(pr_event_dicts, PullRequestEvent, pr_event_natural_keys) 179 180 issue_event_natural_keys = ["issue_id", "issue_event_src_id"] 181 augur_db.insert_data(issue_event_dicts, IssueEvent, issue_event_natural_keys) 182 183 184 # TODO: Should we skip an event if there is no contributor to resolve it o 185 def process_github_event_contributors(logger, event, tool_source, tool_version, data_source): 186 187 if event["actor"]: 188 189 event_cntrb = extract_needed_contributor_data(event["actor"], tool_source, tool_version, data_source) 190 event["cntrb_id"] = event_cntrb["cntrb_id"] 191 192 else: 193 event["cntrb_id"] = None 194 return event, None 195 196 return event, event_cntrb 197 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/augur/tasks/github/events/tasks.py b/augur/tasks/github/events/tasks.py --- a/augur/tasks/github/events/tasks.py +++ b/augur/tasks/github/events/tasks.py @@ -1,6 +1,7 @@ import time import logging import traceback +import sqlalchemy as s from augur.tasks.init.celery_app import celery_app as celery from augur.tasks.init.celery_app import AugurCoreRepoCollectionTask @@ -180,6 +181,7 @@ issue_event_natural_keys = ["issue_id", "issue_event_src_id"] augur_db.insert_data(issue_event_dicts, IssueEvent, issue_event_natural_keys) + update_issue_closed_cntrbs_from_events(augur_db.engine, repo_id) # TODO: Should we skip an event if there is no contributor to resolve it o def process_github_event_contributors(logger, event, tool_source, tool_version, data_source): @@ -194,3 +196,30 @@ return event, None return event, event_cntrb + + +def update_issue_closed_cntrbs_from_events(engine, repo_id): + + get_ranked_issues = s.text(f""" + WITH RankedIssues AS ( + SELECT repo_id, issue_id, cntrb_id, + ROW_NUMBER() OVER(PARTITION BY issue_id ORDER BY created_at DESC) AS rn + FROM issue_events + WHERE "action" = 'closed' + ) + + SELECT issue_id, cntrb_id from RankedIssues where rn=1 and repo_id={repo_id} + """) + result = engine.execute(get_ranked_issues).fetchall() + + update_data = [{'issue_id': row['issue_id'], 'cntrb_id': row['cntrb_id'], 'repo_id': repo_id} for row in result] + with engine.connect() as connection: + update_stmt = s.text(""" + UPDATE issues + SET cntrb_id = :cntrb_id + WHERE issue_id = :issue_id + AND repo_id = :repo_id + """) + connection.execute(update_stmt, update_data) + +
{"golden_diff": "diff --git a/augur/tasks/github/events/tasks.py b/augur/tasks/github/events/tasks.py\n--- a/augur/tasks/github/events/tasks.py\n+++ b/augur/tasks/github/events/tasks.py\n@@ -1,6 +1,7 @@\n import time\n import logging\n import traceback\n+import sqlalchemy as s\n \n from augur.tasks.init.celery_app import celery_app as celery\n from augur.tasks.init.celery_app import AugurCoreRepoCollectionTask\n@@ -180,6 +181,7 @@\n issue_event_natural_keys = [\"issue_id\", \"issue_event_src_id\"]\n augur_db.insert_data(issue_event_dicts, IssueEvent, issue_event_natural_keys)\n \n+ update_issue_closed_cntrbs_from_events(augur_db.engine, repo_id)\n \n # TODO: Should we skip an event if there is no contributor to resolve it o\n def process_github_event_contributors(logger, event, tool_source, tool_version, data_source):\n@@ -194,3 +196,30 @@\n return event, None\n \n return event, event_cntrb\n+\n+\n+def update_issue_closed_cntrbs_from_events(engine, repo_id):\n+\n+ get_ranked_issues = s.text(f\"\"\"\n+ WITH RankedIssues AS (\n+ SELECT repo_id, issue_id, cntrb_id, \n+ ROW_NUMBER() OVER(PARTITION BY issue_id ORDER BY created_at DESC) AS rn\n+ FROM issue_events \n+ WHERE \"action\" = 'closed'\n+ )\n+ \n+ SELECT issue_id, cntrb_id from RankedIssues where rn=1 and repo_id={repo_id}\n+ \"\"\")\n+ result = engine.execute(get_ranked_issues).fetchall()\n+\n+ update_data = [{'issue_id': row['issue_id'], 'cntrb_id': row['cntrb_id'], 'repo_id': repo_id} for row in result]\n+ with engine.connect() as connection:\n+ update_stmt = s.text(\"\"\"\n+ UPDATE issues\n+ SET cntrb_id = :cntrb_id\n+ WHERE issue_id = :issue_id\n+ AND repo_id = :repo_id\n+ \"\"\")\n+ connection.execute(update_stmt, update_data)\n+\n+\n", "issue": "Issue data cntrb_id null for closed issues \nCntrb_id is stated to be \"The ID of the person that closed the issue\" and even for closed issues I am seeing nulls \n", "before_files": [{"content": "import time\nimport logging\nimport traceback\n\nfrom augur.tasks.init.celery_app import celery_app as celery\nfrom augur.tasks.init.celery_app import AugurCoreRepoCollectionTask\nfrom augur.application.db.data_parse import *\nfrom augur.tasks.github.util.github_paginator import GithubPaginator, hit_api\nfrom augur.tasks.github.util.github_task_session import GithubTaskManifest\nfrom augur.application.db.session import DatabaseSession\nfrom augur.tasks.github.util.util import get_owner_repo\nfrom augur.tasks.util.worker_util import remove_duplicate_dicts\nfrom augur.application.db.models import PullRequest, Message, PullRequestReview, PullRequestLabel, PullRequestReviewer, PullRequestEvent, PullRequestMeta, PullRequestAssignee, PullRequestReviewMessageRef, Issue, IssueEvent, IssueLabel, IssueAssignee, PullRequestMessageRef, IssueMessageRef, Contributor, Repo\nfrom augur.application.db.util import execute_session_query\n\nplatform_id = 1\n\[email protected](base=AugurCoreRepoCollectionTask)\ndef collect_events(repo_git: str):\n\n logger = logging.getLogger(collect_events.__name__)\n \n with GithubTaskManifest(logger) as manifest:\n\n augur_db = manifest.augur_db\n\n try:\n \n query = augur_db.session.query(Repo).filter(Repo.repo_git == repo_git)\n repo_obj = execute_session_query(query, 'one')\n repo_id = repo_obj.repo_id\n\n owner, repo = get_owner_repo(repo_git)\n\n logger.info(f\"Collecting Github events for {owner}/{repo}\")\n\n url = f\"https://api.github.com/repos/{owner}/{repo}/issues/events\"\n\n event_data = retrieve_all_event_data(repo_git, logger, manifest.key_auth)\n\n if 
event_data:\n \n process_events(event_data, f\"{owner}/{repo}: Event task\", repo_id, logger, manifest.augur_db)\n\n else:\n logger.info(f\"{owner}/{repo} has no events\")\n except Exception as e:\n logger.error(f\"Could not collect events for {repo_git}\\n Reason: {e} \\n Traceback: {''.join(traceback.format_exception(None, e, e.__traceback__))}\")\n\n\ndef retrieve_all_event_data(repo_git: str, logger, key_auth):\n\n owner, repo = get_owner_repo(repo_git)\n\n logger.info(f\"Collecting Github events for {owner}/{repo}\")\n\n url = f\"https://api.github.com/repos/{owner}/{repo}/issues/events\"\n \n # returns an iterable of all issues at this url (this essentially means you can treat the issues variable as a list of the issues)\n events = GithubPaginator(url, key_auth, logger)\n\n\n num_pages = events.get_num_pages()\n all_data = []\n for page_data, page in events.iter_pages():\n\n if page_data is None:\n return all_data\n \n elif len(page_data) == 0:\n logger.debug(f\"{repo.capitalize()} Events Page {page} contains no data...returning\")\n logger.info(f\"Events Page {page} of {num_pages}\")\n return all_data\n\n logger.info(f\"{repo} Events Page {page} of {num_pages}\")\n\n all_data += page_data\n\n return all_data \n\ndef process_events(events, task_name, repo_id, logger, augur_db):\n \n tool_source = \"Github events task\"\n tool_version = \"2.0\"\n data_source = \"Github API\"\n \n pr_event_dicts = []\n issue_event_dicts = []\n contributors = []\n\n\n # create mapping from issue url to issue id of current issues\n issue_url_to_id_map = {}\n issues = augur_db.session.query(Issue).filter(Issue.repo_id == repo_id).all()\n for issue in issues:\n issue_url_to_id_map[issue.issue_url] = issue.issue_id\n\n # create mapping from pr url to pr id of current pull requests\n pr_url_to_id_map = {}\n prs = augur_db.session.query(PullRequest).filter(PullRequest.repo_id == repo_id).all()\n for pr in prs:\n pr_url_to_id_map[pr.pr_url] = pr.pull_request_id\n\n not_mapable_event_count = 0\n event_len = len(events)\n for event in events:\n\n event, contributor = process_github_event_contributors(logger, event, tool_source, tool_version, data_source)\n\n # event_mapping_data is the pr or issue data needed to relate the event to an issue or pr\n event_mapping_data = event[\"issue\"]\n\n if event_mapping_data is None:\n not_mapable_event_count += 1\n continue\n\n pull_request = event_mapping_data.get('pull_request', None)\n if pull_request:\n pr_url = pull_request[\"url\"]\n\n try:\n pull_request_id = pr_url_to_id_map[pr_url]\n\n # query = augur_db.session.query(PullRequest).filter(PullRequest.pr_url == pr_url)\n # related_pr = execute_session_query(query, 'one')\n except KeyError:\n logger.info(f\"{task_name}: Could not find related pr\")\n logger.info(f\"{task_name}: We were searching for: {pr_url}\")\n logger.info(f\"{task_name}: Skipping\")\n continue\n\n pr_event_dicts.append(\n extract_pr_event_data(event, pull_request_id, platform_id, repo_id,\n tool_source, tool_version, data_source)\n )\n\n else:\n issue_url = event_mapping_data[\"url\"]\n\n try:\n issue_id = issue_url_to_id_map[issue_url]\n # query = augur_db.session.query(Issue).filter(Issue.issue_url == issue_url)\n # related_issue = execute_session_query(query, 'one')\n except KeyError:\n logger.info(f\"{task_name}: Could not find related pr\")\n logger.info(f\"{task_name}: We were searching for: {issue_url}\")\n logger.info(f\"{task_name}: Skipping\")\n continue\n\n issue_event_dicts.append(\n extract_issue_event_data(event, issue_id, 
platform_id, repo_id,\n tool_source, tool_version, data_source)\n )\n \n # add contributor to list after porcessing the event, \n # so if it fails processing for some reason the contributor is not inserted\n # NOTE: contributor is none when there is no contributor data on the event\n if contributor:\n contributors.append(contributor)\n\n # remove contributors that were found in the data more than once\n contributors = remove_duplicate_dicts(contributors)\n\n augur_db.insert_data(contributors, Contributor, [\"cntrb_id\"])\n\n issue_events_len = len(issue_event_dicts)\n pr_events_len = len(pr_event_dicts)\n if event_len != (issue_events_len + pr_events_len):\n\n unassigned_events = event_len - issue_events_len - pr_events_len\n\n logger.error(f\"{task_name}: {event_len} events were processed, but {pr_events_len} pr events were found and related to a pr, and {issue_events_len} issue events were found and related to an issue. {not_mapable_event_count} events were not related to a pr or issue due to the api returning insufficient data. For some reason {unassigned_events} events were not able to be processed even when the api returned sufficient data. This is usually because pull requests or issues have not been collected, and the events are skipped because they cannot be related to a pr or issue\")\n\n logger.info(f\"{task_name}: Inserting {len(pr_event_dicts)} pr events and {len(issue_event_dicts)} issue events\")\n\n # TODO: Could replace this with \"id\" but it isn't stored on the table for some reason\n pr_event_natural_keys = [\"node_id\"]\n augur_db.insert_data(pr_event_dicts, PullRequestEvent, pr_event_natural_keys)\n\n issue_event_natural_keys = [\"issue_id\", \"issue_event_src_id\"]\n augur_db.insert_data(issue_event_dicts, IssueEvent, issue_event_natural_keys)\n\n\n# TODO: Should we skip an event if there is no contributor to resolve it o\ndef process_github_event_contributors(logger, event, tool_source, tool_version, data_source):\n\n if event[\"actor\"]:\n\n event_cntrb = extract_needed_contributor_data(event[\"actor\"], tool_source, tool_version, data_source)\n event[\"cntrb_id\"] = event_cntrb[\"cntrb_id\"]\n\n else:\n event[\"cntrb_id\"] = None\n return event, None\n \n return event, event_cntrb\n", "path": "augur/tasks/github/events/tasks.py"}], "after_files": [{"content": "import time\nimport logging\nimport traceback\nimport sqlalchemy as s\n\nfrom augur.tasks.init.celery_app import celery_app as celery\nfrom augur.tasks.init.celery_app import AugurCoreRepoCollectionTask\nfrom augur.application.db.data_parse import *\nfrom augur.tasks.github.util.github_paginator import GithubPaginator, hit_api\nfrom augur.tasks.github.util.github_task_session import GithubTaskManifest\nfrom augur.application.db.session import DatabaseSession\nfrom augur.tasks.github.util.util import get_owner_repo\nfrom augur.tasks.util.worker_util import remove_duplicate_dicts\nfrom augur.application.db.models import PullRequest, Message, PullRequestReview, PullRequestLabel, PullRequestReviewer, PullRequestEvent, PullRequestMeta, PullRequestAssignee, PullRequestReviewMessageRef, Issue, IssueEvent, IssueLabel, IssueAssignee, PullRequestMessageRef, IssueMessageRef, Contributor, Repo\nfrom augur.application.db.util import execute_session_query\n\nplatform_id = 1\n\[email protected](base=AugurCoreRepoCollectionTask)\ndef collect_events(repo_git: str):\n\n logger = logging.getLogger(collect_events.__name__)\n \n with GithubTaskManifest(logger) as manifest:\n\n augur_db = manifest.augur_db\n\n try:\n \n query = 
augur_db.session.query(Repo).filter(Repo.repo_git == repo_git)\n repo_obj = execute_session_query(query, 'one')\n repo_id = repo_obj.repo_id\n\n owner, repo = get_owner_repo(repo_git)\n\n logger.info(f\"Collecting Github events for {owner}/{repo}\")\n\n url = f\"https://api.github.com/repos/{owner}/{repo}/issues/events\"\n\n event_data = retrieve_all_event_data(repo_git, logger, manifest.key_auth)\n\n if event_data:\n \n process_events(event_data, f\"{owner}/{repo}: Event task\", repo_id, logger, manifest.augur_db)\n\n else:\n logger.info(f\"{owner}/{repo} has no events\")\n except Exception as e:\n logger.error(f\"Could not collect events for {repo_git}\\n Reason: {e} \\n Traceback: {''.join(traceback.format_exception(None, e, e.__traceback__))}\")\n\n\ndef retrieve_all_event_data(repo_git: str, logger, key_auth):\n\n owner, repo = get_owner_repo(repo_git)\n\n logger.info(f\"Collecting Github events for {owner}/{repo}\")\n\n url = f\"https://api.github.com/repos/{owner}/{repo}/issues/events\"\n \n # returns an iterable of all issues at this url (this essentially means you can treat the issues variable as a list of the issues)\n events = GithubPaginator(url, key_auth, logger)\n\n\n num_pages = events.get_num_pages()\n all_data = []\n for page_data, page in events.iter_pages():\n\n if page_data is None:\n return all_data\n \n elif len(page_data) == 0:\n logger.debug(f\"{repo.capitalize()} Events Page {page} contains no data...returning\")\n logger.info(f\"Events Page {page} of {num_pages}\")\n return all_data\n\n logger.info(f\"{repo} Events Page {page} of {num_pages}\")\n\n all_data += page_data\n\n return all_data \n\ndef process_events(events, task_name, repo_id, logger, augur_db):\n \n tool_source = \"Github events task\"\n tool_version = \"2.0\"\n data_source = \"Github API\"\n \n pr_event_dicts = []\n issue_event_dicts = []\n contributors = []\n\n\n # create mapping from issue url to issue id of current issues\n issue_url_to_id_map = {}\n issues = augur_db.session.query(Issue).filter(Issue.repo_id == repo_id).all()\n for issue in issues:\n issue_url_to_id_map[issue.issue_url] = issue.issue_id\n\n # create mapping from pr url to pr id of current pull requests\n pr_url_to_id_map = {}\n prs = augur_db.session.query(PullRequest).filter(PullRequest.repo_id == repo_id).all()\n for pr in prs:\n pr_url_to_id_map[pr.pr_url] = pr.pull_request_id\n\n not_mapable_event_count = 0\n event_len = len(events)\n for event in events:\n\n event, contributor = process_github_event_contributors(logger, event, tool_source, tool_version, data_source)\n\n # event_mapping_data is the pr or issue data needed to relate the event to an issue or pr\n event_mapping_data = event[\"issue\"]\n\n if event_mapping_data is None:\n not_mapable_event_count += 1\n continue\n\n pull_request = event_mapping_data.get('pull_request', None)\n if pull_request:\n pr_url = pull_request[\"url\"]\n\n try:\n pull_request_id = pr_url_to_id_map[pr_url]\n\n # query = augur_db.session.query(PullRequest).filter(PullRequest.pr_url == pr_url)\n # related_pr = execute_session_query(query, 'one')\n except KeyError:\n logger.info(f\"{task_name}: Could not find related pr\")\n logger.info(f\"{task_name}: We were searching for: {pr_url}\")\n logger.info(f\"{task_name}: Skipping\")\n continue\n\n pr_event_dicts.append(\n extract_pr_event_data(event, pull_request_id, platform_id, repo_id,\n tool_source, tool_version, data_source)\n )\n\n else:\n issue_url = event_mapping_data[\"url\"]\n\n try:\n issue_id = issue_url_to_id_map[issue_url]\n # query = 
augur_db.session.query(Issue).filter(Issue.issue_url == issue_url)\n # related_issue = execute_session_query(query, 'one')\n except KeyError:\n logger.info(f\"{task_name}: Could not find related pr\")\n logger.info(f\"{task_name}: We were searching for: {issue_url}\")\n logger.info(f\"{task_name}: Skipping\")\n continue\n\n issue_event_dicts.append(\n extract_issue_event_data(event, issue_id, platform_id, repo_id,\n tool_source, tool_version, data_source)\n )\n \n # add contributor to list after porcessing the event, \n # so if it fails processing for some reason the contributor is not inserted\n # NOTE: contributor is none when there is no contributor data on the event\n if contributor:\n contributors.append(contributor)\n\n # remove contributors that were found in the data more than once\n contributors = remove_duplicate_dicts(contributors)\n\n augur_db.insert_data(contributors, Contributor, [\"cntrb_id\"])\n\n issue_events_len = len(issue_event_dicts)\n pr_events_len = len(pr_event_dicts)\n if event_len != (issue_events_len + pr_events_len):\n\n unassigned_events = event_len - issue_events_len - pr_events_len\n\n logger.error(f\"{task_name}: {event_len} events were processed, but {pr_events_len} pr events were found and related to a pr, and {issue_events_len} issue events were found and related to an issue. {not_mapable_event_count} events were not related to a pr or issue due to the api returning insufficient data. For some reason {unassigned_events} events were not able to be processed even when the api returned sufficient data. This is usually because pull requests or issues have not been collected, and the events are skipped because they cannot be related to a pr or issue\")\n\n logger.info(f\"{task_name}: Inserting {len(pr_event_dicts)} pr events and {len(issue_event_dicts)} issue events\")\n\n # TODO: Could replace this with \"id\" but it isn't stored on the table for some reason\n pr_event_natural_keys = [\"node_id\"]\n augur_db.insert_data(pr_event_dicts, PullRequestEvent, pr_event_natural_keys)\n\n issue_event_natural_keys = [\"issue_id\", \"issue_event_src_id\"]\n augur_db.insert_data(issue_event_dicts, IssueEvent, issue_event_natural_keys)\n\n update_issue_closed_cntrbs_from_events(augur_db.engine, repo_id)\n\n# TODO: Should we skip an event if there is no contributor to resolve it o\ndef process_github_event_contributors(logger, event, tool_source, tool_version, data_source):\n\n if event[\"actor\"]:\n\n event_cntrb = extract_needed_contributor_data(event[\"actor\"], tool_source, tool_version, data_source)\n event[\"cntrb_id\"] = event_cntrb[\"cntrb_id\"]\n\n else:\n event[\"cntrb_id\"] = None\n return event, None\n \n return event, event_cntrb\n\n\ndef update_issue_closed_cntrbs_from_events(engine, repo_id):\n\n get_ranked_issues = s.text(f\"\"\"\n WITH RankedIssues AS (\n SELECT repo_id, issue_id, cntrb_id, \n ROW_NUMBER() OVER(PARTITION BY issue_id ORDER BY created_at DESC) AS rn\n FROM issue_events \n WHERE \"action\" = 'closed'\n )\n \n SELECT issue_id, cntrb_id from RankedIssues where rn=1 and repo_id={repo_id}\n \"\"\")\n result = engine.execute(get_ranked_issues).fetchall()\n\n update_data = [{'issue_id': row['issue_id'], 'cntrb_id': row['cntrb_id'], 'repo_id': repo_id} for row in result]\n with engine.connect() as connection:\n update_stmt = s.text(\"\"\"\n UPDATE issues\n SET cntrb_id = :cntrb_id\n WHERE issue_id = :issue_id\n AND repo_id = :repo_id\n \"\"\")\n connection.execute(update_stmt, update_data)\n\n\n", "path": "augur/tasks/github/events/tasks.py"}]}
2,626
487
gh_patches_debug_15882
rasdani/github-patches
git_diff
beeware__toga-850
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Table rows not rendering correctly in `update_data` winforms ## Expected Behavior View text in the rows ## Current Behavior Rows are created but text is not displayed. ![image](https://user-images.githubusercontent.com/479689/78953344-20995d80-7b0b-11ea-80da-889b488372be.png) When I insert a new row (with insert button) is working ok: ![image](https://user-images.githubusercontent.com/479689/78953506-a3221d00-7b0b-11ea-8ec5-b1429da77e54.png) In found that a change was made in `update_data` ( https://github.com/beeware/toga/commit/cb326e79ea1884f9e71fadfb1d7daf0688e78753) and `update_data` use a different ListViewItem creation than `insert` method which is working ok. The specific line is the change made from this one: ``` item._impl = WinForms.ListViewItem([ str(getattr(item, attr)) for attr in self.interface._accessors ]) ``` to this: ``` item._impl = WinForms.ListViewItem(self.row_data(item)) ``` ## Steps to reproduce 1. Open example and that's it ## Your Environment * Python Version (list the specific version number) 3.7.7 * Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well) - [ ] macOS - version: - [ ] Linux - distro: - version: - [x] Windows - version: 10 Pro - [ ] Other - name: - version: * Toga Version (list the specific version number or git hash) Master branch of toga. * Toga Target (the type of app you are trying to generate) - [ ] android - [ ] cocoa - [ ] django - [ ] gtk - [ ] iOS - [ ] tvOS - [ ] watchOS - [x] winforms - [ ] win32 - [ ] Other (please specify) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/winforms/toga_winforms/widgets/table.py` Content: ``` 1 from travertino.size import at_least 2 3 from toga_winforms.libs import WinForms 4 5 from .base import Widget 6 7 8 class Table(Widget): 9 def create(self): 10 self._container = self 11 self.native = WinForms.ListView() 12 self.native.View = WinForms.View.Details 13 14 dataColumn = [] 15 for heading in self.interface.headings: 16 col = WinForms.ColumnHeader() 17 col.Text = heading 18 dataColumn.append(col) 19 20 self.native.FullRowSelect = True 21 self.native.Multiselect = self.interface.multiple_select 22 self.native.DoubleBuffered = True 23 self.native.Columns.AddRange(dataColumn) 24 25 def change_source(self, source): 26 self.update_data() 27 28 def row_data(self, item): 29 # TODO: Winforms can't support icons in tree cells; so, if the data source 30 # specifies an icon, strip it when converting to row data. 
31 def strip_icon(item, attr): 32 val = getattr(item, attr) 33 if isinstance(val, tuple): 34 return str(val[1]) 35 return str(val) 36 37 return [item] + [ 38 strip_icon(item, attr) 39 for attr in self.interface._accessors 40 ] 41 42 def update_data(self): 43 self.native.BeginUpdate() 44 self.native.Items.Clear() 45 items = [] 46 for item in self.interface.data: 47 item._impl = WinForms.ListViewItem(self.row_data(item)) 48 items.append(item._impl) 49 self.native.Items.AddRange(items) 50 self.native.EndUpdate() 51 52 def insert(self, index, item): 53 self.native.BeginUpdate() 54 item._impl = WinForms.ListViewItem([ 55 str(getattr(item, attr)) for attr in self.interface._accessors 56 ]) 57 self.native.Items.Insert(index, item._impl) 58 self.native.EndUpdate() 59 60 def change(self, item): 61 self.interface.factory.not_implemented('Table.change()') 62 63 def remove(self, item): 64 self.update_data() 65 66 def clear(self): 67 self.native.Items.Clear() 68 69 def set_on_select(self, handler): 70 self.interface.factory.not_implemented('Table.set_on_select()') 71 72 def scroll_to_row(self, row): 73 self.native.EnsureVisible(row) 74 self.interface.factory.not_implemented('Table.scroll_to_row()') 75 76 def rehint(self): 77 self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH) 78 self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT) 79 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/winforms/toga_winforms/widgets/table.py b/src/winforms/toga_winforms/widgets/table.py --- a/src/winforms/toga_winforms/widgets/table.py +++ b/src/winforms/toga_winforms/widgets/table.py @@ -34,7 +34,7 @@ return str(val[1]) return str(val) - return [item] + [ + return [ strip_icon(item, attr) for attr in self.interface._accessors ] @@ -51,9 +51,7 @@ def insert(self, index, item): self.native.BeginUpdate() - item._impl = WinForms.ListViewItem([ - str(getattr(item, attr)) for attr in self.interface._accessors - ]) + item._impl = WinForms.ListViewItem(self.row_data(item)) self.native.Items.Insert(index, item._impl) self.native.EndUpdate()
{"golden_diff": "diff --git a/src/winforms/toga_winforms/widgets/table.py b/src/winforms/toga_winforms/widgets/table.py\n--- a/src/winforms/toga_winforms/widgets/table.py\n+++ b/src/winforms/toga_winforms/widgets/table.py\n@@ -34,7 +34,7 @@\n return str(val[1])\n return str(val)\n \n- return [item] + [\n+ return [\n strip_icon(item, attr)\n for attr in self.interface._accessors\n ]\n@@ -51,9 +51,7 @@\n \n def insert(self, index, item):\n self.native.BeginUpdate()\n- item._impl = WinForms.ListViewItem([\n- str(getattr(item, attr)) for attr in self.interface._accessors\n- ])\n+ item._impl = WinForms.ListViewItem(self.row_data(item))\n self.native.Items.Insert(index, item._impl)\n self.native.EndUpdate()\n", "issue": "Table rows not rendering correctly in `update_data` winforms\n## Expected Behavior\r\nView text in the rows\r\n\r\n## Current Behavior\r\nRows are created but text is not displayed.\r\n![image](https://user-images.githubusercontent.com/479689/78953344-20995d80-7b0b-11ea-80da-889b488372be.png)\r\nWhen I insert a new row (with insert button) is working ok:\r\n![image](https://user-images.githubusercontent.com/479689/78953506-a3221d00-7b0b-11ea-8ec5-b1429da77e54.png)\r\n\r\nIn found that a change was made in `update_data` ( https://github.com/beeware/toga/commit/cb326e79ea1884f9e71fadfb1d7daf0688e78753) and `update_data` use a different ListViewItem creation than `insert` method which is working ok.\r\n\r\nThe specific line is the change made from this one:\r\n```\r\nitem._impl = WinForms.ListViewItem([\r\n str(getattr(item, attr)) for attr in self.interface._accessors\r\n ])\r\n```\r\nto this:\r\n```\r\nitem._impl = WinForms.ListViewItem(self.row_data(item))\r\n```\r\n\r\n## Steps to reproduce\r\n1. Open example and that's it\r\n\r\n## Your Environment\r\n\r\n* Python Version (list the specific version number)\r\n3.7.7\r\n\r\n* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)\r\n\r\n - [ ] macOS - version:\r\n - [ ] Linux - distro: - version:\r\n - [x] Windows - version: 10 Pro\r\n - [ ] Other - name: - version:\r\n\r\n* Toga Version (list the specific version number or git hash)\r\nMaster branch of toga.\r\n\r\n* Toga Target (the type of app you are trying to generate)\r\n\r\n - [ ] android\r\n - [ ] cocoa\r\n - [ ] django\r\n - [ ] gtk\r\n - [ ] iOS\r\n - [ ] tvOS\r\n - [ ] watchOS\r\n - [x] winforms\r\n - [ ] win32\r\n - [ ] Other (please specify)\r\n\n", "before_files": [{"content": "from travertino.size import at_least\n\nfrom toga_winforms.libs import WinForms\n\nfrom .base import Widget\n\n\nclass Table(Widget):\n def create(self):\n self._container = self\n self.native = WinForms.ListView()\n self.native.View = WinForms.View.Details\n\n dataColumn = []\n for heading in self.interface.headings:\n col = WinForms.ColumnHeader()\n col.Text = heading\n dataColumn.append(col)\n\n self.native.FullRowSelect = True\n self.native.Multiselect = self.interface.multiple_select\n self.native.DoubleBuffered = True\n self.native.Columns.AddRange(dataColumn)\n\n def change_source(self, source):\n self.update_data()\n\n def row_data(self, item):\n # TODO: Winforms can't support icons in tree cells; so, if the data source\n # specifies an icon, strip it when converting to row data.\n def strip_icon(item, attr):\n val = getattr(item, attr)\n if isinstance(val, tuple):\n return str(val[1])\n return str(val)\n\n return [item] + [\n strip_icon(item, attr)\n for attr in self.interface._accessors\n ]\n\n def 
update_data(self):\n self.native.BeginUpdate()\n self.native.Items.Clear()\n items = []\n for item in self.interface.data:\n item._impl = WinForms.ListViewItem(self.row_data(item))\n items.append(item._impl)\n self.native.Items.AddRange(items)\n self.native.EndUpdate()\n\n def insert(self, index, item):\n self.native.BeginUpdate()\n item._impl = WinForms.ListViewItem([\n str(getattr(item, attr)) for attr in self.interface._accessors\n ])\n self.native.Items.Insert(index, item._impl)\n self.native.EndUpdate()\n\n def change(self, item):\n self.interface.factory.not_implemented('Table.change()')\n\n def remove(self, item):\n self.update_data()\n\n def clear(self):\n self.native.Items.Clear()\n\n def set_on_select(self, handler):\n self.interface.factory.not_implemented('Table.set_on_select()')\n\n def scroll_to_row(self, row):\n self.native.EnsureVisible(row)\n self.interface.factory.not_implemented('Table.scroll_to_row()')\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)\n", "path": "src/winforms/toga_winforms/widgets/table.py"}], "after_files": [{"content": "from travertino.size import at_least\n\nfrom toga_winforms.libs import WinForms\n\nfrom .base import Widget\n\n\nclass Table(Widget):\n def create(self):\n self._container = self\n self.native = WinForms.ListView()\n self.native.View = WinForms.View.Details\n\n dataColumn = []\n for heading in self.interface.headings:\n col = WinForms.ColumnHeader()\n col.Text = heading\n dataColumn.append(col)\n\n self.native.FullRowSelect = True\n self.native.Multiselect = self.interface.multiple_select\n self.native.DoubleBuffered = True\n self.native.Columns.AddRange(dataColumn)\n\n def change_source(self, source):\n self.update_data()\n\n def row_data(self, item):\n # TODO: Winforms can't support icons in tree cells; so, if the data source\n # specifies an icon, strip it when converting to row data.\n def strip_icon(item, attr):\n val = getattr(item, attr)\n if isinstance(val, tuple):\n return str(val[1])\n return str(val)\n\n return [\n strip_icon(item, attr)\n for attr in self.interface._accessors\n ]\n\n def update_data(self):\n self.native.BeginUpdate()\n self.native.Items.Clear()\n items = []\n for item in self.interface.data:\n item._impl = WinForms.ListViewItem(self.row_data(item))\n items.append(item._impl)\n self.native.Items.AddRange(items)\n self.native.EndUpdate()\n\n def insert(self, index, item):\n self.native.BeginUpdate()\n item._impl = WinForms.ListViewItem(self.row_data(item))\n self.native.Items.Insert(index, item._impl)\n self.native.EndUpdate()\n\n def change(self, item):\n self.interface.factory.not_implemented('Table.change()')\n\n def remove(self, item):\n self.update_data()\n\n def clear(self):\n self.native.Items.Clear()\n\n def set_on_select(self, handler):\n self.interface.factory.not_implemented('Table.set_on_select()')\n\n def scroll_to_row(self, row):\n self.native.EnsureVisible(row)\n self.interface.factory.not_implemented('Table.scroll_to_row()')\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)\n", "path": "src/winforms/toga_winforms/widgets/table.py"}]}
1,461
200
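A framework-free sketch of the central idea in the toga fix above: build each row's cell text in a single helper so that `insert()` and `update_data()` cannot drift apart. The `FakeItem` class and accessor names are invented for illustration; this is not toga's real API.

```python
class FakeItem:
    # Stand-in for a toga data-source row; 'icon' carries an (icon, text) pair.
    def __init__(self, name, icon):
        self.name = name
        self.icon = icon

def row_data(item, accessors):
    """Return the cell strings for one row, dropping any bundled icon."""
    def strip_icon(value):
        if isinstance(value, tuple):
            return str(value[1])   # keep only the display text
        return str(value)
    return [strip_icon(getattr(item, attr)) for attr in accessors]

if __name__ == "__main__":
    item = FakeItem("docs", ("folder.png", "Documents"))
    print(row_data(item, ["name", "icon"]))   # ['docs', 'Documents']
```

The regression came from `row_data()` prepending the raw item object to that list while `insert()` built its own list by hand; routing both paths through one helper, as the golden diff does, removes that class of mismatch.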
gh_patches_debug_9260
rasdani/github-patches
git_diff
translate__pootle-6524
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Changing source (with xliff) can create submissions with no submitter # Steps to reproduce: - change source in file of existing unit - run update_stores # Results - submissions have no submitter - stuff breaks ## Expected result: - nothing breaks this only happens in xliff afaict - as i dont think its possible to change source of units in this way with po --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pootle/apps/pootle_store/receivers.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 from hashlib import md5 10 11 from django.db.models.signals import post_save, pre_save 12 from django.dispatch import receiver 13 from django.utils.encoding import force_bytes 14 15 from pootle.core.delegate import lifecycle, uniqueid 16 from pootle.core.models import Revision 17 from pootle.core.signals import update_checks, update_data 18 19 from .constants import FUZZY, TRANSLATED, UNTRANSLATED 20 from .models import Suggestion, Unit, UnitChange, UnitSource 21 22 23 @receiver(post_save, sender=Suggestion) 24 def handle_suggestion_added(**kwargs): 25 created = kwargs.get("created") 26 if not created: 27 return 28 store = kwargs["instance"].unit.store 29 update_data.send(store.__class__, instance=store) 30 31 32 @receiver(post_save, sender=Suggestion) 33 def handle_suggestion_accepted(**kwargs): 34 created = kwargs.get("created") 35 suggestion = kwargs["instance"] 36 if created or not suggestion.is_accepted: 37 return 38 update_data.send( 39 suggestion.unit.store.__class__, 40 instance=suggestion.unit.store) 41 42 43 @receiver(pre_save, sender=UnitSource) 44 def handle_unit_source_pre_save(**kwargs): 45 unit_source = kwargs["instance"] 46 created = not unit_source.pk 47 unit = unit_source.unit 48 if created: 49 unit_source.creation_revision = unit.revision 50 if created or unit.source_updated: 51 unit_source.source_hash = md5(force_bytes(unit.source_f)).hexdigest() 52 unit_source.source_length = len(unit.source_f) 53 unit_source.source_wordcount = max( 54 1, (unit.counter.count_words(unit.source_f.strings) or 0)) 55 56 57 @receiver(pre_save, sender=Unit) 58 def handle_unit_pre_save(**kwargs): 59 unit = kwargs["instance"] 60 auto_translated = False 61 62 if unit.source_updated: 63 # update source related fields 64 wc = unit.counter.count_words(unit.source_f.strings) 65 if not wc and not bool(filter(None, unit.target_f.strings)): 66 # auto-translate untranslated strings 67 unit.target = unit.source 68 unit.state = FUZZY 69 auto_translated = True 70 if unit.target_updated: 71 # update target related fields 72 unit.target_wordcount = unit.counter.count_words( 73 unit.target_f.strings) 74 unit.target_length = len(unit.target_f) 75 if filter(None, unit.target_f.strings): 76 if unit.state == UNTRANSLATED: 77 unit.state = TRANSLATED 78 else: 79 # if it was TRANSLATED then set to UNTRANSLATED 80 if unit.state > FUZZY: 81 unit.state = UNTRANSLATED 82 83 # Updating unit from the .po file set its revision property to 84 # a new value (the same for all units during its store updated) 85 # since that change doesn't require further sync but note that 86 # 
auto_translated units require further sync 87 update_revision = ( 88 unit.revision is None 89 or (not unit.revision_updated 90 and (unit.updated and not auto_translated))) 91 if update_revision: 92 unit.revision = Revision.incr() 93 94 if unit.index is None: 95 unit.index = unit.store.max_index() + 1 96 unitid = uniqueid.get(unit.__class__)(unit) 97 if unitid.changed: 98 unit.setid(unitid.getid()) 99 100 101 @receiver(pre_save, sender=UnitChange) 102 def handle_unit_pre_change(**kwargs): 103 unit_change = kwargs["instance"] 104 unit = unit_change.unit 105 if unit.state == UNTRANSLATED: 106 # clear reviewer and translator data if translation 107 # has been deleted 108 unit_change.submitted_by = None 109 unit_change.submitted_on = None 110 111 112 @receiver(post_save, sender=UnitChange) 113 def handle_unit_change(**kwargs): 114 unit_change = kwargs["instance"] 115 unit = unit_change.unit 116 created = not unit._frozen.pk 117 118 if not created: 119 lifecycle.get(Unit)(unit).change() 120 if not unit.source_updated and not unit.target_updated: 121 return 122 new_untranslated = (created and unit.state == UNTRANSLATED) 123 if not new_untranslated: 124 update_checks.send(unit.__class__, instance=unit) 125 if unit.istranslated(): 126 unit.update_tmserver() 127 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pootle/apps/pootle_store/receivers.py b/pootle/apps/pootle_store/receivers.py --- a/pootle/apps/pootle_store/receivers.py +++ b/pootle/apps/pootle_store/receivers.py @@ -98,17 +98,6 @@ unit.setid(unitid.getid()) -@receiver(pre_save, sender=UnitChange) -def handle_unit_pre_change(**kwargs): - unit_change = kwargs["instance"] - unit = unit_change.unit - if unit.state == UNTRANSLATED: - # clear reviewer and translator data if translation - # has been deleted - unit_change.submitted_by = None - unit_change.submitted_on = None - - @receiver(post_save, sender=UnitChange) def handle_unit_change(**kwargs): unit_change = kwargs["instance"]
{"golden_diff": "diff --git a/pootle/apps/pootle_store/receivers.py b/pootle/apps/pootle_store/receivers.py\n--- a/pootle/apps/pootle_store/receivers.py\n+++ b/pootle/apps/pootle_store/receivers.py\n@@ -98,17 +98,6 @@\n unit.setid(unitid.getid())\n \n \n-@receiver(pre_save, sender=UnitChange)\n-def handle_unit_pre_change(**kwargs):\n- unit_change = kwargs[\"instance\"]\n- unit = unit_change.unit\n- if unit.state == UNTRANSLATED:\n- # clear reviewer and translator data if translation\n- # has been deleted\n- unit_change.submitted_by = None\n- unit_change.submitted_on = None\n-\n-\n @receiver(post_save, sender=UnitChange)\n def handle_unit_change(**kwargs):\n unit_change = kwargs[\"instance\"]\n", "issue": "Changing source (with xliff) can create submissions with no submitter\n# Steps to reproduce:\r\n\r\n- change source in file of existing unit\r\n- run update_stores\r\n\r\n# Results\r\n\r\n- submissions have no submitter\r\n- stuff breaks\r\n\r\n## Expected result:\r\n\r\n- nothing breaks\r\n\r\n\r\nthis only happens in xliff afaict - as i dont think its possible to change source of units in this way with po\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom hashlib import md5\n\nfrom django.db.models.signals import post_save, pre_save\nfrom django.dispatch import receiver\nfrom django.utils.encoding import force_bytes\n\nfrom pootle.core.delegate import lifecycle, uniqueid\nfrom pootle.core.models import Revision\nfrom pootle.core.signals import update_checks, update_data\n\nfrom .constants import FUZZY, TRANSLATED, UNTRANSLATED\nfrom .models import Suggestion, Unit, UnitChange, UnitSource\n\n\n@receiver(post_save, sender=Suggestion)\ndef handle_suggestion_added(**kwargs):\n created = kwargs.get(\"created\")\n if not created:\n return\n store = kwargs[\"instance\"].unit.store\n update_data.send(store.__class__, instance=store)\n\n\n@receiver(post_save, sender=Suggestion)\ndef handle_suggestion_accepted(**kwargs):\n created = kwargs.get(\"created\")\n suggestion = kwargs[\"instance\"]\n if created or not suggestion.is_accepted:\n return\n update_data.send(\n suggestion.unit.store.__class__,\n instance=suggestion.unit.store)\n\n\n@receiver(pre_save, sender=UnitSource)\ndef handle_unit_source_pre_save(**kwargs):\n unit_source = kwargs[\"instance\"]\n created = not unit_source.pk\n unit = unit_source.unit\n if created:\n unit_source.creation_revision = unit.revision\n if created or unit.source_updated:\n unit_source.source_hash = md5(force_bytes(unit.source_f)).hexdigest()\n unit_source.source_length = len(unit.source_f)\n unit_source.source_wordcount = max(\n 1, (unit.counter.count_words(unit.source_f.strings) or 0))\n\n\n@receiver(pre_save, sender=Unit)\ndef handle_unit_pre_save(**kwargs):\n unit = kwargs[\"instance\"]\n auto_translated = False\n\n if unit.source_updated:\n # update source related fields\n wc = unit.counter.count_words(unit.source_f.strings)\n if not wc and not bool(filter(None, unit.target_f.strings)):\n # auto-translate untranslated strings\n unit.target = unit.source\n unit.state = FUZZY\n auto_translated = True\n if unit.target_updated:\n # update target related fields\n unit.target_wordcount = unit.counter.count_words(\n unit.target_f.strings)\n unit.target_length = len(unit.target_f)\n if 
filter(None, unit.target_f.strings):\n if unit.state == UNTRANSLATED:\n unit.state = TRANSLATED\n else:\n # if it was TRANSLATED then set to UNTRANSLATED\n if unit.state > FUZZY:\n unit.state = UNTRANSLATED\n\n # Updating unit from the .po file set its revision property to\n # a new value (the same for all units during its store updated)\n # since that change doesn't require further sync but note that\n # auto_translated units require further sync\n update_revision = (\n unit.revision is None\n or (not unit.revision_updated\n and (unit.updated and not auto_translated)))\n if update_revision:\n unit.revision = Revision.incr()\n\n if unit.index is None:\n unit.index = unit.store.max_index() + 1\n unitid = uniqueid.get(unit.__class__)(unit)\n if unitid.changed:\n unit.setid(unitid.getid())\n\n\n@receiver(pre_save, sender=UnitChange)\ndef handle_unit_pre_change(**kwargs):\n unit_change = kwargs[\"instance\"]\n unit = unit_change.unit\n if unit.state == UNTRANSLATED:\n # clear reviewer and translator data if translation\n # has been deleted\n unit_change.submitted_by = None\n unit_change.submitted_on = None\n\n\n@receiver(post_save, sender=UnitChange)\ndef handle_unit_change(**kwargs):\n unit_change = kwargs[\"instance\"]\n unit = unit_change.unit\n created = not unit._frozen.pk\n\n if not created:\n lifecycle.get(Unit)(unit).change()\n if not unit.source_updated and not unit.target_updated:\n return\n new_untranslated = (created and unit.state == UNTRANSLATED)\n if not new_untranslated:\n update_checks.send(unit.__class__, instance=unit)\n if unit.istranslated():\n unit.update_tmserver()\n", "path": "pootle/apps/pootle_store/receivers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom hashlib import md5\n\nfrom django.db.models.signals import post_save, pre_save\nfrom django.dispatch import receiver\nfrom django.utils.encoding import force_bytes\n\nfrom pootle.core.delegate import lifecycle, uniqueid\nfrom pootle.core.models import Revision\nfrom pootle.core.signals import update_checks, update_data\n\nfrom .constants import FUZZY, TRANSLATED, UNTRANSLATED\nfrom .models import Suggestion, Unit, UnitChange, UnitSource\n\n\n@receiver(post_save, sender=Suggestion)\ndef handle_suggestion_added(**kwargs):\n created = kwargs.get(\"created\")\n if not created:\n return\n store = kwargs[\"instance\"].unit.store\n update_data.send(store.__class__, instance=store)\n\n\n@receiver(post_save, sender=Suggestion)\ndef handle_suggestion_accepted(**kwargs):\n created = kwargs.get(\"created\")\n suggestion = kwargs[\"instance\"]\n if created or not suggestion.is_accepted:\n return\n update_data.send(\n suggestion.unit.store.__class__,\n instance=suggestion.unit.store)\n\n\n@receiver(pre_save, sender=UnitSource)\ndef handle_unit_source_pre_save(**kwargs):\n unit_source = kwargs[\"instance\"]\n created = not unit_source.pk\n unit = unit_source.unit\n if created:\n unit_source.creation_revision = unit.revision\n if created or unit.source_updated:\n unit_source.source_hash = md5(force_bytes(unit.source_f)).hexdigest()\n unit_source.source_length = len(unit.source_f)\n unit_source.source_wordcount = max(\n 1, (unit.counter.count_words(unit.source_f.strings) or 0))\n\n\n@receiver(pre_save, sender=Unit)\ndef handle_unit_pre_save(**kwargs):\n unit = kwargs[\"instance\"]\n auto_translated = False\n\n if unit.source_updated:\n # update source related fields\n wc = unit.counter.count_words(unit.source_f.strings)\n if not wc and not bool(filter(None, unit.target_f.strings)):\n # auto-translate untranslated strings\n unit.target = unit.source\n unit.state = FUZZY\n auto_translated = True\n if unit.target_updated:\n # update target related fields\n unit.target_wordcount = unit.counter.count_words(\n unit.target_f.strings)\n unit.target_length = len(unit.target_f)\n if filter(None, unit.target_f.strings):\n if unit.state == UNTRANSLATED:\n unit.state = TRANSLATED\n else:\n # if it was TRANSLATED then set to UNTRANSLATED\n if unit.state > FUZZY:\n unit.state = UNTRANSLATED\n\n # Updating unit from the .po file set its revision property to\n # a new value (the same for all units during its store updated)\n # since that change doesn't require further sync but note that\n # auto_translated units require further sync\n update_revision = (\n unit.revision is None\n or (not unit.revision_updated\n and (unit.updated and not auto_translated)))\n if update_revision:\n unit.revision = Revision.incr()\n\n if unit.index is None:\n unit.index = unit.store.max_index() + 1\n unitid = uniqueid.get(unit.__class__)(unit)\n if unitid.changed:\n unit.setid(unitid.getid())\n\n\n@receiver(post_save, sender=UnitChange)\ndef handle_unit_change(**kwargs):\n unit_change = kwargs[\"instance\"]\n unit = unit_change.unit\n created = not unit._frozen.pk\n\n if not created:\n lifecycle.get(Unit)(unit).change()\n if not unit.source_updated and not unit.target_updated:\n return\n new_untranslated = (created and unit.state == UNTRANSLATED)\n if not new_untranslated:\n update_checks.send(unit.__class__, instance=unit)\n if unit.istranslated():\n unit.update_tmserver()\n", "path": 
"pootle/apps/pootle_store/receivers.py"}]}
1,599
195
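For the pootle entry above, here is a self-contained `django.dispatch` sketch of the receiver pattern used in receivers.py. It assumes Django is installed but needs no settings or database; the `unit_changed` signal and its dict payload are invented for illustration.

```python
from django.dispatch import Signal, receiver

unit_changed = Signal()  # stand-in for the model post_save signal

@receiver(unit_changed)
def handle_unit_changed(sender, instance=None, created=False, **kwargs):
    # Mirror the guard in handle_unit_change(): bail out unless the
    # source or the target actually changed.
    if not (instance.get("source_updated") or instance.get("target_updated")):
        return
    print(f"recording change for unit {instance['id']} (created={created})")

if __name__ == "__main__":
    unit_changed.send(sender=None, instance={"id": 1, "source_updated": True})
    unit_changed.send(sender=None, instance={"id": 2})  # ignored: nothing changed
```

The golden diff simply deletes the pre_save hook that blanked the submitter fields, leaving the post_save handler above as the only place that reacts to unit changes.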
gh_patches_debug_6221
rasdani/github-patches
git_diff
scikit-hep__pyhf-837
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- bump jsonschema to v3.2.0+ to support draft 6 Currently on alpha release 3.0.x but can bump to 3.2.0 which was released. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup, find_packages 2 from pathlib import Path 3 4 this_directory = Path(__file__).parent.resolve() 5 with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst: 6 long_description = readme_rst.read() 7 8 extras_require = { 9 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'], 10 'torch': ['torch~=1.2'], 11 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'], 12 'xmlio': ['uproot'], 13 'minuit': ['iminuit'], 14 } 15 extras_require['backends'] = sorted( 16 set( 17 extras_require['tensorflow'] 18 + extras_require['torch'] 19 + extras_require['jax'] 20 + extras_require['minuit'] 21 ) 22 ) 23 extras_require['contrib'] = sorted(set(['matplotlib'])) 24 25 extras_require['test'] = sorted( 26 set( 27 extras_require['backends'] 28 + extras_require['xmlio'] 29 + extras_require['contrib'] 30 + [ 31 'pyflakes', 32 'pytest~=3.5', 33 'pytest-cov>=2.5.1', 34 'pytest-mock', 35 'pytest-benchmark[histogram]', 36 'pytest-console-scripts', 37 'pytest-mpl', 38 'pydocstyle', 39 'coverage>=4.0', # coveralls 40 'papermill~=1.0', 41 'nteract-scrapbook~=0.2', 42 'check-manifest', 43 'jupyter', 44 'uproot~=3.3', 45 'graphviz', 46 'jsonpatch', 47 'black', 48 ] 49 ) 50 ) 51 extras_require['docs'] = sorted( 52 set( 53 [ 54 'sphinx', 55 'sphinxcontrib-bibtex', 56 'sphinx-click', 57 'sphinx_rtd_theme', 58 'nbsphinx', 59 'ipywidgets', 60 'sphinx-issues', 61 'sphinx-copybutton>0.2.9', 62 ] 63 ) 64 ) 65 extras_require['develop'] = sorted( 66 set( 67 extras_require['docs'] 68 + extras_require['test'] 69 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine'] 70 ) 71 ) 72 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 73 74 75 setup( 76 name='pyhf', 77 version='0.4.1', 78 description='(partial) pure python histfactory implementation', 79 long_description=long_description, 80 long_description_content_type='text/x-rst', 81 url='https://github.com/scikit-hep/pyhf', 82 author='Lukas Heinrich, Matthew Feickert, Giordon Stark', 83 author_email='[email protected], [email protected], [email protected]', 84 license='Apache', 85 keywords='physics fitting numpy scipy tensorflow pytorch', 86 classifiers=[ 87 "Programming Language :: Python :: 3", 88 "Programming Language :: Python :: 3.6", 89 "Programming Language :: Python :: 3.7", 90 ], 91 package_dir={'': 'src'}, 92 packages=find_packages(where='src'), 93 include_package_data=True, 94 python_requires=">=3.6", 95 install_requires=[ 96 'scipy', # requires numpy, which is required by pyhf and tensorflow 97 'click>=6.0', # for console scripts, 98 'tqdm', # for readxml 99 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6 100 'jsonpatch', 101 'pyyaml', # for parsing CLI equal-delimited options 102 ], 103 extras_require=extras_require, 104 entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']}, 105 dependency_links=[], 106 use_scm_version=lambda: {'local_scheme': lambda version: ''}, 107 ) 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -96,7 +96,7 @@ 'scipy', # requires numpy, which is required by pyhf and tensorflow 'click>=6.0', # for console scripts, 'tqdm', # for readxml - 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6 + 'jsonschema>=3.2.0', # for utils 'jsonpatch', 'pyyaml', # for parsing CLI equal-delimited options ],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -96,7 +96,7 @@\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n- 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n+ 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n", "issue": "bump jsonschema to v3.2.0+ to support draft 6\nCurrently on alpha release 3.0.x but can bump to 3.2.0 which was released.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=1.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import 
Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=1.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
1,368
143
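A quick check, related to the pyhf entry above, that a released jsonschema (3.2.0 or later, as the patch pins) handles draft-6 keywords. The schema here is a made-up fragment for illustration, not pyhf's real defs.json.

```python
import jsonschema

schema = {
    "$schema": "http://json-schema.org/draft-06/schema#",
    "type": "object",
    "properties": {"type": {"const": "normsys"}},   # "const" arrived in draft 6
    "required": ["type"],
}

jsonschema.Draft6Validator.check_schema(schema)        # the schema itself is valid draft 6
validator = jsonschema.Draft6Validator(schema)
validator.validate({"type": "normsys"})                # passes silently
print([err.message for err in validator.iter_errors({})])  # ["'type' is a required property"]
```

Pinning to a stable release rather than the 3.0 alpha keeps the draft-6 validator available without depending on pre-release packaging.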
gh_patches_debug_925
rasdani/github-patches
git_diff
dynamiqs__dynamiqs-196
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- implement a ver() method As a user if I want to make sure my setup is up to date with the latest version, I want to be able to call dq.ver() to know which version I am running --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dynamiqs/__init__.py` Content: ``` 1 from .mesolve import mesolve 2 from .sesolve import sesolve 3 from .smesolve import smesolve 4 from .utils import * 5 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dynamiqs/__init__.py b/dynamiqs/__init__.py --- a/dynamiqs/__init__.py +++ b/dynamiqs/__init__.py @@ -1,4 +1,9 @@ +from importlib.metadata import version + from .mesolve import mesolve from .sesolve import sesolve from .smesolve import smesolve from .utils import * + +# get version from pyproject.toml +__version__ = version(__package__)
{"golden_diff": "diff --git a/dynamiqs/__init__.py b/dynamiqs/__init__.py\n--- a/dynamiqs/__init__.py\n+++ b/dynamiqs/__init__.py\n@@ -1,4 +1,9 @@\n+from importlib.metadata import version\n+\n from .mesolve import mesolve\n from .sesolve import sesolve\n from .smesolve import smesolve\n from .utils import *\n+\n+# get version from pyproject.toml\n+__version__ = version(__package__)\n", "issue": "implement a ver() method\nAs a user if I want to make sure my setup is up to date with the latest version, I want to be able to call dq.ver() to know which version I am running\n", "before_files": [{"content": "from .mesolve import mesolve\nfrom .sesolve import sesolve\nfrom .smesolve import smesolve\nfrom .utils import *\n", "path": "dynamiqs/__init__.py"}], "after_files": [{"content": "from importlib.metadata import version\n\nfrom .mesolve import mesolve\nfrom .sesolve import sesolve\nfrom .smesolve import smesolve\nfrom .utils import *\n\n# get version from pyproject.toml\n__version__ = version(__package__)\n", "path": "dynamiqs/__init__.py"}]}
341
113
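The dynamiqs patch above reads the version from installed package metadata; below is a minimal sketch of the same pattern wrapped in the `ver()` helper the issue asks for. It needs Python 3.8+ for `importlib.metadata`, and the package names are only examples.

```python
from importlib.metadata import PackageNotFoundError, version

def ver(package="dynamiqs"):
    """Return the installed version string, or None if the package is absent."""
    try:
        return version(package)
    except PackageNotFoundError:
        return None

if __name__ == "__main__":
    print(ver("pip"))              # e.g. '23.2.1', whatever pip is installed
    print(ver("no-such-package"))  # None
```

Reading the version from metadata means pyproject.toml stays the single source of truth, instead of a string duplicated in `__init__.py`.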
gh_patches_debug_132
rasdani/github-patches
git_diff
cloud-custodian__cloud-custodian-3433
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ci - failure around mismatched versions of tabulate Per current ci failures we're getting 0.8.3 of tabulate installed even though azure-cli-core calls out a pin to under 0.8.2. This mirrors the issue we had with fakeredis, where it properly declared a dependency for six == 0.12.0 and we picked up the version pin in requirements.txt. digging around a bit more, pip released a new 19 release series in the last 72hrs, that i'm currently examining for regressions that allowed for installs that ignore package dependencies, when given requirements. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import os 2 from io import open 3 from setuptools import setup, find_packages 4 5 6 def read(fname): 7 return open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read() 8 9 10 setup( 11 name="c7n", 12 version='0.8.33.1', 13 description="Cloud Custodian - Policy Rules Engine", 14 long_description=read('README.rst'), 15 classifiers=[ 16 "Topic :: System :: Systems Administration", 17 "Topic :: System :: Distributed Computing" 18 ], 19 url="https://github.com/capitalone/cloud-custodian", 20 license="Apache-2.0", 21 packages=find_packages(), 22 entry_points={ 23 'console_scripts': [ 24 'custodian = c7n.cli:main']}, 25 install_requires=[ 26 "boto3>=1.9.62", 27 "botocore>=1.12.62", 28 "python-dateutil>=2.6,<3.0.0", 29 "pyyaml", 30 "jsonschema", 31 "jsonpatch>=1.21", 32 "argcomplete", 33 "tabulate" 34 ], 35 ) 36 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -30,6 +30,6 @@ "jsonschema", "jsonpatch>=1.21", "argcomplete", - "tabulate" + "tabulate==0.8.2" ], )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,6 +30,6 @@\n \"jsonschema\",\n \"jsonpatch>=1.21\",\n \"argcomplete\",\n- \"tabulate\"\n+ \"tabulate==0.8.2\"\n ],\n )\n", "issue": "ci - failure around mismatched versions of tabulate\nPer current ci failures we're getting 0.8.3 of tabulate installed even though azure-cli-core calls out a pin to under 0.8.2.\r\n\r\nThis mirrors the issue we had with fakeredis, where it properly declared a dependency for six == 0.12.0 and we picked up the version pin in requirements.txt.\r\n\r\ndigging around a bit more, pip released a new 19 release series in the last 72hrs, that i'm currently examining for regressions that allowed for installs that ignore package dependencies, when given requirements.\r\n\n", "before_files": [{"content": "import os\nfrom io import open\nfrom setuptools import setup, find_packages\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()\n\n\nsetup(\n name=\"c7n\",\n version='0.8.33.1',\n description=\"Cloud Custodian - Policy Rules Engine\",\n long_description=read('README.rst'),\n classifiers=[\n \"Topic :: System :: Systems Administration\",\n \"Topic :: System :: Distributed Computing\"\n ],\n url=\"https://github.com/capitalone/cloud-custodian\",\n license=\"Apache-2.0\",\n packages=find_packages(),\n entry_points={\n 'console_scripts': [\n 'custodian = c7n.cli:main']},\n install_requires=[\n \"boto3>=1.9.62\",\n \"botocore>=1.12.62\",\n \"python-dateutil>=2.6,<3.0.0\",\n \"pyyaml\",\n \"jsonschema\",\n \"jsonpatch>=1.21\",\n \"argcomplete\",\n \"tabulate\"\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nfrom io import open\nfrom setuptools import setup, find_packages\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()\n\n\nsetup(\n name=\"c7n\",\n version='0.8.33.1',\n description=\"Cloud Custodian - Policy Rules Engine\",\n long_description=read('README.rst'),\n classifiers=[\n \"Topic :: System :: Systems Administration\",\n \"Topic :: System :: Distributed Computing\"\n ],\n url=\"https://github.com/capitalone/cloud-custodian\",\n license=\"Apache-2.0\",\n packages=find_packages(),\n entry_points={\n 'console_scripts': [\n 'custodian = c7n.cli:main']},\n install_requires=[\n \"boto3>=1.9.62\",\n \"botocore>=1.12.62\",\n \"python-dateutil>=2.6,<3.0.0\",\n \"pyyaml\",\n \"jsonschema\",\n \"jsonpatch>=1.21\",\n \"argcomplete\",\n \"tabulate==0.8.2\"\n ],\n)\n", "path": "setup.py"}]}
691
73
gh_patches_debug_26997
rasdani/github-patches
git_diff
mdn__kuma-6098
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- search_phase_execution_exception in ES on huuuge ?page params https://sentry.prod.mozaws.net/operations/mdn-prod/issues/6620806/ ``` TransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.') (24 additional frame(s) were not displayed) ... File "rest_framework/views.py", line 492, in dispatch response = handler(request, *args, **kwargs) File "rest_framework/generics.py", line 201, in get return self.list(request, *args, **kwargs) File "rest_framework/mixins.py", line 42, in list page = self.paginate_queryset(queryset) File "rest_framework/generics.py", line 173, in paginate_queryset return self.paginator.paginate_queryset(queryset, self.request, view=self) File "rest_framework/pagination.py", line 204, in paginate_queryset self.page = paginator.page(page_number) TransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.') ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kuma/search/paginator.py` Content: ``` 1 from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator 2 from django.utils.functional import cached_property 3 4 5 class SearchPaginator(Paginator): 6 """ 7 A better paginator for search results 8 9 The normal Paginator does a .count() query and then a slice. Since ES 10 results contain the total number of results, we can take an optimistic 11 slice and then adjust the count. 12 """ 13 14 def __init__(self, *args, **kwargs): 15 super(SearchPaginator, self).__init__(*args, **kwargs) 16 self._result_total = None 17 18 def validate_number(self, number): 19 """ 20 Validates the given 1-based page number. 21 22 This class overrides the default behavior and ignores the upper bound. 23 """ 24 try: 25 number = int(number) 26 except (TypeError, ValueError): 27 raise PageNotAnInteger('That page number is not an integer') 28 if number < 1: 29 raise EmptyPage('That page number is less than 1') 30 return number 31 32 def page(self, number): 33 """ 34 Returns a page object. 35 36 This class overrides the default behavior and ignores "orphans" and 37 assigns the count from the ES result to the Paginator. 38 """ 39 number = self.validate_number(number) 40 bottom = (number - 1) * self.per_page 41 top = bottom + self.per_page 42 43 # Force the search to evaluate and then attach the count. We want to 44 # avoid an extra useless query even if there are no results, so we 45 # directly fetch the count from hits. 46 result = self.object_list[bottom:top].execute() 47 page = Page(result.hits, number, self) 48 # Set the count to the results after post_filter 49 self._result_total = result.hits.total 50 # Also store the aggregations, if any. 51 page.aggregations = getattr(result, 'aggregations', None) 52 53 # Now that we have the count validate that the page number isn't higher 54 # than the possible number of pages and adjust accordingly. 
55 if number > self.num_pages: 56 if number == 1 and self.allow_empty_first_page: 57 pass 58 else: 59 raise EmptyPage('That page contains no results') 60 return page 61 62 @cached_property 63 def count(self): 64 """ 65 Returns the total number of results. 66 67 Paginator's count property will call .count() on the search object, 68 which returns results before the pre_filter. This will result in a 69 count that is too high. Instead, use 'total' from the results, 70 executing if needed. 71 """ 72 if self._result_total is not None: 73 return self._result_total 74 return self.object_list.execute().hits.total 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kuma/search/paginator.py b/kuma/search/paginator.py --- a/kuma/search/paginator.py +++ b/kuma/search/paginator.py @@ -1,4 +1,5 @@ -from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator +from django.core.paginator import ( + EmptyPage, InvalidPage, Page, PageNotAnInteger, Paginator) from django.utils.functional import cached_property @@ -19,7 +20,7 @@ """ Validates the given 1-based page number. - This class overrides the default behavior and ignores the upper bound. + We also check that the number isn't too large. """ try: number = int(number) @@ -27,6 +28,19 @@ raise PageNotAnInteger('That page number is not an integer') if number < 1: raise EmptyPage('That page number is less than 1') + + if number >= 1000: + # Anything >=1,000 will result in a hard error in + # Elasticsearch which would happen before we even get a chance + # to validate that the range is too big. The error you would + # get from Elasticsearch 6.x is something like this: + # + # Result window is too large, from + size must be less + # than or equal to: [10000] but was [11000]. + # + # See https://github.com/mdn/kuma/issues/6092 + raise InvalidPage('Page number too large') + return number def page(self, number):
{"golden_diff": "diff --git a/kuma/search/paginator.py b/kuma/search/paginator.py\n--- a/kuma/search/paginator.py\n+++ b/kuma/search/paginator.py\n@@ -1,4 +1,5 @@\n-from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator\n+from django.core.paginator import (\n+ EmptyPage, InvalidPage, Page, PageNotAnInteger, Paginator)\n from django.utils.functional import cached_property\n \n \n@@ -19,7 +20,7 @@\n \"\"\"\n Validates the given 1-based page number.\n \n- This class overrides the default behavior and ignores the upper bound.\n+ We also check that the number isn't too large.\n \"\"\"\n try:\n number = int(number)\n@@ -27,6 +28,19 @@\n raise PageNotAnInteger('That page number is not an integer')\n if number < 1:\n raise EmptyPage('That page number is less than 1')\n+\n+ if number >= 1000:\n+ # Anything >=1,000 will result in a hard error in\n+ # Elasticsearch which would happen before we even get a chance\n+ # to validate that the range is too big. The error you would\n+ # get from Elasticsearch 6.x is something like this:\n+ #\n+ # Result window is too large, from + size must be less\n+ # than or equal to: [10000] but was [11000].\n+ #\n+ # See https://github.com/mdn/kuma/issues/6092\n+ raise InvalidPage('Page number too large')\n+\n return number\n \n def page(self, number):\n", "issue": "search_phase_execution_exception in ES on huuuge ?page params\nhttps://sentry.prod.mozaws.net/operations/mdn-prod/issues/6620806/\n\n```\nTransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.')\n(24 additional frame(s) were not displayed)\n...\n File \"rest_framework/views.py\", line 492, in dispatch\n response = handler(request, *args, **kwargs)\n File \"rest_framework/generics.py\", line 201, in get\n return self.list(request, *args, **kwargs)\n File \"rest_framework/mixins.py\", line 42, in list\n page = self.paginate_queryset(queryset)\n File \"rest_framework/generics.py\", line 173, in paginate_queryset\n return self.paginator.paginate_queryset(queryset, self.request, view=self)\n File \"rest_framework/pagination.py\", line 204, in paginate_queryset\n self.page = paginator.page(page_number)\n\nTransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.')\n```\n", "before_files": [{"content": "from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator\nfrom django.utils.functional import cached_property\n\n\nclass SearchPaginator(Paginator):\n \"\"\"\n A better paginator for search results\n\n The normal Paginator does a .count() query and then a slice. 
Since ES\n results contain the total number of results, we can take an optimistic\n slice and then adjust the count.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SearchPaginator, self).__init__(*args, **kwargs)\n self._result_total = None\n\n def validate_number(self, number):\n \"\"\"\n Validates the given 1-based page number.\n\n This class overrides the default behavior and ignores the upper bound.\n \"\"\"\n try:\n number = int(number)\n except (TypeError, ValueError):\n raise PageNotAnInteger('That page number is not an integer')\n if number < 1:\n raise EmptyPage('That page number is less than 1')\n return number\n\n def page(self, number):\n \"\"\"\n Returns a page object.\n\n This class overrides the default behavior and ignores \"orphans\" and\n assigns the count from the ES result to the Paginator.\n \"\"\"\n number = self.validate_number(number)\n bottom = (number - 1) * self.per_page\n top = bottom + self.per_page\n\n # Force the search to evaluate and then attach the count. We want to\n # avoid an extra useless query even if there are no results, so we\n # directly fetch the count from hits.\n result = self.object_list[bottom:top].execute()\n page = Page(result.hits, number, self)\n # Set the count to the results after post_filter\n self._result_total = result.hits.total\n # Also store the aggregations, if any.\n page.aggregations = getattr(result, 'aggregations', None)\n\n # Now that we have the count validate that the page number isn't higher\n # than the possible number of pages and adjust accordingly.\n if number > self.num_pages:\n if number == 1 and self.allow_empty_first_page:\n pass\n else:\n raise EmptyPage('That page contains no results')\n return page\n\n @cached_property\n def count(self):\n \"\"\"\n Returns the total number of results.\n\n Paginator's count property will call .count() on the search object,\n which returns results before the pre_filter. This will result in a\n count that is too high. Instead, use 'total' from the results,\n executing if needed.\n \"\"\"\n if self._result_total is not None:\n return self._result_total\n return self.object_list.execute().hits.total\n", "path": "kuma/search/paginator.py"}], "after_files": [{"content": "from django.core.paginator import (\n EmptyPage, InvalidPage, Page, PageNotAnInteger, Paginator)\nfrom django.utils.functional import cached_property\n\n\nclass SearchPaginator(Paginator):\n \"\"\"\n A better paginator for search results\n\n The normal Paginator does a .count() query and then a slice. Since ES\n results contain the total number of results, we can take an optimistic\n slice and then adjust the count.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SearchPaginator, self).__init__(*args, **kwargs)\n self._result_total = None\n\n def validate_number(self, number):\n \"\"\"\n Validates the given 1-based page number.\n\n We also check that the number isn't too large.\n \"\"\"\n try:\n number = int(number)\n except (TypeError, ValueError):\n raise PageNotAnInteger('That page number is not an integer')\n if number < 1:\n raise EmptyPage('That page number is less than 1')\n\n if number >= 1000:\n # Anything >=1,000 will result in a hard error in\n # Elasticsearch which would happen before we even get a chance\n # to validate that the range is too big. 
The error you would\n # get from Elasticsearch 6.x is something like this:\n #\n # Result window is too large, from + size must be less\n # than or equal to: [10000] but was [11000].\n #\n # See https://github.com/mdn/kuma/issues/6092\n raise InvalidPage('Page number too large')\n\n return number\n\n def page(self, number):\n \"\"\"\n Returns a page object.\n\n This class overrides the default behavior and ignores \"orphans\" and\n assigns the count from the ES result to the Paginator.\n \"\"\"\n number = self.validate_number(number)\n bottom = (number - 1) * self.per_page\n top = bottom + self.per_page\n\n # Force the search to evaluate and then attach the count. We want to\n # avoid an extra useless query even if there are no results, so we\n # directly fetch the count from hits.\n result = self.object_list[bottom:top].execute()\n page = Page(result.hits, number, self)\n # Set the count to the results after post_filter\n self._result_total = result.hits.total\n # Also store the aggregations, if any.\n page.aggregations = getattr(result, 'aggregations', None)\n\n # Now that we have the count validate that the page number isn't higher\n # than the possible number of pages and adjust accordingly.\n if number > self.num_pages:\n if number == 1 and self.allow_empty_first_page:\n pass\n else:\n raise EmptyPage('That page contains no results')\n return page\n\n @cached_property\n def count(self):\n \"\"\"\n Returns the total number of results.\n\n Paginator's count property will call .count() on the search object,\n which returns results before the pre_filter. This will result in a\n count that is too high. Instead, use 'total' from the results,\n executing if needed.\n \"\"\"\n if self._result_total is not None:\n return self._result_total\n return self.object_list.execute().hits.total\n", "path": "kuma/search/paginator.py"}]}
1,365
373
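The SearchPaginator fix above rejects page numbers of 1000 or more up front, because Elasticsearch's default result window caps `from + size` at 10,000 (the "was [11000]" error quoted in the patched comment), so oversized requests never reach the search backend and the paginator raises `InvalidPage` instead. A minimal, illustrative sketch of the offsets involved, assuming the stock `per_page` of 10; the helper below is hypothetical and only mirrors the arithmetic in `page()`:

```python
# Illustrative only: the slice offsets SearchPaginator.page() would request.
# per_page = 10 is an assumption; the 10,000-result window comes from the
# Elasticsearch error quoted in the patched file's comment.
per_page = 10

def slice_for(page_number: int):
    bottom = (page_number - 1) * per_page
    return bottom, bottom + per_page

print(slice_for(999))    # (9980, 9990)   -> safely inside the window
print(slice_for(1100))   # (10990, 11000) -> the "was [11000]" case
```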
gh_patches_debug_11772
rasdani/github-patches
git_diff
ytdl-org__youtube-dl-19204
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ERROR: Unable to extract OpenGraph title; ## Please follow the guide below - You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly - Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`) - Use the *Preview* tab to see what your issue will actually look like --- ### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.12.17*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. - [x] I've **verified** and **I assure** that I'm running youtube-dl **2018.12.17** ### Before submitting an *issue* make sure you have: - [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections - [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones - [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser ### What is the purpose of your *issue*? - [x] Bug report (encountered problems with youtube-dl) - [ ] Site support request (request for adding support for a new site) - [ ] Feature request (request for a new functionality) - [ ] Question - [ ] Other --- ``` [debug] System config: [] [debug] User config: [] [debug] Custom config: [] [debug] Command-line args: [u'--verbose', u'https://i.imgur.com/jxBXAMC.gifv'] [debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8 [debug] youtube-dl version 2018.12.17 [debug] Python version 2.7.12 (CPython) - Linux-4.4.0-112-generic-x86_64-with-Ubuntu-16.04-xenial [debug] exe versions: ffmpeg 2.8.15-0ubuntu0.16.04.1, ffprobe 2.8.15-0ubuntu0.16.04.1, rtmpdump 2.4 [debug] Proxy map: {} [Imgur] jxBXAMC: Downloading webpage ERROR: Unable to extract OpenGraph title; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output. Traceback (most recent call last): File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 793, in extract_info ie_result = ie.extract(url) File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 508, in extract ie_result = self._real_extract(url) File "/usr/local/bin/youtube-dl/youtube_dl/extractor/imgur.py", line 90, in _real_extract 'title': self._og_search_title(webpage), File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 1095, in _og_search_title return self._og_search_property('title', html, **kargs) File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 1083, in _og_search_property escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs) File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 983, in _search_regex raise RegexNotFoundError('Unable to extract %s' % _name) RegexNotFoundError: Unable to extract OpenGraph title; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output. 
``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `youtube_dl/extractor/imgur.py` Content: ``` 1 from __future__ import unicode_literals 2 3 import re 4 5 from .common import InfoExtractor 6 from ..utils import ( 7 int_or_none, 8 js_to_json, 9 mimetype2ext, 10 ExtractorError, 11 ) 12 13 14 class ImgurIE(InfoExtractor): 15 _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!(?:a|gallery|(?:t(?:opic)?|r)/[^/]+)/)(?P<id>[a-zA-Z0-9]+)' 16 17 _TESTS = [{ 18 'url': 'https://i.imgur.com/A61SaA1.gifv', 19 'info_dict': { 20 'id': 'A61SaA1', 21 'ext': 'mp4', 22 'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$', 23 }, 24 }, { 25 'url': 'https://imgur.com/A61SaA1', 26 'only_matching': True, 27 }, { 28 'url': 'https://i.imgur.com/crGpqCV.mp4', 29 'only_matching': True, 30 }] 31 32 def _real_extract(self, url): 33 video_id = self._match_id(url) 34 webpage = self._download_webpage( 35 'https://i.imgur.com/{id}.gifv'.format(id=video_id), video_id) 36 37 width = int_or_none(self._og_search_property( 38 'video:width', webpage, default=None)) 39 height = int_or_none(self._og_search_property( 40 'video:height', webpage, default=None)) 41 42 video_elements = self._search_regex( 43 r'(?s)<div class="video-elements">(.*?)</div>', 44 webpage, 'video elements', default=None) 45 if not video_elements: 46 raise ExtractorError( 47 'No sources found for video %s. Maybe an image?' % video_id, 48 expected=True) 49 50 formats = [] 51 for m in re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements): 52 formats.append({ 53 'format_id': m.group('type').partition('/')[2], 54 'url': self._proto_relative_url(m.group('src')), 55 'ext': mimetype2ext(m.group('type')), 56 'width': width, 57 'height': height, 58 'http_headers': { 59 'User-Agent': 'youtube-dl (like wget)', 60 }, 61 }) 62 63 gif_json = self._search_regex( 64 r'(?s)var\s+videoItem\s*=\s*(\{.*?\})', 65 webpage, 'GIF code', fatal=False) 66 if gif_json: 67 gifd = self._parse_json( 68 gif_json, video_id, transform_source=js_to_json) 69 formats.append({ 70 'format_id': 'gif', 71 'preference': -10, 72 'width': width, 73 'height': height, 74 'ext': 'gif', 75 'acodec': 'none', 76 'vcodec': 'gif', 77 'container': 'gif', 78 'url': self._proto_relative_url(gifd['gifUrl']), 79 'filesize': gifd.get('size'), 80 'http_headers': { 81 'User-Agent': 'youtube-dl (like wget)', 82 }, 83 }) 84 85 self._sort_formats(formats) 86 87 return { 88 'id': video_id, 89 'formats': formats, 90 'title': self._og_search_title(webpage), 91 } 92 93 94 class ImgurGalleryIE(InfoExtractor): 95 IE_NAME = 'imgur:gallery' 96 _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:gallery|(?:t(?:opic)?|r)/[^/]+)/(?P<id>[a-zA-Z0-9]+)' 97 98 _TESTS = [{ 99 'url': 'http://imgur.com/gallery/Q95ko', 100 'info_dict': { 101 'id': 'Q95ko', 102 'title': 'Adding faces make every GIF better', 103 }, 104 'playlist_count': 25, 105 }, { 106 'url': 'http://imgur.com/topic/Aww/ll5Vk', 107 'only_matching': True, 108 }, { 109 'url': 'https://imgur.com/gallery/YcAQlkx', 110 'info_dict': { 111 'id': 'YcAQlkx', 112 'ext': 'mp4', 113 'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....', 114 } 115 }, { 116 'url': 'http://imgur.com/topic/Funny/N8rOudd', 117 'only_matching': True, 118 }, { 119 'url': 'http://imgur.com/r/aww/VQcQPhM', 120 'only_matching': True, 121 }] 122 123 def _real_extract(self, url): 124 gallery_id = self._match_id(url) 125 126 data = 
self._download_json( 127 'https://imgur.com/gallery/%s.json' % gallery_id, 128 gallery_id)['data']['image'] 129 130 if data.get('is_album'): 131 entries = [ 132 self.url_result('http://imgur.com/%s' % image['hash'], ImgurIE.ie_key(), image['hash']) 133 for image in data['album_images']['images'] if image.get('hash')] 134 return self.playlist_result(entries, gallery_id, data.get('title'), data.get('description')) 135 136 return self.url_result('http://imgur.com/%s' % gallery_id, ImgurIE.ie_key(), gallery_id) 137 138 139 class ImgurAlbumIE(ImgurGalleryIE): 140 IE_NAME = 'imgur:album' 141 _VALID_URL = r'https?://(?:i\.)?imgur\.com/a/(?P<id>[a-zA-Z0-9]+)' 142 143 _TESTS = [{ 144 'url': 'http://imgur.com/a/j6Orj', 145 'info_dict': { 146 'id': 'j6Orj', 147 'title': 'A Literary Analysis of "Star Wars: The Force Awakens"', 148 }, 149 'playlist_count': 12, 150 }] 151 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/youtube_dl/extractor/imgur.py b/youtube_dl/extractor/imgur.py --- a/youtube_dl/extractor/imgur.py +++ b/youtube_dl/extractor/imgur.py @@ -27,6 +27,10 @@ }, { 'url': 'https://i.imgur.com/crGpqCV.mp4', 'only_matching': True, + }, { + # no title + 'url': 'https://i.imgur.com/jxBXAMC.gifv', + 'only_matching': True, }] def _real_extract(self, url): @@ -87,7 +91,7 @@ return { 'id': video_id, 'formats': formats, - 'title': self._og_search_title(webpage), + 'title': self._og_search_title(webpage, default=video_id), }
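The patch works because the OpenGraph helpers accept a `default` and return it instead of raising `RegexNotFoundError` when the page carries no `og:title` tag, which is the case for the reported .gifv page (hence the added `# no title` test). A rough, self-contained sketch of that fallback pattern, not youtube-dl's actual helper:

```python
import re

_NO_DEFAULT = object()

def og_title(html: str, default=_NO_DEFAULT) -> str:
    # Simplified stand-in for _og_search_title(); the real helper builds its
    # regexes differently. This only illustrates the default-or-raise behaviour.
    m = re.search(r'<meta[^>]+property=["\']og:title["\'][^>]+content=["\']([^"\']*)', html)
    if m:
        return m.group(1)
    if default is not _NO_DEFAULT:
        return default  # e.g. the video id, as in the patch above
    raise ValueError("Unable to extract OpenGraph title")

print(og_title("<html><head></head></html>", default="jxBXAMC"))  # -> jxBXAMC
```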
{"golden_diff": "diff --git a/youtube_dl/extractor/imgur.py b/youtube_dl/extractor/imgur.py\n--- a/youtube_dl/extractor/imgur.py\n+++ b/youtube_dl/extractor/imgur.py\n@@ -27,6 +27,10 @@\n }, {\n 'url': 'https://i.imgur.com/crGpqCV.mp4',\n 'only_matching': True,\n+ }, {\n+ # no title\n+ 'url': 'https://i.imgur.com/jxBXAMC.gifv',\n+ 'only_matching': True,\n }]\n \n def _real_extract(self, url):\n@@ -87,7 +91,7 @@\n return {\n 'id': video_id,\n 'formats': formats,\n- 'title': self._og_search_title(webpage),\n+ 'title': self._og_search_title(webpage, default=video_id),\n }\n", "issue": "ERROR: Unable to extract OpenGraph title;\n## Please follow the guide below\r\n\r\n- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly\r\n- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)\r\n- Use the *Preview* tab to see what your issue will actually look like\r\n\r\n---\r\n\r\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.12.17*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2018.12.17**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n- [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser\r\n\r\n### What is the purpose of your *issue*?\r\n- [x] Bug report (encountered problems with youtube-dl)\r\n- [ ] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n---\r\n\r\n```\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: [u'--verbose', u'https://i.imgur.com/jxBXAMC.gifv']\r\n[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8\r\n[debug] youtube-dl version 2018.12.17\r\n[debug] Python version 2.7.12 (CPython) - Linux-4.4.0-112-generic-x86_64-with-Ubuntu-16.04-xenial\r\n[debug] exe versions: ffmpeg 2.8.15-0ubuntu0.16.04.1, ffprobe 2.8.15-0ubuntu0.16.04.1, rtmpdump 2.4\r\n[debug] Proxy map: {}\r\n[Imgur] jxBXAMC: Downloading webpage\r\nERROR: Unable to extract OpenGraph title; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. 
Be sure to call youtube-dl with the --verbose flag and include its complete output.\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 793, in extract_info\r\n ie_result = ie.extract(url)\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 508, in extract\r\n ie_result = self._real_extract(url)\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/imgur.py\", line 90, in _real_extract\r\n 'title': self._og_search_title(webpage),\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 1095, in _og_search_title\r\n return self._og_search_property('title', html, **kargs)\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 1083, in _og_search_property\r\n escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 983, in _search_regex\r\n raise RegexNotFoundError('Unable to extract %s' % _name)\r\nRegexNotFoundError: Unable to extract OpenGraph title; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n int_or_none,\n js_to_json,\n mimetype2ext,\n ExtractorError,\n)\n\n\nclass ImgurIE(InfoExtractor):\n _VALID_URL = r'https?://(?:i\\.)?imgur\\.com/(?!(?:a|gallery|(?:t(?:opic)?|r)/[^/]+)/)(?P<id>[a-zA-Z0-9]+)'\n\n _TESTS = [{\n 'url': 'https://i.imgur.com/A61SaA1.gifv',\n 'info_dict': {\n 'id': 'A61SaA1',\n 'ext': 'mp4',\n 'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',\n },\n }, {\n 'url': 'https://imgur.com/A61SaA1',\n 'only_matching': True,\n }, {\n 'url': 'https://i.imgur.com/crGpqCV.mp4',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(\n 'https://i.imgur.com/{id}.gifv'.format(id=video_id), video_id)\n\n width = int_or_none(self._og_search_property(\n 'video:width', webpage, default=None))\n height = int_or_none(self._og_search_property(\n 'video:height', webpage, default=None))\n\n video_elements = self._search_regex(\n r'(?s)<div class=\"video-elements\">(.*?)</div>',\n webpage, 'video elements', default=None)\n if not video_elements:\n raise ExtractorError(\n 'No sources found for video %s. Maybe an image?' 
% video_id,\n expected=True)\n\n formats = []\n for m in re.finditer(r'<source\\s+src=\"(?P<src>[^\"]+)\"\\s+type=\"(?P<type>[^\"]+)\"', video_elements):\n formats.append({\n 'format_id': m.group('type').partition('/')[2],\n 'url': self._proto_relative_url(m.group('src')),\n 'ext': mimetype2ext(m.group('type')),\n 'width': width,\n 'height': height,\n 'http_headers': {\n 'User-Agent': 'youtube-dl (like wget)',\n },\n })\n\n gif_json = self._search_regex(\n r'(?s)var\\s+videoItem\\s*=\\s*(\\{.*?\\})',\n webpage, 'GIF code', fatal=False)\n if gif_json:\n gifd = self._parse_json(\n gif_json, video_id, transform_source=js_to_json)\n formats.append({\n 'format_id': 'gif',\n 'preference': -10,\n 'width': width,\n 'height': height,\n 'ext': 'gif',\n 'acodec': 'none',\n 'vcodec': 'gif',\n 'container': 'gif',\n 'url': self._proto_relative_url(gifd['gifUrl']),\n 'filesize': gifd.get('size'),\n 'http_headers': {\n 'User-Agent': 'youtube-dl (like wget)',\n },\n })\n\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'formats': formats,\n 'title': self._og_search_title(webpage),\n }\n\n\nclass ImgurGalleryIE(InfoExtractor):\n IE_NAME = 'imgur:gallery'\n _VALID_URL = r'https?://(?:i\\.)?imgur\\.com/(?:gallery|(?:t(?:opic)?|r)/[^/]+)/(?P<id>[a-zA-Z0-9]+)'\n\n _TESTS = [{\n 'url': 'http://imgur.com/gallery/Q95ko',\n 'info_dict': {\n 'id': 'Q95ko',\n 'title': 'Adding faces make every GIF better',\n },\n 'playlist_count': 25,\n }, {\n 'url': 'http://imgur.com/topic/Aww/ll5Vk',\n 'only_matching': True,\n }, {\n 'url': 'https://imgur.com/gallery/YcAQlkx',\n 'info_dict': {\n 'id': 'YcAQlkx',\n 'ext': 'mp4',\n 'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....',\n }\n }, {\n 'url': 'http://imgur.com/topic/Funny/N8rOudd',\n 'only_matching': True,\n }, {\n 'url': 'http://imgur.com/r/aww/VQcQPhM',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n gallery_id = self._match_id(url)\n\n data = self._download_json(\n 'https://imgur.com/gallery/%s.json' % gallery_id,\n gallery_id)['data']['image']\n\n if data.get('is_album'):\n entries = [\n self.url_result('http://imgur.com/%s' % image['hash'], ImgurIE.ie_key(), image['hash'])\n for image in data['album_images']['images'] if image.get('hash')]\n return self.playlist_result(entries, gallery_id, data.get('title'), data.get('description'))\n\n return self.url_result('http://imgur.com/%s' % gallery_id, ImgurIE.ie_key(), gallery_id)\n\n\nclass ImgurAlbumIE(ImgurGalleryIE):\n IE_NAME = 'imgur:album'\n _VALID_URL = r'https?://(?:i\\.)?imgur\\.com/a/(?P<id>[a-zA-Z0-9]+)'\n\n _TESTS = [{\n 'url': 'http://imgur.com/a/j6Orj',\n 'info_dict': {\n 'id': 'j6Orj',\n 'title': 'A Literary Analysis of \"Star Wars: The Force Awakens\"',\n },\n 'playlist_count': 12,\n }]\n", "path": "youtube_dl/extractor/imgur.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n int_or_none,\n js_to_json,\n mimetype2ext,\n ExtractorError,\n)\n\n\nclass ImgurIE(InfoExtractor):\n _VALID_URL = r'https?://(?:i\\.)?imgur\\.com/(?!(?:a|gallery|(?:t(?:opic)?|r)/[^/]+)/)(?P<id>[a-zA-Z0-9]+)'\n\n _TESTS = [{\n 'url': 'https://i.imgur.com/A61SaA1.gifv',\n 'info_dict': {\n 'id': 'A61SaA1',\n 'ext': 'mp4',\n 'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',\n },\n }, {\n 'url': 'https://imgur.com/A61SaA1',\n 'only_matching': True,\n }, {\n 'url': 'https://i.imgur.com/crGpqCV.mp4',\n 'only_matching': True,\n }, {\n # no title\n 
'url': 'https://i.imgur.com/jxBXAMC.gifv',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(\n 'https://i.imgur.com/{id}.gifv'.format(id=video_id), video_id)\n\n width = int_or_none(self._og_search_property(\n 'video:width', webpage, default=None))\n height = int_or_none(self._og_search_property(\n 'video:height', webpage, default=None))\n\n video_elements = self._search_regex(\n r'(?s)<div class=\"video-elements\">(.*?)</div>',\n webpage, 'video elements', default=None)\n if not video_elements:\n raise ExtractorError(\n 'No sources found for video %s. Maybe an image?' % video_id,\n expected=True)\n\n formats = []\n for m in re.finditer(r'<source\\s+src=\"(?P<src>[^\"]+)\"\\s+type=\"(?P<type>[^\"]+)\"', video_elements):\n formats.append({\n 'format_id': m.group('type').partition('/')[2],\n 'url': self._proto_relative_url(m.group('src')),\n 'ext': mimetype2ext(m.group('type')),\n 'width': width,\n 'height': height,\n 'http_headers': {\n 'User-Agent': 'youtube-dl (like wget)',\n },\n })\n\n gif_json = self._search_regex(\n r'(?s)var\\s+videoItem\\s*=\\s*(\\{.*?\\})',\n webpage, 'GIF code', fatal=False)\n if gif_json:\n gifd = self._parse_json(\n gif_json, video_id, transform_source=js_to_json)\n formats.append({\n 'format_id': 'gif',\n 'preference': -10,\n 'width': width,\n 'height': height,\n 'ext': 'gif',\n 'acodec': 'none',\n 'vcodec': 'gif',\n 'container': 'gif',\n 'url': self._proto_relative_url(gifd['gifUrl']),\n 'filesize': gifd.get('size'),\n 'http_headers': {\n 'User-Agent': 'youtube-dl (like wget)',\n },\n })\n\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'formats': formats,\n 'title': self._og_search_title(webpage, default=video_id),\n }\n\n\nclass ImgurGalleryIE(InfoExtractor):\n IE_NAME = 'imgur:gallery'\n _VALID_URL = r'https?://(?:i\\.)?imgur\\.com/(?:gallery|(?:t(?:opic)?|r)/[^/]+)/(?P<id>[a-zA-Z0-9]+)'\n\n _TESTS = [{\n 'url': 'http://imgur.com/gallery/Q95ko',\n 'info_dict': {\n 'id': 'Q95ko',\n 'title': 'Adding faces make every GIF better',\n },\n 'playlist_count': 25,\n }, {\n 'url': 'http://imgur.com/topic/Aww/ll5Vk',\n 'only_matching': True,\n }, {\n 'url': 'https://imgur.com/gallery/YcAQlkx',\n 'info_dict': {\n 'id': 'YcAQlkx',\n 'ext': 'mp4',\n 'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....',\n }\n }, {\n 'url': 'http://imgur.com/topic/Funny/N8rOudd',\n 'only_matching': True,\n }, {\n 'url': 'http://imgur.com/r/aww/VQcQPhM',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n gallery_id = self._match_id(url)\n\n data = self._download_json(\n 'https://imgur.com/gallery/%s.json' % gallery_id,\n gallery_id)['data']['image']\n\n if data.get('is_album'):\n entries = [\n self.url_result('http://imgur.com/%s' % image['hash'], ImgurIE.ie_key(), image['hash'])\n for image in data['album_images']['images'] if image.get('hash')]\n return self.playlist_result(entries, gallery_id, data.get('title'), data.get('description'))\n\n return self.url_result('http://imgur.com/%s' % gallery_id, ImgurIE.ie_key(), gallery_id)\n\n\nclass ImgurAlbumIE(ImgurGalleryIE):\n IE_NAME = 'imgur:album'\n _VALID_URL = r'https?://(?:i\\.)?imgur\\.com/a/(?P<id>[a-zA-Z0-9]+)'\n\n _TESTS = [{\n 'url': 'http://imgur.com/a/j6Orj',\n 'info_dict': {\n 'id': 'j6Orj',\n 'title': 'A Literary Analysis of \"Star Wars: The Force Awakens\"',\n },\n 'playlist_count': 12,\n }]\n", "path": "youtube_dl/extractor/imgur.py"}]}
2,979
199
gh_patches_debug_400
rasdani/github-patches
git_diff
mozilla__bugbug-3941
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [model:accessibility] Add the model to `http_service` and `data_pipeline` Depends on merging: #3775 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `http_service/bugbug_http/models.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import logging 7 import os 8 from datetime import timedelta 9 from functools import lru_cache 10 from typing import Sequence 11 12 import orjson 13 import requests 14 import zstandard 15 from redis import Redis 16 17 from bugbug import bugzilla, repository, test_scheduling 18 from bugbug.github import Github 19 from bugbug.model import Model 20 from bugbug.models import testselect 21 from bugbug.utils import get_hgmo_stack 22 from bugbug_http.readthrough_cache import ReadthroughTTLCache 23 24 logging.basicConfig(level=logging.INFO) 25 LOGGER = logging.getLogger() 26 27 MODELS_NAMES = [ 28 "defectenhancementtask", 29 "component", 30 "invalidcompatibilityreport", 31 "needsdiagnosis", 32 "regression", 33 "stepstoreproduce", 34 "spambug", 35 "testlabelselect", 36 "testgroupselect", 37 ] 38 39 DEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week 40 redis = Redis.from_url(os.environ.get("REDIS_URL", "redis://localhost/0")) 41 42 MODEL_CACHE: ReadthroughTTLCache[str, Model] = ReadthroughTTLCache( 43 timedelta(hours=1), lambda m: Model.load(f"{m}model") 44 ) 45 MODEL_CACHE.start_ttl_thread() 46 47 cctx = zstandard.ZstdCompressor(level=10) 48 49 50 def setkey(key: str, value: bytes, compress: bool = False) -> None: 51 LOGGER.debug(f"Storing data at {key}: {value!r}") 52 if compress: 53 value = cctx.compress(value) 54 redis.set(key, value) 55 redis.expire(key, DEFAULT_EXPIRATION_TTL) 56 57 58 def classify_bug(model_name: str, bug_ids: Sequence[int], bugzilla_token: str) -> str: 59 from bugbug_http.app import JobInfo 60 61 # This should be called in a process worker so it should be safe to set 62 # the token here 63 bug_ids_set = set(map(int, bug_ids)) 64 bugzilla.set_token(bugzilla_token) 65 66 bugs = bugzilla.get(bug_ids) 67 68 missing_bugs = bug_ids_set.difference(bugs.keys()) 69 70 for bug_id in missing_bugs: 71 job = JobInfo(classify_bug, model_name, bug_id) 72 73 # TODO: Find a better error format 74 setkey(job.result_key, orjson.dumps({"available": False})) 75 76 if not bugs: 77 return "NOK" 78 79 model = MODEL_CACHE.get(model_name) 80 81 if not model: 82 LOGGER.info("Missing model %r, aborting" % model_name) 83 return "NOK" 84 85 model_extra_data = model.get_extra_data() 86 87 # TODO: Classify could choke on a single bug which could make the whole 88 # job to fails. What should we do here? 
89 probs = model.classify(list(bugs.values()), True) 90 indexes = probs.argmax(axis=-1) 91 suggestions = model.le.inverse_transform(indexes) 92 93 probs_list = probs.tolist() 94 indexes_list = indexes.tolist() 95 suggestions_list = suggestions.tolist() 96 97 for i, bug_id in enumerate(bugs.keys()): 98 data = { 99 "prob": probs_list[i], 100 "index": indexes_list[i], 101 "class": suggestions_list[i], 102 "extra_data": model_extra_data, 103 } 104 105 job = JobInfo(classify_bug, model_name, bug_id) 106 setkey(job.result_key, orjson.dumps(data), compress=True) 107 108 # Save the bug last change 109 setkey(job.change_time_key, bugs[bug_id]["last_change_time"].encode()) 110 111 return "OK" 112 113 114 def classify_issue( 115 model_name: str, owner: str, repo: str, issue_nums: Sequence[int] 116 ) -> str: 117 from bugbug_http.app import JobInfo 118 119 github = Github(owner=owner, repo=repo) 120 121 issue_ids_set = set(map(int, issue_nums)) 122 123 issues = { 124 issue_num: github.fetch_issue_by_number(owner, repo, issue_num, True) 125 for issue_num in issue_nums 126 } 127 128 missing_issues = issue_ids_set.difference(issues.keys()) 129 130 for issue_id in missing_issues: 131 job = JobInfo(classify_issue, model_name, owner, repo, issue_id) 132 133 # TODO: Find a better error format 134 setkey(job.result_key, orjson.dumps({"available": False})) 135 136 if not issues: 137 return "NOK" 138 139 model = MODEL_CACHE.get(model_name) 140 141 if not model: 142 LOGGER.info("Missing model %r, aborting" % model_name) 143 return "NOK" 144 145 model_extra_data = model.get_extra_data() 146 147 # TODO: Classify could choke on a single bug which could make the whole 148 # job to fail. What should we do here? 149 probs = model.classify(list(issues.values()), True) 150 indexes = probs.argmax(axis=-1) 151 suggestions = model.le.inverse_transform(indexes) 152 153 probs_list = probs.tolist() 154 indexes_list = indexes.tolist() 155 suggestions_list = suggestions.tolist() 156 157 for i, issue_id in enumerate(issues.keys()): 158 data = { 159 "prob": probs_list[i], 160 "index": indexes_list[i], 161 "class": suggestions_list[i], 162 "extra_data": model_extra_data, 163 } 164 165 job = JobInfo(classify_issue, model_name, owner, repo, issue_id) 166 setkey(job.result_key, orjson.dumps(data), compress=True) 167 168 # Save the bug last change 169 setkey(job.change_time_key, issues[issue_id]["updated_at"].encode()) 170 171 return "OK" 172 173 174 def classify_broken_site_report(model_name: str, reports_data: list[dict]) -> str: 175 from bugbug_http.app import JobInfo 176 177 reports = { 178 report["uuid"]: {"title": report["title"], "body": report["body"]} 179 for report in reports_data 180 } 181 182 if not reports: 183 return "NOK" 184 185 model = MODEL_CACHE.get(model_name) 186 187 if not model: 188 LOGGER.info("Missing model %r, aborting" % model_name) 189 return "NOK" 190 191 model_extra_data = model.get_extra_data() 192 probs = model.classify(list(reports.values()), True) 193 indexes = probs.argmax(axis=-1) 194 suggestions = model.le.inverse_transform(indexes) 195 196 probs_list = probs.tolist() 197 indexes_list = indexes.tolist() 198 suggestions_list = suggestions.tolist() 199 200 for i, report_uuid in enumerate(reports.keys()): 201 data = { 202 "prob": probs_list[i], 203 "index": indexes_list[i], 204 "class": suggestions_list[i], 205 "extra_data": model_extra_data, 206 } 207 208 job = JobInfo(classify_broken_site_report, model_name, report_uuid) 209 setkey(job.result_key, orjson.dumps(data), compress=True) 210 211 return 
"OK" 212 213 214 @lru_cache(maxsize=None) 215 def get_known_tasks() -> tuple[str, ...]: 216 with open("known_tasks", "r") as f: 217 return tuple(line.strip() for line in f) 218 219 220 def schedule_tests(branch: str, rev: str) -> str: 221 from bugbug_http import REPO_DIR 222 from bugbug_http.app import JobInfo 223 224 job = JobInfo(schedule_tests, branch, rev) 225 LOGGER.info("Processing %s...", job) 226 227 # Pull the revision to the local repository 228 LOGGER.info("Pulling commits from the remote repository...") 229 repository.pull(REPO_DIR, branch, rev) 230 231 # Load the full stack of patches leading to that revision 232 LOGGER.info("Loading commits to analyze using automationrelevance...") 233 try: 234 revs = get_hgmo_stack(branch, rev) 235 except requests.exceptions.RequestException: 236 LOGGER.warning(f"Push not found for {branch} @ {rev}!") 237 return "NOK" 238 239 test_selection_threshold = float( 240 os.environ.get("TEST_SELECTION_CONFIDENCE_THRESHOLD", 0.5) 241 ) 242 243 # On "try", consider commits from other branches too (see https://bugzilla.mozilla.org/show_bug.cgi?id=1790493). 244 # On other repos, only consider "tip" commits (to exclude commits such as https://hg.mozilla.org/integration/autoland/rev/961f253985a4388008700a6a6fde80f4e17c0b4b). 245 if branch == "try": 246 repo_branch = None 247 else: 248 repo_branch = "tip" 249 250 # Analyze patches. 251 commits = repository.download_commits( 252 REPO_DIR, 253 revs=revs, 254 branch=repo_branch, 255 save=False, 256 use_single_process=True, 257 include_no_bug=True, 258 ) 259 260 if len(commits) > 0: 261 testlabelselect_model = MODEL_CACHE.get("testlabelselect") 262 testgroupselect_model = MODEL_CACHE.get("testgroupselect") 263 264 tasks = testlabelselect_model.select_tests(commits, test_selection_threshold) 265 266 reduced = testselect.reduce_configs( 267 set(t for t, c in tasks.items() if c >= 0.8), 1.0 268 ) 269 270 reduced_higher = testselect.reduce_configs( 271 set(t for t, c in tasks.items() if c >= 0.9), 1.0 272 ) 273 274 groups = testgroupselect_model.select_tests(commits, test_selection_threshold) 275 276 config_groups = testselect.select_configs(groups.keys(), 0.9) 277 else: 278 tasks = {} 279 reduced = set() 280 groups = {} 281 config_groups = {} 282 283 data = { 284 "tasks": tasks, 285 "groups": groups, 286 "config_groups": config_groups, 287 "reduced_tasks": {t: c for t, c in tasks.items() if t in reduced}, 288 "reduced_tasks_higher": {t: c for t, c in tasks.items() if t in reduced_higher}, 289 "known_tasks": get_known_tasks(), 290 } 291 setkey(job.result_key, orjson.dumps(data), compress=True) 292 293 return "OK" 294 295 296 def get_config_specific_groups(config: str) -> str: 297 from bugbug_http.app import JobInfo 298 299 job = JobInfo(get_config_specific_groups, config) 300 LOGGER.info("Processing %s...", job) 301 302 equivalence_sets = testselect._get_equivalence_sets(0.9) 303 304 past_failures_data = test_scheduling.PastFailures("group", True) 305 306 setkey( 307 job.result_key, 308 orjson.dumps( 309 [ 310 {"name": group} 311 for group in past_failures_data.all_runnables 312 if any( 313 equivalence_set == {config} 314 for equivalence_set in equivalence_sets[group] 315 ) 316 ] 317 ), 318 compress=True, 319 ) 320 321 return "OK" 322 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/http_service/bugbug_http/models.py b/http_service/bugbug_http/models.py --- a/http_service/bugbug_http/models.py +++ b/http_service/bugbug_http/models.py @@ -34,6 +34,7 @@ "spambug", "testlabelselect", "testgroupselect", + "accessibility", ] DEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week
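This one-line registry change is enough on the HTTP-service side because every entry in `MODELS_NAMES` is served through the shared cache, whose loader calls `Model.load(f"{m}model")` on first use, so `accessibility` resolves to an `accessibilitymodel` artifact by the same convention as the existing models. The issue also asks for a `data_pipeline` entry, which this diff does not cover. A small illustrative sketch of that name-to-artifact path (the gate function is hypothetical, not the service's actual routing code):

```python
# MODELS_NAMES and the f"{m}model" naming follow bugbug_http/models.py;
# artifact_for() itself is only an illustration.
MODELS_NAMES = [
    "defectenhancementtask", "component", "invalidcompatibilityreport",
    "needsdiagnosis", "regression", "stepstoreproduce", "spambug",
    "testlabelselect", "testgroupselect", "accessibility",
]

def artifact_for(model_name: str) -> str:
    if model_name not in MODELS_NAMES:
        raise ValueError(f"unknown model: {model_name}")
    return f"{model_name}model"  # what MODEL_CACHE's loader passes to Model.load()

print(artifact_for("accessibility"))  # -> accessibilitymodel
```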
{"golden_diff": "diff --git a/http_service/bugbug_http/models.py b/http_service/bugbug_http/models.py\n--- a/http_service/bugbug_http/models.py\n+++ b/http_service/bugbug_http/models.py\n@@ -34,6 +34,7 @@\n \"spambug\",\n \"testlabelselect\",\n \"testgroupselect\",\n+ \"accessibility\",\n ]\n \n DEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week\n", "issue": "[model:accessibility] Add the model to `http_service` and `data_pipeline`\nDepends on merging: #3775\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\nfrom datetime import timedelta\nfrom functools import lru_cache\nfrom typing import Sequence\n\nimport orjson\nimport requests\nimport zstandard\nfrom redis import Redis\n\nfrom bugbug import bugzilla, repository, test_scheduling\nfrom bugbug.github import Github\nfrom bugbug.model import Model\nfrom bugbug.models import testselect\nfrom bugbug.utils import get_hgmo_stack\nfrom bugbug_http.readthrough_cache import ReadthroughTTLCache\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\nMODELS_NAMES = [\n \"defectenhancementtask\",\n \"component\",\n \"invalidcompatibilityreport\",\n \"needsdiagnosis\",\n \"regression\",\n \"stepstoreproduce\",\n \"spambug\",\n \"testlabelselect\",\n \"testgroupselect\",\n]\n\nDEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week\nredis = Redis.from_url(os.environ.get(\"REDIS_URL\", \"redis://localhost/0\"))\n\nMODEL_CACHE: ReadthroughTTLCache[str, Model] = ReadthroughTTLCache(\n timedelta(hours=1), lambda m: Model.load(f\"{m}model\")\n)\nMODEL_CACHE.start_ttl_thread()\n\ncctx = zstandard.ZstdCompressor(level=10)\n\n\ndef setkey(key: str, value: bytes, compress: bool = False) -> None:\n LOGGER.debug(f\"Storing data at {key}: {value!r}\")\n if compress:\n value = cctx.compress(value)\n redis.set(key, value)\n redis.expire(key, DEFAULT_EXPIRATION_TTL)\n\n\ndef classify_bug(model_name: str, bug_ids: Sequence[int], bugzilla_token: str) -> str:\n from bugbug_http.app import JobInfo\n\n # This should be called in a process worker so it should be safe to set\n # the token here\n bug_ids_set = set(map(int, bug_ids))\n bugzilla.set_token(bugzilla_token)\n\n bugs = bugzilla.get(bug_ids)\n\n missing_bugs = bug_ids_set.difference(bugs.keys())\n\n for bug_id in missing_bugs:\n job = JobInfo(classify_bug, model_name, bug_id)\n\n # TODO: Find a better error format\n setkey(job.result_key, orjson.dumps({\"available\": False}))\n\n if not bugs:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n\n # TODO: Classify could choke on a single bug which could make the whole\n # job to fails. 
What should we do here?\n probs = model.classify(list(bugs.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, bug_id in enumerate(bugs.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_bug, model_name, bug_id)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n # Save the bug last change\n setkey(job.change_time_key, bugs[bug_id][\"last_change_time\"].encode())\n\n return \"OK\"\n\n\ndef classify_issue(\n model_name: str, owner: str, repo: str, issue_nums: Sequence[int]\n) -> str:\n from bugbug_http.app import JobInfo\n\n github = Github(owner=owner, repo=repo)\n\n issue_ids_set = set(map(int, issue_nums))\n\n issues = {\n issue_num: github.fetch_issue_by_number(owner, repo, issue_num, True)\n for issue_num in issue_nums\n }\n\n missing_issues = issue_ids_set.difference(issues.keys())\n\n for issue_id in missing_issues:\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\n\n # TODO: Find a better error format\n setkey(job.result_key, orjson.dumps({\"available\": False}))\n\n if not issues:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n\n # TODO: Classify could choke on a single bug which could make the whole\n # job to fail. What should we do here?\n probs = model.classify(list(issues.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, issue_id in enumerate(issues.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n # Save the bug last change\n setkey(job.change_time_key, issues[issue_id][\"updated_at\"].encode())\n\n return \"OK\"\n\n\ndef classify_broken_site_report(model_name: str, reports_data: list[dict]) -> str:\n from bugbug_http.app import JobInfo\n\n reports = {\n report[\"uuid\"]: {\"title\": report[\"title\"], \"body\": report[\"body\"]}\n for report in reports_data\n }\n\n if not reports:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n probs = model.classify(list(reports.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, report_uuid in enumerate(reports.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_broken_site_report, model_name, report_uuid)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n return \"OK\"\n\n\n@lru_cache(maxsize=None)\ndef get_known_tasks() -> tuple[str, ...]:\n with open(\"known_tasks\", \"r\") as f:\n return tuple(line.strip() for line in 
f)\n\n\ndef schedule_tests(branch: str, rev: str) -> str:\n from bugbug_http import REPO_DIR\n from bugbug_http.app import JobInfo\n\n job = JobInfo(schedule_tests, branch, rev)\n LOGGER.info(\"Processing %s...\", job)\n\n # Pull the revision to the local repository\n LOGGER.info(\"Pulling commits from the remote repository...\")\n repository.pull(REPO_DIR, branch, rev)\n\n # Load the full stack of patches leading to that revision\n LOGGER.info(\"Loading commits to analyze using automationrelevance...\")\n try:\n revs = get_hgmo_stack(branch, rev)\n except requests.exceptions.RequestException:\n LOGGER.warning(f\"Push not found for {branch} @ {rev}!\")\n return \"NOK\"\n\n test_selection_threshold = float(\n os.environ.get(\"TEST_SELECTION_CONFIDENCE_THRESHOLD\", 0.5)\n )\n\n # On \"try\", consider commits from other branches too (see https://bugzilla.mozilla.org/show_bug.cgi?id=1790493).\n # On other repos, only consider \"tip\" commits (to exclude commits such as https://hg.mozilla.org/integration/autoland/rev/961f253985a4388008700a6a6fde80f4e17c0b4b).\n if branch == \"try\":\n repo_branch = None\n else:\n repo_branch = \"tip\"\n\n # Analyze patches.\n commits = repository.download_commits(\n REPO_DIR,\n revs=revs,\n branch=repo_branch,\n save=False,\n use_single_process=True,\n include_no_bug=True,\n )\n\n if len(commits) > 0:\n testlabelselect_model = MODEL_CACHE.get(\"testlabelselect\")\n testgroupselect_model = MODEL_CACHE.get(\"testgroupselect\")\n\n tasks = testlabelselect_model.select_tests(commits, test_selection_threshold)\n\n reduced = testselect.reduce_configs(\n set(t for t, c in tasks.items() if c >= 0.8), 1.0\n )\n\n reduced_higher = testselect.reduce_configs(\n set(t for t, c in tasks.items() if c >= 0.9), 1.0\n )\n\n groups = testgroupselect_model.select_tests(commits, test_selection_threshold)\n\n config_groups = testselect.select_configs(groups.keys(), 0.9)\n else:\n tasks = {}\n reduced = set()\n groups = {}\n config_groups = {}\n\n data = {\n \"tasks\": tasks,\n \"groups\": groups,\n \"config_groups\": config_groups,\n \"reduced_tasks\": {t: c for t, c in tasks.items() if t in reduced},\n \"reduced_tasks_higher\": {t: c for t, c in tasks.items() if t in reduced_higher},\n \"known_tasks\": get_known_tasks(),\n }\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n return \"OK\"\n\n\ndef get_config_specific_groups(config: str) -> str:\n from bugbug_http.app import JobInfo\n\n job = JobInfo(get_config_specific_groups, config)\n LOGGER.info(\"Processing %s...\", job)\n\n equivalence_sets = testselect._get_equivalence_sets(0.9)\n\n past_failures_data = test_scheduling.PastFailures(\"group\", True)\n\n setkey(\n job.result_key,\n orjson.dumps(\n [\n {\"name\": group}\n for group in past_failures_data.all_runnables\n if any(\n equivalence_set == {config}\n for equivalence_set in equivalence_sets[group]\n )\n ]\n ),\n compress=True,\n )\n\n return \"OK\"\n", "path": "http_service/bugbug_http/models.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\nfrom datetime import timedelta\nfrom functools import lru_cache\nfrom typing import Sequence\n\nimport orjson\nimport requests\nimport zstandard\nfrom redis import Redis\n\nfrom bugbug import bugzilla, repository, test_scheduling\nfrom bugbug.github import Github\nfrom bugbug.model import Model\nfrom bugbug.models import testselect\nfrom bugbug.utils import get_hgmo_stack\nfrom bugbug_http.readthrough_cache import ReadthroughTTLCache\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\nMODELS_NAMES = [\n \"defectenhancementtask\",\n \"component\",\n \"invalidcompatibilityreport\",\n \"needsdiagnosis\",\n \"regression\",\n \"stepstoreproduce\",\n \"spambug\",\n \"testlabelselect\",\n \"testgroupselect\",\n \"accessibility\",\n]\n\nDEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week\nredis = Redis.from_url(os.environ.get(\"REDIS_URL\", \"redis://localhost/0\"))\n\nMODEL_CACHE: ReadthroughTTLCache[str, Model] = ReadthroughTTLCache(\n timedelta(hours=1), lambda m: Model.load(f\"{m}model\")\n)\nMODEL_CACHE.start_ttl_thread()\n\ncctx = zstandard.ZstdCompressor(level=10)\n\n\ndef setkey(key: str, value: bytes, compress: bool = False) -> None:\n LOGGER.debug(f\"Storing data at {key}: {value!r}\")\n if compress:\n value = cctx.compress(value)\n redis.set(key, value)\n redis.expire(key, DEFAULT_EXPIRATION_TTL)\n\n\ndef classify_bug(model_name: str, bug_ids: Sequence[int], bugzilla_token: str) -> str:\n from bugbug_http.app import JobInfo\n\n # This should be called in a process worker so it should be safe to set\n # the token here\n bug_ids_set = set(map(int, bug_ids))\n bugzilla.set_token(bugzilla_token)\n\n bugs = bugzilla.get(bug_ids)\n\n missing_bugs = bug_ids_set.difference(bugs.keys())\n\n for bug_id in missing_bugs:\n job = JobInfo(classify_bug, model_name, bug_id)\n\n # TODO: Find a better error format\n setkey(job.result_key, orjson.dumps({\"available\": False}))\n\n if not bugs:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n\n # TODO: Classify could choke on a single bug which could make the whole\n # job to fails. 
What should we do here?\n probs = model.classify(list(bugs.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, bug_id in enumerate(bugs.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_bug, model_name, bug_id)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n # Save the bug last change\n setkey(job.change_time_key, bugs[bug_id][\"last_change_time\"].encode())\n\n return \"OK\"\n\n\ndef classify_issue(\n model_name: str, owner: str, repo: str, issue_nums: Sequence[int]\n) -> str:\n from bugbug_http.app import JobInfo\n\n github = Github(owner=owner, repo=repo)\n\n issue_ids_set = set(map(int, issue_nums))\n\n issues = {\n issue_num: github.fetch_issue_by_number(owner, repo, issue_num, True)\n for issue_num in issue_nums\n }\n\n missing_issues = issue_ids_set.difference(issues.keys())\n\n for issue_id in missing_issues:\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\n\n # TODO: Find a better error format\n setkey(job.result_key, orjson.dumps({\"available\": False}))\n\n if not issues:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n\n # TODO: Classify could choke on a single bug which could make the whole\n # job to fail. What should we do here?\n probs = model.classify(list(issues.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, issue_id in enumerate(issues.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n # Save the bug last change\n setkey(job.change_time_key, issues[issue_id][\"updated_at\"].encode())\n\n return \"OK\"\n\n\ndef classify_broken_site_report(model_name: str, reports_data: list[dict]) -> str:\n from bugbug_http.app import JobInfo\n\n reports = {\n report[\"uuid\"]: {\"title\": report[\"title\"], \"body\": report[\"body\"]}\n for report in reports_data\n }\n\n if not reports:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n probs = model.classify(list(reports.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, report_uuid in enumerate(reports.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_broken_site_report, model_name, report_uuid)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n return \"OK\"\n\n\n@lru_cache(maxsize=None)\ndef get_known_tasks() -> tuple[str, ...]:\n with open(\"known_tasks\", \"r\") as f:\n return tuple(line.strip() for line in 
f)\n\n\ndef schedule_tests(branch: str, rev: str) -> str:\n from bugbug_http import REPO_DIR\n from bugbug_http.app import JobInfo\n\n job = JobInfo(schedule_tests, branch, rev)\n LOGGER.info(\"Processing %s...\", job)\n\n # Pull the revision to the local repository\n LOGGER.info(\"Pulling commits from the remote repository...\")\n repository.pull(REPO_DIR, branch, rev)\n\n # Load the full stack of patches leading to that revision\n LOGGER.info(\"Loading commits to analyze using automationrelevance...\")\n try:\n revs = get_hgmo_stack(branch, rev)\n except requests.exceptions.RequestException:\n LOGGER.warning(f\"Push not found for {branch} @ {rev}!\")\n return \"NOK\"\n\n test_selection_threshold = float(\n os.environ.get(\"TEST_SELECTION_CONFIDENCE_THRESHOLD\", 0.5)\n )\n\n # On \"try\", consider commits from other branches too (see https://bugzilla.mozilla.org/show_bug.cgi?id=1790493).\n # On other repos, only consider \"tip\" commits (to exclude commits such as https://hg.mozilla.org/integration/autoland/rev/961f253985a4388008700a6a6fde80f4e17c0b4b).\n if branch == \"try\":\n repo_branch = None\n else:\n repo_branch = \"tip\"\n\n # Analyze patches.\n commits = repository.download_commits(\n REPO_DIR,\n revs=revs,\n branch=repo_branch,\n save=False,\n use_single_process=True,\n include_no_bug=True,\n )\n\n if len(commits) > 0:\n testlabelselect_model = MODEL_CACHE.get(\"testlabelselect\")\n testgroupselect_model = MODEL_CACHE.get(\"testgroupselect\")\n\n tasks = testlabelselect_model.select_tests(commits, test_selection_threshold)\n\n reduced = testselect.reduce_configs(\n set(t for t, c in tasks.items() if c >= 0.8), 1.0\n )\n\n reduced_higher = testselect.reduce_configs(\n set(t for t, c in tasks.items() if c >= 0.9), 1.0\n )\n\n groups = testgroupselect_model.select_tests(commits, test_selection_threshold)\n\n config_groups = testselect.select_configs(groups.keys(), 0.9)\n else:\n tasks = {}\n reduced = set()\n groups = {}\n config_groups = {}\n\n data = {\n \"tasks\": tasks,\n \"groups\": groups,\n \"config_groups\": config_groups,\n \"reduced_tasks\": {t: c for t, c in tasks.items() if t in reduced},\n \"reduced_tasks_higher\": {t: c for t, c in tasks.items() if t in reduced_higher},\n \"known_tasks\": get_known_tasks(),\n }\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n return \"OK\"\n\n\ndef get_config_specific_groups(config: str) -> str:\n from bugbug_http.app import JobInfo\n\n job = JobInfo(get_config_specific_groups, config)\n LOGGER.info(\"Processing %s...\", job)\n\n equivalence_sets = testselect._get_equivalence_sets(0.9)\n\n past_failures_data = test_scheduling.PastFailures(\"group\", True)\n\n setkey(\n job.result_key,\n orjson.dumps(\n [\n {\"name\": group}\n for group in past_failures_data.all_runnables\n if any(\n equivalence_set == {config}\n for equivalence_set in equivalence_sets[group]\n )\n ]\n ),\n compress=True,\n )\n\n return \"OK\"\n", "path": "http_service/bugbug_http/models.py"}]}
3,549
104
gh_patches_debug_8276
rasdani/github-patches
git_diff
archlinux__archinstall-666
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- re_rank_mirrors bug `re_rank_mirrors` from `archinstall/lib/mirrors.py` does not work. Since `SyncCommand` doesn't run command from shell, stdout redirection (`foo > bar`) doesn't work. Also, using source file as destination file does not seem to work. File becomes empty. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `archinstall/lib/mirrors.py` Content: ``` 1 import urllib.error 2 import urllib.request 3 from typing import Union 4 5 from .general import * 6 from .output import log 7 8 def sort_mirrorlist(raw_data :bytes, sort_order=["https", "http"]) -> bytes: 9 """ 10 This function can sort /etc/pacman.d/mirrorlist according to the 11 mirror's URL prefix. By default places HTTPS before HTTP but it also 12 preserves the country/rank-order. 13 14 This assumes /etc/pacman.d/mirrorlist looks like the following: 15 16 ## Comment 17 Server = url 18 19 or 20 21 ## Comment 22 #Server = url 23 24 But the Comments need to start with double-hashmarks to be distringuished 25 from server url definitions (commented or uncommented). 26 """ 27 comments_and_whitespaces = b"" 28 29 categories = {key: [] for key in sort_order+["Unknown"]} 30 for line in raw_data.split(b"\n"): 31 if line[0:2] in (b'##', b''): 32 comments_and_whitespaces += line + b'\n' 33 elif line[:6].lower() == b'server' or line[:7].lower() == b'#server': 34 opening, url = line.split(b'=', 1) 35 opening, url = opening.strip(), url.strip() 36 if (category := url.split(b'://',1)[0].decode('UTF-8')) in categories: 37 categories[category].append(comments_and_whitespaces) 38 categories[category].append(opening+b' = '+url+b'\n') 39 else: 40 categories["Unknown"].append(comments_and_whitespaces) 41 categories["Unknown"].append(opening+b' = '+url+b'\n') 42 43 comments_and_whitespaces = b"" 44 45 46 new_raw_data = b'' 47 for category in sort_order+["Unknown"]: 48 for line in categories[category]: 49 new_raw_data += line 50 51 return new_raw_data 52 53 54 def filter_mirrors_by_region(regions, destination='/etc/pacman.d/mirrorlist', sort_order=["https", "http"], *args, **kwargs) -> Union[bool, bytes]: 55 """ 56 This function will change the active mirrors on the live medium by 57 filtering which regions are active based on `regions`. 58 59 :param regions: A series of country codes separated by `,`. For instance `SE,US` for sweden and United States. 
60 :type regions: str 61 """ 62 region_list = [f'country={region}' for region in regions.split(',')] 63 response = urllib.request.urlopen(urllib.request.Request(f"https://archlinux.org/mirrorlist/?{'&'.join(region_list)}&protocol=https&protocol=http&ip_version=4&ip_version=6&use_mirror_status=on'", headers={'User-Agent': 'ArchInstall'})) 64 new_list = response.read().replace(b"#Server", b"Server") 65 66 if sort_order: 67 new_list = sort_mirrorlist(new_list, sort_order=sort_order) 68 69 if destination: 70 with open(destination, "wb") as mirrorlist: 71 mirrorlist.write(new_list) 72 73 return True 74 else: 75 return new_list.decode('UTF-8') 76 77 78 def add_custom_mirrors(mirrors: list, *args, **kwargs): 79 """ 80 This will append custom mirror definitions in pacman.conf 81 82 :param mirrors: A list of mirror data according to: `{'url': 'http://url.com', 'signcheck': 'Optional', 'signoptions': 'TrustAll', 'name': 'testmirror'}` 83 :type mirrors: dict 84 """ 85 with open('/etc/pacman.conf', 'a') as pacman: 86 for mirror in mirrors: 87 pacman.write(f"[{mirror['name']}]\n") 88 pacman.write(f"SigLevel = {mirror['signcheck']} {mirror['signoptions']}\n") 89 pacman.write(f"Server = {mirror['url']}\n") 90 91 return True 92 93 94 def insert_mirrors(mirrors, *args, **kwargs): 95 """ 96 This function will insert a given mirror-list at the top of `/etc/pacman.d/mirrorlist`. 97 It will not flush any other mirrors, just insert new ones. 98 99 :param mirrors: A dictionary of `{'url' : 'country', 'url2' : 'country'}` 100 :type mirrors: dict 101 """ 102 original_mirrorlist = '' 103 with open('/etc/pacman.d/mirrorlist', 'r') as original: 104 original_mirrorlist = original.read() 105 106 with open('/etc/pacman.d/mirrorlist', 'w') as new_mirrorlist: 107 for mirror, country in mirrors.items(): 108 new_mirrorlist.write(f'## {country}\n') 109 new_mirrorlist.write(f'Server = {mirror}\n') 110 new_mirrorlist.write('\n') 111 new_mirrorlist.write(original_mirrorlist) 112 113 return True 114 115 116 def use_mirrors(regions: dict, destination='/etc/pacman.d/mirrorlist'): 117 log(f'A new package mirror-list has been created: {destination}', level=logging.INFO) 118 for region, mirrors in regions.items(): 119 with open(destination, 'w') as mirrorlist: 120 for mirror in mirrors: 121 mirrorlist.write(f'## {region}\n') 122 mirrorlist.write(f'Server = {mirror}\n') 123 return True 124 125 126 def re_rank_mirrors(top=10, *positionals, **kwargs): 127 if SysCommand(f'/usr/bin/rankmirrors -n {top} /etc/pacman.d/mirrorlist > /etc/pacman.d/mirrorlist').exit_code == 0: 128 return True 129 return False 130 131 132 def list_mirrors(sort_order=["https", "http"]): 133 url = "https://archlinux.org/mirrorlist/?protocol=https&protocol=http&ip_version=4&ip_version=6&use_mirror_status=on" 134 regions = {} 135 136 try: 137 response = urllib.request.urlopen(url) 138 except urllib.error.URLError as err: 139 log(f'Could not fetch an active mirror-list: {err}', level=logging.WARNING, fg="yellow") 140 return regions 141 142 mirrorlist = response.read() 143 if sort_order: 144 mirrorlist = sort_mirrorlist(mirrorlist, sort_order=sort_order) 145 146 region = 'Unknown region' 147 for line in mirrorlist.split(b'\n'): 148 if len(line.strip()) == 0: 149 continue 150 151 line = line.decode('UTF-8').strip('\n').strip('\r') 152 if line[:3] == '## ': 153 region = line[3:] 154 elif line[:10] == '#Server = ': 155 regions.setdefault(region, {}) 156 157 url = line.lstrip('#Server = ') 158 regions[region][url] = True 159 160 return regions 161 ``` --- END FILES --- 
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/archinstall/lib/mirrors.py b/archinstall/lib/mirrors.py --- a/archinstall/lib/mirrors.py +++ b/archinstall/lib/mirrors.py @@ -123,10 +123,17 @@ return True -def re_rank_mirrors(top=10, *positionals, **kwargs): - if SysCommand(f'/usr/bin/rankmirrors -n {top} /etc/pacman.d/mirrorlist > /etc/pacman.d/mirrorlist').exit_code == 0: - return True - return False +def re_rank_mirrors( + top: int = 10, + src: str = '/etc/pacman.d/mirrorlist', + dst: str = '/etc/pacman.d/mirrorlist', +) -> bool: + cmd = SysCommand(f"/usr/bin/rankmirrors -n {top} {src}") + if cmd.exit_code != 0: + return False + with open(dst, 'w') as f: + f.write(str(cmd)) + return True def list_mirrors(sort_order=["https", "http"]):
{"golden_diff": "diff --git a/archinstall/lib/mirrors.py b/archinstall/lib/mirrors.py\n--- a/archinstall/lib/mirrors.py\n+++ b/archinstall/lib/mirrors.py\n@@ -123,10 +123,17 @@\n \treturn True\n \n \n-def re_rank_mirrors(top=10, *positionals, **kwargs):\n-\tif SysCommand(f'/usr/bin/rankmirrors -n {top} /etc/pacman.d/mirrorlist > /etc/pacman.d/mirrorlist').exit_code == 0:\n-\t\treturn True\n-\treturn False\n+def re_rank_mirrors(\n+\ttop: int = 10,\n+\tsrc: str = '/etc/pacman.d/mirrorlist',\n+\tdst: str = '/etc/pacman.d/mirrorlist',\n+) -> bool:\n+\tcmd = SysCommand(f\"/usr/bin/rankmirrors -n {top} {src}\")\n+\tif cmd.exit_code != 0:\n+\t\treturn False\n+\twith open(dst, 'w') as f:\n+\t\tf.write(str(cmd))\n+\treturn True\n \n \n def list_mirrors(sort_order=[\"https\", \"http\"]):\n", "issue": "re_rank_mirrors bug\n`re_rank_mirrors` from `archinstall/lib/mirrors.py` does not work. Since `SyncCommand` doesn't run command from shell, stdout redirection (`foo > bar`) doesn't work. \r\n\r\nAlso, using source file as destination file does not seem to work. File becomes empty.\n", "before_files": [{"content": "import urllib.error\nimport urllib.request\nfrom typing import Union\n\nfrom .general import *\nfrom .output import log\n\ndef sort_mirrorlist(raw_data :bytes, sort_order=[\"https\", \"http\"]) -> bytes:\n\t\"\"\"\n\tThis function can sort /etc/pacman.d/mirrorlist according to the\n\tmirror's URL prefix. By default places HTTPS before HTTP but it also\n\tpreserves the country/rank-order.\n\n\tThis assumes /etc/pacman.d/mirrorlist looks like the following:\n\n\t## Comment\n\tServer = url\n\n\tor\n\n\t## Comment\n\t#Server = url\n\n\tBut the Comments need to start with double-hashmarks to be distringuished\n\tfrom server url definitions (commented or uncommented).\n\t\"\"\"\n\tcomments_and_whitespaces = b\"\"\n\n\tcategories = {key: [] for key in sort_order+[\"Unknown\"]}\n\tfor line in raw_data.split(b\"\\n\"):\n\t\tif line[0:2] in (b'##', b''):\n\t\t\tcomments_and_whitespaces += line + b'\\n'\n\t\telif line[:6].lower() == b'server' or line[:7].lower() == b'#server':\n\t\t\topening, url = line.split(b'=', 1)\n\t\t\topening, url = opening.strip(), url.strip()\n\t\t\tif (category := url.split(b'://',1)[0].decode('UTF-8')) in categories:\n\t\t\t\tcategories[category].append(comments_and_whitespaces)\n\t\t\t\tcategories[category].append(opening+b' = '+url+b'\\n')\n\t\t\telse:\n\t\t\t\tcategories[\"Unknown\"].append(comments_and_whitespaces)\n\t\t\t\tcategories[\"Unknown\"].append(opening+b' = '+url+b'\\n')\n\n\t\t\tcomments_and_whitespaces = b\"\"\n\n\n\tnew_raw_data = b''\n\tfor category in sort_order+[\"Unknown\"]:\n\t\tfor line in categories[category]:\n\t\t\tnew_raw_data += line\n\n\treturn new_raw_data\n\n\ndef filter_mirrors_by_region(regions, destination='/etc/pacman.d/mirrorlist', sort_order=[\"https\", \"http\"], *args, **kwargs) -> Union[bool, bytes]:\n\t\"\"\"\n\tThis function will change the active mirrors on the live medium by\n\tfiltering which regions are active based on `regions`.\n\n\t:param regions: A series of country codes separated by `,`. 
For instance `SE,US` for sweden and United States.\n\t:type regions: str\n\t\"\"\"\n\tregion_list = [f'country={region}' for region in regions.split(',')]\n\tresponse = urllib.request.urlopen(urllib.request.Request(f\"https://archlinux.org/mirrorlist/?{'&'.join(region_list)}&protocol=https&protocol=http&ip_version=4&ip_version=6&use_mirror_status=on'\", headers={'User-Agent': 'ArchInstall'}))\n\tnew_list = response.read().replace(b\"#Server\", b\"Server\")\n\n\tif sort_order:\n\t\tnew_list = sort_mirrorlist(new_list, sort_order=sort_order)\n\n\tif destination:\n\t\twith open(destination, \"wb\") as mirrorlist:\n\t\t\tmirrorlist.write(new_list)\n\n\t\treturn True\n\telse:\n\t\treturn new_list.decode('UTF-8')\n\n\ndef add_custom_mirrors(mirrors: list, *args, **kwargs):\n\t\"\"\"\n\tThis will append custom mirror definitions in pacman.conf\n\n\t:param mirrors: A list of mirror data according to: `{'url': 'http://url.com', 'signcheck': 'Optional', 'signoptions': 'TrustAll', 'name': 'testmirror'}`\n\t:type mirrors: dict\n\t\"\"\"\n\twith open('/etc/pacman.conf', 'a') as pacman:\n\t\tfor mirror in mirrors:\n\t\t\tpacman.write(f\"[{mirror['name']}]\\n\")\n\t\t\tpacman.write(f\"SigLevel = {mirror['signcheck']} {mirror['signoptions']}\\n\")\n\t\t\tpacman.write(f\"Server = {mirror['url']}\\n\")\n\n\treturn True\n\n\ndef insert_mirrors(mirrors, *args, **kwargs):\n\t\"\"\"\n\tThis function will insert a given mirror-list at the top of `/etc/pacman.d/mirrorlist`.\n\tIt will not flush any other mirrors, just insert new ones.\n\n\t:param mirrors: A dictionary of `{'url' : 'country', 'url2' : 'country'}`\n\t:type mirrors: dict\n\t\"\"\"\n\toriginal_mirrorlist = ''\n\twith open('/etc/pacman.d/mirrorlist', 'r') as original:\n\t\toriginal_mirrorlist = original.read()\n\n\twith open('/etc/pacman.d/mirrorlist', 'w') as new_mirrorlist:\n\t\tfor mirror, country in mirrors.items():\n\t\t\tnew_mirrorlist.write(f'## {country}\\n')\n\t\t\tnew_mirrorlist.write(f'Server = {mirror}\\n')\n\t\tnew_mirrorlist.write('\\n')\n\t\tnew_mirrorlist.write(original_mirrorlist)\n\n\treturn True\n\n\ndef use_mirrors(regions: dict, destination='/etc/pacman.d/mirrorlist'):\n\tlog(f'A new package mirror-list has been created: {destination}', level=logging.INFO)\n\tfor region, mirrors in regions.items():\n\t\twith open(destination, 'w') as mirrorlist:\n\t\t\tfor mirror in mirrors:\n\t\t\t\tmirrorlist.write(f'## {region}\\n')\n\t\t\t\tmirrorlist.write(f'Server = {mirror}\\n')\n\treturn True\n\n\ndef re_rank_mirrors(top=10, *positionals, **kwargs):\n\tif SysCommand(f'/usr/bin/rankmirrors -n {top} /etc/pacman.d/mirrorlist > /etc/pacman.d/mirrorlist').exit_code == 0:\n\t\treturn True\n\treturn False\n\n\ndef list_mirrors(sort_order=[\"https\", \"http\"]):\n\turl = \"https://archlinux.org/mirrorlist/?protocol=https&protocol=http&ip_version=4&ip_version=6&use_mirror_status=on\"\n\tregions = {}\n\n\ttry:\n\t\tresponse = urllib.request.urlopen(url)\n\texcept urllib.error.URLError as err:\n\t\tlog(f'Could not fetch an active mirror-list: {err}', level=logging.WARNING, fg=\"yellow\")\n\t\treturn regions\n\n\tmirrorlist = response.read()\n\tif sort_order:\n\t\tmirrorlist = sort_mirrorlist(mirrorlist, sort_order=sort_order)\n\n\tregion = 'Unknown region'\n\tfor line in mirrorlist.split(b'\\n'):\n\t\tif len(line.strip()) == 0:\n\t\t\tcontinue\n\n\t\tline = line.decode('UTF-8').strip('\\n').strip('\\r')\n\t\tif line[:3] == '## ':\n\t\t\tregion = line[3:]\n\t\telif line[:10] == '#Server = ':\n\t\t\tregions.setdefault(region, {})\n\n\t\t\turl = 
line.lstrip('#Server = ')\n\t\t\tregions[region][url] = True\n\n\treturn regions\n", "path": "archinstall/lib/mirrors.py"}], "after_files": [{"content": "import urllib.error\nimport urllib.request\nfrom typing import Union\n\nfrom .general import *\nfrom .output import log\n\ndef sort_mirrorlist(raw_data :bytes, sort_order=[\"https\", \"http\"]) -> bytes:\n\t\"\"\"\n\tThis function can sort /etc/pacman.d/mirrorlist according to the\n\tmirror's URL prefix. By default places HTTPS before HTTP but it also\n\tpreserves the country/rank-order.\n\n\tThis assumes /etc/pacman.d/mirrorlist looks like the following:\n\n\t## Comment\n\tServer = url\n\n\tor\n\n\t## Comment\n\t#Server = url\n\n\tBut the Comments need to start with double-hashmarks to be distringuished\n\tfrom server url definitions (commented or uncommented).\n\t\"\"\"\n\tcomments_and_whitespaces = b\"\"\n\n\tcategories = {key: [] for key in sort_order+[\"Unknown\"]}\n\tfor line in raw_data.split(b\"\\n\"):\n\t\tif line[0:2] in (b'##', b''):\n\t\t\tcomments_and_whitespaces += line + b'\\n'\n\t\telif line[:6].lower() == b'server' or line[:7].lower() == b'#server':\n\t\t\topening, url = line.split(b'=', 1)\n\t\t\topening, url = opening.strip(), url.strip()\n\t\t\tif (category := url.split(b'://',1)[0].decode('UTF-8')) in categories:\n\t\t\t\tcategories[category].append(comments_and_whitespaces)\n\t\t\t\tcategories[category].append(opening+b' = '+url+b'\\n')\n\t\t\telse:\n\t\t\t\tcategories[\"Unknown\"].append(comments_and_whitespaces)\n\t\t\t\tcategories[\"Unknown\"].append(opening+b' = '+url+b'\\n')\n\n\t\t\tcomments_and_whitespaces = b\"\"\n\n\n\tnew_raw_data = b''\n\tfor category in sort_order+[\"Unknown\"]:\n\t\tfor line in categories[category]:\n\t\t\tnew_raw_data += line\n\n\treturn new_raw_data\n\n\ndef filter_mirrors_by_region(regions, destination='/etc/pacman.d/mirrorlist', sort_order=[\"https\", \"http\"], *args, **kwargs) -> Union[bool, bytes]:\n\t\"\"\"\n\tThis function will change the active mirrors on the live medium by\n\tfiltering which regions are active based on `regions`.\n\n\t:param regions: A series of country codes separated by `,`. 
For instance `SE,US` for sweden and United States.\n\t:type regions: str\n\t\"\"\"\n\tregion_list = [f'country={region}' for region in regions.split(',')]\n\tresponse = urllib.request.urlopen(urllib.request.Request(f\"https://archlinux.org/mirrorlist/?{'&'.join(region_list)}&protocol=https&protocol=http&ip_version=4&ip_version=6&use_mirror_status=on'\", headers={'User-Agent': 'ArchInstall'}))\n\tnew_list = response.read().replace(b\"#Server\", b\"Server\")\n\n\tif sort_order:\n\t\tnew_list = sort_mirrorlist(new_list, sort_order=sort_order)\n\n\tif destination:\n\t\twith open(destination, \"wb\") as mirrorlist:\n\t\t\tmirrorlist.write(new_list)\n\n\t\treturn True\n\telse:\n\t\treturn new_list.decode('UTF-8')\n\n\ndef add_custom_mirrors(mirrors: list, *args, **kwargs):\n\t\"\"\"\n\tThis will append custom mirror definitions in pacman.conf\n\n\t:param mirrors: A list of mirror data according to: `{'url': 'http://url.com', 'signcheck': 'Optional', 'signoptions': 'TrustAll', 'name': 'testmirror'}`\n\t:type mirrors: dict\n\t\"\"\"\n\twith open('/etc/pacman.conf', 'a') as pacman:\n\t\tfor mirror in mirrors:\n\t\t\tpacman.write(f\"[{mirror['name']}]\\n\")\n\t\t\tpacman.write(f\"SigLevel = {mirror['signcheck']} {mirror['signoptions']}\\n\")\n\t\t\tpacman.write(f\"Server = {mirror['url']}\\n\")\n\n\treturn True\n\n\ndef insert_mirrors(mirrors, *args, **kwargs):\n\t\"\"\"\n\tThis function will insert a given mirror-list at the top of `/etc/pacman.d/mirrorlist`.\n\tIt will not flush any other mirrors, just insert new ones.\n\n\t:param mirrors: A dictionary of `{'url' : 'country', 'url2' : 'country'}`\n\t:type mirrors: dict\n\t\"\"\"\n\toriginal_mirrorlist = ''\n\twith open('/etc/pacman.d/mirrorlist', 'r') as original:\n\t\toriginal_mirrorlist = original.read()\n\n\twith open('/etc/pacman.d/mirrorlist', 'w') as new_mirrorlist:\n\t\tfor mirror, country in mirrors.items():\n\t\t\tnew_mirrorlist.write(f'## {country}\\n')\n\t\t\tnew_mirrorlist.write(f'Server = {mirror}\\n')\n\t\tnew_mirrorlist.write('\\n')\n\t\tnew_mirrorlist.write(original_mirrorlist)\n\n\treturn True\n\n\ndef use_mirrors(regions: dict, destination='/etc/pacman.d/mirrorlist'):\n\tlog(f'A new package mirror-list has been created: {destination}', level=logging.INFO)\n\tfor region, mirrors in regions.items():\n\t\twith open(destination, 'w') as mirrorlist:\n\t\t\tfor mirror in mirrors:\n\t\t\t\tmirrorlist.write(f'## {region}\\n')\n\t\t\t\tmirrorlist.write(f'Server = {mirror}\\n')\n\treturn True\n\n\ndef re_rank_mirrors(\n\ttop: int = 10,\n\tsrc: str = '/etc/pacman.d/mirrorlist',\n\tdst: str = '/etc/pacman.d/mirrorlist',\n) -> bool:\n\tcmd = SysCommand(f\"/usr/bin/rankmirrors -n {top} {src}\")\n\tif cmd.exit_code != 0:\n\t\treturn False\n\twith open(dst, 'w') as f:\n\t\tf.write(str(cmd))\n\treturn True\n\n\ndef list_mirrors(sort_order=[\"https\", \"http\"]):\n\turl = \"https://archlinux.org/mirrorlist/?protocol=https&protocol=http&ip_version=4&ip_version=6&use_mirror_status=on\"\n\tregions = {}\n\n\ttry:\n\t\tresponse = urllib.request.urlopen(url)\n\texcept urllib.error.URLError as err:\n\t\tlog(f'Could not fetch an active mirror-list: {err}', level=logging.WARNING, fg=\"yellow\")\n\t\treturn regions\n\n\tmirrorlist = response.read()\n\tif sort_order:\n\t\tmirrorlist = sort_mirrorlist(mirrorlist, sort_order=sort_order)\n\n\tregion = 'Unknown region'\n\tfor line in mirrorlist.split(b'\\n'):\n\t\tif len(line.strip()) == 0:\n\t\t\tcontinue\n\n\t\tline = line.decode('UTF-8').strip('\\n').strip('\\r')\n\t\tif line[:3] == '## ':\n\t\t\tregion = 
line[3:]\n\t\telif line[:10] == '#Server = ':\n\t\t\tregions.setdefault(region, {})\n\n\t\t\turl = line.lstrip('#Server = ')\n\t\t\tregions[region][url] = True\n\n\treturn regions\n", "path": "archinstall/lib/mirrors.py"}]}
2,211
250
gh_patches_debug_3331
rasdani/github-patches
git_diff
fidals__shopelectro-885
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Stale import db issue PO says the last import was at `<yml_catalog date="2019-05-23 00:38">` Check if import db can autolaunch. Import db is called as catalog update command --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `shopelectro/management/commands/_update_catalog/update_pack.py` Content: ``` 1 """ 2 Update Product.in_pack and prices. 3 4 The update_catalog command always resets product prices to per unit format, so: 5 1. Parse in pack quantity from Tag.name and save it to Product.in_pack 6 2. Multiply product prices by in_pack value and save. 7 """ 8 import logging 9 10 from django.db import models, transaction 11 12 from shopelectro.models import TagQuerySet, TagGroup 13 14 logger = logging.getLogger(__name__) 15 PRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large'] 16 17 18 def update_in_packs(packs: TagQuerySet): 19 """Parse and save in pack quantity values.""" 20 # @todo #859:60m Implement update_pack and render prices properly. 21 22 23 def update_prices(packs: TagQuerySet): 24 """Multiply product prices on in pack quantity.""" 25 fields_to_update = {} 26 for price in PRICES: 27 fields_to_update[price] = models.F(price) * models.F('in_pack') 28 29 with transaction.atomic(): 30 packs.products().update(**fields_to_update) 31 32 33 def main(*args, kwargs): 34 uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118' 35 pack_group = TagGroup.objects.filter(uuid=uuid).first() 36 if not pack_group: 37 logger.error(f'Couldn\'t find "Упаковка" tag group with uuid = "{uuid}".') 38 return 39 40 return 41 42 packs = pack_group.tags.all().prefetch_related('products') 43 update_in_packs(packs) 44 update_prices(packs) 45 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py --- a/shopelectro/management/commands/_update_catalog/update_pack.py +++ b/shopelectro/management/commands/_update_catalog/update_pack.py @@ -30,7 +30,7 @@ packs.products().update(**fields_to_update) -def main(*args, kwargs): +def main(*args, **kwargs): uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118' pack_group = TagGroup.objects.filter(uuid=uuid).first() if not pack_group:
{"golden_diff": "diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py\n--- a/shopelectro/management/commands/_update_catalog/update_pack.py\n+++ b/shopelectro/management/commands/_update_catalog/update_pack.py\n@@ -30,7 +30,7 @@\n packs.products().update(**fields_to_update)\n \n \n-def main(*args, kwargs):\n+def main(*args, **kwargs):\n uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'\n pack_group = TagGroup.objects.filter(uuid=uuid).first()\n if not pack_group:\n", "issue": "Stale import db issue\nPO says the last import was at `<yml_catalog date=\"2019-05-23 00:38\">`\r\nCheck if import db can autolaunch.\r\n\r\nImport db is called as catalog update command\r\n\n", "before_files": [{"content": "\"\"\"\nUpdate Product.in_pack and prices.\n\nThe update_catalog command always resets product prices to per unit format, so:\n1. Parse in pack quantity from Tag.name and save it to Product.in_pack\n2. Multiply product prices by in_pack value and save.\n\"\"\"\nimport logging\n\nfrom django.db import models, transaction\n\nfrom shopelectro.models import TagQuerySet, TagGroup\n\nlogger = logging.getLogger(__name__)\nPRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']\n\n\ndef update_in_packs(packs: TagQuerySet):\n \"\"\"Parse and save in pack quantity values.\"\"\"\n # @todo #859:60m Implement update_pack and render prices properly.\n\n\ndef update_prices(packs: TagQuerySet):\n \"\"\"Multiply product prices on in pack quantity.\"\"\"\n fields_to_update = {}\n for price in PRICES:\n fields_to_update[price] = models.F(price) * models.F('in_pack')\n\n with transaction.atomic():\n packs.products().update(**fields_to_update)\n\n\ndef main(*args, kwargs):\n uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'\n pack_group = TagGroup.objects.filter(uuid=uuid).first()\n if not pack_group:\n logger.error(f'Couldn\\'t find \"\u0423\u043f\u0430\u043a\u043e\u0432\u043a\u0430\" tag group with uuid = \"{uuid}\".')\n return\n\n return\n\n packs = pack_group.tags.all().prefetch_related('products')\n update_in_packs(packs)\n update_prices(packs)\n", "path": "shopelectro/management/commands/_update_catalog/update_pack.py"}], "after_files": [{"content": "\"\"\"\nUpdate Product.in_pack and prices.\n\nThe update_catalog command always resets product prices to per unit format, so:\n1. Parse in pack quantity from Tag.name and save it to Product.in_pack\n2. 
Multiply product prices by in_pack value and save.\n\"\"\"\nimport logging\n\nfrom django.db import models, transaction\n\nfrom shopelectro.models import TagQuerySet, TagGroup\n\nlogger = logging.getLogger(__name__)\nPRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']\n\n\ndef update_in_packs(packs: TagQuerySet):\n \"\"\"Parse and save in pack quantity values.\"\"\"\n # @todo #859:60m Implement update_pack and render prices properly.\n\n\ndef update_prices(packs: TagQuerySet):\n \"\"\"Multiply product prices on in pack quantity.\"\"\"\n fields_to_update = {}\n for price in PRICES:\n fields_to_update[price] = models.F(price) * models.F('in_pack')\n\n with transaction.atomic():\n packs.products().update(**fields_to_update)\n\n\ndef main(*args, **kwargs):\n uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'\n pack_group = TagGroup.objects.filter(uuid=uuid).first()\n if not pack_group:\n logger.error(f'Couldn\\'t find \"\u0423\u043f\u0430\u043a\u043e\u0432\u043a\u0430\" tag group with uuid = \"{uuid}\".')\n return\n\n return\n\n packs = pack_group.tags.all().prefetch_related('products')\n update_in_packs(packs)\n update_prices(packs)\n", "path": "shopelectro/management/commands/_update_catalog/update_pack.py"}]}
762
164
gh_patches_debug_4833
rasdani/github-patches
git_diff
Theano__Theano-146
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Get rid of warning when using Scipy 0.10.x WARNING: scipy version = 0.10.0b2. We request version >=0.7.0 for the sparse code as it has bugs fixed in the sparse matrix code. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `theano/sparse/__init__.py` Content: ``` 1 import sys 2 try: 3 import scipy 4 enable_sparse = scipy.__version__ >= '0.7' 5 if not enable_sparse: 6 sys.stderr.write("WARNING: scipy version = %s." 7 " We request version >=0.7.0 for the sparse code as it has" 8 " bugs fixed in the sparse matrix code.\n" % scipy.__version__) 9 except ImportError: 10 enable_sparse = False 11 sys.stderr.write("WARNING: scipy can't be imported." 12 " We disable the sparse matrix code.") 13 14 if enable_sparse: 15 from basic import * 16 import sharedvar 17 from sharedvar import sparse_constructor as shared 18 19 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/theano/sparse/__init__.py b/theano/sparse/__init__.py --- a/theano/sparse/__init__.py +++ b/theano/sparse/__init__.py @@ -1,7 +1,10 @@ +from pkg_resources import parse_version as V import sys + try: import scipy - enable_sparse = scipy.__version__ >= '0.7' + enable_sparse = V(scipy.__version__) >= V('0.7') + if not enable_sparse: sys.stderr.write("WARNING: scipy version = %s." " We request version >=0.7.0 for the sparse code as it has"
{"golden_diff": "diff --git a/theano/sparse/__init__.py b/theano/sparse/__init__.py\n--- a/theano/sparse/__init__.py\n+++ b/theano/sparse/__init__.py\n@@ -1,7 +1,10 @@\n+from pkg_resources import parse_version as V\n import sys\n+\n try:\n import scipy\n- enable_sparse = scipy.__version__ >= '0.7'\n+ enable_sparse = V(scipy.__version__) >= V('0.7')\n+\n if not enable_sparse:\n sys.stderr.write(\"WARNING: scipy version = %s.\"\n \" We request version >=0.7.0 for the sparse code as it has\"\n", "issue": "Get rid of warning when using Scipy 0.10.x\nWARNING: scipy version = 0.10.0b2. We request version >=0.7.0 for the sparse code as it has bugs fixed in the sparse matrix code.\n\n", "before_files": [{"content": "import sys\ntry:\n import scipy\n enable_sparse = scipy.__version__ >= '0.7'\n if not enable_sparse:\n sys.stderr.write(\"WARNING: scipy version = %s.\"\n \" We request version >=0.7.0 for the sparse code as it has\"\n \" bugs fixed in the sparse matrix code.\\n\" % scipy.__version__)\nexcept ImportError:\n enable_sparse = False\n sys.stderr.write(\"WARNING: scipy can't be imported.\"\n \" We disable the sparse matrix code.\")\n\nif enable_sparse:\n from basic import *\n import sharedvar\n from sharedvar import sparse_constructor as shared\n\n", "path": "theano/sparse/__init__.py"}], "after_files": [{"content": "from pkg_resources import parse_version as V\nimport sys\n\ntry:\n import scipy\n enable_sparse = V(scipy.__version__) >= V('0.7')\n\n if not enable_sparse:\n sys.stderr.write(\"WARNING: scipy version = %s.\"\n \" We request version >=0.7.0 for the sparse code as it has\"\n \" bugs fixed in the sparse matrix code.\\n\" % scipy.__version__)\nexcept ImportError:\n enable_sparse = False\n sys.stderr.write(\"WARNING: scipy can't be imported.\"\n \" We disable the sparse matrix code.\")\n\nif enable_sparse:\n from basic import *\n import sharedvar\n from sharedvar import sparse_constructor as shared\n\n", "path": "theano/sparse/__init__.py"}]}
480
147
gh_patches_debug_16010
rasdani/github-patches
git_diff
boto__botocore-15
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Using botocore without config files, env vars or IAM roles It's desirable to be able to use botocore entirely standalone - no reliance on configuration files, environment variables or IAM roles. Currently, it's necessary to do something hacky like this: ``` session = botocore.session.get_session() service = session.get_service('ec2') # HACK manually set the botocore credentials object session._credentials = botocore.credentials.Credentials( access_key=__opts__['AWS.id'], secret_key=__opts__['AWS.key'], ) endpoint = service.get_endpoint(region) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `botocore/session.py` Content: ``` 1 # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ 2 # Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining a 5 # copy of this software and associated documentation files (the 6 # "Software"), to deal in the Software without restriction, including 7 # without limitation the rights to use, copy, modify, merge, publish, dis- 8 # tribute, sublicense, and/or sell copies of the Software, and to permit 9 # persons to whom the Software is furnished to do so, subject to the fol- 10 # lowing conditions: 11 # 12 # The above copyright notice and this permission notice shall be included 13 # in all copies or substantial portions of the Software. 14 # 15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 16 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- 17 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 18 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 19 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 # IN THE SOFTWARE. 22 # 23 """ 24 This module contains the main interface to the botocore package, the 25 Session object. 26 """ 27 28 import logging 29 import platform 30 import os 31 import copy 32 import botocore.config 33 import botocore.credentials 34 import botocore.base 35 import botocore.service 36 from . import __version__ 37 38 39 EnvironmentVariables = { 40 'profile': 'BOTO_DEFAULT_PROFILE', 41 'region': 'BOTO_DEFAULT_REGION', 42 'data_path': 'BOTO_DATA_PATH', 43 'config_file': 'AWS_CONFIG_FILE', 44 'access_key': 'AWS_ACCESS_KEY_ID', 45 'secret_key': 'AWS_SECRET_ACCESS_KEY' 46 } 47 """ 48 A dictionary mapping logical names to specific environment variable 49 names. When creating a new Session object, you can pass in your own 50 dictionary to remap the logical names to the environment variable names 51 you want to use for your application. The logical variable names are: 52 53 * profile - Default profile name you want to use. 54 * region - Default region name to use, if not otherwise specified. 55 * data_path - Additional directories to search for data files. 56 * config_file - Location of a Boto config file. 57 * access_key - The AWS access key part of your credentials. 58 * secret_key - The AWS secret key part of your credentials. 
59 """ 60 61 62 class Session(object): 63 """ 64 The Session object collects together useful functionality 65 from `botocore` as well as important data such as configuration 66 information and credentials into a single, easy-to-use object. 67 68 :ivar available_profiles: A list of profiles defined in the config 69 file associated with this session. 70 :ivar profile: The current profile. 71 """ 72 73 FmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' 74 75 def __init__(self, env_vars=None): 76 """ 77 Create a new Session object. 78 79 :type env_vars: dict 80 :param env_vars: A dictionary that is used to override some or all 81 of the environment variables associated with this session. The 82 key/value pairs defined in this dictionary will override the 83 corresponding variables defined in `EnvironmentVariables`. 84 """ 85 self.env_vars = copy.copy(EnvironmentVariables) 86 if env_vars: 87 self.env_vars.update(env_vars) 88 self.user_agent_name = 'Boto' 89 self.user_agent_version = __version__ 90 self._profile = os.environ.get(self.env_vars['profile'], 'default') 91 self._config = None 92 self._credentials = None 93 94 @property 95 def available_profiles(self): 96 profiles = [] 97 self.get_config() 98 for key in self._config.keys(): 99 if not key.startswith('_'): 100 profiles.append(key) 101 return profiles 102 103 @property 104 def profile(self): 105 return self._profile 106 107 @profile.setter 108 def profile(self, profile): 109 if profile != self._profile: 110 self._credentials = None 111 self._profile = profile 112 113 def get_envvar(self, logical_name): 114 """ 115 Retrieve the value associated with the specified logical_name 116 from the environment. 117 118 :type logical_name: str 119 :param logical_name: The logical name of the environment variable 120 you want to retrieve. This name will be mapped to the 121 appropriate environment variable name for this session. 122 123 * profile - Default profile name you want to use. 124 * region - Default region name to use, if not otherwise specified. 125 * data_path - Additional directories to search for data files. 126 * config_file - Location of a Boto config file. 127 * access_key - The AWS access key part of your credentials. 128 * secret_key - The AWS secret key part of your credentials. 129 130 :returns: str value of variable of None if not defined. 131 """ 132 value = None 133 if logical_name in self.env_vars: 134 value = os.environ.get(self.env_vars[logical_name], None) 135 return value 136 137 def get_config(self): 138 """ 139 Returns the configuration associated with this session. If 140 the configuration has not yet been loaded, it will be loaded 141 using the current `profile` attribute value. If it has already been 142 loaded, the cached configuration will be returned. 143 144 :raises: ConfigNotFound, ConfigParseError 145 """ 146 if self._config is None: 147 self._config = botocore.config.get_config(self) 148 return self._config.get(self._profile, None) 149 150 def get_credentials(self, metadata=None): 151 """ 152 Return the :class:`botocore.credential.Credential` object 153 associated with this session. If the credentials have not 154 yet been loaded, this will attempt to load them. If they 155 have already been loaded, this will return the cached 156 credentials. 157 158 :type metadata: dict 159 :param metadata: This parameter allows you to pass in 160 EC2 instance metadata containing IAM Role credentials. 161 This metadata will be used rather than retrieving the 162 metadata from the metadata service. 
This is mainly used 163 for unit testing. 164 """ 165 if self._credentials is None: 166 cfg = self.get_config() 167 self._credentials = botocore.credentials.get_credentials(cfg, 168 metadata) 169 return self._credentials 170 171 def user_agent(self): 172 """ 173 Return a string suitable for use as a User-Agent header. 174 The string will be of the form: 175 176 <agent_name>/<agent_version> Python/<py_ver> <plat_name>/<plat_ver> 177 178 Where: 179 180 - agent_name is the value of the `user_agent_name` attribute 181 of the session object (`Boto` by default). 182 - agent_version is the value of the `user_agent_version` 183 attribute of the session object (the botocore version by default). 184 by default. 185 - py_ver is the version of the Python interpreter beng used. 186 - plat_name is the name of the platform (e.g. Darwin) 187 - plat_ver is the version of the platform 188 189 """ 190 return '%s/%s Python/%s %s/%s' % (self.user_agent_name, 191 self.user_agent_version, 192 platform.python_version(), 193 platform.system(), 194 platform.release()) 195 196 def get_data(self, data_path): 197 """ 198 Retrieve the data associated with `data_path`. 199 200 :type data_path: str 201 :param data_path: The path to the data you wish to retrieve. 202 """ 203 return botocore.base.get_data(self, data_path) 204 205 def get_service_data(self, service_name, provider_name='aws'): 206 """ 207 Retrieve the fully merged data associated with a service. 208 """ 209 return botocore.base.get_service_data(self, service_name, 210 provider_name) 211 212 def get_service(self, service_name, provider_name='aws'): 213 """ 214 Get information about a service. 215 216 :type service_name: str 217 :param service_name: The name of the service (e.g. 'ec2') 218 219 :type provider_name: str 220 :param provider_name: The name of the provider. Defaults 221 to 'aws'. 222 223 :returns: :class:`botocore.service.Service` 224 """ 225 return botocore.service.get_service(self, service_name, provider_name) 226 227 def set_debug_logger(self): 228 """ 229 Convenience function to quickly configure full debug output 230 to go to the console. 231 """ 232 log = logging.getLogger('botocore') 233 log.setLevel(logging.DEBUG) 234 235 # create console handler and set level to debug 236 ch = logging.StreamHandler() 237 ch.setLevel(logging.DEBUG) 238 239 # create formatter 240 formatter = logging.Formatter(self.FmtString) 241 242 # add formatter to ch 243 ch.setFormatter(formatter) 244 245 # add ch to logger 246 log.addHandler(ch) 247 248 def set_file_logger(self, log_level, path): 249 """ 250 Convenience function to quickly configure any level of logging 251 to a file. 252 253 :type log_level: int 254 :param log_level: A log level as specified in the `logging` module 255 256 :type path: string 257 :param path: Path to the log file. The file will be created 258 if it doesn't already exist. 259 """ 260 log = logging.getLogger('botocore') 261 log.setLevel(log_level) 262 263 # create console handler and set level to debug 264 ch = logging.FileHandler(path) 265 ch.setLevel(log_level) 266 267 # create formatter 268 formatter = logging.Formatter(self.FmtString) 269 270 # add formatter to ch 271 ch.setFormatter(formatter) 272 273 # add ch to logger 274 log.addHandler(ch) 275 276 277 def get_session(env_vars=None): 278 """ 279 Return a new session object. 280 """ 281 return Session(env_vars) 282 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/botocore/session.py b/botocore/session.py --- a/botocore/session.py +++ b/botocore/session.py @@ -147,6 +147,20 @@ self._config = botocore.config.get_config(self) return self._config.get(self._profile, None) + def set_credentials(self, access_key, secret_key): + """ + Create the :class:`botocore.credential.Credential` object + associated with this session using the supplied AccessKey and SecretKey + + :type access_key: str + :param access_key: The access key part of the credentials. + + :type secret_key: str + :param secret_key: The secret key part of the credentials. + """ + self._credentials = botocore.credentials.Credentials(access_key, + secret_key) + def get_credentials(self, metadata=None): """ Return the :class:`botocore.credential.Credential` object
{"golden_diff": "diff --git a/botocore/session.py b/botocore/session.py\n--- a/botocore/session.py\n+++ b/botocore/session.py\n@@ -147,6 +147,20 @@\n self._config = botocore.config.get_config(self)\n return self._config.get(self._profile, None)\n \n+ def set_credentials(self, access_key, secret_key):\n+ \"\"\"\n+ Create the :class:`botocore.credential.Credential` object\n+ associated with this session using the supplied AccessKey and SecretKey\n+\n+ :type access_key: str\n+ :param access_key: The access key part of the credentials.\n+\n+ :type secret_key: str\n+ :param secret_key: The secret key part of the credentials.\n+ \"\"\"\n+ self._credentials = botocore.credentials.Credentials(access_key,\n+ secret_key)\n+\n def get_credentials(self, metadata=None):\n \"\"\"\n Return the :class:`botocore.credential.Credential` object\n", "issue": "Using botocore without config files, env vars or IAM roles\nIt's desirable to be able to use botocore entirely standalone - no reliance on configuration files, environment variables or IAM roles.\n\nCurrently, it's necessary to do something hacky like this:\n\n```\nsession = botocore.session.get_session()\nservice = session.get_service('ec2')\n\n# HACK manually set the botocore credentials object\nsession._credentials = botocore.credentials.Credentials(\n access_key=__opts__['AWS.id'],\n secret_key=__opts__['AWS.key'],\n )\n\nendpoint = service.get_endpoint(region)\n```\n\n", "before_files": [{"content": "# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/\n# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n\"\"\"\nThis module contains the main interface to the botocore package, the\nSession object.\n\"\"\"\n\nimport logging\nimport platform\nimport os\nimport copy\nimport botocore.config\nimport botocore.credentials\nimport botocore.base\nimport botocore.service\nfrom . import __version__\n\n\nEnvironmentVariables = {\n 'profile': 'BOTO_DEFAULT_PROFILE',\n 'region': 'BOTO_DEFAULT_REGION',\n 'data_path': 'BOTO_DATA_PATH',\n 'config_file': 'AWS_CONFIG_FILE',\n 'access_key': 'AWS_ACCESS_KEY_ID',\n 'secret_key': 'AWS_SECRET_ACCESS_KEY'\n }\n\"\"\"\nA dictionary mapping logical names to specific environment variable\nnames. When creating a new Session object, you can pass in your own\ndictionary to remap the logical names to the environment variable names\nyou want to use for your application. 
The logical variable names are:\n\n* profile - Default profile name you want to use.\n* region - Default region name to use, if not otherwise specified.\n* data_path - Additional directories to search for data files.\n* config_file - Location of a Boto config file.\n* access_key - The AWS access key part of your credentials.\n* secret_key - The AWS secret key part of your credentials.\n\"\"\"\n\n\nclass Session(object):\n \"\"\"\n The Session object collects together useful functionality\n from `botocore` as well as important data such as configuration\n information and credentials into a single, easy-to-use object.\n\n :ivar available_profiles: A list of profiles defined in the config\n file associated with this session.\n :ivar profile: The current profile.\n \"\"\"\n\n FmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n\n def __init__(self, env_vars=None):\n \"\"\"\n Create a new Session object.\n\n :type env_vars: dict\n :param env_vars: A dictionary that is used to override some or all\n of the environment variables associated with this session. The\n key/value pairs defined in this dictionary will override the\n corresponding variables defined in `EnvironmentVariables`.\n \"\"\"\n self.env_vars = copy.copy(EnvironmentVariables)\n if env_vars:\n self.env_vars.update(env_vars)\n self.user_agent_name = 'Boto'\n self.user_agent_version = __version__\n self._profile = os.environ.get(self.env_vars['profile'], 'default')\n self._config = None\n self._credentials = None\n\n @property\n def available_profiles(self):\n profiles = []\n self.get_config()\n for key in self._config.keys():\n if not key.startswith('_'):\n profiles.append(key)\n return profiles\n\n @property\n def profile(self):\n return self._profile\n\n @profile.setter\n def profile(self, profile):\n if profile != self._profile:\n self._credentials = None\n self._profile = profile\n\n def get_envvar(self, logical_name):\n \"\"\"\n Retrieve the value associated with the specified logical_name\n from the environment.\n\n :type logical_name: str\n :param logical_name: The logical name of the environment variable\n you want to retrieve. This name will be mapped to the\n appropriate environment variable name for this session.\n\n * profile - Default profile name you want to use.\n * region - Default region name to use, if not otherwise specified.\n * data_path - Additional directories to search for data files.\n * config_file - Location of a Boto config file.\n * access_key - The AWS access key part of your credentials.\n * secret_key - The AWS secret key part of your credentials.\n\n :returns: str value of variable of None if not defined.\n \"\"\"\n value = None\n if logical_name in self.env_vars:\n value = os.environ.get(self.env_vars[logical_name], None)\n return value\n\n def get_config(self):\n \"\"\"\n Returns the configuration associated with this session. If\n the configuration has not yet been loaded, it will be loaded\n using the current `profile` attribute value. If it has already been\n loaded, the cached configuration will be returned.\n\n :raises: ConfigNotFound, ConfigParseError\n \"\"\"\n if self._config is None:\n self._config = botocore.config.get_config(self)\n return self._config.get(self._profile, None)\n\n def get_credentials(self, metadata=None):\n \"\"\"\n Return the :class:`botocore.credential.Credential` object\n associated with this session. If the credentials have not\n yet been loaded, this will attempt to load them. 
If they\n have already been loaded, this will return the cached\n credentials.\n\n :type metadata: dict\n :param metadata: This parameter allows you to pass in\n EC2 instance metadata containing IAM Role credentials.\n This metadata will be used rather than retrieving the\n metadata from the metadata service. This is mainly used\n for unit testing.\n \"\"\"\n if self._credentials is None:\n cfg = self.get_config()\n self._credentials = botocore.credentials.get_credentials(cfg,\n metadata)\n return self._credentials\n\n def user_agent(self):\n \"\"\"\n Return a string suitable for use as a User-Agent header.\n The string will be of the form:\n\n <agent_name>/<agent_version> Python/<py_ver> <plat_name>/<plat_ver>\n\n Where:\n\n - agent_name is the value of the `user_agent_name` attribute\n of the session object (`Boto` by default).\n - agent_version is the value of the `user_agent_version`\n attribute of the session object (the botocore version by default).\n by default.\n - py_ver is the version of the Python interpreter beng used.\n - plat_name is the name of the platform (e.g. Darwin)\n - plat_ver is the version of the platform\n\n \"\"\"\n return '%s/%s Python/%s %s/%s' % (self.user_agent_name,\n self.user_agent_version,\n platform.python_version(),\n platform.system(),\n platform.release())\n\n def get_data(self, data_path):\n \"\"\"\n Retrieve the data associated with `data_path`.\n\n :type data_path: str\n :param data_path: The path to the data you wish to retrieve.\n \"\"\"\n return botocore.base.get_data(self, data_path)\n\n def get_service_data(self, service_name, provider_name='aws'):\n \"\"\"\n Retrieve the fully merged data associated with a service.\n \"\"\"\n return botocore.base.get_service_data(self, service_name,\n provider_name)\n\n def get_service(self, service_name, provider_name='aws'):\n \"\"\"\n Get information about a service.\n\n :type service_name: str\n :param service_name: The name of the service (e.g. 'ec2')\n\n :type provider_name: str\n :param provider_name: The name of the provider. Defaults\n to 'aws'.\n\n :returns: :class:`botocore.service.Service`\n \"\"\"\n return botocore.service.get_service(self, service_name, provider_name)\n\n def set_debug_logger(self):\n \"\"\"\n Convenience function to quickly configure full debug output\n to go to the console.\n \"\"\"\n log = logging.getLogger('botocore')\n log.setLevel(logging.DEBUG)\n\n # create console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # create formatter\n formatter = logging.Formatter(self.FmtString)\n\n # add formatter to ch\n ch.setFormatter(formatter)\n\n # add ch to logger\n log.addHandler(ch)\n\n def set_file_logger(self, log_level, path):\n \"\"\"\n Convenience function to quickly configure any level of logging\n to a file.\n\n :type log_level: int\n :param log_level: A log level as specified in the `logging` module\n\n :type path: string\n :param path: Path to the log file. 
The file will be created\n if it doesn't already exist.\n \"\"\"\n log = logging.getLogger('botocore')\n log.setLevel(log_level)\n\n # create console handler and set level to debug\n ch = logging.FileHandler(path)\n ch.setLevel(log_level)\n\n # create formatter\n formatter = logging.Formatter(self.FmtString)\n\n # add formatter to ch\n ch.setFormatter(formatter)\n\n # add ch to logger\n log.addHandler(ch)\n\n\ndef get_session(env_vars=None):\n \"\"\"\n Return a new session object.\n \"\"\"\n return Session(env_vars)\n", "path": "botocore/session.py"}], "after_files": [{"content": "# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/\n# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n\"\"\"\nThis module contains the main interface to the botocore package, the\nSession object.\n\"\"\"\n\nimport logging\nimport platform\nimport os\nimport copy\nimport botocore.config\nimport botocore.credentials\nimport botocore.base\nimport botocore.service\nfrom . import __version__\n\n\nEnvironmentVariables = {\n 'profile': 'BOTO_DEFAULT_PROFILE',\n 'region': 'BOTO_DEFAULT_REGION',\n 'data_path': 'BOTO_DATA_PATH',\n 'config_file': 'AWS_CONFIG_FILE',\n 'access_key': 'AWS_ACCESS_KEY_ID',\n 'secret_key': 'AWS_SECRET_ACCESS_KEY'\n }\n\"\"\"\nA dictionary mapping logical names to specific environment variable\nnames. When creating a new Session object, you can pass in your own\ndictionary to remap the logical names to the environment variable names\nyou want to use for your application. 
The logical variable names are:\n\n* profile - Default profile name you want to use.\n* region - Default region name to use, if not otherwise specified.\n* data_path - Additional directories to search for data files.\n* config_file - Location of a Boto config file.\n* access_key - The AWS access key part of your credentials.\n* secret_key - The AWS secret key part of your credentials.\n\"\"\"\n\n\nclass Session(object):\n \"\"\"\n The Session object collects together useful functionality\n from `botocore` as well as important data such as configuration\n information and credentials into a single, easy-to-use object.\n\n :ivar available_profiles: A list of profiles defined in the config\n file associated with this session.\n :ivar profile: The current profile.\n \"\"\"\n\n FmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n\n def __init__(self, env_vars=None):\n \"\"\"\n Create a new Session object.\n\n :type env_vars: dict\n :param env_vars: A dictionary that is used to override some or all\n of the environment variables associated with this session. The\n key/value pairs defined in this dictionary will override the\n corresponding variables defined in `EnvironmentVariables`.\n \"\"\"\n self.env_vars = copy.copy(EnvironmentVariables)\n if env_vars:\n self.env_vars.update(env_vars)\n self.user_agent_name = 'Boto'\n self.user_agent_version = __version__\n self._profile = os.environ.get(self.env_vars['profile'], 'default')\n self._config = None\n self._credentials = None\n\n @property\n def available_profiles(self):\n profiles = []\n self.get_config()\n for key in self._config.keys():\n if not key.startswith('_'):\n profiles.append(key)\n return profiles\n\n @property\n def profile(self):\n return self._profile\n\n @profile.setter\n def profile(self, profile):\n if profile != self._profile:\n self._credentials = None\n self._profile = profile\n\n def get_envvar(self, logical_name):\n \"\"\"\n Retrieve the value associated with the specified logical_name\n from the environment.\n\n :type logical_name: str\n :param logical_name: The logical name of the environment variable\n you want to retrieve. This name will be mapped to the\n appropriate environment variable name for this session.\n\n * profile - Default profile name you want to use.\n * region - Default region name to use, if not otherwise specified.\n * data_path - Additional directories to search for data files.\n * config_file - Location of a Boto config file.\n * access_key - The AWS access key part of your credentials.\n * secret_key - The AWS secret key part of your credentials.\n\n :returns: str value of variable of None if not defined.\n \"\"\"\n value = None\n if logical_name in self.env_vars:\n value = os.environ.get(self.env_vars[logical_name], None)\n return value\n\n def get_config(self):\n \"\"\"\n Returns the configuration associated with this session. If\n the configuration has not yet been loaded, it will be loaded\n using the current `profile` attribute value. 
If it has already been\n loaded, the cached configuration will be returned.\n\n :raises: ConfigNotFound, ConfigParseError\n \"\"\"\n if self._config is None:\n self._config = botocore.config.get_config(self)\n return self._config.get(self._profile, None)\n\n def set_credentials(self, access_key, secret_key):\n \"\"\"\n Create the :class:`botocore.credential.Credential` object\n associated with this session using the supplied AccessKey and SecretKey\n\n :type access_key: str\n :param access_key: The access key part of the credentials.\n\n :type secret_key: str\n :param secret_key: The secret key part of the credentials.\n \"\"\"\n self._credentials = botocore.credentials.Credentials(access_key,\n secret_key)\n\n def get_credentials(self, metadata=None):\n \"\"\"\n Return the :class:`botocore.credential.Credential` object\n associated with this session. If the credentials have not\n yet been loaded, this will attempt to load them. If they\n have already been loaded, this will return the cached\n credentials.\n\n :type metadata: dict\n :param metadata: This parameter allows you to pass in\n EC2 instance metadata containing IAM Role credentials.\n This metadata will be used rather than retrieving the\n metadata from the metadata service. This is mainly used\n for unit testing.\n \"\"\"\n if self._credentials is None:\n cfg = self.get_config()\n self._credentials = botocore.credentials.get_credentials(cfg,\n metadata)\n return self._credentials\n\n def user_agent(self):\n \"\"\"\n Return a string suitable for use as a User-Agent header.\n The string will be of the form:\n\n <agent_name>/<agent_version> Python/<py_ver> <plat_name>/<plat_ver>\n\n Where:\n\n - agent_name is the value of the `user_agent_name` attribute\n of the session object (`Boto` by default).\n - agent_version is the value of the `user_agent_version`\n attribute of the session object (the botocore version by default).\n by default.\n - py_ver is the version of the Python interpreter beng used.\n - plat_name is the name of the platform (e.g. Darwin)\n - plat_ver is the version of the platform\n\n \"\"\"\n return '%s/%s Python/%s %s/%s' % (self.user_agent_name,\n self.user_agent_version,\n platform.python_version(),\n platform.system(),\n platform.release())\n\n def get_data(self, data_path):\n \"\"\"\n Retrieve the data associated with `data_path`.\n\n :type data_path: str\n :param data_path: The path to the data you wish to retrieve.\n \"\"\"\n return botocore.base.get_data(self, data_path)\n\n def get_service_data(self, service_name, provider_name='aws'):\n \"\"\"\n Retrieve the fully merged data associated with a service.\n \"\"\"\n return botocore.base.get_service_data(self, service_name,\n provider_name)\n\n def get_service(self, service_name, provider_name='aws'):\n \"\"\"\n Get information about a service.\n\n :type service_name: str\n :param service_name: The name of the service (e.g. 'ec2')\n\n :type provider_name: str\n :param provider_name: The name of the provider. 
Defaults\n to 'aws'.\n\n :returns: :class:`botocore.service.Service`\n \"\"\"\n return botocore.service.get_service(self, service_name, provider_name)\n\n def set_debug_logger(self):\n \"\"\"\n Convenience function to quickly configure full debug output\n to go to the console.\n \"\"\"\n log = logging.getLogger('botocore')\n log.setLevel(logging.DEBUG)\n\n # create console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # create formatter\n formatter = logging.Formatter(self.FmtString)\n\n # add formatter to ch\n ch.setFormatter(formatter)\n\n # add ch to logger\n log.addHandler(ch)\n\n def set_file_logger(self, log_level, path):\n \"\"\"\n Convenience function to quickly configure any level of logging\n to a file.\n\n :type log_level: int\n :param log_level: A log level as specified in the `logging` module\n\n :type path: string\n :param path: Path to the log file. The file will be created\n if it doesn't already exist.\n \"\"\"\n log = logging.getLogger('botocore')\n log.setLevel(log_level)\n\n # create console handler and set level to debug\n ch = logging.FileHandler(path)\n ch.setLevel(log_level)\n\n # create formatter\n formatter = logging.Formatter(self.FmtString)\n\n # add formatter to ch\n ch.setFormatter(formatter)\n\n # add ch to logger\n log.addHandler(ch)\n\n\ndef get_session(env_vars=None):\n \"\"\"\n Return a new session object.\n \"\"\"\n return Session(env_vars)\n", "path": "botocore/session.py"}]}
3,247
226
gh_patches_debug_1163
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-2712
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Document that RTD uses `rel` branch for production Hi, i'd like to add a new builder for doxygen documentation (but native, not with breath). Since there are a lot of branches like real/relcorp which a far ahead of master, i'd like to know, which branch to choose for development. Thanks in advance! Oli --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 import os 4 import sys 5 6 from recommonmark.parser import CommonMarkParser 7 8 sys.path.insert(0, os.path.abspath('..')) 9 sys.path.append(os.path.dirname(__file__)) 10 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "readthedocs.settings.dev") 11 12 from django.conf import settings 13 14 import django 15 django.setup() 16 17 18 sys.path.append(os.path.abspath('_ext')) 19 extensions = [ 20 'sphinx.ext.autodoc', 21 'sphinx.ext.intersphinx', 22 'sphinxcontrib.httpdomain', 23 'djangodocs', 24 'doc_extensions', 25 ] 26 templates_path = ['_templates'] 27 28 source_suffix = ['.rst', '.md'] 29 source_parsers = { 30 '.md': CommonMarkParser, 31 } 32 33 master_doc = 'index' 34 project = u'Read The Docs' 35 copyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace' 36 version = '1.0' 37 release = '1.0' 38 exclude_patterns = ['_build'] 39 default_role = 'obj' 40 pygments_style = 'sphinx' 41 intersphinx_mapping = { 42 'python': ('http://python.readthedocs.io/en/latest/', None), 43 'django': ('http://django.readthedocs.io/en/1.8.x/', None), 44 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None), 45 } 46 # This doesn't exist since we aren't shipping any static files ourselves. 47 #html_static_path = ['_static'] 48 htmlhelp_basename = 'ReadTheDocsdoc' 49 latex_documents = [ 50 ('index', 'ReadTheDocs.tex', u'Read The Docs Documentation', 51 u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'), 52 ] 53 man_pages = [ 54 ('index', 'read-the-docs', u'Read The Docs Documentation', 55 [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1) 56 ] 57 58 exclude_patterns = [ 59 # 'api' # needed for ``make gettext`` to not die. 60 ] 61 62 language = 'en' 63 64 locale_dirs = [ 65 'locale/', 66 ] 67 gettext_compact = False 68 69 70 on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 71 if not on_rtd: # only import and set the theme if we're building docs locally 72 import sphinx_rtd_theme 73 html_theme = 'sphinx_rtd_theme' 74 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -32,7 +32,7 @@ master_doc = 'index' project = u'Read The Docs' -copyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace' +copyright = u'2010-2017, Read the Docs, Inc & contributors' version = '1.0' release = '1.0' exclude_patterns = ['_build']
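The golden diff above only bumps the hard-coded end year in `docs/conf.py`. As a side note — an assumption about how the same staleness could be avoided in the future, not part of the actual Read the Docs patch — the end year can be derived at build time, since Sphinx's `conf.py` is plain Python:

```python
# Minimal sketch (assumption, not from the patch above): derive the copyright
# end year when Sphinx builds the docs, so the string does not go stale again.
import datetime

project = u'Read The Docs'
copyright = u'2010-{}, Read the Docs, Inc & contributors'.format(
    datetime.datetime.now().year)
```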
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -32,7 +32,7 @@\n \n master_doc = 'index'\n project = u'Read The Docs'\n-copyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'\n+copyright = u'2010-2017, Read the Docs, Inc & contributors'\n version = '1.0'\n release = '1.0'\n exclude_patterns = ['_build']\n", "issue": "Document that RTD uses `rel` branch for production\nHi, i'd like to add a new builder for doxygen documentation (but native, not with breath). Since there are a lot of branches like real/relcorp which a far ahead of master, i'd like to know, which branch to choose for development.\r\n\r\nThanks in advance!\r\nOli\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\nimport os\nimport sys\n\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.conf import settings\n\nimport django\ndjango.setup()\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n]\ntemplates_path = ['_templates']\n\nsource_suffix = ['.rst', '.md']\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nmaster_doc = 'index'\nproject = u'Read The Docs'\ncopyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'\nversion = '1.0'\nrelease = '1.0'\nexclude_patterns = ['_build']\ndefault_role = 'obj'\npygments_style = 'sphinx'\nintersphinx_mapping = {\n 'python': ('http://python.readthedocs.io/en/latest/', None),\n 'django': ('http://django.readthedocs.io/en/1.8.x/', None),\n 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),\n}\n# This doesn't exist since we aren't shipping any static files ourselves.\n#html_static_path = ['_static']\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', u'Read The Docs Documentation',\n u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', u'Read The Docs Documentation',\n [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\nimport os\nimport sys\n\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.conf import settings\n\nimport django\ndjango.setup()\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n]\ntemplates_path = ['_templates']\n\nsource_suffix = ['.rst', '.md']\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nmaster_doc = 'index'\nproject = u'Read The Docs'\ncopyright = u'2010-2017, Read the Docs, Inc & contributors'\nversion = '1.0'\nrelease = '1.0'\nexclude_patterns = 
['_build']\ndefault_role = 'obj'\npygments_style = 'sphinx'\nintersphinx_mapping = {\n 'python': ('http://python.readthedocs.io/en/latest/', None),\n 'django': ('http://django.readthedocs.io/en/1.8.x/', None),\n 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),\n}\n# This doesn't exist since we aren't shipping any static files ourselves.\n#html_static_path = ['_static']\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', u'Read The Docs Documentation',\n u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', u'Read The Docs Documentation',\n [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "path": "docs/conf.py"}]}
1,005
117
gh_patches_debug_8285
rasdani/github-patches
git_diff
apache__airflow-35167
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dry_run not optional in api for set task instances state ### Apache Airflow version 2.7.2 ### What happened Sent request without dry_run parameter to dags/{dag_id}/updateTaskInstancesState and got a 500 error. ### What you think should happen instead I should be able to send a request to update task instances state and get a valid response. ### How to reproduce You can see this by commenting out line 215 in tests/api_connexion/schemas/test_task_instance_schema.py and running tests. This is a similar error to #34563. ### Operating System Ubuntu 20.04 ### Versions of Apache Airflow Providers _No response_ ### Deployment Docker-Compose ### Deployment details _No response_ ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `airflow/api_connexion/schemas/task_instance_schema.py` Content: ``` 1 # Licensed to the Apache Software Foundation (ASF) under one 2 # or more contributor license agreements. See the NOTICE file 3 # distributed with this work for additional information 4 # regarding copyright ownership. The ASF licenses this file 5 # to you under the Apache License, Version 2.0 (the 6 # "License"); you may not use this file except in compliance 7 # with the License. You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, 12 # software distributed under the License is distributed on an 13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 # KIND, either express or implied. See the License for the 15 # specific language governing permissions and limitations 16 # under the License. 
17 from __future__ import annotations 18 19 from typing import TYPE_CHECKING, NamedTuple 20 21 from marshmallow import Schema, ValidationError, fields, validate, validates_schema 22 from marshmallow.utils import get_value 23 from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field 24 25 from airflow.api_connexion.parameters import validate_istimezone 26 from airflow.api_connexion.schemas.common_schema import JsonObjectField 27 from airflow.api_connexion.schemas.enum_schemas import TaskInstanceStateField 28 from airflow.api_connexion.schemas.job_schema import JobSchema 29 from airflow.api_connexion.schemas.sla_miss_schema import SlaMissSchema 30 from airflow.api_connexion.schemas.trigger_schema import TriggerSchema 31 from airflow.models import TaskInstance 32 from airflow.utils.helpers import exactly_one 33 from airflow.utils.state import TaskInstanceState 34 35 if TYPE_CHECKING: 36 from airflow.models import SlaMiss 37 38 39 class TaskInstanceSchema(SQLAlchemySchema): 40 """Task instance schema.""" 41 42 class Meta: 43 """Meta.""" 44 45 model = TaskInstance 46 47 task_id = auto_field() 48 dag_id = auto_field() 49 run_id = auto_field(data_key="dag_run_id") 50 map_index = auto_field() 51 execution_date = auto_field() 52 start_date = auto_field() 53 end_date = auto_field() 54 duration = auto_field() 55 state = TaskInstanceStateField() 56 _try_number = auto_field(data_key="try_number") 57 max_tries = auto_field() 58 hostname = auto_field() 59 unixname = auto_field() 60 pool = auto_field() 61 pool_slots = auto_field() 62 queue = auto_field() 63 priority_weight = auto_field() 64 operator = auto_field() 65 queued_dttm = auto_field(data_key="queued_when") 66 pid = auto_field() 67 executor_config = auto_field() 68 note = auto_field() 69 sla_miss = fields.Nested(SlaMissSchema, dump_default=None) 70 rendered_fields = JsonObjectField(dump_default={}) 71 trigger = fields.Nested(TriggerSchema) 72 triggerer_job = fields.Nested(JobSchema) 73 74 def get_attribute(self, obj, attr, default): 75 if attr == "sla_miss": 76 # Object is a tuple of task_instance and slamiss 77 # and the get_value expects a dict with key, value 78 # corresponding to the attr. 
79 slamiss_instance = {"sla_miss": obj[1]} 80 return get_value(slamiss_instance, attr, default) 81 elif attr == "rendered_fields": 82 return get_value(obj[0], "rendered_task_instance_fields.rendered_fields", default) 83 return get_value(obj[0], attr, default) 84 85 86 class TaskInstanceCollection(NamedTuple): 87 """List of task instances with metadata.""" 88 89 task_instances: list[tuple[TaskInstance, SlaMiss | None]] 90 total_entries: int 91 92 93 class TaskInstanceCollectionSchema(Schema): 94 """Task instance collection schema.""" 95 96 task_instances = fields.List(fields.Nested(TaskInstanceSchema)) 97 total_entries = fields.Int() 98 99 100 class TaskInstanceBatchFormSchema(Schema): 101 """Schema for the request form passed to Task Instance Batch endpoint.""" 102 103 page_offset = fields.Int(load_default=0, validate=validate.Range(min=0)) 104 page_limit = fields.Int(load_default=100, validate=validate.Range(min=1)) 105 dag_ids = fields.List(fields.Str(), load_default=None) 106 dag_run_ids = fields.List(fields.Str(), load_default=None) 107 task_ids = fields.List(fields.Str(), load_default=None) 108 execution_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone) 109 execution_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone) 110 start_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone) 111 start_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone) 112 end_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone) 113 end_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone) 114 duration_gte = fields.Int(load_default=None) 115 duration_lte = fields.Int(load_default=None) 116 state = fields.List(fields.Str(allow_none=True), load_default=None) 117 pool = fields.List(fields.Str(), load_default=None) 118 queue = fields.List(fields.Str(), load_default=None) 119 120 121 class ClearTaskInstanceFormSchema(Schema): 122 """Schema for handling the request of clearing task instance of a Dag.""" 123 124 dry_run = fields.Boolean(load_default=True) 125 start_date = fields.DateTime(load_default=None, validate=validate_istimezone) 126 end_date = fields.DateTime(load_default=None, validate=validate_istimezone) 127 only_failed = fields.Boolean(load_default=True) 128 only_running = fields.Boolean(load_default=False) 129 include_subdags = fields.Boolean(load_default=False) 130 include_parentdag = fields.Boolean(load_default=False) 131 reset_dag_runs = fields.Boolean(load_default=False) 132 task_ids = fields.List(fields.String(), validate=validate.Length(min=1)) 133 dag_run_id = fields.Str(load_default=None) 134 include_upstream = fields.Boolean(load_default=False) 135 include_downstream = fields.Boolean(load_default=False) 136 include_future = fields.Boolean(load_default=False) 137 include_past = fields.Boolean(load_default=False) 138 139 @validates_schema 140 def validate_form(self, data, **kwargs): 141 """Validate clear task instance form.""" 142 if data["only_failed"] and data["only_running"]: 143 raise ValidationError("only_failed and only_running both are set to True") 144 if data["start_date"] and data["end_date"]: 145 if data["start_date"] > data["end_date"]: 146 raise ValidationError("end_date is sooner than start_date") 147 if data["start_date"] and data["end_date"] and data["dag_run_id"]: 148 raise ValidationError("Exactly one of dag_run_id or (start_date and end_date) must be provided") 149 if data["start_date"] and data["dag_run_id"]: 150 raise 
ValidationError("Exactly one of dag_run_id or start_date must be provided") 151 if data["end_date"] and data["dag_run_id"]: 152 raise ValidationError("Exactly one of dag_run_id or end_date must be provided") 153 154 155 class SetTaskInstanceStateFormSchema(Schema): 156 """Schema for handling the request of setting state of task instance of a DAG.""" 157 158 dry_run = fields.Boolean(dump_default=True) 159 task_id = fields.Str(required=True) 160 execution_date = fields.DateTime(validate=validate_istimezone) 161 dag_run_id = fields.Str() 162 include_upstream = fields.Boolean(required=True) 163 include_downstream = fields.Boolean(required=True) 164 include_future = fields.Boolean(required=True) 165 include_past = fields.Boolean(required=True) 166 new_state = TaskInstanceStateField( 167 required=True, 168 validate=validate.OneOf( 169 [TaskInstanceState.SUCCESS, TaskInstanceState.FAILED, TaskInstanceState.SKIPPED] 170 ), 171 ) 172 173 @validates_schema 174 def validate_form(self, data, **kwargs): 175 """Validate set task instance state form.""" 176 if not exactly_one(data.get("execution_date"), data.get("dag_run_id")): 177 raise ValidationError("Exactly one of execution_date or dag_run_id must be provided") 178 179 180 class SetSingleTaskInstanceStateFormSchema(Schema): 181 """Schema for handling the request of updating state of a single task instance.""" 182 183 dry_run = fields.Boolean(load_default=True) 184 new_state = TaskInstanceStateField( 185 required=True, 186 validate=validate.OneOf( 187 [TaskInstanceState.SUCCESS, TaskInstanceState.FAILED, TaskInstanceState.SKIPPED] 188 ), 189 ) 190 191 192 class TaskInstanceReferenceSchema(Schema): 193 """Schema for the task instance reference schema.""" 194 195 task_id = fields.Str() 196 run_id = fields.Str(data_key="dag_run_id") 197 dag_id = fields.Str() 198 execution_date = fields.DateTime() 199 200 201 class TaskInstanceReferenceCollection(NamedTuple): 202 """List of objects with metadata about taskinstance and dag_run_id.""" 203 204 task_instances: list[tuple[TaskInstance, str]] 205 206 207 class TaskInstanceReferenceCollectionSchema(Schema): 208 """Collection schema for task reference.""" 209 210 task_instances = fields.List(fields.Nested(TaskInstanceReferenceSchema)) 211 212 213 class SetTaskInstanceNoteFormSchema(Schema): 214 """Schema for settings a note for a TaskInstance.""" 215 216 # Note: We can't add map_index to the url as subpaths can't start with dashes. 217 map_index = fields.Int(allow_none=False) 218 note = fields.String(allow_none=True, validate=validate.Length(max=1000)) 219 220 221 task_instance_schema = TaskInstanceSchema() 222 task_instance_collection_schema = TaskInstanceCollectionSchema() 223 task_instance_batch_form = TaskInstanceBatchFormSchema() 224 clear_task_instance_form = ClearTaskInstanceFormSchema() 225 set_task_instance_state_form = SetTaskInstanceStateFormSchema() 226 set_single_task_instance_state_form = SetSingleTaskInstanceStateFormSchema() 227 task_instance_reference_schema = TaskInstanceReferenceSchema() 228 task_instance_reference_collection_schema = TaskInstanceReferenceCollectionSchema() 229 set_task_instance_note_form_schema = SetTaskInstanceNoteFormSchema() 230 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/airflow/api_connexion/schemas/task_instance_schema.py b/airflow/api_connexion/schemas/task_instance_schema.py --- a/airflow/api_connexion/schemas/task_instance_schema.py +++ b/airflow/api_connexion/schemas/task_instance_schema.py @@ -155,7 +155,7 @@ class SetTaskInstanceStateFormSchema(Schema): """Schema for handling the request of setting state of task instance of a DAG.""" - dry_run = fields.Boolean(dump_default=True) + dry_run = fields.Boolean(load_default=True) task_id = fields.Str(required=True) execution_date = fields.DateTime(validate=validate_istimezone) dag_run_id = fields.Str()
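The one-word change above (`dump_default` → `load_default`) is the whole fix: in marshmallow 3, `dump_default` only applies when a schema serializes data, so a request body without `dry_run` deserializes to a dict that has no `dry_run` key at all, and a later `data["dry_run"]`-style lookup in the endpoint is the likely source of the reported 500. A minimal sketch of the difference, assuming marshmallow 3.x — an illustration, not the Airflow schema itself:

```python
# Minimal sketch, assuming marshmallow 3.x (illustration only, not the Airflow schema):
# dump_default fills values on serialization, load_default fills missing request input.
from marshmallow import Schema, fields


class WithDumpDefault(Schema):
    dry_run = fields.Boolean(dump_default=True)   # no effect on load()


class WithLoadDefault(Schema):
    dry_run = fields.Boolean(load_default=True)   # missing input gets the default


print(WithDumpDefault().load({}))  # {}  -> a later data["dry_run"] raises KeyError
print(WithLoadDefault().load({}))  # {'dry_run': True}
```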
{"golden_diff": "diff --git a/airflow/api_connexion/schemas/task_instance_schema.py b/airflow/api_connexion/schemas/task_instance_schema.py\n--- a/airflow/api_connexion/schemas/task_instance_schema.py\n+++ b/airflow/api_connexion/schemas/task_instance_schema.py\n@@ -155,7 +155,7 @@\n class SetTaskInstanceStateFormSchema(Schema):\n \"\"\"Schema for handling the request of setting state of task instance of a DAG.\"\"\"\n \n- dry_run = fields.Boolean(dump_default=True)\n+ dry_run = fields.Boolean(load_default=True)\n task_id = fields.Str(required=True)\n execution_date = fields.DateTime(validate=validate_istimezone)\n dag_run_id = fields.Str()\n", "issue": "dry_run not optional in api for set task instances state\n### Apache Airflow version\n\n2.7.2\n\n### What happened\n\nSent request without dry_run parameter to dags/{dag_id}/updateTaskInstancesState and got a 500 error.\n\n### What you think should happen instead\n\nI should be able to send a request to update task instances state and get a valid response.\n\n### How to reproduce\n\nYou can see this by commenting out line 215 in tests/api_connexion/schemas/test_task_instance_schema.py and running tests. This is a similar error to #34563.\n\n### Operating System\n\nUbuntu 20.04\n\n### Versions of Apache Airflow Providers\n\n_No response_\n\n### Deployment\n\nDocker-Compose\n\n### Deployment details\n\n_No response_\n\n### Anything else\n\n_No response_\n\n### Are you willing to submit PR?\n\n- [X] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, NamedTuple\n\nfrom marshmallow import Schema, ValidationError, fields, validate, validates_schema\nfrom marshmallow.utils import get_value\nfrom marshmallow_sqlalchemy import SQLAlchemySchema, auto_field\n\nfrom airflow.api_connexion.parameters import validate_istimezone\nfrom airflow.api_connexion.schemas.common_schema import JsonObjectField\nfrom airflow.api_connexion.schemas.enum_schemas import TaskInstanceStateField\nfrom airflow.api_connexion.schemas.job_schema import JobSchema\nfrom airflow.api_connexion.schemas.sla_miss_schema import SlaMissSchema\nfrom airflow.api_connexion.schemas.trigger_schema import TriggerSchema\nfrom airflow.models import TaskInstance\nfrom airflow.utils.helpers import exactly_one\nfrom airflow.utils.state import TaskInstanceState\n\nif TYPE_CHECKING:\n from airflow.models import SlaMiss\n\n\nclass TaskInstanceSchema(SQLAlchemySchema):\n \"\"\"Task instance schema.\"\"\"\n\n class Meta:\n \"\"\"Meta.\"\"\"\n\n model = TaskInstance\n\n task_id = auto_field()\n dag_id = auto_field()\n run_id = auto_field(data_key=\"dag_run_id\")\n map_index = auto_field()\n execution_date = auto_field()\n start_date = auto_field()\n end_date = auto_field()\n duration = auto_field()\n state = TaskInstanceStateField()\n _try_number = auto_field(data_key=\"try_number\")\n max_tries = auto_field()\n hostname = auto_field()\n unixname = auto_field()\n pool = auto_field()\n pool_slots = auto_field()\n queue = auto_field()\n priority_weight = auto_field()\n operator = auto_field()\n queued_dttm = auto_field(data_key=\"queued_when\")\n pid = auto_field()\n executor_config = auto_field()\n note = auto_field()\n sla_miss = fields.Nested(SlaMissSchema, dump_default=None)\n rendered_fields = JsonObjectField(dump_default={})\n trigger = fields.Nested(TriggerSchema)\n triggerer_job = fields.Nested(JobSchema)\n\n def get_attribute(self, obj, attr, default):\n if attr == \"sla_miss\":\n # Object is a tuple of task_instance and slamiss\n # and the get_value expects a dict with key, value\n # corresponding to the attr.\n slamiss_instance = {\"sla_miss\": obj[1]}\n return get_value(slamiss_instance, attr, default)\n elif attr == \"rendered_fields\":\n return get_value(obj[0], \"rendered_task_instance_fields.rendered_fields\", default)\n return get_value(obj[0], attr, default)\n\n\nclass TaskInstanceCollection(NamedTuple):\n \"\"\"List of task instances with metadata.\"\"\"\n\n task_instances: list[tuple[TaskInstance, SlaMiss | None]]\n total_entries: int\n\n\nclass TaskInstanceCollectionSchema(Schema):\n \"\"\"Task instance collection schema.\"\"\"\n\n task_instances = fields.List(fields.Nested(TaskInstanceSchema))\n total_entries = fields.Int()\n\n\nclass TaskInstanceBatchFormSchema(Schema):\n \"\"\"Schema for the request form passed to Task Instance Batch endpoint.\"\"\"\n\n page_offset = fields.Int(load_default=0, validate=validate.Range(min=0))\n page_limit = fields.Int(load_default=100, validate=validate.Range(min=1))\n dag_ids = fields.List(fields.Str(), load_default=None)\n dag_run_ids = fields.List(fields.Str(), load_default=None)\n task_ids = fields.List(fields.Str(), load_default=None)\n execution_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)\n execution_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)\n start_date_gte = fields.DateTime(load_default=None, 
validate=validate_istimezone)\n start_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)\n end_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)\n end_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)\n duration_gte = fields.Int(load_default=None)\n duration_lte = fields.Int(load_default=None)\n state = fields.List(fields.Str(allow_none=True), load_default=None)\n pool = fields.List(fields.Str(), load_default=None)\n queue = fields.List(fields.Str(), load_default=None)\n\n\nclass ClearTaskInstanceFormSchema(Schema):\n \"\"\"Schema for handling the request of clearing task instance of a Dag.\"\"\"\n\n dry_run = fields.Boolean(load_default=True)\n start_date = fields.DateTime(load_default=None, validate=validate_istimezone)\n end_date = fields.DateTime(load_default=None, validate=validate_istimezone)\n only_failed = fields.Boolean(load_default=True)\n only_running = fields.Boolean(load_default=False)\n include_subdags = fields.Boolean(load_default=False)\n include_parentdag = fields.Boolean(load_default=False)\n reset_dag_runs = fields.Boolean(load_default=False)\n task_ids = fields.List(fields.String(), validate=validate.Length(min=1))\n dag_run_id = fields.Str(load_default=None)\n include_upstream = fields.Boolean(load_default=False)\n include_downstream = fields.Boolean(load_default=False)\n include_future = fields.Boolean(load_default=False)\n include_past = fields.Boolean(load_default=False)\n\n @validates_schema\n def validate_form(self, data, **kwargs):\n \"\"\"Validate clear task instance form.\"\"\"\n if data[\"only_failed\"] and data[\"only_running\"]:\n raise ValidationError(\"only_failed and only_running both are set to True\")\n if data[\"start_date\"] and data[\"end_date\"]:\n if data[\"start_date\"] > data[\"end_date\"]:\n raise ValidationError(\"end_date is sooner than start_date\")\n if data[\"start_date\"] and data[\"end_date\"] and data[\"dag_run_id\"]:\n raise ValidationError(\"Exactly one of dag_run_id or (start_date and end_date) must be provided\")\n if data[\"start_date\"] and data[\"dag_run_id\"]:\n raise ValidationError(\"Exactly one of dag_run_id or start_date must be provided\")\n if data[\"end_date\"] and data[\"dag_run_id\"]:\n raise ValidationError(\"Exactly one of dag_run_id or end_date must be provided\")\n\n\nclass SetTaskInstanceStateFormSchema(Schema):\n \"\"\"Schema for handling the request of setting state of task instance of a DAG.\"\"\"\n\n dry_run = fields.Boolean(dump_default=True)\n task_id = fields.Str(required=True)\n execution_date = fields.DateTime(validate=validate_istimezone)\n dag_run_id = fields.Str()\n include_upstream = fields.Boolean(required=True)\n include_downstream = fields.Boolean(required=True)\n include_future = fields.Boolean(required=True)\n include_past = fields.Boolean(required=True)\n new_state = TaskInstanceStateField(\n required=True,\n validate=validate.OneOf(\n [TaskInstanceState.SUCCESS, TaskInstanceState.FAILED, TaskInstanceState.SKIPPED]\n ),\n )\n\n @validates_schema\n def validate_form(self, data, **kwargs):\n \"\"\"Validate set task instance state form.\"\"\"\n if not exactly_one(data.get(\"execution_date\"), data.get(\"dag_run_id\")):\n raise ValidationError(\"Exactly one of execution_date or dag_run_id must be provided\")\n\n\nclass SetSingleTaskInstanceStateFormSchema(Schema):\n \"\"\"Schema for handling the request of updating state of a single task instance.\"\"\"\n\n dry_run = fields.Boolean(load_default=True)\n new_state = 
TaskInstanceStateField(\n required=True,\n validate=validate.OneOf(\n [TaskInstanceState.SUCCESS, TaskInstanceState.FAILED, TaskInstanceState.SKIPPED]\n ),\n )\n\n\nclass TaskInstanceReferenceSchema(Schema):\n \"\"\"Schema for the task instance reference schema.\"\"\"\n\n task_id = fields.Str()\n run_id = fields.Str(data_key=\"dag_run_id\")\n dag_id = fields.Str()\n execution_date = fields.DateTime()\n\n\nclass TaskInstanceReferenceCollection(NamedTuple):\n \"\"\"List of objects with metadata about taskinstance and dag_run_id.\"\"\"\n\n task_instances: list[tuple[TaskInstance, str]]\n\n\nclass TaskInstanceReferenceCollectionSchema(Schema):\n \"\"\"Collection schema for task reference.\"\"\"\n\n task_instances = fields.List(fields.Nested(TaskInstanceReferenceSchema))\n\n\nclass SetTaskInstanceNoteFormSchema(Schema):\n \"\"\"Schema for settings a note for a TaskInstance.\"\"\"\n\n # Note: We can't add map_index to the url as subpaths can't start with dashes.\n map_index = fields.Int(allow_none=False)\n note = fields.String(allow_none=True, validate=validate.Length(max=1000))\n\n\ntask_instance_schema = TaskInstanceSchema()\ntask_instance_collection_schema = TaskInstanceCollectionSchema()\ntask_instance_batch_form = TaskInstanceBatchFormSchema()\nclear_task_instance_form = ClearTaskInstanceFormSchema()\nset_task_instance_state_form = SetTaskInstanceStateFormSchema()\nset_single_task_instance_state_form = SetSingleTaskInstanceStateFormSchema()\ntask_instance_reference_schema = TaskInstanceReferenceSchema()\ntask_instance_reference_collection_schema = TaskInstanceReferenceCollectionSchema()\nset_task_instance_note_form_schema = SetTaskInstanceNoteFormSchema()\n", "path": "airflow/api_connexion/schemas/task_instance_schema.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, NamedTuple\n\nfrom marshmallow import Schema, ValidationError, fields, validate, validates_schema\nfrom marshmallow.utils import get_value\nfrom marshmallow_sqlalchemy import SQLAlchemySchema, auto_field\n\nfrom airflow.api_connexion.parameters import validate_istimezone\nfrom airflow.api_connexion.schemas.common_schema import JsonObjectField\nfrom airflow.api_connexion.schemas.enum_schemas import TaskInstanceStateField\nfrom airflow.api_connexion.schemas.job_schema import JobSchema\nfrom airflow.api_connexion.schemas.sla_miss_schema import SlaMissSchema\nfrom airflow.api_connexion.schemas.trigger_schema import TriggerSchema\nfrom airflow.models import TaskInstance\nfrom airflow.utils.helpers import exactly_one\nfrom airflow.utils.state import TaskInstanceState\n\nif TYPE_CHECKING:\n from airflow.models import SlaMiss\n\n\nclass TaskInstanceSchema(SQLAlchemySchema):\n \"\"\"Task instance schema.\"\"\"\n\n class Meta:\n \"\"\"Meta.\"\"\"\n\n model = TaskInstance\n\n task_id = auto_field()\n dag_id = auto_field()\n run_id = auto_field(data_key=\"dag_run_id\")\n map_index = auto_field()\n execution_date = auto_field()\n start_date = auto_field()\n end_date = auto_field()\n duration = auto_field()\n state = TaskInstanceStateField()\n _try_number = auto_field(data_key=\"try_number\")\n max_tries = auto_field()\n hostname = auto_field()\n unixname = auto_field()\n pool = auto_field()\n pool_slots = auto_field()\n queue = auto_field()\n priority_weight = auto_field()\n operator = auto_field()\n queued_dttm = auto_field(data_key=\"queued_when\")\n pid = auto_field()\n executor_config = auto_field()\n note = auto_field()\n sla_miss = fields.Nested(SlaMissSchema, dump_default=None)\n rendered_fields = JsonObjectField(dump_default={})\n trigger = fields.Nested(TriggerSchema)\n triggerer_job = fields.Nested(JobSchema)\n\n def get_attribute(self, obj, attr, default):\n if attr == \"sla_miss\":\n # Object is a tuple of task_instance and slamiss\n # and the get_value expects a dict with key, value\n # corresponding to the attr.\n slamiss_instance = {\"sla_miss\": obj[1]}\n return get_value(slamiss_instance, attr, default)\n elif attr == \"rendered_fields\":\n return get_value(obj[0], \"rendered_task_instance_fields.rendered_fields\", default)\n return get_value(obj[0], attr, default)\n\n\nclass TaskInstanceCollection(NamedTuple):\n \"\"\"List of task instances with metadata.\"\"\"\n\n task_instances: list[tuple[TaskInstance, SlaMiss | None]]\n total_entries: int\n\n\nclass TaskInstanceCollectionSchema(Schema):\n \"\"\"Task instance collection schema.\"\"\"\n\n task_instances = fields.List(fields.Nested(TaskInstanceSchema))\n total_entries = fields.Int()\n\n\nclass TaskInstanceBatchFormSchema(Schema):\n \"\"\"Schema for the request form passed to Task Instance Batch endpoint.\"\"\"\n\n page_offset = fields.Int(load_default=0, validate=validate.Range(min=0))\n page_limit = fields.Int(load_default=100, validate=validate.Range(min=1))\n dag_ids = fields.List(fields.Str(), load_default=None)\n dag_run_ids = fields.List(fields.Str(), load_default=None)\n task_ids = fields.List(fields.Str(), load_default=None)\n execution_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)\n execution_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)\n start_date_gte = fields.DateTime(load_default=None, 
validate=validate_istimezone)\n start_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)\n end_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)\n end_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)\n duration_gte = fields.Int(load_default=None)\n duration_lte = fields.Int(load_default=None)\n state = fields.List(fields.Str(allow_none=True), load_default=None)\n pool = fields.List(fields.Str(), load_default=None)\n queue = fields.List(fields.Str(), load_default=None)\n\n\nclass ClearTaskInstanceFormSchema(Schema):\n \"\"\"Schema for handling the request of clearing task instance of a Dag.\"\"\"\n\n dry_run = fields.Boolean(load_default=True)\n start_date = fields.DateTime(load_default=None, validate=validate_istimezone)\n end_date = fields.DateTime(load_default=None, validate=validate_istimezone)\n only_failed = fields.Boolean(load_default=True)\n only_running = fields.Boolean(load_default=False)\n include_subdags = fields.Boolean(load_default=False)\n include_parentdag = fields.Boolean(load_default=False)\n reset_dag_runs = fields.Boolean(load_default=False)\n task_ids = fields.List(fields.String(), validate=validate.Length(min=1))\n dag_run_id = fields.Str(load_default=None)\n include_upstream = fields.Boolean(load_default=False)\n include_downstream = fields.Boolean(load_default=False)\n include_future = fields.Boolean(load_default=False)\n include_past = fields.Boolean(load_default=False)\n\n @validates_schema\n def validate_form(self, data, **kwargs):\n \"\"\"Validate clear task instance form.\"\"\"\n if data[\"only_failed\"] and data[\"only_running\"]:\n raise ValidationError(\"only_failed and only_running both are set to True\")\n if data[\"start_date\"] and data[\"end_date\"]:\n if data[\"start_date\"] > data[\"end_date\"]:\n raise ValidationError(\"end_date is sooner than start_date\")\n if data[\"start_date\"] and data[\"end_date\"] and data[\"dag_run_id\"]:\n raise ValidationError(\"Exactly one of dag_run_id or (start_date and end_date) must be provided\")\n if data[\"start_date\"] and data[\"dag_run_id\"]:\n raise ValidationError(\"Exactly one of dag_run_id or start_date must be provided\")\n if data[\"end_date\"] and data[\"dag_run_id\"]:\n raise ValidationError(\"Exactly one of dag_run_id or end_date must be provided\")\n\n\nclass SetTaskInstanceStateFormSchema(Schema):\n \"\"\"Schema for handling the request of setting state of task instance of a DAG.\"\"\"\n\n dry_run = fields.Boolean(load_default=True)\n task_id = fields.Str(required=True)\n execution_date = fields.DateTime(validate=validate_istimezone)\n dag_run_id = fields.Str()\n include_upstream = fields.Boolean(required=True)\n include_downstream = fields.Boolean(required=True)\n include_future = fields.Boolean(required=True)\n include_past = fields.Boolean(required=True)\n new_state = TaskInstanceStateField(\n required=True,\n validate=validate.OneOf(\n [TaskInstanceState.SUCCESS, TaskInstanceState.FAILED, TaskInstanceState.SKIPPED]\n ),\n )\n\n @validates_schema\n def validate_form(self, data, **kwargs):\n \"\"\"Validate set task instance state form.\"\"\"\n if not exactly_one(data.get(\"execution_date\"), data.get(\"dag_run_id\")):\n raise ValidationError(\"Exactly one of execution_date or dag_run_id must be provided\")\n\n\nclass SetSingleTaskInstanceStateFormSchema(Schema):\n \"\"\"Schema for handling the request of updating state of a single task instance.\"\"\"\n\n dry_run = fields.Boolean(load_default=True)\n new_state = 
TaskInstanceStateField(\n required=True,\n validate=validate.OneOf(\n [TaskInstanceState.SUCCESS, TaskInstanceState.FAILED, TaskInstanceState.SKIPPED]\n ),\n )\n\n\nclass TaskInstanceReferenceSchema(Schema):\n \"\"\"Schema for the task instance reference schema.\"\"\"\n\n task_id = fields.Str()\n run_id = fields.Str(data_key=\"dag_run_id\")\n dag_id = fields.Str()\n execution_date = fields.DateTime()\n\n\nclass TaskInstanceReferenceCollection(NamedTuple):\n \"\"\"List of objects with metadata about taskinstance and dag_run_id.\"\"\"\n\n task_instances: list[tuple[TaskInstance, str]]\n\n\nclass TaskInstanceReferenceCollectionSchema(Schema):\n \"\"\"Collection schema for task reference.\"\"\"\n\n task_instances = fields.List(fields.Nested(TaskInstanceReferenceSchema))\n\n\nclass SetTaskInstanceNoteFormSchema(Schema):\n \"\"\"Schema for settings a note for a TaskInstance.\"\"\"\n\n # Note: We can't add map_index to the url as subpaths can't start with dashes.\n map_index = fields.Int(allow_none=False)\n note = fields.String(allow_none=True, validate=validate.Length(max=1000))\n\n\ntask_instance_schema = TaskInstanceSchema()\ntask_instance_collection_schema = TaskInstanceCollectionSchema()\ntask_instance_batch_form = TaskInstanceBatchFormSchema()\nclear_task_instance_form = ClearTaskInstanceFormSchema()\nset_task_instance_state_form = SetTaskInstanceStateFormSchema()\nset_single_task_instance_state_form = SetSingleTaskInstanceStateFormSchema()\ntask_instance_reference_schema = TaskInstanceReferenceSchema()\ntask_instance_reference_collection_schema = TaskInstanceReferenceCollectionSchema()\nset_task_instance_note_form_schema = SetTaskInstanceNoteFormSchema()\n", "path": "airflow/api_connexion/schemas/task_instance_schema.py"}]}
3,167
155
gh_patches_debug_9097
rasdani/github-patches
git_diff
chainer__chainer-1177
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Change a method reporting an error when CuPy is not correctly installed Currently CuPy raises `importError` with this error message: `ImportError: cannot import name core` This is because `core` module is a native module (i mean .so file), and the setup script cannot build it when CUDA environment is not correctly setup. This error doesn't represent its situation. We need to change its message. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cupy/__init__.py` Content: ``` 1 from __future__ import division 2 import sys 3 4 import numpy 5 import six 6 7 8 from cupy import binary 9 from cupy import core 10 from cupy import creation 11 from cupy import indexing 12 from cupy import io 13 from cupy import linalg 14 from cupy import logic 15 from cupy import manipulation 16 from cupy import math 17 import cupy.random 18 from cupy import sorting 19 from cupy import statistics 20 from cupy import testing # NOQA 21 from cupy import util 22 23 random = cupy.random 24 25 ndarray = core.ndarray 26 27 # dtype short cut 28 number = numpy.number 29 integer = numpy.integer 30 signedinteger = numpy.signedinteger 31 unsignedinteger = numpy.unsignedinteger 32 inexact = numpy.inexact 33 floating = numpy.floating 34 35 bool_ = numpy.bool_ 36 byte = numpy.byte 37 short = numpy.short 38 intc = numpy.intc 39 int_ = numpy.int_ 40 longlong = numpy.longlong 41 ubyte = numpy.ubyte 42 ushort = numpy.ushort 43 uintc = numpy.uintc 44 uint = numpy.uint 45 ulonglong = numpy.ulonglong 46 47 half = numpy.half 48 single = numpy.single 49 float_ = numpy.float_ 50 longfloat = numpy.longfloat 51 52 int8 = numpy.int8 53 int16 = numpy.int16 54 int32 = numpy.int32 55 int64 = numpy.int64 56 uint8 = numpy.uint8 57 uint16 = numpy.uint16 58 uint32 = numpy.uint32 59 uint64 = numpy.uint64 60 61 float16 = numpy.float16 62 float32 = numpy.float32 63 float64 = numpy.float64 64 65 ufunc = core.ufunc 66 67 newaxis = numpy.newaxis # == None 68 69 # ============================================================================= 70 # Routines 71 # 72 # The order of these declarations are borrowed from the NumPy document: 73 # http://docs.scipy.org/doc/numpy/reference/routines.html 74 # ============================================================================= 75 76 # ----------------------------------------------------------------------------- 77 # Array creation routines 78 # ----------------------------------------------------------------------------- 79 empty = creation.basic.empty 80 empty_like = creation.basic.empty_like 81 eye = creation.basic.eye 82 identity = creation.basic.identity 83 ones = creation.basic.ones 84 ones_like = creation.basic.ones_like 85 zeros = creation.basic.zeros 86 zeros_like = creation.basic.zeros_like 87 full = creation.basic.full 88 full_like = creation.basic.full_like 89 90 array = creation.from_data.array 91 asarray = creation.from_data.asarray 92 asanyarray = creation.from_data.asanyarray 93 ascontiguousarray = creation.from_data.ascontiguousarray 94 copy = creation.from_data.copy 95 96 arange = creation.ranges.arange 97 linspace = creation.ranges.linspace 98 99 diag = creation.matrix.diag 100 diagflat = creation.matrix.diagflat 101 102 # ----------------------------------------------------------------------------- 103 # Array manipulation routines 104 # 
----------------------------------------------------------------------------- 105 copyto = manipulation.basic.copyto 106 107 reshape = manipulation.shape.reshape 108 ravel = manipulation.shape.ravel 109 110 rollaxis = manipulation.transpose.rollaxis 111 swapaxes = manipulation.transpose.swapaxes 112 transpose = manipulation.transpose.transpose 113 114 atleast_1d = manipulation.dims.atleast_1d 115 atleast_2d = manipulation.dims.atleast_2d 116 atleast_3d = manipulation.dims.atleast_3d 117 broadcast = manipulation.dims.broadcast 118 broadcast_arrays = manipulation.dims.broadcast_arrays 119 broadcast_to = manipulation.dims.broadcast_to 120 expand_dims = manipulation.dims.expand_dims 121 squeeze = manipulation.dims.squeeze 122 123 column_stack = manipulation.join.column_stack 124 concatenate = manipulation.join.concatenate 125 dstack = manipulation.join.dstack 126 hstack = manipulation.join.hstack 127 vstack = manipulation.join.vstack 128 129 asfortranarray = manipulation.kind.asfortranarray 130 131 array_split = manipulation.split.array_split 132 dsplit = manipulation.split.dsplit 133 hsplit = manipulation.split.hsplit 134 split = manipulation.split.split 135 vsplit = manipulation.split.vsplit 136 137 tile = manipulation.tiling.tile 138 repeat = manipulation.tiling.repeat 139 140 roll = manipulation.rearrange.roll 141 142 # ----------------------------------------------------------------------------- 143 # Binary operations 144 # ----------------------------------------------------------------------------- 145 bitwise_and = binary.elementwise.bitwise_and 146 bitwise_or = binary.elementwise.bitwise_or 147 bitwise_xor = binary.elementwise.bitwise_xor 148 invert = binary.elementwise.invert 149 left_shift = binary.elementwise.left_shift 150 right_shift = binary.elementwise.right_shift 151 152 binary_repr = numpy.binary_repr 153 154 # ----------------------------------------------------------------------------- 155 # Data type routines (borrowed from NumPy) 156 # ----------------------------------------------------------------------------- 157 can_cast = numpy.can_cast 158 promote_types = numpy.promote_types 159 min_scalar_type = numpy.min_scalar_type 160 result_type = numpy.result_type 161 common_type = numpy.common_type 162 obj2sctype = numpy.obj2sctype 163 164 dtype = numpy.dtype 165 format_parser = numpy.format_parser 166 167 finfo = numpy.finfo 168 iinfo = numpy.iinfo 169 MachAr = numpy.MachAr 170 171 issctype = numpy.issctype 172 issubdtype = numpy.issubdtype 173 issubsctype = numpy.issubsctype 174 issubclass_ = numpy.issubclass_ 175 find_common_type = numpy.find_common_type 176 177 typename = numpy.typename 178 sctype2char = numpy.sctype2char 179 mintypecode = numpy.mintypecode 180 181 # ----------------------------------------------------------------------------- 182 # Optionally Scipy-accelerated routines 183 # ----------------------------------------------------------------------------- 184 # TODO(beam2d): Implement it 185 186 # ----------------------------------------------------------------------------- 187 # Discrete Fourier Transform 188 # ----------------------------------------------------------------------------- 189 # TODO(beam2d): Implement it 190 191 # ----------------------------------------------------------------------------- 192 # Indexing routines 193 # ----------------------------------------------------------------------------- 194 take = indexing.indexing.take 195 diagonal = indexing.indexing.diagonal 196 197 # 
----------------------------------------------------------------------------- 198 # Input and output 199 # ----------------------------------------------------------------------------- 200 load = io.npz.load 201 save = io.npz.save 202 savez = io.npz.savez 203 savez_compressed = io.npz.savez_compressed 204 205 array_repr = io.formatting.array_repr 206 array_str = io.formatting.array_str 207 208 base_repr = numpy.base_repr 209 210 # ----------------------------------------------------------------------------- 211 # Linear algebra 212 # ----------------------------------------------------------------------------- 213 dot = linalg.product.dot 214 vdot = linalg.product.vdot 215 inner = linalg.product.inner 216 outer = linalg.product.outer 217 tensordot = linalg.product.tensordot 218 219 trace = linalg.norm.trace 220 221 # ----------------------------------------------------------------------------- 222 # Logic functions 223 # ----------------------------------------------------------------------------- 224 isfinite = logic.content.isfinite 225 isinf = logic.content.isinf 226 isnan = logic.content.isnan 227 228 isscalar = numpy.isscalar 229 230 logical_and = logic.ops.logical_and 231 logical_or = logic.ops.logical_or 232 logical_not = logic.ops.logical_not 233 logical_xor = logic.ops.logical_xor 234 235 greater = logic.comparison.greater 236 greater_equal = logic.comparison.greater_equal 237 less = logic.comparison.less 238 less_equal = logic.comparison.less_equal 239 equal = logic.comparison.equal 240 not_equal = logic.comparison.not_equal 241 242 all = logic.truth.all 243 any = logic.truth.any 244 245 # ----------------------------------------------------------------------------- 246 # Mathematical functions 247 # ----------------------------------------------------------------------------- 248 sin = math.trigonometric.sin 249 cos = math.trigonometric.cos 250 tan = math.trigonometric.tan 251 arcsin = math.trigonometric.arcsin 252 arccos = math.trigonometric.arccos 253 arctan = math.trigonometric.arctan 254 hypot = math.trigonometric.hypot 255 arctan2 = math.trigonometric.arctan2 256 deg2rad = math.trigonometric.deg2rad 257 rad2deg = math.trigonometric.rad2deg 258 degrees = math.trigonometric.degrees 259 radians = math.trigonometric.radians 260 261 sinh = math.hyperbolic.sinh 262 cosh = math.hyperbolic.cosh 263 tanh = math.hyperbolic.tanh 264 arcsinh = math.hyperbolic.arcsinh 265 arccosh = math.hyperbolic.arccosh 266 arctanh = math.hyperbolic.arctanh 267 268 rint = math.rounding.rint 269 floor = math.rounding.floor 270 ceil = math.rounding.ceil 271 trunc = math.rounding.trunc 272 273 sum = math.sumprod.sum 274 prod = math.sumprod.prod 275 276 exp = math.explog.exp 277 expm1 = math.explog.expm1 278 exp2 = math.explog.exp2 279 log = math.explog.log 280 log10 = math.explog.log10 281 log2 = math.explog.log2 282 log1p = math.explog.log1p 283 logaddexp = math.explog.logaddexp 284 logaddexp2 = math.explog.logaddexp2 285 286 signbit = math.floating.signbit 287 copysign = math.floating.copysign 288 ldexp = math.floating.ldexp 289 frexp = math.floating.frexp 290 nextafter = math.floating.nextafter 291 292 add = math.arithmetic.add 293 reciprocal = math.arithmetic.reciprocal 294 negative = math.arithmetic.negative 295 multiply = math.arithmetic.multiply 296 divide = math.arithmetic.divide 297 power = math.arithmetic.power 298 subtract = math.arithmetic.subtract 299 true_divide = math.arithmetic.true_divide 300 floor_divide = math.arithmetic.floor_divide 301 fmod = math.arithmetic.fmod 302 mod = 
math.arithmetic.remainder 303 modf = math.arithmetic.modf 304 remainder = math.arithmetic.remainder 305 306 clip = math.misc.clip 307 sqrt = math.misc.sqrt 308 square = math.misc.square 309 absolute = math.misc.absolute 310 abs = math.misc.absolute 311 sign = math.misc.sign 312 maximum = math.misc.maximum 313 minimum = math.misc.minimum 314 fmax = math.misc.fmax 315 fmin = math.misc.fmin 316 317 # ----------------------------------------------------------------------------- 318 # Sorting, searching, and counting 319 # ----------------------------------------------------------------------------- 320 count_nonzero = sorting.count.count_nonzero 321 322 argmax = sorting.search.argmax 323 argmin = sorting.search.argmin 324 where = sorting.search.where 325 326 # ----------------------------------------------------------------------------- 327 # Statistics 328 # ----------------------------------------------------------------------------- 329 amin = statistics.order.amin 330 min = statistics.order.amin 331 amax = statistics.order.amax 332 max = statistics.order.amax 333 334 mean = statistics.meanvar.mean 335 var = statistics.meanvar.var 336 std = statistics.meanvar.std 337 338 bincount = statistics.histogram.bincount 339 340 341 # CuPy specific functions 342 def asnumpy(a, stream=None): 343 """Returns an array on the host memory from an arbitrary source array. 344 345 Args: 346 a: Arbitrary object that can be converted to :class:`numpy.ndarray`. 347 stream (cupy.cuda.Stream): CUDA stream object. If it is specified, then 348 the device-to-host copy runs asynchronously. Otherwise, the copy is 349 synchronous. Note that if ``a`` is not a :class:`cupy.ndarray` 350 object, then this argument has no effect. 351 352 Returns: 353 numpy.ndarray: Converted array on the host memory. 354 355 """ 356 if isinstance(a, ndarray): 357 return a.get(stream=stream) 358 else: 359 return numpy.asarray(a) 360 361 362 _cupy = sys.modules[__name__] 363 364 365 def get_array_module(*args): 366 """Returns the array module for arguments. 367 368 This function is used to implement CPU/GPU generic code. If at least one of 369 the arguments is a :class:`cupy.ndarray` object, the :mod:`cupy` module is 370 returned. 371 372 Args: 373 args: Values to determine whether NumPy or CuPy should be used. 374 375 Returns: 376 module: :mod:`cupy` or :mod:`numpy` is returned based on the types of 377 the arguments. 378 379 .. admonition:: Example 380 381 A NumPy/CuPy generic function can be written as follows:: 382 383 >>> def softplus(x): 384 ... xp = cupy.get_array_module(x) 385 ... return xp.maximum(0, x) + xp.log1p(xp.exp(-abs(x))) 386 387 """ 388 if six.moves.builtins.any(isinstance(arg, ndarray) for arg in args): 389 return _cupy 390 else: 391 return numpy 392 393 394 clear_memo = util.clear_memo 395 memoize = util.memoize 396 397 ElementwiseKernel = core.ElementwiseKernel 398 ReductionKernel = core.ReductionKernel 399 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cupy/__init__.py b/cupy/__init__.py --- a/cupy/__init__.py +++ b/cupy/__init__.py @@ -4,9 +4,19 @@ import numpy import six +try: + from cupy import core +except ImportError: + # core is a c-extention module. + # When a user cannot import core, it represents that CuPy is not correctly + # built. + msg = ('CuPy is not correctly installed. Please check your environment, ' + 'uninstall Chainer and reinstall it with `pip install chainer ' + '--no-cache-dir -vvvv`.') + raise six.reraise(RuntimeError, msg, sys.exc_info()[2]) + from cupy import binary -from cupy import core from cupy import creation from cupy import indexing from cupy import io
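The diff above (written for Python 2/3 via `six`) catches the failed import of the compiled `core` module and re-raises with installation advice instead of the bare "cannot import name core". A minimal, hypothetical sketch of the same pattern in modern Python 3 follows — the package and module names are placeholders, not the real CuPy layout:

```python
# Hypothetical sketch of the pattern in the diff above; names are placeholders.
try:
    from mypackage import _core  # compiled extension built at install time
except ImportError as exc:
    msg = ('mypackage is not correctly installed: its native extension failed to '
           'import. Check that the build-time toolchain/CUDA setup was available '
           'and reinstall the package.')
    # Chain the original ImportError so the real cause stays in the traceback.
    raise RuntimeError(msg) from exc
```

Chaining with `raise ... from exc` keeps the original traceback visible, which is what the `six.reraise(..., sys.exc_info()[2])` call in the diff does for Python 2/3 compatibility.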
{"golden_diff": "diff --git a/cupy/__init__.py b/cupy/__init__.py\n--- a/cupy/__init__.py\n+++ b/cupy/__init__.py\n@@ -4,9 +4,19 @@\n import numpy\n import six\n \n+try:\n+ from cupy import core\n+except ImportError:\n+ # core is a c-extention module.\n+ # When a user cannot import core, it represents that CuPy is not correctly\n+ # built.\n+ msg = ('CuPy is not correctly installed. Please check your environment, '\n+ 'uninstall Chainer and reinstall it with `pip install chainer '\n+ '--no-cache-dir -vvvv`.')\n+ raise six.reraise(RuntimeError, msg, sys.exc_info()[2])\n+\n \n from cupy import binary\n-from cupy import core\n from cupy import creation\n from cupy import indexing\n from cupy import io\n", "issue": "Change a method reporting an error when CuPy is not correctly installed\nCurrently CuPy raises `importError` with this error message:\n`ImportError: cannot import name core`\n\nThis is because `core` module is a native module (i mean .so file), and the setup script cannot build it when CUDA environment is not correctly setup. This error doesn't represent its situation. We need to change its message.\n\n", "before_files": [{"content": "from __future__ import division\nimport sys\n\nimport numpy\nimport six\n\n\nfrom cupy import binary\nfrom cupy import core\nfrom cupy import creation\nfrom cupy import indexing\nfrom cupy import io\nfrom cupy import linalg\nfrom cupy import logic\nfrom cupy import manipulation\nfrom cupy import math\nimport cupy.random\nfrom cupy import sorting\nfrom cupy import statistics\nfrom cupy import testing # NOQA\nfrom cupy import util\n\nrandom = cupy.random\n\nndarray = core.ndarray\n\n# dtype short cut\nnumber = numpy.number\ninteger = numpy.integer\nsignedinteger = numpy.signedinteger\nunsignedinteger = numpy.unsignedinteger\ninexact = numpy.inexact\nfloating = numpy.floating\n\nbool_ = numpy.bool_\nbyte = numpy.byte\nshort = numpy.short\nintc = numpy.intc\nint_ = numpy.int_\nlonglong = numpy.longlong\nubyte = numpy.ubyte\nushort = numpy.ushort\nuintc = numpy.uintc\nuint = numpy.uint\nulonglong = numpy.ulonglong\n\nhalf = numpy.half\nsingle = numpy.single\nfloat_ = numpy.float_\nlongfloat = numpy.longfloat\n\nint8 = numpy.int8\nint16 = numpy.int16\nint32 = numpy.int32\nint64 = numpy.int64\nuint8 = numpy.uint8\nuint16 = numpy.uint16\nuint32 = numpy.uint32\nuint64 = numpy.uint64\n\nfloat16 = numpy.float16\nfloat32 = numpy.float32\nfloat64 = numpy.float64\n\nufunc = core.ufunc\n\nnewaxis = numpy.newaxis # == None\n\n# =============================================================================\n# Routines\n#\n# The order of these declarations are borrowed from the NumPy document:\n# http://docs.scipy.org/doc/numpy/reference/routines.html\n# =============================================================================\n\n# -----------------------------------------------------------------------------\n# Array creation routines\n# -----------------------------------------------------------------------------\nempty = creation.basic.empty\nempty_like = creation.basic.empty_like\neye = creation.basic.eye\nidentity = creation.basic.identity\nones = creation.basic.ones\nones_like = creation.basic.ones_like\nzeros = creation.basic.zeros\nzeros_like = creation.basic.zeros_like\nfull = creation.basic.full\nfull_like = creation.basic.full_like\n\narray = creation.from_data.array\nasarray = creation.from_data.asarray\nasanyarray = creation.from_data.asanyarray\nascontiguousarray = creation.from_data.ascontiguousarray\ncopy = creation.from_data.copy\n\narange = 
creation.ranges.arange\nlinspace = creation.ranges.linspace\n\ndiag = creation.matrix.diag\ndiagflat = creation.matrix.diagflat\n\n# -----------------------------------------------------------------------------\n# Array manipulation routines\n# -----------------------------------------------------------------------------\ncopyto = manipulation.basic.copyto\n\nreshape = manipulation.shape.reshape\nravel = manipulation.shape.ravel\n\nrollaxis = manipulation.transpose.rollaxis\nswapaxes = manipulation.transpose.swapaxes\ntranspose = manipulation.transpose.transpose\n\natleast_1d = manipulation.dims.atleast_1d\natleast_2d = manipulation.dims.atleast_2d\natleast_3d = manipulation.dims.atleast_3d\nbroadcast = manipulation.dims.broadcast\nbroadcast_arrays = manipulation.dims.broadcast_arrays\nbroadcast_to = manipulation.dims.broadcast_to\nexpand_dims = manipulation.dims.expand_dims\nsqueeze = manipulation.dims.squeeze\n\ncolumn_stack = manipulation.join.column_stack\nconcatenate = manipulation.join.concatenate\ndstack = manipulation.join.dstack\nhstack = manipulation.join.hstack\nvstack = manipulation.join.vstack\n\nasfortranarray = manipulation.kind.asfortranarray\n\narray_split = manipulation.split.array_split\ndsplit = manipulation.split.dsplit\nhsplit = manipulation.split.hsplit\nsplit = manipulation.split.split\nvsplit = manipulation.split.vsplit\n\ntile = manipulation.tiling.tile\nrepeat = manipulation.tiling.repeat\n\nroll = manipulation.rearrange.roll\n\n# -----------------------------------------------------------------------------\n# Binary operations\n# -----------------------------------------------------------------------------\nbitwise_and = binary.elementwise.bitwise_and\nbitwise_or = binary.elementwise.bitwise_or\nbitwise_xor = binary.elementwise.bitwise_xor\ninvert = binary.elementwise.invert\nleft_shift = binary.elementwise.left_shift\nright_shift = binary.elementwise.right_shift\n\nbinary_repr = numpy.binary_repr\n\n# -----------------------------------------------------------------------------\n# Data type routines (borrowed from NumPy)\n# -----------------------------------------------------------------------------\ncan_cast = numpy.can_cast\npromote_types = numpy.promote_types\nmin_scalar_type = numpy.min_scalar_type\nresult_type = numpy.result_type\ncommon_type = numpy.common_type\nobj2sctype = numpy.obj2sctype\n\ndtype = numpy.dtype\nformat_parser = numpy.format_parser\n\nfinfo = numpy.finfo\niinfo = numpy.iinfo\nMachAr = numpy.MachAr\n\nissctype = numpy.issctype\nissubdtype = numpy.issubdtype\nissubsctype = numpy.issubsctype\nissubclass_ = numpy.issubclass_\nfind_common_type = numpy.find_common_type\n\ntypename = numpy.typename\nsctype2char = numpy.sctype2char\nmintypecode = numpy.mintypecode\n\n# -----------------------------------------------------------------------------\n# Optionally Scipy-accelerated routines\n# -----------------------------------------------------------------------------\n# TODO(beam2d): Implement it\n\n# -----------------------------------------------------------------------------\n# Discrete Fourier Transform\n# -----------------------------------------------------------------------------\n# TODO(beam2d): Implement it\n\n# -----------------------------------------------------------------------------\n# Indexing routines\n# -----------------------------------------------------------------------------\ntake = indexing.indexing.take\ndiagonal = indexing.indexing.diagonal\n\n# 
-----------------------------------------------------------------------------\n# Input and output\n# -----------------------------------------------------------------------------\nload = io.npz.load\nsave = io.npz.save\nsavez = io.npz.savez\nsavez_compressed = io.npz.savez_compressed\n\narray_repr = io.formatting.array_repr\narray_str = io.formatting.array_str\n\nbase_repr = numpy.base_repr\n\n# -----------------------------------------------------------------------------\n# Linear algebra\n# -----------------------------------------------------------------------------\ndot = linalg.product.dot\nvdot = linalg.product.vdot\ninner = linalg.product.inner\nouter = linalg.product.outer\ntensordot = linalg.product.tensordot\n\ntrace = linalg.norm.trace\n\n# -----------------------------------------------------------------------------\n# Logic functions\n# -----------------------------------------------------------------------------\nisfinite = logic.content.isfinite\nisinf = logic.content.isinf\nisnan = logic.content.isnan\n\nisscalar = numpy.isscalar\n\nlogical_and = logic.ops.logical_and\nlogical_or = logic.ops.logical_or\nlogical_not = logic.ops.logical_not\nlogical_xor = logic.ops.logical_xor\n\ngreater = logic.comparison.greater\ngreater_equal = logic.comparison.greater_equal\nless = logic.comparison.less\nless_equal = logic.comparison.less_equal\nequal = logic.comparison.equal\nnot_equal = logic.comparison.not_equal\n\nall = logic.truth.all\nany = logic.truth.any\n\n# -----------------------------------------------------------------------------\n# Mathematical functions\n# -----------------------------------------------------------------------------\nsin = math.trigonometric.sin\ncos = math.trigonometric.cos\ntan = math.trigonometric.tan\narcsin = math.trigonometric.arcsin\narccos = math.trigonometric.arccos\narctan = math.trigonometric.arctan\nhypot = math.trigonometric.hypot\narctan2 = math.trigonometric.arctan2\ndeg2rad = math.trigonometric.deg2rad\nrad2deg = math.trigonometric.rad2deg\ndegrees = math.trigonometric.degrees\nradians = math.trigonometric.radians\n\nsinh = math.hyperbolic.sinh\ncosh = math.hyperbolic.cosh\ntanh = math.hyperbolic.tanh\narcsinh = math.hyperbolic.arcsinh\narccosh = math.hyperbolic.arccosh\narctanh = math.hyperbolic.arctanh\n\nrint = math.rounding.rint\nfloor = math.rounding.floor\nceil = math.rounding.ceil\ntrunc = math.rounding.trunc\n\nsum = math.sumprod.sum\nprod = math.sumprod.prod\n\nexp = math.explog.exp\nexpm1 = math.explog.expm1\nexp2 = math.explog.exp2\nlog = math.explog.log\nlog10 = math.explog.log10\nlog2 = math.explog.log2\nlog1p = math.explog.log1p\nlogaddexp = math.explog.logaddexp\nlogaddexp2 = math.explog.logaddexp2\n\nsignbit = math.floating.signbit\ncopysign = math.floating.copysign\nldexp = math.floating.ldexp\nfrexp = math.floating.frexp\nnextafter = math.floating.nextafter\n\nadd = math.arithmetic.add\nreciprocal = math.arithmetic.reciprocal\nnegative = math.arithmetic.negative\nmultiply = math.arithmetic.multiply\ndivide = math.arithmetic.divide\npower = math.arithmetic.power\nsubtract = math.arithmetic.subtract\ntrue_divide = math.arithmetic.true_divide\nfloor_divide = math.arithmetic.floor_divide\nfmod = math.arithmetic.fmod\nmod = math.arithmetic.remainder\nmodf = math.arithmetic.modf\nremainder = math.arithmetic.remainder\n\nclip = math.misc.clip\nsqrt = math.misc.sqrt\nsquare = math.misc.square\nabsolute = math.misc.absolute\nabs = math.misc.absolute\nsign = math.misc.sign\nmaximum = math.misc.maximum\nminimum = 
math.misc.minimum\nfmax = math.misc.fmax\nfmin = math.misc.fmin\n\n# -----------------------------------------------------------------------------\n# Sorting, searching, and counting\n# -----------------------------------------------------------------------------\ncount_nonzero = sorting.count.count_nonzero\n\nargmax = sorting.search.argmax\nargmin = sorting.search.argmin\nwhere = sorting.search.where\n\n# -----------------------------------------------------------------------------\n# Statistics\n# -----------------------------------------------------------------------------\namin = statistics.order.amin\nmin = statistics.order.amin\namax = statistics.order.amax\nmax = statistics.order.amax\n\nmean = statistics.meanvar.mean\nvar = statistics.meanvar.var\nstd = statistics.meanvar.std\n\nbincount = statistics.histogram.bincount\n\n\n# CuPy specific functions\ndef asnumpy(a, stream=None):\n \"\"\"Returns an array on the host memory from an arbitrary source array.\n\n Args:\n a: Arbitrary object that can be converted to :class:`numpy.ndarray`.\n stream (cupy.cuda.Stream): CUDA stream object. If it is specified, then\n the device-to-host copy runs asynchronously. Otherwise, the copy is\n synchronous. Note that if ``a`` is not a :class:`cupy.ndarray`\n object, then this argument has no effect.\n\n Returns:\n numpy.ndarray: Converted array on the host memory.\n\n \"\"\"\n if isinstance(a, ndarray):\n return a.get(stream=stream)\n else:\n return numpy.asarray(a)\n\n\n_cupy = sys.modules[__name__]\n\n\ndef get_array_module(*args):\n \"\"\"Returns the array module for arguments.\n\n This function is used to implement CPU/GPU generic code. If at least one of\n the arguments is a :class:`cupy.ndarray` object, the :mod:`cupy` module is\n returned.\n\n Args:\n args: Values to determine whether NumPy or CuPy should be used.\n\n Returns:\n module: :mod:`cupy` or :mod:`numpy` is returned based on the types of\n the arguments.\n\n .. admonition:: Example\n\n A NumPy/CuPy generic function can be written as follows::\n\n >>> def softplus(x):\n ... xp = cupy.get_array_module(x)\n ... return xp.maximum(0, x) + xp.log1p(xp.exp(-abs(x)))\n\n \"\"\"\n if six.moves.builtins.any(isinstance(arg, ndarray) for arg in args):\n return _cupy\n else:\n return numpy\n\n\nclear_memo = util.clear_memo\nmemoize = util.memoize\n\nElementwiseKernel = core.ElementwiseKernel\nReductionKernel = core.ReductionKernel\n", "path": "cupy/__init__.py"}], "after_files": [{"content": "from __future__ import division\nimport sys\n\nimport numpy\nimport six\n\ntry:\n from cupy import core\nexcept ImportError:\n # core is a c-extention module.\n # When a user cannot import core, it represents that CuPy is not correctly\n # built.\n msg = ('CuPy is not correctly installed. 
Please check your environment, '\n 'uninstall Chainer and reinstall it with `pip install chainer '\n '--no-cache-dir -vvvv`.')\n raise six.reraise(RuntimeError, msg, sys.exc_info()[2])\n\n\nfrom cupy import binary\nfrom cupy import creation\nfrom cupy import indexing\nfrom cupy import io\nfrom cupy import linalg\nfrom cupy import logic\nfrom cupy import manipulation\nfrom cupy import math\nimport cupy.random\nfrom cupy import sorting\nfrom cupy import statistics\nfrom cupy import testing # NOQA\nfrom cupy import util\n\nrandom = cupy.random\n\nndarray = core.ndarray\n\n# dtype short cut\nnumber = numpy.number\ninteger = numpy.integer\nsignedinteger = numpy.signedinteger\nunsignedinteger = numpy.unsignedinteger\ninexact = numpy.inexact\nfloating = numpy.floating\n\nbool_ = numpy.bool_\nbyte = numpy.byte\nshort = numpy.short\nintc = numpy.intc\nint_ = numpy.int_\nlonglong = numpy.longlong\nubyte = numpy.ubyte\nushort = numpy.ushort\nuintc = numpy.uintc\nuint = numpy.uint\nulonglong = numpy.ulonglong\n\nhalf = numpy.half\nsingle = numpy.single\nfloat_ = numpy.float_\nlongfloat = numpy.longfloat\n\nint8 = numpy.int8\nint16 = numpy.int16\nint32 = numpy.int32\nint64 = numpy.int64\nuint8 = numpy.uint8\nuint16 = numpy.uint16\nuint32 = numpy.uint32\nuint64 = numpy.uint64\n\nfloat16 = numpy.float16\nfloat32 = numpy.float32\nfloat64 = numpy.float64\n\nufunc = core.ufunc\n\nnewaxis = numpy.newaxis # == None\n\n# =============================================================================\n# Routines\n#\n# The order of these declarations are borrowed from the NumPy document:\n# http://docs.scipy.org/doc/numpy/reference/routines.html\n# =============================================================================\n\n# -----------------------------------------------------------------------------\n# Array creation routines\n# -----------------------------------------------------------------------------\nempty = creation.basic.empty\nempty_like = creation.basic.empty_like\neye = creation.basic.eye\nidentity = creation.basic.identity\nones = creation.basic.ones\nones_like = creation.basic.ones_like\nzeros = creation.basic.zeros\nzeros_like = creation.basic.zeros_like\nfull = creation.basic.full\nfull_like = creation.basic.full_like\n\narray = creation.from_data.array\nasarray = creation.from_data.asarray\nasanyarray = creation.from_data.asanyarray\nascontiguousarray = creation.from_data.ascontiguousarray\ncopy = creation.from_data.copy\n\narange = creation.ranges.arange\nlinspace = creation.ranges.linspace\n\ndiag = creation.matrix.diag\ndiagflat = creation.matrix.diagflat\n\n# -----------------------------------------------------------------------------\n# Array manipulation routines\n# -----------------------------------------------------------------------------\ncopyto = manipulation.basic.copyto\n\nreshape = manipulation.shape.reshape\nravel = manipulation.shape.ravel\n\nrollaxis = manipulation.transpose.rollaxis\nswapaxes = manipulation.transpose.swapaxes\ntranspose = manipulation.transpose.transpose\n\natleast_1d = manipulation.dims.atleast_1d\natleast_2d = manipulation.dims.atleast_2d\natleast_3d = manipulation.dims.atleast_3d\nbroadcast = manipulation.dims.broadcast\nbroadcast_arrays = manipulation.dims.broadcast_arrays\nbroadcast_to = manipulation.dims.broadcast_to\nexpand_dims = manipulation.dims.expand_dims\nsqueeze = manipulation.dims.squeeze\n\ncolumn_stack = manipulation.join.column_stack\nconcatenate = manipulation.join.concatenate\ndstack = manipulation.join.dstack\nhstack = 
manipulation.join.hstack\nvstack = manipulation.join.vstack\n\nasfortranarray = manipulation.kind.asfortranarray\n\narray_split = manipulation.split.array_split\ndsplit = manipulation.split.dsplit\nhsplit = manipulation.split.hsplit\nsplit = manipulation.split.split\nvsplit = manipulation.split.vsplit\n\ntile = manipulation.tiling.tile\nrepeat = manipulation.tiling.repeat\n\nroll = manipulation.rearrange.roll\n\n# -----------------------------------------------------------------------------\n# Binary operations\n# -----------------------------------------------------------------------------\nbitwise_and = binary.elementwise.bitwise_and\nbitwise_or = binary.elementwise.bitwise_or\nbitwise_xor = binary.elementwise.bitwise_xor\ninvert = binary.elementwise.invert\nleft_shift = binary.elementwise.left_shift\nright_shift = binary.elementwise.right_shift\n\nbinary_repr = numpy.binary_repr\n\n# -----------------------------------------------------------------------------\n# Data type routines (borrowed from NumPy)\n# -----------------------------------------------------------------------------\ncan_cast = numpy.can_cast\npromote_types = numpy.promote_types\nmin_scalar_type = numpy.min_scalar_type\nresult_type = numpy.result_type\ncommon_type = numpy.common_type\nobj2sctype = numpy.obj2sctype\n\ndtype = numpy.dtype\nformat_parser = numpy.format_parser\n\nfinfo = numpy.finfo\niinfo = numpy.iinfo\nMachAr = numpy.MachAr\n\nissctype = numpy.issctype\nissubdtype = numpy.issubdtype\nissubsctype = numpy.issubsctype\nissubclass_ = numpy.issubclass_\nfind_common_type = numpy.find_common_type\n\ntypename = numpy.typename\nsctype2char = numpy.sctype2char\nmintypecode = numpy.mintypecode\n\n# -----------------------------------------------------------------------------\n# Optionally Scipy-accelerated routines\n# -----------------------------------------------------------------------------\n# TODO(beam2d): Implement it\n\n# -----------------------------------------------------------------------------\n# Discrete Fourier Transform\n# -----------------------------------------------------------------------------\n# TODO(beam2d): Implement it\n\n# -----------------------------------------------------------------------------\n# Indexing routines\n# -----------------------------------------------------------------------------\ntake = indexing.indexing.take\ndiagonal = indexing.indexing.diagonal\n\n# -----------------------------------------------------------------------------\n# Input and output\n# -----------------------------------------------------------------------------\nload = io.npz.load\nsave = io.npz.save\nsavez = io.npz.savez\nsavez_compressed = io.npz.savez_compressed\n\narray_repr = io.formatting.array_repr\narray_str = io.formatting.array_str\n\nbase_repr = numpy.base_repr\n\n# -----------------------------------------------------------------------------\n# Linear algebra\n# -----------------------------------------------------------------------------\ndot = linalg.product.dot\nvdot = linalg.product.vdot\ninner = linalg.product.inner\nouter = linalg.product.outer\ntensordot = linalg.product.tensordot\n\ntrace = linalg.norm.trace\n\n# -----------------------------------------------------------------------------\n# Logic functions\n# -----------------------------------------------------------------------------\nisfinite = logic.content.isfinite\nisinf = logic.content.isinf\nisnan = logic.content.isnan\n\nisscalar = numpy.isscalar\n\nlogical_and = logic.ops.logical_and\nlogical_or = 
logic.ops.logical_or\nlogical_not = logic.ops.logical_not\nlogical_xor = logic.ops.logical_xor\n\ngreater = logic.comparison.greater\ngreater_equal = logic.comparison.greater_equal\nless = logic.comparison.less\nless_equal = logic.comparison.less_equal\nequal = logic.comparison.equal\nnot_equal = logic.comparison.not_equal\n\nall = logic.truth.all\nany = logic.truth.any\n\n# -----------------------------------------------------------------------------\n# Mathematical functions\n# -----------------------------------------------------------------------------\nsin = math.trigonometric.sin\ncos = math.trigonometric.cos\ntan = math.trigonometric.tan\narcsin = math.trigonometric.arcsin\narccos = math.trigonometric.arccos\narctan = math.trigonometric.arctan\nhypot = math.trigonometric.hypot\narctan2 = math.trigonometric.arctan2\ndeg2rad = math.trigonometric.deg2rad\nrad2deg = math.trigonometric.rad2deg\ndegrees = math.trigonometric.degrees\nradians = math.trigonometric.radians\n\nsinh = math.hyperbolic.sinh\ncosh = math.hyperbolic.cosh\ntanh = math.hyperbolic.tanh\narcsinh = math.hyperbolic.arcsinh\narccosh = math.hyperbolic.arccosh\narctanh = math.hyperbolic.arctanh\n\nrint = math.rounding.rint\nfloor = math.rounding.floor\nceil = math.rounding.ceil\ntrunc = math.rounding.trunc\n\nsum = math.sumprod.sum\nprod = math.sumprod.prod\n\nexp = math.explog.exp\nexpm1 = math.explog.expm1\nexp2 = math.explog.exp2\nlog = math.explog.log\nlog10 = math.explog.log10\nlog2 = math.explog.log2\nlog1p = math.explog.log1p\nlogaddexp = math.explog.logaddexp\nlogaddexp2 = math.explog.logaddexp2\n\nsignbit = math.floating.signbit\ncopysign = math.floating.copysign\nldexp = math.floating.ldexp\nfrexp = math.floating.frexp\nnextafter = math.floating.nextafter\n\nadd = math.arithmetic.add\nreciprocal = math.arithmetic.reciprocal\nnegative = math.arithmetic.negative\nmultiply = math.arithmetic.multiply\ndivide = math.arithmetic.divide\npower = math.arithmetic.power\nsubtract = math.arithmetic.subtract\ntrue_divide = math.arithmetic.true_divide\nfloor_divide = math.arithmetic.floor_divide\nfmod = math.arithmetic.fmod\nmod = math.arithmetic.remainder\nmodf = math.arithmetic.modf\nremainder = math.arithmetic.remainder\n\nclip = math.misc.clip\nsqrt = math.misc.sqrt\nsquare = math.misc.square\nabsolute = math.misc.absolute\nabs = math.misc.absolute\nsign = math.misc.sign\nmaximum = math.misc.maximum\nminimum = math.misc.minimum\nfmax = math.misc.fmax\nfmin = math.misc.fmin\n\n# -----------------------------------------------------------------------------\n# Sorting, searching, and counting\n# -----------------------------------------------------------------------------\ncount_nonzero = sorting.count.count_nonzero\n\nargmax = sorting.search.argmax\nargmin = sorting.search.argmin\nwhere = sorting.search.where\n\n# -----------------------------------------------------------------------------\n# Statistics\n# -----------------------------------------------------------------------------\namin = statistics.order.amin\nmin = statistics.order.amin\namax = statistics.order.amax\nmax = statistics.order.amax\n\nmean = statistics.meanvar.mean\nvar = statistics.meanvar.var\nstd = statistics.meanvar.std\n\nbincount = statistics.histogram.bincount\n\n\n# CuPy specific functions\ndef asnumpy(a, stream=None):\n \"\"\"Returns an array on the host memory from an arbitrary source array.\n\n Args:\n a: Arbitrary object that can be converted to :class:`numpy.ndarray`.\n stream (cupy.cuda.Stream): CUDA stream object. 
If it is specified, then\n the device-to-host copy runs asynchronously. Otherwise, the copy is\n synchronous. Note that if ``a`` is not a :class:`cupy.ndarray`\n object, then this argument has no effect.\n\n Returns:\n numpy.ndarray: Converted array on the host memory.\n\n \"\"\"\n if isinstance(a, ndarray):\n return a.get(stream=stream)\n else:\n return numpy.asarray(a)\n\n\n_cupy = sys.modules[__name__]\n\n\ndef get_array_module(*args):\n \"\"\"Returns the array module for arguments.\n\n This function is used to implement CPU/GPU generic code. If at least one of\n the arguments is a :class:`cupy.ndarray` object, the :mod:`cupy` module is\n returned.\n\n Args:\n args: Values to determine whether NumPy or CuPy should be used.\n\n Returns:\n module: :mod:`cupy` or :mod:`numpy` is returned based on the types of\n the arguments.\n\n .. admonition:: Example\n\n A NumPy/CuPy generic function can be written as follows::\n\n >>> def softplus(x):\n ... xp = cupy.get_array_module(x)\n ... return xp.maximum(0, x) + xp.log1p(xp.exp(-abs(x)))\n\n \"\"\"\n if six.moves.builtins.any(isinstance(arg, ndarray) for arg in args):\n return _cupy\n else:\n return numpy\n\n\nclear_memo = util.clear_memo\nmemoize = util.memoize\n\nElementwiseKernel = core.ElementwiseKernel\nReductionKernel = core.ReductionKernel\n", "path": "cupy/__init__.py"}]}
3,950
199
gh_patches_debug_6901
rasdani/github-patches
git_diff
pymedusa__Medusa-2851
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [APP SUBMITTED]: ValueError: hour must be in 0..23 ### INFO **Python Version**: `2.7.9 (default, Sep 17 2016, 20:26:04) [GCC 4.9.2]` **Operating System**: `Linux-4.4.38-v7+-armv7l-with-debian-8.0` **Locale**: `UTF-8` **Branch**: [develop](../tree/develop) **Database**: `44.8` **Commit**: pymedusa/Medusa@94e0649919e9e05ac380f8e6d3817c18255931ee **Link to Log**: https://gist.github.com/31ec35e671692a5cc7ba13b4d2d07353 ### ERROR <pre> 2017-05-30 15:24:07 ERROR SEARCHQUEUE-BACKLOG-75340 :: [TorrentProject] :: [94e0649] Failed parsing publishing date: 24 hours ago Traceback (most recent call last): File "/home/pi/Medusa/<a href="../blob/94e0649919e9e05ac380f8e6d3817c18255931ee/medusa/providers/generic_provider.py#L512">medusa/providers/generic_provider.py</a>", line 512, in parse_pubdate dt = parser.parse(pubdate, fuzzy=True) File "/home/pi/Medusa/<a href="../blob/94e0649919e9e05ac380f8e6d3817c18255931ee/lib/dateutil/parser.py#L1181">lib/dateutil/parser.py</a>", line 1181, in parse return DEFAULTPARSER.parse(timestr, **kwargs) File "/home/pi/Medusa/<a href="../blob/94e0649919e9e05ac380f8e6d3817c18255931ee/lib/dateutil/parser.py#L584">lib/dateutil/parser.py</a>", line 584, in parse ret = default.replace(**repl) ValueError: hour must be in 0..23 </pre> --- _STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `medusa/providers/torrent/html/torrentproject.py` Content: ``` 1 # coding=utf-8 2 3 """Provider code for TorrentProject.""" 4 5 from __future__ import unicode_literals 6 7 import logging 8 import traceback 9 10 from medusa import tv 11 from medusa.bs4_parser import BS4Parser 12 from medusa.common import USER_AGENT 13 from medusa.helper.common import ( 14 convert_size, 15 try_int, 16 ) 17 from medusa.logger.adapters.style import BraceAdapter 18 from medusa.providers.torrent.torrent_provider import TorrentProvider 19 20 import validators 21 22 log = BraceAdapter(logging.getLogger(__name__)) 23 log.logger.addHandler(logging.NullHandler()) 24 25 26 class TorrentProjectProvider(TorrentProvider): 27 """TorrentProject Torrent provider.""" 28 29 def __init__(self): 30 """Initialize the class.""" 31 super(TorrentProjectProvider, self).__init__('TorrentProject') 32 33 # Credentials 34 self.public = True 35 36 # URLs 37 self.url = 'https://torrentproject.se' 38 self.custom_url = None 39 40 # Proper Strings 41 42 # Miscellaneous Options 43 self.headers.update({'User-Agent': USER_AGENT}) 44 45 # Torrent Stats 46 self.minseed = None 47 self.minleech = None 48 49 # Cache 50 self.cache = tv.Cache(self, min_time=20) 51 52 def search(self, search_strings, age=0, ep_obj=None): 53 """ 54 Search a provider and parse the results. 
55 56 :param search_strings: A dict with mode (key) and the search value (value) 57 :param age: Not used 58 :param ep_obj: Not used 59 :returns: A list of search results (structure) 60 """ 61 results = [] 62 63 if self.custom_url: 64 if not validators.url(self.custom_url): 65 log.warning('Invalid custom url: {0}', self.custom_url) 66 return results 67 search_url = self.custom_url 68 else: 69 search_url = self.url 70 71 search_params = { 72 'hl': 'en', 73 'num': 40, 74 'start': 0, 75 'filter': 2101, 76 'safe': 'on', 77 'orderby': 'latest', 78 's': '', 79 } 80 81 for mode in search_strings: 82 log.debug('Search mode: {0}', mode) 83 84 for search_string in search_strings[mode]: 85 if mode != 'RSS': 86 search_params['s'] = search_string 87 log.debug('Search string: {search}', 88 {'search': search_string}) 89 90 response = self.get_url(search_url, params=search_params, returns='response') 91 if not response or not response.text: 92 log.debug('No data returned from provider') 93 continue 94 95 results += self.parse(response.text, mode) 96 97 return results 98 99 def parse(self, data, mode): 100 """ 101 Parse search results for items. 102 103 :param data: The raw response from a search 104 :param mode: The current mode used to search, e.g. RSS 105 106 :return: A list of items found 107 """ 108 items = [] 109 110 with BS4Parser(data, 'html5lib') as html: 111 torrent_rows = html.find_all('div', class_='torrent') 112 113 # Continue only if at least one release is found 114 if not torrent_rows: 115 log.debug('Data returned from provider does not contain any torrents') 116 return items 117 118 for row in torrent_rows: 119 anchor = row.find('a') 120 121 try: 122 # Removes ' torrent' in the end 123 title = anchor.get('title')[:-8] 124 download_url = anchor.get('href') 125 if not all([title, download_url]): 126 continue 127 128 info_hash = download_url.split('/')[1] 129 download_url = 'magnet:?xt=urn:btih:{hash}&dn={title}{trackers}'.format( 130 hash=info_hash, title=title, trackers=self._custom_trackers) 131 132 seeders = try_int(row.find('span', class_='bc seeders').find('span').get_text(), 1) 133 leechers = try_int(row.find('span', class_='bc leechers').find('span').get_text()) 134 135 # Filter unseeded torrent 136 if seeders < min(self.minseed, 1): 137 if mode != 'RSS': 138 log.debug("Discarding torrent because it doesn't meet the" 139 " minimum seeders: {0}. Seeders: {1}", 140 title, seeders) 141 continue 142 143 torrent_size = row.find('span', class_='bc torrent-size').get_text().rstrip() 144 size = convert_size(torrent_size) or -1 145 146 pubdate_raw = row.find('span', class_='bc cated').get_text(strip=True) 147 pubdate = self.parse_pubdate(pubdate_raw) 148 149 item = { 150 'title': title, 151 'link': download_url, 152 'size': size, 153 'seeders': seeders, 154 'leechers': leechers, 155 'pubdate': pubdate, 156 } 157 if mode != 'RSS': 158 log.debug('Found result: {0} with {1} seeders and {2} leechers', 159 title, seeders, leechers) 160 161 items.append(item) 162 except (AttributeError, TypeError, KeyError, ValueError, IndexError): 163 log.error('Failed parsing provider. Traceback: {0!r}', 164 traceback.format_exc()) 165 166 return items 167 168 169 provider = TorrentProjectProvider() 170 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/medusa/providers/torrent/html/torrentproject.py b/medusa/providers/torrent/html/torrentproject.py
--- a/medusa/providers/torrent/html/torrentproject.py
+++ b/medusa/providers/torrent/html/torrentproject.py
@@ -144,7 +144,7 @@
                     size = convert_size(torrent_size) or -1
 
                     pubdate_raw = row.find('span', class_='bc cated').get_text(strip=True)
-                    pubdate = self.parse_pubdate(pubdate_raw)
+                    pubdate = self.parse_pubdate(pubdate_raw, human_time=True)
 
                     item = {
                         'title': title,
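The one-line fix above passes `human_time=True` so that relative strings such as "24 hours ago" are handled as offsets instead of being fed to `dateutil`, whose fuzzy parser reads the bare `24` as an hour and raises `ValueError: hour must be in 0..23`. A rough sketch of that idea follows; it is illustrative only, not Medusa's `parse_pubdate`, and it assumes `python-dateutil` is installed for the fallback path.

```python
# Illustrative only: handle "N <unit> ago" before falling back to dateutil.
import re
from datetime import datetime, timedelta

from dateutil import parser

_UNITS = {'minute': 'minutes', 'hour': 'hours', 'day': 'days', 'week': 'weeks'}


def parse_pubdate(pubdate, human_time=False):
    if human_time:
        match = re.match(r'(\d+)\s+(minute|hour|day|week)s?\s+ago', pubdate)
        if match:
            amount, unit = int(match.group(1)), _UNITS[match.group(2)]
            return datetime.now() - timedelta(**{unit: amount})
    # Without the human_time branch, "24 hours ago" reaches dateutil, which
    # fuzzily picks "24" up as an hour and raises ValueError.
    return parser.parse(pubdate, fuzzy=True)


print(parse_pubdate('24 hours ago', human_time=True))
```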
{"golden_diff": "diff --git a/medusa/providers/torrent/html/torrentproject.py b/medusa/providers/torrent/html/torrentproject.py\n--- a/medusa/providers/torrent/html/torrentproject.py\n+++ b/medusa/providers/torrent/html/torrentproject.py\n@@ -144,7 +144,7 @@\n size = convert_size(torrent_size) or -1\n \n pubdate_raw = row.find('span', class_='bc cated').get_text(strip=True)\n- pubdate = self.parse_pubdate(pubdate_raw)\n+ pubdate = self.parse_pubdate(pubdate_raw, human_time=True)\n \n item = {\n 'title': title,\n", "issue": "[APP SUBMITTED]: ValueError: hour must be in 0..23\n### INFO\n**Python Version**: `2.7.9 (default, Sep 17 2016, 20:26:04) [GCC 4.9.2]`\n**Operating System**: `Linux-4.4.38-v7+-armv7l-with-debian-8.0`\n**Locale**: `UTF-8`\n**Branch**: [develop](../tree/develop)\n**Database**: `44.8`\n**Commit**: pymedusa/Medusa@94e0649919e9e05ac380f8e6d3817c18255931ee\n**Link to Log**: https://gist.github.com/31ec35e671692a5cc7ba13b4d2d07353\n### ERROR\n<pre>\n2017-05-30 15:24:07 ERROR SEARCHQUEUE-BACKLOG-75340 :: [TorrentProject] :: [94e0649] Failed parsing publishing date: 24 hours ago\nTraceback (most recent call last):\n File \"/home/pi/Medusa/<a href=\"../blob/94e0649919e9e05ac380f8e6d3817c18255931ee/medusa/providers/generic_provider.py#L512\">medusa/providers/generic_provider.py</a>\", line 512, in parse_pubdate\n dt = parser.parse(pubdate, fuzzy=True)\n File \"/home/pi/Medusa/<a href=\"../blob/94e0649919e9e05ac380f8e6d3817c18255931ee/lib/dateutil/parser.py#L1181\">lib/dateutil/parser.py</a>\", line 1181, in parse\n return DEFAULTPARSER.parse(timestr, **kwargs)\n File \"/home/pi/Medusa/<a href=\"../blob/94e0649919e9e05ac380f8e6d3817c18255931ee/lib/dateutil/parser.py#L584\">lib/dateutil/parser.py</a>\", line 584, in parse\n ret = default.replace(**repl)\nValueError: hour must be in 0..23\n</pre>\n---\n_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for TorrentProject.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport traceback\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.common import USER_AGENT\nfrom medusa.helper.common import (\n convert_size,\n try_int,\n)\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nimport validators\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass TorrentProjectProvider(TorrentProvider):\n \"\"\"TorrentProject Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(TorrentProjectProvider, self).__init__('TorrentProject')\n\n # Credentials\n self.public = True\n\n # URLs\n self.url = 'https://torrentproject.se'\n self.custom_url = None\n\n # Proper Strings\n\n # Miscellaneous Options\n self.headers.update({'User-Agent': USER_AGENT})\n\n # Torrent Stats\n self.minseed = None\n self.minleech = None\n\n # Cache\n self.cache = tv.Cache(self, min_time=20)\n\n def search(self, search_strings, age=0, ep_obj=None):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n\n if self.custom_url:\n if not validators.url(self.custom_url):\n log.warning('Invalid custom url: {0}', self.custom_url)\n return results\n search_url = self.custom_url\n else:\n search_url = 
self.url\n\n search_params = {\n 'hl': 'en',\n 'num': 40,\n 'start': 0,\n 'filter': 2101,\n 'safe': 'on',\n 'orderby': 'latest',\n 's': '',\n }\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n if mode != 'RSS':\n search_params['s'] = search_string\n log.debug('Search string: {search}',\n {'search': search_string})\n\n response = self.get_url(search_url, params=search_params, returns='response')\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n torrent_rows = html.find_all('div', class_='torrent')\n\n # Continue only if at least one release is found\n if not torrent_rows:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n for row in torrent_rows:\n anchor = row.find('a')\n\n try:\n # Removes ' torrent' in the end\n title = anchor.get('title')[:-8]\n download_url = anchor.get('href')\n if not all([title, download_url]):\n continue\n\n info_hash = download_url.split('/')[1]\n download_url = 'magnet:?xt=urn:btih:{hash}&dn={title}{trackers}'.format(\n hash=info_hash, title=title, trackers=self._custom_trackers)\n\n seeders = try_int(row.find('span', class_='bc seeders').find('span').get_text(), 1)\n leechers = try_int(row.find('span', class_='bc leechers').find('span').get_text())\n\n # Filter unseeded torrent\n if seeders < min(self.minseed, 1):\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n \" minimum seeders: {0}. Seeders: {1}\",\n title, seeders)\n continue\n\n torrent_size = row.find('span', class_='bc torrent-size').get_text().rstrip()\n size = convert_size(torrent_size) or -1\n\n pubdate_raw = row.find('span', class_='bc cated').get_text(strip=True)\n pubdate = self.parse_pubdate(pubdate_raw)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.error('Failed parsing provider. 
Traceback: {0!r}',\n traceback.format_exc())\n\n return items\n\n\nprovider = TorrentProjectProvider()\n", "path": "medusa/providers/torrent/html/torrentproject.py"}], "after_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for TorrentProject.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport traceback\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.common import USER_AGENT\nfrom medusa.helper.common import (\n convert_size,\n try_int,\n)\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nimport validators\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass TorrentProjectProvider(TorrentProvider):\n \"\"\"TorrentProject Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(TorrentProjectProvider, self).__init__('TorrentProject')\n\n # Credentials\n self.public = True\n\n # URLs\n self.url = 'https://torrentproject.se'\n self.custom_url = None\n\n # Proper Strings\n\n # Miscellaneous Options\n self.headers.update({'User-Agent': USER_AGENT})\n\n # Torrent Stats\n self.minseed = None\n self.minleech = None\n\n # Cache\n self.cache = tv.Cache(self, min_time=20)\n\n def search(self, search_strings, age=0, ep_obj=None):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n\n if self.custom_url:\n if not validators.url(self.custom_url):\n log.warning('Invalid custom url: {0}', self.custom_url)\n return results\n search_url = self.custom_url\n else:\n search_url = self.url\n\n search_params = {\n 'hl': 'en',\n 'num': 40,\n 'start': 0,\n 'filter': 2101,\n 'safe': 'on',\n 'orderby': 'latest',\n 's': '',\n }\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n if mode != 'RSS':\n search_params['s'] = search_string\n log.debug('Search string: {search}',\n {'search': search_string})\n\n response = self.get_url(search_url, params=search_params, returns='response')\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. 
RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n torrent_rows = html.find_all('div', class_='torrent')\n\n # Continue only if at least one release is found\n if not torrent_rows:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n for row in torrent_rows:\n anchor = row.find('a')\n\n try:\n # Removes ' torrent' in the end\n title = anchor.get('title')[:-8]\n download_url = anchor.get('href')\n if not all([title, download_url]):\n continue\n\n info_hash = download_url.split('/')[1]\n download_url = 'magnet:?xt=urn:btih:{hash}&dn={title}{trackers}'.format(\n hash=info_hash, title=title, trackers=self._custom_trackers)\n\n seeders = try_int(row.find('span', class_='bc seeders').find('span').get_text(), 1)\n leechers = try_int(row.find('span', class_='bc leechers').find('span').get_text())\n\n # Filter unseeded torrent\n if seeders < min(self.minseed, 1):\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n \" minimum seeders: {0}. Seeders: {1}\",\n title, seeders)\n continue\n\n torrent_size = row.find('span', class_='bc torrent-size').get_text().rstrip()\n size = convert_size(torrent_size) or -1\n\n pubdate_raw = row.find('span', class_='bc cated').get_text(strip=True)\n pubdate = self.parse_pubdate(pubdate_raw, human_time=True)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.error('Failed parsing provider. Traceback: {0!r}',\n traceback.format_exc())\n\n return items\n\n\nprovider = TorrentProjectProvider()\n", "path": "medusa/providers/torrent/html/torrentproject.py"}]}
2,425
145