Dataset schema (column types with observed length or value ranges):

| Column | Type | Length or value range |
| --- | --- | --- |
| problem_id | string | lengths 18 to 22 |
| source | string (categorical) | 1 distinct value |
| task_type | string (categorical) | 1 distinct value |
| in_source_id | string | lengths 13 to 58 |
| prompt | string | lengths 1.1k to 25.4k |
| golden_diff | string | lengths 145 to 5.13k |
| verification_info | string | lengths 582 to 39.1k |
| num_tokens | int64 | 271 to 4.1k |
| num_tokens_diff | int64 | 47 to 1.02k |
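For orientation, the sketch below shows one way a dataset with this schema could be loaded and a row inspected using the Hugging Face `datasets` library. The repository id is a placeholder and is not stated anywhere above (the rows only name `rasdani/github-patches` as their `source` field value, which is not necessarily the Hub id), so substitute the actual Hub id or a local path for your copy of the data.

```python
from datasets import load_dataset

# Placeholder repo id: replace with the actual Hub id or a local path to the data.
ds = load_dataset("your-org/gh-patches-debug", split="train")

row = ds[0]
print(row["problem_id"])                          # e.g. "gh_patches_debug_23222"
print(row["source"], row["task_type"])            # "rasdani/github-patches", "git_diff"
print(row["in_source_id"])                        # e.g. "pyinstaller__pyinstaller-7217"
print(row["num_tokens"], row["num_tokens_diff"])  # token counts for prompt and diff
print(row["prompt"][:300])                        # issue text plus the relevant file segments
```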
Example row 1:

problem_id: gh_patches_debug_23222
source: rasdani/github-patches
task_type: git_diff
in_source_id: pyinstaller__pyinstaller-7217
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- NullWriter has no attribute 'closed' This is similar to issue #1883 It is triggered when using the "click" library in a PyInstaller --noconsole application. A workaround is as follows: ```python # fixup somw problems from pyinstaller if "NullWriter" in str(type(sys.stdout)): sys.stdout.closed = sys.stderr.closed = False ``` I suggest adding a class attribute, closed=False to fix this. You may want to add the "errors" and "newlines" attributes as well, see the python docs. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `PyInstaller/loader/pyiboot01_bootstrap.py` Content: ``` 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2005-2022, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License (version 2 5 # or later) with exception for distributing the bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 # 9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) 10 #----------------------------------------------------------------------------- 11 12 #-- Start bootstrap process 13 # Only python built-in modules can be used. 14 15 import sys 16 17 import pyimod02_importers 18 19 # Extend Python import machinery by adding PEP302 importers to sys.meta_path. 20 pyimod02_importers.install() 21 22 #-- Bootstrap process is complete. 23 # We can use other python modules (e.g. os) 24 25 import os # noqa: E402 26 27 # Let other python modules know that the code is running in frozen mode. 28 if not hasattr(sys, 'frozen'): 29 sys.frozen = True 30 31 # sys._MEIPASS is now set in the bootloader. Hooray. 32 33 # Python 3 C-API function Py_SetPath() resets sys.prefix to empty string. Python 2 was using PYTHONHOME for sys.prefix. 34 # Let's do the same for Python 3. 35 sys.prefix = sys._MEIPASS 36 sys.exec_prefix = sys.prefix 37 38 # Python 3.3+ defines also sys.base_prefix. Let's set them too. 39 sys.base_prefix = sys.prefix 40 sys.base_exec_prefix = sys.exec_prefix 41 42 # Some packages behave differently when running inside virtual environment. E.g., IPython tries to append path 43 # VIRTUAL_ENV to sys.path. For the frozen app we want to prevent this behavior. 44 VIRTENV = 'VIRTUAL_ENV' 45 if VIRTENV in os.environ: 46 # On some platforms (e.g., AIX) 'os.unsetenv()' is unavailable and deleting the var from os.environ does not 47 # delete it from the environment. 48 os.environ[VIRTENV] = '' 49 del os.environ[VIRTENV] 50 51 # Ensure sys.path contains absolute paths. Otherwise, import of other python modules will fail when current working 52 # directory is changed by the frozen application. 53 python_path = [] 54 for pth in sys.path: 55 python_path.append(os.path.abspath(pth)) 56 sys.path = python_path 57 58 59 # Implement workaround for prints in non-console mode. In non-console mode (with "pythonw"), print randomly fails with 60 # "[errno 9] Bad file descriptor" when the printed text is flushed (e.g., buffer full); this is because the sys.stdout 61 # object is bound to an invalid file descriptor. 
62 # Python 3000 has a fix for it (http://bugs.python.org/issue1415), but we feel that a workaround in PyInstaller is a 63 # good thing, because most people first encounter this problem with PyInstaller as they do not usually run their code 64 # with "pythonw" (and it is difficult to debug, anyway). 65 class NullWriter: 66 softspace = 0 67 encoding = 'UTF-8' 68 69 def write(*args): 70 pass 71 72 def flush(*args): 73 pass 74 75 # Some packages are checking if stdout/stderr is available (e.g., youtube-dl). For details, see #1883. 76 def isatty(self): 77 return False 78 79 80 # sys.stdout/err is None in GUI mode on Windows. 81 if sys.stdout is None: 82 sys.stdout = NullWriter() 83 if sys.stderr is None: 84 sys.stderr = NullWriter() 85 86 # At least on Windows, Python seems to hook up the codecs on this import, so it is not enough to just package up all 87 # the encodings. 88 # 89 # It was also reported that without 'encodings' module, the frozen executable fails to load in some configurations: 90 # http://www.pyinstaller.org/ticket/651 91 # 92 # Importing 'encodings' module in a run-time hook is not enough, since some run-time hooks require this module, and the 93 # order of running the code from the run-time hooks is not defined. 94 try: 95 import encodings # noqa: F401 96 except ImportError: 97 pass 98 99 # In the Python interpreter 'warnings' module is imported when 'sys.warnoptions' is not empty. Mimic this behavior. 100 if sys.warnoptions: 101 import warnings # noqa: F401 102 103 # Install the hooks for ctypes 104 import pyimod03_ctypes # noqa: E402 105 106 pyimod03_ctypes.install() 107 108 # Install the hooks for pywin32 (Windows only) 109 if sys.platform.startswith('win'): 110 import pyimod04_pywin32 111 pyimod04_pywin32.install() 112 113 # Make .eggs and zipfiles available at runtime 114 d = "eggs" 115 d = os.path.join(sys._MEIPASS, d) 116 # Test if the 'eggs' directory exists. This allows us to opportunistically include this script into the packaged exe, 117 # even if no eggs were found when packaging the program. (Which may be a use-case, see issue #653). 118 if os.path.isdir(d): 119 for fn in os.listdir(d): 120 sys.path.append(os.path.join(d, fn)) 121 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/PyInstaller/loader/pyiboot01_bootstrap.py b/PyInstaller/loader/pyiboot01_bootstrap.py --- a/PyInstaller/loader/pyiboot01_bootstrap.py +++ b/PyInstaller/loader/pyiboot01_bootstrap.py @@ -55,34 +55,6 @@ python_path.append(os.path.abspath(pth)) sys.path = python_path - -# Implement workaround for prints in non-console mode. In non-console mode (with "pythonw"), print randomly fails with -# "[errno 9] Bad file descriptor" when the printed text is flushed (e.g., buffer full); this is because the sys.stdout -# object is bound to an invalid file descriptor. -# Python 3000 has a fix for it (http://bugs.python.org/issue1415), but we feel that a workaround in PyInstaller is a -# good thing, because most people first encounter this problem with PyInstaller as they do not usually run their code -# with "pythonw" (and it is difficult to debug, anyway). -class NullWriter: - softspace = 0 - encoding = 'UTF-8' - - def write(*args): - pass - - def flush(*args): - pass - - # Some packages are checking if stdout/stderr is available (e.g., youtube-dl). For details, see #1883. - def isatty(self): - return False - - -# sys.stdout/err is None in GUI mode on Windows. -if sys.stdout is None: - sys.stdout = NullWriter() -if sys.stderr is None: - sys.stderr = NullWriter() - # At least on Windows, Python seems to hook up the codecs on this import, so it is not enough to just package up all # the encodings. #
{"golden_diff": "diff --git a/PyInstaller/loader/pyiboot01_bootstrap.py b/PyInstaller/loader/pyiboot01_bootstrap.py\n--- a/PyInstaller/loader/pyiboot01_bootstrap.py\n+++ b/PyInstaller/loader/pyiboot01_bootstrap.py\n@@ -55,34 +55,6 @@\n python_path.append(os.path.abspath(pth))\n sys.path = python_path\n \n-\n-# Implement workaround for prints in non-console mode. In non-console mode (with \"pythonw\"), print randomly fails with\n-# \"[errno 9] Bad file descriptor\" when the printed text is flushed (e.g., buffer full); this is because the sys.stdout\n-# object is bound to an invalid file descriptor.\n-# Python 3000 has a fix for it (http://bugs.python.org/issue1415), but we feel that a workaround in PyInstaller is a\n-# good thing, because most people first encounter this problem with PyInstaller as they do not usually run their code\n-# with \"pythonw\" (and it is difficult to debug, anyway).\n-class NullWriter:\n- softspace = 0\n- encoding = 'UTF-8'\n-\n- def write(*args):\n- pass\n-\n- def flush(*args):\n- pass\n-\n- # Some packages are checking if stdout/stderr is available (e.g., youtube-dl). For details, see #1883.\n- def isatty(self):\n- return False\n-\n-\n-# sys.stdout/err is None in GUI mode on Windows.\n-if sys.stdout is None:\n- sys.stdout = NullWriter()\n-if sys.stderr is None:\n- sys.stderr = NullWriter()\n-\n # At least on Windows, Python seems to hook up the codecs on this import, so it is not enough to just package up all\n # the encodings.\n #\n", "issue": "NullWriter has no attribute 'closed'\nThis is similar to issue #1883\r\nIt is triggered when using the \"click\" library in a PyInstaller --noconsole application.\r\n\r\nA workaround is as follows:\r\n```python\r\n# fixup somw problems from pyinstaller\r\nif \"NullWriter\" in str(type(sys.stdout)):\r\n sys.stdout.closed = sys.stderr.closed = False\r\n```\r\nI suggest adding a class attribute, closed=False to fix this.\r\nYou may want to add the \"errors\" and \"newlines\" attributes as well, see the python docs.\r\n\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2022, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n#-- Start bootstrap process\n# Only python built-in modules can be used.\n\nimport sys\n\nimport pyimod02_importers\n\n# Extend Python import machinery by adding PEP302 importers to sys.meta_path.\npyimod02_importers.install()\n\n#-- Bootstrap process is complete.\n# We can use other python modules (e.g. os)\n\nimport os # noqa: E402\n\n# Let other python modules know that the code is running in frozen mode.\nif not hasattr(sys, 'frozen'):\n sys.frozen = True\n\n# sys._MEIPASS is now set in the bootloader. Hooray.\n\n# Python 3 C-API function Py_SetPath() resets sys.prefix to empty string. Python 2 was using PYTHONHOME for sys.prefix.\n# Let's do the same for Python 3.\nsys.prefix = sys._MEIPASS\nsys.exec_prefix = sys.prefix\n\n# Python 3.3+ defines also sys.base_prefix. Let's set them too.\nsys.base_prefix = sys.prefix\nsys.base_exec_prefix = sys.exec_prefix\n\n# Some packages behave differently when running inside virtual environment. 
E.g., IPython tries to append path\n# VIRTUAL_ENV to sys.path. For the frozen app we want to prevent this behavior.\nVIRTENV = 'VIRTUAL_ENV'\nif VIRTENV in os.environ:\n # On some platforms (e.g., AIX) 'os.unsetenv()' is unavailable and deleting the var from os.environ does not\n # delete it from the environment.\n os.environ[VIRTENV] = ''\n del os.environ[VIRTENV]\n\n# Ensure sys.path contains absolute paths. Otherwise, import of other python modules will fail when current working\n# directory is changed by the frozen application.\npython_path = []\nfor pth in sys.path:\n python_path.append(os.path.abspath(pth))\n sys.path = python_path\n\n\n# Implement workaround for prints in non-console mode. In non-console mode (with \"pythonw\"), print randomly fails with\n# \"[errno 9] Bad file descriptor\" when the printed text is flushed (e.g., buffer full); this is because the sys.stdout\n# object is bound to an invalid file descriptor.\n# Python 3000 has a fix for it (http://bugs.python.org/issue1415), but we feel that a workaround in PyInstaller is a\n# good thing, because most people first encounter this problem with PyInstaller as they do not usually run their code\n# with \"pythonw\" (and it is difficult to debug, anyway).\nclass NullWriter:\n softspace = 0\n encoding = 'UTF-8'\n\n def write(*args):\n pass\n\n def flush(*args):\n pass\n\n # Some packages are checking if stdout/stderr is available (e.g., youtube-dl). For details, see #1883.\n def isatty(self):\n return False\n\n\n# sys.stdout/err is None in GUI mode on Windows.\nif sys.stdout is None:\n sys.stdout = NullWriter()\nif sys.stderr is None:\n sys.stderr = NullWriter()\n\n# At least on Windows, Python seems to hook up the codecs on this import, so it is not enough to just package up all\n# the encodings.\n#\n# It was also reported that without 'encodings' module, the frozen executable fails to load in some configurations:\n# http://www.pyinstaller.org/ticket/651\n#\n# Importing 'encodings' module in a run-time hook is not enough, since some run-time hooks require this module, and the\n# order of running the code from the run-time hooks is not defined.\ntry:\n import encodings # noqa: F401\nexcept ImportError:\n pass\n\n# In the Python interpreter 'warnings' module is imported when 'sys.warnoptions' is not empty. Mimic this behavior.\nif sys.warnoptions:\n import warnings # noqa: F401\n\n# Install the hooks for ctypes\nimport pyimod03_ctypes # noqa: E402\n\npyimod03_ctypes.install()\n\n# Install the hooks for pywin32 (Windows only)\nif sys.platform.startswith('win'):\n import pyimod04_pywin32\n pyimod04_pywin32.install()\n\n# Make .eggs and zipfiles available at runtime\nd = \"eggs\"\nd = os.path.join(sys._MEIPASS, d)\n# Test if the 'eggs' directory exists. This allows us to opportunistically include this script into the packaged exe,\n# even if no eggs were found when packaging the program. 
(Which may be a use-case, see issue #653).\nif os.path.isdir(d):\n for fn in os.listdir(d):\n sys.path.append(os.path.join(d, fn))\n", "path": "PyInstaller/loader/pyiboot01_bootstrap.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2022, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n#-- Start bootstrap process\n# Only python built-in modules can be used.\n\nimport sys\n\nimport pyimod02_importers\n\n# Extend Python import machinery by adding PEP302 importers to sys.meta_path.\npyimod02_importers.install()\n\n#-- Bootstrap process is complete.\n# We can use other python modules (e.g. os)\n\nimport os # noqa: E402\n\n# Let other python modules know that the code is running in frozen mode.\nif not hasattr(sys, 'frozen'):\n sys.frozen = True\n\n# sys._MEIPASS is now set in the bootloader. Hooray.\n\n# Python 3 C-API function Py_SetPath() resets sys.prefix to empty string. Python 2 was using PYTHONHOME for sys.prefix.\n# Let's do the same for Python 3.\nsys.prefix = sys._MEIPASS\nsys.exec_prefix = sys.prefix\n\n# Python 3.3+ defines also sys.base_prefix. Let's set them too.\nsys.base_prefix = sys.prefix\nsys.base_exec_prefix = sys.exec_prefix\n\n# Some packages behave differently when running inside virtual environment. E.g., IPython tries to append path\n# VIRTUAL_ENV to sys.path. For the frozen app we want to prevent this behavior.\nVIRTENV = 'VIRTUAL_ENV'\nif VIRTENV in os.environ:\n # On some platforms (e.g., AIX) 'os.unsetenv()' is unavailable and deleting the var from os.environ does not\n # delete it from the environment.\n os.environ[VIRTENV] = ''\n del os.environ[VIRTENV]\n\n# Ensure sys.path contains absolute paths. Otherwise, import of other python modules will fail when current working\n# directory is changed by the frozen application.\npython_path = []\nfor pth in sys.path:\n python_path.append(os.path.abspath(pth))\n sys.path = python_path\n\n# At least on Windows, Python seems to hook up the codecs on this import, so it is not enough to just package up all\n# the encodings.\n#\n# It was also reported that without 'encodings' module, the frozen executable fails to load in some configurations:\n# http://www.pyinstaller.org/ticket/651\n#\n# Importing 'encodings' module in a run-time hook is not enough, since some run-time hooks require this module, and the\n# order of running the code from the run-time hooks is not defined.\ntry:\n import encodings # noqa: F401\nexcept ImportError:\n pass\n\n# In the Python interpreter 'warnings' module is imported when 'sys.warnoptions' is not empty. Mimic this behavior.\nif sys.warnoptions:\n import warnings # noqa: F401\n\n# Install the hooks for ctypes\nimport pyimod03_ctypes # noqa: E402\n\npyimod03_ctypes.install()\n\n# Install the hooks for pywin32 (Windows only)\nif sys.platform.startswith('win'):\n import pyimod04_pywin32\n pyimod04_pywin32.install()\n\n# Make .eggs and zipfiles available at runtime\nd = \"eggs\"\nd = os.path.join(sys._MEIPASS, d)\n# Test if the 'eggs' directory exists. 
This allows us to opportunistically include this script into the packaged exe,\n# even if no eggs were found when packaging the program. (Which may be a use-case, see issue #653).\nif os.path.isdir(d):\n for fn in os.listdir(d):\n sys.path.append(os.path.join(d, fn))\n", "path": "PyInstaller/loader/pyiboot01_bootstrap.py"}]}
num_tokens: 1,774
num_tokens_diff: 402
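Because `verification_info` is a string column whose contents in the row above look like JSON with the keys `golden_diff`, `issue`, `before_files`, and `after_files`, it can be decoded per row to recover the issue text, the reference patch, and the before/after file contents. The sketch below is a minimal helper under that assumption; applying a candidate patch and comparing the result to the after-state is left to whatever diff/patch tooling you prefer, since the data itself does not prescribe one.

```python
import json

def decode_verification_info(row):
    """Decode one row's verification_info JSON string into its parts."""
    info = json.loads(row["verification_info"])
    golden_diff = info["golden_diff"]   # unified diff, mirrors the golden_diff column
    issue = info["issue"]               # the GitHub issue text that also appears in the prompt
    # Map each file path to its content before and after the reference patch.
    before = {f["path"]: f["content"] for f in info["before_files"]}
    after = {f["path"]: f["content"] for f in info["after_files"]}
    return golden_diff, issue, before, after
```

A candidate patch can then be checked by applying it to the `before` contents and comparing the result against `after`.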
Example row 2:

problem_id: gh_patches_debug_12124
source: rasdani/github-patches
task_type: git_diff
in_source_id: facebookresearch__fairscale-830
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- shard_model does not respect shard_count. ## 🐛 Bug `autoshard.shard_model()` does not respect `shard_count`. See [comments](https://github.com/facebookresearch/fairscale/pull/817/files#r733237403) in https://github.com/facebookresearch/fairscale/pull/817 for more context and how to reproduce. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `fairscale/experimental/nn/auto_shard.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. 2 # 3 # This source code is licensed under the BSD license found in the 4 # LICENSE file in the root directory of this source tree. 5 6 import logging 7 from typing import Dict, List, Set 8 9 import torch 10 import torch.fx 11 from torch.fx.node import Node 12 13 14 def _get_count(param_count: Dict, node_name: str) -> int: 15 """Identify different mutations of a given node name.""" 16 # TODO(anj): This is not very stable since it is possible that the name 17 # may not be in the same format. Is there another way to identify nodes 18 # in a graph? 19 if node_name in param_count: 20 return param_count[node_name] 21 elif node_name.split("_")[0] in param_count: 22 return param_count[node_name.split("_")[0]] 23 else: 24 raise RuntimeError(f"Unable to find match between param {param_count} and node {node_name}") 25 26 27 def _create_shard_to_param_count(param_count: Dict, node_name_to_shard_id: Dict) -> Dict: 28 """Utility to create a map from shard id to param count using existing state.""" 29 30 shard_to_param_count: Dict[int, int] = {} 31 for node_name in node_name_to_shard_id.keys(): 32 try: 33 count = _get_count(param_count, node_name) 34 except RuntimeError: 35 continue 36 if node_name_to_shard_id[node_name] in shard_to_param_count: 37 shard_to_param_count[node_name_to_shard_id[node_name]] += count 38 else: 39 shard_to_param_count[node_name_to_shard_id[node_name]] = count 40 return shard_to_param_count 41 42 43 def _split_nodes(traced_graph_module: torch.fx.GraphModule, shard_count: int = 3) -> Dict: 44 """Utility used to trace a graph and identify shard cutpoints.""" 45 46 node_name_to_shard_id: Dict[str, int] = {} 47 shard_id = 0 48 nodes_so_far = [] 49 param_count: Dict[str, int] = {} 50 shard_to_param_count = {} 51 52 # Find the total number of params in the model and 53 # the number of params per shard we are aiming for. 54 for name, module in traced_graph_module.named_modules(): 55 if "." in name: 56 continue 57 param_count[name] = sum([x.numel() for x in module.parameters()]) 58 logging.info(f"Total number of params are {param_count['']}") 59 per_shard_param = param_count[""] // shard_count 60 logging.info(f"Per shard param count {per_shard_param}") 61 62 for node in traced_graph_module.graph.nodes: 63 if node.op == "placeholder": 64 node_name_to_shard_id[node.name] = shard_id 65 nodes_so_far.append(node.name) 66 elif node.op in ["get_attr", "call_function", "call_method", "call_module"]: 67 68 min_shard_id = shard_id 69 min_node_name = "" 70 # For each of the args of a given node, find the arg that is not the 71 # last node we traversed. This is to help us find skip connections 72 # across shards. 73 for arg in node.args: 74 # If the node has args that are inputs to the forward function, they 75 # may not have explicit names. 
76 if not hasattr(arg, "name"): 77 continue 78 79 if arg.name in node_name_to_shard_id and arg.name != nodes_so_far[-1]: 80 if node_name_to_shard_id[arg.name] < min_shard_id: 81 min_shard_id = node_name_to_shard_id[arg.name] 82 min_node_name = arg.name 83 84 # If there is an input that is not from the previous shard, 85 # we collapse all the shards in between to be part of 1 shard. 86 # and update the param count per shard accordingly. 87 if min_shard_id < shard_id: 88 for node_name in reversed(nodes_so_far): 89 node_name_to_shard_id[node_name] = min_shard_id 90 if node_name == min_node_name: 91 break 92 shard_id = min_shard_id 93 # TODO(anj-s): Find a way to raise an error early if this can cause OOM errors. 94 shard_to_param_count = _create_shard_to_param_count(param_count, node_name_to_shard_id) 95 96 # Update state that is tracking node -> shard id and shard id -> param count. 97 node_name_to_shard_id[node.name] = shard_id 98 nodes_so_far.append(node.name) 99 # TODO(anj): This could just be an update, we don't need to recreate the map. 100 shard_to_param_count = _create_shard_to_param_count(param_count, node_name_to_shard_id) 101 # If we have gone over the number of params per shard count that we want to 102 # achieve, we should add a new shard. 103 # The shard_id may not have been updated in the map if we are at a node that does not 104 # have params. 105 if shard_id in shard_to_param_count and shard_to_param_count[shard_id] > per_shard_param: 106 shard_id += 1 107 elif node.op == "output": 108 break 109 return node_name_to_shard_id 110 111 112 class _ExtendedLeafTracer(torch.fx.Tracer): 113 """Tracer with an extended set of leaf nn.Modules.""" 114 115 def __init__(self, leaf_modules: Set[torch.nn.Module]): 116 """Initializes a new _ExtendedLeafTracer object. 117 118 Args: 119 leaf_modules: The set of extra nn.Modules instances which will not be traced 120 through but instead considered to be leaves. 121 """ 122 super().__init__() 123 self.leaf_modules = leaf_modules 124 125 def is_leaf_module(self, m: torch.nn.Module, model_qualified_name: str) -> bool: 126 return super().is_leaf_module(m, model_qualified_name) or m in self.leaf_modules 127 128 129 # TODO(ehotaj): Extend this method to wrap at the least granular level. One way to do 130 # would be to wrap the Module tree bottom up, first wrapping untracable children and 131 # only wrapping parents if they are also untracable. 132 def _trace(model: torch.nn.Module) -> torch.fx.GraphModule: 133 """Traces the given model and automatically wraps untracable modules into leaves.""" 134 leaf_modules = set() 135 tracer = _ExtendedLeafTracer(leaf_modules) 136 for name, module in model.named_modules(): 137 # TODO(ehotaj): The default is_leaf_module includes everything in torch.nn. 138 # This means that some coarse modules like nn.TransformerEncoder are treated 139 # as leaves, not traced, and are unable to be sharded. We may want to extend our 140 # sharding code to trace through these modules as well. 141 if tracer.is_leaf_module(module, ""): 142 continue 143 try: 144 tracer.trace(module) 145 except (TypeError, torch.fx.proxy.TraceError): 146 leaf_modules.add(module) 147 tracer = _ExtendedLeafTracer(leaf_modules) 148 graph = tracer.trace(model) 149 return torch.fx.GraphModule(model, graph) 150 151 152 def shard_model(model: torch.nn.Module, shard_count: int = 3) -> List[torch.fx.GraphModule]: 153 """Utility used to shard a model using torch.fx. 
154 155 This function traces the model twice in an attempt to identify the 156 right cutpoints and then shard the model. In the first pass we calculate 157 the number of parameters as we are tracing the graph and mark nodes at 158 which we might want to create a new module. In the second pass we 159 modify the graph by inserting placeholders and output nodes to essentially 160 shard the graph. 161 162 We don't support skip connections between shards. This means that all 163 input and output is self contained within a given shard. A node from 164 shard 1 cannot be an input to a node from shard 3. We expect all inputs 165 to a given shard to be coming from the last node in the previous shard. 166 This means that we may not be able to shard models by the specified 167 `shard_count` mentioned by the user. 168 169 Args: 170 model (nn.Module): Model to be sharded as specified by the device count. 171 172 shard_count (int): Number of shards that we want to split the model into. 173 174 """ 175 module_list: List[torch.fx.GraphModule] = [] 176 num_graphs = 0 177 new_graph = torch.fx.Graph() # type: ignore 178 env: Dict[str, Node] = {} 179 new_input_node = None 180 181 traced_graph_module = _trace(model) 182 183 # This is the first pass where we attempt to get a map of where 184 # we need to insert placeholder and output nodes. 185 node_name_to_shard_id = _split_nodes(traced_graph_module, shard_count=shard_count) 186 187 # dummy value which indicates that this is the first node. 188 prev_shard_id = 1000 189 prev_node = None 190 for node in traced_graph_module.graph.nodes: 191 # If the current node is in the next shard, we insert an output node. 192 # A new graph is created and a placeholder is added for the next shard. 193 if node.name in node_name_to_shard_id and prev_shard_id < node_name_to_shard_id[node.name]: 194 assert prev_node, "prev_node cannot be None" 195 196 with new_graph.inserting_after(prev_node): 197 new_graph.output(env[prev_node.name]) 198 num_graphs += 1 199 module_list.append(torch.fx.GraphModule(model, new_graph)) 200 new_graph = torch.fx.Graph() 201 node_name = "placeholder" + str(num_graphs) 202 pl_node = new_graph.create_node("placeholder", node_name) 203 env[node_name] = pl_node 204 new_input_node = pl_node 205 206 if new_input_node is not None: 207 # Account for a placeholder in the new graph. 208 node.args = (new_input_node,) 209 new_input_node = None 210 if node.op in ["placeholder", "get_attr", "call_function", "call_method", "call_module"]: 211 # Copy the nodes from the existing graph to the new graph. 212 new_node = new_graph.node_copy(node, lambda x: env[x.name]) 213 env[node.name] = new_node 214 elif node.op == "output": 215 # If this is the last node, we should add an output 216 # node and add the last graph to the list. 217 assert prev_node, "prev_node cannot be None" 218 219 with new_graph.inserting_after(prev_node): 220 new_graph.output(env[prev_node.name]) 221 module_list.append(torch.fx.GraphModule(model, new_graph)) 222 break 223 prev_node = new_node 224 prev_shard_id = node_name_to_shard_id[node.name] 225 226 return module_list 227 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/fairscale/experimental/nn/auto_shard.py b/fairscale/experimental/nn/auto_shard.py --- a/fairscale/experimental/nn/auto_shard.py +++ b/fairscale/experimental/nn/auto_shard.py @@ -52,8 +52,7 @@ # Find the total number of params in the model and # the number of params per shard we are aiming for. for name, module in traced_graph_module.named_modules(): - if "." in name: - continue + name = name.replace(".", "_") param_count[name] = sum([x.numel() for x in module.parameters()]) logging.info(f"Total number of params are {param_count['']}") per_shard_param = param_count[""] // shard_count
{"golden_diff": "diff --git a/fairscale/experimental/nn/auto_shard.py b/fairscale/experimental/nn/auto_shard.py\n--- a/fairscale/experimental/nn/auto_shard.py\n+++ b/fairscale/experimental/nn/auto_shard.py\n@@ -52,8 +52,7 @@\n # Find the total number of params in the model and\n # the number of params per shard we are aiming for.\n for name, module in traced_graph_module.named_modules():\n- if \".\" in name:\n- continue\n+ name = name.replace(\".\", \"_\")\n param_count[name] = sum([x.numel() for x in module.parameters()])\n logging.info(f\"Total number of params are {param_count['']}\")\n per_shard_param = param_count[\"\"] // shard_count\n", "issue": "shard_model does not respect shard_count.\n## \ud83d\udc1b Bug\r\n\r\n`autoshard.shard_model()` does not respect `shard_count`. See [comments](https://github.com/facebookresearch/fairscale/pull/817/files#r733237403) in https://github.com/facebookresearch/fairscale/pull/817 for more context and how to reproduce. \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nfrom typing import Dict, List, Set\n\nimport torch\nimport torch.fx\nfrom torch.fx.node import Node\n\n\ndef _get_count(param_count: Dict, node_name: str) -> int:\n \"\"\"Identify different mutations of a given node name.\"\"\"\n # TODO(anj): This is not very stable since it is possible that the name\n # may not be in the same format. Is there another way to identify nodes\n # in a graph?\n if node_name in param_count:\n return param_count[node_name]\n elif node_name.split(\"_\")[0] in param_count:\n return param_count[node_name.split(\"_\")[0]]\n else:\n raise RuntimeError(f\"Unable to find match between param {param_count} and node {node_name}\")\n\n\ndef _create_shard_to_param_count(param_count: Dict, node_name_to_shard_id: Dict) -> Dict:\n \"\"\"Utility to create a map from shard id to param count using existing state.\"\"\"\n\n shard_to_param_count: Dict[int, int] = {}\n for node_name in node_name_to_shard_id.keys():\n try:\n count = _get_count(param_count, node_name)\n except RuntimeError:\n continue\n if node_name_to_shard_id[node_name] in shard_to_param_count:\n shard_to_param_count[node_name_to_shard_id[node_name]] += count\n else:\n shard_to_param_count[node_name_to_shard_id[node_name]] = count\n return shard_to_param_count\n\n\ndef _split_nodes(traced_graph_module: torch.fx.GraphModule, shard_count: int = 3) -> Dict:\n \"\"\"Utility used to trace a graph and identify shard cutpoints.\"\"\"\n\n node_name_to_shard_id: Dict[str, int] = {}\n shard_id = 0\n nodes_so_far = []\n param_count: Dict[str, int] = {}\n shard_to_param_count = {}\n\n # Find the total number of params in the model and\n # the number of params per shard we are aiming for.\n for name, module in traced_graph_module.named_modules():\n if \".\" in name:\n continue\n param_count[name] = sum([x.numel() for x in module.parameters()])\n logging.info(f\"Total number of params are {param_count['']}\")\n per_shard_param = param_count[\"\"] // shard_count\n logging.info(f\"Per shard param count {per_shard_param}\")\n\n for node in traced_graph_module.graph.nodes:\n if node.op == \"placeholder\":\n node_name_to_shard_id[node.name] = shard_id\n nodes_so_far.append(node.name)\n elif node.op in [\"get_attr\", \"call_function\", \"call_method\", \"call_module\"]:\n\n min_shard_id = shard_id\n min_node_name = \"\"\n # For each of the args of a 
given node, find the arg that is not the\n # last node we traversed. This is to help us find skip connections\n # across shards.\n for arg in node.args:\n # If the node has args that are inputs to the forward function, they\n # may not have explicit names.\n if not hasattr(arg, \"name\"):\n continue\n\n if arg.name in node_name_to_shard_id and arg.name != nodes_so_far[-1]:\n if node_name_to_shard_id[arg.name] < min_shard_id:\n min_shard_id = node_name_to_shard_id[arg.name]\n min_node_name = arg.name\n\n # If there is an input that is not from the previous shard,\n # we collapse all the shards in between to be part of 1 shard.\n # and update the param count per shard accordingly.\n if min_shard_id < shard_id:\n for node_name in reversed(nodes_so_far):\n node_name_to_shard_id[node_name] = min_shard_id\n if node_name == min_node_name:\n break\n shard_id = min_shard_id\n # TODO(anj-s): Find a way to raise an error early if this can cause OOM errors.\n shard_to_param_count = _create_shard_to_param_count(param_count, node_name_to_shard_id)\n\n # Update state that is tracking node -> shard id and shard id -> param count.\n node_name_to_shard_id[node.name] = shard_id\n nodes_so_far.append(node.name)\n # TODO(anj): This could just be an update, we don't need to recreate the map.\n shard_to_param_count = _create_shard_to_param_count(param_count, node_name_to_shard_id)\n # If we have gone over the number of params per shard count that we want to\n # achieve, we should add a new shard.\n # The shard_id may not have been updated in the map if we are at a node that does not\n # have params.\n if shard_id in shard_to_param_count and shard_to_param_count[shard_id] > per_shard_param:\n shard_id += 1\n elif node.op == \"output\":\n break\n return node_name_to_shard_id\n\n\nclass _ExtendedLeafTracer(torch.fx.Tracer):\n \"\"\"Tracer with an extended set of leaf nn.Modules.\"\"\"\n\n def __init__(self, leaf_modules: Set[torch.nn.Module]):\n \"\"\"Initializes a new _ExtendedLeafTracer object.\n\n Args:\n leaf_modules: The set of extra nn.Modules instances which will not be traced\n through but instead considered to be leaves.\n \"\"\"\n super().__init__()\n self.leaf_modules = leaf_modules\n\n def is_leaf_module(self, m: torch.nn.Module, model_qualified_name: str) -> bool:\n return super().is_leaf_module(m, model_qualified_name) or m in self.leaf_modules\n\n\n# TODO(ehotaj): Extend this method to wrap at the least granular level. One way to do\n# would be to wrap the Module tree bottom up, first wrapping untracable children and\n# only wrapping parents if they are also untracable.\ndef _trace(model: torch.nn.Module) -> torch.fx.GraphModule:\n \"\"\"Traces the given model and automatically wraps untracable modules into leaves.\"\"\"\n leaf_modules = set()\n tracer = _ExtendedLeafTracer(leaf_modules)\n for name, module in model.named_modules():\n # TODO(ehotaj): The default is_leaf_module includes everything in torch.nn.\n # This means that some coarse modules like nn.TransformerEncoder are treated\n # as leaves, not traced, and are unable to be sharded. 
We may want to extend our\n # sharding code to trace through these modules as well.\n if tracer.is_leaf_module(module, \"\"):\n continue\n try:\n tracer.trace(module)\n except (TypeError, torch.fx.proxy.TraceError):\n leaf_modules.add(module)\n tracer = _ExtendedLeafTracer(leaf_modules)\n graph = tracer.trace(model)\n return torch.fx.GraphModule(model, graph)\n\n\ndef shard_model(model: torch.nn.Module, shard_count: int = 3) -> List[torch.fx.GraphModule]:\n \"\"\"Utility used to shard a model using torch.fx.\n\n This function traces the model twice in an attempt to identify the\n right cutpoints and then shard the model. In the first pass we calculate\n the number of parameters as we are tracing the graph and mark nodes at\n which we might want to create a new module. In the second pass we\n modify the graph by inserting placeholders and output nodes to essentially\n shard the graph.\n\n We don't support skip connections between shards. This means that all\n input and output is self contained within a given shard. A node from\n shard 1 cannot be an input to a node from shard 3. We expect all inputs\n to a given shard to be coming from the last node in the previous shard.\n This means that we may not be able to shard models by the specified\n `shard_count` mentioned by the user.\n\n Args:\n model (nn.Module): Model to be sharded as specified by the device count.\n\n shard_count (int): Number of shards that we want to split the model into.\n\n \"\"\"\n module_list: List[torch.fx.GraphModule] = []\n num_graphs = 0\n new_graph = torch.fx.Graph() # type: ignore\n env: Dict[str, Node] = {}\n new_input_node = None\n\n traced_graph_module = _trace(model)\n\n # This is the first pass where we attempt to get a map of where\n # we need to insert placeholder and output nodes.\n node_name_to_shard_id = _split_nodes(traced_graph_module, shard_count=shard_count)\n\n # dummy value which indicates that this is the first node.\n prev_shard_id = 1000\n prev_node = None\n for node in traced_graph_module.graph.nodes:\n # If the current node is in the next shard, we insert an output node.\n # A new graph is created and a placeholder is added for the next shard.\n if node.name in node_name_to_shard_id and prev_shard_id < node_name_to_shard_id[node.name]:\n assert prev_node, \"prev_node cannot be None\"\n\n with new_graph.inserting_after(prev_node):\n new_graph.output(env[prev_node.name])\n num_graphs += 1\n module_list.append(torch.fx.GraphModule(model, new_graph))\n new_graph = torch.fx.Graph()\n node_name = \"placeholder\" + str(num_graphs)\n pl_node = new_graph.create_node(\"placeholder\", node_name)\n env[node_name] = pl_node\n new_input_node = pl_node\n\n if new_input_node is not None:\n # Account for a placeholder in the new graph.\n node.args = (new_input_node,)\n new_input_node = None\n if node.op in [\"placeholder\", \"get_attr\", \"call_function\", \"call_method\", \"call_module\"]:\n # Copy the nodes from the existing graph to the new graph.\n new_node = new_graph.node_copy(node, lambda x: env[x.name])\n env[node.name] = new_node\n elif node.op == \"output\":\n # If this is the last node, we should add an output\n # node and add the last graph to the list.\n assert prev_node, \"prev_node cannot be None\"\n\n with new_graph.inserting_after(prev_node):\n new_graph.output(env[prev_node.name])\n module_list.append(torch.fx.GraphModule(model, new_graph))\n break\n prev_node = new_node\n prev_shard_id = node_name_to_shard_id[node.name]\n\n return module_list\n", "path": 
"fairscale/experimental/nn/auto_shard.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nfrom typing import Dict, List, Set\n\nimport torch\nimport torch.fx\nfrom torch.fx.node import Node\n\n\ndef _get_count(param_count: Dict, node_name: str) -> int:\n \"\"\"Identify different mutations of a given node name.\"\"\"\n # TODO(anj): This is not very stable since it is possible that the name\n # may not be in the same format. Is there another way to identify nodes\n # in a graph?\n if node_name in param_count:\n return param_count[node_name]\n elif node_name.split(\"_\")[0] in param_count:\n return param_count[node_name.split(\"_\")[0]]\n else:\n raise RuntimeError(f\"Unable to find match between param {param_count} and node {node_name}\")\n\n\ndef _create_shard_to_param_count(param_count: Dict, node_name_to_shard_id: Dict) -> Dict:\n \"\"\"Utility to create a map from shard id to param count using existing state.\"\"\"\n\n shard_to_param_count: Dict[int, int] = {}\n for node_name in node_name_to_shard_id.keys():\n try:\n count = _get_count(param_count, node_name)\n except RuntimeError:\n continue\n if node_name_to_shard_id[node_name] in shard_to_param_count:\n shard_to_param_count[node_name_to_shard_id[node_name]] += count\n else:\n shard_to_param_count[node_name_to_shard_id[node_name]] = count\n return shard_to_param_count\n\n\ndef _split_nodes(traced_graph_module: torch.fx.GraphModule, shard_count: int = 3) -> Dict:\n \"\"\"Utility used to trace a graph and identify shard cutpoints.\"\"\"\n\n node_name_to_shard_id: Dict[str, int] = {}\n shard_id = 0\n nodes_so_far = []\n param_count: Dict[str, int] = {}\n shard_to_param_count = {}\n\n # Find the total number of params in the model and\n # the number of params per shard we are aiming for.\n for name, module in traced_graph_module.named_modules():\n name = name.replace(\".\", \"_\")\n param_count[name] = sum([x.numel() for x in module.parameters()])\n logging.info(f\"Total number of params are {param_count['']}\")\n per_shard_param = param_count[\"\"] // shard_count\n logging.info(f\"Per shard param count {per_shard_param}\")\n\n for node in traced_graph_module.graph.nodes:\n if node.op == \"placeholder\":\n node_name_to_shard_id[node.name] = shard_id\n nodes_so_far.append(node.name)\n elif node.op in [\"get_attr\", \"call_function\", \"call_method\", \"call_module\"]:\n\n min_shard_id = shard_id\n min_node_name = \"\"\n # For each of the args of a given node, find the arg that is not the\n # last node we traversed. 
This is to help us find skip connections\n # across shards.\n for arg in node.args:\n # If the node has args that are inputs to the forward function, they\n # may not have explicit names.\n if not hasattr(arg, \"name\"):\n continue\n\n if arg.name in node_name_to_shard_id and arg.name != nodes_so_far[-1]:\n if node_name_to_shard_id[arg.name] < min_shard_id:\n min_shard_id = node_name_to_shard_id[arg.name]\n min_node_name = arg.name\n\n # If there is an input that is not from the previous shard,\n # we collapse all the shards in between to be part of 1 shard.\n # and update the param count per shard accordingly.\n if min_shard_id < shard_id:\n for node_name in reversed(nodes_so_far):\n node_name_to_shard_id[node_name] = min_shard_id\n if node_name == min_node_name:\n break\n shard_id = min_shard_id\n # TODO(anj-s): Find a way to raise an error early if this can cause OOM errors.\n shard_to_param_count = _create_shard_to_param_count(param_count, node_name_to_shard_id)\n\n # Update state that is tracking node -> shard id and shard id -> param count.\n node_name_to_shard_id[node.name] = shard_id\n nodes_so_far.append(node.name)\n # TODO(anj): This could just be an update, we don't need to recreate the map.\n shard_to_param_count = _create_shard_to_param_count(param_count, node_name_to_shard_id)\n # If we have gone over the number of params per shard count that we want to\n # achieve, we should add a new shard.\n # The shard_id may not have been updated in the map if we are at a node that does not\n # have params.\n if shard_id in shard_to_param_count and shard_to_param_count[shard_id] > per_shard_param:\n shard_id += 1\n elif node.op == \"output\":\n break\n return node_name_to_shard_id\n\n\nclass _ExtendedLeafTracer(torch.fx.Tracer):\n \"\"\"Tracer with an extended set of leaf nn.Modules.\"\"\"\n\n def __init__(self, leaf_modules: Set[torch.nn.Module]):\n \"\"\"Initializes a new _ExtendedLeafTracer object.\n\n Args:\n leaf_modules: The set of extra nn.Modules instances which will not be traced\n through but instead considered to be leaves.\n \"\"\"\n super().__init__()\n self.leaf_modules = leaf_modules\n\n def is_leaf_module(self, m: torch.nn.Module, model_qualified_name: str) -> bool:\n return super().is_leaf_module(m, model_qualified_name) or m in self.leaf_modules\n\n\n# TODO(ehotaj): Extend this method to wrap at the least granular level. One way to do\n# would be to wrap the Module tree bottom up, first wrapping untracable children and\n# only wrapping parents if they are also untracable.\ndef _trace(model: torch.nn.Module) -> torch.fx.GraphModule:\n \"\"\"Traces the given model and automatically wraps untracable modules into leaves.\"\"\"\n leaf_modules = set()\n tracer = _ExtendedLeafTracer(leaf_modules)\n for name, module in model.named_modules():\n # TODO(ehotaj): The default is_leaf_module includes everything in torch.nn.\n # This means that some coarse modules like nn.TransformerEncoder are treated\n # as leaves, not traced, and are unable to be sharded. 
We may want to extend our\n # sharding code to trace through these modules as well.\n if tracer.is_leaf_module(module, \"\"):\n continue\n try:\n tracer.trace(module)\n except (TypeError, torch.fx.proxy.TraceError):\n leaf_modules.add(module)\n tracer = _ExtendedLeafTracer(leaf_modules)\n graph = tracer.trace(model)\n return torch.fx.GraphModule(model, graph)\n\n\ndef shard_model(model: torch.nn.Module, shard_count: int = 3) -> List[torch.fx.GraphModule]:\n \"\"\"Utility used to shard a model using torch.fx.\n\n This function traces the model twice in an attempt to identify the\n right cutpoints and then shard the model. In the first pass we calculate\n the number of parameters as we are tracing the graph and mark nodes at\n which we might want to create a new module. In the second pass we\n modify the graph by inserting placeholders and output nodes to essentially\n shard the graph.\n\n We don't support skip connections between shards. This means that all\n input and output is self contained within a given shard. A node from\n shard 1 cannot be an input to a node from shard 3. We expect all inputs\n to a given shard to be coming from the last node in the previous shard.\n This means that we may not be able to shard models by the specified\n `shard_count` mentioned by the user.\n\n Args:\n model (nn.Module): Model to be sharded as specified by the device count.\n\n shard_count (int): Number of shards that we want to split the model into.\n\n \"\"\"\n module_list: List[torch.fx.GraphModule] = []\n num_graphs = 0\n new_graph = torch.fx.Graph() # type: ignore\n env: Dict[str, Node] = {}\n new_input_node = None\n\n traced_graph_module = _trace(model)\n\n # This is the first pass where we attempt to get a map of where\n # we need to insert placeholder and output nodes.\n node_name_to_shard_id = _split_nodes(traced_graph_module, shard_count=shard_count)\n\n # dummy value which indicates that this is the first node.\n prev_shard_id = 1000\n prev_node = None\n for node in traced_graph_module.graph.nodes:\n # If the current node is in the next shard, we insert an output node.\n # A new graph is created and a placeholder is added for the next shard.\n if node.name in node_name_to_shard_id and prev_shard_id < node_name_to_shard_id[node.name]:\n assert prev_node, \"prev_node cannot be None\"\n\n with new_graph.inserting_after(prev_node):\n new_graph.output(env[prev_node.name])\n num_graphs += 1\n module_list.append(torch.fx.GraphModule(model, new_graph))\n new_graph = torch.fx.Graph()\n node_name = \"placeholder\" + str(num_graphs)\n pl_node = new_graph.create_node(\"placeholder\", node_name)\n env[node_name] = pl_node\n new_input_node = pl_node\n\n if new_input_node is not None:\n # Account for a placeholder in the new graph.\n node.args = (new_input_node,)\n new_input_node = None\n if node.op in [\"placeholder\", \"get_attr\", \"call_function\", \"call_method\", \"call_module\"]:\n # Copy the nodes from the existing graph to the new graph.\n new_node = new_graph.node_copy(node, lambda x: env[x.name])\n env[node.name] = new_node\n elif node.op == \"output\":\n # If this is the last node, we should add an output\n # node and add the last graph to the list.\n assert prev_node, \"prev_node cannot be None\"\n\n with new_graph.inserting_after(prev_node):\n new_graph.output(env[prev_node.name])\n module_list.append(torch.fx.GraphModule(model, new_graph))\n break\n prev_node = new_node\n prev_shard_id = node_name_to_shard_id[node.name]\n\n return module_list\n", "path": 
"fairscale/experimental/nn/auto_shard.py"}]}
num_tokens: 3,256
num_tokens_diff: 174
Example row 3:

problem_id: gh_patches_debug_39414
source: rasdani/github-patches
task_type: git_diff
in_source_id: buildbot__buildbot-4467
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- SecretInVault secret provider integration tests no longer work The test `buildbot.test.integration.test_integration_secrets_with_vault.SecretsConfig.test_secret` no longer works. See https://travis-ci.org/buildbot/buildbot/jobs/464401540. Looks like the default kv engine shipping with the `vault` engine is now v2 which we don't support yet. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `master/buildbot/secrets/providers/vault.py` Content: ``` 1 # This file is part of Buildbot. Buildbot is free software: you can 2 # redistribute it and/or modify it under the terms of the GNU General Public 3 # License as published by the Free Software Foundation, version 2. 4 # 5 # This program is distributed in the hope that it will be useful, but WITHOUT 6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more 8 # details. 9 # 10 # You should have received a copy of the GNU General Public License along with 11 # this program; if not, write to the Free Software Foundation, Inc., 51 12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 13 # 14 # Copyright Buildbot Team Members 15 """ 16 vault based providers 17 """ 18 19 from __future__ import absolute_import 20 from __future__ import print_function 21 22 from twisted.internet import defer 23 24 from buildbot import config 25 from buildbot.secrets.providers.base import SecretProviderBase 26 from buildbot.util import httpclientservice 27 28 29 class HashiCorpVaultSecretProvider(SecretProviderBase): 30 """ 31 basic provider where each secret is stored in Vault 32 """ 33 34 name = 'SecretInVault' 35 36 def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None): 37 if not isinstance(vaultServer, str): 38 config.error("vaultServer must be a string while it is %s" % (type(vaultServer,))) 39 if not isinstance(vaultToken, str): 40 config.error("vaultToken must be a string while it is %s" % (type(vaultToken,))) 41 42 @defer.inlineCallbacks 43 def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None): 44 if secretsmount is None: 45 self.secretsmount = "secret" 46 else: 47 self.secretsmount = secretsmount 48 self.vaultServer = vaultServer 49 self.vaultToken = vaultToken 50 if vaultServer.endswith('/'): 51 vaultServer = vaultServer[:-1] 52 self._http = yield httpclientservice.HTTPClientService.getService( 53 self.master, self.vaultServer, headers={'X-Vault-Token': self.vaultToken}) 54 55 @defer.inlineCallbacks 56 def get(self, entry): 57 """ 58 get the value from vault secret backend 59 """ 60 path = self.secretsmount + '/' + entry 61 proj = yield self._http.get('/v1/{0}'.format(path)) 62 code = yield proj.code 63 if code != 200: 64 raise KeyError("The key %s does not exist in Vault provider: request" 65 " return code:%d." % (entry, code)) 66 json = yield proj.json() 67 defer.returnValue(json.get(u'data', {}).get('value')) 68 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/master/buildbot/secrets/providers/vault.py b/master/buildbot/secrets/providers/vault.py --- a/master/buildbot/secrets/providers/vault.py +++ b/master/buildbot/secrets/providers/vault.py @@ -28,25 +28,30 @@ class HashiCorpVaultSecretProvider(SecretProviderBase): """ - basic provider where each secret is stored in Vault + basic provider where each secret is stored in Vault KV secret engine """ name = 'SecretInVault' - def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None): + def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None, + apiVersion=1): if not isinstance(vaultServer, str): config.error("vaultServer must be a string while it is %s" % (type(vaultServer,))) if not isinstance(vaultToken, str): config.error("vaultToken must be a string while it is %s" % (type(vaultToken,))) + if apiVersion not in [1, 2]: + config.error("apiVersion %s is not supported" % apiVersion) @defer.inlineCallbacks - def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None): + def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None, + apiVersion=1): if secretsmount is None: self.secretsmount = "secret" else: self.secretsmount = secretsmount self.vaultServer = vaultServer self.vaultToken = vaultToken + self.apiVersion = apiVersion if vaultServer.endswith('/'): vaultServer = vaultServer[:-1] self._http = yield httpclientservice.HTTPClientService.getService( @@ -57,11 +62,23 @@ """ get the value from vault secret backend """ - path = self.secretsmount + '/' + entry + if self.apiVersion == 1: + path = self.secretsmount + '/' + entry + else: + path = self.secretsmount + '/data/' + entry + + # note that the HTTP path contains v1 for both versions of the key-value + # secret engine. Different versions of the key-value engine are + # effectively separate secret engines in vault, with the same base HTTP + # API, but with different paths within it. proj = yield self._http.get('/v1/{0}'.format(path)) code = yield proj.code if code != 200: raise KeyError("The key %s does not exist in Vault provider: request" " return code:%d." % (entry, code)) json = yield proj.json() - defer.returnValue(json.get(u'data', {}).get('value')) + if self.apiVersion == 1: + ret = json.get(u'data', {}).get('value') + else: + ret = json.get(u'data', {}).get(u'data', {}).get('value') + defer.returnValue(ret)
{"golden_diff": "diff --git a/master/buildbot/secrets/providers/vault.py b/master/buildbot/secrets/providers/vault.py\n--- a/master/buildbot/secrets/providers/vault.py\n+++ b/master/buildbot/secrets/providers/vault.py\n@@ -28,25 +28,30 @@\n \n class HashiCorpVaultSecretProvider(SecretProviderBase):\n \"\"\"\n- basic provider where each secret is stored in Vault\n+ basic provider where each secret is stored in Vault KV secret engine\n \"\"\"\n \n name = 'SecretInVault'\n \n- def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None):\n+ def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None,\n+ apiVersion=1):\n if not isinstance(vaultServer, str):\n config.error(\"vaultServer must be a string while it is %s\" % (type(vaultServer,)))\n if not isinstance(vaultToken, str):\n config.error(\"vaultToken must be a string while it is %s\" % (type(vaultToken,)))\n+ if apiVersion not in [1, 2]:\n+ config.error(\"apiVersion %s is not supported\" % apiVersion)\n \n @defer.inlineCallbacks\n- def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None):\n+ def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None,\n+ apiVersion=1):\n if secretsmount is None:\n self.secretsmount = \"secret\"\n else:\n self.secretsmount = secretsmount\n self.vaultServer = vaultServer\n self.vaultToken = vaultToken\n+ self.apiVersion = apiVersion\n if vaultServer.endswith('/'):\n vaultServer = vaultServer[:-1]\n self._http = yield httpclientservice.HTTPClientService.getService(\n@@ -57,11 +62,23 @@\n \"\"\"\n get the value from vault secret backend\n \"\"\"\n- path = self.secretsmount + '/' + entry\n+ if self.apiVersion == 1:\n+ path = self.secretsmount + '/' + entry\n+ else:\n+ path = self.secretsmount + '/data/' + entry\n+\n+ # note that the HTTP path contains v1 for both versions of the key-value\n+ # secret engine. Different versions of the key-value engine are\n+ # effectively separate secret engines in vault, with the same base HTTP\n+ # API, but with different paths within it.\n proj = yield self._http.get('/v1/{0}'.format(path))\n code = yield proj.code\n if code != 200:\n raise KeyError(\"The key %s does not exist in Vault provider: request\"\n \" return code:%d.\" % (entry, code))\n json = yield proj.json()\n- defer.returnValue(json.get(u'data', {}).get('value'))\n+ if self.apiVersion == 1:\n+ ret = json.get(u'data', {}).get('value')\n+ else:\n+ ret = json.get(u'data', {}).get(u'data', {}).get('value')\n+ defer.returnValue(ret)\n", "issue": "SecretInVault secret provider integration tests no longer work\nThe test `buildbot.test.integration.test_integration_secrets_with_vault.SecretsConfig.test_secret` no longer works.\r\n\r\nSee https://travis-ci.org/buildbot/buildbot/jobs/464401540.\r\n\r\nLooks like the default kv engine shipping with the `vault` engine is now v2 which we don't support yet.\r\n\r\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\"\"\"\nvault based providers\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom twisted.internet import defer\n\nfrom buildbot import config\nfrom buildbot.secrets.providers.base import SecretProviderBase\nfrom buildbot.util import httpclientservice\n\n\nclass HashiCorpVaultSecretProvider(SecretProviderBase):\n \"\"\"\n basic provider where each secret is stored in Vault\n \"\"\"\n\n name = 'SecretInVault'\n\n def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None):\n if not isinstance(vaultServer, str):\n config.error(\"vaultServer must be a string while it is %s\" % (type(vaultServer,)))\n if not isinstance(vaultToken, str):\n config.error(\"vaultToken must be a string while it is %s\" % (type(vaultToken,)))\n\n @defer.inlineCallbacks\n def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None):\n if secretsmount is None:\n self.secretsmount = \"secret\"\n else:\n self.secretsmount = secretsmount\n self.vaultServer = vaultServer\n self.vaultToken = vaultToken\n if vaultServer.endswith('/'):\n vaultServer = vaultServer[:-1]\n self._http = yield httpclientservice.HTTPClientService.getService(\n self.master, self.vaultServer, headers={'X-Vault-Token': self.vaultToken})\n\n @defer.inlineCallbacks\n def get(self, entry):\n \"\"\"\n get the value from vault secret backend\n \"\"\"\n path = self.secretsmount + '/' + entry\n proj = yield self._http.get('/v1/{0}'.format(path))\n code = yield proj.code\n if code != 200:\n raise KeyError(\"The key %s does not exist in Vault provider: request\"\n \" return code:%d.\" % (entry, code))\n json = yield proj.json()\n defer.returnValue(json.get(u'data', {}).get('value'))\n", "path": "master/buildbot/secrets/providers/vault.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\"\"\"\nvault based providers\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom twisted.internet import defer\n\nfrom buildbot import config\nfrom buildbot.secrets.providers.base import SecretProviderBase\nfrom buildbot.util import httpclientservice\n\n\nclass HashiCorpVaultSecretProvider(SecretProviderBase):\n \"\"\"\n basic provider where each secret is stored in Vault KV secret engine\n \"\"\"\n\n name = 'SecretInVault'\n\n def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None,\n apiVersion=1):\n if not isinstance(vaultServer, str):\n config.error(\"vaultServer must be a string while it is %s\" % (type(vaultServer,)))\n if not isinstance(vaultToken, str):\n config.error(\"vaultToken must be a string while it is %s\" % (type(vaultToken,)))\n if apiVersion not in [1, 2]:\n config.error(\"apiVersion %s is not supported\" % apiVersion)\n\n @defer.inlineCallbacks\n def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None,\n apiVersion=1):\n if secretsmount is None:\n self.secretsmount = \"secret\"\n else:\n self.secretsmount = secretsmount\n self.vaultServer = vaultServer\n self.vaultToken = vaultToken\n self.apiVersion = apiVersion\n if vaultServer.endswith('/'):\n vaultServer = vaultServer[:-1]\n self._http = yield httpclientservice.HTTPClientService.getService(\n self.master, self.vaultServer, headers={'X-Vault-Token': self.vaultToken})\n\n @defer.inlineCallbacks\n def get(self, entry):\n \"\"\"\n get the value from vault secret backend\n \"\"\"\n if self.apiVersion == 1:\n path = self.secretsmount + '/' + entry\n else:\n path = self.secretsmount + '/data/' + entry\n\n # note that the HTTP path contains v1 for both versions of the key-value\n # secret engine. Different versions of the key-value engine are\n # effectively separate secret engines in vault, with the same base HTTP\n # API, but with different paths within it.\n proj = yield self._http.get('/v1/{0}'.format(path))\n code = yield proj.code\n if code != 200:\n raise KeyError(\"The key %s does not exist in Vault provider: request\"\n \" return code:%d.\" % (entry, code))\n json = yield proj.json()\n if self.apiVersion == 1:\n ret = json.get(u'data', {}).get('value')\n else:\n ret = json.get(u'data', {}).get(u'data', {}).get('value')\n defer.returnValue(ret)\n", "path": "master/buildbot/secrets/providers/vault.py"}]}
1,072
684
gh_patches_debug_30219
rasdani/github-patches
git_diff
ansible__awx-13455
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CyberArk Conjur lookup plugin does not work with open source Conjur version ### Please confirm the following - [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html). - [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates. - [X] I understand that AWX is open source software provided for free and that I might not receive a timely response. ### Bug Summary The CyberArk Conjur Secrets Manager Lookup plugin no longer supports the lookup against the Conjur OSS. Most likely because the opensource conjure version API endpoint does not have the `/api/` portion recently added by this change: https://github.com/ansible/awx/pull/13121 ### AWX version 2.4 ### Select the relevant components - [ ] UI - [X] API - [ ] Docs - [ ] Collection - [ ] CLI - [ ] Other ### Installation method docker development environment ### Modifications no ### Ansible version _No response_ ### Operating system _No response_ ### Web browser _No response_ ### Steps to reproduce 1. Deploy Conjur OSS. We use the latest `cyberark/conjur` image for that. The Conjur version is ``` Version 1.19.1-3398 API Version 5.3.0 ``` 2. Under the AWX, create the CyberArk Conjur Secrets Manager Lookup credential. Provide Conjure URL, user, and token. 3. Click Test and provide a path. ### Expected results Connection to Conjur should be established. HTTP 200 response. ### Actual results 401 error returned. ``` Traceback (most recent call last): File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/tasks/jobs.py", line 504, in run args = self.build_args(self.instance, private_data_dir, passwords) File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/tasks/jobs.py", line 937, in build_args ssh_username = creds.get_input('username', default='') File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py", line 275, in get_input return self._get_dynamic_input(field_name) File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py", line 309, in _get_dynamic_input return input_source.get_input_value() File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py", line 1250, in get_input_value return backend(**backend_kwargs) File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/credential_plugins/conjur.py", line 72, in conjur_backend raise_for_status(resp) File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/credential_plugins/plugin.py", line 12, in raise_for_status resp.raise_for_status() File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/requests/models.py", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: https://conjureurl.com/api/authn/test/admin/authenticate ``` ### Additional information _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `awx/main/credential_plugins/conjur.py` Content: ``` 1 from .plugin import CredentialPlugin, CertFiles, raise_for_status 2 3 from urllib.parse import urljoin, quote 4 5 from django.utils.translation import gettext_lazy as _ 6 import requests 7 8 9 conjur_inputs = { 10 'fields': [ 11 { 12 'id': 'url', 13 'label': _('Conjur URL'), 14 'type': 'string', 15 'format': 'url', 16 }, 17 { 18 'id': 'api_key', 19 'label': _('API Key'), 20 'type': 'string', 21 'secret': True, 22 }, 23 { 24 'id': 'account', 25 'label': _('Account'), 26 'type': 'string', 27 }, 28 { 29 'id': 'username', 30 'label': _('Username'), 31 'type': 'string', 32 }, 33 {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True}, 34 ], 35 'metadata': [ 36 { 37 'id': 'secret_path', 38 'label': _('Secret Identifier'), 39 'type': 'string', 40 'help_text': _('The identifier for the secret e.g., /some/identifier'), 41 }, 42 { 43 'id': 'secret_version', 44 'label': _('Secret Version'), 45 'type': 'string', 46 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'), 47 }, 48 ], 49 'required': ['url', 'api_key', 'account', 'username'], 50 } 51 52 53 def conjur_backend(**kwargs): 54 url = kwargs['url'] 55 api_key = kwargs['api_key'] 56 account = quote(kwargs['account'], safe='') 57 username = quote(kwargs['username'], safe='') 58 secret_path = quote(kwargs['secret_path'], safe='') 59 version = kwargs.get('secret_version') 60 cacert = kwargs.get('cacert', None) 61 62 auth_kwargs = { 63 'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'}, 64 'data': api_key, 65 'allow_redirects': False, 66 } 67 68 with CertFiles(cacert) as cert: 69 # https://www.conjur.org/api.html#authentication-authenticate-post 70 auth_kwargs['verify'] = cert 71 resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs) 72 raise_for_status(resp) 73 token = resp.content.decode('utf-8') 74 75 lookup_kwargs = { 76 'headers': {'Authorization': 'Token token="{}"'.format(token)}, 77 'allow_redirects': False, 78 } 79 80 # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get 81 path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path])) 82 if version: 83 ver = "version={}".format(version) 84 path = '?'.join([path, ver]) 85 86 with CertFiles(cacert) as cert: 87 lookup_kwargs['verify'] = cert 88 resp = requests.get(path, timeout=30, **lookup_kwargs) 89 raise_for_status(resp) 90 return resp.text 91 92 93 conjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend) 94 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/awx/main/credential_plugins/conjur.py b/awx/main/credential_plugins/conjur.py --- a/awx/main/credential_plugins/conjur.py +++ b/awx/main/credential_plugins/conjur.py @@ -68,7 +68,10 @@ with CertFiles(cacert) as cert: # https://www.conjur.org/api.html#authentication-authenticate-post auth_kwargs['verify'] = cert - resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs) + try: + resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs) + except requests.exceptions.ConnectionError: + resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs) raise_for_status(resp) token = resp.content.decode('utf-8') @@ -78,14 +81,19 @@ } # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get - path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path])) + path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path])) + path_conjurcloud = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path])) if version: ver = "version={}".format(version) path = '?'.join([path, ver]) + path_conjurcloud = '?'.join([path_conjurcloud, ver]) with CertFiles(cacert) as cert: lookup_kwargs['verify'] = cert - resp = requests.get(path, timeout=30, **lookup_kwargs) + try: + resp = requests.get(path, timeout=30, **lookup_kwargs) + except requests.exceptions.ConnectionError: + resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs) raise_for_status(resp) return resp.text
{"golden_diff": "diff --git a/awx/main/credential_plugins/conjur.py b/awx/main/credential_plugins/conjur.py\n--- a/awx/main/credential_plugins/conjur.py\n+++ b/awx/main/credential_plugins/conjur.py\n@@ -68,7 +68,10 @@\n with CertFiles(cacert) as cert:\n # https://www.conjur.org/api.html#authentication-authenticate-post\n auth_kwargs['verify'] = cert\n- resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n+ try:\n+ resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)\n+ except requests.exceptions.ConnectionError:\n+ resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n raise_for_status(resp)\n token = resp.content.decode('utf-8')\n \n@@ -78,14 +81,19 @@\n }\n \n # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get\n- path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))\n+ path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))\n+ path_conjurcloud = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))\n if version:\n ver = \"version={}\".format(version)\n path = '?'.join([path, ver])\n+ path_conjurcloud = '?'.join([path_conjurcloud, ver])\n \n with CertFiles(cacert) as cert:\n lookup_kwargs['verify'] = cert\n- resp = requests.get(path, timeout=30, **lookup_kwargs)\n+ try:\n+ resp = requests.get(path, timeout=30, **lookup_kwargs)\n+ except requests.exceptions.ConnectionError:\n+ resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)\n raise_for_status(resp)\n return resp.text\n", "issue": "CyberArk Conjur lookup plugin does not work with open source Conjur version\n### Please confirm the following\r\n\r\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\r\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\r\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\r\n\r\n### Bug Summary\r\n\r\nThe CyberArk Conjur Secrets Manager Lookup plugin no longer supports the lookup against the Conjur OSS. Most likely because the opensource conjure version API endpoint does not have the `/api/` portion recently added by this change: https://github.com/ansible/awx/pull/13121\r\n\r\n### AWX version\r\n\r\n2.4\r\n\r\n### Select the relevant components\r\n\r\n- [ ] UI\r\n- [X] API\r\n- [ ] Docs\r\n- [ ] Collection\r\n- [ ] CLI\r\n- [ ] Other\r\n\r\n### Installation method\r\n\r\ndocker development environment\r\n\r\n### Modifications\r\n\r\nno\r\n\r\n### Ansible version\r\n\r\n_No response_\r\n\r\n### Operating system\r\n\r\n_No response_\r\n\r\n### Web browser\r\n\r\n_No response_\r\n\r\n### Steps to reproduce\r\n\r\n1. Deploy Conjur OSS. We use the latest `cyberark/conjur` image for that. The Conjur version is\r\n```\r\n Version 1.19.1-3398\r\n API Version 5.3.0 \r\n```\r\n2. Under the AWX, create the CyberArk Conjur Secrets Manager Lookup credential. Provide Conjure URL, user, and token. \r\n3. Click Test and provide a path. \r\n\r\n\r\n### Expected results\r\n\r\nConnection to Conjur should be established. HTTP 200 response. \r\n\r\n### Actual results\r\n\r\n401 error returned. 
\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/tasks/jobs.py\", line 504, in run\r\n args = self.build_args(self.instance, private_data_dir, passwords)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/tasks/jobs.py\", line 937, in build_args\r\n ssh_username = creds.get_input('username', default='')\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py\", line 275, in get_input\r\n return self._get_dynamic_input(field_name)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py\", line 309, in _get_dynamic_input\r\n return input_source.get_input_value()\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py\", line 1250, in get_input_value\r\n return backend(**backend_kwargs)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/credential_plugins/conjur.py\", line 72, in conjur_backend\r\n raise_for_status(resp)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/credential_plugins/plugin.py\", line 12, in raise_for_status\r\n resp.raise_for_status()\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/requests/models.py\", line 1021, in raise_for_status\r\n raise HTTPError(http_error_msg, response=self)\r\nrequests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: https://conjureurl.com/api/authn/test/admin/authenticate\r\n```\r\n\r\n### Additional information\r\n\r\n_No response_\n", "before_files": [{"content": "from .plugin import CredentialPlugin, CertFiles, raise_for_status\n\nfrom urllib.parse import urljoin, quote\n\nfrom django.utils.translation import gettext_lazy as _\nimport requests\n\n\nconjur_inputs = {\n 'fields': [\n {\n 'id': 'url',\n 'label': _('Conjur URL'),\n 'type': 'string',\n 'format': 'url',\n },\n {\n 'id': 'api_key',\n 'label': _('API Key'),\n 'type': 'string',\n 'secret': True,\n },\n {\n 'id': 'account',\n 'label': _('Account'),\n 'type': 'string',\n },\n {\n 'id': 'username',\n 'label': _('Username'),\n 'type': 'string',\n },\n {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True},\n ],\n 'metadata': [\n {\n 'id': 'secret_path',\n 'label': _('Secret Identifier'),\n 'type': 'string',\n 'help_text': _('The identifier for the secret e.g., /some/identifier'),\n },\n {\n 'id': 'secret_version',\n 'label': _('Secret Version'),\n 'type': 'string',\n 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),\n },\n ],\n 'required': ['url', 'api_key', 'account', 'username'],\n}\n\n\ndef conjur_backend(**kwargs):\n url = kwargs['url']\n api_key = kwargs['api_key']\n account = quote(kwargs['account'], safe='')\n username = quote(kwargs['username'], safe='')\n secret_path = quote(kwargs['secret_path'], safe='')\n version = kwargs.get('secret_version')\n cacert = kwargs.get('cacert', None)\n\n auth_kwargs = {\n 'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},\n 'data': api_key,\n 'allow_redirects': False,\n }\n\n with CertFiles(cacert) as cert:\n # https://www.conjur.org/api.html#authentication-authenticate-post\n auth_kwargs['verify'] = cert\n resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n raise_for_status(resp)\n token = resp.content.decode('utf-8')\n\n lookup_kwargs = {\n 'headers': 
{'Authorization': 'Token token=\"{}\"'.format(token)},\n 'allow_redirects': False,\n }\n\n # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get\n path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))\n if version:\n ver = \"version={}\".format(version)\n path = '?'.join([path, ver])\n\n with CertFiles(cacert) as cert:\n lookup_kwargs['verify'] = cert\n resp = requests.get(path, timeout=30, **lookup_kwargs)\n raise_for_status(resp)\n return resp.text\n\n\nconjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)\n", "path": "awx/main/credential_plugins/conjur.py"}], "after_files": [{"content": "from .plugin import CredentialPlugin, CertFiles, raise_for_status\n\nfrom urllib.parse import urljoin, quote\n\nfrom django.utils.translation import gettext_lazy as _\nimport requests\n\n\nconjur_inputs = {\n 'fields': [\n {\n 'id': 'url',\n 'label': _('Conjur URL'),\n 'type': 'string',\n 'format': 'url',\n },\n {\n 'id': 'api_key',\n 'label': _('API Key'),\n 'type': 'string',\n 'secret': True,\n },\n {\n 'id': 'account',\n 'label': _('Account'),\n 'type': 'string',\n },\n {\n 'id': 'username',\n 'label': _('Username'),\n 'type': 'string',\n },\n {'id': 'cacert', 'label': _('Public Key Certificate'), 'type': 'string', 'multiline': True},\n ],\n 'metadata': [\n {\n 'id': 'secret_path',\n 'label': _('Secret Identifier'),\n 'type': 'string',\n 'help_text': _('The identifier for the secret e.g., /some/identifier'),\n },\n {\n 'id': 'secret_version',\n 'label': _('Secret Version'),\n 'type': 'string',\n 'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),\n },\n ],\n 'required': ['url', 'api_key', 'account', 'username'],\n}\n\n\ndef conjur_backend(**kwargs):\n url = kwargs['url']\n api_key = kwargs['api_key']\n account = quote(kwargs['account'], safe='')\n username = quote(kwargs['username'], safe='')\n secret_path = quote(kwargs['secret_path'], safe='')\n version = kwargs.get('secret_version')\n cacert = kwargs.get('cacert', None)\n\n auth_kwargs = {\n 'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},\n 'data': api_key,\n 'allow_redirects': False,\n }\n\n with CertFiles(cacert) as cert:\n # https://www.conjur.org/api.html#authentication-authenticate-post\n auth_kwargs['verify'] = cert\n try:\n resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)\n except requests.exceptions.ConnectionError:\n resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)\n raise_for_status(resp)\n token = resp.content.decode('utf-8')\n\n lookup_kwargs = {\n 'headers': {'Authorization': 'Token token=\"{}\"'.format(token)},\n 'allow_redirects': False,\n }\n\n # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get\n path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))\n path_conjurcloud = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))\n if version:\n ver = \"version={}\".format(version)\n path = '?'.join([path, ver])\n path_conjurcloud = '?'.join([path_conjurcloud, ver])\n\n with CertFiles(cacert) as cert:\n lookup_kwargs['verify'] = cert\n try:\n resp = requests.get(path, timeout=30, **lookup_kwargs)\n except requests.exceptions.ConnectionError:\n resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)\n raise_for_status(resp)\n return resp.text\n\n\nconjur_plugin = 
CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)\n", "path": "awx/main/credential_plugins/conjur.py"}]}
2,012
479
gh_patches_debug_19765
rasdani/github-patches
git_diff
bokeh__bokeh-10170
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] `TileRenderer` ignores the `visible` property #### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages) Bokeh 2.1.0rc1 #### Description of expected behavior and the observed behavior `TileRenderer` should take into account the `visible` property. Both if passed to its constructor and if changed in runtime if using Bokeh server. #### Complete, minimal, self-contained example code that reproduces the issue ```python from bokeh.io import show from bokeh.plotting import figure from bokeh.tile_providers import CARTODBPOSITRON, get_provider p = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000), x_axis_type="mercator", y_axis_type="mercator") p.add_tile(get_provider(CARTODBPOSITRON), visible=False) show(p) ``` The root cause is that `TileRenderer` just doesn't check `visible` at all. It seems like every renderer checks this property. Maybe it should be checked at a higher level? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/models/file/latex_extension.py` Content: ``` 1 """ The LaTex example was derived from: http://matplotlib.org/users/usetex.html 2 """ 3 import numpy as np 4 from scipy.special import jv 5 6 from bokeh.models import Label 7 from bokeh.palettes import Spectral4 8 from bokeh.plotting import figure, output_file, show 9 from bokeh.util.compiler import TypeScript 10 11 output_file('latex_extension.html') 12 13 class LatexLabel(Label): 14 """A subclass of `Label` with all of the same class attributes except 15 canvas mode isn't supported and DOM manipulation happens in the TypeScript 16 superclass implementation that requires setting `render_mode='css'`). 17 18 Only the render method of LabelView is overwritten to perform the 19 text -> latex (via katex) conversion 20 """ 21 __javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.js"] 22 __css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.css"] 23 __implementation__ = TypeScript(""" 24 import {Label, LabelView} from "models/annotations/label" 25 26 declare namespace katex { 27 function render(expression: string, element: HTMLElement, options: {displayMode?: boolean}): void 28 } 29 30 export class LatexLabelView extends LabelView { 31 model: LatexLabel 32 33 render(): void { 34 // Here because AngleSpec does units tranform and label doesn't support specs 35 let angle: number 36 switch (this.model.angle_units) { 37 case "rad": { 38 angle = -1 * this.model.angle 39 break 40 } 41 case "deg": { 42 angle = -1 * this.model.angle * Math.PI/180.0 43 break 44 } 45 default: 46 throw new Error("unreachable") 47 } 48 49 const panel = this.panel || this.plot_view.frame 50 51 const xscale = this.plot_view.frame.xscales[this.model.x_range_name] 52 const yscale = this.plot_view.frame.yscales[this.model.y_range_name] 53 54 const {x, y} = this.model 55 let sx = this.model.x_units == "data" ? xscale.compute(x) : panel.xview.compute(x) 56 let sy = this.model.y_units == "data" ? 
yscale.compute(y) : panel.yview.compute(y) 57 58 sx += this.model.x_offset 59 sy -= this.model.y_offset 60 61 this._css_text(this.layer.ctx, "", sx, sy, angle) 62 katex.render(this.model.text, this.el, {displayMode: true}) 63 } 64 } 65 66 export class LatexLabel extends Label { 67 static init_LatexLabel(): void { 68 this.prototype.default_view = LatexLabelView 69 } 70 } 71 """) 72 73 p = figure(title="LaTex Extension Demonstration", plot_width=800, plot_height=350, 74 background_fill_color="#fafafa") 75 p.x_range.range_padding = 0 76 77 x = np.arange(0.0, 20.0, 0.02) 78 79 for i, n in enumerate([0, 1, 4, 7]): 80 p.line(x, jv(n, x), line_width=3, color=Spectral4[i], alpha=0.8, legend_label="𝜈=%d" % n) 81 82 83 text = (r"\text{Bessel Functions of the First Kind: }" + 84 r"J_\nu = \sum_{m=0}^{\infty}\frac{(-1)^m}{m!\ \Gamma(m+\nu+1)}" + 85 r"\left(\frac{x}{2}\right)^{2m+\nu}") 86 latex = LatexLabel(text=text,x=4.5, y=250, x_units='data', y_units='screen', 87 render_mode='css', text_font_size='11px', 88 background_fill_color="white", border_line_color="lightgrey") 89 90 p.add_layout(latex) 91 92 show(p) 93 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/models/file/latex_extension.py b/examples/models/file/latex_extension.py --- a/examples/models/file/latex_extension.py +++ b/examples/models/file/latex_extension.py @@ -30,7 +30,7 @@ export class LatexLabelView extends LabelView { model: LatexLabel - render(): void { + protected _render(): void { // Here because AngleSpec does units tranform and label doesn't support specs let angle: number switch (this.model.angle_units) { @@ -59,13 +59,17 @@ sy -= this.model.y_offset this._css_text(this.layer.ctx, "", sx, sy, angle) - katex.render(this.model.text, this.el, {displayMode: true}) + katex.render(this.model.text, this.el!, {displayMode: true}) } } export class LatexLabel extends Label { static init_LatexLabel(): void { this.prototype.default_view = LatexLabelView + + this.override({ + render_mode: "css", + }) } } """)
{"golden_diff": "diff --git a/examples/models/file/latex_extension.py b/examples/models/file/latex_extension.py\n--- a/examples/models/file/latex_extension.py\n+++ b/examples/models/file/latex_extension.py\n@@ -30,7 +30,7 @@\n export class LatexLabelView extends LabelView {\n model: LatexLabel\n \n- render(): void {\n+ protected _render(): void {\n // Here because AngleSpec does units tranform and label doesn't support specs\n let angle: number\n switch (this.model.angle_units) {\n@@ -59,13 +59,17 @@\n sy -= this.model.y_offset\n \n this._css_text(this.layer.ctx, \"\", sx, sy, angle)\n- katex.render(this.model.text, this.el, {displayMode: true})\n+ katex.render(this.model.text, this.el!, {displayMode: true})\n }\n }\n \n export class LatexLabel extends Label {\n static init_LatexLabel(): void {\n this.prototype.default_view = LatexLabelView\n+\n+ this.override({\n+ render_mode: \"css\",\n+ })\n }\n }\n \"\"\")\n", "issue": "[BUG] `TileRenderer` ignores the `visible` property\n#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)\r\nBokeh 2.1.0rc1\r\n\r\n#### Description of expected behavior and the observed behavior\r\n`TileRenderer` should take into account the `visible` property. Both if passed to its constructor and if changed in runtime if using Bokeh server.\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n```python\r\nfrom bokeh.io import show\r\nfrom bokeh.plotting import figure\r\nfrom bokeh.tile_providers import CARTODBPOSITRON, get_provider\r\n\r\np = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000),\r\n x_axis_type=\"mercator\", y_axis_type=\"mercator\")\r\n\r\np.add_tile(get_provider(CARTODBPOSITRON), visible=False)\r\n\r\nshow(p)\r\n```\r\nThe root cause is that `TileRenderer` just doesn't check `visible` at all. It seems like every renderer checks this property. 
Maybe it should be checked at a higher level?\n", "before_files": [{"content": "\"\"\" The LaTex example was derived from: http://matplotlib.org/users/usetex.html\n\"\"\"\nimport numpy as np\nfrom scipy.special import jv\n\nfrom bokeh.models import Label\nfrom bokeh.palettes import Spectral4\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.util.compiler import TypeScript\n\noutput_file('latex_extension.html')\n\nclass LatexLabel(Label):\n \"\"\"A subclass of `Label` with all of the same class attributes except\n canvas mode isn't supported and DOM manipulation happens in the TypeScript\n superclass implementation that requires setting `render_mode='css'`).\n\n Only the render method of LabelView is overwritten to perform the\n text -> latex (via katex) conversion\n \"\"\"\n __javascript__ = [\"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.js\"]\n __css__ = [\"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.css\"]\n __implementation__ = TypeScript(\"\"\"\nimport {Label, LabelView} from \"models/annotations/label\"\n\ndeclare namespace katex {\n function render(expression: string, element: HTMLElement, options: {displayMode?: boolean}): void\n}\n\nexport class LatexLabelView extends LabelView {\n model: LatexLabel\n\n render(): void {\n // Here because AngleSpec does units tranform and label doesn't support specs\n let angle: number\n switch (this.model.angle_units) {\n case \"rad\": {\n angle = -1 * this.model.angle\n break\n }\n case \"deg\": {\n angle = -1 * this.model.angle * Math.PI/180.0\n break\n }\n default:\n throw new Error(\"unreachable\")\n }\n\n const panel = this.panel || this.plot_view.frame\n\n const xscale = this.plot_view.frame.xscales[this.model.x_range_name]\n const yscale = this.plot_view.frame.yscales[this.model.y_range_name]\n\n const {x, y} = this.model\n let sx = this.model.x_units == \"data\" ? xscale.compute(x) : panel.xview.compute(x)\n let sy = this.model.y_units == \"data\" ? 
yscale.compute(y) : panel.yview.compute(y)\n\n sx += this.model.x_offset\n sy -= this.model.y_offset\n\n this._css_text(this.layer.ctx, \"\", sx, sy, angle)\n katex.render(this.model.text, this.el, {displayMode: true})\n }\n}\n\nexport class LatexLabel extends Label {\n static init_LatexLabel(): void {\n this.prototype.default_view = LatexLabelView\n }\n}\n\"\"\")\n\np = figure(title=\"LaTex Extension Demonstration\", plot_width=800, plot_height=350,\n background_fill_color=\"#fafafa\")\np.x_range.range_padding = 0\n\nx = np.arange(0.0, 20.0, 0.02)\n\nfor i, n in enumerate([0, 1, 4, 7]):\n p.line(x, jv(n, x), line_width=3, color=Spectral4[i], alpha=0.8, legend_label=\"\ud835\udf08=%d\" % n)\n\n\ntext = (r\"\\text{Bessel Functions of the First Kind: }\" +\n r\"J_\\nu = \\sum_{m=0}^{\\infty}\\frac{(-1)^m}{m!\\ \\Gamma(m+\\nu+1)}\" +\n r\"\\left(\\frac{x}{2}\\right)^{2m+\\nu}\")\nlatex = LatexLabel(text=text,x=4.5, y=250, x_units='data', y_units='screen',\n render_mode='css', text_font_size='11px',\n background_fill_color=\"white\", border_line_color=\"lightgrey\")\n\np.add_layout(latex)\n\nshow(p)\n", "path": "examples/models/file/latex_extension.py"}], "after_files": [{"content": "\"\"\" The LaTex example was derived from: http://matplotlib.org/users/usetex.html\n\"\"\"\nimport numpy as np\nfrom scipy.special import jv\n\nfrom bokeh.models import Label\nfrom bokeh.palettes import Spectral4\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.util.compiler import TypeScript\n\noutput_file('latex_extension.html')\n\nclass LatexLabel(Label):\n \"\"\"A subclass of `Label` with all of the same class attributes except\n canvas mode isn't supported and DOM manipulation happens in the TypeScript\n superclass implementation that requires setting `render_mode='css'`).\n\n Only the render method of LabelView is overwritten to perform the\n text -> latex (via katex) conversion\n \"\"\"\n __javascript__ = [\"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.js\"]\n __css__ = [\"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.css\"]\n __implementation__ = TypeScript(\"\"\"\nimport {Label, LabelView} from \"models/annotations/label\"\n\ndeclare namespace katex {\n function render(expression: string, element: HTMLElement, options: {displayMode?: boolean}): void\n}\n\nexport class LatexLabelView extends LabelView {\n model: LatexLabel\n\n protected _render(): void {\n // Here because AngleSpec does units tranform and label doesn't support specs\n let angle: number\n switch (this.model.angle_units) {\n case \"rad\": {\n angle = -1 * this.model.angle\n break\n }\n case \"deg\": {\n angle = -1 * this.model.angle * Math.PI/180.0\n break\n }\n default:\n throw new Error(\"unreachable\")\n }\n\n const panel = this.panel || this.plot_view.frame\n\n const xscale = this.plot_view.frame.xscales[this.model.x_range_name]\n const yscale = this.plot_view.frame.yscales[this.model.y_range_name]\n\n const {x, y} = this.model\n let sx = this.model.x_units == \"data\" ? xscale.compute(x) : panel.xview.compute(x)\n let sy = this.model.y_units == \"data\" ? 
yscale.compute(y) : panel.yview.compute(y)\n\n sx += this.model.x_offset\n sy -= this.model.y_offset\n\n this._css_text(this.layer.ctx, \"\", sx, sy, angle)\n katex.render(this.model.text, this.el!, {displayMode: true})\n }\n}\n\nexport class LatexLabel extends Label {\n static init_LatexLabel(): void {\n this.prototype.default_view = LatexLabelView\n\n this.override({\n render_mode: \"css\",\n })\n }\n}\n\"\"\")\n\np = figure(title=\"LaTex Extension Demonstration\", plot_width=800, plot_height=350,\n background_fill_color=\"#fafafa\")\np.x_range.range_padding = 0\n\nx = np.arange(0.0, 20.0, 0.02)\n\nfor i, n in enumerate([0, 1, 4, 7]):\n p.line(x, jv(n, x), line_width=3, color=Spectral4[i], alpha=0.8, legend_label=\"\ud835\udf08=%d\" % n)\n\n\ntext = (r\"\\text{Bessel Functions of the First Kind: }\" +\n r\"J_\\nu = \\sum_{m=0}^{\\infty}\\frac{(-1)^m}{m!\\ \\Gamma(m+\\nu+1)}\" +\n r\"\\left(\\frac{x}{2}\\right)^{2m+\\nu}\")\nlatex = LatexLabel(text=text,x=4.5, y=250, x_units='data', y_units='screen',\n render_mode='css', text_font_size='11px',\n background_fill_color=\"white\", border_line_color=\"lightgrey\")\n\np.add_layout(latex)\n\nshow(p)\n", "path": "examples/models/file/latex_extension.py"}]}
1,544
244
gh_patches_debug_30984
rasdani/github-patches
git_diff
RedHatInsights__insights-core-1901
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Directories of exploded archives not recognized as cluster Directories of exploded archives aren't recognized as a cluster but are erroneously identified as whatever context matches the first marker file we encounter after recursively enumerating every file in all subdirectories. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `insights/core/hydration.py` Content: ``` 1 import logging 2 import os 3 from itertools import product 4 5 from insights.core import archives 6 from insights.core.context import (ClusterArchiveContext, 7 JDRContext, 8 HostArchiveContext, 9 SosArchiveContext, 10 SerializedArchiveContext) 11 12 log = logging.getLogger(__name__) 13 14 15 def get_all_files(path): 16 all_files = [] 17 for f in archives.get_all_files(path): 18 if os.path.isfile(f) and not os.path.islink(f): 19 all_files.append(f) 20 return all_files 21 22 23 def identify(files): 24 markers = {"insights_archive.txt": SerializedArchiveContext, 25 "insights_commands": HostArchiveContext, 26 "sos_commands": SosArchiveContext, 27 "JBOSS_HOME": JDRContext} 28 29 for f, m in product(files, markers): 30 if m in f: 31 i = f.find(m) 32 common_path = os.path.dirname(f[:i]) 33 ctx = markers[m] 34 return common_path, ctx 35 36 common_path = os.path.dirname(os.path.commonprefix(files)) 37 if not common_path: 38 raise archives.InvalidArchive("Unable to determine common path") 39 40 if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)): 41 return common_path, ClusterArchiveContext 42 43 return common_path, HostArchiveContext 44 45 46 def create_context(path, context=None): 47 all_files = get_all_files(path) 48 if not all_files: 49 raise archives.InvalidArchive("No files in archive") 50 51 common_path, ctx = identify(all_files) 52 context = context or ctx 53 return context(common_path, all_files=all_files) 54 ``` Path: `insights/core/cluster.py` Content: ``` 1 #!/usr/bin/env python 2 import itertools 3 import pandas as pd 4 from collections import defaultdict 5 6 from ansible.parsing.dataloader import DataLoader 7 from ansible.inventory.manager import InventoryManager 8 9 from insights.core import dr, plugins 10 from insights.core.archives import extract 11 from insights.core.hydration import create_context 12 from insights.specs import Specs 13 14 15 ID_GENERATOR = itertools.count() 16 17 18 class ClusterMeta(dict): 19 def __init__(self, num_members, kwargs): 20 self.num_members = num_members 21 self.update(**kwargs) 22 23 24 @plugins.combiner(optional=[Specs.machine_id, Specs.hostname]) 25 def machine_id(mid, hn): 26 ds = mid or hn 27 if ds: 28 return ds.content[0].strip() 29 return str(next(ID_GENERATOR)) 30 31 32 def parse_inventory(path): 33 inventory = InventoryManager(loader=DataLoader(), sources=path) 34 return inventory.get_groups_dict() 35 36 37 def attach_machine_id(result, mid): 38 key = "machine_id" 39 if isinstance(result, list): 40 for r in result: 41 r[key] = mid 42 else: 43 result[key] = mid 44 return result 45 46 47 def process_archives(archives): 48 for archive in archives: 49 with extract(archive) as ex: 50 ctx = create_context(ex.tmp_dir) 51 broker = dr.Broker() 52 broker[ctx.__class__] = ctx 53 yield dr.run(broker=broker) 54 55 56 def extract_facts(brokers): 57 results = defaultdict(list) 58 for b in brokers: 59 mid = b[machine_id] 60 for k, v in 
b.get_by_type(plugins.fact).items(): 61 r = attach_machine_id(v, mid) 62 if isinstance(r, list): 63 results[k].extend(r) 64 else: 65 results[k].append(r) 66 return results 67 68 69 def process_facts(facts, meta, broker): 70 broker[ClusterMeta] = meta 71 for k, v in facts.items(): 72 broker[k] = pd.DataFrame(v) 73 return dr.run(dr.COMPONENTS[dr.GROUPS.cluster], broker=broker) 74 75 76 def process_cluster(archives, broker, inventory=None): 77 inventory = parse_inventory(inventory) if inventory else {} 78 79 brokers = process_archives(archives) 80 facts = extract_facts(brokers) 81 meta = ClusterMeta(len(archives), inventory) 82 83 return process_facts(facts, meta, broker) 84 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/insights/core/cluster.py b/insights/core/cluster.py --- a/insights/core/cluster.py +++ b/insights/core/cluster.py @@ -1,8 +1,10 @@ #!/usr/bin/env python import itertools -import pandas as pd +import os from collections import defaultdict +import pandas as pd + from ansible.parsing.dataloader import DataLoader from ansible.inventory.manager import InventoryManager @@ -46,8 +48,14 @@ def process_archives(archives): for archive in archives: - with extract(archive) as ex: - ctx = create_context(ex.tmp_dir) + if os.path.isfile(archive): + with extract(archive) as ex: + ctx = create_context(ex.tmp_dir) + broker = dr.Broker() + broker[ctx.__class__] = ctx + yield dr.run(broker=broker) + else: + ctx = create_context(archive) broker = dr.Broker() broker[ctx.__class__] = ctx yield dr.run(broker=broker) diff --git a/insights/core/hydration.py b/insights/core/hydration.py --- a/insights/core/hydration.py +++ b/insights/core/hydration.py @@ -37,13 +37,15 @@ if not common_path: raise archives.InvalidArchive("Unable to determine common path") - if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)): - return common_path, ClusterArchiveContext - return common_path, HostArchiveContext def create_context(path, context=None): + top = os.listdir(path) + arc = [os.path.join(path, f) for f in top if f.endswith(archives.COMPRESSION_TYPES)] + if arc: + return ClusterArchiveContext(path, all_files=arc) + all_files = get_all_files(path) if not all_files: raise archives.InvalidArchive("No files in archive")
{"golden_diff": "diff --git a/insights/core/cluster.py b/insights/core/cluster.py\n--- a/insights/core/cluster.py\n+++ b/insights/core/cluster.py\n@@ -1,8 +1,10 @@\n #!/usr/bin/env python\n import itertools\n-import pandas as pd\n+import os\n from collections import defaultdict\n \n+import pandas as pd\n+\n from ansible.parsing.dataloader import DataLoader\n from ansible.inventory.manager import InventoryManager\n \n@@ -46,8 +48,14 @@\n \n def process_archives(archives):\n for archive in archives:\n- with extract(archive) as ex:\n- ctx = create_context(ex.tmp_dir)\n+ if os.path.isfile(archive):\n+ with extract(archive) as ex:\n+ ctx = create_context(ex.tmp_dir)\n+ broker = dr.Broker()\n+ broker[ctx.__class__] = ctx\n+ yield dr.run(broker=broker)\n+ else:\n+ ctx = create_context(archive)\n broker = dr.Broker()\n broker[ctx.__class__] = ctx\n yield dr.run(broker=broker)\ndiff --git a/insights/core/hydration.py b/insights/core/hydration.py\n--- a/insights/core/hydration.py\n+++ b/insights/core/hydration.py\n@@ -37,13 +37,15 @@\n if not common_path:\n raise archives.InvalidArchive(\"Unable to determine common path\")\n \n- if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)):\n- return common_path, ClusterArchiveContext\n-\n return common_path, HostArchiveContext\n \n \n def create_context(path, context=None):\n+ top = os.listdir(path)\n+ arc = [os.path.join(path, f) for f in top if f.endswith(archives.COMPRESSION_TYPES)]\n+ if arc:\n+ return ClusterArchiveContext(path, all_files=arc)\n+\n all_files = get_all_files(path)\n if not all_files:\n raise archives.InvalidArchive(\"No files in archive\")\n", "issue": "Directories of exploded archives not recognized as cluster\nDirectories of exploded archives aren't recognized as a cluster but are erroneously identified as whatever context matches the first marker file we encounter after recursively enumerating every file in all subdirectories.\n", "before_files": [{"content": "import logging\nimport os\nfrom itertools import product\n\nfrom insights.core import archives\nfrom insights.core.context import (ClusterArchiveContext,\n JDRContext,\n HostArchiveContext,\n SosArchiveContext,\n SerializedArchiveContext)\n\nlog = logging.getLogger(__name__)\n\n\ndef get_all_files(path):\n all_files = []\n for f in archives.get_all_files(path):\n if os.path.isfile(f) and not os.path.islink(f):\n all_files.append(f)\n return all_files\n\n\ndef identify(files):\n markers = {\"insights_archive.txt\": SerializedArchiveContext,\n \"insights_commands\": HostArchiveContext,\n \"sos_commands\": SosArchiveContext,\n \"JBOSS_HOME\": JDRContext}\n\n for f, m in product(files, markers):\n if m in f:\n i = f.find(m)\n common_path = os.path.dirname(f[:i])\n ctx = markers[m]\n return common_path, ctx\n\n common_path = os.path.dirname(os.path.commonprefix(files))\n if not common_path:\n raise archives.InvalidArchive(\"Unable to determine common path\")\n\n if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)):\n return common_path, ClusterArchiveContext\n\n return common_path, HostArchiveContext\n\n\ndef create_context(path, context=None):\n all_files = get_all_files(path)\n if not all_files:\n raise archives.InvalidArchive(\"No files in archive\")\n\n common_path, ctx = identify(all_files)\n context = context or ctx\n return context(common_path, all_files=all_files)\n", "path": "insights/core/hydration.py"}, {"content": "#!/usr/bin/env python\nimport itertools\nimport pandas as pd\nfrom collections import 
defaultdict\n\nfrom ansible.parsing.dataloader import DataLoader\nfrom ansible.inventory.manager import InventoryManager\n\nfrom insights.core import dr, plugins\nfrom insights.core.archives import extract\nfrom insights.core.hydration import create_context\nfrom insights.specs import Specs\n\n\nID_GENERATOR = itertools.count()\n\n\nclass ClusterMeta(dict):\n def __init__(self, num_members, kwargs):\n self.num_members = num_members\n self.update(**kwargs)\n\n\[email protected](optional=[Specs.machine_id, Specs.hostname])\ndef machine_id(mid, hn):\n ds = mid or hn\n if ds:\n return ds.content[0].strip()\n return str(next(ID_GENERATOR))\n\n\ndef parse_inventory(path):\n inventory = InventoryManager(loader=DataLoader(), sources=path)\n return inventory.get_groups_dict()\n\n\ndef attach_machine_id(result, mid):\n key = \"machine_id\"\n if isinstance(result, list):\n for r in result:\n r[key] = mid\n else:\n result[key] = mid\n return result\n\n\ndef process_archives(archives):\n for archive in archives:\n with extract(archive) as ex:\n ctx = create_context(ex.tmp_dir)\n broker = dr.Broker()\n broker[ctx.__class__] = ctx\n yield dr.run(broker=broker)\n\n\ndef extract_facts(brokers):\n results = defaultdict(list)\n for b in brokers:\n mid = b[machine_id]\n for k, v in b.get_by_type(plugins.fact).items():\n r = attach_machine_id(v, mid)\n if isinstance(r, list):\n results[k].extend(r)\n else:\n results[k].append(r)\n return results\n\n\ndef process_facts(facts, meta, broker):\n broker[ClusterMeta] = meta\n for k, v in facts.items():\n broker[k] = pd.DataFrame(v)\n return dr.run(dr.COMPONENTS[dr.GROUPS.cluster], broker=broker)\n\n\ndef process_cluster(archives, broker, inventory=None):\n inventory = parse_inventory(inventory) if inventory else {}\n\n brokers = process_archives(archives)\n facts = extract_facts(brokers)\n meta = ClusterMeta(len(archives), inventory)\n\n return process_facts(facts, meta, broker)\n", "path": "insights/core/cluster.py"}], "after_files": [{"content": "import logging\nimport os\nfrom itertools import product\n\nfrom insights.core import archives\nfrom insights.core.context import (ClusterArchiveContext,\n JDRContext,\n HostArchiveContext,\n SosArchiveContext,\n SerializedArchiveContext)\n\nlog = logging.getLogger(__name__)\n\n\ndef get_all_files(path):\n all_files = []\n for f in archives.get_all_files(path):\n if os.path.isfile(f) and not os.path.islink(f):\n all_files.append(f)\n return all_files\n\n\ndef identify(files):\n markers = {\"insights_archive.txt\": SerializedArchiveContext,\n \"insights_commands\": HostArchiveContext,\n \"sos_commands\": SosArchiveContext,\n \"JBOSS_HOME\": JDRContext}\n\n for f, m in product(files, markers):\n if m in f:\n i = f.find(m)\n common_path = os.path.dirname(f[:i])\n ctx = markers[m]\n return common_path, ctx\n\n common_path = os.path.dirname(os.path.commonprefix(files))\n if not common_path:\n raise archives.InvalidArchive(\"Unable to determine common path\")\n\n return common_path, HostArchiveContext\n\n\ndef create_context(path, context=None):\n top = os.listdir(path)\n arc = [os.path.join(path, f) for f in top if f.endswith(archives.COMPRESSION_TYPES)]\n if arc:\n return ClusterArchiveContext(path, all_files=arc)\n\n all_files = get_all_files(path)\n if not all_files:\n raise archives.InvalidArchive(\"No files in archive\")\n\n common_path, ctx = identify(all_files)\n context = context or ctx\n return context(common_path, all_files=all_files)\n", "path": "insights/core/hydration.py"}, {"content": "#!/usr/bin/env 
python\nimport itertools\nimport os\nfrom collections import defaultdict\n\nimport pandas as pd\n\nfrom ansible.parsing.dataloader import DataLoader\nfrom ansible.inventory.manager import InventoryManager\n\nfrom insights.core import dr, plugins\nfrom insights.core.archives import extract\nfrom insights.core.hydration import create_context\nfrom insights.specs import Specs\n\n\nID_GENERATOR = itertools.count()\n\n\nclass ClusterMeta(dict):\n def __init__(self, num_members, kwargs):\n self.num_members = num_members\n self.update(**kwargs)\n\n\[email protected](optional=[Specs.machine_id, Specs.hostname])\ndef machine_id(mid, hn):\n ds = mid or hn\n if ds:\n return ds.content[0].strip()\n return str(next(ID_GENERATOR))\n\n\ndef parse_inventory(path):\n inventory = InventoryManager(loader=DataLoader(), sources=path)\n return inventory.get_groups_dict()\n\n\ndef attach_machine_id(result, mid):\n key = \"machine_id\"\n if isinstance(result, list):\n for r in result:\n r[key] = mid\n else:\n result[key] = mid\n return result\n\n\ndef process_archives(archives):\n for archive in archives:\n if os.path.isfile(archive):\n with extract(archive) as ex:\n ctx = create_context(ex.tmp_dir)\n broker = dr.Broker()\n broker[ctx.__class__] = ctx\n yield dr.run(broker=broker)\n else:\n ctx = create_context(archive)\n broker = dr.Broker()\n broker[ctx.__class__] = ctx\n yield dr.run(broker=broker)\n\n\ndef extract_facts(brokers):\n results = defaultdict(list)\n for b in brokers:\n mid = b[machine_id]\n for k, v in b.get_by_type(plugins.fact).items():\n r = attach_machine_id(v, mid)\n if isinstance(r, list):\n results[k].extend(r)\n else:\n results[k].append(r)\n return results\n\n\ndef process_facts(facts, meta, broker):\n broker[ClusterMeta] = meta\n for k, v in facts.items():\n broker[k] = pd.DataFrame(v)\n return dr.run(dr.COMPONENTS[dr.GROUPS.cluster], broker=broker)\n\n\ndef process_cluster(archives, broker, inventory=None):\n inventory = parse_inventory(inventory) if inventory else {}\n\n brokers = process_archives(archives)\n facts = extract_facts(brokers)\n meta = ClusterMeta(len(archives), inventory)\n\n return process_facts(facts, meta, broker)\n", "path": "insights/core/cluster.py"}]}
1,440
449
gh_patches_debug_20507
rasdani/github-patches
git_diff
freedomofpress__securedrop-5199
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- update Ansible due to CVE-2019-14864 ## Description [CVE-2019-14864](https://nvd.nist.gov/vuln/detail/CVE-2019-14864) is a vulnerability in Ansible's `no_log` flag for the splunk and sumologic plugins (sensitive data is incorrectly logged) but neither of which we're using. Regardless, we should update Ansible to a version that does not have this vulnerability in the next release. @emkll also pointed out to me that this is a [good time](https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html) to get onto the 2.8 series since the 2.7 series will become unmaintained when 2.10 is released (it's in development). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `install_files/ansible-base/callback_plugins/ansible_version_check.py` Content: ``` 1 # -*- encoding:utf-8 -*- 2 from __future__ import absolute_import, division, print_function, \ 3 unicode_literals 4 5 import sys 6 7 import ansible 8 9 try: 10 # Version 2.0+ 11 from ansible.plugins.callback import CallbackBase 12 except ImportError: 13 CallbackBase = object 14 15 16 def print_red_bold(text): 17 print('\x1b[31;1m' + text + '\x1b[0m') 18 19 20 class CallbackModule(CallbackBase): 21 def __init__(self): 22 # Can't use `on_X` because this isn't forwards compatible 23 # with Ansible 2.0+ 24 required_version = '2.7.13' # Keep synchronized with requirements files 25 if not ansible.__version__.startswith(required_version): 26 print_red_bold( 27 "SecureDrop restriction: only Ansible {version}.*" 28 "is supported." 29 .format(version=required_version) 30 ) 31 sys.exit(1) 32 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py --- a/install_files/ansible-base/callback_plugins/ansible_version_check.py +++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py @@ -19,13 +19,18 @@ class CallbackModule(CallbackBase): def __init__(self): - # Can't use `on_X` because this isn't forwards compatible - # with Ansible 2.0+ - required_version = '2.7.13' # Keep synchronized with requirements files - if not ansible.__version__.startswith(required_version): + # The acceptable version range needs to be synchronized with + # requirements files. + viable_start = [2, 9, 7] + viable_end = [2, 10, 0] + ansible_version = [int(v) for v in ansible.__version__.split('.')] + if not (viable_start <= ansible_version < viable_end): print_red_bold( - "SecureDrop restriction: only Ansible {version}.*" - "is supported." - .format(version=required_version) + "SecureDrop restriction: Ansible version must be at least {viable_start} " + "and less than {viable_end}." + .format( + viable_start='.'.join(str(v) for v in viable_start), + viable_end='.'.join(str(v) for v in viable_end), + ) ) sys.exit(1)
{"golden_diff": "diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py\n+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n@@ -19,13 +19,18 @@\n \n class CallbackModule(CallbackBase):\n def __init__(self):\n- # Can't use `on_X` because this isn't forwards compatible\n- # with Ansible 2.0+\n- required_version = '2.7.13' # Keep synchronized with requirements files\n- if not ansible.__version__.startswith(required_version):\n+ # The acceptable version range needs to be synchronized with\n+ # requirements files.\n+ viable_start = [2, 9, 7]\n+ viable_end = [2, 10, 0]\n+ ansible_version = [int(v) for v in ansible.__version__.split('.')]\n+ if not (viable_start <= ansible_version < viable_end):\n print_red_bold(\n- \"SecureDrop restriction: only Ansible {version}.*\"\n- \"is supported.\"\n- .format(version=required_version)\n+ \"SecureDrop restriction: Ansible version must be at least {viable_start} \"\n+ \"and less than {viable_end}.\"\n+ .format(\n+ viable_start='.'.join(str(v) for v in viable_start),\n+ viable_end='.'.join(str(v) for v in viable_end),\n+ )\n )\n sys.exit(1)\n", "issue": "update Ansible due to CVE-2019-14864 \n## Description\r\n\r\n[CVE-2019-14864](https://nvd.nist.gov/vuln/detail/CVE-2019-14864) is a vulnerability in Ansible's `no_log` flag for the splunk and sumologic plugins (sensitive data is incorrectly logged) but neither of which we're using. Regardless, we should update Ansible to a version that does not have this vulnerability in the next release.\r\n\r\n@emkll also pointed out to me that this is a [good time](https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html) to get onto the 2.8 series since the 2.7 series will become unmaintained when 2.10 is released (it's in development). 
\n", "before_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n required_version = '2.7.13' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n \"is supported.\"\n .format(version=required_version)\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}], "after_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # The acceptable version range needs to be synchronized with\n # requirements files.\n viable_start = [2, 9, 7]\n viable_end = [2, 10, 0]\n ansible_version = [int(v) for v in ansible.__version__.split('.')]\n if not (viable_start <= ansible_version < viable_end):\n print_red_bold(\n \"SecureDrop restriction: Ansible version must be at least {viable_start} \"\n \"and less than {viable_end}.\"\n .format(\n viable_start='.'.join(str(v) for v in viable_start),\n viable_end='.'.join(str(v) for v in viable_end),\n )\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}]}
716
347
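The patched callback in the record above replaces the exact-prefix match with a range check that leans on Python's lexicographic list comparison. A minimal standalone sketch of that logic, assuming plain dotted numeric version strings (the helper name is illustrative, not part of the repository):

```python
# Sketch of the version-range check from the golden diff above.
# Python compares lists element by element, so [2, 9, 12] sits between
# [2, 9, 7] and [2, 10, 0] even though 12 > 7 in the last position.
viable_start = [2, 9, 7]
viable_end = [2, 10, 0]

def version_ok(version_string):
    parsed = [int(v) for v in version_string.split('.')]
    return viable_start <= parsed < viable_end

assert version_ok('2.9.7')
assert version_ok('2.9.12')
assert not version_ok('2.7.13')   # old, vulnerable series is rejected
assert not version_ok('2.10.0')   # next minor series is rejected
```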
gh_patches_debug_24422
rasdani/github-patches
git_diff
buildbot__buildbot-3569
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Uninitialized ASN.1 value when using LdapUserInfo After updating some Python packages, I got the following exception on login: ``` Traceback (most recent call last): File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1442, in gotResult _inlineCallbacks(r, g, deferred) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1432, in _inlineCallbacks deferred.errback() File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 500, in errback self._startRunCallbacks(fail) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 567, in _startRunCallbacks self._runCallbacks() --- <exception caught here> --- File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/resource.py", line 91, in failHttpError f.trap(Error) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/failure.py", line 346, in trap self.raiseException() File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/resource.py", line 84, in failHttpRedirect f.trap(Redirect) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/failure.py", line 346, in trap self.raiseException() File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1384, in _inlineCallbacks result = result.throwExceptionIntoGenerator(g) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/failure.py", line 393, in throwExceptionIntoGenerator return g.throw(self.type, self.value, self.tb) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/oauth2.py", line 67, in renderLogin infos = yield self.auth.userInfoProvider.getUserInfo(details['username']) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/threadpool.py", line 250, in inContext result = inContext.theWork() File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/threadpool.py", line 266, in <lambda> inContext.theWork = lambda: context.call(ctx, func, *args, **kw) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/context.py", line 122, in callWithContext return self.currentContext().callWithContext(ctx, func, *args, **kw) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/context.py", line 85, in callWithContext return func(*args,**kw) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/ldapuserinfo.py", line 92, in thd c = self.connectLdap() File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/ldapuserinfo.py", line 82, in connectLdap authentication=auth) File 
"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/core/connection.py", line 278, in __init__ self.bind(read_server_info=True) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/core/connection.py", line 427, in bind response = self.post_send_single_response(self.send('bindRequest', request, controls)) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/sync.py", line 122, in post_send_single_response responses, result = self.get_response(message_id) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/base.py", line 298, in get_response responses = self._get_response(message_id) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/sync.py", line 168, in _get_response dict_response = self.decode_response(ldap_resp) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/base.py", line 403, in decode_response result = bind_response_to_dict(component) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/operation/bind.py", line 119, in bind_response_to_dict 'saslCreds': bytes(response['serverSaslCreds']) if response['serverSaslCreds'] is not None else None} File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/pyasn1/type/univ.py", line 939, in __str__ return str(self._value) File "/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/pyasn1/type/base.py", line 186, in plug raise error.PyAsn1Error('Uninitialized ASN.1 value ("%s" attribute looked up)' % name) pyasn1.error.PyAsn1Error: Uninitialized ASN.1 value ("__str__" attribute looked up) ``` The problem disappeared after downgrading pyasn1 from 0.3.2 to the version 0.1.9, which I used before. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `master/buildbot/www/ldapuserinfo.py` Content: ``` 1 # This file is part of Buildbot. Buildbot is free software: you can 2 # redistribute it and/or modify it under the terms of the GNU General Public 3 # License as published by the Free Software Foundation, version 2. 4 # 5 # This program is distributed in the hope that it will be useful, but WITHOUT 6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more 8 # details. 9 # 10 # You should have received a copy of the GNU General Public License along with 11 # this program; if not, write to the Free Software Foundation, Inc., 51 12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
13 # 14 # Copyright Buildbot Team Members 15 16 from __future__ import absolute_import 17 from __future__ import print_function 18 from future.moves.urllib.parse import urlparse 19 20 import ldap3 21 22 from twisted.internet import threads 23 24 from buildbot.util import flatten 25 from buildbot.www import auth 26 from buildbot.www import avatar 27 28 29 class LdapUserInfo(avatar.AvatarBase, auth.UserInfoProviderBase): 30 name = 'ldap' 31 32 def __init__(self, uri, bindUser, bindPw, 33 accountBase, 34 accountPattern, 35 accountFullName, 36 accountEmail, 37 groupBase=None, 38 groupMemberPattern=None, 39 groupName=None, 40 avatarPattern=None, 41 avatarData=None, 42 accountExtraFields=None): 43 avatar.AvatarBase.__init__(self) 44 auth.UserInfoProviderBase.__init__(self) 45 self.uri = uri 46 self.bindUser = bindUser 47 self.bindPw = bindPw 48 self.accountBase = accountBase 49 self.accountEmail = accountEmail 50 self.accountPattern = accountPattern 51 self.accountFullName = accountFullName 52 group_params = [p for p in (groupName, groupMemberPattern, groupBase) 53 if p is not None] 54 if len(group_params) not in (0, 3): 55 raise ValueError( 56 "Incomplete LDAP groups configuration. " 57 "To use Ldap groups, you need to specify the three " 58 "parameters (groupName, groupMemberPattern and groupBase). ") 59 60 self.groupName = groupName 61 self.groupMemberPattern = groupMemberPattern 62 self.groupBase = groupBase 63 self.avatarPattern = avatarPattern 64 self.avatarData = avatarData 65 if accountExtraFields is None: 66 accountExtraFields = [] 67 self.accountExtraFields = accountExtraFields 68 69 def connectLdap(self): 70 server = urlparse(self.uri) 71 netloc = server.netloc.split(":") 72 # define the server and the connection 73 s = ldap3.Server(netloc[0], port=int(netloc[1]), use_ssl=server.scheme == 'ldaps', 74 get_info=ldap3.GET_ALL_INFO) 75 76 auth = ldap3.AUTH_SIMPLE 77 if self.bindUser is None and self.bindPw is None: 78 auth = ldap3.AUTH_ANONYMOUS 79 80 c = ldap3.Connection(s, auto_bind=True, client_strategy=ldap3.STRATEGY_SYNC, 81 user=self.bindUser, password=self.bindPw, 82 authentication=auth) 83 return c 84 85 def search(self, c, base, filterstr='f', attributes=None): 86 c.search( 87 base, filterstr, ldap3.SEARCH_SCOPE_WHOLE_SUBTREE, attributes=attributes) 88 return c.response 89 90 def getUserInfo(self, username): 91 def thd(): 92 c = self.connectLdap() 93 infos = {'username': username} 94 pattern = self.accountPattern % dict(username=username) 95 res = self.search(c, self.accountBase, pattern, 96 attributes=[ 97 self.accountEmail, self.accountFullName] + 98 self.accountExtraFields) 99 if len(res) != 1: 100 raise KeyError( 101 "ldap search \"%s\" returned %d results" % (pattern, len(res))) 102 dn, ldap_infos = res[0]['dn'], res[0]['raw_attributes'] 103 if isinstance(dn, bytes): 104 dn = dn.decode('utf-8') 105 106 def getLdapInfo(x): 107 if isinstance(x, list): 108 return x[0] 109 return x 110 infos['full_name'] = getLdapInfo(ldap_infos[self.accountFullName]) 111 infos['email'] = getLdapInfo(ldap_infos[self.accountEmail]) 112 for f in self.accountExtraFields: 113 if f in ldap_infos: 114 infos[f] = getLdapInfo(ldap_infos[f]) 115 116 if self.groupMemberPattern is None: 117 infos['groups'] = [] 118 return infos 119 120 # needs double quoting of backslashing 121 pattern = self.groupMemberPattern % dict(dn=dn) 122 res = self.search(c, self.groupBase, pattern, 123 attributes=[self.groupName]) 124 infos['groups'] = flatten( 125 [group_infos['raw_attributes'][self.groupName] for group_infos in 
res]) 126 return infos 127 return threads.deferToThread(thd) 128 129 def findAvatarMime(self, data): 130 # http://en.wikipedia.org/wiki/List_of_file_signatures 131 if data.startswith("\xff\xd8\xff"): 132 return ("image/jpeg", data) 133 if data.startswith("\x89PNG"): 134 return ("image/png", data) 135 if data.startswith("GIF8"): 136 return ("image/gif", data) 137 # ignore unknown image format 138 return None 139 140 def getUserAvatar(self, user_email, size, defaultAvatarUrl): 141 def thd(): 142 c = self.connectLdap() 143 pattern = self.avatarPattern % dict(email=user_email) 144 res = self.search(c, self.accountBase, pattern, 145 attributes=[self.avatarData]) 146 if not res: 147 return None 148 ldap_infos = res[0]['raw_attributes'] 149 if self.avatarData in ldap_infos and ldap_infos[self.avatarData]: 150 data = ldap_infos[self.avatarData][0] 151 return self.findAvatarMime(data) 152 return None 153 return threads.deferToThread(thd) 154 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/master/buildbot/www/ldapuserinfo.py b/master/buildbot/www/ldapuserinfo.py --- a/master/buildbot/www/ldapuserinfo.py +++ b/master/buildbot/www/ldapuserinfo.py @@ -71,20 +71,19 @@ netloc = server.netloc.split(":") # define the server and the connection s = ldap3.Server(netloc[0], port=int(netloc[1]), use_ssl=server.scheme == 'ldaps', - get_info=ldap3.GET_ALL_INFO) + get_info=ldap3.ALL) - auth = ldap3.AUTH_SIMPLE + auth = ldap3.SIMPLE if self.bindUser is None and self.bindPw is None: - auth = ldap3.AUTH_ANONYMOUS + auth = ldap3.ANONYMOUS - c = ldap3.Connection(s, auto_bind=True, client_strategy=ldap3.STRATEGY_SYNC, + c = ldap3.Connection(s, auto_bind=True, client_strategy=ldap3.SYNC, user=self.bindUser, password=self.bindPw, authentication=auth) return c def search(self, c, base, filterstr='f', attributes=None): - c.search( - base, filterstr, ldap3.SEARCH_SCOPE_WHOLE_SUBTREE, attributes=attributes) + c.search(base, filterstr, ldap3.SUBTREE, attributes=attributes) return c.response def getUserInfo(self, username):
{"golden_diff": "diff --git a/master/buildbot/www/ldapuserinfo.py b/master/buildbot/www/ldapuserinfo.py\n--- a/master/buildbot/www/ldapuserinfo.py\n+++ b/master/buildbot/www/ldapuserinfo.py\n@@ -71,20 +71,19 @@\n netloc = server.netloc.split(\":\")\n # define the server and the connection\n s = ldap3.Server(netloc[0], port=int(netloc[1]), use_ssl=server.scheme == 'ldaps',\n- get_info=ldap3.GET_ALL_INFO)\n+ get_info=ldap3.ALL)\n \n- auth = ldap3.AUTH_SIMPLE\n+ auth = ldap3.SIMPLE\n if self.bindUser is None and self.bindPw is None:\n- auth = ldap3.AUTH_ANONYMOUS\n+ auth = ldap3.ANONYMOUS\n \n- c = ldap3.Connection(s, auto_bind=True, client_strategy=ldap3.STRATEGY_SYNC,\n+ c = ldap3.Connection(s, auto_bind=True, client_strategy=ldap3.SYNC,\n user=self.bindUser, password=self.bindPw,\n authentication=auth)\n return c\n \n def search(self, c, base, filterstr='f', attributes=None):\n- c.search(\n- base, filterstr, ldap3.SEARCH_SCOPE_WHOLE_SUBTREE, attributes=attributes)\n+ c.search(base, filterstr, ldap3.SUBTREE, attributes=attributes)\n return c.response\n \n def getUserInfo(self, username):\n", "issue": "Uninitialized ASN.1 value when using LdapUserInfo\nAfter updating some Python packages, I got the following exception on login:\r\n\r\n```\r\n Traceback (most recent call last):\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 1442, in gotResult\r\n _inlineCallbacks(r, g, deferred)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 1432, in _inlineCallbacks\r\n deferred.errback()\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 500, in errback\r\n self._startRunCallbacks(fail)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 567, in _startRunCallbacks\r\n self._runCallbacks()\r\n --- <exception caught here> ---\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 653, in _runCallbacks\r\n current.result = callback(current.result, *args, **kw)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/resource.py\", line 91, in failHttpError\r\n f.trap(Error)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/failure.py\", line 346, in trap\r\n self.raiseException()\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 653, in _runCallbacks\r\n current.result = callback(current.result, *args, **kw)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/resource.py\", line 84, in failHttpRedirect\r\n f.trap(Redirect)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/failure.py\", line 346, in trap\r\n self.raiseException()\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 1384, in _inlineCallbacks\r\n result = result.throwExceptionIntoGenerator(g)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/failure.py\", line 393, in throwExceptionIntoGenerator\r\n return g.throw(self.type, self.value, self.tb)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/oauth2.py\", line 67, in renderLogin\r\n infos = yield 
self.auth.userInfoProvider.getUserInfo(details['username'])\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/threadpool.py\", line 250, in inContext\r\n result = inContext.theWork()\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/threadpool.py\", line 266, in <lambda>\r\n inContext.theWork = lambda: context.call(ctx, func, *args, **kw)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/context.py\", line 122, in callWithContext\r\n return self.currentContext().callWithContext(ctx, func, *args, **kw)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/twisted/python/context.py\", line 85, in callWithContext\r\n return func(*args,**kw)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/ldapuserinfo.py\", line 92, in thd\r\n c = self.connectLdap()\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/buildbot/www/ldapuserinfo.py\", line 82, in connectLdap\r\n authentication=auth)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/core/connection.py\", line 278, in __init__\r\n self.bind(read_server_info=True)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/core/connection.py\", line 427, in bind\r\n response = self.post_send_single_response(self.send('bindRequest', request, controls))\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/sync.py\", line 122, in post_send_single_response\r\n responses, result = self.get_response(message_id)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/base.py\", line 298, in get_response\r\n responses = self._get_response(message_id)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/sync.py\", line 168, in _get_response\r\n dict_response = self.decode_response(ldap_resp)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/strategy/base.py\", line 403, in decode_response\r\n result = bind_response_to_dict(component)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/ldap3/operation/bind.py\", line 119, in bind_response_to_dict\r\n 'saslCreds': bytes(response['serverSaslCreds']) if response['serverSaslCreds'] is not None else None}\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/pyasn1/type/univ.py\", line 939, in __str__\r\n return str(self._value)\r\n File \"/home/buibot00/venv/bb_twisted_test/local/lib/python2.7/site-packages/pyasn1/type/base.py\", line 186, in plug\r\n raise error.PyAsn1Error('Uninitialized ASN.1 value (\"%s\" attribute looked up)' % name)\r\n pyasn1.error.PyAsn1Error: Uninitialized ASN.1 value (\"__str__\" attribute looked up)\r\n```\r\n\r\nThe problem disappeared after downgrading pyasn1 from 0.3.2 to the version 0.1.9, which I used before.\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom future.moves.urllib.parse import urlparse\n\nimport ldap3\n\nfrom twisted.internet import threads\n\nfrom buildbot.util import flatten\nfrom buildbot.www import auth\nfrom buildbot.www import avatar\n\n\nclass LdapUserInfo(avatar.AvatarBase, auth.UserInfoProviderBase):\n name = 'ldap'\n\n def __init__(self, uri, bindUser, bindPw,\n accountBase,\n accountPattern,\n accountFullName,\n accountEmail,\n groupBase=None,\n groupMemberPattern=None,\n groupName=None,\n avatarPattern=None,\n avatarData=None,\n accountExtraFields=None):\n avatar.AvatarBase.__init__(self)\n auth.UserInfoProviderBase.__init__(self)\n self.uri = uri\n self.bindUser = bindUser\n self.bindPw = bindPw\n self.accountBase = accountBase\n self.accountEmail = accountEmail\n self.accountPattern = accountPattern\n self.accountFullName = accountFullName\n group_params = [p for p in (groupName, groupMemberPattern, groupBase)\n if p is not None]\n if len(group_params) not in (0, 3):\n raise ValueError(\n \"Incomplete LDAP groups configuration. \"\n \"To use Ldap groups, you need to specify the three \"\n \"parameters (groupName, groupMemberPattern and groupBase). \")\n\n self.groupName = groupName\n self.groupMemberPattern = groupMemberPattern\n self.groupBase = groupBase\n self.avatarPattern = avatarPattern\n self.avatarData = avatarData\n if accountExtraFields is None:\n accountExtraFields = []\n self.accountExtraFields = accountExtraFields\n\n def connectLdap(self):\n server = urlparse(self.uri)\n netloc = server.netloc.split(\":\")\n # define the server and the connection\n s = ldap3.Server(netloc[0], port=int(netloc[1]), use_ssl=server.scheme == 'ldaps',\n get_info=ldap3.GET_ALL_INFO)\n\n auth = ldap3.AUTH_SIMPLE\n if self.bindUser is None and self.bindPw is None:\n auth = ldap3.AUTH_ANONYMOUS\n\n c = ldap3.Connection(s, auto_bind=True, client_strategy=ldap3.STRATEGY_SYNC,\n user=self.bindUser, password=self.bindPw,\n authentication=auth)\n return c\n\n def search(self, c, base, filterstr='f', attributes=None):\n c.search(\n base, filterstr, ldap3.SEARCH_SCOPE_WHOLE_SUBTREE, attributes=attributes)\n return c.response\n\n def getUserInfo(self, username):\n def thd():\n c = self.connectLdap()\n infos = {'username': username}\n pattern = self.accountPattern % dict(username=username)\n res = self.search(c, self.accountBase, pattern,\n attributes=[\n self.accountEmail, self.accountFullName] +\n self.accountExtraFields)\n if len(res) != 1:\n raise KeyError(\n \"ldap search \\\"%s\\\" returned %d results\" % (pattern, len(res)))\n dn, ldap_infos = res[0]['dn'], res[0]['raw_attributes']\n if isinstance(dn, bytes):\n dn = dn.decode('utf-8')\n\n def getLdapInfo(x):\n if isinstance(x, list):\n return x[0]\n return x\n infos['full_name'] = getLdapInfo(ldap_infos[self.accountFullName])\n infos['email'] = getLdapInfo(ldap_infos[self.accountEmail])\n for f in self.accountExtraFields:\n if f in ldap_infos:\n infos[f] = getLdapInfo(ldap_infos[f])\n\n if self.groupMemberPattern is None:\n infos['groups'] = []\n return infos\n\n # needs double quoting of backslashing\n pattern = self.groupMemberPattern % dict(dn=dn)\n res = self.search(c, self.groupBase, 
pattern,\n attributes=[self.groupName])\n infos['groups'] = flatten(\n [group_infos['raw_attributes'][self.groupName] for group_infos in res])\n return infos\n return threads.deferToThread(thd)\n\n def findAvatarMime(self, data):\n # http://en.wikipedia.org/wiki/List_of_file_signatures\n if data.startswith(\"\\xff\\xd8\\xff\"):\n return (\"image/jpeg\", data)\n if data.startswith(\"\\x89PNG\"):\n return (\"image/png\", data)\n if data.startswith(\"GIF8\"):\n return (\"image/gif\", data)\n # ignore unknown image format\n return None\n\n def getUserAvatar(self, user_email, size, defaultAvatarUrl):\n def thd():\n c = self.connectLdap()\n pattern = self.avatarPattern % dict(email=user_email)\n res = self.search(c, self.accountBase, pattern,\n attributes=[self.avatarData])\n if not res:\n return None\n ldap_infos = res[0]['raw_attributes']\n if self.avatarData in ldap_infos and ldap_infos[self.avatarData]:\n data = ldap_infos[self.avatarData][0]\n return self.findAvatarMime(data)\n return None\n return threads.deferToThread(thd)\n", "path": "master/buildbot/www/ldapuserinfo.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom future.moves.urllib.parse import urlparse\n\nimport ldap3\n\nfrom twisted.internet import threads\n\nfrom buildbot.util import flatten\nfrom buildbot.www import auth\nfrom buildbot.www import avatar\n\n\nclass LdapUserInfo(avatar.AvatarBase, auth.UserInfoProviderBase):\n name = 'ldap'\n\n def __init__(self, uri, bindUser, bindPw,\n accountBase,\n accountPattern,\n accountFullName,\n accountEmail,\n groupBase=None,\n groupMemberPattern=None,\n groupName=None,\n avatarPattern=None,\n avatarData=None,\n accountExtraFields=None):\n avatar.AvatarBase.__init__(self)\n auth.UserInfoProviderBase.__init__(self)\n self.uri = uri\n self.bindUser = bindUser\n self.bindPw = bindPw\n self.accountBase = accountBase\n self.accountEmail = accountEmail\n self.accountPattern = accountPattern\n self.accountFullName = accountFullName\n group_params = [p for p in (groupName, groupMemberPattern, groupBase)\n if p is not None]\n if len(group_params) not in (0, 3):\n raise ValueError(\n \"Incomplete LDAP groups configuration. \"\n \"To use Ldap groups, you need to specify the three \"\n \"parameters (groupName, groupMemberPattern and groupBase). 
\")\n\n self.groupName = groupName\n self.groupMemberPattern = groupMemberPattern\n self.groupBase = groupBase\n self.avatarPattern = avatarPattern\n self.avatarData = avatarData\n if accountExtraFields is None:\n accountExtraFields = []\n self.accountExtraFields = accountExtraFields\n\n def connectLdap(self):\n server = urlparse(self.uri)\n netloc = server.netloc.split(\":\")\n # define the server and the connection\n s = ldap3.Server(netloc[0], port=int(netloc[1]), use_ssl=server.scheme == 'ldaps',\n get_info=ldap3.ALL)\n\n auth = ldap3.SIMPLE\n if self.bindUser is None and self.bindPw is None:\n auth = ldap3.ANONYMOUS\n\n c = ldap3.Connection(s, auto_bind=True, client_strategy=ldap3.SYNC,\n user=self.bindUser, password=self.bindPw,\n authentication=auth)\n return c\n\n def search(self, c, base, filterstr='f', attributes=None):\n c.search(base, filterstr, ldap3.SUBTREE, attributes=attributes)\n return c.response\n\n def getUserInfo(self, username):\n def thd():\n c = self.connectLdap()\n infos = {'username': username}\n pattern = self.accountPattern % dict(username=username)\n res = self.search(c, self.accountBase, pattern,\n attributes=[\n self.accountEmail, self.accountFullName] +\n self.accountExtraFields)\n if len(res) != 1:\n raise KeyError(\n \"ldap search \\\"%s\\\" returned %d results\" % (pattern, len(res)))\n dn, ldap_infos = res[0]['dn'], res[0]['raw_attributes']\n if isinstance(dn, bytes):\n dn = dn.decode('utf-8')\n\n def getLdapInfo(x):\n if isinstance(x, list):\n return x[0]\n return x\n infos['full_name'] = getLdapInfo(ldap_infos[self.accountFullName])\n infos['email'] = getLdapInfo(ldap_infos[self.accountEmail])\n for f in self.accountExtraFields:\n if f in ldap_infos:\n infos[f] = getLdapInfo(ldap_infos[f])\n\n if self.groupMemberPattern is None:\n infos['groups'] = []\n return infos\n\n # needs double quoting of backslashing\n pattern = self.groupMemberPattern % dict(dn=dn)\n res = self.search(c, self.groupBase, pattern,\n attributes=[self.groupName])\n infos['groups'] = flatten(\n [group_infos['raw_attributes'][self.groupName] for group_infos in res])\n return infos\n return threads.deferToThread(thd)\n\n def findAvatarMime(self, data):\n # http://en.wikipedia.org/wiki/List_of_file_signatures\n if data.startswith(\"\\xff\\xd8\\xff\"):\n return (\"image/jpeg\", data)\n if data.startswith(\"\\x89PNG\"):\n return (\"image/png\", data)\n if data.startswith(\"GIF8\"):\n return (\"image/gif\", data)\n # ignore unknown image format\n return None\n\n def getUserAvatar(self, user_email, size, defaultAvatarUrl):\n def thd():\n c = self.connectLdap()\n pattern = self.avatarPattern % dict(email=user_email)\n res = self.search(c, self.accountBase, pattern,\n attributes=[self.avatarData])\n if not res:\n return None\n ldap_infos = res[0]['raw_attributes']\n if self.avatarData in ldap_infos and ldap_infos[self.avatarData]:\n data = ldap_infos[self.avatarData][0]\n return self.findAvatarMime(data)\n return None\n return threads.deferToThread(thd)\n", "path": "master/buildbot/www/ldapuserinfo.py"}]}
3543
323
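Functionally, the buildbot patch in the record above is a rename of ldap3 1.x module constants to their 2.x equivalents (GET_ALL_INFO → ALL, AUTH_SIMPLE → SIMPLE, AUTH_ANONYMOUS → ANONYMOUS, STRATEGY_SYNC → SYNC, SEARCH_SCOPE_WHOLE_SUBTREE → SUBTREE), which moves the code onto an ldap3 release that coexists with newer pyasn1. A minimal bind-and-search sketch with the 2.x names; the host, base DN, and credentials are placeholders, not values from the repository:

```python
# Hedged sketch of an ldap3 2.x connection using the renamed constants.
import ldap3

server = ldap3.Server('ldap.example.org', port=636, use_ssl=True,
                      get_info=ldap3.ALL)            # was ldap3.GET_ALL_INFO
conn = ldap3.Connection(server,
                        user='cn=binduser,dc=example,dc=org',
                        password='secret',
                        authentication=ldap3.SIMPLE,   # was ldap3.AUTH_SIMPLE
                        client_strategy=ldap3.SYNC,    # was ldap3.STRATEGY_SYNC
                        auto_bind=True)
conn.search('dc=example,dc=org', '(uid=jdoe)',
            ldap3.SUBTREE,                 # was ldap3.SEARCH_SCOPE_WHOLE_SUBTREE
            attributes=['cn', 'mail'])
print(conn.response)
```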
gh_patches_debug_18849
rasdani/github-patches
git_diff
google__jax-1335
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `slogdet` sign is incorrect In the following, numpy disagrees with JAX ```python mat = np.array([[[-0.01]], [[-0.01]]]) print(np.linalg.slogdet(mat)) print(jnp.linalg.slogdet(jnp.array(mat))) ``` => ``` (array([-1., -1.]), array([-4.60517019, -4.60517019])) (DeviceArray([1., 1.]), DeviceArray([-4.60517019, -4.60517019])) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `jax/numpy/linalg.py` Content: ``` 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from __future__ import absolute_import 16 from __future__ import division 17 from __future__ import print_function 18 19 from functools import partial 20 21 import numpy as onp 22 import warnings 23 24 from jax import jit 25 from .. import lax 26 from .. import lax_linalg 27 from .lax_numpy import _not_implemented 28 from .lax_numpy import _wraps 29 from . import lax_numpy as np 30 from ..util import get_module_functions 31 from ..lib import xla_bridge 32 33 34 _T = lambda x: np.swapaxes(x, -1, -2) 35 36 37 def _promote_arg_dtypes(*args): 38 """Promotes `args` to a common inexact type.""" 39 def _to_inexact_type(type): 40 return type if np.issubdtype(type, np.inexact) else np.float64 41 inexact_types = [_to_inexact_type(np._dtype(arg)) for arg in args] 42 dtype = xla_bridge.canonicalize_dtype(np.result_type(*inexact_types)) 43 args = [lax.convert_element_type(arg, dtype) for arg in args] 44 if len(args) == 1: 45 return args[0] 46 else: 47 return args 48 49 50 @_wraps(onp.linalg.cholesky) 51 def cholesky(a): 52 a = _promote_arg_dtypes(np.asarray(a)) 53 return lax_linalg.cholesky(a) 54 55 56 @_wraps(onp.linalg.svd) 57 def svd(a, full_matrices=True, compute_uv=True): 58 a = _promote_arg_dtypes(np.asarray(a)) 59 return lax_linalg.svd(a, full_matrices, compute_uv) 60 61 62 @_wraps(onp.linalg.slogdet) 63 def slogdet(a): 64 a = _promote_arg_dtypes(np.asarray(a)) 65 dtype = lax.dtype(a) 66 a_shape = np.shape(a) 67 if len(a_shape) < 2 or a_shape[-1] != a_shape[-2]: 68 msg = "Argument to slogdet() must have shape [..., n, n], got {}" 69 raise ValueError(msg.format(a_shape)) 70 lu, pivot = lax_linalg.lu(a) 71 diag = np.diagonal(lu, axis1=-2, axis2=-1) 72 is_zero = np.any(diag == np.array(0, dtype=dtype), axis=-1) 73 parity = np.count_nonzero(pivot != np.arange(a_shape[-1]), axis=-1) 74 if np.iscomplexobj(a): 75 sign = np.prod(diag / np.abs(diag)) 76 else: 77 sign = np.array(1, dtype=dtype) 78 parity = parity + np.count_nonzero(diag < 0) 79 sign = np.where(is_zero, 80 np.array(0, dtype=dtype), 81 sign * np.array(-2 * (parity % 2) + 1, dtype=dtype)) 82 logdet = np.where( 83 is_zero, np.array(-np.inf, dtype=dtype), 84 np.sum(np.log(np.abs(diag)), axis=-1)) 85 return sign, np.real(logdet) 86 87 88 @_wraps(onp.linalg.det) 89 def det(a): 90 sign, logdet = slogdet(a) 91 return sign * np.exp(logdet) 92 93 
94 @_wraps(onp.linalg.eig) 95 def eig(a): 96 a = _promote_arg_dtypes(np.asarray(a)) 97 w, vl, vr = lax_linalg.eig(a) 98 return w, vr 99 100 101 @_wraps(onp.linalg.eigh) 102 def eigh(a, UPLO=None, symmetrize_input=True): 103 if UPLO is None or UPLO == "L": 104 lower = True 105 elif UPLO == "U": 106 lower = False 107 else: 108 msg = "UPLO must be one of None, 'L', or 'U', got {}".format(UPLO) 109 raise ValueError(msg) 110 111 a = _promote_arg_dtypes(np.asarray(a)) 112 v, w = lax_linalg.eigh(a, lower=lower, symmetrize_input=symmetrize_input) 113 return w, v 114 115 116 @_wraps(onp.linalg.inv) 117 def inv(a): 118 if np.ndim(a) < 2 or a.shape[-1] != a.shape[-2]: 119 raise ValueError("Argument to inv must have shape [..., n, n], got {}." 120 .format(np.shape(a))) 121 return solve( 122 a, lax.broadcast(np.eye(a.shape[-1], dtype=lax.dtype(a)), a.shape[:-2])) 123 124 125 @partial(jit, static_argnums=(1, 2, 3)) 126 def _norm(x, ord, axis, keepdims): 127 x = _promote_arg_dtypes(np.asarray(x)) 128 x_shape = np.shape(x) 129 ndim = len(x_shape) 130 131 if axis is None: 132 # NumPy has an undocumented behavior that admits arbitrary rank inputs if 133 # `ord` is None: https://github.com/numpy/numpy/issues/14215 134 if ord is None: 135 return np.sqrt(np.sum(np.real(x * np.conj(x)), keepdims=keepdims)) 136 axis = tuple(range(ndim)) 137 elif isinstance(axis, tuple): 138 axis = tuple(np._canonicalize_axis(x, ndim) for x in axis) 139 else: 140 axis = (np._canonicalize_axis(axis, ndim),) 141 142 num_axes = len(axis) 143 if num_axes == 1: 144 if ord is None or ord == 2: 145 return np.sqrt(np.sum(np.real(x * np.conj(x)), axis=axis, 146 keepdims=keepdims)) 147 elif ord == np.inf: 148 return np.amax(np.abs(x), axis=axis, keepdims=keepdims) 149 elif ord == -np.inf: 150 return np.amin(np.abs(x), axis=axis, keepdims=keepdims) 151 elif ord == 0: 152 return np.sum(x != 0, dtype=np.finfo(lax.dtype(x)).dtype, 153 axis=axis, keepdims=keepdims) 154 elif ord == 1: 155 # Numpy has a special case for ord == 1 as an optimization. We don't 156 # really need the optimization (XLA could do it for us), but the Numpy 157 # code has slightly different type promotion semantics, so we need a 158 # special case too. 159 return np.sum(np.abs(x), axis=axis, keepdims=keepdims) 160 else: 161 return np.power(np.sum(np.abs(x) ** ord, axis=axis, keepdims=keepdims), 162 1. 
/ ord) 163 164 elif num_axes == 2: 165 row_axis, col_axis = axis 166 if ord is None or ord in ('f', 'fro'): 167 return np.sqrt(np.sum(np.real(x * np.conj(x)), axis=axis, 168 keepdims=keepdims)) 169 elif ord == 1: 170 if not keepdims and col_axis > row_axis: 171 col_axis -= 1 172 return np.amax(np.sum(np.abs(x), axis=row_axis, keepdims=keepdims), 173 axis=col_axis, keepdims=keepdims) 174 elif ord == -1: 175 if not keepdims and col_axis > row_axis: 176 col_axis -= 1 177 return np.amin(np.sum(np.abs(x), axis=row_axis, keepdims=keepdims), 178 axis=col_axis, keepdims=keepdims) 179 elif ord == np.inf: 180 if not keepdims and row_axis > col_axis: 181 row_axis -= 1 182 return np.amax(np.sum(np.abs(x), axis=col_axis, keepdims=keepdims), 183 axis=row_axis, keepdims=keepdims) 184 elif ord == -np.inf: 185 if not keepdims and row_axis > col_axis: 186 row_axis -= 1 187 return np.amin(np.sum(np.abs(x), axis=col_axis, keepdims=keepdims), 188 axis=row_axis, keepdims=keepdims) 189 elif ord in ('nuc', 2, -2): 190 x = np.moveaxis(x, axis, (-2, -1)) 191 if ord == 2: 192 reducer = np.amax 193 elif ord == -2: 194 reducer = np.amin 195 else: 196 reducer = np.sum 197 y = reducer(svd(x, compute_uv=False), axis=-1) 198 if keepdims: 199 result_shape = list(x_shape) 200 result_shape[axis[0]] = 1 201 result_shape[axis[1]] = 1 202 y = np.reshape(y, result_shape) 203 return y 204 else: 205 raise ValueError("Invalid order '{}' for matrix norm.".format(ord)) 206 else: 207 raise ValueError( 208 "Invalid axis values ({}) for np.linalg.norm.".format(axis)) 209 210 @_wraps(onp.linalg.norm) 211 def norm(x, ord=None, axis=None, keepdims=False): 212 return _norm(x, ord, axis, keepdims) 213 214 215 @_wraps(onp.linalg.qr) 216 def qr(a, mode="reduced"): 217 if mode in ("reduced", "r", "full"): 218 full_matrices = False 219 elif mode == "complete": 220 full_matrices = True 221 else: 222 raise ValueError("Unsupported QR decomposition mode '{}'".format(mode)) 223 a = _promote_arg_dtypes(np.asarray(a)) 224 q, r = lax_linalg.qr(a, full_matrices) 225 if mode == "r": 226 return r 227 return q, r 228 229 230 @_wraps(onp.linalg.solve) 231 @jit 232 def solve(a, b): 233 a, b = _promote_arg_dtypes(np.asarray(a), np.asarray(b)) 234 a_shape = np.shape(a) 235 b_shape = np.shape(b) 236 a_ndims = len(a_shape) 237 b_ndims = len(b_shape) 238 if not (a_ndims >= 2 and a_shape[-1] == a_shape[-2] and b_ndims >= 1): 239 msg = ("The arguments to solve must have shapes a=[..., m, m] and " 240 "b=[..., m, k] or b=[..., m]; got a={} and b={}") 241 raise ValueError(msg.format(a_shape, b_shape)) 242 lu, pivots = lax_linalg.lu(a) 243 dtype = lax.dtype(a) 244 245 m = a_shape[-1] 246 247 # Numpy treats the RHS as a (batched) vector if the number of dimensions 248 # differ by 1. Otherwise, broadcasting rules apply. 
249 x = b[..., None] if a_ndims == b_ndims + 1 else b 250 251 batch_dims = lax.broadcast_shapes(lu.shape[:-2], x.shape[:-2]) 252 x = np.broadcast_to(x, batch_dims + x.shape[-2:]) 253 lu = np.broadcast_to(lu, batch_dims + lu.shape[-2:]) 254 255 permutation = lax_linalg.lu_pivots_to_permutation(pivots, m) 256 permutation = np.broadcast_to(permutation, batch_dims + (m,)) 257 iotas = np.ix_(*(lax.iota(np.int32, b) for b in batch_dims + (1,))) 258 x = x[iotas[:-1] + (permutation, slice(None))] 259 260 x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=True, 261 unit_diagonal=True) 262 x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=False) 263 264 return x[..., 0] if a_ndims == b_ndims + 1 else x 265 266 267 for func in get_module_functions(onp.linalg): 268 if func.__name__ not in globals(): 269 globals()[func.__name__] = _not_implemented(func) 270 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/jax/numpy/linalg.py b/jax/numpy/linalg.py --- a/jax/numpy/linalg.py +++ b/jax/numpy/linalg.py @@ -60,6 +60,7 @@ @_wraps(onp.linalg.slogdet) +@jit def slogdet(a): a = _promote_arg_dtypes(np.asarray(a)) dtype = lax.dtype(a) @@ -72,10 +73,10 @@ is_zero = np.any(diag == np.array(0, dtype=dtype), axis=-1) parity = np.count_nonzero(pivot != np.arange(a_shape[-1]), axis=-1) if np.iscomplexobj(a): - sign = np.prod(diag / np.abs(diag)) + sign = np.prod(diag / np.abs(diag), axis=-1) else: sign = np.array(1, dtype=dtype) - parity = parity + np.count_nonzero(diag < 0) + parity = parity + np.count_nonzero(diag < 0, axis=-1) sign = np.where(is_zero, np.array(0, dtype=dtype), sign * np.array(-2 * (parity % 2) + 1, dtype=dtype))
{"golden_diff": "diff --git a/jax/numpy/linalg.py b/jax/numpy/linalg.py\n--- a/jax/numpy/linalg.py\n+++ b/jax/numpy/linalg.py\n@@ -60,6 +60,7 @@\n \n \n @_wraps(onp.linalg.slogdet)\n+@jit\n def slogdet(a):\n a = _promote_arg_dtypes(np.asarray(a))\n dtype = lax.dtype(a)\n@@ -72,10 +73,10 @@\n is_zero = np.any(diag == np.array(0, dtype=dtype), axis=-1)\n parity = np.count_nonzero(pivot != np.arange(a_shape[-1]), axis=-1)\n if np.iscomplexobj(a):\n- sign = np.prod(diag / np.abs(diag))\n+ sign = np.prod(diag / np.abs(diag), axis=-1)\n else:\n sign = np.array(1, dtype=dtype)\n- parity = parity + np.count_nonzero(diag < 0)\n+ parity = parity + np.count_nonzero(diag < 0, axis=-1)\n sign = np.where(is_zero,\n np.array(0, dtype=dtype),\n sign * np.array(-2 * (parity % 2) + 1, dtype=dtype))\n", "issue": "`slogdet` sign is incorrect\nIn the following, numpy disagrees with JAX\r\n```python\r\nmat = np.array([[[-0.01]],\r\n [[-0.01]]])\r\nprint(np.linalg.slogdet(mat))\r\nprint(jnp.linalg.slogdet(jnp.array(mat)))\r\n```\r\n=>\r\n```\r\n(array([-1., -1.]), array([-4.60517019, -4.60517019]))\r\n(DeviceArray([1., 1.]), DeviceArray([-4.60517019, -4.60517019]))\r\n```\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom functools import partial\n\nimport numpy as onp\nimport warnings\n\nfrom jax import jit\nfrom .. import lax\nfrom .. import lax_linalg\nfrom .lax_numpy import _not_implemented\nfrom .lax_numpy import _wraps\nfrom . 
import lax_numpy as np\nfrom ..util import get_module_functions\nfrom ..lib import xla_bridge\n\n\n_T = lambda x: np.swapaxes(x, -1, -2)\n\n\ndef _promote_arg_dtypes(*args):\n \"\"\"Promotes `args` to a common inexact type.\"\"\"\n def _to_inexact_type(type):\n return type if np.issubdtype(type, np.inexact) else np.float64\n inexact_types = [_to_inexact_type(np._dtype(arg)) for arg in args]\n dtype = xla_bridge.canonicalize_dtype(np.result_type(*inexact_types))\n args = [lax.convert_element_type(arg, dtype) for arg in args]\n if len(args) == 1:\n return args[0]\n else:\n return args\n\n\n@_wraps(onp.linalg.cholesky)\ndef cholesky(a):\n a = _promote_arg_dtypes(np.asarray(a))\n return lax_linalg.cholesky(a)\n\n\n@_wraps(onp.linalg.svd)\ndef svd(a, full_matrices=True, compute_uv=True):\n a = _promote_arg_dtypes(np.asarray(a))\n return lax_linalg.svd(a, full_matrices, compute_uv)\n\n\n@_wraps(onp.linalg.slogdet)\ndef slogdet(a):\n a = _promote_arg_dtypes(np.asarray(a))\n dtype = lax.dtype(a)\n a_shape = np.shape(a)\n if len(a_shape) < 2 or a_shape[-1] != a_shape[-2]:\n msg = \"Argument to slogdet() must have shape [..., n, n], got {}\"\n raise ValueError(msg.format(a_shape))\n lu, pivot = lax_linalg.lu(a)\n diag = np.diagonal(lu, axis1=-2, axis2=-1)\n is_zero = np.any(diag == np.array(0, dtype=dtype), axis=-1)\n parity = np.count_nonzero(pivot != np.arange(a_shape[-1]), axis=-1)\n if np.iscomplexobj(a):\n sign = np.prod(diag / np.abs(diag))\n else:\n sign = np.array(1, dtype=dtype)\n parity = parity + np.count_nonzero(diag < 0)\n sign = np.where(is_zero,\n np.array(0, dtype=dtype),\n sign * np.array(-2 * (parity % 2) + 1, dtype=dtype))\n logdet = np.where(\n is_zero, np.array(-np.inf, dtype=dtype),\n np.sum(np.log(np.abs(diag)), axis=-1))\n return sign, np.real(logdet)\n\n\n@_wraps(onp.linalg.det)\ndef det(a):\n sign, logdet = slogdet(a)\n return sign * np.exp(logdet)\n\n\n@_wraps(onp.linalg.eig)\ndef eig(a):\n a = _promote_arg_dtypes(np.asarray(a))\n w, vl, vr = lax_linalg.eig(a)\n return w, vr\n\n\n@_wraps(onp.linalg.eigh)\ndef eigh(a, UPLO=None, symmetrize_input=True):\n if UPLO is None or UPLO == \"L\":\n lower = True\n elif UPLO == \"U\":\n lower = False\n else:\n msg = \"UPLO must be one of None, 'L', or 'U', got {}\".format(UPLO)\n raise ValueError(msg)\n\n a = _promote_arg_dtypes(np.asarray(a))\n v, w = lax_linalg.eigh(a, lower=lower, symmetrize_input=symmetrize_input)\n return w, v\n\n\n@_wraps(onp.linalg.inv)\ndef inv(a):\n if np.ndim(a) < 2 or a.shape[-1] != a.shape[-2]:\n raise ValueError(\"Argument to inv must have shape [..., n, n], got {}.\"\n .format(np.shape(a)))\n return solve(\n a, lax.broadcast(np.eye(a.shape[-1], dtype=lax.dtype(a)), a.shape[:-2]))\n\n\n@partial(jit, static_argnums=(1, 2, 3))\ndef _norm(x, ord, axis, keepdims):\n x = _promote_arg_dtypes(np.asarray(x))\n x_shape = np.shape(x)\n ndim = len(x_shape)\n\n if axis is None:\n # NumPy has an undocumented behavior that admits arbitrary rank inputs if\n # `ord` is None: https://github.com/numpy/numpy/issues/14215\n if ord is None:\n return np.sqrt(np.sum(np.real(x * np.conj(x)), keepdims=keepdims))\n axis = tuple(range(ndim))\n elif isinstance(axis, tuple):\n axis = tuple(np._canonicalize_axis(x, ndim) for x in axis)\n else:\n axis = (np._canonicalize_axis(axis, ndim),)\n\n num_axes = len(axis)\n if num_axes == 1:\n if ord is None or ord == 2:\n return np.sqrt(np.sum(np.real(x * np.conj(x)), axis=axis,\n keepdims=keepdims))\n elif ord == np.inf:\n return np.amax(np.abs(x), axis=axis, keepdims=keepdims)\n elif ord == 
-np.inf:\n return np.amin(np.abs(x), axis=axis, keepdims=keepdims)\n elif ord == 0:\n return np.sum(x != 0, dtype=np.finfo(lax.dtype(x)).dtype,\n axis=axis, keepdims=keepdims)\n elif ord == 1:\n # Numpy has a special case for ord == 1 as an optimization. We don't\n # really need the optimization (XLA could do it for us), but the Numpy\n # code has slightly different type promotion semantics, so we need a\n # special case too.\n return np.sum(np.abs(x), axis=axis, keepdims=keepdims)\n else:\n return np.power(np.sum(np.abs(x) ** ord, axis=axis, keepdims=keepdims),\n 1. / ord)\n\n elif num_axes == 2:\n row_axis, col_axis = axis\n if ord is None or ord in ('f', 'fro'):\n return np.sqrt(np.sum(np.real(x * np.conj(x)), axis=axis,\n keepdims=keepdims))\n elif ord == 1:\n if not keepdims and col_axis > row_axis:\n col_axis -= 1\n return np.amax(np.sum(np.abs(x), axis=row_axis, keepdims=keepdims),\n axis=col_axis, keepdims=keepdims)\n elif ord == -1:\n if not keepdims and col_axis > row_axis:\n col_axis -= 1\n return np.amin(np.sum(np.abs(x), axis=row_axis, keepdims=keepdims),\n axis=col_axis, keepdims=keepdims)\n elif ord == np.inf:\n if not keepdims and row_axis > col_axis:\n row_axis -= 1\n return np.amax(np.sum(np.abs(x), axis=col_axis, keepdims=keepdims),\n axis=row_axis, keepdims=keepdims)\n elif ord == -np.inf:\n if not keepdims and row_axis > col_axis:\n row_axis -= 1\n return np.amin(np.sum(np.abs(x), axis=col_axis, keepdims=keepdims),\n axis=row_axis, keepdims=keepdims)\n elif ord in ('nuc', 2, -2):\n x = np.moveaxis(x, axis, (-2, -1))\n if ord == 2:\n reducer = np.amax\n elif ord == -2:\n reducer = np.amin\n else:\n reducer = np.sum\n y = reducer(svd(x, compute_uv=False), axis=-1)\n if keepdims:\n result_shape = list(x_shape)\n result_shape[axis[0]] = 1\n result_shape[axis[1]] = 1\n y = np.reshape(y, result_shape)\n return y\n else:\n raise ValueError(\"Invalid order '{}' for matrix norm.\".format(ord))\n else:\n raise ValueError(\n \"Invalid axis values ({}) for np.linalg.norm.\".format(axis))\n\n@_wraps(onp.linalg.norm)\ndef norm(x, ord=None, axis=None, keepdims=False):\n return _norm(x, ord, axis, keepdims)\n\n\n@_wraps(onp.linalg.qr)\ndef qr(a, mode=\"reduced\"):\n if mode in (\"reduced\", \"r\", \"full\"):\n full_matrices = False\n elif mode == \"complete\":\n full_matrices = True\n else:\n raise ValueError(\"Unsupported QR decomposition mode '{}'\".format(mode))\n a = _promote_arg_dtypes(np.asarray(a))\n q, r = lax_linalg.qr(a, full_matrices)\n if mode == \"r\":\n return r\n return q, r\n\n\n@_wraps(onp.linalg.solve)\n@jit\ndef solve(a, b):\n a, b = _promote_arg_dtypes(np.asarray(a), np.asarray(b))\n a_shape = np.shape(a)\n b_shape = np.shape(b)\n a_ndims = len(a_shape)\n b_ndims = len(b_shape)\n if not (a_ndims >= 2 and a_shape[-1] == a_shape[-2] and b_ndims >= 1):\n msg = (\"The arguments to solve must have shapes a=[..., m, m] and \"\n \"b=[..., m, k] or b=[..., m]; got a={} and b={}\")\n raise ValueError(msg.format(a_shape, b_shape))\n lu, pivots = lax_linalg.lu(a)\n dtype = lax.dtype(a)\n\n m = a_shape[-1]\n\n # Numpy treats the RHS as a (batched) vector if the number of dimensions\n # differ by 1. 
Otherwise, broadcasting rules apply.\n x = b[..., None] if a_ndims == b_ndims + 1 else b\n\n batch_dims = lax.broadcast_shapes(lu.shape[:-2], x.shape[:-2])\n x = np.broadcast_to(x, batch_dims + x.shape[-2:])\n lu = np.broadcast_to(lu, batch_dims + lu.shape[-2:])\n\n permutation = lax_linalg.lu_pivots_to_permutation(pivots, m)\n permutation = np.broadcast_to(permutation, batch_dims + (m,))\n iotas = np.ix_(*(lax.iota(np.int32, b) for b in batch_dims + (1,)))\n x = x[iotas[:-1] + (permutation, slice(None))]\n\n x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=True,\n unit_diagonal=True)\n x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=False)\n\n return x[..., 0] if a_ndims == b_ndims + 1 else x\n\n\nfor func in get_module_functions(onp.linalg):\n if func.__name__ not in globals():\n globals()[func.__name__] = _not_implemented(func)\n", "path": "jax/numpy/linalg.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom functools import partial\n\nimport numpy as onp\nimport warnings\n\nfrom jax import jit\nfrom .. import lax\nfrom .. import lax_linalg\nfrom .lax_numpy import _not_implemented\nfrom .lax_numpy import _wraps\nfrom . 
import lax_numpy as np\nfrom ..util import get_module_functions\nfrom ..lib import xla_bridge\n\n\n_T = lambda x: np.swapaxes(x, -1, -2)\n\n\ndef _promote_arg_dtypes(*args):\n \"\"\"Promotes `args` to a common inexact type.\"\"\"\n def _to_inexact_type(type):\n return type if np.issubdtype(type, np.inexact) else np.float64\n inexact_types = [_to_inexact_type(np._dtype(arg)) for arg in args]\n dtype = xla_bridge.canonicalize_dtype(np.result_type(*inexact_types))\n args = [lax.convert_element_type(arg, dtype) for arg in args]\n if len(args) == 1:\n return args[0]\n else:\n return args\n\n\n@_wraps(onp.linalg.cholesky)\ndef cholesky(a):\n a = _promote_arg_dtypes(np.asarray(a))\n return lax_linalg.cholesky(a)\n\n\n@_wraps(onp.linalg.svd)\ndef svd(a, full_matrices=True, compute_uv=True):\n a = _promote_arg_dtypes(np.asarray(a))\n return lax_linalg.svd(a, full_matrices, compute_uv)\n\n\n@_wraps(onp.linalg.slogdet)\n@jit\ndef slogdet(a):\n a = _promote_arg_dtypes(np.asarray(a))\n dtype = lax.dtype(a)\n a_shape = np.shape(a)\n if len(a_shape) < 2 or a_shape[-1] != a_shape[-2]:\n msg = \"Argument to slogdet() must have shape [..., n, n], got {}\"\n raise ValueError(msg.format(a_shape))\n lu, pivot = lax_linalg.lu(a)\n diag = np.diagonal(lu, axis1=-2, axis2=-1)\n is_zero = np.any(diag == np.array(0, dtype=dtype), axis=-1)\n parity = np.count_nonzero(pivot != np.arange(a_shape[-1]), axis=-1)\n if np.iscomplexobj(a):\n sign = np.prod(diag / np.abs(diag), axis=-1)\n else:\n sign = np.array(1, dtype=dtype)\n parity = parity + np.count_nonzero(diag < 0, axis=-1)\n sign = np.where(is_zero,\n np.array(0, dtype=dtype),\n sign * np.array(-2 * (parity % 2) + 1, dtype=dtype))\n logdet = np.where(\n is_zero, np.array(-np.inf, dtype=dtype),\n np.sum(np.log(np.abs(diag)), axis=-1))\n return sign, np.real(logdet)\n\n\n@_wraps(onp.linalg.det)\ndef det(a):\n sign, logdet = slogdet(a)\n return sign * np.exp(logdet)\n\n\n@_wraps(onp.linalg.eig)\ndef eig(a):\n a = _promote_arg_dtypes(np.asarray(a))\n w, vl, vr = lax_linalg.eig(a)\n return w, vr\n\n\n@_wraps(onp.linalg.eigh)\ndef eigh(a, UPLO=None, symmetrize_input=True):\n if UPLO is None or UPLO == \"L\":\n lower = True\n elif UPLO == \"U\":\n lower = False\n else:\n msg = \"UPLO must be one of None, 'L', or 'U', got {}\".format(UPLO)\n raise ValueError(msg)\n\n a = _promote_arg_dtypes(np.asarray(a))\n v, w = lax_linalg.eigh(a, lower=lower, symmetrize_input=symmetrize_input)\n return w, v\n\n\n@_wraps(onp.linalg.inv)\ndef inv(a):\n if np.ndim(a) < 2 or a.shape[-1] != a.shape[-2]:\n raise ValueError(\"Argument to inv must have shape [..., n, n], got {}.\"\n .format(np.shape(a)))\n return solve(\n a, lax.broadcast(np.eye(a.shape[-1], dtype=lax.dtype(a)), a.shape[:-2]))\n\n\n@partial(jit, static_argnums=(1, 2, 3))\ndef _norm(x, ord, axis, keepdims):\n x = _promote_arg_dtypes(np.asarray(x))\n x_shape = np.shape(x)\n ndim = len(x_shape)\n\n if axis is None:\n # NumPy has an undocumented behavior that admits arbitrary rank inputs if\n # `ord` is None: https://github.com/numpy/numpy/issues/14215\n if ord is None:\n return np.sqrt(np.sum(np.real(x * np.conj(x)), keepdims=keepdims))\n axis = tuple(range(ndim))\n elif isinstance(axis, tuple):\n axis = tuple(np._canonicalize_axis(x, ndim) for x in axis)\n else:\n axis = (np._canonicalize_axis(axis, ndim),)\n\n num_axes = len(axis)\n if num_axes == 1:\n if ord is None or ord == 2:\n return np.sqrt(np.sum(np.real(x * np.conj(x)), axis=axis,\n keepdims=keepdims))\n elif ord == np.inf:\n return np.amax(np.abs(x), axis=axis, 
keepdims=keepdims)\n elif ord == -np.inf:\n return np.amin(np.abs(x), axis=axis, keepdims=keepdims)\n elif ord == 0:\n return np.sum(x != 0, dtype=np.finfo(lax.dtype(x)).dtype,\n axis=axis, keepdims=keepdims)\n elif ord == 1:\n # Numpy has a special case for ord == 1 as an optimization. We don't\n # really need the optimization (XLA could do it for us), but the Numpy\n # code has slightly different type promotion semantics, so we need a\n # special case too.\n return np.sum(np.abs(x), axis=axis, keepdims=keepdims)\n else:\n return np.power(np.sum(np.abs(x) ** ord, axis=axis, keepdims=keepdims),\n 1. / ord)\n\n elif num_axes == 2:\n row_axis, col_axis = axis\n if ord is None or ord in ('f', 'fro'):\n return np.sqrt(np.sum(np.real(x * np.conj(x)), axis=axis,\n keepdims=keepdims))\n elif ord == 1:\n if not keepdims and col_axis > row_axis:\n col_axis -= 1\n return np.amax(np.sum(np.abs(x), axis=row_axis, keepdims=keepdims),\n axis=col_axis, keepdims=keepdims)\n elif ord == -1:\n if not keepdims and col_axis > row_axis:\n col_axis -= 1\n return np.amin(np.sum(np.abs(x), axis=row_axis, keepdims=keepdims),\n axis=col_axis, keepdims=keepdims)\n elif ord == np.inf:\n if not keepdims and row_axis > col_axis:\n row_axis -= 1\n return np.amax(np.sum(np.abs(x), axis=col_axis, keepdims=keepdims),\n axis=row_axis, keepdims=keepdims)\n elif ord == -np.inf:\n if not keepdims and row_axis > col_axis:\n row_axis -= 1\n return np.amin(np.sum(np.abs(x), axis=col_axis, keepdims=keepdims),\n axis=row_axis, keepdims=keepdims)\n elif ord in ('nuc', 2, -2):\n x = np.moveaxis(x, axis, (-2, -1))\n if ord == 2:\n reducer = np.amax\n elif ord == -2:\n reducer = np.amin\n else:\n reducer = np.sum\n y = reducer(svd(x, compute_uv=False), axis=-1)\n if keepdims:\n result_shape = list(x_shape)\n result_shape[axis[0]] = 1\n result_shape[axis[1]] = 1\n y = np.reshape(y, result_shape)\n return y\n else:\n raise ValueError(\"Invalid order '{}' for matrix norm.\".format(ord))\n else:\n raise ValueError(\n \"Invalid axis values ({}) for np.linalg.norm.\".format(axis))\n\n@_wraps(onp.linalg.norm)\ndef norm(x, ord=None, axis=None, keepdims=False):\n return _norm(x, ord, axis, keepdims)\n\n\n@_wraps(onp.linalg.qr)\ndef qr(a, mode=\"reduced\"):\n if mode in (\"reduced\", \"r\", \"full\"):\n full_matrices = False\n elif mode == \"complete\":\n full_matrices = True\n else:\n raise ValueError(\"Unsupported QR decomposition mode '{}'\".format(mode))\n a = _promote_arg_dtypes(np.asarray(a))\n q, r = lax_linalg.qr(a, full_matrices)\n if mode == \"r\":\n return r\n return q, r\n\n\n@_wraps(onp.linalg.solve)\n@jit\ndef solve(a, b):\n a, b = _promote_arg_dtypes(np.asarray(a), np.asarray(b))\n a_shape = np.shape(a)\n b_shape = np.shape(b)\n a_ndims = len(a_shape)\n b_ndims = len(b_shape)\n if not (a_ndims >= 2 and a_shape[-1] == a_shape[-2] and b_ndims >= 1):\n msg = (\"The arguments to solve must have shapes a=[..., m, m] and \"\n \"b=[..., m, k] or b=[..., m]; got a={} and b={}\")\n raise ValueError(msg.format(a_shape, b_shape))\n lu, pivots = lax_linalg.lu(a)\n dtype = lax.dtype(a)\n\n m = a_shape[-1]\n\n # Numpy treats the RHS as a (batched) vector if the number of dimensions\n # differ by 1. 
Otherwise, broadcasting rules apply.\n x = b[..., None] if a_ndims == b_ndims + 1 else b\n\n batch_dims = lax.broadcast_shapes(lu.shape[:-2], x.shape[:-2])\n x = np.broadcast_to(x, batch_dims + x.shape[-2:])\n lu = np.broadcast_to(lu, batch_dims + lu.shape[-2:])\n\n permutation = lax_linalg.lu_pivots_to_permutation(pivots, m)\n permutation = np.broadcast_to(permutation, batch_dims + (m,))\n iotas = np.ix_(*(lax.iota(np.int32, b) for b in batch_dims + (1,)))\n x = x[iotas[:-1] + (permutation, slice(None))]\n\n x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=True,\n unit_diagonal=True)\n x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=False)\n\n return x[..., 0] if a_ndims == b_ndims + 1 else x\n\n\nfor func in get_module_functions(onp.linalg):\n if func.__name__ not in globals():\n globals()[func.__name__] = _not_implemented(func)\n", "path": "jax/numpy/linalg.py"}]}
3,741
280
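The record above closes inside `jax.numpy.linalg.solve`: the LU pivots are converted to a permutation, the right-hand side is gathered with that permutation, and the system is finished with a unit-lower and then an upper triangular solve. A rough, single-matrix sketch of that same pattern written against SciPy rather than JAX (the shapes, seed, and variable names are illustrative, not taken from the record):

```python
import numpy as np
from scipy.linalg import lu_factor, lu_solve, solve_triangular

rng = np.random.default_rng(0)
a = rng.standard_normal((4, 4)) + 4 * np.eye(4)   # well-conditioned test matrix
b = rng.standard_normal((4, 2))

# One-shot reference answer.
lu, piv = lu_factor(a)                 # A = P L U; piv records the row swaps
x_ref = lu_solve((lu, piv), b)

# The same solve spelled out the way the JAX code does it.
perm = np.arange(4)
for i, p in enumerate(piv):            # turn sequential row swaps into a permutation
    perm[i], perm[p] = perm[p], perm[i]
y = solve_triangular(lu, b[perm], lower=True, unit_diagonal=True)   # L y = P b
x = solve_triangular(lu, y, lower=False)                            # U x = y

assert np.allclose(x, x_ref) and np.allclose(a @ x, b)
```

The batched JAX version does the same work per batch element, which is why it first broadcasts `lu`, the permutation, and the right-hand side to a common batch shape.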
gh_patches_debug_33426
rasdani/github-patches
git_diff
ethereum__web3.py-2025
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Redundant async/await in AsyncJSONBaseProvider serde methods * Version: 5.20 * Python: 3.9.5 * OS: Linux ``` aiohttp==3.7.4.post0 web3==5.20 ``` ### What was wrong? Serde methods internally do not await on any coroutines, so there is no reason to have them `async` ### How can it be fixed? Remove `async/await` in those methods. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `web3/providers/async_base.py` Content: ``` 1 import itertools 2 from typing import ( 3 TYPE_CHECKING, 4 Any, 5 Callable, 6 Sequence, 7 Tuple, 8 cast, 9 ) 10 import warnings 11 12 from eth_utils import ( 13 to_bytes, 14 to_text, 15 ) 16 17 from web3._utils.encoding import ( 18 FriendlyJsonSerde, 19 ) 20 from web3.middleware import ( 21 async_combine_middlewares, 22 ) 23 from web3.types import ( 24 Middleware, 25 MiddlewareOnion, 26 RPCEndpoint, 27 RPCResponse, 28 ) 29 30 if TYPE_CHECKING: 31 from web3 import Web3 # noqa: F401 32 33 34 class AsyncBaseProvider: 35 _middlewares: Tuple[Middleware, ...] = () 36 # a tuple of (all_middlewares, request_func) 37 _request_func_cache: Tuple[Tuple[Middleware, ...], Callable[..., RPCResponse]] = (None, None) 38 39 def __init__(self) -> None: 40 warnings.warn( 41 "Async providers are still being developed and refined. " 42 "Expect breaking changes in minor releases.") 43 44 @property 45 def middlewares(self) -> Tuple[Middleware, ...]: 46 return self._middlewares 47 48 @middlewares.setter 49 def middlewares( 50 self, values: MiddlewareOnion 51 ) -> None: 52 # tuple(values) converts to MiddlewareOnion -> Tuple[Middleware, ...] 53 self._middlewares = tuple(values) # type: ignore 54 55 async def request_func( 56 self, web3: "Web3", outer_middlewares: MiddlewareOnion 57 ) -> Callable[[RPCEndpoint], Any]: 58 all_middlewares: Tuple[Middleware] = tuple(outer_middlewares) + tuple(self.middlewares) # type: ignore # noqa: E501 59 60 cache_key = self._request_func_cache[0] 61 if cache_key is None or cache_key != all_middlewares: 62 self._request_func_cache = ( 63 all_middlewares, 64 await self._generate_request_func(web3, all_middlewares) 65 ) 66 return self._request_func_cache[-1] 67 68 async def _generate_request_func( 69 self, web3: "Web3", middlewares: Sequence[Middleware] 70 ) -> Callable[..., RPCResponse]: 71 return await async_combine_middlewares( 72 middlewares=middlewares, 73 web3=web3, 74 provider_request_fn=self.make_request, 75 ) 76 77 async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse: 78 raise NotImplementedError("Providers must implement this method") 79 80 async def isConnected(self) -> bool: 81 raise NotImplementedError("Providers must implement this method") 82 83 84 class AsyncJSONBaseProvider(AsyncBaseProvider): 85 def __init__(self) -> None: 86 self.request_counter = itertools.count() 87 88 async def encode_rpc_request(self, method: RPCEndpoint, params: Any) -> bytes: 89 rpc_dict = { 90 "jsonrpc": "2.0", 91 "method": method, 92 "params": params or [], 93 "id": next(self.request_counter), 94 } 95 encoded = FriendlyJsonSerde().json_encode(rpc_dict) 96 return to_bytes(text=encoded) 97 98 async def decode_rpc_response(self, raw_response: bytes) -> RPCResponse: 99 text_response = to_text(raw_response) 100 return cast(RPCResponse, FriendlyJsonSerde().json_decode(text_response)) 101 102 async def isConnected(self) -> bool: 103 try: 104 response = 
await self.make_request(RPCEndpoint('web3_clientVersion'), []) 105 except IOError: 106 return False 107 108 assert response['jsonrpc'] == '2.0' 109 assert 'error' not in response 110 111 return True 112 ``` Path: `web3/providers/async_rpc.py` Content: ``` 1 import logging 2 from typing import ( 3 Any, 4 Dict, 5 Iterable, 6 Optional, 7 Tuple, 8 Union, 9 ) 10 11 from eth_typing import ( 12 URI, 13 ) 14 from eth_utils import ( 15 to_dict, 16 ) 17 18 from web3._utils.http import ( 19 construct_user_agent, 20 ) 21 from web3._utils.request import ( 22 async_make_post_request, 23 get_default_http_endpoint, 24 ) 25 from web3.types import ( 26 RPCEndpoint, 27 RPCResponse, 28 ) 29 30 from .async_base import ( 31 AsyncJSONBaseProvider, 32 ) 33 34 35 class AsyncHTTPProvider(AsyncJSONBaseProvider): 36 logger = logging.getLogger("web3.providers.HTTPProvider") 37 endpoint_uri = None 38 _request_kwargs = None 39 40 def __init__( 41 self, endpoint_uri: Optional[Union[URI, str]] = None, 42 request_kwargs: Optional[Any] = None 43 ) -> None: 44 if endpoint_uri is None: 45 self.endpoint_uri = get_default_http_endpoint() 46 else: 47 self.endpoint_uri = URI(endpoint_uri) 48 49 self._request_kwargs = request_kwargs or {} 50 51 super().__init__() 52 53 def __str__(self) -> str: 54 return "RPC connection {0}".format(self.endpoint_uri) 55 56 @to_dict 57 def get_request_kwargs(self) -> Iterable[Tuple[str, Any]]: 58 if 'headers' not in self._request_kwargs: 59 yield 'headers', self.get_request_headers() 60 for key, value in self._request_kwargs.items(): 61 yield key, value 62 63 def get_request_headers(self) -> Dict[str, str]: 64 return { 65 'Content-Type': 'application/json', 66 'User-Agent': construct_user_agent(str(type(self))), 67 } 68 69 async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse: 70 self.logger.debug("Making request HTTP. URI: %s, Method: %s", 71 self.endpoint_uri, method) 72 request_data = await self.encode_rpc_request(method, params) 73 raw_response = await async_make_post_request( 74 self.endpoint_uri, 75 request_data, 76 **self.get_request_kwargs() 77 ) 78 response = await self.decode_rpc_response(raw_response) 79 self.logger.debug("Getting response HTTP. URI: %s, " 80 "Method: %s, Response: %s", 81 self.endpoint_uri, method, response) 82 return response 83 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/web3/providers/async_base.py b/web3/providers/async_base.py --- a/web3/providers/async_base.py +++ b/web3/providers/async_base.py @@ -85,7 +85,7 @@ def __init__(self) -> None: self.request_counter = itertools.count() - async def encode_rpc_request(self, method: RPCEndpoint, params: Any) -> bytes: + def encode_rpc_request(self, method: RPCEndpoint, params: Any) -> bytes: rpc_dict = { "jsonrpc": "2.0", "method": method, @@ -95,7 +95,7 @@ encoded = FriendlyJsonSerde().json_encode(rpc_dict) return to_bytes(text=encoded) - async def decode_rpc_response(self, raw_response: bytes) -> RPCResponse: + def decode_rpc_response(self, raw_response: bytes) -> RPCResponse: text_response = to_text(raw_response) return cast(RPCResponse, FriendlyJsonSerde().json_decode(text_response)) diff --git a/web3/providers/async_rpc.py b/web3/providers/async_rpc.py --- a/web3/providers/async_rpc.py +++ b/web3/providers/async_rpc.py @@ -69,13 +69,13 @@ async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse: self.logger.debug("Making request HTTP. URI: %s, Method: %s", self.endpoint_uri, method) - request_data = await self.encode_rpc_request(method, params) + request_data = self.encode_rpc_request(method, params) raw_response = await async_make_post_request( self.endpoint_uri, request_data, **self.get_request_kwargs() ) - response = await self.decode_rpc_response(raw_response) + response = self.decode_rpc_response(raw_response) self.logger.debug("Getting response HTTP. URI: %s, " "Method: %s, Response: %s", self.endpoint_uri, method, response)
{"golden_diff": "diff --git a/web3/providers/async_base.py b/web3/providers/async_base.py\n--- a/web3/providers/async_base.py\n+++ b/web3/providers/async_base.py\n@@ -85,7 +85,7 @@\n def __init__(self) -> None:\n self.request_counter = itertools.count()\n \n- async def encode_rpc_request(self, method: RPCEndpoint, params: Any) -> bytes:\n+ def encode_rpc_request(self, method: RPCEndpoint, params: Any) -> bytes:\n rpc_dict = {\n \"jsonrpc\": \"2.0\",\n \"method\": method,\n@@ -95,7 +95,7 @@\n encoded = FriendlyJsonSerde().json_encode(rpc_dict)\n return to_bytes(text=encoded)\n \n- async def decode_rpc_response(self, raw_response: bytes) -> RPCResponse:\n+ def decode_rpc_response(self, raw_response: bytes) -> RPCResponse:\n text_response = to_text(raw_response)\n return cast(RPCResponse, FriendlyJsonSerde().json_decode(text_response))\n \ndiff --git a/web3/providers/async_rpc.py b/web3/providers/async_rpc.py\n--- a/web3/providers/async_rpc.py\n+++ b/web3/providers/async_rpc.py\n@@ -69,13 +69,13 @@\n async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n self.logger.debug(\"Making request HTTP. URI: %s, Method: %s\",\n self.endpoint_uri, method)\n- request_data = await self.encode_rpc_request(method, params)\n+ request_data = self.encode_rpc_request(method, params)\n raw_response = await async_make_post_request(\n self.endpoint_uri,\n request_data,\n **self.get_request_kwargs()\n )\n- response = await self.decode_rpc_response(raw_response)\n+ response = self.decode_rpc_response(raw_response)\n self.logger.debug(\"Getting response HTTP. URI: %s, \"\n \"Method: %s, Response: %s\",\n self.endpoint_uri, method, response)\n", "issue": "Redundant async/await in AsyncJSONBaseProvider serde methods\n* Version: 5.20\r\n* Python: 3.9.5\r\n* OS: Linux\r\n\r\n```\r\naiohttp==3.7.4.post0\r\nweb3==5.20\r\n```\r\n\r\n\r\n### What was wrong?\r\n\r\nSerde methods internally do not await on any coroutines, so there is no reason to have them `async`\r\n\r\n### How can it be fixed?\r\n\r\nRemove `async/await` in those methods.\r\n\n", "before_files": [{"content": "import itertools\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Sequence,\n Tuple,\n cast,\n)\nimport warnings\n\nfrom eth_utils import (\n to_bytes,\n to_text,\n)\n\nfrom web3._utils.encoding import (\n FriendlyJsonSerde,\n)\nfrom web3.middleware import (\n async_combine_middlewares,\n)\nfrom web3.types import (\n Middleware,\n MiddlewareOnion,\n RPCEndpoint,\n RPCResponse,\n)\n\nif TYPE_CHECKING:\n from web3 import Web3 # noqa: F401\n\n\nclass AsyncBaseProvider:\n _middlewares: Tuple[Middleware, ...] = ()\n # a tuple of (all_middlewares, request_func)\n _request_func_cache: Tuple[Tuple[Middleware, ...], Callable[..., RPCResponse]] = (None, None)\n\n def __init__(self) -> None:\n warnings.warn(\n \"Async providers are still being developed and refined. 
\"\n \"Expect breaking changes in minor releases.\")\n\n @property\n def middlewares(self) -> Tuple[Middleware, ...]:\n return self._middlewares\n\n @middlewares.setter\n def middlewares(\n self, values: MiddlewareOnion\n ) -> None:\n # tuple(values) converts to MiddlewareOnion -> Tuple[Middleware, ...]\n self._middlewares = tuple(values) # type: ignore\n\n async def request_func(\n self, web3: \"Web3\", outer_middlewares: MiddlewareOnion\n ) -> Callable[[RPCEndpoint], Any]:\n all_middlewares: Tuple[Middleware] = tuple(outer_middlewares) + tuple(self.middlewares) # type: ignore # noqa: E501\n\n cache_key = self._request_func_cache[0]\n if cache_key is None or cache_key != all_middlewares:\n self._request_func_cache = (\n all_middlewares,\n await self._generate_request_func(web3, all_middlewares)\n )\n return self._request_func_cache[-1]\n\n async def _generate_request_func(\n self, web3: \"Web3\", middlewares: Sequence[Middleware]\n ) -> Callable[..., RPCResponse]:\n return await async_combine_middlewares(\n middlewares=middlewares,\n web3=web3,\n provider_request_fn=self.make_request,\n )\n\n async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n raise NotImplementedError(\"Providers must implement this method\")\n\n async def isConnected(self) -> bool:\n raise NotImplementedError(\"Providers must implement this method\")\n\n\nclass AsyncJSONBaseProvider(AsyncBaseProvider):\n def __init__(self) -> None:\n self.request_counter = itertools.count()\n\n async def encode_rpc_request(self, method: RPCEndpoint, params: Any) -> bytes:\n rpc_dict = {\n \"jsonrpc\": \"2.0\",\n \"method\": method,\n \"params\": params or [],\n \"id\": next(self.request_counter),\n }\n encoded = FriendlyJsonSerde().json_encode(rpc_dict)\n return to_bytes(text=encoded)\n\n async def decode_rpc_response(self, raw_response: bytes) -> RPCResponse:\n text_response = to_text(raw_response)\n return cast(RPCResponse, FriendlyJsonSerde().json_decode(text_response))\n\n async def isConnected(self) -> bool:\n try:\n response = await self.make_request(RPCEndpoint('web3_clientVersion'), [])\n except IOError:\n return False\n\n assert response['jsonrpc'] == '2.0'\n assert 'error' not in response\n\n return True\n", "path": "web3/providers/async_base.py"}, {"content": "import logging\nfrom typing import (\n Any,\n Dict,\n Iterable,\n Optional,\n Tuple,\n Union,\n)\n\nfrom eth_typing import (\n URI,\n)\nfrom eth_utils import (\n to_dict,\n)\n\nfrom web3._utils.http import (\n construct_user_agent,\n)\nfrom web3._utils.request import (\n async_make_post_request,\n get_default_http_endpoint,\n)\nfrom web3.types import (\n RPCEndpoint,\n RPCResponse,\n)\n\nfrom .async_base import (\n AsyncJSONBaseProvider,\n)\n\n\nclass AsyncHTTPProvider(AsyncJSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.HTTPProvider\")\n endpoint_uri = None\n _request_kwargs = None\n\n def __init__(\n self, endpoint_uri: Optional[Union[URI, str]] = None,\n request_kwargs: Optional[Any] = None\n ) -> None:\n if endpoint_uri is None:\n self.endpoint_uri = get_default_http_endpoint()\n else:\n self.endpoint_uri = URI(endpoint_uri)\n\n self._request_kwargs = request_kwargs or {}\n\n super().__init__()\n\n def __str__(self) -> str:\n return \"RPC connection {0}\".format(self.endpoint_uri)\n\n @to_dict\n def get_request_kwargs(self) -> Iterable[Tuple[str, Any]]:\n if 'headers' not in self._request_kwargs:\n yield 'headers', self.get_request_headers()\n for key, value in self._request_kwargs.items():\n yield key, value\n\n def 
get_request_headers(self) -> Dict[str, str]:\n return {\n 'Content-Type': 'application/json',\n 'User-Agent': construct_user_agent(str(type(self))),\n }\n\n async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n self.logger.debug(\"Making request HTTP. URI: %s, Method: %s\",\n self.endpoint_uri, method)\n request_data = await self.encode_rpc_request(method, params)\n raw_response = await async_make_post_request(\n self.endpoint_uri,\n request_data,\n **self.get_request_kwargs()\n )\n response = await self.decode_rpc_response(raw_response)\n self.logger.debug(\"Getting response HTTP. URI: %s, \"\n \"Method: %s, Response: %s\",\n self.endpoint_uri, method, response)\n return response\n", "path": "web3/providers/async_rpc.py"}], "after_files": [{"content": "import itertools\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Sequence,\n Tuple,\n cast,\n)\nimport warnings\n\nfrom eth_utils import (\n to_bytes,\n to_text,\n)\n\nfrom web3._utils.encoding import (\n FriendlyJsonSerde,\n)\nfrom web3.middleware import (\n async_combine_middlewares,\n)\nfrom web3.types import (\n Middleware,\n MiddlewareOnion,\n RPCEndpoint,\n RPCResponse,\n)\n\nif TYPE_CHECKING:\n from web3 import Web3 # noqa: F401\n\n\nclass AsyncBaseProvider:\n _middlewares: Tuple[Middleware, ...] = ()\n # a tuple of (all_middlewares, request_func)\n _request_func_cache: Tuple[Tuple[Middleware, ...], Callable[..., RPCResponse]] = (None, None)\n\n def __init__(self) -> None:\n warnings.warn(\n \"Async providers are still being developed and refined. \"\n \"Expect breaking changes in minor releases.\")\n\n @property\n def middlewares(self) -> Tuple[Middleware, ...]:\n return self._middlewares\n\n @middlewares.setter\n def middlewares(\n self, values: MiddlewareOnion\n ) -> None:\n # tuple(values) converts to MiddlewareOnion -> Tuple[Middleware, ...]\n self._middlewares = tuple(values) # type: ignore\n\n async def request_func(\n self, web3: \"Web3\", outer_middlewares: MiddlewareOnion\n ) -> Callable[[RPCEndpoint], Any]:\n all_middlewares: Tuple[Middleware] = tuple(outer_middlewares) + tuple(self.middlewares) # type: ignore # noqa: E501\n\n cache_key = self._request_func_cache[0]\n if cache_key is None or cache_key != all_middlewares:\n self._request_func_cache = (\n all_middlewares,\n await self._generate_request_func(web3, all_middlewares)\n )\n return self._request_func_cache[-1]\n\n async def _generate_request_func(\n self, web3: \"Web3\", middlewares: Sequence[Middleware]\n ) -> Callable[..., RPCResponse]:\n return await async_combine_middlewares(\n middlewares=middlewares,\n web3=web3,\n provider_request_fn=self.make_request,\n )\n\n async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n raise NotImplementedError(\"Providers must implement this method\")\n\n async def isConnected(self) -> bool:\n raise NotImplementedError(\"Providers must implement this method\")\n\n\nclass AsyncJSONBaseProvider(AsyncBaseProvider):\n def __init__(self) -> None:\n self.request_counter = itertools.count()\n\n def encode_rpc_request(self, method: RPCEndpoint, params: Any) -> bytes:\n rpc_dict = {\n \"jsonrpc\": \"2.0\",\n \"method\": method,\n \"params\": params or [],\n \"id\": next(self.request_counter),\n }\n encoded = FriendlyJsonSerde().json_encode(rpc_dict)\n return to_bytes(text=encoded)\n\n def decode_rpc_response(self, raw_response: bytes) -> RPCResponse:\n text_response = to_text(raw_response)\n return cast(RPCResponse, FriendlyJsonSerde().json_decode(text_response))\n\n async 
def isConnected(self) -> bool:\n try:\n response = await self.make_request(RPCEndpoint('web3_clientVersion'), [])\n except IOError:\n return False\n\n assert response['jsonrpc'] == '2.0'\n assert 'error' not in response\n\n return True\n", "path": "web3/providers/async_base.py"}, {"content": "import logging\nfrom typing import (\n Any,\n Dict,\n Iterable,\n Optional,\n Tuple,\n Union,\n)\n\nfrom eth_typing import (\n URI,\n)\nfrom eth_utils import (\n to_dict,\n)\n\nfrom web3._utils.http import (\n construct_user_agent,\n)\nfrom web3._utils.request import (\n async_make_post_request,\n get_default_http_endpoint,\n)\nfrom web3.types import (\n RPCEndpoint,\n RPCResponse,\n)\n\nfrom .async_base import (\n AsyncJSONBaseProvider,\n)\n\n\nclass AsyncHTTPProvider(AsyncJSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.HTTPProvider\")\n endpoint_uri = None\n _request_kwargs = None\n\n def __init__(\n self, endpoint_uri: Optional[Union[URI, str]] = None,\n request_kwargs: Optional[Any] = None\n ) -> None:\n if endpoint_uri is None:\n self.endpoint_uri = get_default_http_endpoint()\n else:\n self.endpoint_uri = URI(endpoint_uri)\n\n self._request_kwargs = request_kwargs or {}\n\n super().__init__()\n\n def __str__(self) -> str:\n return \"RPC connection {0}\".format(self.endpoint_uri)\n\n @to_dict\n def get_request_kwargs(self) -> Iterable[Tuple[str, Any]]:\n if 'headers' not in self._request_kwargs:\n yield 'headers', self.get_request_headers()\n for key, value in self._request_kwargs.items():\n yield key, value\n\n def get_request_headers(self) -> Dict[str, str]:\n return {\n 'Content-Type': 'application/json',\n 'User-Agent': construct_user_agent(str(type(self))),\n }\n\n async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n self.logger.debug(\"Making request HTTP. URI: %s, Method: %s\",\n self.endpoint_uri, method)\n request_data = self.encode_rpc_request(method, params)\n raw_response = await async_make_post_request(\n self.endpoint_uri,\n request_data,\n **self.get_request_kwargs()\n )\n response = self.decode_rpc_response(raw_response)\n self.logger.debug(\"Getting response HTTP. URI: %s, \"\n \"Method: %s, Response: %s\",\n self.endpoint_uri, method, response)\n return response\n", "path": "web3/providers/async_rpc.py"}]}
2,054
450
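The fix above boils down to one rule: JSON encoding and decoding never suspend, so they should be plain methods, and only the network round trip deserves `await`. A self-contained toy showing that shape; `TinyJSONRPC` and `_post` are invented names for illustration, not part of the web3.py API:

```python
import asyncio
import itertools
import json


class TinyJSONRPC:
    def __init__(self):
        self._counter = itertools.count()

    def encode_request(self, method, params):       # pure CPU work: plain def
        payload = {"jsonrpc": "2.0", "method": method,
                   "params": params or [], "id": next(self._counter)}
        return json.dumps(payload).encode()

    def decode_response(self, raw):                  # pure CPU work: plain def
        return json.loads(raw.decode())

    async def make_request(self, method, params):
        data = self.encode_request(method, params)   # no await needed
        raw = await self._post(data)                 # the only real suspension point
        return self.decode_response(raw)             # no await needed

    async def _post(self, data):
        await asyncio.sleep(0)                       # stands in for the HTTP round trip
        request = json.loads(data.decode())
        reply = {"jsonrpc": "2.0", "id": request["id"], "result": "ok"}
        return json.dumps(reply).encode()


print(asyncio.run(TinyJSONRPC().make_request("web3_clientVersion", [])))
```

Keeping the serde methods synchronous also spares every caller the extra coroutine objects that the redundant `async` defs forced it to create and await.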
gh_patches_debug_22997
rasdani/github-patches
git_diff
liqd__a4-opin-605
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Wrong user in project When I get an invitation to a project via email and I am logged in with a different user and click one the link in the email, the second user is added to the project --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `euth/memberships/views.py` Content: ``` 1 from django.http import Http404 2 from django.shortcuts import redirect 3 from django.views import generic 4 from rules.compat import access_mixins as mixin 5 6 from adhocracy4.projects import models as prj_models 7 from adhocracy4.projects import views as prj_views 8 9 from . import forms, models 10 11 12 class RequestsProjectDetailView(prj_views.ProjectDetailView): 13 14 def handle_no_permission(self): 15 """ 16 Check if user clould join 17 """ 18 user = self.request.user 19 is_member = user.is_authenticated() and self.project.has_member(user) 20 21 if is_member: 22 return super().handle_no_permission() 23 else: 24 return self.handle_no_membership() 25 26 def handle_no_membership(self): 27 membership_impossible = ( 28 not self.request.user.is_authenticated() 29 or self.project.is_draft 30 or self.project.has_member(self.request.user) 31 ) 32 33 if membership_impossible: 34 return super().handle_no_permission() 35 else: 36 return redirect('memberships-request', 37 project_slug=self.project.slug) 38 39 40 class InviteView(mixin.LoginRequiredMixin, generic.UpdateView): 41 model = models.Invite 42 form_class = forms.InviteForm 43 slug_field = 'token' 44 slug_url_kwarg = 'invite_token' 45 46 def form_valid(self, form): 47 if form.is_accepted(): 48 form.instance.accept(self.request.user) 49 return redirect(form.instance.project.get_absolute_url()) 50 else: 51 form.instance.reject() 52 return redirect('/') 53 54 55 class RequestView(mixin.LoginRequiredMixin, generic.DetailView): 56 """ 57 Displays membership request if it exists or allows to create one. 58 """ 59 model = models.Request 60 slug_field = 'project__slug' 61 slug_url_kwarg = 'project_slug' 62 context_object_name = 'join_request' 63 64 def get_queryset(self): 65 return self.model.objects.filter(creator=self.request.user) 66 67 def get(self, request, *args, **kwargs): 68 if self.project.has_member(request.user): 69 return redirect(self.project.get_absolute_url()) 70 else: 71 return super().get(request, *args, **kwargs) 72 73 def post(self, request, *args, **kwargs): 74 user = request.user 75 project = self.project 76 models.Request.objects.request_membership(project, user) 77 return redirect(self.request.path) 78 79 def get_object(self, queryset=None): 80 try: 81 return super().get_object(queryset) 82 except Http404: 83 return None 84 85 @property 86 def project(self): 87 project_slug = self.kwargs[self.slug_url_kwarg] 88 return prj_models.Project.objects.get(slug=project_slug) 89 ``` Path: `euth/memberships/forms.py` Content: ``` 1 from django import forms 2 from django.core.exceptions import ValidationError 3 4 from . 
import models 5 6 7 class InviteForm(forms.ModelForm): 8 accept = forms.CharField(required=False) 9 reject = forms.CharField(required=False) 10 11 class Meta: 12 model = models.Invite 13 fields = ['accept', 'reject'] 14 15 def clean(self): 16 data = self.data 17 if 'accept' not in data and 'reject' not in data: 18 raise ValidationError('Reject or accept') 19 return data 20 21 def is_accepted(self): 22 data = self.data 23 return 'accept' in data and 'reject' not in data 24 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/euth/memberships/forms.py b/euth/memberships/forms.py --- a/euth/memberships/forms.py +++ b/euth/memberships/forms.py @@ -12,10 +12,17 @@ model = models.Invite fields = ['accept', 'reject'] + def __init__(self, user=None, **kwargs): + super().__init__(**kwargs) + self.user = user + def clean(self): data = self.data if 'accept' not in data and 'reject' not in data: raise ValidationError('Reject or accept') + if 'accept' in data and not self.user.email == self.instance.email: + raise ValidationError('This user has another email address than ' + 'the one that received the invitation.') return data def is_accepted(self): diff --git a/euth/memberships/views.py b/euth/memberships/views.py --- a/euth/memberships/views.py +++ b/euth/memberships/views.py @@ -43,6 +43,11 @@ slug_field = 'token' slug_url_kwarg = 'invite_token' + def get_form_kwargs(self): + kwargs = super().get_form_kwargs() + kwargs.update({'user': self.request.user}) + return kwargs + def form_valid(self, form): if form.is_accepted(): form.instance.accept(self.request.user)
{"golden_diff": "diff --git a/euth/memberships/forms.py b/euth/memberships/forms.py\n--- a/euth/memberships/forms.py\n+++ b/euth/memberships/forms.py\n@@ -12,10 +12,17 @@\n model = models.Invite\n fields = ['accept', 'reject']\n \n+ def __init__(self, user=None, **kwargs):\n+ super().__init__(**kwargs)\n+ self.user = user\n+\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n+ if 'accept' in data and not self.user.email == self.instance.email:\n+ raise ValidationError('This user has another email address than '\n+ 'the one that received the invitation.')\n return data\n \n def is_accepted(self):\ndiff --git a/euth/memberships/views.py b/euth/memberships/views.py\n--- a/euth/memberships/views.py\n+++ b/euth/memberships/views.py\n@@ -43,6 +43,11 @@\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n \n+ def get_form_kwargs(self):\n+ kwargs = super().get_form_kwargs()\n+ kwargs.update({'user': self.request.user})\n+ return kwargs\n+\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n", "issue": "Wrong user in project\nWhen I get an invitation to a project via email and I am logged in with a different user and click one the link in the email, the second user is added to the project\n", "before_files": [{"content": "from django.http import Http404\nfrom django.shortcuts import redirect\nfrom django.views import generic\nfrom rules.compat import access_mixins as mixin\n\nfrom adhocracy4.projects import models as prj_models\nfrom adhocracy4.projects import views as prj_views\n\nfrom . import forms, models\n\n\nclass RequestsProjectDetailView(prj_views.ProjectDetailView):\n\n def handle_no_permission(self):\n \"\"\"\n Check if user clould join\n \"\"\"\n user = self.request.user\n is_member = user.is_authenticated() and self.project.has_member(user)\n\n if is_member:\n return super().handle_no_permission()\n else:\n return self.handle_no_membership()\n\n def handle_no_membership(self):\n membership_impossible = (\n not self.request.user.is_authenticated()\n or self.project.is_draft\n or self.project.has_member(self.request.user)\n )\n\n if membership_impossible:\n return super().handle_no_permission()\n else:\n return redirect('memberships-request',\n project_slug=self.project.slug)\n\n\nclass InviteView(mixin.LoginRequiredMixin, generic.UpdateView):\n model = models.Invite\n form_class = forms.InviteForm\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n return redirect(form.instance.project.get_absolute_url())\n else:\n form.instance.reject()\n return redirect('/')\n\n\nclass RequestView(mixin.LoginRequiredMixin, generic.DetailView):\n \"\"\"\n Displays membership request if it exists or allows to create one.\n \"\"\"\n model = models.Request\n slug_field = 'project__slug'\n slug_url_kwarg = 'project_slug'\n context_object_name = 'join_request'\n\n def get_queryset(self):\n return self.model.objects.filter(creator=self.request.user)\n\n def get(self, request, *args, **kwargs):\n if self.project.has_member(request.user):\n return redirect(self.project.get_absolute_url())\n else:\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n user = request.user\n project = self.project\n models.Request.objects.request_membership(project, user)\n return redirect(self.request.path)\n\n def get_object(self, queryset=None):\n 
try:\n return super().get_object(queryset)\n except Http404:\n return None\n\n @property\n def project(self):\n project_slug = self.kwargs[self.slug_url_kwarg]\n return prj_models.Project.objects.get(slug=project_slug)\n", "path": "euth/memberships/views.py"}, {"content": "from django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom . import models\n\n\nclass InviteForm(forms.ModelForm):\n accept = forms.CharField(required=False)\n reject = forms.CharField(required=False)\n\n class Meta:\n model = models.Invite\n fields = ['accept', 'reject']\n\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n return data\n\n def is_accepted(self):\n data = self.data\n return 'accept' in data and 'reject' not in data\n", "path": "euth/memberships/forms.py"}], "after_files": [{"content": "from django.http import Http404\nfrom django.shortcuts import redirect\nfrom django.views import generic\nfrom rules.compat import access_mixins as mixin\n\nfrom adhocracy4.projects import models as prj_models\nfrom adhocracy4.projects import views as prj_views\n\nfrom . import forms, models\n\n\nclass RequestsProjectDetailView(prj_views.ProjectDetailView):\n\n def handle_no_permission(self):\n \"\"\"\n Check if user clould join\n \"\"\"\n user = self.request.user\n is_member = user.is_authenticated() and self.project.has_member(user)\n\n if is_member:\n return super().handle_no_permission()\n else:\n return self.handle_no_membership()\n\n def handle_no_membership(self):\n membership_impossible = (\n not self.request.user.is_authenticated()\n or self.project.is_draft\n or self.project.has_member(self.request.user)\n )\n\n if membership_impossible:\n return super().handle_no_permission()\n else:\n return redirect('memberships-request',\n project_slug=self.project.slug)\n\n\nclass InviteView(mixin.LoginRequiredMixin, generic.UpdateView):\n model = models.Invite\n form_class = forms.InviteForm\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({'user': self.request.user})\n return kwargs\n\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n return redirect(form.instance.project.get_absolute_url())\n else:\n form.instance.reject()\n return redirect('/')\n\n\nclass RequestView(mixin.LoginRequiredMixin, generic.DetailView):\n \"\"\"\n Displays membership request if it exists or allows to create one.\n \"\"\"\n model = models.Request\n slug_field = 'project__slug'\n slug_url_kwarg = 'project_slug'\n context_object_name = 'join_request'\n\n def get_queryset(self):\n return self.model.objects.filter(creator=self.request.user)\n\n def get(self, request, *args, **kwargs):\n if self.project.has_member(request.user):\n return redirect(self.project.get_absolute_url())\n else:\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n user = request.user\n project = self.project\n models.Request.objects.request_membership(project, user)\n return redirect(self.request.path)\n\n def get_object(self, queryset=None):\n try:\n return super().get_object(queryset)\n except Http404:\n return None\n\n @property\n def project(self):\n project_slug = self.kwargs[self.slug_url_kwarg]\n return prj_models.Project.objects.get(slug=project_slug)\n", "path": "euth/memberships/views.py"}, {"content": "from django import forms\nfrom django.core.exceptions import 
ValidationError\n\nfrom . import models\n\n\nclass InviteForm(forms.ModelForm):\n accept = forms.CharField(required=False)\n reject = forms.CharField(required=False)\n\n class Meta:\n model = models.Invite\n fields = ['accept', 'reject']\n\n def __init__(self, user=None, **kwargs):\n super().__init__(**kwargs)\n self.user = user\n\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n if 'accept' in data and not self.user.email == self.instance.email:\n raise ValidationError('This user has another email address than '\n 'the one that received the invitation.')\n return data\n\n def is_accepted(self):\n data = self.data\n return 'accept' in data and 'reject' not in data\n", "path": "euth/memberships/forms.py"}]}
1,213
316
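A framework-free sketch of the check the golden diff introduces: an invite may only be accepted by a user whose email matches the address the invitation was sent to. The dataclasses and function below are stand-ins invented for illustration, not the adhocracy4 models:

```python
from dataclasses import dataclass


@dataclass
class Invite:
    email: str
    project: str


@dataclass
class User:
    email: str


class InviteError(Exception):
    pass


def accept_invite(invite: Invite, current_user: User) -> str:
    # Mirrors the clean() validation added in forms.py: reject a logged-in
    # user whose address differs from the invited one.
    if current_user.email != invite.email:
        raise InviteError("This user has another email address than "
                          "the one that received the invitation.")
    return f"{current_user.email} joined {invite.project}"


invite = Invite(email="alice@example.org", project="opin-demo")
print(accept_invite(invite, User(email="alice@example.org")))    # accepted
try:
    accept_invite(invite, User(email="bob@example.org"))          # wrong login
except InviteError as exc:
    print("rejected:", exc)
```

In the Django view itself the patch threads the logged-in user into the form through `get_form_kwargs`, which is the standard way to give a `ModelForm` access to `request.user` during validation.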
gh_patches_debug_12885
rasdani/github-patches
git_diff
cupy__cupy-3291
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Lexsort output is incorrect The output of cupy.lexsort is incorrect. I can't see any pattern as to what is wrong with the output. ### Code to reproduce ``` import cupy import numpy # numpy output is correct: a = (numpy.random.random((10,2)) * 10).astype(int) # random ints between 0 and 9 print(a[numpy.lexsort(a.T[::-1])]) # sorted by first column, not last # cupy output is unsorted: b = cupy.array(a) print(b[cupy.lexsort(b.T[::-1])]) ``` The same thing happens for floats, so that's not the reason. ### Conditions (output of `python -c 'import cupy; cupy.show_config()'`): CuPy Version : 7.1.1 CUDA Root : /opt/cuda CUDA Build Version : 10020 CUDA Driver Version : 10020 CUDA Runtime Version : 10020 cuBLAS Version : 10202 cuFFT Version : 10102 cuRAND Version : 10102 cuSOLVER Version : (10, 3, 0) cuSPARSE Version : 10301 NVRTC Version : (10, 2) cuDNN Build Version : 7605 cuDNN Version : 7605 NCCL Build Version : 2507 NCCL Runtime Version : 2507 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cupy/_sorting/sort.py` Content: ``` 1 import cupy 2 import numpy 3 4 if cupy.cuda.thrust_enabled: 5 from cupy.cuda import thrust 6 7 8 def sort(a, axis=-1): 9 """Returns a sorted copy of an array with a stable sorting algorithm. 10 11 Args: 12 a (cupy.ndarray): Array to be sorted. 13 axis (int or None): Axis along which to sort. Default is -1, which 14 means sort along the last axis. If None is supplied, the array is 15 flattened before sorting. 16 17 Returns: 18 cupy.ndarray: Array of the same type and shape as ``a``. 19 20 .. note:: 21 For its implementation reason, ``cupy.sort`` currently does not support 22 ``kind`` and ``order`` parameters that ``numpy.sort`` does 23 support. 24 25 .. seealso:: :func:`numpy.sort` 26 27 """ 28 if axis is None: 29 ret = a.flatten() 30 axis = -1 31 else: 32 ret = a.copy() 33 ret.sort(axis=axis) 34 return ret 35 36 37 def lexsort(keys): 38 """Perform an indirect sort using an array of keys. 39 40 Args: 41 keys (cupy.ndarray): ``(k, N)`` array containing ``k`` ``(N,)``-shaped 42 arrays. The ``k`` different "rows" to be sorted. The last row is 43 the primary sort key. 44 45 Returns: 46 cupy.ndarray: Array of indices that sort the keys. 47 48 .. note:: 49 For its implementation reason, ``cupy.lexsort`` currently supports only 50 keys with their rank of one or two and does not support ``axis`` 51 parameter that ``numpy.lexsort`` supports. 52 53 .. seealso:: :func:`numpy.lexsort` 54 55 """ 56 57 # TODO(takagi): Support axis argument. 58 59 if not cupy.cuda.thrust_enabled: 60 raise RuntimeError('Thrust is needed to use cupy.lexsort. Please ' 61 'install CUDA Toolkit with Thrust then reinstall ' 62 'CuPy after uninstalling it.') 63 64 if keys.ndim == (): 65 # as numpy.lexsort() raises 66 raise TypeError('need sequence of keys with len > 0 in lexsort') 67 68 if keys.ndim == 1: 69 return 0 70 71 # TODO(takagi): Support ranks of three or more. 72 if keys.ndim > 2: 73 raise NotImplementedError('Keys with the rank of three or more is not ' 74 'supported in lexsort') 75 76 idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp) 77 k = keys._shape[0] 78 n = keys._shape[1] 79 thrust.lexsort(keys.dtype, idx_array.data.ptr, keys.data.ptr, k, n) 80 81 return idx_array 82 83 84 def argsort(a, axis=-1): 85 """Returns the indices that would sort an array with a stable sorting. 
86 87 Args: 88 a (cupy.ndarray): Array to sort. 89 axis (int or None): Axis along which to sort. Default is -1, which 90 means sort along the last axis. If None is supplied, the array is 91 flattened before sorting. 92 93 Returns: 94 cupy.ndarray: Array of indices that sort ``a``. 95 96 .. note:: 97 For its implementation reason, ``cupy.argsort`` does not support 98 ``kind`` and ``order`` parameters. 99 100 .. seealso:: :func:`numpy.argsort` 101 102 """ 103 return a.argsort(axis=axis) 104 105 106 def msort(a): 107 """Returns a copy of an array sorted along the first axis. 108 109 Args: 110 a (cupy.ndarray): Array to be sorted. 111 112 Returns: 113 cupy.ndarray: Array of the same type and shape as ``a``. 114 115 .. note: 116 ``cupy.msort(a)``, the CuPy counterpart of ``numpy.msort(a)``, is 117 equivalent to ``cupy.sort(a, axis=0)``. 118 119 .. seealso:: :func:`numpy.msort` 120 121 """ 122 123 # TODO(takagi): Support float16 and bool. 124 return sort(a, axis=0) 125 126 127 # TODO(okuta): Implement sort_complex 128 129 130 def partition(a, kth, axis=-1): 131 """Returns a partitioned copy of an array. 132 133 Creates a copy of the array whose elements are rearranged such that the 134 value of the element in k-th position would occur in that position in a 135 sorted array. All of the elements before the new k-th element are less 136 than or equal to the elements after the new k-th element. 137 138 Args: 139 a (cupy.ndarray): Array to be sorted. 140 kth (int or sequence of ints): Element index to partition by. If 141 supplied with a sequence of k-th it will partition all elements 142 indexed by k-th of them into their sorted position at once. 143 axis (int or None): Axis along which to sort. Default is -1, which 144 means sort along the last axis. If None is supplied, the array is 145 flattened before sorting. 146 147 Returns: 148 cupy.ndarray: Array of the same type and shape as ``a``. 149 150 .. seealso:: :func:`numpy.partition` 151 152 """ 153 if axis is None: 154 ret = a.flatten() 155 axis = -1 156 else: 157 ret = a.copy() 158 ret.partition(kth, axis=axis) 159 return ret 160 161 162 def argpartition(a, kth, axis=-1): 163 """Returns the indices that would partially sort an array. 164 165 Args: 166 a (cupy.ndarray): Array to be sorted. 167 kth (int or sequence of ints): Element index to partition by. If 168 supplied with a sequence of k-th it will partition all elements 169 indexed by k-th of them into their sorted position at once. 170 axis (int or None): Axis along which to sort. Default is -1, which 171 means sort along the last axis. If None is supplied, the array is 172 flattened before sorting. 173 174 Returns: 175 cupy.ndarray: Array of the same type and shape as ``a``. 176 177 .. note:: 178 For its implementation reason, `cupy.argpartition` fully sorts the 179 given array as `cupy.argsort` does. It also does not support ``kind`` 180 and ``order`` parameters that ``numpy.argpartition`` supports. 181 182 .. seealso:: :func:`numpy.argpartition` 183 184 """ 185 return a.argpartition(kth, axis=axis) 186 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cupy/_sorting/sort.py b/cupy/_sorting/sort.py --- a/cupy/_sorting/sort.py +++ b/cupy/_sorting/sort.py @@ -73,6 +73,10 @@ raise NotImplementedError('Keys with the rank of three or more is not ' 'supported in lexsort') + # thrust.lexsort() assumes a C-contiguous array + if not keys.flags.c_contiguous: + keys = keys.copy('C') + idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp) k = keys._shape[0] n = keys._shape[1] @@ -120,7 +124,6 @@ """ - # TODO(takagi): Support float16 and bool. return sort(a, axis=0)
{"golden_diff": "diff --git a/cupy/_sorting/sort.py b/cupy/_sorting/sort.py\n--- a/cupy/_sorting/sort.py\n+++ b/cupy/_sorting/sort.py\n@@ -73,6 +73,10 @@\n raise NotImplementedError('Keys with the rank of three or more is not '\n 'supported in lexsort')\n \n+ # thrust.lexsort() assumes a C-contiguous array\n+ if not keys.flags.c_contiguous:\n+ keys = keys.copy('C')\n+\n idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp)\n k = keys._shape[0]\n n = keys._shape[1]\n@@ -120,7 +124,6 @@\n \n \"\"\"\n \n- # TODO(takagi): Support float16 and bool.\n return sort(a, axis=0)\n", "issue": "Lexsort output is incorrect\nThe output of cupy.lexsort is incorrect. I can't see any pattern as to what is wrong with the output.\r\n\r\n### Code to reproduce\r\n```\r\nimport cupy\r\nimport numpy\r\n\r\n# numpy output is correct:\r\na = (numpy.random.random((10,2)) * 10).astype(int) # random ints between 0 and 9\r\nprint(a[numpy.lexsort(a.T[::-1])]) # sorted by first column, not last\r\n\r\n# cupy output is unsorted:\r\nb = cupy.array(a)\r\nprint(b[cupy.lexsort(b.T[::-1])])\r\n```\r\nThe same thing happens for floats, so that's not the reason.\r\n\r\n### Conditions\r\n(output of `python -c 'import cupy; cupy.show_config()'`):\r\n\r\nCuPy Version : 7.1.1\r\nCUDA Root : /opt/cuda\r\nCUDA Build Version : 10020\r\nCUDA Driver Version : 10020\r\nCUDA Runtime Version : 10020\r\ncuBLAS Version : 10202\r\ncuFFT Version : 10102\r\ncuRAND Version : 10102\r\ncuSOLVER Version : (10, 3, 0)\r\ncuSPARSE Version : 10301\r\nNVRTC Version : (10, 2)\r\ncuDNN Build Version : 7605\r\ncuDNN Version : 7605\r\nNCCL Build Version : 2507\r\nNCCL Runtime Version : 2507\n", "before_files": [{"content": "import cupy\nimport numpy\n\nif cupy.cuda.thrust_enabled:\n from cupy.cuda import thrust\n\n\ndef sort(a, axis=-1):\n \"\"\"Returns a sorted copy of an array with a stable sorting algorithm.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n axis (int or None): Axis along which to sort. Default is -1, which\n means sort along the last axis. If None is supplied, the array is\n flattened before sorting.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. note::\n For its implementation reason, ``cupy.sort`` currently does not support\n ``kind`` and ``order`` parameters that ``numpy.sort`` does\n support.\n\n .. seealso:: :func:`numpy.sort`\n\n \"\"\"\n if axis is None:\n ret = a.flatten()\n axis = -1\n else:\n ret = a.copy()\n ret.sort(axis=axis)\n return ret\n\n\ndef lexsort(keys):\n \"\"\"Perform an indirect sort using an array of keys.\n\n Args:\n keys (cupy.ndarray): ``(k, N)`` array containing ``k`` ``(N,)``-shaped\n arrays. The ``k`` different \"rows\" to be sorted. The last row is\n the primary sort key.\n\n Returns:\n cupy.ndarray: Array of indices that sort the keys.\n\n .. note::\n For its implementation reason, ``cupy.lexsort`` currently supports only\n keys with their rank of one or two and does not support ``axis``\n parameter that ``numpy.lexsort`` supports.\n\n .. seealso:: :func:`numpy.lexsort`\n\n \"\"\"\n\n # TODO(takagi): Support axis argument.\n\n if not cupy.cuda.thrust_enabled:\n raise RuntimeError('Thrust is needed to use cupy.lexsort. 
Please '\n 'install CUDA Toolkit with Thrust then reinstall '\n 'CuPy after uninstalling it.')\n\n if keys.ndim == ():\n # as numpy.lexsort() raises\n raise TypeError('need sequence of keys with len > 0 in lexsort')\n\n if keys.ndim == 1:\n return 0\n\n # TODO(takagi): Support ranks of three or more.\n if keys.ndim > 2:\n raise NotImplementedError('Keys with the rank of three or more is not '\n 'supported in lexsort')\n\n idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp)\n k = keys._shape[0]\n n = keys._shape[1]\n thrust.lexsort(keys.dtype, idx_array.data.ptr, keys.data.ptr, k, n)\n\n return idx_array\n\n\ndef argsort(a, axis=-1):\n \"\"\"Returns the indices that would sort an array with a stable sorting.\n\n Args:\n a (cupy.ndarray): Array to sort.\n axis (int or None): Axis along which to sort. Default is -1, which\n means sort along the last axis. If None is supplied, the array is\n flattened before sorting.\n\n Returns:\n cupy.ndarray: Array of indices that sort ``a``.\n\n .. note::\n For its implementation reason, ``cupy.argsort`` does not support\n ``kind`` and ``order`` parameters.\n\n .. seealso:: :func:`numpy.argsort`\n\n \"\"\"\n return a.argsort(axis=axis)\n\n\ndef msort(a):\n \"\"\"Returns a copy of an array sorted along the first axis.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. note:\n ``cupy.msort(a)``, the CuPy counterpart of ``numpy.msort(a)``, is\n equivalent to ``cupy.sort(a, axis=0)``.\n\n .. seealso:: :func:`numpy.msort`\n\n \"\"\"\n\n # TODO(takagi): Support float16 and bool.\n return sort(a, axis=0)\n\n\n# TODO(okuta): Implement sort_complex\n\n\ndef partition(a, kth, axis=-1):\n \"\"\"Returns a partitioned copy of an array.\n\n Creates a copy of the array whose elements are rearranged such that the\n value of the element in k-th position would occur in that position in a\n sorted array. All of the elements before the new k-th element are less\n than or equal to the elements after the new k-th element.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n kth (int or sequence of ints): Element index to partition by. If\n supplied with a sequence of k-th it will partition all elements\n indexed by k-th of them into their sorted position at once.\n axis (int or None): Axis along which to sort. Default is -1, which\n means sort along the last axis. If None is supplied, the array is\n flattened before sorting.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. seealso:: :func:`numpy.partition`\n\n \"\"\"\n if axis is None:\n ret = a.flatten()\n axis = -1\n else:\n ret = a.copy()\n ret.partition(kth, axis=axis)\n return ret\n\n\ndef argpartition(a, kth, axis=-1):\n \"\"\"Returns the indices that would partially sort an array.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n kth (int or sequence of ints): Element index to partition by. If\n supplied with a sequence of k-th it will partition all elements\n indexed by k-th of them into their sorted position at once.\n axis (int or None): Axis along which to sort. Default is -1, which\n means sort along the last axis. If None is supplied, the array is\n flattened before sorting.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. note::\n For its implementation reason, `cupy.argpartition` fully sorts the\n given array as `cupy.argsort` does. It also does not support ``kind``\n and ``order`` parameters that ``numpy.argpartition`` supports.\n\n .. 
seealso:: :func:`numpy.argpartition`\n\n \"\"\"\n return a.argpartition(kth, axis=axis)\n", "path": "cupy/_sorting/sort.py"}], "after_files": [{"content": "import cupy\nimport numpy\n\nif cupy.cuda.thrust_enabled:\n from cupy.cuda import thrust\n\n\ndef sort(a, axis=-1):\n \"\"\"Returns a sorted copy of an array with a stable sorting algorithm.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n axis (int or None): Axis along which to sort. Default is -1, which\n means sort along the last axis. If None is supplied, the array is\n flattened before sorting.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. note::\n For its implementation reason, ``cupy.sort`` currently does not support\n ``kind`` and ``order`` parameters that ``numpy.sort`` does\n support.\n\n .. seealso:: :func:`numpy.sort`\n\n \"\"\"\n if axis is None:\n ret = a.flatten()\n axis = -1\n else:\n ret = a.copy()\n ret.sort(axis=axis)\n return ret\n\n\ndef lexsort(keys):\n \"\"\"Perform an indirect sort using an array of keys.\n\n Args:\n keys (cupy.ndarray): ``(k, N)`` array containing ``k`` ``(N,)``-shaped\n arrays. The ``k`` different \"rows\" to be sorted. The last row is\n the primary sort key.\n\n Returns:\n cupy.ndarray: Array of indices that sort the keys.\n\n .. note::\n For its implementation reason, ``cupy.lexsort`` currently supports only\n keys with their rank of one or two and does not support ``axis``\n parameter that ``numpy.lexsort`` supports.\n\n .. seealso:: :func:`numpy.lexsort`\n\n \"\"\"\n\n # TODO(takagi): Support axis argument.\n\n if not cupy.cuda.thrust_enabled:\n raise RuntimeError('Thrust is needed to use cupy.lexsort. Please '\n 'install CUDA Toolkit with Thrust then reinstall '\n 'CuPy after uninstalling it.')\n\n if keys.ndim == ():\n # as numpy.lexsort() raises\n raise TypeError('need sequence of keys with len > 0 in lexsort')\n\n if keys.ndim == 1:\n return 0\n\n # TODO(takagi): Support ranks of three or more.\n if keys.ndim > 2:\n raise NotImplementedError('Keys with the rank of three or more is not '\n 'supported in lexsort')\n\n # thrust.lexsort() assumes a C-contiguous array\n if not keys.flags.c_contiguous:\n keys = keys.copy('C')\n\n idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp)\n k = keys._shape[0]\n n = keys._shape[1]\n thrust.lexsort(keys.dtype, idx_array.data.ptr, keys.data.ptr, k, n)\n\n return idx_array\n\n\ndef argsort(a, axis=-1):\n \"\"\"Returns the indices that would sort an array with a stable sorting.\n\n Args:\n a (cupy.ndarray): Array to sort.\n axis (int or None): Axis along which to sort. Default is -1, which\n means sort along the last axis. If None is supplied, the array is\n flattened before sorting.\n\n Returns:\n cupy.ndarray: Array of indices that sort ``a``.\n\n .. note::\n For its implementation reason, ``cupy.argsort`` does not support\n ``kind`` and ``order`` parameters.\n\n .. seealso:: :func:`numpy.argsort`\n\n \"\"\"\n return a.argsort(axis=axis)\n\n\ndef msort(a):\n \"\"\"Returns a copy of an array sorted along the first axis.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. note:\n ``cupy.msort(a)``, the CuPy counterpart of ``numpy.msort(a)``, is\n equivalent to ``cupy.sort(a, axis=0)``.\n\n .. 
seealso:: :func:`numpy.msort`\n\n \"\"\"\n\n return sort(a, axis=0)\n\n\n# TODO(okuta): Implement sort_complex\n\n\ndef partition(a, kth, axis=-1):\n \"\"\"Returns a partitioned copy of an array.\n\n Creates a copy of the array whose elements are rearranged such that the\n value of the element in k-th position would occur in that position in a\n sorted array. All of the elements before the new k-th element are less\n than or equal to the elements after the new k-th element.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n kth (int or sequence of ints): Element index to partition by. If\n supplied with a sequence of k-th it will partition all elements\n indexed by k-th of them into their sorted position at once.\n axis (int or None): Axis along which to sort. Default is -1, which\n means sort along the last axis. If None is supplied, the array is\n flattened before sorting.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. seealso:: :func:`numpy.partition`\n\n \"\"\"\n if axis is None:\n ret = a.flatten()\n axis = -1\n else:\n ret = a.copy()\n ret.partition(kth, axis=axis)\n return ret\n\n\ndef argpartition(a, kth, axis=-1):\n \"\"\"Returns the indices that would partially sort an array.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n kth (int or sequence of ints): Element index to partition by. If\n supplied with a sequence of k-th it will partition all elements\n indexed by k-th of them into their sorted position at once.\n axis (int or None): Axis along which to sort. Default is -1, which\n means sort along the last axis. If None is supplied, the array is\n flattened before sorting.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. note::\n For its implementation reason, `cupy.argpartition` fully sorts the\n given array as `cupy.argsort` does. It also does not support ``kind``\n and ``order`` parameters that ``numpy.argpartition`` supports.\n\n .. seealso:: :func:`numpy.argpartition`\n\n \"\"\"\n return a.argpartition(kth, axis=axis)\n", "path": "cupy/_sorting/sort.py"}]}
2,467
189
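A minimal usage sketch of the CuPy sorting helpers whose docstrings appear in the row above; the sample arrays are made-up assumptions, and running it requires a CUDA device plus a Thrust-enabled CuPy build (as the `lexsort` docstring itself notes).

```python
# Hedged usage sketch of cupy.sort / cupy.argsort / cupy.lexsort.
# Assumes a CUDA device and a Thrust-enabled CuPy build.
import cupy

a = cupy.array([[3, 1, 2], [9, 7, 8]])
print(cupy.sort(a, axis=1))       # row-wise sorted copy of `a`
print(cupy.argsort(a, axis=1))    # indices that would sort each row

# lexsort: the last "row" of keys is the primary sort key
keys = cupy.array([[1, 0, 1],     # secondary key
                   [2, 2, 1]])    # primary key
print(cupy.lexsort(keys))         # [2, 1, 0] on this input
```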
gh_patches_debug_19714
rasdani/github-patches
git_diff
plotly__plotly.py-2713
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- histogram() got an unexpected keyword argument 'legend' with pandas backend pandas version 1.1.0 plotly version 4.9.0 The following will raise `histogram() got an unexpected keyword argument 'legend' `: ```python import pandas as pd pd.options.plotting.backend = "plotly" df = pd.DataFrame() df.hist() ``` I suggest to add `legend` in the skip list: https://github.com/plotly/plotly.py/blob/dc9c5fdfe70367d5dc4fcdca6a0ad07125d64647/packages/python/plotly/plotly/__init__.py#L161 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `packages/python/plotly/plotly/__init__.py` Content: ``` 1 """ 2 https://plot.ly/python/ 3 4 Plotly's Python API allows users to programmatically access Plotly's 5 server resources. 6 7 This package is organized as follows: 8 9 Subpackages: 10 11 - plotly: all functionality that requires access to Plotly's servers 12 13 - graph_objs: objects for designing figures and visualizing data 14 15 - matplotlylib: tools to convert matplotlib figures 16 17 Modules: 18 19 - tools: some helpful tools that do not require access to Plotly's servers 20 21 - utils: functions that you probably won't need, but that subpackages use 22 23 - version: holds the current API version 24 25 - exceptions: defines our custom exception classes 26 27 """ 28 from __future__ import absolute_import 29 import sys 30 from _plotly_utils.importers import relative_import 31 32 33 if sys.version_info < (3, 7): 34 from plotly import ( 35 graph_objs, 36 tools, 37 utils, 38 offline, 39 colors, 40 io, 41 data, 42 ) 43 from plotly.version import __version__ 44 45 __all__ = [ 46 "graph_objs", 47 "tools", 48 "utils", 49 "offline", 50 "colors", 51 "io", 52 "data", 53 "__version__", 54 ] 55 56 # Set default template (for >= 3.7 this is done in ploty/io/__init__.py) 57 from plotly.io import templates 58 59 templates._default = "plotly" 60 else: 61 __all__, __getattr__, __dir__ = relative_import( 62 __name__, 63 [ 64 ".graph_objs", 65 ".graph_objects", 66 ".tools", 67 ".utils", 68 ".offline", 69 ".colors", 70 ".io", 71 ".data", 72 ], 73 [".version.__version__"], 74 ) 75 76 77 def plot(data_frame, kind, **kwargs): 78 """ 79 Pandas plotting backend function, not meant to be called directly. 
80 To activate, set pandas.options.plotting.backend="plotly" 81 See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py 82 """ 83 from .express import ( 84 scatter, 85 line, 86 area, 87 bar, 88 box, 89 histogram, 90 violin, 91 strip, 92 funnel, 93 density_contour, 94 density_heatmap, 95 imshow, 96 ) 97 98 if kind == "scatter": 99 new_kwargs = {k: kwargs[k] for k in kwargs if k not in ["s", "c"]} 100 return scatter(data_frame, **new_kwargs) 101 if kind == "line": 102 return line(data_frame, **kwargs) 103 if kind == "area": 104 return area(data_frame, **kwargs) 105 if kind == "bar": 106 return bar(data_frame, **kwargs) 107 if kind == "barh": 108 return bar(data_frame, orientation="h", **kwargs) 109 if kind == "box": 110 new_kwargs = {k: kwargs[k] for k in kwargs if k not in ["by"]} 111 return box(data_frame, **new_kwargs) 112 if kind in ["hist", "histogram"]: 113 new_kwargs = {k: kwargs[k] for k in kwargs if k not in ["by", "bins"]} 114 return histogram(data_frame, **new_kwargs) 115 if kind == "violin": 116 return violin(data_frame, **kwargs) 117 if kind == "strip": 118 return strip(data_frame, **kwargs) 119 if kind == "funnel": 120 return funnel(data_frame, **kwargs) 121 if kind == "density_contour": 122 return density_contour(data_frame, **kwargs) 123 if kind == "density_heatmap": 124 return density_heatmap(data_frame, **kwargs) 125 if kind == "imshow": 126 return imshow(data_frame, **kwargs) 127 if kind == "heatmap": 128 raise ValueError( 129 "kind='heatmap' not supported plotting.backend='plotly'. " 130 "Please use kind='imshow' or kind='density_heatmap'." 131 ) 132 133 raise NotImplementedError( 134 "kind='%s' not yet supported for plotting.backend='plotly'" % kind 135 ) 136 137 138 def boxplot_frame(data_frame, **kwargs): 139 """ 140 Pandas plotting backend function, not meant to be called directly. 141 To activate, set pandas.options.plotting.backend="plotly" 142 See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py 143 """ 144 from .express import box 145 146 skip = ["by", "column", "ax", "fontsize", "rot", "grid", "figsize", "layout"] 147 skip += ["return_type"] 148 new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip} 149 return box(data_frame, **new_kwargs) 150 151 152 def hist_frame(data_frame, **kwargs): 153 """ 154 Pandas plotting backend function, not meant to be called directly. 155 To activate, set pandas.options.plotting.backend="plotly" 156 See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py 157 """ 158 from .express import histogram 159 160 skip = ["column", "by", "grid", "xlabelsize", "xrot", "ylabelsize", "yrot"] 161 skip += ["ax", "sharex", "sharey", "figsize", "layout", "bins"] 162 new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip} 163 return histogram(data_frame, **new_kwargs) 164 165 166 def hist_series(data_frame, **kwargs): 167 """ 168 Pandas plotting backend function, not meant to be called directly. 
169 To activate, set pandas.options.plotting.backend="plotly" 170 See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py 171 """ 172 from .express import histogram 173 174 skip = ["by", "grid", "xlabelsize", "xrot", "ylabelsize", "yrot", "ax"] 175 skip += ["figsize", "bins"] 176 new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip} 177 return histogram(data_frame, **new_kwargs) 178 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/packages/python/plotly/plotly/__init__.py b/packages/python/plotly/plotly/__init__.py --- a/packages/python/plotly/plotly/__init__.py +++ b/packages/python/plotly/plotly/__init__.py @@ -158,7 +158,7 @@ from .express import histogram skip = ["column", "by", "grid", "xlabelsize", "xrot", "ylabelsize", "yrot"] - skip += ["ax", "sharex", "sharey", "figsize", "layout", "bins"] + skip += ["ax", "sharex", "sharey", "figsize", "layout", "bins", "legend"] new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip} return histogram(data_frame, **new_kwargs) @@ -172,6 +172,6 @@ from .express import histogram skip = ["by", "grid", "xlabelsize", "xrot", "ylabelsize", "yrot", "ax"] - skip += ["figsize", "bins"] + skip += ["figsize", "bins", "legend"] new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip} return histogram(data_frame, **new_kwargs)
{"golden_diff": "diff --git a/packages/python/plotly/plotly/__init__.py b/packages/python/plotly/plotly/__init__.py\n--- a/packages/python/plotly/plotly/__init__.py\n+++ b/packages/python/plotly/plotly/__init__.py\n@@ -158,7 +158,7 @@\n from .express import histogram\n \n skip = [\"column\", \"by\", \"grid\", \"xlabelsize\", \"xrot\", \"ylabelsize\", \"yrot\"]\n- skip += [\"ax\", \"sharex\", \"sharey\", \"figsize\", \"layout\", \"bins\"]\n+ skip += [\"ax\", \"sharex\", \"sharey\", \"figsize\", \"layout\", \"bins\", \"legend\"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return histogram(data_frame, **new_kwargs)\n \n@@ -172,6 +172,6 @@\n from .express import histogram\n \n skip = [\"by\", \"grid\", \"xlabelsize\", \"xrot\", \"ylabelsize\", \"yrot\", \"ax\"]\n- skip += [\"figsize\", \"bins\"]\n+ skip += [\"figsize\", \"bins\", \"legend\"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return histogram(data_frame, **new_kwargs)\n", "issue": "histogram() got an unexpected keyword argument 'legend' with pandas backend\npandas version 1.1.0\r\nplotly version 4.9.0\r\n\r\nThe following will raise `histogram() got an unexpected keyword argument 'legend' `:\r\n```python\r\nimport pandas as pd\r\npd.options.plotting.backend = \"plotly\"\r\ndf = pd.DataFrame()\r\ndf.hist()\r\n```\r\nI suggest to add `legend` in the skip list:\r\nhttps://github.com/plotly/plotly.py/blob/dc9c5fdfe70367d5dc4fcdca6a0ad07125d64647/packages/python/plotly/plotly/__init__.py#L161\n", "before_files": [{"content": "\"\"\"\nhttps://plot.ly/python/\n\nPlotly's Python API allows users to programmatically access Plotly's\nserver resources.\n\nThis package is organized as follows:\n\nSubpackages:\n\n- plotly: all functionality that requires access to Plotly's servers\n\n- graph_objs: objects for designing figures and visualizing data\n\n- matplotlylib: tools to convert matplotlib figures\n\nModules:\n\n- tools: some helpful tools that do not require access to Plotly's servers\n\n- utils: functions that you probably won't need, but that subpackages use\n\n- version: holds the current API version\n\n- exceptions: defines our custom exception classes\n\n\"\"\"\nfrom __future__ import absolute_import\nimport sys\nfrom _plotly_utils.importers import relative_import\n\n\nif sys.version_info < (3, 7):\n from plotly import (\n graph_objs,\n tools,\n utils,\n offline,\n colors,\n io,\n data,\n )\n from plotly.version import __version__\n\n __all__ = [\n \"graph_objs\",\n \"tools\",\n \"utils\",\n \"offline\",\n \"colors\",\n \"io\",\n \"data\",\n \"__version__\",\n ]\n\n # Set default template (for >= 3.7 this is done in ploty/io/__init__.py)\n from plotly.io import templates\n\n templates._default = \"plotly\"\nelse:\n __all__, __getattr__, __dir__ = relative_import(\n __name__,\n [\n \".graph_objs\",\n \".graph_objects\",\n \".tools\",\n \".utils\",\n \".offline\",\n \".colors\",\n \".io\",\n \".data\",\n ],\n [\".version.__version__\"],\n )\n\n\ndef plot(data_frame, kind, **kwargs):\n \"\"\"\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend=\"plotly\"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n \"\"\"\n from .express import (\n scatter,\n line,\n area,\n bar,\n box,\n histogram,\n violin,\n strip,\n funnel,\n density_contour,\n density_heatmap,\n imshow,\n )\n\n if kind == \"scatter\":\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in [\"s\", \"c\"]}\n return scatter(data_frame, 
**new_kwargs)\n if kind == \"line\":\n return line(data_frame, **kwargs)\n if kind == \"area\":\n return area(data_frame, **kwargs)\n if kind == \"bar\":\n return bar(data_frame, **kwargs)\n if kind == \"barh\":\n return bar(data_frame, orientation=\"h\", **kwargs)\n if kind == \"box\":\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in [\"by\"]}\n return box(data_frame, **new_kwargs)\n if kind in [\"hist\", \"histogram\"]:\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in [\"by\", \"bins\"]}\n return histogram(data_frame, **new_kwargs)\n if kind == \"violin\":\n return violin(data_frame, **kwargs)\n if kind == \"strip\":\n return strip(data_frame, **kwargs)\n if kind == \"funnel\":\n return funnel(data_frame, **kwargs)\n if kind == \"density_contour\":\n return density_contour(data_frame, **kwargs)\n if kind == \"density_heatmap\":\n return density_heatmap(data_frame, **kwargs)\n if kind == \"imshow\":\n return imshow(data_frame, **kwargs)\n if kind == \"heatmap\":\n raise ValueError(\n \"kind='heatmap' not supported plotting.backend='plotly'. \"\n \"Please use kind='imshow' or kind='density_heatmap'.\"\n )\n\n raise NotImplementedError(\n \"kind='%s' not yet supported for plotting.backend='plotly'\" % kind\n )\n\n\ndef boxplot_frame(data_frame, **kwargs):\n \"\"\"\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend=\"plotly\"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n \"\"\"\n from .express import box\n\n skip = [\"by\", \"column\", \"ax\", \"fontsize\", \"rot\", \"grid\", \"figsize\", \"layout\"]\n skip += [\"return_type\"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return box(data_frame, **new_kwargs)\n\n\ndef hist_frame(data_frame, **kwargs):\n \"\"\"\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend=\"plotly\"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n \"\"\"\n from .express import histogram\n\n skip = [\"column\", \"by\", \"grid\", \"xlabelsize\", \"xrot\", \"ylabelsize\", \"yrot\"]\n skip += [\"ax\", \"sharex\", \"sharey\", \"figsize\", \"layout\", \"bins\"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return histogram(data_frame, **new_kwargs)\n\n\ndef hist_series(data_frame, **kwargs):\n \"\"\"\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend=\"plotly\"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n \"\"\"\n from .express import histogram\n\n skip = [\"by\", \"grid\", \"xlabelsize\", \"xrot\", \"ylabelsize\", \"yrot\", \"ax\"]\n skip += [\"figsize\", \"bins\"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return histogram(data_frame, **new_kwargs)\n", "path": "packages/python/plotly/plotly/__init__.py"}], "after_files": [{"content": "\"\"\"\nhttps://plot.ly/python/\n\nPlotly's Python API allows users to programmatically access Plotly's\nserver resources.\n\nThis package is organized as follows:\n\nSubpackages:\n\n- plotly: all functionality that requires access to Plotly's servers\n\n- graph_objs: objects for designing figures and visualizing data\n\n- matplotlylib: tools to convert matplotlib figures\n\nModules:\n\n- tools: some helpful tools that do not require access to Plotly's servers\n\n- utils: functions that you probably won't need, but that subpackages use\n\n- version: 
holds the current API version\n\n- exceptions: defines our custom exception classes\n\n\"\"\"\nfrom __future__ import absolute_import\nimport sys\nfrom _plotly_utils.importers import relative_import\n\n\nif sys.version_info < (3, 7):\n from plotly import (\n graph_objs,\n tools,\n utils,\n offline,\n colors,\n io,\n data,\n )\n from plotly.version import __version__\n\n __all__ = [\n \"graph_objs\",\n \"tools\",\n \"utils\",\n \"offline\",\n \"colors\",\n \"io\",\n \"data\",\n \"__version__\",\n ]\n\n # Set default template (for >= 3.7 this is done in ploty/io/__init__.py)\n from plotly.io import templates\n\n templates._default = \"plotly\"\nelse:\n __all__, __getattr__, __dir__ = relative_import(\n __name__,\n [\n \".graph_objs\",\n \".graph_objects\",\n \".tools\",\n \".utils\",\n \".offline\",\n \".colors\",\n \".io\",\n \".data\",\n ],\n [\".version.__version__\"],\n )\n\n\ndef plot(data_frame, kind, **kwargs):\n \"\"\"\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend=\"plotly\"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n \"\"\"\n from .express import (\n scatter,\n line,\n area,\n bar,\n box,\n histogram,\n violin,\n strip,\n funnel,\n density_contour,\n density_heatmap,\n imshow,\n )\n\n if kind == \"scatter\":\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in [\"s\", \"c\"]}\n return scatter(data_frame, **new_kwargs)\n if kind == \"line\":\n return line(data_frame, **kwargs)\n if kind == \"area\":\n return area(data_frame, **kwargs)\n if kind == \"bar\":\n return bar(data_frame, **kwargs)\n if kind == \"barh\":\n return bar(data_frame, orientation=\"h\", **kwargs)\n if kind == \"box\":\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in [\"by\"]}\n return box(data_frame, **new_kwargs)\n if kind in [\"hist\", \"histogram\"]:\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in [\"by\", \"bins\"]}\n return histogram(data_frame, **new_kwargs)\n if kind == \"violin\":\n return violin(data_frame, **kwargs)\n if kind == \"strip\":\n return strip(data_frame, **kwargs)\n if kind == \"funnel\":\n return funnel(data_frame, **kwargs)\n if kind == \"density_contour\":\n return density_contour(data_frame, **kwargs)\n if kind == \"density_heatmap\":\n return density_heatmap(data_frame, **kwargs)\n if kind == \"imshow\":\n return imshow(data_frame, **kwargs)\n if kind == \"heatmap\":\n raise ValueError(\n \"kind='heatmap' not supported plotting.backend='plotly'. 
\"\n \"Please use kind='imshow' or kind='density_heatmap'.\"\n )\n\n raise NotImplementedError(\n \"kind='%s' not yet supported for plotting.backend='plotly'\" % kind\n )\n\n\ndef boxplot_frame(data_frame, **kwargs):\n \"\"\"\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend=\"plotly\"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n \"\"\"\n from .express import box\n\n skip = [\"by\", \"column\", \"ax\", \"fontsize\", \"rot\", \"grid\", \"figsize\", \"layout\"]\n skip += [\"return_type\"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return box(data_frame, **new_kwargs)\n\n\ndef hist_frame(data_frame, **kwargs):\n \"\"\"\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend=\"plotly\"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n \"\"\"\n from .express import histogram\n\n skip = [\"column\", \"by\", \"grid\", \"xlabelsize\", \"xrot\", \"ylabelsize\", \"yrot\"]\n skip += [\"ax\", \"sharex\", \"sharey\", \"figsize\", \"layout\", \"bins\", \"legend\"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return histogram(data_frame, **new_kwargs)\n\n\ndef hist_series(data_frame, **kwargs):\n \"\"\"\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend=\"plotly\"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n \"\"\"\n from .express import histogram\n\n skip = [\"by\", \"grid\", \"xlabelsize\", \"xrot\", \"ylabelsize\", \"yrot\", \"ax\"]\n skip += [\"figsize\", \"bins\", \"legend\"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return histogram(data_frame, **new_kwargs)\n", "path": "packages/python/plotly/plotly/__init__.py"}]}
2,141
297
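A minimal sketch of the kwarg skip-list idea behind the plotly fix in the row above. The `histogram` stub is an assumption standing in for `plotly.express.histogram` so the snippet runs without plotly installed; the `skip` list mirrors the patched `hist_frame`.

```python
# Sketch of the skip-list filtering used by the plotly pandas backend.
# `histogram` here is a stand-in stub, not plotly.express.histogram.
def histogram(data_frame, **kwargs):
    return ("histogram", sorted(kwargs))

def hist_frame(data_frame, **kwargs):
    skip = ["column", "by", "grid", "xlabelsize", "xrot", "ylabelsize", "yrot"]
    skip += ["ax", "sharex", "sharey", "figsize", "layout", "bins", "legend"]
    new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}
    return histogram(data_frame, **new_kwargs)

# pandas >= 1.1 forwards legend=True from DataFrame.hist(); with "legend" in
# the skip list it is dropped instead of raising TypeError in histogram().
print(hist_frame(None, bins=10, legend=True, title="demo"))
# -> ('histogram', ['title'])
```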
gh_patches_debug_3941
rasdani/github-patches
git_diff
microsoft__botbuilder-python-1431
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [PORT] Create test for waterfall cancellation telemetry > Port this change from botbuilder-dotnet/master branch: https://github.com/microsoft/botbuilder-dotnet/pull/3314 For https://github.com/microsoft/botbuilder-js/issues/1619 # Changed projects * Microsoft.Bot.Builder.Dialogs.Tests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 5 import uuid 6 from typing import Coroutine 7 from botbuilder.core import TurnContext 8 from botbuilder.schema import ActivityTypes 9 from .dialog_reason import DialogReason 10 from .dialog import Dialog 11 from .dialog_turn_result import DialogTurnResult 12 from .dialog_context import DialogContext 13 from .dialog_instance import DialogInstance 14 from .waterfall_step_context import WaterfallStepContext 15 16 17 class WaterfallDialog(Dialog): 18 PersistedOptions = "options" 19 StepIndex = "stepIndex" 20 PersistedValues = "values" 21 PersistedInstanceId = "instanceId" 22 23 def __init__(self, dialog_id: str, steps: [Coroutine] = None): 24 super(WaterfallDialog, self).__init__(dialog_id) 25 if not steps: 26 self._steps = [] 27 else: 28 if not isinstance(steps, list): 29 raise TypeError("WaterfallDialog(): steps must be list of steps") 30 self._steps = steps 31 32 def add_step(self, step): 33 """ 34 Adds a new step to the waterfall. 35 :param step: Step to add 36 :return: Waterfall dialog for fluent calls to `add_step()`. 37 """ 38 if not step: 39 raise TypeError("WaterfallDialog.add_step(): step cannot be None.") 40 41 self._steps.append(step) 42 return self 43 44 async def begin_dialog( 45 self, dialog_context: DialogContext, options: object = None 46 ) -> DialogTurnResult: 47 48 if not dialog_context: 49 raise TypeError("WaterfallDialog.begin_dialog(): dc cannot be None.") 50 51 # Initialize waterfall state 52 state = dialog_context.active_dialog.state 53 54 instance_id = uuid.uuid1().__str__() 55 state[self.PersistedOptions] = options 56 state[self.PersistedValues] = {} 57 state[self.PersistedInstanceId] = instance_id 58 59 properties = {} 60 properties["DialogId"] = self.id 61 properties["InstanceId"] = instance_id 62 self.telemetry_client.track_event("WaterfallStart", properties) 63 64 # Run first stepkinds 65 return await self.run_step(dialog_context, 0, DialogReason.BeginCalled, None) 66 67 async def continue_dialog( # pylint: disable=unused-argument,arguments-differ 68 self, 69 dialog_context: DialogContext = None, 70 reason: DialogReason = None, 71 result: object = NotImplementedError(), 72 ) -> DialogTurnResult: 73 if not dialog_context: 74 raise TypeError("WaterfallDialog.continue_dialog(): dc cannot be None.") 75 76 if dialog_context.context.activity.type != ActivityTypes.message: 77 return Dialog.end_of_turn 78 79 return await self.resume_dialog( 80 dialog_context, 81 DialogReason.ContinueCalled, 82 dialog_context.context.activity.text, 83 ) 84 85 async def resume_dialog( 86 self, dialog_context: DialogContext, reason: DialogReason, result: object 87 ): 88 if dialog_context is None: 89 raise TypeError("WaterfallDialog.resume_dialog(): dc cannot be None.") 90 91 # Increment step index and run step 92 state = dialog_context.active_dialog.state 93 94 # Future Me: 
95 # If issues with CosmosDB, see https://github.com/Microsoft/botbuilder-dotnet/issues/871 96 # for hints. 97 return await self.run_step( 98 dialog_context, state[self.StepIndex] + 1, reason, result 99 ) 100 101 async def end_dialog( # pylint: disable=unused-argument 102 self, context: TurnContext, instance: DialogInstance, reason: DialogReason 103 ) -> None: 104 if reason is DialogReason.CancelCalled: 105 index = instance.state[self.StepIndex] 106 step_name = self.get_step_name(index) 107 instance_id = str(instance.state[self.PersistedInstanceId]) 108 properties = { 109 "DialogId": self.id, 110 "StepName": step_name, 111 "InstanceId": instance_id, 112 } 113 self.telemetry_client.track_event("WaterfallCancel", properties) 114 else: 115 if reason is DialogReason.EndCalled: 116 117 instance_id = str(instance.state[self.PersistedInstanceId]) 118 properties = {"DialogId": self.id, "InstanceId": instance_id} 119 self.telemetry_client.track_event("WaterfallComplete", properties) 120 121 return 122 123 async def on_step(self, step_context: WaterfallStepContext) -> DialogTurnResult: 124 step_name = self.get_step_name(step_context.index) 125 instance_id = str(step_context.active_dialog.state[self.PersistedInstanceId]) 126 properties = { 127 "DialogId": self.id, 128 "StepName": step_name, 129 "InstanceId": instance_id, 130 } 131 self.telemetry_client.track_event("WaterfallStep", properties) 132 return await self._steps[step_context.index](step_context) 133 134 async def run_step( 135 self, 136 dialog_context: DialogContext, 137 index: int, 138 reason: DialogReason, 139 result: object, 140 ) -> DialogTurnResult: 141 if not dialog_context: 142 raise TypeError( 143 "WaterfallDialog.run_steps(): dialog_context cannot be None." 144 ) 145 if index < len(self._steps): 146 # Update persisted step index 147 state = dialog_context.active_dialog.state 148 state[self.StepIndex] = index 149 150 # Create step context 151 options = state[self.PersistedOptions] 152 values = state[self.PersistedValues] 153 step_context = WaterfallStepContext( 154 self, dialog_context, options, values, index, reason, result 155 ) 156 return await self.on_step(step_context) 157 158 # End of waterfall so just return any result to parent 159 return await dialog_context.end_dialog(result) 160 161 def get_step_name(self, index: int) -> str: 162 """ 163 Give the waterfall step a unique name 164 """ 165 step_name = self._steps[index].__qualname__ 166 167 if not step_name or ">" in step_name: 168 step_name = f"Step{index + 1}of{len(self._steps)}" 169 170 return step_name 171 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py @@ -164,7 +164,7 @@ """ step_name = self._steps[index].__qualname__ - if not step_name or ">" in step_name: + if not step_name or step_name.endswith("<lambda>"): step_name = f"Step{index + 1}of{len(self._steps)}" return step_name
{"golden_diff": "diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py\n--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py\n+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py\n@@ -164,7 +164,7 @@\n \"\"\"\r\n step_name = self._steps[index].__qualname__\r\n \r\n- if not step_name or \">\" in step_name:\r\n+ if not step_name or step_name.endswith(\"<lambda>\"):\r\n step_name = f\"Step{index + 1}of{len(self._steps)}\"\r\n \r\n return step_name\n", "issue": "[PORT] Create test for waterfall cancellation telemetry\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/3314\n\nFor https://github.com/microsoft/botbuilder-js/issues/1619\n\n\r\n# Changed projects\r\n* Microsoft.Bot.Builder.Dialogs.Tests\r\n\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\n\r\nimport uuid\r\nfrom typing import Coroutine\r\nfrom botbuilder.core import TurnContext\r\nfrom botbuilder.schema import ActivityTypes\r\nfrom .dialog_reason import DialogReason\r\nfrom .dialog import Dialog\r\nfrom .dialog_turn_result import DialogTurnResult\r\nfrom .dialog_context import DialogContext\r\nfrom .dialog_instance import DialogInstance\r\nfrom .waterfall_step_context import WaterfallStepContext\r\n\r\n\r\nclass WaterfallDialog(Dialog):\r\n PersistedOptions = \"options\"\r\n StepIndex = \"stepIndex\"\r\n PersistedValues = \"values\"\r\n PersistedInstanceId = \"instanceId\"\r\n\r\n def __init__(self, dialog_id: str, steps: [Coroutine] = None):\r\n super(WaterfallDialog, self).__init__(dialog_id)\r\n if not steps:\r\n self._steps = []\r\n else:\r\n if not isinstance(steps, list):\r\n raise TypeError(\"WaterfallDialog(): steps must be list of steps\")\r\n self._steps = steps\r\n\r\n def add_step(self, step):\r\n \"\"\"\r\n Adds a new step to the waterfall.\r\n :param step: Step to add\r\n :return: Waterfall dialog for fluent calls to `add_step()`.\r\n \"\"\"\r\n if not step:\r\n raise TypeError(\"WaterfallDialog.add_step(): step cannot be None.\")\r\n\r\n self._steps.append(step)\r\n return self\r\n\r\n async def begin_dialog(\r\n self, dialog_context: DialogContext, options: object = None\r\n ) -> DialogTurnResult:\r\n\r\n if not dialog_context:\r\n raise TypeError(\"WaterfallDialog.begin_dialog(): dc cannot be None.\")\r\n\r\n # Initialize waterfall state\r\n state = dialog_context.active_dialog.state\r\n\r\n instance_id = uuid.uuid1().__str__()\r\n state[self.PersistedOptions] = options\r\n state[self.PersistedValues] = {}\r\n state[self.PersistedInstanceId] = instance_id\r\n\r\n properties = {}\r\n properties[\"DialogId\"] = self.id\r\n properties[\"InstanceId\"] = instance_id\r\n self.telemetry_client.track_event(\"WaterfallStart\", properties)\r\n\r\n # Run first stepkinds\r\n return await self.run_step(dialog_context, 0, DialogReason.BeginCalled, None)\r\n\r\n async def continue_dialog( # pylint: disable=unused-argument,arguments-differ\r\n self,\r\n dialog_context: DialogContext = None,\r\n reason: DialogReason = None,\r\n result: object = NotImplementedError(),\r\n ) -> DialogTurnResult:\r\n if not dialog_context:\r\n raise TypeError(\"WaterfallDialog.continue_dialog(): dc cannot be None.\")\r\n\r\n if dialog_context.context.activity.type != ActivityTypes.message:\r\n return Dialog.end_of_turn\r\n\r\n return await 
self.resume_dialog(\r\n dialog_context,\r\n DialogReason.ContinueCalled,\r\n dialog_context.context.activity.text,\r\n )\r\n\r\n async def resume_dialog(\r\n self, dialog_context: DialogContext, reason: DialogReason, result: object\r\n ):\r\n if dialog_context is None:\r\n raise TypeError(\"WaterfallDialog.resume_dialog(): dc cannot be None.\")\r\n\r\n # Increment step index and run step\r\n state = dialog_context.active_dialog.state\r\n\r\n # Future Me:\r\n # If issues with CosmosDB, see https://github.com/Microsoft/botbuilder-dotnet/issues/871\r\n # for hints.\r\n return await self.run_step(\r\n dialog_context, state[self.StepIndex] + 1, reason, result\r\n )\r\n\r\n async def end_dialog( # pylint: disable=unused-argument\r\n self, context: TurnContext, instance: DialogInstance, reason: DialogReason\r\n ) -> None:\r\n if reason is DialogReason.CancelCalled:\r\n index = instance.state[self.StepIndex]\r\n step_name = self.get_step_name(index)\r\n instance_id = str(instance.state[self.PersistedInstanceId])\r\n properties = {\r\n \"DialogId\": self.id,\r\n \"StepName\": step_name,\r\n \"InstanceId\": instance_id,\r\n }\r\n self.telemetry_client.track_event(\"WaterfallCancel\", properties)\r\n else:\r\n if reason is DialogReason.EndCalled:\r\n\r\n instance_id = str(instance.state[self.PersistedInstanceId])\r\n properties = {\"DialogId\": self.id, \"InstanceId\": instance_id}\r\n self.telemetry_client.track_event(\"WaterfallComplete\", properties)\r\n\r\n return\r\n\r\n async def on_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:\r\n step_name = self.get_step_name(step_context.index)\r\n instance_id = str(step_context.active_dialog.state[self.PersistedInstanceId])\r\n properties = {\r\n \"DialogId\": self.id,\r\n \"StepName\": step_name,\r\n \"InstanceId\": instance_id,\r\n }\r\n self.telemetry_client.track_event(\"WaterfallStep\", properties)\r\n return await self._steps[step_context.index](step_context)\r\n\r\n async def run_step(\r\n self,\r\n dialog_context: DialogContext,\r\n index: int,\r\n reason: DialogReason,\r\n result: object,\r\n ) -> DialogTurnResult:\r\n if not dialog_context:\r\n raise TypeError(\r\n \"WaterfallDialog.run_steps(): dialog_context cannot be None.\"\r\n )\r\n if index < len(self._steps):\r\n # Update persisted step index\r\n state = dialog_context.active_dialog.state\r\n state[self.StepIndex] = index\r\n\r\n # Create step context\r\n options = state[self.PersistedOptions]\r\n values = state[self.PersistedValues]\r\n step_context = WaterfallStepContext(\r\n self, dialog_context, options, values, index, reason, result\r\n )\r\n return await self.on_step(step_context)\r\n\r\n # End of waterfall so just return any result to parent\r\n return await dialog_context.end_dialog(result)\r\n\r\n def get_step_name(self, index: int) -> str:\r\n \"\"\"\r\n Give the waterfall step a unique name\r\n \"\"\"\r\n step_name = self._steps[index].__qualname__\r\n\r\n if not step_name or \">\" in step_name:\r\n step_name = f\"Step{index + 1}of{len(self._steps)}\"\r\n\r\n return step_name\r\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\n\r\nimport uuid\r\nfrom typing import Coroutine\r\nfrom botbuilder.core import TurnContext\r\nfrom botbuilder.schema import ActivityTypes\r\nfrom .dialog_reason import DialogReason\r\nfrom .dialog import Dialog\r\nfrom .dialog_turn_result import DialogTurnResult\r\nfrom .dialog_context import DialogContext\r\nfrom .dialog_instance import DialogInstance\r\nfrom .waterfall_step_context import WaterfallStepContext\r\n\r\n\r\nclass WaterfallDialog(Dialog):\r\n PersistedOptions = \"options\"\r\n StepIndex = \"stepIndex\"\r\n PersistedValues = \"values\"\r\n PersistedInstanceId = \"instanceId\"\r\n\r\n def __init__(self, dialog_id: str, steps: [Coroutine] = None):\r\n super(WaterfallDialog, self).__init__(dialog_id)\r\n if not steps:\r\n self._steps = []\r\n else:\r\n if not isinstance(steps, list):\r\n raise TypeError(\"WaterfallDialog(): steps must be list of steps\")\r\n self._steps = steps\r\n\r\n def add_step(self, step):\r\n \"\"\"\r\n Adds a new step to the waterfall.\r\n :param step: Step to add\r\n :return: Waterfall dialog for fluent calls to `add_step()`.\r\n \"\"\"\r\n if not step:\r\n raise TypeError(\"WaterfallDialog.add_step(): step cannot be None.\")\r\n\r\n self._steps.append(step)\r\n return self\r\n\r\n async def begin_dialog(\r\n self, dialog_context: DialogContext, options: object = None\r\n ) -> DialogTurnResult:\r\n\r\n if not dialog_context:\r\n raise TypeError(\"WaterfallDialog.begin_dialog(): dc cannot be None.\")\r\n\r\n # Initialize waterfall state\r\n state = dialog_context.active_dialog.state\r\n\r\n instance_id = uuid.uuid1().__str__()\r\n state[self.PersistedOptions] = options\r\n state[self.PersistedValues] = {}\r\n state[self.PersistedInstanceId] = instance_id\r\n\r\n properties = {}\r\n properties[\"DialogId\"] = self.id\r\n properties[\"InstanceId\"] = instance_id\r\n self.telemetry_client.track_event(\"WaterfallStart\", properties)\r\n\r\n # Run first stepkinds\r\n return await self.run_step(dialog_context, 0, DialogReason.BeginCalled, None)\r\n\r\n async def continue_dialog( # pylint: disable=unused-argument,arguments-differ\r\n self,\r\n dialog_context: DialogContext = None,\r\n reason: DialogReason = None,\r\n result: object = NotImplementedError(),\r\n ) -> DialogTurnResult:\r\n if not dialog_context:\r\n raise TypeError(\"WaterfallDialog.continue_dialog(): dc cannot be None.\")\r\n\r\n if dialog_context.context.activity.type != ActivityTypes.message:\r\n return Dialog.end_of_turn\r\n\r\n return await self.resume_dialog(\r\n dialog_context,\r\n DialogReason.ContinueCalled,\r\n dialog_context.context.activity.text,\r\n )\r\n\r\n async def resume_dialog(\r\n self, dialog_context: DialogContext, reason: DialogReason, result: object\r\n ):\r\n if dialog_context is None:\r\n raise TypeError(\"WaterfallDialog.resume_dialog(): dc cannot be None.\")\r\n\r\n # Increment step index and run step\r\n state = dialog_context.active_dialog.state\r\n\r\n # Future Me:\r\n # If issues with CosmosDB, see https://github.com/Microsoft/botbuilder-dotnet/issues/871\r\n # for hints.\r\n return await self.run_step(\r\n dialog_context, state[self.StepIndex] + 1, reason, result\r\n )\r\n\r\n async def end_dialog( # pylint: disable=unused-argument\r\n self, context: TurnContext, instance: DialogInstance, reason: DialogReason\r\n ) -> None:\r\n if reason is DialogReason.CancelCalled:\r\n index = instance.state[self.StepIndex]\r\n step_name = self.get_step_name(index)\r\n instance_id = 
str(instance.state[self.PersistedInstanceId])\r\n properties = {\r\n \"DialogId\": self.id,\r\n \"StepName\": step_name,\r\n \"InstanceId\": instance_id,\r\n }\r\n self.telemetry_client.track_event(\"WaterfallCancel\", properties)\r\n else:\r\n if reason is DialogReason.EndCalled:\r\n\r\n instance_id = str(instance.state[self.PersistedInstanceId])\r\n properties = {\"DialogId\": self.id, \"InstanceId\": instance_id}\r\n self.telemetry_client.track_event(\"WaterfallComplete\", properties)\r\n\r\n return\r\n\r\n async def on_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:\r\n step_name = self.get_step_name(step_context.index)\r\n instance_id = str(step_context.active_dialog.state[self.PersistedInstanceId])\r\n properties = {\r\n \"DialogId\": self.id,\r\n \"StepName\": step_name,\r\n \"InstanceId\": instance_id,\r\n }\r\n self.telemetry_client.track_event(\"WaterfallStep\", properties)\r\n return await self._steps[step_context.index](step_context)\r\n\r\n async def run_step(\r\n self,\r\n dialog_context: DialogContext,\r\n index: int,\r\n reason: DialogReason,\r\n result: object,\r\n ) -> DialogTurnResult:\r\n if not dialog_context:\r\n raise TypeError(\r\n \"WaterfallDialog.run_steps(): dialog_context cannot be None.\"\r\n )\r\n if index < len(self._steps):\r\n # Update persisted step index\r\n state = dialog_context.active_dialog.state\r\n state[self.StepIndex] = index\r\n\r\n # Create step context\r\n options = state[self.PersistedOptions]\r\n values = state[self.PersistedValues]\r\n step_context = WaterfallStepContext(\r\n self, dialog_context, options, values, index, reason, result\r\n )\r\n return await self.on_step(step_context)\r\n\r\n # End of waterfall so just return any result to parent\r\n return await dialog_context.end_dialog(result)\r\n\r\n def get_step_name(self, index: int) -> str:\r\n \"\"\"\r\n Give the waterfall step a unique name\r\n \"\"\"\r\n step_name = self._steps[index].__qualname__\r\n\r\n if not step_name or step_name.endswith(\"<lambda>\"):\r\n step_name = f\"Step{index + 1}of{len(self._steps)}\"\r\n\r\n return step_name\r\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py"}]}
2,036
163
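A minimal sketch of the `__qualname__` check that the botbuilder patch above tightens: a step defined inside another function carries `<locals>` (and therefore a `>`) in its qualified name, so the old `">" in step_name` test renamed it like a lambda. The `make_steps`/`greet_step` names are illustrative assumptions, not taken from the repository.

```python
# Why the check moved from '">" in name' to name.endswith("<lambda>").
def make_steps():
    async def greet_step(step_context):   # placeholder step body
        return None
    return [greet_step, lambda step_context: None]

steps = make_steps()

def step_name(index, steps):
    name = steps[index].__qualname__
    if not name or name.endswith("<lambda>"):
        name = f"Step{index + 1}of{len(steps)}"
    return name

print(step_name(0, steps))  # make_steps.<locals>.greet_step (kept after the fix)
print(step_name(1, steps))  # Step2of2 (a lambda still gets a generated name)
```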
gh_patches_debug_16065
rasdani/github-patches
git_diff
ultrabug__py3status-380
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- broken click events since our last merges, the click events are not working anymore :( --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `py3status/events.py` Content: ``` 1 import select 2 import sys 3 4 from threading import Thread 5 from time import time 6 from subprocess import Popen, call, PIPE 7 from json import loads 8 9 from py3status.profiling import profile 10 11 12 class IOPoller: 13 """ 14 This class implements a predictive and timing-out I/O reader 15 using select and the poll() mechanism for greater compatibility. 16 """ 17 18 def __init__(self, io, eventmask=select.POLLIN): 19 """ 20 Our default is to read (POLLIN) the specified 'io' file descriptor. 21 """ 22 self.io = io 23 self.poller = select.poll() 24 self.poller.register(io, eventmask) 25 26 def readline(self, timeout=500): 27 """ 28 Try to read our I/O for 'timeout' milliseconds, return None otherwise. 29 This makes calling and reading I/O non blocking ! 30 """ 31 poll_result = self.poller.poll(timeout) 32 if poll_result: 33 line = self.io.readline().strip() 34 if self.io == sys.stdin and line == '[': 35 # skip first event line wrt issue #19 36 line = self.io.readline().strip() 37 try: 38 # python3 compatibility code 39 line = line.decode() 40 except (AttributeError, UnicodeDecodeError): 41 pass 42 return line 43 else: 44 return None 45 46 47 class Events(Thread): 48 """ 49 This class is responsible for dispatching event JSONs sent by the i3bar. 50 """ 51 52 def __init__(self, py3_wrapper): 53 """ 54 We need to poll stdin to receive i3bar messages. 55 """ 56 Thread.__init__(self) 57 self.config = py3_wrapper.config 58 self.error = None 59 self.i3s_config = py3_wrapper.i3status_thread.config 60 self.last_refresh_ts = time() 61 self.lock = py3_wrapper.lock 62 self.modules = py3_wrapper.modules 63 self.on_click = self.i3s_config['on_click'] 64 self.output_modules = py3_wrapper.output_modules 65 self.poller_inp = IOPoller(sys.stdin) 66 self.py3_wrapper = py3_wrapper 67 68 def refresh(self, module_name): 69 """ 70 Force a cache expiration for all the methods of the given module. 71 72 We rate limit the i3status refresh to 100ms. 73 """ 74 module = self.modules.get(module_name) 75 if module is not None: 76 if self.config['debug']: 77 self.py3_wrapper.log('refresh module {}'.format(module_name)) 78 module.force_update() 79 else: 80 if time() > (self.last_refresh_ts + 0.1): 81 if self.config['debug']: 82 self.py3_wrapper.log( 83 'refresh i3status for module {}'.format(module_name)) 84 call(['killall', '-s', 'USR1', 'i3status']) 85 self.last_refresh_ts = time() 86 87 def refresh_all(self, module_name): 88 """ 89 Force a full refresh of py3status and i3status modules by sending 90 a SIGUSR1 signal to py3status. 91 92 We rate limit this command to 100ms for obvious abusive behavior. 
93 """ 94 if time() > (self.last_refresh_ts + 0.1): 95 call(['killall', '-s', 'USR1', 'py3status']) 96 self.last_refresh_ts = time() 97 98 def on_click_dispatcher(self, module_name, command): 99 """ 100 Dispatch on_click config parameters to either: 101 - Our own methods for special py3status commands (listed below) 102 - The i3-msg program which is part of i3wm 103 """ 104 py3_commands = ['refresh', 'refresh_all'] 105 if command is None: 106 return 107 elif command in py3_commands: 108 # this is a py3status command handled by this class 109 method = getattr(self, command) 110 method(module_name) 111 else: 112 # this is a i3 message 113 self.i3_msg(module_name, command) 114 115 # to make the bar more responsive to users we ask for a refresh 116 # of the module or of i3status if the module is an i3status one 117 self.refresh(module_name) 118 119 def i3_msg(self, module_name, command): 120 """ 121 Execute the given i3 message and log its output. 122 """ 123 i3_msg_pipe = Popen(['i3-msg', command], stdout=PIPE) 124 self.py3_wrapper.log('i3-msg module="{}" command="{}" stdout={}'.format( 125 module_name, command, i3_msg_pipe.stdout.read())) 126 127 def process_event(self, module_name, event, top_level=True): 128 """ 129 Process the event for the named module. 130 Events may have been declared in i3status.conf, modules may have 131 on_click() functions. There is a default middle click event etc. 132 """ 133 button = event.get('button', 0) 134 default_event = False 135 # execute any configured i3-msg command 136 # we do not do this for containers 137 if top_level: 138 if self.on_click.get(module_name, {}).get(button): 139 self.on_click_dispatcher(module_name, 140 self.on_click[module_name].get(button)) 141 # otherwise setup default action on button 2 press 142 elif button == 2: 143 default_event = True 144 145 # get the module that the event is for 146 module_info = self.output_modules.get(module_name) 147 module = module_info['module'] 148 # if module is a py3status one and it has an on_click function then 149 # call it. 150 if module_info['type'] == 'py3status' and module.click_events: 151 module.click_event(event) 152 if self.config['debug']: 153 self.py3_wrapper.log('dispatching event {}'.format(event)) 154 155 # to make the bar more responsive to users we refresh the module 156 # unless the on_click event called py3.prevent_refresh() 157 if not module.prevent_refresh: 158 self.refresh(module_name) 159 default_event = False 160 161 if default_event: 162 # default button 2 action is to clear this method's cache 163 if self.config['debug']: 164 self.py3_wrapper.log( 165 'dispatching default event {}'.format(event)) 166 self.refresh(module_name) 167 168 # find container that holds the module and call its onclick 169 module_groups = self.i3s_config['.module_groups'] 170 containers = module_groups.get(module_name, []) 171 for container in containers: 172 self.process_event(container, event, top_level=False) 173 174 @profile 175 def run(self): 176 """ 177 Wait for an i3bar JSON event, then find the right module to dispatch 178 the message to based on the 'name' and 'instance' of the event. 179 180 In case the module does NOT support click_events, the default 181 implementation is to clear the module's cache 182 when the MIDDLE button (2) is pressed on it. 
183 184 Example event: 185 {'y': 13, 'x': 1737, 'button': 1, 'name': 'empty', 'instance': 'first'} 186 """ 187 while self.lock.is_set(): 188 event_str = self.poller_inp.readline() 189 if not event_str: 190 continue 191 try: 192 # remove leading comma if present 193 if event_str[0] == ',': 194 event_str = event_str[1:] 195 event = loads(event_str) 196 197 if self.config['debug']: 198 self.py3_wrapper.log('received event {}'.format(event)) 199 200 # usage variables 201 instance = event.get('instance', '') 202 name = event.get('name', '') 203 204 # composites have an index which is passed to i3bar with 205 # the instance. We need to separate this out here and 206 # clean up the event. If index 207 # is an integer type then cast it as such. 208 if ' ' in instance: 209 instance, index = instance.split(' ', 1) 210 try: 211 index = int(index) 212 except ValueError: 213 pass 214 event['index'] = index 215 event['instance'] = instance 216 217 if self.config['debug']: 218 self.py3_wrapper.log( 219 'trying to dispatch event to module "{}"'.format( 220 '{} {}'.format(name, instance).strip())) 221 222 # guess the module config name 223 module_name = '{} {}'.format(name, instance).strip() 224 # do the work 225 self.process_event(module_name, event) 226 227 except Exception: 228 err = sys.exc_info()[1] 229 self.error = err 230 self.py3_wrapper.log('event failed ({})'.format(err), 'warning') 231 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/py3status/events.py b/py3status/events.py --- a/py3status/events.py +++ b/py3status/events.py @@ -135,9 +135,13 @@ # execute any configured i3-msg command # we do not do this for containers if top_level: - if self.on_click.get(module_name, {}).get(button): - self.on_click_dispatcher(module_name, - self.on_click[module_name].get(button)) + click_module = event['name'] + if event['instance']: + click_module += ' ' + event['instance'] + btn = str(button) + if self.on_click.get(click_module, {}).get(btn): + self.on_click_dispatcher(click_module, + self.on_click[module_name].get(btn)) # otherwise setup default action on button 2 press elif button == 2: default_event = True
{"golden_diff": "diff --git a/py3status/events.py b/py3status/events.py\n--- a/py3status/events.py\n+++ b/py3status/events.py\n@@ -135,9 +135,13 @@\n # execute any configured i3-msg command\n # we do not do this for containers\n if top_level:\n- if self.on_click.get(module_name, {}).get(button):\n- self.on_click_dispatcher(module_name,\n- self.on_click[module_name].get(button))\n+ click_module = event['name']\n+ if event['instance']:\n+ click_module += ' ' + event['instance']\n+ btn = str(button)\n+ if self.on_click.get(click_module, {}).get(btn):\n+ self.on_click_dispatcher(click_module,\n+ self.on_click[module_name].get(btn))\n # otherwise setup default action on button 2 press\n elif button == 2:\n default_event = True\n", "issue": "broken click events\nsince our last merges, the click events are not working anymore :(\n\n", "before_files": [{"content": "import select\nimport sys\n\nfrom threading import Thread\nfrom time import time\nfrom subprocess import Popen, call, PIPE\nfrom json import loads\n\nfrom py3status.profiling import profile\n\n\nclass IOPoller:\n \"\"\"\n This class implements a predictive and timing-out I/O reader\n using select and the poll() mechanism for greater compatibility.\n \"\"\"\n\n def __init__(self, io, eventmask=select.POLLIN):\n \"\"\"\n Our default is to read (POLLIN) the specified 'io' file descriptor.\n \"\"\"\n self.io = io\n self.poller = select.poll()\n self.poller.register(io, eventmask)\n\n def readline(self, timeout=500):\n \"\"\"\n Try to read our I/O for 'timeout' milliseconds, return None otherwise.\n This makes calling and reading I/O non blocking !\n \"\"\"\n poll_result = self.poller.poll(timeout)\n if poll_result:\n line = self.io.readline().strip()\n if self.io == sys.stdin and line == '[':\n # skip first event line wrt issue #19\n line = self.io.readline().strip()\n try:\n # python3 compatibility code\n line = line.decode()\n except (AttributeError, UnicodeDecodeError):\n pass\n return line\n else:\n return None\n\n\nclass Events(Thread):\n \"\"\"\n This class is responsible for dispatching event JSONs sent by the i3bar.\n \"\"\"\n\n def __init__(self, py3_wrapper):\n \"\"\"\n We need to poll stdin to receive i3bar messages.\n \"\"\"\n Thread.__init__(self)\n self.config = py3_wrapper.config\n self.error = None\n self.i3s_config = py3_wrapper.i3status_thread.config\n self.last_refresh_ts = time()\n self.lock = py3_wrapper.lock\n self.modules = py3_wrapper.modules\n self.on_click = self.i3s_config['on_click']\n self.output_modules = py3_wrapper.output_modules\n self.poller_inp = IOPoller(sys.stdin)\n self.py3_wrapper = py3_wrapper\n\n def refresh(self, module_name):\n \"\"\"\n Force a cache expiration for all the methods of the given module.\n\n We rate limit the i3status refresh to 100ms.\n \"\"\"\n module = self.modules.get(module_name)\n if module is not None:\n if self.config['debug']:\n self.py3_wrapper.log('refresh module {}'.format(module_name))\n module.force_update()\n else:\n if time() > (self.last_refresh_ts + 0.1):\n if self.config['debug']:\n self.py3_wrapper.log(\n 'refresh i3status for module {}'.format(module_name))\n call(['killall', '-s', 'USR1', 'i3status'])\n self.last_refresh_ts = time()\n\n def refresh_all(self, module_name):\n \"\"\"\n Force a full refresh of py3status and i3status modules by sending\n a SIGUSR1 signal to py3status.\n\n We rate limit this command to 100ms for obvious abusive behavior.\n \"\"\"\n if time() > (self.last_refresh_ts + 0.1):\n call(['killall', '-s', 'USR1', 'py3status'])\n 
self.last_refresh_ts = time()\n\n def on_click_dispatcher(self, module_name, command):\n \"\"\"\n Dispatch on_click config parameters to either:\n - Our own methods for special py3status commands (listed below)\n - The i3-msg program which is part of i3wm\n \"\"\"\n py3_commands = ['refresh', 'refresh_all']\n if command is None:\n return\n elif command in py3_commands:\n # this is a py3status command handled by this class\n method = getattr(self, command)\n method(module_name)\n else:\n # this is a i3 message\n self.i3_msg(module_name, command)\n\n # to make the bar more responsive to users we ask for a refresh\n # of the module or of i3status if the module is an i3status one\n self.refresh(module_name)\n\n def i3_msg(self, module_name, command):\n \"\"\"\n Execute the given i3 message and log its output.\n \"\"\"\n i3_msg_pipe = Popen(['i3-msg', command], stdout=PIPE)\n self.py3_wrapper.log('i3-msg module=\"{}\" command=\"{}\" stdout={}'.format(\n module_name, command, i3_msg_pipe.stdout.read()))\n\n def process_event(self, module_name, event, top_level=True):\n \"\"\"\n Process the event for the named module.\n Events may have been declared in i3status.conf, modules may have\n on_click() functions. There is a default middle click event etc.\n \"\"\"\n button = event.get('button', 0)\n default_event = False\n # execute any configured i3-msg command\n # we do not do this for containers\n if top_level:\n if self.on_click.get(module_name, {}).get(button):\n self.on_click_dispatcher(module_name,\n self.on_click[module_name].get(button))\n # otherwise setup default action on button 2 press\n elif button == 2:\n default_event = True\n\n # get the module that the event is for\n module_info = self.output_modules.get(module_name)\n module = module_info['module']\n # if module is a py3status one and it has an on_click function then\n # call it.\n if module_info['type'] == 'py3status' and module.click_events:\n module.click_event(event)\n if self.config['debug']:\n self.py3_wrapper.log('dispatching event {}'.format(event))\n\n # to make the bar more responsive to users we refresh the module\n # unless the on_click event called py3.prevent_refresh()\n if not module.prevent_refresh:\n self.refresh(module_name)\n default_event = False\n\n if default_event:\n # default button 2 action is to clear this method's cache\n if self.config['debug']:\n self.py3_wrapper.log(\n 'dispatching default event {}'.format(event))\n self.refresh(module_name)\n\n # find container that holds the module and call its onclick\n module_groups = self.i3s_config['.module_groups']\n containers = module_groups.get(module_name, [])\n for container in containers:\n self.process_event(container, event, top_level=False)\n\n @profile\n def run(self):\n \"\"\"\n Wait for an i3bar JSON event, then find the right module to dispatch\n the message to based on the 'name' and 'instance' of the event.\n\n In case the module does NOT support click_events, the default\n implementation is to clear the module's cache\n when the MIDDLE button (2) is pressed on it.\n\n Example event:\n {'y': 13, 'x': 1737, 'button': 1, 'name': 'empty', 'instance': 'first'}\n \"\"\"\n while self.lock.is_set():\n event_str = self.poller_inp.readline()\n if not event_str:\n continue\n try:\n # remove leading comma if present\n if event_str[0] == ',':\n event_str = event_str[1:]\n event = loads(event_str)\n\n if self.config['debug']:\n self.py3_wrapper.log('received event {}'.format(event))\n\n # usage variables\n instance = event.get('instance', '')\n name = 
event.get('name', '')\n\n # composites have an index which is passed to i3bar with\n # the instance. We need to separate this out here and\n # clean up the event. If index\n # is an integer type then cast it as such.\n if ' ' in instance:\n instance, index = instance.split(' ', 1)\n try:\n index = int(index)\n except ValueError:\n pass\n event['index'] = index\n event['instance'] = instance\n\n if self.config['debug']:\n self.py3_wrapper.log(\n 'trying to dispatch event to module \"{}\"'.format(\n '{} {}'.format(name, instance).strip()))\n\n # guess the module config name\n module_name = '{} {}'.format(name, instance).strip()\n # do the work\n self.process_event(module_name, event)\n\n except Exception:\n err = sys.exc_info()[1]\n self.error = err\n self.py3_wrapper.log('event failed ({})'.format(err), 'warning')\n", "path": "py3status/events.py"}], "after_files": [{"content": "import select\nimport sys\n\nfrom threading import Thread\nfrom time import time\nfrom subprocess import Popen, call, PIPE\nfrom json import loads\n\nfrom py3status.profiling import profile\n\n\nclass IOPoller:\n \"\"\"\n This class implements a predictive and timing-out I/O reader\n using select and the poll() mechanism for greater compatibility.\n \"\"\"\n\n def __init__(self, io, eventmask=select.POLLIN):\n \"\"\"\n Our default is to read (POLLIN) the specified 'io' file descriptor.\n \"\"\"\n self.io = io\n self.poller = select.poll()\n self.poller.register(io, eventmask)\n\n def readline(self, timeout=500):\n \"\"\"\n Try to read our I/O for 'timeout' milliseconds, return None otherwise.\n This makes calling and reading I/O non blocking !\n \"\"\"\n poll_result = self.poller.poll(timeout)\n if poll_result:\n line = self.io.readline().strip()\n if self.io == sys.stdin and line == '[':\n # skip first event line wrt issue #19\n line = self.io.readline().strip()\n try:\n # python3 compatibility code\n line = line.decode()\n except (AttributeError, UnicodeDecodeError):\n pass\n return line\n else:\n return None\n\n\nclass Events(Thread):\n \"\"\"\n This class is responsible for dispatching event JSONs sent by the i3bar.\n \"\"\"\n\n def __init__(self, py3_wrapper):\n \"\"\"\n We need to poll stdin to receive i3bar messages.\n \"\"\"\n Thread.__init__(self)\n self.config = py3_wrapper.config\n self.error = None\n self.i3s_config = py3_wrapper.i3status_thread.config\n self.last_refresh_ts = time()\n self.lock = py3_wrapper.lock\n self.modules = py3_wrapper.modules\n self.on_click = self.i3s_config['on_click']\n self.output_modules = py3_wrapper.output_modules\n self.poller_inp = IOPoller(sys.stdin)\n self.py3_wrapper = py3_wrapper\n\n def refresh(self, module_name):\n \"\"\"\n Force a cache expiration for all the methods of the given module.\n\n We rate limit the i3status refresh to 100ms.\n \"\"\"\n module = self.modules.get(module_name)\n if module is not None:\n if self.config['debug']:\n self.py3_wrapper.log('refresh module {}'.format(module_name))\n module.force_update()\n else:\n if time() > (self.last_refresh_ts + 0.1):\n if self.config['debug']:\n self.py3_wrapper.log(\n 'refresh i3status for module {}'.format(module_name))\n call(['killall', '-s', 'USR1', 'i3status'])\n self.last_refresh_ts = time()\n\n def refresh_all(self, module_name):\n \"\"\"\n Force a full refresh of py3status and i3status modules by sending\n a SIGUSR1 signal to py3status.\n\n We rate limit this command to 100ms for obvious abusive behavior.\n \"\"\"\n if time() > (self.last_refresh_ts + 0.1):\n call(['killall', '-s', 'USR1', 
'py3status'])\n self.last_refresh_ts = time()\n\n def on_click_dispatcher(self, module_name, command):\n \"\"\"\n Dispatch on_click config parameters to either:\n - Our own methods for special py3status commands (listed below)\n - The i3-msg program which is part of i3wm\n \"\"\"\n py3_commands = ['refresh', 'refresh_all']\n if command is None:\n return\n elif command in py3_commands:\n # this is a py3status command handled by this class\n method = getattr(self, command)\n method(module_name)\n else:\n # this is a i3 message\n self.i3_msg(module_name, command)\n\n # to make the bar more responsive to users we ask for a refresh\n # of the module or of i3status if the module is an i3status one\n self.refresh(module_name)\n\n def i3_msg(self, module_name, command):\n \"\"\"\n Execute the given i3 message and log its output.\n \"\"\"\n i3_msg_pipe = Popen(['i3-msg', command], stdout=PIPE)\n self.py3_wrapper.log('i3-msg module=\"{}\" command=\"{}\" stdout={}'.format(\n module_name, command, i3_msg_pipe.stdout.read()))\n\n def process_event(self, module_name, event, top_level=True):\n \"\"\"\n Process the event for the named module.\n Events may have been declared in i3status.conf, modules may have\n on_click() functions. There is a default middle click event etc.\n \"\"\"\n button = event.get('button', 0)\n default_event = False\n # execute any configured i3-msg command\n # we do not do this for containers\n if top_level:\n click_module = event['name']\n if event['instance']:\n click_module += ' ' + event['instance']\n btn = str(button)\n if self.on_click.get(click_module, {}).get(btn):\n self.on_click_dispatcher(click_module,\n self.on_click[module_name].get(btn))\n # otherwise setup default action on button 2 press\n elif button == 2:\n default_event = True\n\n # get the module that the event is for\n module_info = self.output_modules.get(module_name)\n module = module_info['module']\n # if module is a py3status one and it has an on_click function then\n # call it.\n if module_info['type'] == 'py3status' and module.click_events:\n module.click_event(event)\n if self.config['debug']:\n self.py3_wrapper.log('dispatching event {}'.format(event))\n\n # to make the bar more responsive to users we refresh the module\n # unless the on_click event called py3.prevent_refresh()\n if not module.prevent_refresh:\n self.refresh(module_name)\n default_event = False\n\n if default_event:\n # default button 2 action is to clear this method's cache\n if self.config['debug']:\n self.py3_wrapper.log(\n 'dispatching default event {}'.format(event))\n self.refresh(module_name)\n\n # find container that holds the module and call its onclick\n module_groups = self.i3s_config['.module_groups']\n containers = module_groups.get(module_name, [])\n for container in containers:\n self.process_event(container, event, top_level=False)\n\n @profile\n def run(self):\n \"\"\"\n Wait for an i3bar JSON event, then find the right module to dispatch\n the message to based on the 'name' and 'instance' of the event.\n\n In case the module does NOT support click_events, the default\n implementation is to clear the module's cache\n when the MIDDLE button (2) is pressed on it.\n\n Example event:\n {'y': 13, 'x': 1737, 'button': 1, 'name': 'empty', 'instance': 'first'}\n \"\"\"\n while self.lock.is_set():\n event_str = self.poller_inp.readline()\n if not event_str:\n continue\n try:\n # remove leading comma if present\n if event_str[0] == ',':\n event_str = event_str[1:]\n event = loads(event_str)\n\n if self.config['debug']:\n 
self.py3_wrapper.log('received event {}'.format(event))\n\n # usage variables\n instance = event.get('instance', '')\n name = event.get('name', '')\n\n # composites have an index which is passed to i3bar with\n # the instance. We need to separate this out here and\n # clean up the event. If index\n # is an integer type then cast it as such.\n if ' ' in instance:\n instance, index = instance.split(' ', 1)\n try:\n index = int(index)\n except ValueError:\n pass\n event['index'] = index\n event['instance'] = instance\n\n if self.config['debug']:\n self.py3_wrapper.log(\n 'trying to dispatch event to module \"{}\"'.format(\n '{} {}'.format(name, instance).strip()))\n\n # guess the module config name\n module_name = '{} {}'.format(name, instance).strip()\n # do the work\n self.process_event(module_name, event)\n\n except Exception:\n err = sys.exc_info()[1]\n self.error = err\n self.py3_wrapper.log('event failed ({})'.format(err), 'warning')\n", "path": "py3status/events.py"}]}
2,706
202
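The py3status record above is easiest to follow by isolating the one behavioural change visible in its `after_files`: `process_event` now builds the `on_click` lookup key from the event's `name` plus `instance`, and compares the button number as a string. A standalone sketch of that lookup, with a made-up config dict and module name purely for illustration:

```python
# Hypothetical on_click config keyed by "<name> <instance>", with button
# numbers stored as strings -- mirrors the lookup in the patched process_event().
on_click = {"weather_yahoo paris": {"1": "refresh"}}

def resolve_on_click(event, on_click):
    # Build the config key from the event's name and, when present, its instance.
    click_module = event["name"]
    if event.get("instance"):
        click_module += " " + event["instance"]
    # i3bar delivers the button as an integer; the config side uses strings.
    btn = str(event.get("button", 0))
    return click_module, on_click.get(click_module, {}).get(btn)

print(resolve_on_click(
    {"name": "weather_yahoo", "instance": "paris", "button": 1}, on_click))
# ('weather_yahoo paris', 'refresh')
```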
gh_patches_debug_7523
rasdani/github-patches
git_diff
tensorflow__addons-758
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Allow passing in a tensor to tfa.optimizers.MovingAverage num_updates Currently, `tfa.optimizers.MovingAverage` has an assert that requires `num_updates` to be of type int, [shown here](https://github.com/tensorflow/addons/blob/604a70de563f8797984c9c3f002aff70bef6c90b/tensorflow_addons/optimizers/moving_average.py#L81). This prevents me from passing in an integer tensor that changes with the global step, which is officially supported by `tf.train.ExponentialMovingAverage`. Can this assert be updated to handle this use case? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tensorflow_addons/optimizers/moving_average.py` Content: ``` 1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 16 from __future__ import absolute_import 17 from __future__ import division 18 from __future__ import print_function 19 20 import tensorflow as tf 21 22 23 @tf.keras.utils.register_keras_serializable(package='Addons') 24 class MovingAverage(tf.keras.optimizers.Optimizer): 25 """Optimizer that computes a moving average of the variables. 26 27 Empirically it has been found that using the moving average of the trained 28 parameters of a deep network is better than using its trained parameters 29 directly. This optimizer allows you to compute this moving average and swap 30 the variables at save time so that any code outside of the training loop 31 will use by default the average values instead of the original ones. 32 33 Example of usage: 34 35 ```python 36 opt = tf.keras.optimizers.SGD(learning_rate) 37 opt = tfa.optimizers.MovingAverage(opt) 38 39 ``` 40 """ 41 42 def __init__(self, 43 optimizer, 44 average_decay=0.1, 45 num_updates=None, 46 sequential_update=True, 47 name="MovingAverage", 48 **kwargs): 49 """Construct a new MovingAverage optimizer. 50 51 Args: 52 optimizer: str or `tf.keras.optimizers.Optimizer` that will be 53 used to compute and apply gradients. 54 average_decay: float. Decay to use to maintain the moving averages 55 of trained variables. See `tf.train.ExponentialMovingAverage` 56 for details. 57 num_updates: Optional count of the number of updates applied to 58 variables. See `tf.train.ExponentialMovingAverage` for details. 59 sequential_update: Bool. If False, will compute the moving average 60 at the same time as the model is updated, potentially doing 61 benign data races. If True, will update the moving average 62 after gradient updates. 63 name: Optional name for the operations created when applying 64 gradients. Defaults to "MovingAverage". 65 **kwargs: keyword arguments. Allowed to be {`clipnorm`, 66 `clipvalue`, `lr`, `decay`}. 
`clipnorm` is clip gradients by 67 norm; `clipvalue` is clip gradients by value, `decay` is 68 included for backward compatibility to allow time inverse 69 decay of learning rate. `lr` is included for backward 70 compatibility, recommended to use `learning_rate` instead. 71 """ 72 super(MovingAverage, self).__init__(name, **kwargs) 73 74 if isinstance(optimizer, str): 75 optimizer = tf.keras.optimizers.get(optimizer) 76 77 if not isinstance(optimizer, tf.keras.optimizers.Optimizer): 78 raise TypeError( 79 "optimizer is not an object of tf.keras.optimizers.Optimizer") 80 81 if num_updates is not None and not isinstance(num_updates, int): 82 raise TypeError("num_updates must be None or of integer type") 83 84 if not isinstance(sequential_update, bool): 85 raise TypeError("sequential_update must be of bool type") 86 87 with tf.name_scope(name): 88 self._ema = tf.train.ExponentialMovingAverage( 89 average_decay, num_updates=num_updates) 90 91 self._optimizer = optimizer 92 self._set_hyper("average_decay", average_decay) 93 self._num_updates = num_updates 94 self._sequential_update = sequential_update 95 self._initialized = False 96 97 def apply_gradients(self, grads_and_vars, name=None): 98 var_list = [v for (_, v) in grads_and_vars] 99 100 if tf.executing_eagerly() and not self._initialized: 101 # this to ensure that var_list is registered initially 102 self._ema.apply(var_list) 103 self._initialized = True 104 105 train_op = self._optimizer.apply_gradients(grads_and_vars, name=name) 106 107 if self._sequential_update: 108 with tf.control_dependencies([train_op]): 109 ma_op = self._ema.apply(var_list) 110 else: 111 ma_op = self._ema.apply(var_list) 112 113 return tf.group(train_op, ma_op, name="train_with_avg") 114 115 def get_config(self): 116 config = { 117 'optimizer': tf.keras.optimizers.serialize(self._optimizer), 118 'average_decay': self._serialize_hyperparameter('average_decay'), 119 'num_updates': self._num_updates, 120 'sequential_update': self._sequential_update 121 } 122 base_config = super(MovingAverage, self).get_config() 123 return dict(list(base_config.items()) + list(config.items())) 124 125 @classmethod 126 def from_config(cls, config, custom_objects=None): 127 optimizer = tf.keras.optimizers.deserialize( 128 config.pop('optimizer'), custom_objects=custom_objects) 129 return cls(optimizer, **config) 130 131 def assign_average_vars(self, var_list): 132 """Assign variables in var_list with their respective moving averages. 133 134 Example: 135 ```python 136 model = tf.Sequential([...]) 137 opt = tfa.optimizers.MovingAverage( 138 tf.keras.optimizers.SGD(lr=2.0), 0.5) 139 140 model.compile(opt, ...) 141 model.fit(x, y, ...) 
142 143 # Update the weights to their mean before saving 144 opt.assign_average_vars(model.variables) 145 146 model.save('model.h5') 147 ``` 148 """ 149 assign = tf.group([v.assign(self._ema.average(v)) for v in var_list]) 150 return assign 151 152 @property 153 def weights(self): 154 return self._optimizer.weights 155 156 def _resource_apply_dense(self, grad, var): 157 return self._optimizer._resource_apply_dense(grad, var) # pylint: disable=protected-access 158 159 def _resource_apply_sparse_duplicate_indices(self, grad, var, indices): 160 return self._optimizer._resource_apply_sparse_duplicate_indices( # pylint: disable=protected-access 161 grad, var, indices) 162 163 def _resource_apply_sparse(self, grad, var, indices): 164 return self._optimizer._resource_apply_sparse(grad, var, indices) # pylint: disable=protected-access 165 166 @property 167 def learning_rate(self): 168 return self._optimizer._get_hyper('learning_rate') 169 170 @learning_rate.setter 171 def learning_rate(self, learning_rate): 172 self._optimizer._set_hyper('learning_rate', learning_rate) 173 174 @property 175 def lr(self): 176 return self.learning_rate 177 178 @lr.setter 179 def lr(self, lr): 180 self.learning_rate = lr 181 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tensorflow_addons/optimizers/moving_average.py b/tensorflow_addons/optimizers/moving_average.py --- a/tensorflow_addons/optimizers/moving_average.py +++ b/tensorflow_addons/optimizers/moving_average.py @@ -78,9 +78,6 @@ raise TypeError( "optimizer is not an object of tf.keras.optimizers.Optimizer") - if num_updates is not None and not isinstance(num_updates, int): - raise TypeError("num_updates must be None or of integer type") - if not isinstance(sequential_update, bool): raise TypeError("sequential_update must be of bool type")
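The diff above only removes the `isinstance(num_updates, int)` type check; the wrapped `tf.train.ExponentialMovingAverage` already accepts a step tensor, which is exactly what the issue asks for. A minimal sketch of that usage once the patch is applied — the decay value and the step bookkeeping below are illustrative, not taken from the record:

```python
import tensorflow as tf
import tensorflow_addons as tfa

# A step counter that changes during training; with the type check removed it
# can be handed straight through to tf.train.ExponentialMovingAverage.
num_updates = tf.Variable(0, dtype=tf.int64, trainable=False)

opt = tfa.optimizers.MovingAverage(
    tf.keras.optimizers.SGD(learning_rate=0.01),
    average_decay=0.999,
    num_updates=num_updates,
)

# Advance the counter in the training loop so the EMA decay warms up over time.
num_updates.assign_add(1)
```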
{"golden_diff": "diff --git a/tensorflow_addons/optimizers/moving_average.py b/tensorflow_addons/optimizers/moving_average.py\n--- a/tensorflow_addons/optimizers/moving_average.py\n+++ b/tensorflow_addons/optimizers/moving_average.py\n@@ -78,9 +78,6 @@\n raise TypeError(\n \"optimizer is not an object of tf.keras.optimizers.Optimizer\")\n \n- if num_updates is not None and not isinstance(num_updates, int):\n- raise TypeError(\"num_updates must be None or of integer type\")\n-\n if not isinstance(sequential_update, bool):\n raise TypeError(\"sequential_update must be of bool type\")\n", "issue": "Allow passing in a tensor to tfa.optimizers.MovingAverage num_updates\nCurrently, `tfa.optimizers.MovingAverage` has an assert that requires `num_updates` to be of type int, [shown here](https://github.com/tensorflow/addons/blob/604a70de563f8797984c9c3f002aff70bef6c90b/tensorflow_addons/optimizers/moving_average.py#L81). This prevents me from passing in an integer tensor that changes with the global step, which is officially supported by `tf.train.ExponentialMovingAverage`.\r\n\r\nCan this assert be updated to handle this use case?\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package='Addons')\nclass MovingAverage(tf.keras.optimizers.Optimizer):\n \"\"\"Optimizer that computes a moving average of the variables.\n\n Empirically it has been found that using the moving average of the trained\n parameters of a deep network is better than using its trained parameters\n directly. This optimizer allows you to compute this moving average and swap\n the variables at save time so that any code outside of the training loop\n will use by default the average values instead of the original ones.\n\n Example of usage:\n\n ```python\n opt = tf.keras.optimizers.SGD(learning_rate)\n opt = tfa.optimizers.MovingAverage(opt)\n\n ```\n \"\"\"\n\n def __init__(self,\n optimizer,\n average_decay=0.1,\n num_updates=None,\n sequential_update=True,\n name=\"MovingAverage\",\n **kwargs):\n \"\"\"Construct a new MovingAverage optimizer.\n\n Args:\n optimizer: str or `tf.keras.optimizers.Optimizer` that will be\n used to compute and apply gradients.\n average_decay: float. Decay to use to maintain the moving averages\n of trained variables. See `tf.train.ExponentialMovingAverage`\n for details.\n num_updates: Optional count of the number of updates applied to\n variables. See `tf.train.ExponentialMovingAverage` for details.\n sequential_update: Bool. If False, will compute the moving average\n at the same time as the model is updated, potentially doing\n benign data races. 
If True, will update the moving average\n after gradient updates.\n name: Optional name for the operations created when applying\n gradients. Defaults to \"MovingAverage\".\n **kwargs: keyword arguments. Allowed to be {`clipnorm`,\n `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by\n norm; `clipvalue` is clip gradients by value, `decay` is\n included for backward compatibility to allow time inverse\n decay of learning rate. `lr` is included for backward\n compatibility, recommended to use `learning_rate` instead.\n \"\"\"\n super(MovingAverage, self).__init__(name, **kwargs)\n\n if isinstance(optimizer, str):\n optimizer = tf.keras.optimizers.get(optimizer)\n\n if not isinstance(optimizer, tf.keras.optimizers.Optimizer):\n raise TypeError(\n \"optimizer is not an object of tf.keras.optimizers.Optimizer\")\n\n if num_updates is not None and not isinstance(num_updates, int):\n raise TypeError(\"num_updates must be None or of integer type\")\n\n if not isinstance(sequential_update, bool):\n raise TypeError(\"sequential_update must be of bool type\")\n\n with tf.name_scope(name):\n self._ema = tf.train.ExponentialMovingAverage(\n average_decay, num_updates=num_updates)\n\n self._optimizer = optimizer\n self._set_hyper(\"average_decay\", average_decay)\n self._num_updates = num_updates\n self._sequential_update = sequential_update\n self._initialized = False\n\n def apply_gradients(self, grads_and_vars, name=None):\n var_list = [v for (_, v) in grads_and_vars]\n\n if tf.executing_eagerly() and not self._initialized:\n # this to ensure that var_list is registered initially\n self._ema.apply(var_list)\n self._initialized = True\n\n train_op = self._optimizer.apply_gradients(grads_and_vars, name=name)\n\n if self._sequential_update:\n with tf.control_dependencies([train_op]):\n ma_op = self._ema.apply(var_list)\n else:\n ma_op = self._ema.apply(var_list)\n\n return tf.group(train_op, ma_op, name=\"train_with_avg\")\n\n def get_config(self):\n config = {\n 'optimizer': tf.keras.optimizers.serialize(self._optimizer),\n 'average_decay': self._serialize_hyperparameter('average_decay'),\n 'num_updates': self._num_updates,\n 'sequential_update': self._sequential_update\n }\n base_config = super(MovingAverage, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n optimizer = tf.keras.optimizers.deserialize(\n config.pop('optimizer'), custom_objects=custom_objects)\n return cls(optimizer, **config)\n\n def assign_average_vars(self, var_list):\n \"\"\"Assign variables in var_list with their respective moving averages.\n\n Example:\n ```python\n model = tf.Sequential([...])\n opt = tfa.optimizers.MovingAverage(\n tf.keras.optimizers.SGD(lr=2.0), 0.5)\n\n model.compile(opt, ...)\n model.fit(x, y, ...)\n\n # Update the weights to their mean before saving\n opt.assign_average_vars(model.variables)\n\n model.save('model.h5')\n ```\n \"\"\"\n assign = tf.group([v.assign(self._ema.average(v)) for v in var_list])\n return assign\n\n @property\n def weights(self):\n return self._optimizer.weights\n\n def _resource_apply_dense(self, grad, var):\n return self._optimizer._resource_apply_dense(grad, var) # pylint: disable=protected-access\n\n def _resource_apply_sparse_duplicate_indices(self, grad, var, indices):\n return self._optimizer._resource_apply_sparse_duplicate_indices( # pylint: disable=protected-access\n grad, var, indices)\n\n def _resource_apply_sparse(self, grad, var, indices):\n return 
self._optimizer._resource_apply_sparse(grad, var, indices) # pylint: disable=protected-access\n\n @property\n def learning_rate(self):\n return self._optimizer._get_hyper('learning_rate')\n\n @learning_rate.setter\n def learning_rate(self, learning_rate):\n self._optimizer._set_hyper('learning_rate', learning_rate)\n\n @property\n def lr(self):\n return self.learning_rate\n\n @lr.setter\n def lr(self, lr):\n self.learning_rate = lr\n", "path": "tensorflow_addons/optimizers/moving_average.py"}], "after_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package='Addons')\nclass MovingAverage(tf.keras.optimizers.Optimizer):\n \"\"\"Optimizer that computes a moving average of the variables.\n\n Empirically it has been found that using the moving average of the trained\n parameters of a deep network is better than using its trained parameters\n directly. This optimizer allows you to compute this moving average and swap\n the variables at save time so that any code outside of the training loop\n will use by default the average values instead of the original ones.\n\n Example of usage:\n\n ```python\n opt = tf.keras.optimizers.SGD(learning_rate)\n opt = tfa.optimizers.MovingAverage(opt)\n\n ```\n \"\"\"\n\n def __init__(self,\n optimizer,\n average_decay=0.1,\n num_updates=None,\n sequential_update=True,\n name=\"MovingAverage\",\n **kwargs):\n \"\"\"Construct a new MovingAverage optimizer.\n\n Args:\n optimizer: str or `tf.keras.optimizers.Optimizer` that will be\n used to compute and apply gradients.\n average_decay: float. Decay to use to maintain the moving averages\n of trained variables. See `tf.train.ExponentialMovingAverage`\n for details.\n num_updates: Optional count of the number of updates applied to\n variables. See `tf.train.ExponentialMovingAverage` for details.\n sequential_update: Bool. If False, will compute the moving average\n at the same time as the model is updated, potentially doing\n benign data races. If True, will update the moving average\n after gradient updates.\n name: Optional name for the operations created when applying\n gradients. Defaults to \"MovingAverage\".\n **kwargs: keyword arguments. Allowed to be {`clipnorm`,\n `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by\n norm; `clipvalue` is clip gradients by value, `decay` is\n included for backward compatibility to allow time inverse\n decay of learning rate. 
`lr` is included for backward\n compatibility, recommended to use `learning_rate` instead.\n \"\"\"\n super(MovingAverage, self).__init__(name, **kwargs)\n\n if isinstance(optimizer, str):\n optimizer = tf.keras.optimizers.get(optimizer)\n\n if not isinstance(optimizer, tf.keras.optimizers.Optimizer):\n raise TypeError(\n \"optimizer is not an object of tf.keras.optimizers.Optimizer\")\n\n if not isinstance(sequential_update, bool):\n raise TypeError(\"sequential_update must be of bool type\")\n\n with tf.name_scope(name):\n self._ema = tf.train.ExponentialMovingAverage(\n average_decay, num_updates=num_updates)\n\n self._optimizer = optimizer\n self._set_hyper(\"average_decay\", average_decay)\n self._num_updates = num_updates\n self._sequential_update = sequential_update\n self._initialized = False\n\n def apply_gradients(self, grads_and_vars, name=None):\n var_list = [v for (_, v) in grads_and_vars]\n\n if tf.executing_eagerly() and not self._initialized:\n # this to ensure that var_list is registered initially\n self._ema.apply(var_list)\n self._initialized = True\n\n train_op = self._optimizer.apply_gradients(grads_and_vars, name=name)\n\n if self._sequential_update:\n with tf.control_dependencies([train_op]):\n ma_op = self._ema.apply(var_list)\n else:\n ma_op = self._ema.apply(var_list)\n\n return tf.group(train_op, ma_op, name=\"train_with_avg\")\n\n def get_config(self):\n config = {\n 'optimizer': tf.keras.optimizers.serialize(self._optimizer),\n 'average_decay': self._serialize_hyperparameter('average_decay'),\n 'num_updates': self._num_updates,\n 'sequential_update': self._sequential_update\n }\n base_config = super(MovingAverage, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n optimizer = tf.keras.optimizers.deserialize(\n config.pop('optimizer'), custom_objects=custom_objects)\n return cls(optimizer, **config)\n\n def assign_average_vars(self, var_list):\n \"\"\"Assign variables in var_list with their respective moving averages.\n\n Example:\n ```python\n model = tf.Sequential([...])\n opt = tfa.optimizers.MovingAverage(\n tf.keras.optimizers.SGD(lr=2.0), 0.5)\n\n model.compile(opt, ...)\n model.fit(x, y, ...)\n\n # Update the weights to their mean before saving\n opt.assign_average_vars(model.variables)\n\n model.save('model.h5')\n ```\n \"\"\"\n assign = tf.group([v.assign(self._ema.average(v)) for v in var_list])\n return assign\n\n @property\n def weights(self):\n return self._optimizer.weights\n\n def _resource_apply_dense(self, grad, var):\n return self._optimizer._resource_apply_dense(grad, var) # pylint: disable=protected-access\n\n def _resource_apply_sparse_duplicate_indices(self, grad, var, indices):\n return self._optimizer._resource_apply_sparse_duplicate_indices( # pylint: disable=protected-access\n grad, var, indices)\n\n def _resource_apply_sparse(self, grad, var, indices):\n return self._optimizer._resource_apply_sparse(grad, var, indices) # pylint: disable=protected-access\n\n @property\n def learning_rate(self):\n return self._optimizer._get_hyper('learning_rate')\n\n @learning_rate.setter\n def learning_rate(self, learning_rate):\n self._optimizer._set_hyper('learning_rate', learning_rate)\n\n @property\n def lr(self):\n return self.learning_rate\n\n @lr.setter\n def lr(self, lr):\n self.learning_rate = lr\n", "path": "tensorflow_addons/optimizers/moving_average.py"}]}
2,317
147
gh_patches_debug_1689
rasdani/github-patches
git_diff
ibis-project__ibis-3630
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- bug(duckdb): duckdb backend should add in CAST for some bind parameters DuckDB casts bind parameters `?` to strings which leads to binder errors with some queries If we have a small tpch dataset: ```python import duckdb con = duckdb.connect("tpch.ddb") con.execute("CALL dbgen(sf=0.1)") import ibis con = ibis.duckdb.connect("tpch.ddb") t = con.table('orders') expr = t.aggregate(high_line_count=(t.o_orderpriority.case().when('1-URGENT', 1).else_(0).end().sum() expr.execute() ``` raises ``` RuntimeError: Binder Error: No function matches the given name and argument types 'sum(VARCHAR)'. You might need to add explicit type casts. Candidate functions: sum(DECIMAL) -> DECIMAL sum(SMALLINT) -> HUGEINT sum(INTEGER) -> HUGEINT sum(BIGINT) -> HUGEINT sum(HUGEINT) -> HUGEINT sum(DOUBLE) -> DOUBLE LINE 1: SELECT sum(CASE WHEN (t0.o_orderpriority = ?) ... ``` because our generated SQL doesn't have explicit casts: ``` print(expr.compile()) SELECT sum(CASE WHEN (t0.o_orderpriority = ?) THEN ? ELSE ? END) AS high_line_count FROM orders AS t0 ``` we want to generate ``` SELECT sum(CASE WHEN (t0.o_orderpriority = ?) THEN cast(? as INTEGER) ELSE cast(? as INTEGER) END) AS high_line_count FROM orders as t0 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ibis/backends/duckdb/registry.py` Content: ``` 1 import collections 2 import operator 3 4 import numpy as np 5 import sqlalchemy as sa 6 7 import ibis.expr.datatypes as dt 8 import ibis.expr.operations as ops 9 from ibis.backends.base.sql.alchemy import to_sqla_type, unary 10 11 from ..base.sql.alchemy.registry import _geospatial_functions, _table_column 12 from ..postgres.registry import fixed_arity, operation_registry 13 14 operation_registry = { 15 op: operation_registry[op] 16 # duckdb does not support geospatial operations, but shares most of the 17 # remaining postgres rules 18 for op in operation_registry.keys() - _geospatial_functions.keys() 19 } 20 21 22 def _round(t, expr): 23 arg, digits = expr.op().args 24 sa_arg = t.translate(arg) 25 26 if digits is None: 27 return sa.func.round(sa_arg) 28 29 return sa.func.round(sa_arg, t.translate(digits)) 30 31 32 _LOG_BASE_FUNCS = { 33 2: sa.func.log2, 34 10: sa.func.log, 35 } 36 37 38 def _generic_log(arg, base): 39 return sa.func.ln(arg) / sa.func.ln(base) 40 41 42 def _log(t, expr): 43 arg, base = expr.op().args 44 sa_arg = t.translate(arg) 45 if base is not None: 46 sa_base = t.translate(base) 47 try: 48 base_value = sa_base.value 49 except AttributeError: 50 return _generic_log(sa_arg, sa_base) 51 else: 52 func = _LOG_BASE_FUNCS.get(base_value, _generic_log) 53 return func(sa_arg) 54 return sa.func.ln(sa_arg) 55 56 57 def _timestamp_from_unix(t, expr): 58 op = expr.op() 59 arg, unit = op.args 60 arg = t.translate(arg) 61 62 if unit in {"us", "ns"}: 63 raise ValueError(f"`{unit}` unit is not supported!") 64 65 if unit == "ms": 66 return sa.func.epoch_ms(arg) 67 elif unit == "s": 68 return sa.func.to_timestamp(arg) 69 70 71 def _literal(_, expr): 72 dtype = expr.type() 73 sqla_type = to_sqla_type(dtype) 74 op = expr.op() 75 value = op.value 76 77 if isinstance(dtype, dt.Interval): 78 return sa.text(f"INTERVAL '{value} {dtype.resolution}'") 79 elif isinstance(dtype, dt.Set) or ( 80 isinstance(value, collections.abc.Sequence) 81 and not isinstance(value, str) 82 ): 83 return 
sa.cast(sa.func.list_value(*value), sqla_type) 84 elif isinstance(value, np.ndarray): 85 return sa.cast(sa.func.list_value(*value.tolist()), sqla_type) 86 elif isinstance(value, collections.abc.Mapping): 87 if isinstance(dtype, dt.Struct): 88 placeholders = ", ".join( 89 f"{key!r}: :v{i}" for i, key in enumerate(value.keys()) 90 ) 91 return sa.text(f"{{{placeholders}}}").bindparams( 92 *( 93 sa.bindparam(f"v{i:d}", val) 94 for i, val in enumerate(value.values()) 95 ) 96 ) 97 raise NotImplementedError( 98 f"Ibis dtype `{dtype}` with mapping type " 99 f"`{type(value).__name__}` isn't yet supported with the duckdb " 100 "backend" 101 ) 102 return sa.literal(value) 103 104 105 def _array_column(t, expr): 106 (arg,) = expr.op().args 107 sqla_type = to_sqla_type(expr.type()) 108 return sa.cast(sa.func.list_value(*map(t.translate, arg)), sqla_type) 109 110 111 def _struct_field(t, expr): 112 op = expr.op() 113 return sa.func.struct_extract( 114 t.translate(op.arg), 115 sa.text(repr(op.field)), 116 type_=to_sqla_type(expr.type()), 117 ) 118 119 120 def _regex_extract(t, expr): 121 string, pattern, index = map(t.translate, expr.op().args) 122 result = sa.case( 123 [ 124 ( 125 sa.func.regexp_matches(string, pattern), 126 sa.func.regexp_extract( 127 string, 128 pattern, 129 # DuckDB requires the index to be a constant so we compile 130 # the value and inline it using sa.text 131 sa.text( 132 str( 133 (index + 1).compile( 134 compile_kwargs=dict(literal_binds=True) 135 ) 136 ) 137 ), 138 ), 139 ) 140 ], 141 else_="", 142 ) 143 return result 144 145 146 operation_registry.update( 147 { 148 ops.ArrayColumn: _array_column, 149 ops.ArrayConcat: fixed_arity('array_concat', 2), 150 ops.ArrayIndex: fixed_arity('list_element', 2), 151 ops.DayOfWeekName: unary(sa.func.dayname), 152 ops.Literal: _literal, 153 ops.Log2: unary(sa.func.log2), 154 ops.Ln: unary(sa.func.ln), 155 ops.Log: _log, 156 # TODO: map operations, but DuckDB's maps are multimaps 157 ops.Modulus: fixed_arity(operator.mod, 2), 158 ops.Round: _round, 159 ops.StructField: _struct_field, 160 ops.TableColumn: _table_column, 161 ops.TimestampDiff: fixed_arity('age', 2), 162 ops.TimestampFromUNIX: _timestamp_from_unix, 163 ops.Translate: fixed_arity('replace', 3), 164 ops.TimestampNow: fixed_arity('now', 0), 165 ops.RegexExtract: _regex_extract, 166 ops.RegexReplace: fixed_arity("regexp_replace", 3), 167 } 168 ) 169 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ibis/backends/duckdb/registry.py b/ibis/backends/duckdb/registry.py --- a/ibis/backends/duckdb/registry.py +++ b/ibis/backends/duckdb/registry.py @@ -99,7 +99,7 @@ f"`{type(value).__name__}` isn't yet supported with the duckdb " "backend" ) - return sa.literal(value) + return sa.cast(sa.literal(value), sqla_type) def _array_column(t, expr):
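The fix is the single `sa.cast(...)` wrapper in `_literal`. Its effect is easiest to see in isolation: a bare SQLAlchemy literal compiles to an untyped bind parameter (which the DuckDB driver ships as `?`), while the cast carries the ibis-inferred type into the SQL text, so the `sum(CASE ...)` from the issue sees INTEGER operands. A small sketch using the generic dialect (the exact parameter rendering differs per driver):

```python
import sqlalchemy as sa

# Before the patch: an untyped bind parameter.
print(str(sa.literal(1)))                        # :param_1
# After the patch: the inferred type is spelled out in the generated SQL.
print(str(sa.cast(sa.literal(1), sa.INTEGER)))   # CAST(:param_1 AS INTEGER)
```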
{"golden_diff": "diff --git a/ibis/backends/duckdb/registry.py b/ibis/backends/duckdb/registry.py\n--- a/ibis/backends/duckdb/registry.py\n+++ b/ibis/backends/duckdb/registry.py\n@@ -99,7 +99,7 @@\n f\"`{type(value).__name__}` isn't yet supported with the duckdb \"\n \"backend\"\n )\n- return sa.literal(value)\n+ return sa.cast(sa.literal(value), sqla_type)\n \n \n def _array_column(t, expr):\n", "issue": "bug(duckdb): duckdb backend should add in CAST for some bind parameters\nDuckDB casts bind parameters `?` to strings which leads to binder errors with some queries\r\n\r\nIf we have a small tpch dataset:\r\n\r\n```python\r\nimport duckdb\r\ncon = duckdb.connect(\"tpch.ddb\")\r\ncon.execute(\"CALL dbgen(sf=0.1)\")\r\n\r\nimport ibis\r\ncon = ibis.duckdb.connect(\"tpch.ddb\")\r\n\r\nt = con.table('orders')\r\nexpr = t.aggregate(high_line_count=(t.o_orderpriority.case().when('1-URGENT', 1).else_(0).end().sum()\r\n\r\nexpr.execute()\r\n```\r\n\r\nraises\r\n\r\n```\r\nRuntimeError: Binder Error: No function matches the given name and argument types 'sum(VARCHAR)'. You might need to add explicit type casts.\r\n\tCandidate functions:\r\n\tsum(DECIMAL) -> DECIMAL\r\n\tsum(SMALLINT) -> HUGEINT\r\n\tsum(INTEGER) -> HUGEINT\r\n\tsum(BIGINT) -> HUGEINT\r\n\tsum(HUGEINT) -> HUGEINT\r\n\tsum(DOUBLE) -> DOUBLE\r\n\r\nLINE 1: SELECT sum(CASE WHEN (t0.o_orderpriority = ?) ...\r\n```\r\n\r\nbecause our generated SQL doesn't have explicit casts:\r\n\r\n```\r\nprint(expr.compile())\r\nSELECT sum(CASE WHEN (t0.o_orderpriority = ?) THEN ? ELSE ? END) AS high_line_count \r\nFROM orders AS t0\r\n```\r\n\r\nwe want to generate\r\n\r\n```\r\nSELECT sum(CASE WHEN (t0.o_orderpriority = ?) THEN cast(? as INTEGER) ELSE cast(? as INTEGER) END) AS high_line_count FROM orders as t0\r\n```\n", "before_files": [{"content": "import collections\nimport operator\n\nimport numpy as np\nimport sqlalchemy as sa\n\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nfrom ibis.backends.base.sql.alchemy import to_sqla_type, unary\n\nfrom ..base.sql.alchemy.registry import _geospatial_functions, _table_column\nfrom ..postgres.registry import fixed_arity, operation_registry\n\noperation_registry = {\n op: operation_registry[op]\n # duckdb does not support geospatial operations, but shares most of the\n # remaining postgres rules\n for op in operation_registry.keys() - _geospatial_functions.keys()\n}\n\n\ndef _round(t, expr):\n arg, digits = expr.op().args\n sa_arg = t.translate(arg)\n\n if digits is None:\n return sa.func.round(sa_arg)\n\n return sa.func.round(sa_arg, t.translate(digits))\n\n\n_LOG_BASE_FUNCS = {\n 2: sa.func.log2,\n 10: sa.func.log,\n}\n\n\ndef _generic_log(arg, base):\n return sa.func.ln(arg) / sa.func.ln(base)\n\n\ndef _log(t, expr):\n arg, base = expr.op().args\n sa_arg = t.translate(arg)\n if base is not None:\n sa_base = t.translate(base)\n try:\n base_value = sa_base.value\n except AttributeError:\n return _generic_log(sa_arg, sa_base)\n else:\n func = _LOG_BASE_FUNCS.get(base_value, _generic_log)\n return func(sa_arg)\n return sa.func.ln(sa_arg)\n\n\ndef _timestamp_from_unix(t, expr):\n op = expr.op()\n arg, unit = op.args\n arg = t.translate(arg)\n\n if unit in {\"us\", \"ns\"}:\n raise ValueError(f\"`{unit}` unit is not supported!\")\n\n if unit == \"ms\":\n return sa.func.epoch_ms(arg)\n elif unit == \"s\":\n return sa.func.to_timestamp(arg)\n\n\ndef _literal(_, expr):\n dtype = expr.type()\n sqla_type = to_sqla_type(dtype)\n op = expr.op()\n value = op.value\n\n if isinstance(dtype, 
dt.Interval):\n return sa.text(f\"INTERVAL '{value} {dtype.resolution}'\")\n elif isinstance(dtype, dt.Set) or (\n isinstance(value, collections.abc.Sequence)\n and not isinstance(value, str)\n ):\n return sa.cast(sa.func.list_value(*value), sqla_type)\n elif isinstance(value, np.ndarray):\n return sa.cast(sa.func.list_value(*value.tolist()), sqla_type)\n elif isinstance(value, collections.abc.Mapping):\n if isinstance(dtype, dt.Struct):\n placeholders = \", \".join(\n f\"{key!r}: :v{i}\" for i, key in enumerate(value.keys())\n )\n return sa.text(f\"{{{placeholders}}}\").bindparams(\n *(\n sa.bindparam(f\"v{i:d}\", val)\n for i, val in enumerate(value.values())\n )\n )\n raise NotImplementedError(\n f\"Ibis dtype `{dtype}` with mapping type \"\n f\"`{type(value).__name__}` isn't yet supported with the duckdb \"\n \"backend\"\n )\n return sa.literal(value)\n\n\ndef _array_column(t, expr):\n (arg,) = expr.op().args\n sqla_type = to_sqla_type(expr.type())\n return sa.cast(sa.func.list_value(*map(t.translate, arg)), sqla_type)\n\n\ndef _struct_field(t, expr):\n op = expr.op()\n return sa.func.struct_extract(\n t.translate(op.arg),\n sa.text(repr(op.field)),\n type_=to_sqla_type(expr.type()),\n )\n\n\ndef _regex_extract(t, expr):\n string, pattern, index = map(t.translate, expr.op().args)\n result = sa.case(\n [\n (\n sa.func.regexp_matches(string, pattern),\n sa.func.regexp_extract(\n string,\n pattern,\n # DuckDB requires the index to be a constant so we compile\n # the value and inline it using sa.text\n sa.text(\n str(\n (index + 1).compile(\n compile_kwargs=dict(literal_binds=True)\n )\n )\n ),\n ),\n )\n ],\n else_=\"\",\n )\n return result\n\n\noperation_registry.update(\n {\n ops.ArrayColumn: _array_column,\n ops.ArrayConcat: fixed_arity('array_concat', 2),\n ops.ArrayIndex: fixed_arity('list_element', 2),\n ops.DayOfWeekName: unary(sa.func.dayname),\n ops.Literal: _literal,\n ops.Log2: unary(sa.func.log2),\n ops.Ln: unary(sa.func.ln),\n ops.Log: _log,\n # TODO: map operations, but DuckDB's maps are multimaps\n ops.Modulus: fixed_arity(operator.mod, 2),\n ops.Round: _round,\n ops.StructField: _struct_field,\n ops.TableColumn: _table_column,\n ops.TimestampDiff: fixed_arity('age', 2),\n ops.TimestampFromUNIX: _timestamp_from_unix,\n ops.Translate: fixed_arity('replace', 3),\n ops.TimestampNow: fixed_arity('now', 0),\n ops.RegexExtract: _regex_extract,\n ops.RegexReplace: fixed_arity(\"regexp_replace\", 3),\n }\n)\n", "path": "ibis/backends/duckdb/registry.py"}], "after_files": [{"content": "import collections\nimport operator\n\nimport numpy as np\nimport sqlalchemy as sa\n\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nfrom ibis.backends.base.sql.alchemy import to_sqla_type, unary\n\nfrom ..base.sql.alchemy.registry import _geospatial_functions, _table_column\nfrom ..postgres.registry import fixed_arity, operation_registry\n\noperation_registry = {\n op: operation_registry[op]\n # duckdb does not support geospatial operations, but shares most of the\n # remaining postgres rules\n for op in operation_registry.keys() - _geospatial_functions.keys()\n}\n\n\ndef _round(t, expr):\n arg, digits = expr.op().args\n sa_arg = t.translate(arg)\n\n if digits is None:\n return sa.func.round(sa_arg)\n\n return sa.func.round(sa_arg, t.translate(digits))\n\n\n_LOG_BASE_FUNCS = {\n 2: sa.func.log2,\n 10: sa.func.log,\n}\n\n\ndef _generic_log(arg, base):\n return sa.func.ln(arg) / sa.func.ln(base)\n\n\ndef _log(t, expr):\n arg, base = expr.op().args\n sa_arg = t.translate(arg)\n if 
base is not None:\n sa_base = t.translate(base)\n try:\n base_value = sa_base.value\n except AttributeError:\n return _generic_log(sa_arg, sa_base)\n else:\n func = _LOG_BASE_FUNCS.get(base_value, _generic_log)\n return func(sa_arg)\n return sa.func.ln(sa_arg)\n\n\ndef _timestamp_from_unix(t, expr):\n op = expr.op()\n arg, unit = op.args\n arg = t.translate(arg)\n\n if unit in {\"us\", \"ns\"}:\n raise ValueError(f\"`{unit}` unit is not supported!\")\n\n if unit == \"ms\":\n return sa.func.epoch_ms(arg)\n elif unit == \"s\":\n return sa.func.to_timestamp(arg)\n\n\ndef _literal(_, expr):\n dtype = expr.type()\n sqla_type = to_sqla_type(dtype)\n op = expr.op()\n value = op.value\n\n if isinstance(dtype, dt.Interval):\n return sa.text(f\"INTERVAL '{value} {dtype.resolution}'\")\n elif isinstance(dtype, dt.Set) or (\n isinstance(value, collections.abc.Sequence)\n and not isinstance(value, str)\n ):\n return sa.cast(sa.func.list_value(*value), sqla_type)\n elif isinstance(value, np.ndarray):\n return sa.cast(sa.func.list_value(*value.tolist()), sqla_type)\n elif isinstance(value, collections.abc.Mapping):\n if isinstance(dtype, dt.Struct):\n placeholders = \", \".join(\n f\"{key!r}: :v{i}\" for i, key in enumerate(value.keys())\n )\n return sa.text(f\"{{{placeholders}}}\").bindparams(\n *(\n sa.bindparam(f\"v{i:d}\", val)\n for i, val in enumerate(value.values())\n )\n )\n raise NotImplementedError(\n f\"Ibis dtype `{dtype}` with mapping type \"\n f\"`{type(value).__name__}` isn't yet supported with the duckdb \"\n \"backend\"\n )\n return sa.cast(sa.literal(value), sqla_type)\n\n\ndef _array_column(t, expr):\n (arg,) = expr.op().args\n sqla_type = to_sqla_type(expr.type())\n return sa.cast(sa.func.list_value(*map(t.translate, arg)), sqla_type)\n\n\ndef _struct_field(t, expr):\n op = expr.op()\n return sa.func.struct_extract(\n t.translate(op.arg),\n sa.text(repr(op.field)),\n type_=to_sqla_type(expr.type()),\n )\n\n\ndef _regex_extract(t, expr):\n string, pattern, index = map(t.translate, expr.op().args)\n result = sa.case(\n [\n (\n sa.func.regexp_matches(string, pattern),\n sa.func.regexp_extract(\n string,\n pattern,\n # DuckDB requires the index to be a constant so we compile\n # the value and inline it using sa.text\n sa.text(\n str(\n (index + 1).compile(\n compile_kwargs=dict(literal_binds=True)\n )\n )\n ),\n ),\n )\n ],\n else_=\"\",\n )\n return result\n\n\noperation_registry.update(\n {\n ops.ArrayColumn: _array_column,\n ops.ArrayConcat: fixed_arity('array_concat', 2),\n ops.ArrayIndex: fixed_arity('list_element', 2),\n ops.DayOfWeekName: unary(sa.func.dayname),\n ops.Literal: _literal,\n ops.Log2: unary(sa.func.log2),\n ops.Ln: unary(sa.func.ln),\n ops.Log: _log,\n # TODO: map operations, but DuckDB's maps are multimaps\n ops.Modulus: fixed_arity(operator.mod, 2),\n ops.Round: _round,\n ops.StructField: _struct_field,\n ops.TableColumn: _table_column,\n ops.TimestampDiff: fixed_arity('age', 2),\n ops.TimestampFromUNIX: _timestamp_from_unix,\n ops.Translate: fixed_arity('replace', 3),\n ops.TimestampNow: fixed_arity('now', 0),\n ops.RegexExtract: _regex_extract,\n ops.RegexReplace: fixed_arity(\"regexp_replace\", 3),\n }\n)\n", "path": "ibis/backends/duckdb/registry.py"}]}
2,176
122
gh_patches_debug_37493
rasdani/github-patches
git_diff
localstack__localstack-2593
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Kinesis stream records do not offer AWS-defined optional values <!-- Love localstack? Please consider supporting our collective: 👉 https://opencollective.com/localstack/donate --> # Type of request: This is a ... [ ] bug report [x] feature request # Detailed description Kinesis does not populate/offer to populate all potential values in a record. I have a lambda being triggered by kinesis events. The records being passed in are only those required by AWS, but there are no optional values being used. ## Expected behavior An event's kinesis object should be configurable to look as follows ``` { "ApproximateArrivalTimestamp": number, "Data": blob, "EncryptionType": "string", "PartitionKey": "string", "SequenceNumber": "string" } ``` ## Actual behavior In its current state, only required values are present. ``` { "Data": blob, "PartitionKey": "string", "SequenceNumber": "string" } ``` # Steps to reproduce ## Command used to start LocalStack `docker-compose up` with given `docker-compose.yml` ## Client code (AWS SDK code snippet, or sequence of "awslocal" commands) The source of this issue/feature request lies in the kinesis listener. AWS documentation of a kinesis stream record lists all values here: https://docs.aws.amazon.com/kinesis/latest/APIReference/API_Record.html We are only returning the required values. https://github.com/localstack/localstack/blob/78031dd65da9394f8b1b020be01ef02c63c433ee/localstack/services/kinesis/kinesis_listener.py#L85-L94 If there is no implicit reason for not offering optional values, I'm more than happy to submit a PR. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `localstack/services/kinesis/kinesis_listener.py` Content: ``` 1 import json 2 import random 3 from datetime import datetime 4 from requests.models import Response 5 from localstack import config 6 from localstack.utils.common import to_str, json_safe, clone 7 from localstack.utils.analytics import event_publisher 8 from localstack.services.awslambda import lambda_api 9 from localstack.services.generic_proxy import ProxyListener 10 11 # action headers 12 ACTION_PREFIX = 'Kinesis_20131202' 13 ACTION_PUT_RECORD = '%s.PutRecord' % ACTION_PREFIX 14 ACTION_PUT_RECORDS = '%s.PutRecords' % ACTION_PREFIX 15 ACTION_LIST_STREAMS = '%s.ListStreams' % ACTION_PREFIX 16 ACTION_CREATE_STREAM = '%s.CreateStream' % ACTION_PREFIX 17 ACTION_DELETE_STREAM = '%s.DeleteStream' % ACTION_PREFIX 18 ACTION_UPDATE_SHARD_COUNT = '%s.UpdateShardCount' % ACTION_PREFIX 19 20 # list of stream consumer details 21 STREAM_CONSUMERS = [] 22 23 24 class ProxyListenerKinesis(ProxyListener): 25 26 def forward_request(self, method, path, data, headers): 27 global STREAM_CONSUMERS 28 data = json.loads(to_str(data or '{}')) 29 action = headers.get('X-Amz-Target') 30 31 if action == '%s.RegisterStreamConsumer' % ACTION_PREFIX: 32 consumer = clone(data) 33 consumer['ConsumerStatus'] = 'ACTIVE' 34 consumer['ConsumerARN'] = '%s/consumer/%s' % (data['StreamARN'], data['ConsumerName']) 35 consumer['ConsumerCreationTimestamp'] = datetime.now() 36 consumer = json_safe(consumer) 37 STREAM_CONSUMERS.append(consumer) 38 return {'Consumer': consumer} 39 elif action == '%s.DeregisterStreamConsumer' % ACTION_PREFIX: 40 def consumer_matches(c): 41 stream_arn = data.get('StreamARN') 42 cons_name = data.get('ConsumerName') 43 cons_arn = data.get('ConsumerARN') 44 return (c.get('ConsumerARN') == cons_arn or 45 (c.get('StreamARN') == stream_arn and c.get('ConsumerName') == cons_name)) 46 STREAM_CONSUMERS = [c for c in STREAM_CONSUMERS if not consumer_matches(c)] 47 return {} 48 elif action == '%s.ListStreamConsumers' % ACTION_PREFIX: 49 result = { 50 'Consumers': [c for c in STREAM_CONSUMERS if c.get('StreamARN') == data.get('StreamARN')] 51 } 52 return result 53 elif action == '%s.DescribeStreamConsumer' % ACTION_PREFIX: 54 consumer_arn = data.get('ConsumerARN') or data['ConsumerName'] 55 consumer_name = data.get('ConsumerName') or data['ConsumerARN'] 56 result = { 57 'ConsumerDescription': { 58 'ConsumerARN': consumer_arn, 59 # 'ConsumerCreationTimestamp': number, 60 'ConsumerName': consumer_name, 61 'ConsumerStatus': 'ACTIVE', 62 'StreamARN': data.get('StreamARN') 63 } 64 } 65 return result 66 67 if random.random() < config.KINESIS_ERROR_PROBABILITY: 68 action = headers.get('X-Amz-Target') 69 if action in [ACTION_PUT_RECORD, ACTION_PUT_RECORDS]: 70 return kinesis_error_response(data, action) 71 return True 72 73 def return_response(self, method, path, data, headers, response): 74 action = headers.get('X-Amz-Target') 75 data = json.loads(to_str(data or '{}')) 76 77 records = [] 78 if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM): 79 event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == ACTION_CREATE_STREAM 80 else event_publisher.EVENT_KINESIS_DELETE_STREAM) 81 payload = {'n': event_publisher.get_hash(data.get('StreamName'))} 82 if action == ACTION_CREATE_STREAM: 83 payload['s'] = data.get('ShardCount') 84 event_publisher.fire_event(event_type, payload=payload) 85 elif action == ACTION_PUT_RECORD: 86 response_body = json.loads(to_str(response.content)) 87 event_record = { 88 'data': data['Data'], 
89 'partitionKey': data['PartitionKey'], 90 'sequenceNumber': response_body.get('SequenceNumber') 91 } 92 event_records = [event_record] 93 stream_name = data['StreamName'] 94 lambda_api.process_kinesis_records(event_records, stream_name) 95 elif action == ACTION_PUT_RECORDS: 96 event_records = [] 97 response_body = json.loads(to_str(response.content)) 98 if 'Records' in response_body: 99 response_records = response_body['Records'] 100 records = data['Records'] 101 for index in range(0, len(records)): 102 record = records[index] 103 event_record = { 104 'data': record['Data'], 105 'partitionKey': record['PartitionKey'], 106 'sequenceNumber': response_records[index].get('SequenceNumber') 107 } 108 event_records.append(event_record) 109 stream_name = data['StreamName'] 110 lambda_api.process_kinesis_records(event_records, stream_name) 111 elif action == ACTION_UPDATE_SHARD_COUNT: 112 # Currently kinesalite, which backs the Kinesis implementation for localstack, does 113 # not support UpdateShardCount: 114 # https://github.com/mhart/kinesalite/issues/61 115 # 116 # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it 117 # applies Kinesis resources. A Terraform run fails when this is not present. 118 # 119 # The code that follows just returns a successful response, bypassing the 400 120 # response that kinesalite returns. 121 # 122 response = Response() 123 response.status_code = 200 124 content = { 125 'CurrentShardCount': 1, 126 'StreamName': data['StreamName'], 127 'TargetShardCount': data['TargetShardCount'] 128 } 129 response.encoding = 'UTF-8' 130 response._content = json.dumps(content) 131 return response 132 133 134 # instantiate listener 135 UPDATE_KINESIS = ProxyListenerKinesis() 136 137 138 def kinesis_error_response(data, action): 139 error_response = Response() 140 141 if action == ACTION_PUT_RECORD: 142 error_response.status_code = 400 143 content = { 144 'ErrorCode': 'ProvisionedThroughputExceededException', 145 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.' 146 } 147 else: 148 error_response.status_code = 200 149 content = {'FailedRecordCount': 1, 'Records': []} 150 for record in data.get('Records', []): 151 content['Records'].append({ 152 'ErrorCode': 'ProvisionedThroughputExceededException', 153 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.' 154 }) 155 156 error_response._content = json.dumps(content) 157 return error_response 158 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/localstack/services/kinesis/kinesis_listener.py b/localstack/services/kinesis/kinesis_listener.py --- a/localstack/services/kinesis/kinesis_listener.py +++ b/localstack/services/kinesis/kinesis_listener.py @@ -1,9 +1,8 @@ import json import random -from datetime import datetime from requests.models import Response from localstack import config -from localstack.utils.common import to_str, json_safe, clone +from localstack.utils.common import to_str, json_safe, clone, timestamp_millis from localstack.utils.analytics import event_publisher from localstack.services.awslambda import lambda_api from localstack.services.generic_proxy import ProxyListener @@ -32,7 +31,7 @@ consumer = clone(data) consumer['ConsumerStatus'] = 'ACTIVE' consumer['ConsumerARN'] = '%s/consumer/%s' % (data['StreamARN'], data['ConsumerName']) - consumer['ConsumerCreationTimestamp'] = datetime.now() + consumer['ConsumerCreationTimestamp'] = timestamp_millis() consumer = json_safe(consumer) STREAM_CONSUMERS.append(consumer) return {'Consumer': consumer} @@ -85,7 +84,9 @@ elif action == ACTION_PUT_RECORD: response_body = json.loads(to_str(response.content)) event_record = { + 'approximateArrivalTimestamp': timestamp_millis(), 'data': data['Data'], + 'encryptionType': 'NONE', 'partitionKey': data['PartitionKey'], 'sequenceNumber': response_body.get('SequenceNumber') } @@ -101,7 +102,9 @@ for index in range(0, len(records)): record = records[index] event_record = { + 'approximateArrivalTimestamp': timestamp_millis(), 'data': record['Data'], + 'encryptionType': 'NONE', 'partitionKey': record['PartitionKey'], 'sequenceNumber': response_records[index].get('SequenceNumber') }
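The patch above adds the two optional fields from the issue, `approximateArrivalTimestamp` and `encryptionType`, to the records handed to `lambda_api.process_kinesis_records`. A sketch of the resulting record shape — the `timestamp_millis` stand-in, the payload, and the sequence number below are placeholders, not localstack's actual helper or real stream values:

```python
import time

def timestamp_millis():
    # Stand-in for localstack.utils.common.timestamp_millis(); the real helper
    # may format the timestamp differently.
    return int(time.time() * 1000)

# Shape of one event record as built in the patched PutRecord branch.
event_record = {
    'approximateArrivalTimestamp': timestamp_millis(),
    'data': 'aGVsbG8gd29ybGQ=',   # base64-encoded payload, as sent by PutRecord
    'encryptionType': 'NONE',
    'partitionKey': 'partitionkey-1',
    'sequenceNumber': '49590338271490256608559692538361571095921575989136588898',
}
print(event_record)
```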
{"golden_diff": "diff --git a/localstack/services/kinesis/kinesis_listener.py b/localstack/services/kinesis/kinesis_listener.py\n--- a/localstack/services/kinesis/kinesis_listener.py\n+++ b/localstack/services/kinesis/kinesis_listener.py\n@@ -1,9 +1,8 @@\n import json\n import random\n-from datetime import datetime\n from requests.models import Response\n from localstack import config\n-from localstack.utils.common import to_str, json_safe, clone\n+from localstack.utils.common import to_str, json_safe, clone, timestamp_millis\n from localstack.utils.analytics import event_publisher\n from localstack.services.awslambda import lambda_api\n from localstack.services.generic_proxy import ProxyListener\n@@ -32,7 +31,7 @@\n consumer = clone(data)\n consumer['ConsumerStatus'] = 'ACTIVE'\n consumer['ConsumerARN'] = '%s/consumer/%s' % (data['StreamARN'], data['ConsumerName'])\n- consumer['ConsumerCreationTimestamp'] = datetime.now()\n+ consumer['ConsumerCreationTimestamp'] = timestamp_millis()\n consumer = json_safe(consumer)\n STREAM_CONSUMERS.append(consumer)\n return {'Consumer': consumer}\n@@ -85,7 +84,9 @@\n elif action == ACTION_PUT_RECORD:\n response_body = json.loads(to_str(response.content))\n event_record = {\n+ 'approximateArrivalTimestamp': timestamp_millis(),\n 'data': data['Data'],\n+ 'encryptionType': 'NONE',\n 'partitionKey': data['PartitionKey'],\n 'sequenceNumber': response_body.get('SequenceNumber')\n }\n@@ -101,7 +102,9 @@\n for index in range(0, len(records)):\n record = records[index]\n event_record = {\n+ 'approximateArrivalTimestamp': timestamp_millis(),\n 'data': record['Data'],\n+ 'encryptionType': 'NONE',\n 'partitionKey': record['PartitionKey'],\n 'sequenceNumber': response_records[index].get('SequenceNumber')\n }\n", "issue": "Kinesis stream records do not offer AWS-defined optional values\n<!-- Love localstack? Please consider supporting our collective:\r\n\ud83d\udc49 https://opencollective.com/localstack/donate -->\r\n\r\n# Type of request: This is a ...\r\n\r\n[ ] bug report\r\n[x] feature request\r\n\r\n# Detailed description\r\n\r\nKinesis does not populate/offer to populate all potential values in a record. I have a lambda being triggered by kinesis events. The records being passed in are only those required by AWS, but there are no optional values being used.\r\n\r\n## Expected behavior\r\n\r\nAn event's kinesis object should be configurable to look as follows\r\n```\r\n{ \r\n \"ApproximateArrivalTimestamp\": number,\r\n \"Data\": blob,\r\n \"EncryptionType\": \"string\",\r\n \"PartitionKey\": \"string\",\r\n \"SequenceNumber\": \"string\"\r\n}\r\n```\r\n\r\n## Actual behavior\r\n\r\nIn its current state, only required values are present.\r\n```\r\n{ \r\n \"Data\": blob,\r\n \"PartitionKey\": \"string\",\r\n \"SequenceNumber\": \"string\"\r\n}\r\n```\r\n\r\n# Steps to reproduce\r\n\r\n## Command used to start LocalStack\r\n\r\n`docker-compose up` with given `docker-compose.yml`\r\n\r\n## Client code (AWS SDK code snippet, or sequence of \"awslocal\" commands)\r\n\r\nThe source of this issue/feature request lies in the kinesis listener. 
AWS documentation of a kinesis stream record lists all values here: https://docs.aws.amazon.com/kinesis/latest/APIReference/API_Record.html\r\n\r\nWe are only returning the required values.\r\nhttps://github.com/localstack/localstack/blob/78031dd65da9394f8b1b020be01ef02c63c433ee/localstack/services/kinesis/kinesis_listener.py#L85-L94\r\n\r\nIf there is no implicit reason for not offering optional values, I'm more than happy to submit a PR.\r\n\n", "before_files": [{"content": "import json\nimport random\nfrom datetime import datetime\nfrom requests.models import Response\nfrom localstack import config\nfrom localstack.utils.common import to_str, json_safe, clone\nfrom localstack.utils.analytics import event_publisher\nfrom localstack.services.awslambda import lambda_api\nfrom localstack.services.generic_proxy import ProxyListener\n\n# action headers\nACTION_PREFIX = 'Kinesis_20131202'\nACTION_PUT_RECORD = '%s.PutRecord' % ACTION_PREFIX\nACTION_PUT_RECORDS = '%s.PutRecords' % ACTION_PREFIX\nACTION_LIST_STREAMS = '%s.ListStreams' % ACTION_PREFIX\nACTION_CREATE_STREAM = '%s.CreateStream' % ACTION_PREFIX\nACTION_DELETE_STREAM = '%s.DeleteStream' % ACTION_PREFIX\nACTION_UPDATE_SHARD_COUNT = '%s.UpdateShardCount' % ACTION_PREFIX\n\n# list of stream consumer details\nSTREAM_CONSUMERS = []\n\n\nclass ProxyListenerKinesis(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n global STREAM_CONSUMERS\n data = json.loads(to_str(data or '{}'))\n action = headers.get('X-Amz-Target')\n\n if action == '%s.RegisterStreamConsumer' % ACTION_PREFIX:\n consumer = clone(data)\n consumer['ConsumerStatus'] = 'ACTIVE'\n consumer['ConsumerARN'] = '%s/consumer/%s' % (data['StreamARN'], data['ConsumerName'])\n consumer['ConsumerCreationTimestamp'] = datetime.now()\n consumer = json_safe(consumer)\n STREAM_CONSUMERS.append(consumer)\n return {'Consumer': consumer}\n elif action == '%s.DeregisterStreamConsumer' % ACTION_PREFIX:\n def consumer_matches(c):\n stream_arn = data.get('StreamARN')\n cons_name = data.get('ConsumerName')\n cons_arn = data.get('ConsumerARN')\n return (c.get('ConsumerARN') == cons_arn or\n (c.get('StreamARN') == stream_arn and c.get('ConsumerName') == cons_name))\n STREAM_CONSUMERS = [c for c in STREAM_CONSUMERS if not consumer_matches(c)]\n return {}\n elif action == '%s.ListStreamConsumers' % ACTION_PREFIX:\n result = {\n 'Consumers': [c for c in STREAM_CONSUMERS if c.get('StreamARN') == data.get('StreamARN')]\n }\n return result\n elif action == '%s.DescribeStreamConsumer' % ACTION_PREFIX:\n consumer_arn = data.get('ConsumerARN') or data['ConsumerName']\n consumer_name = data.get('ConsumerName') or data['ConsumerARN']\n result = {\n 'ConsumerDescription': {\n 'ConsumerARN': consumer_arn,\n # 'ConsumerCreationTimestamp': number,\n 'ConsumerName': consumer_name,\n 'ConsumerStatus': 'ACTIVE',\n 'StreamARN': data.get('StreamARN')\n }\n }\n return result\n\n if random.random() < config.KINESIS_ERROR_PROBABILITY:\n action = headers.get('X-Amz-Target')\n if action in [ACTION_PUT_RECORD, ACTION_PUT_RECORDS]:\n return kinesis_error_response(data, action)\n return True\n\n def return_response(self, method, path, data, headers, response):\n action = headers.get('X-Amz-Target')\n data = json.loads(to_str(data or '{}'))\n\n records = []\n if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):\n event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == ACTION_CREATE_STREAM\n else event_publisher.EVENT_KINESIS_DELETE_STREAM)\n payload = {'n': 
event_publisher.get_hash(data.get('StreamName'))}\n if action == ACTION_CREATE_STREAM:\n payload['s'] = data.get('ShardCount')\n event_publisher.fire_event(event_type, payload=payload)\n elif action == ACTION_PUT_RECORD:\n response_body = json.loads(to_str(response.content))\n event_record = {\n 'data': data['Data'],\n 'partitionKey': data['PartitionKey'],\n 'sequenceNumber': response_body.get('SequenceNumber')\n }\n event_records = [event_record]\n stream_name = data['StreamName']\n lambda_api.process_kinesis_records(event_records, stream_name)\n elif action == ACTION_PUT_RECORDS:\n event_records = []\n response_body = json.loads(to_str(response.content))\n if 'Records' in response_body:\n response_records = response_body['Records']\n records = data['Records']\n for index in range(0, len(records)):\n record = records[index]\n event_record = {\n 'data': record['Data'],\n 'partitionKey': record['PartitionKey'],\n 'sequenceNumber': response_records[index].get('SequenceNumber')\n }\n event_records.append(event_record)\n stream_name = data['StreamName']\n lambda_api.process_kinesis_records(event_records, stream_name)\n elif action == ACTION_UPDATE_SHARD_COUNT:\n # Currently kinesalite, which backs the Kinesis implementation for localstack, does\n # not support UpdateShardCount:\n # https://github.com/mhart/kinesalite/issues/61\n #\n # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it\n # applies Kinesis resources. A Terraform run fails when this is not present.\n #\n # The code that follows just returns a successful response, bypassing the 400\n # response that kinesalite returns.\n #\n response = Response()\n response.status_code = 200\n content = {\n 'CurrentShardCount': 1,\n 'StreamName': data['StreamName'],\n 'TargetShardCount': data['TargetShardCount']\n }\n response.encoding = 'UTF-8'\n response._content = json.dumps(content)\n return response\n\n\n# instantiate listener\nUPDATE_KINESIS = ProxyListenerKinesis()\n\n\ndef kinesis_error_response(data, action):\n error_response = Response()\n\n if action == ACTION_PUT_RECORD:\n error_response.status_code = 400\n content = {\n 'ErrorCode': 'ProvisionedThroughputExceededException',\n 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'\n }\n else:\n error_response.status_code = 200\n content = {'FailedRecordCount': 1, 'Records': []}\n for record in data.get('Records', []):\n content['Records'].append({\n 'ErrorCode': 'ProvisionedThroughputExceededException',\n 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'\n })\n\n error_response._content = json.dumps(content)\n return error_response\n", "path": "localstack/services/kinesis/kinesis_listener.py"}], "after_files": [{"content": "import json\nimport random\nfrom requests.models import Response\nfrom localstack import config\nfrom localstack.utils.common import to_str, json_safe, clone, timestamp_millis\nfrom localstack.utils.analytics import event_publisher\nfrom localstack.services.awslambda import lambda_api\nfrom localstack.services.generic_proxy import ProxyListener\n\n# action headers\nACTION_PREFIX = 'Kinesis_20131202'\nACTION_PUT_RECORD = '%s.PutRecord' % ACTION_PREFIX\nACTION_PUT_RECORDS = '%s.PutRecords' % ACTION_PREFIX\nACTION_LIST_STREAMS = '%s.ListStreams' % ACTION_PREFIX\nACTION_CREATE_STREAM = '%s.CreateStream' % ACTION_PREFIX\nACTION_DELETE_STREAM = '%s.DeleteStream' % ACTION_PREFIX\nACTION_UPDATE_SHARD_COUNT = '%s.UpdateShardCount' % ACTION_PREFIX\n\n# list of stream consumer details\nSTREAM_CONSUMERS = 
[]\n\n\nclass ProxyListenerKinesis(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n global STREAM_CONSUMERS\n data = json.loads(to_str(data or '{}'))\n action = headers.get('X-Amz-Target')\n\n if action == '%s.RegisterStreamConsumer' % ACTION_PREFIX:\n consumer = clone(data)\n consumer['ConsumerStatus'] = 'ACTIVE'\n consumer['ConsumerARN'] = '%s/consumer/%s' % (data['StreamARN'], data['ConsumerName'])\n consumer['ConsumerCreationTimestamp'] = timestamp_millis()\n consumer = json_safe(consumer)\n STREAM_CONSUMERS.append(consumer)\n return {'Consumer': consumer}\n elif action == '%s.DeregisterStreamConsumer' % ACTION_PREFIX:\n def consumer_matches(c):\n stream_arn = data.get('StreamARN')\n cons_name = data.get('ConsumerName')\n cons_arn = data.get('ConsumerARN')\n return (c.get('ConsumerARN') == cons_arn or\n (c.get('StreamARN') == stream_arn and c.get('ConsumerName') == cons_name))\n STREAM_CONSUMERS = [c for c in STREAM_CONSUMERS if not consumer_matches(c)]\n return {}\n elif action == '%s.ListStreamConsumers' % ACTION_PREFIX:\n result = {\n 'Consumers': [c for c in STREAM_CONSUMERS if c.get('StreamARN') == data.get('StreamARN')]\n }\n return result\n elif action == '%s.DescribeStreamConsumer' % ACTION_PREFIX:\n consumer_arn = data.get('ConsumerARN') or data['ConsumerName']\n consumer_name = data.get('ConsumerName') or data['ConsumerARN']\n result = {\n 'ConsumerDescription': {\n 'ConsumerARN': consumer_arn,\n # 'ConsumerCreationTimestamp': number,\n 'ConsumerName': consumer_name,\n 'ConsumerStatus': 'ACTIVE',\n 'StreamARN': data.get('StreamARN')\n }\n }\n return result\n\n if random.random() < config.KINESIS_ERROR_PROBABILITY:\n action = headers.get('X-Amz-Target')\n if action in [ACTION_PUT_RECORD, ACTION_PUT_RECORDS]:\n return kinesis_error_response(data, action)\n return True\n\n def return_response(self, method, path, data, headers, response):\n action = headers.get('X-Amz-Target')\n data = json.loads(to_str(data or '{}'))\n\n records = []\n if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):\n event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == ACTION_CREATE_STREAM\n else event_publisher.EVENT_KINESIS_DELETE_STREAM)\n payload = {'n': event_publisher.get_hash(data.get('StreamName'))}\n if action == ACTION_CREATE_STREAM:\n payload['s'] = data.get('ShardCount')\n event_publisher.fire_event(event_type, payload=payload)\n elif action == ACTION_PUT_RECORD:\n response_body = json.loads(to_str(response.content))\n event_record = {\n 'approximateArrivalTimestamp': timestamp_millis(),\n 'data': data['Data'],\n 'encryptionType': 'NONE',\n 'partitionKey': data['PartitionKey'],\n 'sequenceNumber': response_body.get('SequenceNumber')\n }\n event_records = [event_record]\n stream_name = data['StreamName']\n lambda_api.process_kinesis_records(event_records, stream_name)\n elif action == ACTION_PUT_RECORDS:\n event_records = []\n response_body = json.loads(to_str(response.content))\n if 'Records' in response_body:\n response_records = response_body['Records']\n records = data['Records']\n for index in range(0, len(records)):\n record = records[index]\n event_record = {\n 'approximateArrivalTimestamp': timestamp_millis(),\n 'data': record['Data'],\n 'encryptionType': 'NONE',\n 'partitionKey': record['PartitionKey'],\n 'sequenceNumber': response_records[index].get('SequenceNumber')\n }\n event_records.append(event_record)\n stream_name = data['StreamName']\n lambda_api.process_kinesis_records(event_records, stream_name)\n elif action == 
ACTION_UPDATE_SHARD_COUNT:\n # Currently kinesalite, which backs the Kinesis implementation for localstack, does\n # not support UpdateShardCount:\n # https://github.com/mhart/kinesalite/issues/61\n #\n # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it\n # applies Kinesis resources. A Terraform run fails when this is not present.\n #\n # The code that follows just returns a successful response, bypassing the 400\n # response that kinesalite returns.\n #\n response = Response()\n response.status_code = 200\n content = {\n 'CurrentShardCount': 1,\n 'StreamName': data['StreamName'],\n 'TargetShardCount': data['TargetShardCount']\n }\n response.encoding = 'UTF-8'\n response._content = json.dumps(content)\n return response\n\n\n# instantiate listener\nUPDATE_KINESIS = ProxyListenerKinesis()\n\n\ndef kinesis_error_response(data, action):\n error_response = Response()\n\n if action == ACTION_PUT_RECORD:\n error_response.status_code = 400\n content = {\n 'ErrorCode': 'ProvisionedThroughputExceededException',\n 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'\n }\n else:\n error_response.status_code = 200\n content = {'FailedRecordCount': 1, 'Records': []}\n for record in data.get('Records', []):\n content['Records'].append({\n 'ErrorCode': 'ProvisionedThroughputExceededException',\n 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'\n })\n\n error_response._content = json.dumps(content)\n return error_response\n", "path": "localstack/services/kinesis/kinesis_listener.py"}]}
2,458
429
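The golden diff in the record above enriches the Kinesis event records handed to Lambda with the optional `approximateArrivalTimestamp` and `encryptionType` fields requested in the issue. A minimal, runnable sketch of the patched record construction follows; the `timestamp_millis` helper here is only a stand-in for `localstack.utils.common.timestamp_millis`, whose exact return format may differ (e.g. an ISO-8601 string):

```python
import time


def timestamp_millis():
    # Stand-in for localstack.utils.common.timestamp_millis (assumed here to
    # return epoch milliseconds; the real helper's format may differ).
    return int(time.time() * 1000)


def build_put_record_event(data, sequence_number):
    # Mirrors the patched ACTION_PUT_RECORD branch: the required data,
    # partitionKey and sequenceNumber fields plus the optional
    # approximateArrivalTimestamp / encryptionType values.
    return {
        'approximateArrivalTimestamp': timestamp_millis(),
        'data': data['Data'],
        'encryptionType': 'NONE',
        'partitionKey': data['PartitionKey'],
        'sequenceNumber': sequence_number,
    }


print(build_put_record_event({'Data': 'aGVsbG8=', 'PartitionKey': 'pk-1'}, '4954511524349'))
```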
gh_patches_debug_10304
rasdani/github-patches
git_diff
google__openhtf-393
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Nicer failure mode for misuse of StoreInModule. See #389. > ...it's nonsensical to have no dots, but something one might accidentally do if you forget to do the %s/name business, maybe throw in a quick format check on the inputs of add_argument() to fail-fast rather than raise an obscure error here? > @grybmadsci Document util/argv.py There are no docs on how to use this module. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `openhtf/util/argv.py` Content: ``` 1 """Utilities for handling command line arguments. 2 3 StoreInModule: 4 Enables emulating a gflags-esque API (flag affects global value), but one 5 doesn't necessarily need to use flags to set values. 6 7 Example usage: 8 DEFAULT_VALUE = 0 9 ARG_PARSER = argv.ModuleParser() 10 ARG_PARSER.add_argument( 11 '--override-value', action=argv.StoreInModule, 12 default=DEFAULT_VALUE, target='%s.DEFAULT_VALUE' % __name__) 13 14 Then in an entry point (main() function), use that parser as a parent: 15 parser = argparse.ArgumentParser(parents=[other_module.ARG_PARSER]) 16 parser.parse_args() 17 """ 18 19 import argparse 20 21 22 def ModuleParser(): 23 return argparse.ArgumentParser(add_help=False) 24 25 26 class StoreInModule(argparse.Action): 27 28 def __init__(self, *args, **kwargs): 29 self._tgt_mod, self._tgt_attr = kwargs.pop('target').rsplit('.', 1) 30 proxy_cls = kwargs.pop('proxy', None) 31 if proxy_cls is not None: 32 self._proxy = proxy_cls(*args, **kwargs) 33 super(StoreInModule, self).__init__(*args, **kwargs) 34 35 def __call__(self, parser, namespace, values, option_string=None): 36 if hasattr(self, '_proxy'): 37 values = self._proxy(parser, namespace, values) 38 base, mod = self._tgt_mod.rsplit('.', 1) 39 module = getattr(__import__(base, fromlist=[mod]), mod) 40 setattr(module, self._tgt_attr, values) 41 42 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openhtf/util/argv.py b/openhtf/util/argv.py --- a/openhtf/util/argv.py +++ b/openhtf/util/argv.py @@ -35,7 +35,10 @@ def __call__(self, parser, namespace, values, option_string=None): if hasattr(self, '_proxy'): values = self._proxy(parser, namespace, values) - base, mod = self._tgt_mod.rsplit('.', 1) - module = getattr(__import__(base, fromlist=[mod]), mod) + if '.' in self._tgt_mod: + base, mod = self._tgt_mod.rsplit('.', 1) + module = getattr(__import__(base, fromlist=[mod]), mod) + else: + module = __import__(self._tgt_mod) setattr(module, self._tgt_attr, values)
{"golden_diff": "diff --git a/openhtf/util/argv.py b/openhtf/util/argv.py\n--- a/openhtf/util/argv.py\n+++ b/openhtf/util/argv.py\n@@ -35,7 +35,10 @@\n def __call__(self, parser, namespace, values, option_string=None):\n if hasattr(self, '_proxy'):\n values = self._proxy(parser, namespace, values)\n- base, mod = self._tgt_mod.rsplit('.', 1)\n- module = getattr(__import__(base, fromlist=[mod]), mod)\n+ if '.' in self._tgt_mod:\n+ base, mod = self._tgt_mod.rsplit('.', 1)\n+ module = getattr(__import__(base, fromlist=[mod]), mod)\n+ else:\n+ module = __import__(self._tgt_mod)\n setattr(module, self._tgt_attr, values)\n", "issue": "Nicer failure mode for misuse of StoreInModule.\nSee #389.\n\n> ...it's nonsensical to have no dots, but something one might accidentally do if you forget to do the %s/name business, maybe throw in a quick format check on the inputs of add_argument() to fail-fast rather than raise an obscure error here?\n> @grybmadsci\n\nDocument util/argv.py\nThere are no docs on how to use this module.\n\n", "before_files": [{"content": "\"\"\"Utilities for handling command line arguments.\n\nStoreInModule:\n Enables emulating a gflags-esque API (flag affects global value), but one\n doesn't necessarily need to use flags to set values.\n \n Example usage:\n DEFAULT_VALUE = 0\n ARG_PARSER = argv.ModuleParser()\n ARG_PARSER.add_argument(\n '--override-value', action=argv.StoreInModule,\n default=DEFAULT_VALUE, target='%s.DEFAULT_VALUE' % __name__)\n\n Then in an entry point (main() function), use that parser as a parent:\n parser = argparse.ArgumentParser(parents=[other_module.ARG_PARSER])\n parser.parse_args()\n\"\"\"\n\nimport argparse\n\n\ndef ModuleParser():\n return argparse.ArgumentParser(add_help=False)\n\n\nclass StoreInModule(argparse.Action):\n\n def __init__(self, *args, **kwargs):\n self._tgt_mod, self._tgt_attr = kwargs.pop('target').rsplit('.', 1)\n proxy_cls = kwargs.pop('proxy', None)\n if proxy_cls is not None:\n self._proxy = proxy_cls(*args, **kwargs)\n super(StoreInModule, self).__init__(*args, **kwargs)\n\n def __call__(self, parser, namespace, values, option_string=None):\n if hasattr(self, '_proxy'):\n values = self._proxy(parser, namespace, values)\n base, mod = self._tgt_mod.rsplit('.', 1)\n module = getattr(__import__(base, fromlist=[mod]), mod)\n setattr(module, self._tgt_attr, values)\n\n", "path": "openhtf/util/argv.py"}], "after_files": [{"content": "\"\"\"Utilities for handling command line arguments.\n\nStoreInModule:\n Enables emulating a gflags-esque API (flag affects global value), but one\n doesn't necessarily need to use flags to set values.\n \n Example usage:\n DEFAULT_VALUE = 0\n ARG_PARSER = argv.ModuleParser()\n ARG_PARSER.add_argument(\n '--override-value', action=argv.StoreInModule,\n default=DEFAULT_VALUE, target='%s.DEFAULT_VALUE' % __name__)\n\n Then in an entry point (main() function), use that parser as a parent:\n parser = argparse.ArgumentParser(parents=[other_module.ARG_PARSER])\n parser.parse_args()\n\"\"\"\n\nimport argparse\n\n\ndef ModuleParser():\n return argparse.ArgumentParser(add_help=False)\n\n\nclass StoreInModule(argparse.Action):\n\n def __init__(self, *args, **kwargs):\n self._tgt_mod, self._tgt_attr = kwargs.pop('target').rsplit('.', 1)\n proxy_cls = kwargs.pop('proxy', None)\n if proxy_cls is not None:\n self._proxy = proxy_cls(*args, **kwargs)\n super(StoreInModule, self).__init__(*args, **kwargs)\n\n def __call__(self, parser, namespace, values, option_string=None):\n if hasattr(self, '_proxy'):\n values = 
self._proxy(parser, namespace, values)\n if '.' in self._tgt_mod:\n base, mod = self._tgt_mod.rsplit('.', 1)\n module = getattr(__import__(base, fromlist=[mod]), mod)\n else:\n module = __import__(self._tgt_mod)\n setattr(module, self._tgt_attr, values)\n\n", "path": "openhtf/util/argv.py"}]}
761
193
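The patch in the record above makes `StoreInModule.__call__` tolerate a `target` whose module part contains no dots, instead of failing on `rsplit`. The added import-resolution branching can be exercised on its own; the `resolve_module` helper below is illustrative and not part of openhtf:

```python
import sys


def resolve_module(tgt_mod):
    # Same branching the patch introduces: dotted paths need the
    # __import__(..., fromlist=[...]) trick, a bare top-level module
    # name can be imported directly.
    if '.' in tgt_mod:
        base, mod = tgt_mod.rsplit('.', 1)
        return getattr(__import__(base, fromlist=[mod]), mod)
    return __import__(tgt_mod)


assert resolve_module('os.path') is sys.modules['os.path']
assert resolve_module('json') is sys.modules['json']
print('both dotted and bare module targets resolve')
```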
gh_patches_debug_8626
rasdani/github-patches
git_diff
holoviz__panel-2539
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- panel serve --autoreload does not work consistently with imported modules #### ALL software version info - python: 3.9.2 - holoviews: 1.14.4 - jupyter_bokeh: 3.0.2 - ipython 7.22.0 - jupyterlab: 3.0.16 - pyviz-comms: 2.0.1 - MacOS: 11.4 - VSCode Insiders: - Version: 1.59.0-insider - Commit: f6df685c62da50886f7540cbf768ed4333d58bea - Date: 2021-07-13T05:15:28.368Z - Electron: 12.0.13 - Chrome: 89.0.4389.128 - Node.js: 14.16.0 - V8: 8.9.255.25-electron.0 - OS: Darwin x64 20.5.0 #### Description of expected behavior and the observed behavior `panel --autoreload` does not trigger a reload when saving changes to an imported module. However, if I first save the actual served module (triggering a reload), *then* subsequent saves of imported modules will also trigger reloads. It's as if the "registry" of imports is not populated when you first start serving, and only becomes populated after the first reload. #### Complete, minimal, self-contained example code that reproduces the issue ``` # test_panel.py import holoviews as hv import panel as pn data = [('a', 10), ('b', 7)] def test(): return pn.panel(hv.Bars(data)) # test.py import test_panel test_panel.test().servable() ``` Try `panel serve --autoreload` with `test.py`. While serving, change the data values in `test_panel.py` and save. No reload. Try the same thing again, but this time save `test.py` immediately after you begin serving (you don't have to change anytime, just save it to trigger a server reload). Now if you change the values in `test_panel.py`, it will reload. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `panel/io/reload.py` Content: ``` 1 import fnmatch 2 import os 3 import sys 4 import types 5 6 from contextlib import contextmanager 7 from functools import partial 8 9 from .callbacks import PeriodicCallback 10 from .state import state 11 12 _watched_files = set() 13 _modules = set() 14 _callbacks = {} 15 16 # List of paths to ignore 17 DEFAULT_FOLDER_BLACKLIST = [ 18 "**/.*", 19 "**/anaconda", 20 "**/anaconda2", 21 "**/anaconda3", 22 "**/dist-packages", 23 "**/miniconda", 24 "**/miniconda2", 25 "**/miniconda3", 26 "**/node_modules", 27 "**/pyenv", 28 "**/site-packages", 29 "**/venv", 30 "**/virtualenv", 31 ] 32 33 34 def in_blacklist(filepath): 35 return any( 36 file_is_in_folder_glob(filepath, blacklisted_folder) 37 for blacklisted_folder in DEFAULT_FOLDER_BLACKLIST 38 ) 39 40 def file_is_in_folder_glob(filepath, folderpath_glob): 41 """ 42 Test whether a file is in some folder with globbing support. 43 44 Parameters 45 ---------- 46 filepath : str 47 A file path. 48 folderpath_glob: str 49 A path to a folder that may include globbing. 50 """ 51 # Make the glob always end with "/*" so we match files inside subfolders of 52 # folderpath_glob. 53 if not folderpath_glob.endswith("*"): 54 if folderpath_glob.endswith("/"): 55 folderpath_glob += "*" 56 else: 57 folderpath_glob += "/*" 58 59 file_dir = os.path.dirname(filepath) + "/" 60 return fnmatch.fnmatch(file_dir, folderpath_glob) 61 62 def autoreload_watcher(): 63 """ 64 Installs a periodic callback which checks for changes in watched 65 files and sys.modules. 66 """ 67 cb = partial(_reload_on_update, {}) 68 _callbacks[state.curdoc] = pcb = PeriodicCallback(callback=cb) 69 pcb.start() 70 71 def watch(filename): 72 """ 73 Add a file to the watch list. 
74 75 All imported modules are watched by default. 76 """ 77 _watched_files.add(filename) 78 79 @contextmanager 80 def record_modules(): 81 """ 82 Records modules which are currently imported. 83 """ 84 modules = set(sys.modules) 85 yield 86 if _modules: 87 return 88 for module_name in set(sys.modules).difference(modules): 89 if module_name.startswith('bokeh_app'): 90 continue 91 module = sys.modules[module_name] 92 try: 93 spec = getattr(module, "__spec__", None) 94 if spec is None: 95 filepath = getattr(module, "__file__", None) 96 if filepath is None: # no user 97 continue 98 else: 99 filepath = spec.origin 100 101 filepath = os.path.abspath(filepath) 102 103 if filepath is None or in_blacklist(filepath): 104 continue 105 106 if not os.path.isfile(filepath): # e.g. built-in 107 continue 108 _modules.add(module_name) 109 except Exception: 110 continue 111 112 def _reload(module=None): 113 if module is not None: 114 for module in _modules: 115 del sys.modules[module] 116 if state.curdoc in _callbacks: 117 _callbacks[state.curdoc].stop() 118 del _callbacks[state.curdoc] 119 if state.location: 120 state.location.reload = True 121 122 def _check_file(modify_times, path, module=None): 123 try: 124 modified = os.stat(path).st_mtime 125 except Exception: 126 return 127 if path not in modify_times: 128 modify_times[path] = modified 129 return 130 if modify_times[path] != modified: 131 _reload(module) 132 modify_times[path] = modified 133 134 def _reload_on_update(modify_times): 135 for module_name in _modules: 136 # Some modules play games with sys.modules (e.g. email/__init__.py 137 # in the standard library), and occasionally this can cause strange 138 # failures in getattr. Just ignore anything that's not an ordinary 139 # module. 140 if not module_name in sys.modules: 141 continue 142 module = sys.modules[module_name] 143 if not isinstance(module, types.ModuleType): 144 continue 145 path = getattr(module, "__file__", None) 146 if not path: 147 continue 148 if path.endswith(".pyc") or path.endswith(".pyo"): 149 path = path[:-1] 150 _check_file(modify_times, path, module_name) 151 for path in _watched_files: 152 _check_file(modify_times, path) 153 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/panel/io/reload.py b/panel/io/reload.py --- a/panel/io/reload.py +++ b/panel/io/reload.py @@ -113,11 +113,12 @@ if module is not None: for module in _modules: del sys.modules[module] - if state.curdoc in _callbacks: - _callbacks[state.curdoc].stop() - del _callbacks[state.curdoc] - if state.location: - state.location.reload = True + for cb in _callbacks.values(): + cb.stop() + _callbacks.clear() + state.location.reload = True + for loc in state._locations.values(): + loc.reload = True def _check_file(modify_times, path, module=None): try:
{"golden_diff": "diff --git a/panel/io/reload.py b/panel/io/reload.py\n--- a/panel/io/reload.py\n+++ b/panel/io/reload.py\n@@ -113,11 +113,12 @@\n if module is not None:\n for module in _modules:\n del sys.modules[module]\n- if state.curdoc in _callbacks:\n- _callbacks[state.curdoc].stop()\n- del _callbacks[state.curdoc]\n- if state.location:\n- state.location.reload = True\n+ for cb in _callbacks.values():\n+ cb.stop()\n+ _callbacks.clear()\n+ state.location.reload = True\n+ for loc in state._locations.values():\n+ loc.reload = True\n \n def _check_file(modify_times, path, module=None):\n try:\n", "issue": "panel serve --autoreload does not work consistently with imported modules\n#### ALL software version info\r\n\r\n- python: 3.9.2\r\n - holoviews: 1.14.4\r\n - jupyter_bokeh: 3.0.2\r\n - ipython 7.22.0\r\n - jupyterlab: 3.0.16\r\n - pyviz-comms: 2.0.1\r\n\r\n- MacOS: 11.4\r\n\r\n- VSCode Insiders: \r\n - Version: 1.59.0-insider\r\n - Commit: f6df685c62da50886f7540cbf768ed4333d58bea\r\n - Date: 2021-07-13T05:15:28.368Z\r\n - Electron: 12.0.13\r\n - Chrome: 89.0.4389.128\r\n - Node.js: 14.16.0\r\n - V8: 8.9.255.25-electron.0\r\n - OS: Darwin x64 20.5.0\r\n\r\n#### Description of expected behavior and the observed behavior\r\n\r\n`panel --autoreload` does not trigger a reload when saving changes to an imported module.\r\n\r\nHowever, if I first save the actual served module (triggering a reload), *then* subsequent saves of imported modules will also trigger reloads. It's as if the \"registry\" of imports is not populated when you first start serving, and only becomes populated after the first reload.\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n\r\n```\r\n# test_panel.py\r\nimport holoviews as hv\r\nimport panel as pn\r\n\r\ndata = [('a', 10), ('b', 7)]\r\ndef test():\r\n return pn.panel(hv.Bars(data))\r\n\r\n# test.py\r\nimport test_panel\r\ntest_panel.test().servable()\r\n```\r\n\r\nTry `panel serve --autoreload` with `test.py`. While serving, change the data values in `test_panel.py` and save. No reload.\r\n\r\nTry the same thing again, but this time save `test.py` immediately after you begin serving (you don't have to change anytime, just save it to trigger a server reload). 
Now if you change the values in `test_panel.py`, it will reload.\n", "before_files": [{"content": "import fnmatch\nimport os\nimport sys\nimport types\n\nfrom contextlib import contextmanager\nfrom functools import partial\n\nfrom .callbacks import PeriodicCallback\nfrom .state import state\n\n_watched_files = set()\n_modules = set()\n_callbacks = {}\n\n# List of paths to ignore\nDEFAULT_FOLDER_BLACKLIST = [\n \"**/.*\",\n \"**/anaconda\",\n \"**/anaconda2\",\n \"**/anaconda3\",\n \"**/dist-packages\",\n \"**/miniconda\",\n \"**/miniconda2\",\n \"**/miniconda3\",\n \"**/node_modules\",\n \"**/pyenv\",\n \"**/site-packages\",\n \"**/venv\",\n \"**/virtualenv\",\n]\n\n\ndef in_blacklist(filepath):\n return any(\n file_is_in_folder_glob(filepath, blacklisted_folder)\n for blacklisted_folder in DEFAULT_FOLDER_BLACKLIST\n )\n\ndef file_is_in_folder_glob(filepath, folderpath_glob):\n \"\"\"\n Test whether a file is in some folder with globbing support.\n\n Parameters\n ----------\n filepath : str\n A file path.\n folderpath_glob: str\n A path to a folder that may include globbing.\n \"\"\"\n # Make the glob always end with \"/*\" so we match files inside subfolders of\n # folderpath_glob.\n if not folderpath_glob.endswith(\"*\"):\n if folderpath_glob.endswith(\"/\"):\n folderpath_glob += \"*\"\n else:\n folderpath_glob += \"/*\"\n\n file_dir = os.path.dirname(filepath) + \"/\"\n return fnmatch.fnmatch(file_dir, folderpath_glob)\n\ndef autoreload_watcher():\n \"\"\"\n Installs a periodic callback which checks for changes in watched\n files and sys.modules.\n \"\"\"\n cb = partial(_reload_on_update, {})\n _callbacks[state.curdoc] = pcb = PeriodicCallback(callback=cb)\n pcb.start()\n\ndef watch(filename):\n \"\"\"\n Add a file to the watch list.\n\n All imported modules are watched by default.\n \"\"\"\n _watched_files.add(filename)\n\n@contextmanager\ndef record_modules():\n \"\"\"\n Records modules which are currently imported.\n \"\"\"\n modules = set(sys.modules)\n yield\n if _modules:\n return\n for module_name in set(sys.modules).difference(modules):\n if module_name.startswith('bokeh_app'):\n continue\n module = sys.modules[module_name]\n try:\n spec = getattr(module, \"__spec__\", None)\n if spec is None:\n filepath = getattr(module, \"__file__\", None)\n if filepath is None: # no user\n continue\n else:\n filepath = spec.origin\n\n filepath = os.path.abspath(filepath)\n\n if filepath is None or in_blacklist(filepath):\n continue\n\n if not os.path.isfile(filepath): # e.g. built-in\n continue\n _modules.add(module_name)\n except Exception:\n continue\n\ndef _reload(module=None):\n if module is not None:\n for module in _modules:\n del sys.modules[module]\n if state.curdoc in _callbacks:\n _callbacks[state.curdoc].stop()\n del _callbacks[state.curdoc]\n if state.location:\n state.location.reload = True\n\ndef _check_file(modify_times, path, module=None):\n try:\n modified = os.stat(path).st_mtime\n except Exception:\n return\n if path not in modify_times:\n modify_times[path] = modified\n return\n if modify_times[path] != modified:\n _reload(module)\n modify_times[path] = modified\n\ndef _reload_on_update(modify_times):\n for module_name in _modules:\n # Some modules play games with sys.modules (e.g. email/__init__.py\n # in the standard library), and occasionally this can cause strange\n # failures in getattr. 
Just ignore anything that's not an ordinary\n # module.\n if not module_name in sys.modules:\n continue\n module = sys.modules[module_name]\n if not isinstance(module, types.ModuleType):\n continue\n path = getattr(module, \"__file__\", None)\n if not path:\n continue\n if path.endswith(\".pyc\") or path.endswith(\".pyo\"):\n path = path[:-1]\n _check_file(modify_times, path, module_name)\n for path in _watched_files:\n _check_file(modify_times, path)\n", "path": "panel/io/reload.py"}], "after_files": [{"content": "import fnmatch\nimport os\nimport sys\nimport types\n\nfrom contextlib import contextmanager\nfrom functools import partial\n\nfrom .callbacks import PeriodicCallback\nfrom .state import state\n\n_watched_files = set()\n_modules = set()\n_callbacks = {}\n\n# List of paths to ignore\nDEFAULT_FOLDER_BLACKLIST = [\n \"**/.*\",\n \"**/anaconda\",\n \"**/anaconda2\",\n \"**/anaconda3\",\n \"**/dist-packages\",\n \"**/miniconda\",\n \"**/miniconda2\",\n \"**/miniconda3\",\n \"**/node_modules\",\n \"**/pyenv\",\n \"**/site-packages\",\n \"**/venv\",\n \"**/virtualenv\",\n]\n\n\ndef in_blacklist(filepath):\n return any(\n file_is_in_folder_glob(filepath, blacklisted_folder)\n for blacklisted_folder in DEFAULT_FOLDER_BLACKLIST\n )\n\ndef file_is_in_folder_glob(filepath, folderpath_glob):\n \"\"\"\n Test whether a file is in some folder with globbing support.\n\n Parameters\n ----------\n filepath : str\n A file path.\n folderpath_glob: str\n A path to a folder that may include globbing.\n \"\"\"\n # Make the glob always end with \"/*\" so we match files inside subfolders of\n # folderpath_glob.\n if not folderpath_glob.endswith(\"*\"):\n if folderpath_glob.endswith(\"/\"):\n folderpath_glob += \"*\"\n else:\n folderpath_glob += \"/*\"\n\n file_dir = os.path.dirname(filepath) + \"/\"\n return fnmatch.fnmatch(file_dir, folderpath_glob)\n\ndef autoreload_watcher():\n \"\"\"\n Installs a periodic callback which checks for changes in watched\n files and sys.modules.\n \"\"\"\n cb = partial(_reload_on_update, {})\n _callbacks[state.curdoc] = pcb = PeriodicCallback(callback=cb)\n pcb.start()\n\ndef watch(filename):\n \"\"\"\n Add a file to the watch list.\n\n All imported modules are watched by default.\n \"\"\"\n _watched_files.add(filename)\n\n@contextmanager\ndef record_modules():\n \"\"\"\n Records modules which are currently imported.\n \"\"\"\n modules = set(sys.modules)\n yield\n if _modules:\n return\n for module_name in set(sys.modules).difference(modules):\n if module_name.startswith('bokeh_app'):\n continue\n module = sys.modules[module_name]\n try:\n spec = getattr(module, \"__spec__\", None)\n if spec is None:\n filepath = getattr(module, \"__file__\", None)\n if filepath is None: # no user\n continue\n else:\n filepath = spec.origin\n\n filepath = os.path.abspath(filepath)\n\n if filepath is None or in_blacklist(filepath):\n continue\n\n if not os.path.isfile(filepath): # e.g. 
built-in\n continue\n _modules.add(module_name)\n except Exception:\n continue\n\ndef _reload(module=None):\n if module is not None:\n for module in _modules:\n del sys.modules[module]\n for cb in _callbacks.values():\n cb.stop()\n _callbacks.clear()\n state.location.reload = True\n for loc in state._locations.values():\n loc.reload = True\n\ndef _check_file(modify_times, path, module=None):\n try:\n modified = os.stat(path).st_mtime\n except Exception:\n return\n if path not in modify_times:\n modify_times[path] = modified\n return\n if modify_times[path] != modified:\n _reload(module)\n modify_times[path] = modified\n\ndef _reload_on_update(modify_times):\n for module_name in _modules:\n # Some modules play games with sys.modules (e.g. email/__init__.py\n # in the standard library), and occasionally this can cause strange\n # failures in getattr. Just ignore anything that's not an ordinary\n # module.\n if not module_name in sys.modules:\n continue\n module = sys.modules[module_name]\n if not isinstance(module, types.ModuleType):\n continue\n path = getattr(module, \"__file__\", None)\n if not path:\n continue\n if path.endswith(\".pyc\") or path.endswith(\".pyo\"):\n path = path[:-1]\n _check_file(modify_times, path, module_name)\n for path in _watched_files:\n _check_file(modify_times, path)\n", "path": "panel/io/reload.py"}]}
2,096
177
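The patch in the record above fixes `panel serve --autoreload` by stopping every registered periodic callback and flagging a reload on every tracked location, rather than only `state.curdoc`. The module-eviction step that `_reload` performs for each watched module can be demonstrated in isolation; the temporary `demo_mod` module below is purely illustrative:

```python
import importlib
import sys
import tempfile
import textwrap
from pathlib import Path

# Create a tiny throwaway module so the snippet is self-contained.
mod_dir = Path(tempfile.mkdtemp())
(mod_dir / 'demo_mod.py').write_text(textwrap.dedent("""
    import time
    LOADED_AT = time.perf_counter()
"""))
sys.path.insert(0, str(mod_dir))

demo_mod = importlib.import_module('demo_mod')
first_load = demo_mod.LOADED_AT

# What _reload does per watched module: evict it from sys.modules so the
# next import re-executes the (possibly edited) source file.
del sys.modules['demo_mod']
demo_mod = importlib.import_module('demo_mod')

print(demo_mod.LOADED_AT != first_load)  # True: the module body ran again
```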
gh_patches_debug_45174
rasdani/github-patches
git_diff
pyodide__pyodide-1215
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Performance issues with buffer conversions from python to javascript Hello @hoodmane, thank you for the magnificent APIs (deep/shallowCopyToJavaScript) that would eliminate the heavy cost of big array conversion from python to Javascript. I think the shallowCopy version is designed for memory address reference instead of bulks of content copy. But I find the perfomance not so good as I've imagined, which is displayed as the following: ![image](https://user-images.githubusercontent.com/34263430/107121264-74600900-68cc-11eb-8e92-da3b8b09b5f4.png) It takes about 1~7 seconds for shallowCopyToJavascript() to complete the memory address reference and maybe some necessary meta data copy I guess. However, it's not adequate for a realtime computation. Any suggestions for better conversion performance? _Originally posted by @daoxian in https://github.com/iodide-project/pyodide/issues/1167#issuecomment-774488338_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py` Content: ``` 1 from docutils import nodes 2 from docutils.parsers.rst import Directive, Parser as RstParser 3 from docutils.statemachine import StringList 4 from docutils.utils import new_document 5 6 from collections import OrderedDict 7 8 from sphinx import addnodes 9 from sphinx.util import rst 10 from sphinx.util.docutils import switch_source_input 11 from sphinx.ext.autosummary import autosummary_table, extract_summary 12 13 from sphinx_js.jsdoc import Analyzer as JsAnalyzer 14 from sphinx_js.ir import Function 15 from sphinx_js.parsers import path_and_formal_params, PathVisitor 16 from sphinx_js.renderers import AutoFunctionRenderer, AutoAttributeRenderer 17 18 19 class PyodideAnalyzer: 20 """JsDoc automatically instantiates the JsAnalyzer. Rather than subclassing 21 or monkey patching it, we use composition (see getattr impl). 22 23 The main extra thing we do is reorganize the doclets based on our globals / 24 functions / attributes scheme. This we use to subdivide the sections in our 25 summary. We store these in the "js_docs" field which is the only field that 26 we access later. 27 """ 28 29 def __init__(self, analyzer: JsAnalyzer) -> None: 30 self.inner = analyzer 31 self.create_js_doclets() 32 33 def __getattr__(self, key): 34 return getattr(self.inner, key) 35 36 def longname_to_path(self, name): 37 """Convert the longname field produced by jsdoc to a path appropriate to use 38 with _sphinxjs_analyzer.get_object. Based on: 39 https://github.com/mozilla/sphinx-js/blob/3.1/sphinx_js/jsdoc.py#L181 40 """ 41 return PathVisitor().visit(path_and_formal_params["path"].parse(name)) 42 43 def get_object_from_json(self, json): 44 """Look up the JsDoc IR object corresponding to this object. We use the 45 "kind" field to decide whether the object is a "function" or an 46 "attribute". We use longname_to_path to convert the path into a list of 47 path components which JsAnalyzer.get_object requires. 48 """ 49 path = self.longname_to_path(json["longname"]) 50 kind = "function" if json["kind"] == "function" else "attribute" 51 obj = self.inner.get_object(path, kind) 52 obj.kind = kind 53 return obj 54 55 def create_js_doclets(self): 56 """Search through the doclets generated by JsDoc and categorize them by 57 summary section. Skip docs labeled as "@private". 
58 """ 59 60 def get_val(): 61 return OrderedDict([["attribute", []], ["function", []]]) 62 63 self.js_docs = {key: get_val() for key in ["globals", "pyodide", "PyProxy"]} 64 items = {"PyProxy": []} 65 for (key, group) in self._doclets_by_class.items(): 66 key = [x for x in key if "/" not in x] 67 if key[-1] == "globalThis": 68 items["globals"] = group 69 if key[0] == "pyodide." and key[-1] == "Module": 70 items["pyodide"] = group 71 if key[0] == "pyproxy.": 72 items["PyProxy"] += group 73 74 for key, value in items.items(): 75 for json in value: 76 if json.get("access", None) == "private": 77 continue 78 obj = self.get_object_from_json(json) 79 if obj.name[0] == '"' and obj.name[-1] == '"': 80 obj.name = "[" + obj.name[1:-1] + "]" 81 self.js_docs[key][obj.kind].append(obj) 82 83 84 def get_jsdoc_content_directive(app): 85 """These directives need to close over app """ 86 87 class JsDocContent(Directive): 88 """A directive that just dumps a summary table in place. There are no 89 options, it only prints the one thing, we control the behavior from 90 here 91 """ 92 93 required_arguments = 1 94 95 def get_rst(self, obj): 96 """Grab the appropriate renderer and render us to rst. 97 JsDoc also has an AutoClassRenderer which may be useful in the future.""" 98 if isinstance(obj, Function): 99 renderer = AutoFunctionRenderer 100 else: 101 renderer = AutoAttributeRenderer 102 return renderer(self, app, arguments=["dummy"]).rst( 103 [obj.name], obj, use_short_name=False 104 ) 105 106 def get_rst_for_group(self, objects): 107 return [self.get_rst(obj) for obj in objects] 108 109 def parse_rst(self, rst): 110 """We produce a bunch of rst but directives are supposed to output 111 docutils trees. This is a helper that converts the rst to docutils. 112 """ 113 settings = self.state.document.settings 114 doc = new_document("", settings) 115 RstParser().parse(rst, doc) 116 return doc.children 117 118 def run(self): 119 module = self.arguments[0] 120 values = app._sphinxjs_analyzer.js_docs[module] 121 rst = [] 122 rst.append([f".. js:module:: {module}"]) 123 for group in values.values(): 124 rst.append(self.get_rst_for_group(group)) 125 joined_rst = "\n\n".join(["\n\n".join(r) for r in rst]) 126 return self.parse_rst(joined_rst) 127 128 return JsDocContent 129 130 131 def get_jsdoc_summary_directive(app): 132 class JsDocSummary(Directive): 133 """A directive that just dumps the Js API docs in place. There are no 134 options, it only prints the one thing, we control the behavior from 135 here 136 """ 137 138 required_arguments = 1 139 140 def run(self): 141 result = [] 142 module = self.arguments[0] 143 value = app._sphinxjs_analyzer.js_docs[module] 144 for group_name, group_objects in value.items(): 145 if not group_objects: 146 continue 147 result.append(self.format_heading(group_name.title() + "s:")) 148 table_items = self.get_summary_table(module, group_objects) 149 table_markup = self.format_table(table_items) 150 result.extend(table_markup) 151 return result 152 153 def format_heading(self, text): 154 """Make a section heading. This corresponds to the rst: "**Heading:**" 155 autodocsumm uses headings like that, so this will match that style. 156 """ 157 heading = nodes.paragraph("") 158 strong = nodes.strong("") 159 strong.append(nodes.Text(text)) 160 heading.append(strong) 161 return heading 162 163 def extract_summary(self, descr): 164 """Wrapper around autosummary extract_summary that is easier to use. 165 It seems like colons need escaping for some reason. 
166 """ 167 colon_esc = "esccolon\\\xafhoa:" 168 return extract_summary( 169 [descr.replace(":", colon_esc)], self.state.document 170 ).replace(colon_esc, ":") 171 172 def get_sig(self, obj): 173 """If the object is a function, get its signature (as figured by JsDoc)""" 174 if isinstance(obj, Function): 175 return AutoFunctionRenderer( 176 self, app, arguments=["dummy"] 177 )._formal_params(obj) 178 else: 179 return "" 180 181 def get_summary_row(self, pkgname, obj): 182 """Get the summary table row for obj. 183 184 The output is designed to be input to format_table. The link name 185 needs to be set up so that :any:`link_name` makes a link to the 186 actual api docs for this object. 187 """ 188 sig = self.get_sig(obj) 189 display_name = obj.name 190 summary = self.extract_summary(obj.description) 191 link_name = pkgname + "." + display_name 192 return (display_name, sig, summary, link_name) 193 194 def get_summary_table(self, pkgname, group): 195 """Get the data for a summary table. Return value is set up to be an 196 argument of format_table. 197 """ 198 return [self.get_summary_row(pkgname, obj) for obj in group] 199 200 # This following method is copied almost verbatim from autosummary 201 # (where it is called get_table). 202 # 203 # We have to change the value of one string: qualifier = 'obj ==> 204 # qualifier = 'any' 205 # https://github.com/sphinx-doc/sphinx/blob/3.x/sphinx/ext/autosummary/__init__.py#L392 206 def format_table(self, items): 207 """Generate a proper list of table nodes for autosummary:: directive. 208 209 *items* is a list produced by :meth:`get_items`. 210 """ 211 table_spec = addnodes.tabular_col_spec() 212 table_spec["spec"] = r"\X{1}{2}\X{1}{2}" 213 214 table = autosummary_table("") 215 real_table = nodes.table("", classes=["longtable"]) 216 table.append(real_table) 217 group = nodes.tgroup("", cols=2) 218 real_table.append(group) 219 group.append(nodes.colspec("", colwidth=10)) 220 group.append(nodes.colspec("", colwidth=90)) 221 body = nodes.tbody("") 222 group.append(body) 223 224 def append_row(*column_texts: str) -> None: 225 row = nodes.row("") 226 source, line = self.state_machine.get_source_and_line() 227 for text in column_texts: 228 node = nodes.paragraph("") 229 vl = StringList() 230 vl.append(text, "%s:%d:<autosummary>" % (source, line)) 231 with switch_source_input(self.state, vl): 232 self.state.nested_parse(vl, 0, node) 233 try: 234 if isinstance(node[0], nodes.paragraph): 235 node = node[0] 236 except IndexError: 237 pass 238 row.append(nodes.entry("", node)) 239 body.append(row) 240 241 for name, sig, summary, real_name in items: 242 qualifier = "any" # <== Only thing changed from autosummary version 243 if "nosignatures" not in self.options: 244 col1 = ":%s:`%s <%s>`\\ %s" % ( 245 qualifier, 246 name, 247 real_name, 248 rst.escape(sig), 249 ) 250 else: 251 col1 = ":%s:`%s <%s>`" % (qualifier, name, real_name) 252 col2 = summary 253 append_row(col1, col2) 254 255 return [table_spec, table] 256 257 return JsDocSummary 258 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py b/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py --- a/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py +++ b/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py @@ -4,6 +4,7 @@ from docutils.utils import new_document from collections import OrderedDict +import re from sphinx import addnodes from sphinx.util import rst @@ -11,9 +12,13 @@ from sphinx.ext.autosummary import autosummary_table, extract_summary from sphinx_js.jsdoc import Analyzer as JsAnalyzer -from sphinx_js.ir import Function +from sphinx_js.ir import Class, Function from sphinx_js.parsers import path_and_formal_params, PathVisitor -from sphinx_js.renderers import AutoFunctionRenderer, AutoAttributeRenderer +from sphinx_js.renderers import ( + AutoFunctionRenderer, + AutoAttributeRenderer, + AutoClassRenderer, +) class PyodideAnalyzer: @@ -47,7 +52,12 @@ path components which JsAnalyzer.get_object requires. """ path = self.longname_to_path(json["longname"]) - kind = "function" if json["kind"] == "function" else "attribute" + if json["kind"] == "function": + kind = "function" + elif json["kind"] == "class": + kind = "class" + else: + kind = "attribute" obj = self.inner.get_object(path, kind) obj.kind = kind return obj @@ -58,12 +68,16 @@ """ def get_val(): - return OrderedDict([["attribute", []], ["function", []]]) + return OrderedDict([["attribute", []], ["function", []], ["class", []]]) self.js_docs = {key: get_val() for key in ["globals", "pyodide", "PyProxy"]} items = {"PyProxy": []} for (key, group) in self._doclets_by_class.items(): key = [x for x in key if "/" not in x] + if key[-1] == "PyBuffer": + # PyBuffer stuff is documented as a class. Would be nice to have + # a less ad hoc way to deal with this... + continue if key[-1] == "globalThis": items["globals"] = group if key[0] == "pyodide." and key[-1] == "Module": @@ -76,7 +90,13 @@ if json.get("access", None) == "private": continue obj = self.get_object_from_json(json) + if isinstance(obj, Class): + # sphinx-jsdoc messes up array types. Fix them. + for x in obj.members: + if hasattr(x, "type"): + x.type = re.sub("Array\.<([a-zA-Z_0-9]*)>", r"\1[]", x.type) if obj.name[0] == '"' and obj.name[-1] == '"': + # sphinx-jsdoc messes up Symbol attributes. Fix them. obj.name = "[" + obj.name[1:-1] + "]" self.js_docs[key][obj.kind].append(obj) @@ -97,11 +117,13 @@ JsDoc also has an AutoClassRenderer which may be useful in the future.""" if isinstance(obj, Function): renderer = AutoFunctionRenderer + elif isinstance(obj, Class): + renderer = AutoClassRenderer else: renderer = AutoAttributeRenderer - return renderer(self, app, arguments=["dummy"]).rst( - [obj.name], obj, use_short_name=False - ) + return renderer( + self, app, arguments=["dummy"], options={"members": ["*"]} + ).rst([obj.name], obj, use_short_name=False) def get_rst_for_group(self, objects): return [self.get_rst(obj) for obj in objects] @@ -144,6 +166,9 @@ for group_name, group_objects in value.items(): if not group_objects: continue + if group_name == "class": + # Plural of class is "classes" not "classs" + group_name += "e" result.append(self.format_heading(group_name.title() + "s:")) table_items = self.get_summary_table(module, group_objects) table_markup = self.format_table(table_items)
{"golden_diff": "diff --git a/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py b/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py\n--- a/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py\n+++ b/docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py\n@@ -4,6 +4,7 @@\n from docutils.utils import new_document\n \n from collections import OrderedDict\n+import re\n \n from sphinx import addnodes\n from sphinx.util import rst\n@@ -11,9 +12,13 @@\n from sphinx.ext.autosummary import autosummary_table, extract_summary\n \n from sphinx_js.jsdoc import Analyzer as JsAnalyzer\n-from sphinx_js.ir import Function\n+from sphinx_js.ir import Class, Function\n from sphinx_js.parsers import path_and_formal_params, PathVisitor\n-from sphinx_js.renderers import AutoFunctionRenderer, AutoAttributeRenderer\n+from sphinx_js.renderers import (\n+ AutoFunctionRenderer,\n+ AutoAttributeRenderer,\n+ AutoClassRenderer,\n+)\n \n \n class PyodideAnalyzer:\n@@ -47,7 +52,12 @@\n path components which JsAnalyzer.get_object requires.\n \"\"\"\n path = self.longname_to_path(json[\"longname\"])\n- kind = \"function\" if json[\"kind\"] == \"function\" else \"attribute\"\n+ if json[\"kind\"] == \"function\":\n+ kind = \"function\"\n+ elif json[\"kind\"] == \"class\":\n+ kind = \"class\"\n+ else:\n+ kind = \"attribute\"\n obj = self.inner.get_object(path, kind)\n obj.kind = kind\n return obj\n@@ -58,12 +68,16 @@\n \"\"\"\n \n def get_val():\n- return OrderedDict([[\"attribute\", []], [\"function\", []]])\n+ return OrderedDict([[\"attribute\", []], [\"function\", []], [\"class\", []]])\n \n self.js_docs = {key: get_val() for key in [\"globals\", \"pyodide\", \"PyProxy\"]}\n items = {\"PyProxy\": []}\n for (key, group) in self._doclets_by_class.items():\n key = [x for x in key if \"/\" not in x]\n+ if key[-1] == \"PyBuffer\":\n+ # PyBuffer stuff is documented as a class. Would be nice to have\n+ # a less ad hoc way to deal with this...\n+ continue\n if key[-1] == \"globalThis\":\n items[\"globals\"] = group\n if key[0] == \"pyodide.\" and key[-1] == \"Module\":\n@@ -76,7 +90,13 @@\n if json.get(\"access\", None) == \"private\":\n continue\n obj = self.get_object_from_json(json)\n+ if isinstance(obj, Class):\n+ # sphinx-jsdoc messes up array types. Fix them.\n+ for x in obj.members:\n+ if hasattr(x, \"type\"):\n+ x.type = re.sub(\"Array\\.<([a-zA-Z_0-9]*)>\", r\"\\1[]\", x.type)\n if obj.name[0] == '\"' and obj.name[-1] == '\"':\n+ # sphinx-jsdoc messes up Symbol attributes. 
Fix them.\n obj.name = \"[\" + obj.name[1:-1] + \"]\"\n self.js_docs[key][obj.kind].append(obj)\n \n@@ -97,11 +117,13 @@\n JsDoc also has an AutoClassRenderer which may be useful in the future.\"\"\"\n if isinstance(obj, Function):\n renderer = AutoFunctionRenderer\n+ elif isinstance(obj, Class):\n+ renderer = AutoClassRenderer\n else:\n renderer = AutoAttributeRenderer\n- return renderer(self, app, arguments=[\"dummy\"]).rst(\n- [obj.name], obj, use_short_name=False\n- )\n+ return renderer(\n+ self, app, arguments=[\"dummy\"], options={\"members\": [\"*\"]}\n+ ).rst([obj.name], obj, use_short_name=False)\n \n def get_rst_for_group(self, objects):\n return [self.get_rst(obj) for obj in objects]\n@@ -144,6 +166,9 @@\n for group_name, group_objects in value.items():\n if not group_objects:\n continue\n+ if group_name == \"class\":\n+ # Plural of class is \"classes\" not \"classs\"\n+ group_name += \"e\"\n result.append(self.format_heading(group_name.title() + \"s:\"))\n table_items = self.get_summary_table(module, group_objects)\n table_markup = self.format_table(table_items)\n", "issue": "Performance issues with buffer conversions from python to javascript\nHello @hoodmane, thank you for the magnificent APIs (deep/shallowCopyToJavaScript) that would eliminate the heavy cost of big array conversion from python to Javascript. I think the shallowCopy version is designed for memory address reference instead of bulks of content copy. But I find the perfomance not so good as I've imagined, which is displayed as the following:\r\n![image](https://user-images.githubusercontent.com/34263430/107121264-74600900-68cc-11eb-8e92-da3b8b09b5f4.png)\r\n\r\nIt takes about 1~7 seconds for shallowCopyToJavascript() to complete the memory address reference and maybe some necessary meta data copy I guess. However, it's not adequate for a realtime computation. Any suggestions for better conversion performance?\r\n\r\n_Originally posted by @daoxian in https://github.com/iodide-project/pyodide/issues/1167#issuecomment-774488338_\n", "before_files": [{"content": "from docutils import nodes\nfrom docutils.parsers.rst import Directive, Parser as RstParser\nfrom docutils.statemachine import StringList\nfrom docutils.utils import new_document\n\nfrom collections import OrderedDict\n\nfrom sphinx import addnodes\nfrom sphinx.util import rst\nfrom sphinx.util.docutils import switch_source_input\nfrom sphinx.ext.autosummary import autosummary_table, extract_summary\n\nfrom sphinx_js.jsdoc import Analyzer as JsAnalyzer\nfrom sphinx_js.ir import Function\nfrom sphinx_js.parsers import path_and_formal_params, PathVisitor\nfrom sphinx_js.renderers import AutoFunctionRenderer, AutoAttributeRenderer\n\n\nclass PyodideAnalyzer:\n \"\"\"JsDoc automatically instantiates the JsAnalyzer. Rather than subclassing\n or monkey patching it, we use composition (see getattr impl).\n\n The main extra thing we do is reorganize the doclets based on our globals /\n functions / attributes scheme. This we use to subdivide the sections in our\n summary. We store these in the \"js_docs\" field which is the only field that\n we access later.\n \"\"\"\n\n def __init__(self, analyzer: JsAnalyzer) -> None:\n self.inner = analyzer\n self.create_js_doclets()\n\n def __getattr__(self, key):\n return getattr(self.inner, key)\n\n def longname_to_path(self, name):\n \"\"\"Convert the longname field produced by jsdoc to a path appropriate to use\n with _sphinxjs_analyzer.get_object. 
Based on:\n https://github.com/mozilla/sphinx-js/blob/3.1/sphinx_js/jsdoc.py#L181\n \"\"\"\n return PathVisitor().visit(path_and_formal_params[\"path\"].parse(name))\n\n def get_object_from_json(self, json):\n \"\"\"Look up the JsDoc IR object corresponding to this object. We use the\n \"kind\" field to decide whether the object is a \"function\" or an\n \"attribute\". We use longname_to_path to convert the path into a list of\n path components which JsAnalyzer.get_object requires.\n \"\"\"\n path = self.longname_to_path(json[\"longname\"])\n kind = \"function\" if json[\"kind\"] == \"function\" else \"attribute\"\n obj = self.inner.get_object(path, kind)\n obj.kind = kind\n return obj\n\n def create_js_doclets(self):\n \"\"\"Search through the doclets generated by JsDoc and categorize them by\n summary section. Skip docs labeled as \"@private\".\n \"\"\"\n\n def get_val():\n return OrderedDict([[\"attribute\", []], [\"function\", []]])\n\n self.js_docs = {key: get_val() for key in [\"globals\", \"pyodide\", \"PyProxy\"]}\n items = {\"PyProxy\": []}\n for (key, group) in self._doclets_by_class.items():\n key = [x for x in key if \"/\" not in x]\n if key[-1] == \"globalThis\":\n items[\"globals\"] = group\n if key[0] == \"pyodide.\" and key[-1] == \"Module\":\n items[\"pyodide\"] = group\n if key[0] == \"pyproxy.\":\n items[\"PyProxy\"] += group\n\n for key, value in items.items():\n for json in value:\n if json.get(\"access\", None) == \"private\":\n continue\n obj = self.get_object_from_json(json)\n if obj.name[0] == '\"' and obj.name[-1] == '\"':\n obj.name = \"[\" + obj.name[1:-1] + \"]\"\n self.js_docs[key][obj.kind].append(obj)\n\n\ndef get_jsdoc_content_directive(app):\n \"\"\"These directives need to close over app \"\"\"\n\n class JsDocContent(Directive):\n \"\"\"A directive that just dumps a summary table in place. There are no\n options, it only prints the one thing, we control the behavior from\n here\n \"\"\"\n\n required_arguments = 1\n\n def get_rst(self, obj):\n \"\"\"Grab the appropriate renderer and render us to rst.\n JsDoc also has an AutoClassRenderer which may be useful in the future.\"\"\"\n if isinstance(obj, Function):\n renderer = AutoFunctionRenderer\n else:\n renderer = AutoAttributeRenderer\n return renderer(self, app, arguments=[\"dummy\"]).rst(\n [obj.name], obj, use_short_name=False\n )\n\n def get_rst_for_group(self, objects):\n return [self.get_rst(obj) for obj in objects]\n\n def parse_rst(self, rst):\n \"\"\"We produce a bunch of rst but directives are supposed to output\n docutils trees. This is a helper that converts the rst to docutils.\n \"\"\"\n settings = self.state.document.settings\n doc = new_document(\"\", settings)\n RstParser().parse(rst, doc)\n return doc.children\n\n def run(self):\n module = self.arguments[0]\n values = app._sphinxjs_analyzer.js_docs[module]\n rst = []\n rst.append([f\".. js:module:: {module}\"])\n for group in values.values():\n rst.append(self.get_rst_for_group(group))\n joined_rst = \"\\n\\n\".join([\"\\n\\n\".join(r) for r in rst])\n return self.parse_rst(joined_rst)\n\n return JsDocContent\n\n\ndef get_jsdoc_summary_directive(app):\n class JsDocSummary(Directive):\n \"\"\"A directive that just dumps the Js API docs in place. 
There are no\n options, it only prints the one thing, we control the behavior from\n here\n \"\"\"\n\n required_arguments = 1\n\n def run(self):\n result = []\n module = self.arguments[0]\n value = app._sphinxjs_analyzer.js_docs[module]\n for group_name, group_objects in value.items():\n if not group_objects:\n continue\n result.append(self.format_heading(group_name.title() + \"s:\"))\n table_items = self.get_summary_table(module, group_objects)\n table_markup = self.format_table(table_items)\n result.extend(table_markup)\n return result\n\n def format_heading(self, text):\n \"\"\"Make a section heading. This corresponds to the rst: \"**Heading:**\"\n autodocsumm uses headings like that, so this will match that style.\n \"\"\"\n heading = nodes.paragraph(\"\")\n strong = nodes.strong(\"\")\n strong.append(nodes.Text(text))\n heading.append(strong)\n return heading\n\n def extract_summary(self, descr):\n \"\"\"Wrapper around autosummary extract_summary that is easier to use.\n It seems like colons need escaping for some reason.\n \"\"\"\n colon_esc = \"esccolon\\\\\\xafhoa:\"\n return extract_summary(\n [descr.replace(\":\", colon_esc)], self.state.document\n ).replace(colon_esc, \":\")\n\n def get_sig(self, obj):\n \"\"\"If the object is a function, get its signature (as figured by JsDoc)\"\"\"\n if isinstance(obj, Function):\n return AutoFunctionRenderer(\n self, app, arguments=[\"dummy\"]\n )._formal_params(obj)\n else:\n return \"\"\n\n def get_summary_row(self, pkgname, obj):\n \"\"\"Get the summary table row for obj.\n\n The output is designed to be input to format_table. The link name\n needs to be set up so that :any:`link_name` makes a link to the\n actual api docs for this object.\n \"\"\"\n sig = self.get_sig(obj)\n display_name = obj.name\n summary = self.extract_summary(obj.description)\n link_name = pkgname + \".\" + display_name\n return (display_name, sig, summary, link_name)\n\n def get_summary_table(self, pkgname, group):\n \"\"\"Get the data for a summary table. 
Return value is set up to be an\n argument of format_table.\n \"\"\"\n return [self.get_summary_row(pkgname, obj) for obj in group]\n\n # This following method is copied almost verbatim from autosummary\n # (where it is called get_table).\n #\n # We have to change the value of one string: qualifier = 'obj ==>\n # qualifier = 'any'\n # https://github.com/sphinx-doc/sphinx/blob/3.x/sphinx/ext/autosummary/__init__.py#L392\n def format_table(self, items):\n \"\"\"Generate a proper list of table nodes for autosummary:: directive.\n\n *items* is a list produced by :meth:`get_items`.\n \"\"\"\n table_spec = addnodes.tabular_col_spec()\n table_spec[\"spec\"] = r\"\\X{1}{2}\\X{1}{2}\"\n\n table = autosummary_table(\"\")\n real_table = nodes.table(\"\", classes=[\"longtable\"])\n table.append(real_table)\n group = nodes.tgroup(\"\", cols=2)\n real_table.append(group)\n group.append(nodes.colspec(\"\", colwidth=10))\n group.append(nodes.colspec(\"\", colwidth=90))\n body = nodes.tbody(\"\")\n group.append(body)\n\n def append_row(*column_texts: str) -> None:\n row = nodes.row(\"\")\n source, line = self.state_machine.get_source_and_line()\n for text in column_texts:\n node = nodes.paragraph(\"\")\n vl = StringList()\n vl.append(text, \"%s:%d:<autosummary>\" % (source, line))\n with switch_source_input(self.state, vl):\n self.state.nested_parse(vl, 0, node)\n try:\n if isinstance(node[0], nodes.paragraph):\n node = node[0]\n except IndexError:\n pass\n row.append(nodes.entry(\"\", node))\n body.append(row)\n\n for name, sig, summary, real_name in items:\n qualifier = \"any\" # <== Only thing changed from autosummary version\n if \"nosignatures\" not in self.options:\n col1 = \":%s:`%s <%s>`\\\\ %s\" % (\n qualifier,\n name,\n real_name,\n rst.escape(sig),\n )\n else:\n col1 = \":%s:`%s <%s>`\" % (qualifier, name, real_name)\n col2 = summary\n append_row(col1, col2)\n\n return [table_spec, table]\n\n return JsDocSummary\n", "path": "docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py"}], "after_files": [{"content": "from docutils import nodes\nfrom docutils.parsers.rst import Directive, Parser as RstParser\nfrom docutils.statemachine import StringList\nfrom docutils.utils import new_document\n\nfrom collections import OrderedDict\nimport re\n\nfrom sphinx import addnodes\nfrom sphinx.util import rst\nfrom sphinx.util.docutils import switch_source_input\nfrom sphinx.ext.autosummary import autosummary_table, extract_summary\n\nfrom sphinx_js.jsdoc import Analyzer as JsAnalyzer\nfrom sphinx_js.ir import Class, Function\nfrom sphinx_js.parsers import path_and_formal_params, PathVisitor\nfrom sphinx_js.renderers import (\n AutoFunctionRenderer,\n AutoAttributeRenderer,\n AutoClassRenderer,\n)\n\n\nclass PyodideAnalyzer:\n \"\"\"JsDoc automatically instantiates the JsAnalyzer. Rather than subclassing\n or monkey patching it, we use composition (see getattr impl).\n\n The main extra thing we do is reorganize the doclets based on our globals /\n functions / attributes scheme. This we use to subdivide the sections in our\n summary. We store these in the \"js_docs\" field which is the only field that\n we access later.\n \"\"\"\n\n def __init__(self, analyzer: JsAnalyzer) -> None:\n self.inner = analyzer\n self.create_js_doclets()\n\n def __getattr__(self, key):\n return getattr(self.inner, key)\n\n def longname_to_path(self, name):\n \"\"\"Convert the longname field produced by jsdoc to a path appropriate to use\n with _sphinxjs_analyzer.get_object. 
Based on:\n https://github.com/mozilla/sphinx-js/blob/3.1/sphinx_js/jsdoc.py#L181\n \"\"\"\n return PathVisitor().visit(path_and_formal_params[\"path\"].parse(name))\n\n def get_object_from_json(self, json):\n \"\"\"Look up the JsDoc IR object corresponding to this object. We use the\n \"kind\" field to decide whether the object is a \"function\" or an\n \"attribute\". We use longname_to_path to convert the path into a list of\n path components which JsAnalyzer.get_object requires.\n \"\"\"\n path = self.longname_to_path(json[\"longname\"])\n if json[\"kind\"] == \"function\":\n kind = \"function\"\n elif json[\"kind\"] == \"class\":\n kind = \"class\"\n else:\n kind = \"attribute\"\n obj = self.inner.get_object(path, kind)\n obj.kind = kind\n return obj\n\n def create_js_doclets(self):\n \"\"\"Search through the doclets generated by JsDoc and categorize them by\n summary section. Skip docs labeled as \"@private\".\n \"\"\"\n\n def get_val():\n return OrderedDict([[\"attribute\", []], [\"function\", []], [\"class\", []]])\n\n self.js_docs = {key: get_val() for key in [\"globals\", \"pyodide\", \"PyProxy\"]}\n items = {\"PyProxy\": []}\n for (key, group) in self._doclets_by_class.items():\n key = [x for x in key if \"/\" not in x]\n if key[-1] == \"PyBuffer\":\n # PyBuffer stuff is documented as a class. Would be nice to have\n # a less ad hoc way to deal with this...\n continue\n if key[-1] == \"globalThis\":\n items[\"globals\"] = group\n if key[0] == \"pyodide.\" and key[-1] == \"Module\":\n items[\"pyodide\"] = group\n if key[0] == \"pyproxy.\":\n items[\"PyProxy\"] += group\n\n for key, value in items.items():\n for json in value:\n if json.get(\"access\", None) == \"private\":\n continue\n obj = self.get_object_from_json(json)\n if isinstance(obj, Class):\n # sphinx-jsdoc messes up array types. Fix them.\n for x in obj.members:\n if hasattr(x, \"type\"):\n x.type = re.sub(\"Array\\.<([a-zA-Z_0-9]*)>\", r\"\\1[]\", x.type)\n if obj.name[0] == '\"' and obj.name[-1] == '\"':\n # sphinx-jsdoc messes up Symbol attributes. Fix them.\n obj.name = \"[\" + obj.name[1:-1] + \"]\"\n self.js_docs[key][obj.kind].append(obj)\n\n\ndef get_jsdoc_content_directive(app):\n \"\"\"These directives need to close over app \"\"\"\n\n class JsDocContent(Directive):\n \"\"\"A directive that just dumps a summary table in place. There are no\n options, it only prints the one thing, we control the behavior from\n here\n \"\"\"\n\n required_arguments = 1\n\n def get_rst(self, obj):\n \"\"\"Grab the appropriate renderer and render us to rst.\n JsDoc also has an AutoClassRenderer which may be useful in the future.\"\"\"\n if isinstance(obj, Function):\n renderer = AutoFunctionRenderer\n elif isinstance(obj, Class):\n renderer = AutoClassRenderer\n else:\n renderer = AutoAttributeRenderer\n return renderer(\n self, app, arguments=[\"dummy\"], options={\"members\": [\"*\"]}\n ).rst([obj.name], obj, use_short_name=False)\n\n def get_rst_for_group(self, objects):\n return [self.get_rst(obj) for obj in objects]\n\n def parse_rst(self, rst):\n \"\"\"We produce a bunch of rst but directives are supposed to output\n docutils trees. This is a helper that converts the rst to docutils.\n \"\"\"\n settings = self.state.document.settings\n doc = new_document(\"\", settings)\n RstParser().parse(rst, doc)\n return doc.children\n\n def run(self):\n module = self.arguments[0]\n values = app._sphinxjs_analyzer.js_docs[module]\n rst = []\n rst.append([f\".. 
js:module:: {module}\"])\n for group in values.values():\n rst.append(self.get_rst_for_group(group))\n joined_rst = \"\\n\\n\".join([\"\\n\\n\".join(r) for r in rst])\n return self.parse_rst(joined_rst)\n\n return JsDocContent\n\n\ndef get_jsdoc_summary_directive(app):\n class JsDocSummary(Directive):\n \"\"\"A directive that just dumps the Js API docs in place. There are no\n options, it only prints the one thing, we control the behavior from\n here\n \"\"\"\n\n required_arguments = 1\n\n def run(self):\n result = []\n module = self.arguments[0]\n value = app._sphinxjs_analyzer.js_docs[module]\n for group_name, group_objects in value.items():\n if not group_objects:\n continue\n if group_name == \"class\":\n # Plural of class is \"classes\" not \"classs\"\n group_name += \"e\"\n result.append(self.format_heading(group_name.title() + \"s:\"))\n table_items = self.get_summary_table(module, group_objects)\n table_markup = self.format_table(table_items)\n result.extend(table_markup)\n return result\n\n def format_heading(self, text):\n \"\"\"Make a section heading. This corresponds to the rst: \"**Heading:**\"\n autodocsumm uses headings like that, so this will match that style.\n \"\"\"\n heading = nodes.paragraph(\"\")\n strong = nodes.strong(\"\")\n strong.append(nodes.Text(text))\n heading.append(strong)\n return heading\n\n def extract_summary(self, descr):\n \"\"\"Wrapper around autosummary extract_summary that is easier to use.\n It seems like colons need escaping for some reason.\n \"\"\"\n colon_esc = \"esccolon\\\\\\xafhoa:\"\n return extract_summary(\n [descr.replace(\":\", colon_esc)], self.state.document\n ).replace(colon_esc, \":\")\n\n def get_sig(self, obj):\n \"\"\"If the object is a function, get its signature (as figured by JsDoc)\"\"\"\n if isinstance(obj, Function):\n return AutoFunctionRenderer(\n self, app, arguments=[\"dummy\"]\n )._formal_params(obj)\n else:\n return \"\"\n\n def get_summary_row(self, pkgname, obj):\n \"\"\"Get the summary table row for obj.\n\n The output is designed to be input to format_table. The link name\n needs to be set up so that :any:`link_name` makes a link to the\n actual api docs for this object.\n \"\"\"\n sig = self.get_sig(obj)\n display_name = obj.name\n summary = self.extract_summary(obj.description)\n link_name = pkgname + \".\" + display_name\n return (display_name, sig, summary, link_name)\n\n def get_summary_table(self, pkgname, group):\n \"\"\"Get the data for a summary table. 
Return value is set up to be an\n argument of format_table.\n \"\"\"\n return [self.get_summary_row(pkgname, obj) for obj in group]\n\n # This following method is copied almost verbatim from autosummary\n # (where it is called get_table).\n #\n # We have to change the value of one string: qualifier = 'obj ==>\n # qualifier = 'any'\n # https://github.com/sphinx-doc/sphinx/blob/3.x/sphinx/ext/autosummary/__init__.py#L392\n def format_table(self, items):\n \"\"\"Generate a proper list of table nodes for autosummary:: directive.\n\n *items* is a list produced by :meth:`get_items`.\n \"\"\"\n table_spec = addnodes.tabular_col_spec()\n table_spec[\"spec\"] = r\"\\X{1}{2}\\X{1}{2}\"\n\n table = autosummary_table(\"\")\n real_table = nodes.table(\"\", classes=[\"longtable\"])\n table.append(real_table)\n group = nodes.tgroup(\"\", cols=2)\n real_table.append(group)\n group.append(nodes.colspec(\"\", colwidth=10))\n group.append(nodes.colspec(\"\", colwidth=90))\n body = nodes.tbody(\"\")\n group.append(body)\n\n def append_row(*column_texts: str) -> None:\n row = nodes.row(\"\")\n source, line = self.state_machine.get_source_and_line()\n for text in column_texts:\n node = nodes.paragraph(\"\")\n vl = StringList()\n vl.append(text, \"%s:%d:<autosummary>\" % (source, line))\n with switch_source_input(self.state, vl):\n self.state.nested_parse(vl, 0, node)\n try:\n if isinstance(node[0], nodes.paragraph):\n node = node[0]\n except IndexError:\n pass\n row.append(nodes.entry(\"\", node))\n body.append(row)\n\n for name, sig, summary, real_name in items:\n qualifier = \"any\" # <== Only thing changed from autosummary version\n if \"nosignatures\" not in self.options:\n col1 = \":%s:`%s <%s>`\\\\ %s\" % (\n qualifier,\n name,\n real_name,\n rst.escape(sig),\n )\n else:\n col1 = \":%s:`%s <%s>`\" % (qualifier, name, real_name)\n col2 = summary\n append_row(col1, col2)\n\n return [table_spec, table]\n\n return JsDocSummary\n", "path": "docs/sphinx_pyodide/sphinx_pyodide/jsdoc.py"}]}
3425
1015
gh_patches_debug_39557
rasdani/github-patches
git_diff
geopandas__geopandas-1093
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add a show_versions function Similarly to `pandas.show_versions()` and `sklearn.show_versions()`, it would be nice to have a function like that for GeoPandas. We can probably base the code on those examples. It could list the versions of the direct python dependencies, but also try to show the versions of the underlying GEOS / GDAL libraries. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `geopandas/tools/_show_versions.py` Content: ``` 1 import platform 2 import sys 3 import importlib 4 5 6 def _get_sys_info(): 7 """System information 8 9 Returns 10 ------- 11 sys_info : dict 12 system and Python version information 13 """ 14 python = sys.version.replace('\n', ' ') 15 16 blob = [ 17 ("python", python), 18 ('executable', sys.executable), 19 ("machine", platform.platform()), 20 ] 21 22 return dict(blob) 23 24 25 def _get_deps_info(): 26 """Overview of the installed version of main dependencies 27 28 Returns 29 ------- 30 deps_info: dict 31 version information on relevant Python libraries 32 """ 33 deps = [ 34 "geopandas", 35 "pandas", 36 "fiona", 37 "osgeo.gdal", 38 "numpy", 39 "shapely", 40 "rtree", 41 "pyproj", 42 "matplotlib", 43 "mapclassify", 44 "pysal", 45 "geopy", 46 "psycopg2", 47 "descartes" 48 ] 49 50 def get_version(module): 51 return module.__version__ 52 53 deps_info = {} 54 55 for modname in deps: 56 try: 57 if modname in sys.modules: 58 mod = sys.modules[modname] 59 else: 60 mod = importlib.import_module(modname) 61 ver = get_version(mod) 62 deps_info[modname] = ver 63 except ImportError: 64 deps_info[modname] = None 65 except AttributeError: 66 deps_info[modname] = None 67 68 return deps_info 69 70 71 def show_versions(): 72 """ 73 Print system information and installed module versions. 74 75 Example 76 ------- 77 > python -c "import geopandas; geopandas.show_versions()" 78 """ 79 sys_info = _get_sys_info() 80 deps_info = _get_deps_info() 81 82 maxlen = max(len(x) for x in deps_info) 83 tpl = "{{k:<{maxlen}}}: {{stat}}".format(maxlen=maxlen) 84 print("\nSYSTEM INFO") 85 print("-----------") 86 for k, stat in sys_info.items(): 87 print(tpl.format(k=k, stat=stat)) 88 print("\nPYTHON DEPENDENCIES") 89 print("-------------------") 90 for k, stat in deps_info.items(): 91 print(tpl.format(k=k, stat=stat)) 92 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/geopandas/tools/_show_versions.py b/geopandas/tools/_show_versions.py --- a/geopandas/tools/_show_versions.py +++ b/geopandas/tools/_show_versions.py @@ -22,6 +22,53 @@ return dict(blob) +def _get_C_info(): + """Information on system PROJ, GDAL, GEOS + Returns + ------- + c_info: dict + system PROJ information + """ + try: + import pyproj + from pyproj.exceptions import DataDirError + proj = pyproj.proj_version_str + try: + proj_dir = pyproj.datadir.get_data_dir() + except DataDirError: + proj_dir = None + except Exception: + proj = None + proj_dir = None + + try: + import shapely._buildcfg + geos = '{}.{}.{}'.format(*shapely._buildcfg.geos_version) + geos_dir = shapely._buildcfg.geos_library_path + except Exception: + geos = None + geos_dir = None + + try: + import fiona + gdal = fiona.env.get_gdal_release_name() + gdal_dir = fiona.env.GDALDataFinder().search() + except Exception: + gdal = None + gdal_dir = None + + blob = [ + ("GEOS", geos), + ("GEOS lib", geos_dir), + ("GDAL", gdal), + ("GDAL dir", gdal_dir), + ("PROJ", proj), + ("PROJ data dir", proj_dir) + ] + + return dict(blob) + + def _get_deps_info(): """Overview of the installed version of main dependencies @@ -34,7 +81,6 @@ "geopandas", "pandas", "fiona", - "osgeo.gdal", "numpy", "shapely", "rtree", @@ -43,8 +89,7 @@ "mapclassify", "pysal", "geopy", - "psycopg2", - "descartes" + "psycopg2" ] def get_version(module): @@ -60,9 +105,7 @@ mod = importlib.import_module(modname) ver = get_version(mod) deps_info[modname] = ver - except ImportError: - deps_info[modname] = None - except AttributeError: + except Exception: deps_info[modname] = None return deps_info @@ -78,6 +121,7 @@ """ sys_info = _get_sys_info() deps_info = _get_deps_info() + proj_info = _get_C_info() maxlen = max(len(x) for x in deps_info) tpl = "{{k:<{maxlen}}}: {{stat}}".format(maxlen=maxlen) @@ -85,6 +129,10 @@ print("-----------") for k, stat in sys_info.items(): print(tpl.format(k=k, stat=stat)) + print("\nGEOS, GDAL, PROJ INFO") + print("---------------------") + for k, stat in proj_info.items(): + print(tpl.format(k=k, stat=stat)) print("\nPYTHON DEPENDENCIES") print("-------------------") for k, stat in deps_info.items():
{"golden_diff": "diff --git a/geopandas/tools/_show_versions.py b/geopandas/tools/_show_versions.py\n--- a/geopandas/tools/_show_versions.py\n+++ b/geopandas/tools/_show_versions.py\n@@ -22,6 +22,53 @@\n return dict(blob)\n \n \n+def _get_C_info():\n+ \"\"\"Information on system PROJ, GDAL, GEOS\n+ Returns\n+ -------\n+ c_info: dict\n+ system PROJ information\n+ \"\"\"\n+ try:\n+ import pyproj\n+ from pyproj.exceptions import DataDirError\n+ proj = pyproj.proj_version_str\n+ try:\n+ proj_dir = pyproj.datadir.get_data_dir()\n+ except DataDirError:\n+ proj_dir = None\n+ except Exception:\n+ proj = None\n+ proj_dir = None\n+\n+ try:\n+ import shapely._buildcfg\n+ geos = '{}.{}.{}'.format(*shapely._buildcfg.geos_version)\n+ geos_dir = shapely._buildcfg.geos_library_path\n+ except Exception:\n+ geos = None\n+ geos_dir = None\n+\n+ try:\n+ import fiona\n+ gdal = fiona.env.get_gdal_release_name()\n+ gdal_dir = fiona.env.GDALDataFinder().search()\n+ except Exception:\n+ gdal = None\n+ gdal_dir = None\n+\n+ blob = [\n+ (\"GEOS\", geos),\n+ (\"GEOS lib\", geos_dir),\n+ (\"GDAL\", gdal),\n+ (\"GDAL dir\", gdal_dir),\n+ (\"PROJ\", proj),\n+ (\"PROJ data dir\", proj_dir)\n+ ]\n+\n+ return dict(blob)\n+\n+\n def _get_deps_info():\n \"\"\"Overview of the installed version of main dependencies\n \n@@ -34,7 +81,6 @@\n \"geopandas\",\n \"pandas\",\n \"fiona\",\n- \"osgeo.gdal\",\n \"numpy\",\n \"shapely\",\n \"rtree\",\n@@ -43,8 +89,7 @@\n \"mapclassify\",\n \"pysal\",\n \"geopy\",\n- \"psycopg2\",\n- \"descartes\"\n+ \"psycopg2\"\n ]\n \n def get_version(module):\n@@ -60,9 +105,7 @@\n mod = importlib.import_module(modname)\n ver = get_version(mod)\n deps_info[modname] = ver\n- except ImportError:\n- deps_info[modname] = None\n- except AttributeError:\n+ except Exception:\n deps_info[modname] = None\n \n return deps_info\n@@ -78,6 +121,7 @@\n \"\"\"\n sys_info = _get_sys_info()\n deps_info = _get_deps_info()\n+ proj_info = _get_C_info()\n \n maxlen = max(len(x) for x in deps_info)\n tpl = \"{{k:<{maxlen}}}: {{stat}}\".format(maxlen=maxlen)\n@@ -85,6 +129,10 @@\n print(\"-----------\")\n for k, stat in sys_info.items():\n print(tpl.format(k=k, stat=stat))\n+ print(\"\\nGEOS, GDAL, PROJ INFO\")\n+ print(\"---------------------\")\n+ for k, stat in proj_info.items():\n+ print(tpl.format(k=k, stat=stat))\n print(\"\\nPYTHON DEPENDENCIES\")\n print(\"-------------------\")\n for k, stat in deps_info.items():\n", "issue": "Add a show_versions function\nSimilarly to `pandas.show_versions()` and `sklearn.show_versions()`, it would be nice to have a function like that for GeoPandas. We can probably base the code on those examples. 
\r\n\r\nIt could list the versions of the direct python dependencies, but also try to show the versions of the underlying GEOS / GDAL libraries.\n", "before_files": [{"content": "import platform\nimport sys\nimport importlib\n\n\ndef _get_sys_info():\n \"\"\"System information\n\n Returns\n -------\n sys_info : dict\n system and Python version information\n \"\"\"\n python = sys.version.replace('\\n', ' ')\n\n blob = [\n (\"python\", python),\n ('executable', sys.executable),\n (\"machine\", platform.platform()),\n ]\n\n return dict(blob)\n\n\ndef _get_deps_info():\n \"\"\"Overview of the installed version of main dependencies\n\n Returns\n -------\n deps_info: dict\n version information on relevant Python libraries\n \"\"\"\n deps = [\n \"geopandas\",\n \"pandas\",\n \"fiona\",\n \"osgeo.gdal\",\n \"numpy\",\n \"shapely\",\n \"rtree\",\n \"pyproj\",\n \"matplotlib\",\n \"mapclassify\",\n \"pysal\",\n \"geopy\",\n \"psycopg2\",\n \"descartes\"\n ]\n\n def get_version(module):\n return module.__version__\n\n deps_info = {}\n\n for modname in deps:\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n ver = get_version(mod)\n deps_info[modname] = ver\n except ImportError:\n deps_info[modname] = None\n except AttributeError:\n deps_info[modname] = None\n\n return deps_info\n\n\ndef show_versions():\n \"\"\"\n Print system information and installed module versions.\n\n Example\n -------\n > python -c \"import geopandas; geopandas.show_versions()\"\n \"\"\"\n sys_info = _get_sys_info()\n deps_info = _get_deps_info()\n\n maxlen = max(len(x) for x in deps_info)\n tpl = \"{{k:<{maxlen}}}: {{stat}}\".format(maxlen=maxlen)\n print(\"\\nSYSTEM INFO\")\n print(\"-----------\")\n for k, stat in sys_info.items():\n print(tpl.format(k=k, stat=stat))\n print(\"\\nPYTHON DEPENDENCIES\")\n print(\"-------------------\")\n for k, stat in deps_info.items():\n print(tpl.format(k=k, stat=stat))\n", "path": "geopandas/tools/_show_versions.py"}], "after_files": [{"content": "import platform\nimport sys\nimport importlib\n\n\ndef _get_sys_info():\n \"\"\"System information\n\n Returns\n -------\n sys_info : dict\n system and Python version information\n \"\"\"\n python = sys.version.replace('\\n', ' ')\n\n blob = [\n (\"python\", python),\n ('executable', sys.executable),\n (\"machine\", platform.platform()),\n ]\n\n return dict(blob)\n\n\ndef _get_C_info():\n \"\"\"Information on system PROJ, GDAL, GEOS\n Returns\n -------\n c_info: dict\n system PROJ information\n \"\"\"\n try:\n import pyproj\n from pyproj.exceptions import DataDirError\n proj = pyproj.proj_version_str\n try:\n proj_dir = pyproj.datadir.get_data_dir()\n except DataDirError:\n proj_dir = None\n except Exception:\n proj = None\n proj_dir = None\n\n try:\n import shapely._buildcfg\n geos = '{}.{}.{}'.format(*shapely._buildcfg.geos_version)\n geos_dir = shapely._buildcfg.geos_library_path\n except Exception:\n geos = None\n geos_dir = None\n\n try:\n import fiona\n gdal = fiona.env.get_gdal_release_name()\n gdal_dir = fiona.env.GDALDataFinder().search()\n except Exception:\n gdal = None\n gdal_dir = None\n\n blob = [\n (\"GEOS\", geos),\n (\"GEOS lib\", geos_dir),\n (\"GDAL\", gdal),\n (\"GDAL dir\", gdal_dir),\n (\"PROJ\", proj),\n (\"PROJ data dir\", proj_dir)\n ]\n\n return dict(blob)\n\n\ndef _get_deps_info():\n \"\"\"Overview of the installed version of main dependencies\n\n Returns\n -------\n deps_info: dict\n version information on relevant Python libraries\n \"\"\"\n deps = [\n 
\"geopandas\",\n \"pandas\",\n \"fiona\",\n \"numpy\",\n \"shapely\",\n \"rtree\",\n \"pyproj\",\n \"matplotlib\",\n \"mapclassify\",\n \"pysal\",\n \"geopy\",\n \"psycopg2\"\n ]\n\n def get_version(module):\n return module.__version__\n\n deps_info = {}\n\n for modname in deps:\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n ver = get_version(mod)\n deps_info[modname] = ver\n except Exception:\n deps_info[modname] = None\n\n return deps_info\n\n\ndef show_versions():\n \"\"\"\n Print system information and installed module versions.\n\n Example\n -------\n > python -c \"import geopandas; geopandas.show_versions()\"\n \"\"\"\n sys_info = _get_sys_info()\n deps_info = _get_deps_info()\n proj_info = _get_C_info()\n\n maxlen = max(len(x) for x in deps_info)\n tpl = \"{{k:<{maxlen}}}: {{stat}}\".format(maxlen=maxlen)\n print(\"\\nSYSTEM INFO\")\n print(\"-----------\")\n for k, stat in sys_info.items():\n print(tpl.format(k=k, stat=stat))\n print(\"\\nGEOS, GDAL, PROJ INFO\")\n print(\"---------------------\")\n for k, stat in proj_info.items():\n print(tpl.format(k=k, stat=stat))\n print(\"\\nPYTHON DEPENDENCIES\")\n print(\"-------------------\")\n for k, stat in deps_info.items():\n print(tpl.format(k=k, stat=stat))\n", "path": "geopandas/tools/_show_versions.py"}]}
1000
795
gh_patches_debug_32877
rasdani/github-patches
git_diff
sopel-irc__sopel-1425
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dice: .choose output is messy I started this thought process after noticing that `.choose thing1, thing2, thing3` will yield choices that include the leading space (`thing1`, ` thing2`, and ` thing3`), but it's not that simple. Since `.choose` just uses a simple `str.split()` as-is, without cleaning the values at all, it's already starting with messy data. Then it swaps the delimiter used for no particular reason ([comment claims](https://github.com/sopel-irc/sopel/blob/d73a410717d6cb2b164bf5411f20a995210bbc22/sopel/modules/dice.py#L248) it's "to prevent ambiguity", but I think it actually does the opposite). What comes out the other end: ``` <dgw> .choose thing1, thing2, thing3 <Sopel> dgw: Your options: thing1| thing2| thing3. My choice: thing2 ``` Extra spaces everywhere, delimiters that don't really make sense with the spacing around them… Ugly. Current plans to sort this out: - [x] **In 6.6.0:** Clean the input to avoid extra spaces in the output. ~~Maybe switch to always use `, `&nbsp;as delimiter in output.~~ [#1421] - [x] **In 7.0.0:** ~~Drop support for everything but `,` (and maybe `/`) in the input, and~~ always use `, `&nbsp;in output ~~(if not already changed for 6.6.0)~~. [#1425] There's considerable lead time before Sopel 7, so any user objections to dropping `\` and `|` as supported delimiters may be dropped in the comments here, or in the comments of the PR I will open with the change some time in early 2019, after 6.6.0 is out. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sopel/modules/dice.py` Content: ``` 1 # coding=utf-8 2 """ 3 dice.py - Dice Module 4 Copyright 2010-2013, Dimitri "Tyrope" Molenaars, TyRope.nl 5 Copyright 2013, Ari Koivula, <[email protected]> 6 Licensed under the Eiffel Forum License 2. 7 8 https://sopel.chat/ 9 """ 10 from __future__ import unicode_literals, absolute_import, print_function, division 11 12 import random 13 import re 14 import operator 15 16 import sopel.module 17 from sopel.tools.calculation import eval_equation 18 19 20 class DicePouch: 21 def __init__(self, num_of_die, type_of_die, addition): 22 """Initialize dice pouch and roll the dice. 23 24 Args: 25 num_of_die: number of dice in the pouch. 26 type_of_die: how many faces the dice have. 27 addition: how much is added to the result of the dice. 28 """ 29 self.num = num_of_die 30 self.type = type_of_die 31 self.addition = addition 32 33 self.dice = {} 34 self.dropped = {} 35 36 self.roll_dice() 37 38 def roll_dice(self): 39 """Roll all the dice in the pouch.""" 40 self.dice = {} 41 self.dropped = {} 42 for __ in range(self.num): 43 number = random.randint(1, self.type) 44 count = self.dice.setdefault(number, 0) 45 self.dice[number] = count + 1 46 47 def drop_lowest(self, n): 48 """Drop n lowest dice from the result. 49 50 Args: 51 n: the number of dice to drop. 
52 """ 53 54 sorted_x = sorted(self.dice.items(), key=operator.itemgetter(0)) 55 56 for i, count in sorted_x: 57 count = self.dice[i] 58 if n == 0: 59 break 60 elif n < count: 61 self.dice[i] = count - n 62 self.dropped[i] = n 63 break 64 else: 65 self.dice[i] = 0 66 self.dropped[i] = count 67 n = n - count 68 69 for i, count in self.dropped.items(): 70 if self.dice[i] == 0: 71 del self.dice[i] 72 73 def get_simple_string(self): 74 """Return the values of the dice like (2+2+2[+1+1])+1.""" 75 dice = self.dice.items() 76 faces = ("+".join([str(face)] * times) for face, times in dice) 77 dice_str = "+".join(faces) 78 79 dropped_str = "" 80 if self.dropped: 81 dropped = self.dropped.items() 82 dfaces = ("+".join([str(face)] * times) for face, times in dropped) 83 dropped_str = "[+%s]" % ("+".join(dfaces),) 84 85 plus_str = "" 86 if self.addition: 87 plus_str = "{:+d}".format(self.addition) 88 89 return "(%s%s)%s" % (dice_str, dropped_str, plus_str) 90 91 def get_compressed_string(self): 92 """Return the values of the dice like (3x2[+2x1])+1.""" 93 dice = self.dice.items() 94 faces = ("%dx%d" % (times, face) for face, times in dice) 95 dice_str = "+".join(faces) 96 97 dropped_str = "" 98 if self.dropped: 99 dropped = self.dropped.items() 100 dfaces = ("%dx%d" % (times, face) for face, times in dropped) 101 dropped_str = "[+%s]" % ("+".join(dfaces),) 102 103 plus_str = "" 104 if self.addition: 105 plus_str = "{:+d}".format(self.addition) 106 107 return "(%s%s)%s" % (dice_str, dropped_str, plus_str) 108 109 def get_sum(self): 110 """Get the sum of non-dropped dice and the addition.""" 111 result = self.addition 112 for face, times in self.dice.items(): 113 result += face * times 114 return result 115 116 def get_number_of_faces(self): 117 """Returns sum of different faces for dropped and not dropped dice 118 119 This can be used to estimate, whether the result can be shown in 120 compressed form in a reasonable amount of space. 121 """ 122 return len(self.dice) + len(self.dropped) 123 124 125 def _roll_dice(bot, dice_expression): 126 result = re.search( 127 r""" 128 (?P<dice_num>-?\d*) 129 d 130 (?P<dice_type>-?\d+) 131 (v(?P<drop_lowest>-?\d+))? 132 $""", 133 dice_expression, 134 re.IGNORECASE | re.VERBOSE) 135 136 dice_num = int(result.group('dice_num') or 1) 137 dice_type = int(result.group('dice_type')) 138 139 # Dice can't have zero or a negative number of sides. 140 if dice_type <= 0: 141 bot.reply("I don't have any dice with %d sides. =(" % dice_type) 142 return None # Signal there was a problem 143 144 # Can't roll a negative number of dice. 145 if dice_num < 0: 146 bot.reply("I'd rather not roll a negative amount of dice. =(") 147 return None # Signal there was a problem 148 149 # Upper limit for dice should be at most a million. Creating a dict with 150 # more than a million elements already takes a noticeable amount of time 151 # on a fast computer and ~55kB of memory. 152 if dice_num > 1000: 153 bot.reply('I only have 1000 dice. =(') 154 return None # Signal there was a problem 155 156 dice = DicePouch(dice_num, dice_type, 0) 157 158 if result.group('drop_lowest'): 159 drop = int(result.group('drop_lowest')) 160 if drop >= 0: 161 dice.drop_lowest(drop) 162 else: 163 bot.reply("I can't drop the lowest %d dice. 
=(" % drop) 164 165 return dice 166 167 168 @sopel.module.commands("roll") 169 @sopel.module.commands("dice") 170 @sopel.module.commands("d") 171 @sopel.module.priority("medium") 172 @sopel.module.example(".roll 3d1+1", 'You roll 3d1+1: (1+1+1)+1 = 4') 173 @sopel.module.example(".roll 3d1v2+1", 'You roll 3d1v2+1: (1[+1+1])+1 = 2') 174 @sopel.module.example(".roll 2d4", r'You roll 2d4: \(\d\+\d\) = \d', re=True) 175 @sopel.module.example(".roll 100d1", r'[^:]*: \(100x1\) = 100', re=True) 176 @sopel.module.example(".roll 1001d1", 'I only have 1000 dice. =(') 177 @sopel.module.example(".roll 1d1 + 1d1", 'You roll 1d1 + 1d1: (1) + (1) = 2') 178 @sopel.module.example(".roll 1d1+1d1", 'You roll 1d1+1d1: (1)+(1) = 2') 179 def roll(bot, trigger): 180 """.dice XdY[vZ][+N], rolls dice and reports the result. 181 182 X is the number of dice. Y is the number of faces in the dice. Z is the 183 number of lowest dice to be dropped from the result. N is the constant to 184 be applied to the end result. 185 """ 186 # This regexp is only allowed to have one captured group, because having 187 # more would alter the output of re.findall. 188 dice_regexp = r"-?\d*[dD]-?\d+(?:[vV]-?\d+)?" 189 190 # Get a list of all dice expressions, evaluate them and then replace the 191 # expressions in the original string with the results. Replacing is done 192 # using string formatting, so %-characters must be escaped. 193 if not trigger.group(2): 194 return bot.reply("No dice to roll.") 195 arg_str = trigger.group(2) 196 dice_expressions = re.findall(dice_regexp, arg_str) 197 arg_str = arg_str.replace("%", "%%") 198 arg_str = re.sub(dice_regexp, "%s", arg_str) 199 200 f = lambda dice_expr: _roll_dice(bot, dice_expr) 201 dice = list(map(f, dice_expressions)) 202 203 if None in dice: 204 # Stop computing roll if there was a problem rolling dice. 205 return 206 207 def _get_eval_str(dice): 208 return "(%d)" % (dice.get_sum(),) 209 210 def _get_pretty_str(dice): 211 if dice.num <= 10: 212 return dice.get_simple_string() 213 elif dice.get_number_of_faces() <= 10: 214 return dice.get_compressed_string() 215 else: 216 return "(...)" 217 218 eval_str = arg_str % (tuple(map(_get_eval_str, dice))) 219 pretty_str = arg_str % (tuple(map(_get_pretty_str, dice))) 220 221 try: 222 result = eval_equation(eval_str) 223 except TypeError: 224 bot.reply("The type of this equation is, apparently, not a string. " + 225 "How did you do that, anyway?") 226 except ValueError: 227 # As it seems that ValueError is raised if the resulting equation would 228 # be too big, give a semi-serious answer to reflect on this. 229 bot.reply("You roll %s: %s = very big" % ( 230 trigger.group(2), pretty_str)) 231 return 232 except (SyntaxError, eval_equation.Error): 233 bot.reply("I don't know how to process that. " + 234 "Are the dice as well as the algorithms correct?") 235 return 236 237 bot.reply("You roll %s: %s = %d" % ( 238 trigger.group(2), pretty_str, result)) 239 240 241 @sopel.module.commands("choice") 242 @sopel.module.commands("ch") 243 @sopel.module.commands("choose") 244 @sopel.module.priority("medium") 245 def choose(bot, trigger): 246 """ 247 .choice option1|option2|option3 - Makes a difficult choice easy. 
248 """ 249 if not trigger.group(2): 250 return bot.reply('I\'d choose an option, but you didn\'t give me any.') 251 choices = [trigger.group(2)] 252 for delim in '|\\/,': 253 choices = trigger.group(2).split(delim) 254 if len(choices) > 1: 255 break 256 choices = [choice.strip() for choice in choices] 257 # Use a different delimiter in the output, to prevent ambiguity. 258 for show_delim in ',|/\\': 259 if show_delim not in trigger.group(2): 260 show_delim += ' ' 261 break 262 263 pick = random.choice(choices) 264 return bot.reply('Your options: %s. My choice: %s' % (show_delim.join(choices), pick)) 265 266 267 if __name__ == "__main__": 268 from sopel.test_tools import run_example_tests 269 run_example_tests(__file__) 270 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sopel/modules/dice.py b/sopel/modules/dice.py --- a/sopel/modules/dice.py +++ b/sopel/modules/dice.py @@ -242,6 +242,15 @@ @sopel.module.commands("ch") @sopel.module.commands("choose") @sopel.module.priority("medium") [email protected](".choose a, b, c", r'Your options: a, b, c. My choice: (a|b|c)', re=True) [email protected](".choose a | b | c", r'Your options: a, b, c. My choice: (a|b|c)', re=True) [email protected](".choose a,b,c", r'Your options: a, b, c. My choice: (a|b|c)', re=True) [email protected](".choose a|b|c", r'Your options: a, b, c. My choice: (a|b|c)', re=True) [email protected](".choose a b c", r'Your options: a, b, c. My choice: (a|b|c)', re=True) [email protected](".choose a, b | just a", + r'Your options: "a, b", just a. My choice: ((a, b)|(just a))', + re=True) [email protected](".choose a", 'Your options: a. My choice: a') def choose(bot, trigger): """ .choice option1|option2|option3 - Makes a difficult choice easy. @@ -249,19 +258,19 @@ if not trigger.group(2): return bot.reply('I\'d choose an option, but you didn\'t give me any.') choices = [trigger.group(2)] - for delim in '|\\/,': + for delim in '|\\/, ': choices = trigger.group(2).split(delim) if len(choices) > 1: break choices = [choice.strip() for choice in choices] - # Use a different delimiter in the output, to prevent ambiguity. - for show_delim in ',|/\\': - if show_delim not in trigger.group(2): - show_delim += ' ' - break - pick = random.choice(choices) - return bot.reply('Your options: %s. My choice: %s' % (show_delim.join(choices), pick)) + + # Always use a comma in the output + display_options = ', '.join( + choice if ',' not in choice else '"%s"' % choice + for choice in choices + ) + return bot.reply('Your options: %s. My choice: %s' % (display_options, pick)) if __name__ == "__main__":
{"golden_diff": "diff --git a/sopel/modules/dice.py b/sopel/modules/dice.py\n--- a/sopel/modules/dice.py\n+++ b/sopel/modules/dice.py\n@@ -242,6 +242,15 @@\n @sopel.module.commands(\"ch\")\n @sopel.module.commands(\"choose\")\n @sopel.module.priority(\"medium\")\[email protected](\".choose a, b, c\", r'Your options: a, b, c. My choice: (a|b|c)', re=True)\[email protected](\".choose a | b | c\", r'Your options: a, b, c. My choice: (a|b|c)', re=True)\[email protected](\".choose a,b,c\", r'Your options: a, b, c. My choice: (a|b|c)', re=True)\[email protected](\".choose a|b|c\", r'Your options: a, b, c. My choice: (a|b|c)', re=True)\[email protected](\".choose a b c\", r'Your options: a, b, c. My choice: (a|b|c)', re=True)\[email protected](\".choose a, b | just a\",\n+ r'Your options: \"a, b\", just a. My choice: ((a, b)|(just a))',\n+ re=True)\[email protected](\".choose a\", 'Your options: a. My choice: a')\n def choose(bot, trigger):\n \"\"\"\n .choice option1|option2|option3 - Makes a difficult choice easy.\n@@ -249,19 +258,19 @@\n if not trigger.group(2):\n return bot.reply('I\\'d choose an option, but you didn\\'t give me any.')\n choices = [trigger.group(2)]\n- for delim in '|\\\\/,':\n+ for delim in '|\\\\/, ':\n choices = trigger.group(2).split(delim)\n if len(choices) > 1:\n break\n choices = [choice.strip() for choice in choices]\n- # Use a different delimiter in the output, to prevent ambiguity.\n- for show_delim in ',|/\\\\':\n- if show_delim not in trigger.group(2):\n- show_delim += ' '\n- break\n-\n pick = random.choice(choices)\n- return bot.reply('Your options: %s. My choice: %s' % (show_delim.join(choices), pick))\n+\n+ # Always use a comma in the output\n+ display_options = ', '.join(\n+ choice if ',' not in choice else '\"%s\"' % choice\n+ for choice in choices\n+ )\n+ return bot.reply('Your options: %s. My choice: %s' % (display_options, pick))\n \n \n if __name__ == \"__main__\":\n", "issue": "dice: .choose output is messy\nI started this thought process after noticing that `.choose thing1, thing2, thing3` will yield choices that include the leading space (`thing1`, ` thing2`, and ` thing3`), but it's not that simple.\r\n\r\nSince `.choose` just uses a simple `str.split()` as-is, without cleaning the values at all, it's already starting with messy data. Then it swaps the delimiter used for no particular reason ([comment claims](https://github.com/sopel-irc/sopel/blob/d73a410717d6cb2b164bf5411f20a995210bbc22/sopel/modules/dice.py#L248) it's \"to prevent ambiguity\", but I think it actually does the opposite).\r\n\r\nWhat comes out the other end:\r\n\r\n```\r\n<dgw> .choose thing1, thing2, thing3\r\n<Sopel> dgw: Your options: thing1| thing2| thing3. My choice: thing2\r\n```\r\n\r\nExtra spaces everywhere, delimiters that don't really make sense with the spacing around them\u2026 Ugly.\r\n\r\nCurrent plans to sort this out:\r\n\r\n - [x] **In 6.6.0:** Clean the input to avoid extra spaces in the output. ~~Maybe switch to always use `, `&nbsp;as delimiter in output.~~ [#1421]\r\n - [x] **In 7.0.0:** ~~Drop support for everything but `,` (and maybe `/`) in the input, and~~ always use `, `&nbsp;in output ~~(if not already changed for 6.6.0)~~. 
[#1425]\r\n\r\nThere's considerable lead time before Sopel 7, so any user objections to dropping `\\` and `|` as supported delimiters may be dropped in the comments here, or in the comments of the PR I will open with the change some time in early 2019, after 6.6.0 is out.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\ndice.py - Dice Module\nCopyright 2010-2013, Dimitri \"Tyrope\" Molenaars, TyRope.nl\nCopyright 2013, Ari Koivula, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat/\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport random\nimport re\nimport operator\n\nimport sopel.module\nfrom sopel.tools.calculation import eval_equation\n\n\nclass DicePouch:\n def __init__(self, num_of_die, type_of_die, addition):\n \"\"\"Initialize dice pouch and roll the dice.\n\n Args:\n num_of_die: number of dice in the pouch.\n type_of_die: how many faces the dice have.\n addition: how much is added to the result of the dice.\n \"\"\"\n self.num = num_of_die\n self.type = type_of_die\n self.addition = addition\n\n self.dice = {}\n self.dropped = {}\n\n self.roll_dice()\n\n def roll_dice(self):\n \"\"\"Roll all the dice in the pouch.\"\"\"\n self.dice = {}\n self.dropped = {}\n for __ in range(self.num):\n number = random.randint(1, self.type)\n count = self.dice.setdefault(number, 0)\n self.dice[number] = count + 1\n\n def drop_lowest(self, n):\n \"\"\"Drop n lowest dice from the result.\n\n Args:\n n: the number of dice to drop.\n \"\"\"\n\n sorted_x = sorted(self.dice.items(), key=operator.itemgetter(0))\n\n for i, count in sorted_x:\n count = self.dice[i]\n if n == 0:\n break\n elif n < count:\n self.dice[i] = count - n\n self.dropped[i] = n\n break\n else:\n self.dice[i] = 0\n self.dropped[i] = count\n n = n - count\n\n for i, count in self.dropped.items():\n if self.dice[i] == 0:\n del self.dice[i]\n\n def get_simple_string(self):\n \"\"\"Return the values of the dice like (2+2+2[+1+1])+1.\"\"\"\n dice = self.dice.items()\n faces = (\"+\".join([str(face)] * times) for face, times in dice)\n dice_str = \"+\".join(faces)\n\n dropped_str = \"\"\n if self.dropped:\n dropped = self.dropped.items()\n dfaces = (\"+\".join([str(face)] * times) for face, times in dropped)\n dropped_str = \"[+%s]\" % (\"+\".join(dfaces),)\n\n plus_str = \"\"\n if self.addition:\n plus_str = \"{:+d}\".format(self.addition)\n\n return \"(%s%s)%s\" % (dice_str, dropped_str, plus_str)\n\n def get_compressed_string(self):\n \"\"\"Return the values of the dice like (3x2[+2x1])+1.\"\"\"\n dice = self.dice.items()\n faces = (\"%dx%d\" % (times, face) for face, times in dice)\n dice_str = \"+\".join(faces)\n\n dropped_str = \"\"\n if self.dropped:\n dropped = self.dropped.items()\n dfaces = (\"%dx%d\" % (times, face) for face, times in dropped)\n dropped_str = \"[+%s]\" % (\"+\".join(dfaces),)\n\n plus_str = \"\"\n if self.addition:\n plus_str = \"{:+d}\".format(self.addition)\n\n return \"(%s%s)%s\" % (dice_str, dropped_str, plus_str)\n\n def get_sum(self):\n \"\"\"Get the sum of non-dropped dice and the addition.\"\"\"\n result = self.addition\n for face, times in self.dice.items():\n result += face * times\n return result\n\n def get_number_of_faces(self):\n \"\"\"Returns sum of different faces for dropped and not dropped dice\n\n This can be used to estimate, whether the result can be shown in\n compressed form in a reasonable amount of space.\n \"\"\"\n return len(self.dice) + len(self.dropped)\n\n\ndef _roll_dice(bot, 
dice_expression):\n result = re.search(\n r\"\"\"\n (?P<dice_num>-?\\d*)\n d\n (?P<dice_type>-?\\d+)\n (v(?P<drop_lowest>-?\\d+))?\n $\"\"\",\n dice_expression,\n re.IGNORECASE | re.VERBOSE)\n\n dice_num = int(result.group('dice_num') or 1)\n dice_type = int(result.group('dice_type'))\n\n # Dice can't have zero or a negative number of sides.\n if dice_type <= 0:\n bot.reply(\"I don't have any dice with %d sides. =(\" % dice_type)\n return None # Signal there was a problem\n\n # Can't roll a negative number of dice.\n if dice_num < 0:\n bot.reply(\"I'd rather not roll a negative amount of dice. =(\")\n return None # Signal there was a problem\n\n # Upper limit for dice should be at most a million. Creating a dict with\n # more than a million elements already takes a noticeable amount of time\n # on a fast computer and ~55kB of memory.\n if dice_num > 1000:\n bot.reply('I only have 1000 dice. =(')\n return None # Signal there was a problem\n\n dice = DicePouch(dice_num, dice_type, 0)\n\n if result.group('drop_lowest'):\n drop = int(result.group('drop_lowest'))\n if drop >= 0:\n dice.drop_lowest(drop)\n else:\n bot.reply(\"I can't drop the lowest %d dice. =(\" % drop)\n\n return dice\n\n\[email protected](\"roll\")\[email protected](\"dice\")\[email protected](\"d\")\[email protected](\"medium\")\[email protected](\".roll 3d1+1\", 'You roll 3d1+1: (1+1+1)+1 = 4')\[email protected](\".roll 3d1v2+1\", 'You roll 3d1v2+1: (1[+1+1])+1 = 2')\[email protected](\".roll 2d4\", r'You roll 2d4: \\(\\d\\+\\d\\) = \\d', re=True)\[email protected](\".roll 100d1\", r'[^:]*: \\(100x1\\) = 100', re=True)\[email protected](\".roll 1001d1\", 'I only have 1000 dice. =(')\[email protected](\".roll 1d1 + 1d1\", 'You roll 1d1 + 1d1: (1) + (1) = 2')\[email protected](\".roll 1d1+1d1\", 'You roll 1d1+1d1: (1)+(1) = 2')\ndef roll(bot, trigger):\n \"\"\".dice XdY[vZ][+N], rolls dice and reports the result.\n\n X is the number of dice. Y is the number of faces in the dice. Z is the\n number of lowest dice to be dropped from the result. N is the constant to\n be applied to the end result.\n \"\"\"\n # This regexp is only allowed to have one captured group, because having\n # more would alter the output of re.findall.\n dice_regexp = r\"-?\\d*[dD]-?\\d+(?:[vV]-?\\d+)?\"\n\n # Get a list of all dice expressions, evaluate them and then replace the\n # expressions in the original string with the results. Replacing is done\n # using string formatting, so %-characters must be escaped.\n if not trigger.group(2):\n return bot.reply(\"No dice to roll.\")\n arg_str = trigger.group(2)\n dice_expressions = re.findall(dice_regexp, arg_str)\n arg_str = arg_str.replace(\"%\", \"%%\")\n arg_str = re.sub(dice_regexp, \"%s\", arg_str)\n\n f = lambda dice_expr: _roll_dice(bot, dice_expr)\n dice = list(map(f, dice_expressions))\n\n if None in dice:\n # Stop computing roll if there was a problem rolling dice.\n return\n\n def _get_eval_str(dice):\n return \"(%d)\" % (dice.get_sum(),)\n\n def _get_pretty_str(dice):\n if dice.num <= 10:\n return dice.get_simple_string()\n elif dice.get_number_of_faces() <= 10:\n return dice.get_compressed_string()\n else:\n return \"(...)\"\n\n eval_str = arg_str % (tuple(map(_get_eval_str, dice)))\n pretty_str = arg_str % (tuple(map(_get_pretty_str, dice)))\n\n try:\n result = eval_equation(eval_str)\n except TypeError:\n bot.reply(\"The type of this equation is, apparently, not a string. 
\" +\n \"How did you do that, anyway?\")\n except ValueError:\n # As it seems that ValueError is raised if the resulting equation would\n # be too big, give a semi-serious answer to reflect on this.\n bot.reply(\"You roll %s: %s = very big\" % (\n trigger.group(2), pretty_str))\n return\n except (SyntaxError, eval_equation.Error):\n bot.reply(\"I don't know how to process that. \" +\n \"Are the dice as well as the algorithms correct?\")\n return\n\n bot.reply(\"You roll %s: %s = %d\" % (\n trigger.group(2), pretty_str, result))\n\n\[email protected](\"choice\")\[email protected](\"ch\")\[email protected](\"choose\")\[email protected](\"medium\")\ndef choose(bot, trigger):\n \"\"\"\n .choice option1|option2|option3 - Makes a difficult choice easy.\n \"\"\"\n if not trigger.group(2):\n return bot.reply('I\\'d choose an option, but you didn\\'t give me any.')\n choices = [trigger.group(2)]\n for delim in '|\\\\/,':\n choices = trigger.group(2).split(delim)\n if len(choices) > 1:\n break\n choices = [choice.strip() for choice in choices]\n # Use a different delimiter in the output, to prevent ambiguity.\n for show_delim in ',|/\\\\':\n if show_delim not in trigger.group(2):\n show_delim += ' '\n break\n\n pick = random.choice(choices)\n return bot.reply('Your options: %s. My choice: %s' % (show_delim.join(choices), pick))\n\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/dice.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\ndice.py - Dice Module\nCopyright 2010-2013, Dimitri \"Tyrope\" Molenaars, TyRope.nl\nCopyright 2013, Ari Koivula, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat/\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport random\nimport re\nimport operator\n\nimport sopel.module\nfrom sopel.tools.calculation import eval_equation\n\n\nclass DicePouch:\n def __init__(self, num_of_die, type_of_die, addition):\n \"\"\"Initialize dice pouch and roll the dice.\n\n Args:\n num_of_die: number of dice in the pouch.\n type_of_die: how many faces the dice have.\n addition: how much is added to the result of the dice.\n \"\"\"\n self.num = num_of_die\n self.type = type_of_die\n self.addition = addition\n\n self.dice = {}\n self.dropped = {}\n\n self.roll_dice()\n\n def roll_dice(self):\n \"\"\"Roll all the dice in the pouch.\"\"\"\n self.dice = {}\n self.dropped = {}\n for __ in range(self.num):\n number = random.randint(1, self.type)\n count = self.dice.setdefault(number, 0)\n self.dice[number] = count + 1\n\n def drop_lowest(self, n):\n \"\"\"Drop n lowest dice from the result.\n\n Args:\n n: the number of dice to drop.\n \"\"\"\n\n sorted_x = sorted(self.dice.items(), key=operator.itemgetter(0))\n\n for i, count in sorted_x:\n count = self.dice[i]\n if n == 0:\n break\n elif n < count:\n self.dice[i] = count - n\n self.dropped[i] = n\n break\n else:\n self.dice[i] = 0\n self.dropped[i] = count\n n = n - count\n\n for i, count in self.dropped.items():\n if self.dice[i] == 0:\n del self.dice[i]\n\n def get_simple_string(self):\n \"\"\"Return the values of the dice like (2+2+2[+1+1])+1.\"\"\"\n dice = self.dice.items()\n faces = (\"+\".join([str(face)] * times) for face, times in dice)\n dice_str = \"+\".join(faces)\n\n dropped_str = \"\"\n if self.dropped:\n dropped = self.dropped.items()\n dfaces = (\"+\".join([str(face)] * times) for face, times in dropped)\n dropped_str = \"[+%s]\" % 
(\"+\".join(dfaces),)\n\n plus_str = \"\"\n if self.addition:\n plus_str = \"{:+d}\".format(self.addition)\n\n return \"(%s%s)%s\" % (dice_str, dropped_str, plus_str)\n\n def get_compressed_string(self):\n \"\"\"Return the values of the dice like (3x2[+2x1])+1.\"\"\"\n dice = self.dice.items()\n faces = (\"%dx%d\" % (times, face) for face, times in dice)\n dice_str = \"+\".join(faces)\n\n dropped_str = \"\"\n if self.dropped:\n dropped = self.dropped.items()\n dfaces = (\"%dx%d\" % (times, face) for face, times in dropped)\n dropped_str = \"[+%s]\" % (\"+\".join(dfaces),)\n\n plus_str = \"\"\n if self.addition:\n plus_str = \"{:+d}\".format(self.addition)\n\n return \"(%s%s)%s\" % (dice_str, dropped_str, plus_str)\n\n def get_sum(self):\n \"\"\"Get the sum of non-dropped dice and the addition.\"\"\"\n result = self.addition\n for face, times in self.dice.items():\n result += face * times\n return result\n\n def get_number_of_faces(self):\n \"\"\"Returns sum of different faces for dropped and not dropped dice\n\n This can be used to estimate, whether the result can be shown in\n compressed form in a reasonable amount of space.\n \"\"\"\n return len(self.dice) + len(self.dropped)\n\n\ndef _roll_dice(bot, dice_expression):\n result = re.search(\n r\"\"\"\n (?P<dice_num>-?\\d*)\n d\n (?P<dice_type>-?\\d+)\n (v(?P<drop_lowest>-?\\d+))?\n $\"\"\",\n dice_expression,\n re.IGNORECASE | re.VERBOSE)\n\n dice_num = int(result.group('dice_num') or 1)\n dice_type = int(result.group('dice_type'))\n\n # Dice can't have zero or a negative number of sides.\n if dice_type <= 0:\n bot.reply(\"I don't have any dice with %d sides. =(\" % dice_type)\n return None # Signal there was a problem\n\n # Can't roll a negative number of dice.\n if dice_num < 0:\n bot.reply(\"I'd rather not roll a negative amount of dice. =(\")\n return None # Signal there was a problem\n\n # Upper limit for dice should be at most a million. Creating a dict with\n # more than a million elements already takes a noticeable amount of time\n # on a fast computer and ~55kB of memory.\n if dice_num > 1000:\n bot.reply('I only have 1000 dice. =(')\n return None # Signal there was a problem\n\n dice = DicePouch(dice_num, dice_type, 0)\n\n if result.group('drop_lowest'):\n drop = int(result.group('drop_lowest'))\n if drop >= 0:\n dice.drop_lowest(drop)\n else:\n bot.reply(\"I can't drop the lowest %d dice. =(\" % drop)\n\n return dice\n\n\[email protected](\"roll\")\[email protected](\"dice\")\[email protected](\"d\")\[email protected](\"medium\")\[email protected](\".roll 3d1+1\", 'You roll 3d1+1: (1+1+1)+1 = 4')\[email protected](\".roll 3d1v2+1\", 'You roll 3d1v2+1: (1[+1+1])+1 = 2')\[email protected](\".roll 2d4\", r'You roll 2d4: \\(\\d\\+\\d\\) = \\d', re=True)\[email protected](\".roll 100d1\", r'[^:]*: \\(100x1\\) = 100', re=True)\[email protected](\".roll 1001d1\", 'I only have 1000 dice. =(')\[email protected](\".roll 1d1 + 1d1\", 'You roll 1d1 + 1d1: (1) + (1) = 2')\[email protected](\".roll 1d1+1d1\", 'You roll 1d1+1d1: (1)+(1) = 2')\ndef roll(bot, trigger):\n \"\"\".dice XdY[vZ][+N], rolls dice and reports the result.\n\n X is the number of dice. Y is the number of faces in the dice. Z is the\n number of lowest dice to be dropped from the result. 
N is the constant to\n be applied to the end result.\n \"\"\"\n # This regexp is only allowed to have one captured group, because having\n # more would alter the output of re.findall.\n dice_regexp = r\"-?\\d*[dD]-?\\d+(?:[vV]-?\\d+)?\"\n\n # Get a list of all dice expressions, evaluate them and then replace the\n # expressions in the original string with the results. Replacing is done\n # using string formatting, so %-characters must be escaped.\n if not trigger.group(2):\n return bot.reply(\"No dice to roll.\")\n arg_str = trigger.group(2)\n dice_expressions = re.findall(dice_regexp, arg_str)\n arg_str = arg_str.replace(\"%\", \"%%\")\n arg_str = re.sub(dice_regexp, \"%s\", arg_str)\n\n f = lambda dice_expr: _roll_dice(bot, dice_expr)\n dice = list(map(f, dice_expressions))\n\n if None in dice:\n # Stop computing roll if there was a problem rolling dice.\n return\n\n def _get_eval_str(dice):\n return \"(%d)\" % (dice.get_sum(),)\n\n def _get_pretty_str(dice):\n if dice.num <= 10:\n return dice.get_simple_string()\n elif dice.get_number_of_faces() <= 10:\n return dice.get_compressed_string()\n else:\n return \"(...)\"\n\n eval_str = arg_str % (tuple(map(_get_eval_str, dice)))\n pretty_str = arg_str % (tuple(map(_get_pretty_str, dice)))\n\n try:\n result = eval_equation(eval_str)\n except TypeError:\n bot.reply(\"The type of this equation is, apparently, not a string. \" +\n \"How did you do that, anyway?\")\n except ValueError:\n # As it seems that ValueError is raised if the resulting equation would\n # be too big, give a semi-serious answer to reflect on this.\n bot.reply(\"You roll %s: %s = very big\" % (\n trigger.group(2), pretty_str))\n return\n except (SyntaxError, eval_equation.Error):\n bot.reply(\"I don't know how to process that. \" +\n \"Are the dice as well as the algorithms correct?\")\n return\n\n bot.reply(\"You roll %s: %s = %d\" % (\n trigger.group(2), pretty_str, result))\n\n\[email protected](\"choice\")\[email protected](\"ch\")\[email protected](\"choose\")\[email protected](\"medium\")\[email protected](\".choose a, b, c\", r'Your options: a, b, c. My choice: (a|b|c)', re=True)\[email protected](\".choose a | b | c\", r'Your options: a, b, c. My choice: (a|b|c)', re=True)\[email protected](\".choose a,b,c\", r'Your options: a, b, c. My choice: (a|b|c)', re=True)\[email protected](\".choose a|b|c\", r'Your options: a, b, c. My choice: (a|b|c)', re=True)\[email protected](\".choose a b c\", r'Your options: a, b, c. My choice: (a|b|c)', re=True)\[email protected](\".choose a, b | just a\",\n r'Your options: \"a, b\", just a. My choice: ((a, b)|(just a))',\n re=True)\[email protected](\".choose a\", 'Your options: a. My choice: a')\ndef choose(bot, trigger):\n \"\"\"\n .choice option1|option2|option3 - Makes a difficult choice easy.\n \"\"\"\n if not trigger.group(2):\n return bot.reply('I\\'d choose an option, but you didn\\'t give me any.')\n choices = [trigger.group(2)]\n for delim in '|\\\\/, ':\n choices = trigger.group(2).split(delim)\n if len(choices) > 1:\n break\n choices = [choice.strip() for choice in choices]\n pick = random.choice(choices)\n\n # Always use a comma in the output\n display_options = ', '.join(\n choice if ',' not in choice else '\"%s\"' % choice\n for choice in choices\n )\n return bot.reply('Your options: %s. My choice: %s' % (display_options, pick))\n\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/dice.py"}]}
3,874
647
gh_patches_debug_11401
rasdani/github-patches
git_diff
docker__docker-py-1908
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- registry auth for docker service does not work on Windows Credentials to authenticated docker registry are not passed to docker service on Windows after login: ``` client.login(username, password, registry=registry, reauth=True) client.services.create([any_args]) ``` On Centos7 it works fine. Tested with AWS ECR. Debug logs (red lines - Windows, green lines - Centos7): ```diff [DEBUG]: Looking for auth entry for 'XXX.amazonaws.com' [DEBUG]: No entry found [DEBUG]: http://localhost:None "POST /v1.35/auth HTTP/1.1" 200 48 [DEBUG]: {u'IdentityToken': u'', u'Status': u'Login Succeeded'} [DEBUG]: Looking for auth config - [DEBUG]: Using credentials store "wincred" [DEBUG]: Looking for auth entry for 'XXX.amazonaws.com' - [DEBUG]: No entry found - [DEBUG]: No auth config found + [DEBUG]: Found 'XXX.amazonaws.com' + [DEBUG]: Found auth config [DEBUG]: http://localhost:None "POST /v1.35/services/create HTTP/1.1" 201 35 [DEBUG]: http://localhost:None "GET /v1.35/services/ixpf3gaiy0cep3qzj9ly302n6 HTTP/1.1" 200 None ``` Windows environment: ``` docker==2.7.0 docker-pycreds==0.2.1 Python 2.7.14 Client: Version: 17.12.0-ce API version: 1.35 Go version: go1.9.2 Git commit: c97c6d6 Built: Wed Dec 27 20:05:22 2017 OS/Arch: windows/amd64 Server: Engine: Version: 17.12.0-ce API version: 1.35 (minimum version 1.12) Go version: go1.9.2 Git commit: c97c6d6 Built: Wed Dec 27 20:12:29 2017 OS/Arch: linux/amd64 Experimental: true ``` Centos7 environment: ``` docker==2.7.0 docker-pycreds==0.2.1 Python 2.7.5 Client: Version: 17.12.0-ce API version: 1.35 Go version: go1.9.2 Git commit: c97c6d6 Built: Wed Dec 27 20:10:14 2017 OS/Arch: linux/amd64 Server: Engine: Version: 17.12.0-ce API version: 1.35 (minimum version 1.12) Go version: go1.9.2 Git commit: c97c6d6 Built: Wed Dec 27 20:12:46 2017 OS/Arch: linux/amd64 Experimental: false ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docker/auth.py` Content: ``` 1 import base64 2 import json 3 import logging 4 5 import dockerpycreds 6 import six 7 8 from . import errors 9 from .utils import config 10 11 INDEX_NAME = 'docker.io' 12 INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME) 13 TOKEN_USERNAME = '<token>' 14 15 log = logging.getLogger(__name__) 16 17 18 def resolve_repository_name(repo_name): 19 if '://' in repo_name: 20 raise errors.InvalidRepository( 21 'Repository name cannot contain a scheme ({0})'.format(repo_name) 22 ) 23 24 index_name, remote_name = split_repo_name(repo_name) 25 if index_name[0] == '-' or index_name[-1] == '-': 26 raise errors.InvalidRepository( 27 'Invalid index name ({0}). Cannot begin or end with a' 28 ' hyphen.'.format(index_name) 29 ) 30 return resolve_index_name(index_name), remote_name 31 32 33 def resolve_index_name(index_name): 34 index_name = convert_to_hostname(index_name) 35 if index_name == 'index.' + INDEX_NAME: 36 index_name = INDEX_NAME 37 return index_name 38 39 40 def get_config_header(client, registry): 41 log.debug('Looking for auth config') 42 if not client._auth_configs: 43 log.debug( 44 "No auth config in memory - loading from filesystem" 45 ) 46 client._auth_configs = load_config() 47 authcfg = resolve_authconfig(client._auth_configs, registry) 48 # Do not fail here if no authentication exists for this 49 # specific registry as we can have a readonly pull. Just 50 # put the header if we can. 
51 if authcfg: 52 log.debug('Found auth config') 53 # auth_config needs to be a dict in the format used by 54 # auth.py username , password, serveraddress, email 55 return encode_header(authcfg) 56 log.debug('No auth config found') 57 return None 58 59 60 def split_repo_name(repo_name): 61 parts = repo_name.split('/', 1) 62 if len(parts) == 1 or ( 63 '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost' 64 ): 65 # This is a docker index repo (ex: username/foobar or ubuntu) 66 return INDEX_NAME, repo_name 67 return tuple(parts) 68 69 70 def get_credential_store(authconfig, registry): 71 if not registry or registry == INDEX_NAME: 72 registry = 'https://index.docker.io/v1/' 73 74 return authconfig.get('credHelpers', {}).get(registry) or authconfig.get( 75 'credsStore' 76 ) 77 78 79 def resolve_authconfig(authconfig, registry=None): 80 """ 81 Returns the authentication data from the given auth configuration for a 82 specific registry. As with the Docker client, legacy entries in the config 83 with full URLs are stripped down to hostnames before checking for a match. 84 Returns None if no match was found. 85 """ 86 87 if 'credHelpers' in authconfig or 'credsStore' in authconfig: 88 store_name = get_credential_store(authconfig, registry) 89 if store_name is not None: 90 log.debug( 91 'Using credentials store "{0}"'.format(store_name) 92 ) 93 return _resolve_authconfig_credstore( 94 authconfig, registry, store_name 95 ) 96 97 # Default to the public index server 98 registry = resolve_index_name(registry) if registry else INDEX_NAME 99 log.debug("Looking for auth entry for {0}".format(repr(registry))) 100 101 authdict = authconfig.get('auths', {}) 102 if registry in authdict: 103 log.debug("Found {0}".format(repr(registry))) 104 return authdict[registry] 105 106 for key, conf in six.iteritems(authdict): 107 if resolve_index_name(key) == registry: 108 log.debug("Found {0}".format(repr(key))) 109 return conf 110 111 log.debug("No entry found") 112 return None 113 114 115 def _resolve_authconfig_credstore(authconfig, registry, credstore_name): 116 if not registry or registry == INDEX_NAME: 117 # The ecosystem is a little schizophrenic with index.docker.io VS 118 # docker.io - in that case, it seems the full URL is necessary. 
119 registry = INDEX_URL 120 log.debug("Looking for auth entry for {0}".format(repr(registry))) 121 store = dockerpycreds.Store(credstore_name) 122 try: 123 data = store.get(registry) 124 res = { 125 'ServerAddress': registry, 126 } 127 if data['Username'] == TOKEN_USERNAME: 128 res['IdentityToken'] = data['Secret'] 129 else: 130 res.update({ 131 'Username': data['Username'], 132 'Password': data['Secret'], 133 }) 134 return res 135 except dockerpycreds.CredentialsNotFound as e: 136 log.debug('No entry found') 137 return None 138 except dockerpycreds.StoreError as e: 139 raise errors.DockerException( 140 'Credentials store error: {0}'.format(repr(e)) 141 ) 142 143 144 def convert_to_hostname(url): 145 return url.replace('http://', '').replace('https://', '').split('/', 1)[0] 146 147 148 def decode_auth(auth): 149 if isinstance(auth, six.string_types): 150 auth = auth.encode('ascii') 151 s = base64.b64decode(auth) 152 login, pwd = s.split(b':', 1) 153 return login.decode('utf8'), pwd.decode('utf8') 154 155 156 def encode_header(auth): 157 auth_json = json.dumps(auth).encode('ascii') 158 return base64.urlsafe_b64encode(auth_json) 159 160 161 def parse_auth(entries, raise_on_error=False): 162 """ 163 Parses authentication entries 164 165 Args: 166 entries: Dict of authentication entries. 167 raise_on_error: If set to true, an invalid format will raise 168 InvalidConfigFile 169 170 Returns: 171 Authentication registry. 172 """ 173 174 conf = {} 175 for registry, entry in six.iteritems(entries): 176 if not isinstance(entry, dict): 177 log.debug( 178 'Config entry for key {0} is not auth config'.format(registry) 179 ) 180 # We sometimes fall back to parsing the whole config as if it was 181 # the auth config by itself, for legacy purposes. In that case, we 182 # fail silently and return an empty conf if any of the keys is not 183 # formatted properly. 184 if raise_on_error: 185 raise errors.InvalidConfigFile( 186 'Invalid configuration for registry {0}'.format(registry) 187 ) 188 return {} 189 if 'identitytoken' in entry: 190 log.debug('Found an IdentityToken entry for registry {0}'.format( 191 registry 192 )) 193 conf[registry] = { 194 'IdentityToken': entry['identitytoken'] 195 } 196 continue # Other values are irrelevant if we have a token, skip. 197 198 if 'auth' not in entry: 199 # Starting with engine v1.11 (API 1.23), an empty dictionary is 200 # a valid value in the auths config. 201 # https://github.com/docker/compose/issues/3265 202 log.debug( 203 'Auth data for {0} is absent. Client might be using a ' 204 'credentials store instead.'.format(registry) 205 ) 206 conf[registry] = {} 207 continue 208 209 username, password = decode_auth(entry['auth']) 210 log.debug( 211 'Found entry (registry={0}, username={1})' 212 .format(repr(registry), repr(username)) 213 ) 214 215 conf[registry] = { 216 'username': username, 217 'password': password, 218 'email': entry.get('email'), 219 'serveraddress': registry, 220 } 221 return conf 222 223 224 def load_config(config_path=None, config_dict=None): 225 """ 226 Loads authentication data from a Docker configuration file in the given 227 root directory or if config_path is passed use given path. 
228 Lookup priority: 229 explicit config_path parameter > DOCKER_CONFIG environment variable > 230 ~/.docker/config.json > ~/.dockercfg 231 """ 232 233 if not config_dict: 234 config_file = config.find_config_file(config_path) 235 236 if not config_file: 237 return {} 238 try: 239 with open(config_file) as f: 240 config_dict = json.load(f) 241 except (IOError, KeyError, ValueError) as e: 242 # Likely missing new Docker config file or it's in an 243 # unknown format, continue to attempt to read old location 244 # and format. 245 log.debug(e) 246 return _load_legacy_config(config_file) 247 248 res = {} 249 if config_dict.get('auths'): 250 log.debug("Found 'auths' section") 251 res.update({ 252 'auths': parse_auth(config_dict.pop('auths'), raise_on_error=True) 253 }) 254 if config_dict.get('credsStore'): 255 log.debug("Found 'credsStore' section") 256 res.update({'credsStore': config_dict.pop('credsStore')}) 257 if config_dict.get('credHelpers'): 258 log.debug("Found 'credHelpers' section") 259 res.update({'credHelpers': config_dict.pop('credHelpers')}) 260 if res: 261 return res 262 263 log.debug( 264 "Couldn't find auth-related section ; attempting to interpret" 265 "as auth-only file" 266 ) 267 return parse_auth(config_dict) 268 269 270 def _load_legacy_config(config_file): 271 log.debug("Attempting to parse legacy auth file format") 272 try: 273 data = [] 274 with open(config_file) as f: 275 for line in f.readlines(): 276 data.append(line.strip().split(' = ')[1]) 277 if len(data) < 2: 278 # Not enough data 279 raise errors.InvalidConfigFile( 280 'Invalid or empty configuration file!' 281 ) 282 283 username, password = decode_auth(data[0]) 284 return { 285 INDEX_NAME: { 286 'username': username, 287 'password': password, 288 'email': data[1], 289 'serveraddress': INDEX_URL, 290 } 291 } 292 except Exception as e: 293 log.debug(e) 294 pass 295 296 log.debug("All parsing attempts failed - returning empty config") 297 return {} 298 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docker/auth.py b/docker/auth.py --- a/docker/auth.py +++ b/docker/auth.py @@ -90,9 +90,12 @@ log.debug( 'Using credentials store "{0}"'.format(store_name) ) - return _resolve_authconfig_credstore( + cfg = _resolve_authconfig_credstore( authconfig, registry, store_name ) + if cfg is not None: + return cfg + log.debug('No entry in credstore - fetching from auth dict') # Default to the public index server registry = resolve_index_name(registry) if registry else INDEX_NAME
{"golden_diff": "diff --git a/docker/auth.py b/docker/auth.py\n--- a/docker/auth.py\n+++ b/docker/auth.py\n@@ -90,9 +90,12 @@\n log.debug(\n 'Using credentials store \"{0}\"'.format(store_name)\n )\n- return _resolve_authconfig_credstore(\n+ cfg = _resolve_authconfig_credstore(\n authconfig, registry, store_name\n )\n+ if cfg is not None:\n+ return cfg\n+ log.debug('No entry in credstore - fetching from auth dict')\n \n # Default to the public index server\n registry = resolve_index_name(registry) if registry else INDEX_NAME\n", "issue": "registry auth for docker service does not work on Windows\nCredentials to authenticated docker registry are not passed to docker service on Windows after login:\r\n```\r\nclient.login(username, password, registry=registry, reauth=True)\r\nclient.services.create([any_args])\r\n```\r\n\r\nOn Centos7 it works fine.\r\nTested with AWS ECR.\r\n\r\nDebug logs (red lines - Windows, green lines - Centos7):\r\n```diff\r\n[DEBUG]: Looking for auth entry for 'XXX.amazonaws.com'\r\n[DEBUG]: No entry found\r\n[DEBUG]: http://localhost:None \"POST /v1.35/auth HTTP/1.1\" 200 48\r\n[DEBUG]: {u'IdentityToken': u'', u'Status': u'Login Succeeded'}\r\n[DEBUG]: Looking for auth config\r\n- [DEBUG]: Using credentials store \"wincred\"\r\n[DEBUG]: Looking for auth entry for 'XXX.amazonaws.com'\r\n- [DEBUG]: No entry found\r\n- [DEBUG]: No auth config found\r\n+ [DEBUG]: Found 'XXX.amazonaws.com'\r\n+ [DEBUG]: Found auth config\r\n[DEBUG]: http://localhost:None \"POST /v1.35/services/create HTTP/1.1\" 201 35\r\n[DEBUG]: http://localhost:None \"GET /v1.35/services/ixpf3gaiy0cep3qzj9ly302n6 HTTP/1.1\" 200 None\r\n```\r\n\r\nWindows environment:\r\n```\r\ndocker==2.7.0\r\ndocker-pycreds==0.2.1\r\nPython 2.7.14\r\nClient:\r\n Version: 17.12.0-ce\r\n API version: 1.35\r\n Go version: go1.9.2\r\n Git commit: c97c6d6\r\n Built: Wed Dec 27 20:05:22 2017\r\n OS/Arch: windows/amd64\r\n\r\nServer:\r\n Engine:\r\n Version: 17.12.0-ce\r\n API version: 1.35 (minimum version 1.12)\r\n Go version: go1.9.2\r\n Git commit: c97c6d6\r\n Built: Wed Dec 27 20:12:29 2017\r\n OS/Arch: linux/amd64\r\n Experimental: true\r\n```\r\n\r\nCentos7 environment:\r\n```\r\ndocker==2.7.0\r\ndocker-pycreds==0.2.1\r\nPython 2.7.5\r\nClient:\r\n Version: 17.12.0-ce\r\n API version: 1.35\r\n Go version: go1.9.2\r\n Git commit: c97c6d6\r\n Built: Wed Dec 27 20:10:14 2017\r\n OS/Arch: linux/amd64\r\n\r\nServer:\r\n Engine:\r\n Version: 17.12.0-ce\r\n API version: 1.35 (minimum version 1.12)\r\n Go version: go1.9.2\r\n Git commit: c97c6d6\r\n Built: Wed Dec 27 20:12:46 2017\r\n OS/Arch: linux/amd64\r\n Experimental: false\r\n```\n", "before_files": [{"content": "import base64\nimport json\nimport logging\n\nimport dockerpycreds\nimport six\n\nfrom . import errors\nfrom .utils import config\n\nINDEX_NAME = 'docker.io'\nINDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)\nTOKEN_USERNAME = '<token>'\n\nlog = logging.getLogger(__name__)\n\n\ndef resolve_repository_name(repo_name):\n if '://' in repo_name:\n raise errors.InvalidRepository(\n 'Repository name cannot contain a scheme ({0})'.format(repo_name)\n )\n\n index_name, remote_name = split_repo_name(repo_name)\n if index_name[0] == '-' or index_name[-1] == '-':\n raise errors.InvalidRepository(\n 'Invalid index name ({0}). Cannot begin or end with a'\n ' hyphen.'.format(index_name)\n )\n return resolve_index_name(index_name), remote_name\n\n\ndef resolve_index_name(index_name):\n index_name = convert_to_hostname(index_name)\n if index_name == 'index.' 
+ INDEX_NAME:\n index_name = INDEX_NAME\n return index_name\n\n\ndef get_config_header(client, registry):\n log.debug('Looking for auth config')\n if not client._auth_configs:\n log.debug(\n \"No auth config in memory - loading from filesystem\"\n )\n client._auth_configs = load_config()\n authcfg = resolve_authconfig(client._auth_configs, registry)\n # Do not fail here if no authentication exists for this\n # specific registry as we can have a readonly pull. Just\n # put the header if we can.\n if authcfg:\n log.debug('Found auth config')\n # auth_config needs to be a dict in the format used by\n # auth.py username , password, serveraddress, email\n return encode_header(authcfg)\n log.debug('No auth config found')\n return None\n\n\ndef split_repo_name(repo_name):\n parts = repo_name.split('/', 1)\n if len(parts) == 1 or (\n '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'\n ):\n # This is a docker index repo (ex: username/foobar or ubuntu)\n return INDEX_NAME, repo_name\n return tuple(parts)\n\n\ndef get_credential_store(authconfig, registry):\n if not registry or registry == INDEX_NAME:\n registry = 'https://index.docker.io/v1/'\n\n return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(\n 'credsStore'\n )\n\n\ndef resolve_authconfig(authconfig, registry=None):\n \"\"\"\n Returns the authentication data from the given auth configuration for a\n specific registry. As with the Docker client, legacy entries in the config\n with full URLs are stripped down to hostnames before checking for a match.\n Returns None if no match was found.\n \"\"\"\n\n if 'credHelpers' in authconfig or 'credsStore' in authconfig:\n store_name = get_credential_store(authconfig, registry)\n if store_name is not None:\n log.debug(\n 'Using credentials store \"{0}\"'.format(store_name)\n )\n return _resolve_authconfig_credstore(\n authconfig, registry, store_name\n )\n\n # Default to the public index server\n registry = resolve_index_name(registry) if registry else INDEX_NAME\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n\n authdict = authconfig.get('auths', {})\n if registry in authdict:\n log.debug(\"Found {0}\".format(repr(registry)))\n return authdict[registry]\n\n for key, conf in six.iteritems(authdict):\n if resolve_index_name(key) == registry:\n log.debug(\"Found {0}\".format(repr(key)))\n return conf\n\n log.debug(\"No entry found\")\n return None\n\n\ndef _resolve_authconfig_credstore(authconfig, registry, credstore_name):\n if not registry or registry == INDEX_NAME:\n # The ecosystem is a little schizophrenic with index.docker.io VS\n # docker.io - in that case, it seems the full URL is necessary.\n registry = INDEX_URL\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n store = dockerpycreds.Store(credstore_name)\n try:\n data = store.get(registry)\n res = {\n 'ServerAddress': registry,\n }\n if data['Username'] == TOKEN_USERNAME:\n res['IdentityToken'] = data['Secret']\n else:\n res.update({\n 'Username': data['Username'],\n 'Password': data['Secret'],\n })\n return res\n except dockerpycreds.CredentialsNotFound as e:\n log.debug('No entry found')\n return None\n except dockerpycreds.StoreError as e:\n raise errors.DockerException(\n 'Credentials store error: {0}'.format(repr(e))\n )\n\n\ndef convert_to_hostname(url):\n return url.replace('http://', '').replace('https://', '').split('/', 1)[0]\n\n\ndef decode_auth(auth):\n if isinstance(auth, six.string_types):\n auth = auth.encode('ascii')\n s = 
base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n return login.decode('utf8'), pwd.decode('utf8')\n\n\ndef encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n return base64.urlsafe_b64encode(auth_json)\n\n\ndef parse_auth(entries, raise_on_error=False):\n \"\"\"\n Parses authentication entries\n\n Args:\n entries: Dict of authentication entries.\n raise_on_error: If set to true, an invalid format will raise\n InvalidConfigFile\n\n Returns:\n Authentication registry.\n \"\"\"\n\n conf = {}\n for registry, entry in six.iteritems(entries):\n if not isinstance(entry, dict):\n log.debug(\n 'Config entry for key {0} is not auth config'.format(registry)\n )\n # We sometimes fall back to parsing the whole config as if it was\n # the auth config by itself, for legacy purposes. In that case, we\n # fail silently and return an empty conf if any of the keys is not\n # formatted properly.\n if raise_on_error:\n raise errors.InvalidConfigFile(\n 'Invalid configuration for registry {0}'.format(registry)\n )\n return {}\n if 'identitytoken' in entry:\n log.debug('Found an IdentityToken entry for registry {0}'.format(\n registry\n ))\n conf[registry] = {\n 'IdentityToken': entry['identitytoken']\n }\n continue # Other values are irrelevant if we have a token, skip.\n\n if 'auth' not in entry:\n # Starting with engine v1.11 (API 1.23), an empty dictionary is\n # a valid value in the auths config.\n # https://github.com/docker/compose/issues/3265\n log.debug(\n 'Auth data for {0} is absent. Client might be using a '\n 'credentials store instead.'.format(registry)\n )\n conf[registry] = {}\n continue\n\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n .format(repr(registry), repr(username))\n )\n\n conf[registry] = {\n 'username': username,\n 'password': password,\n 'email': entry.get('email'),\n 'serveraddress': registry,\n }\n return conf\n\n\ndef load_config(config_path=None, config_dict=None):\n \"\"\"\n Loads authentication data from a Docker configuration file in the given\n root directory or if config_path is passed use given path.\n Lookup priority:\n explicit config_path parameter > DOCKER_CONFIG environment variable >\n ~/.docker/config.json > ~/.dockercfg\n \"\"\"\n\n if not config_dict:\n config_file = config.find_config_file(config_path)\n\n if not config_file:\n return {}\n try:\n with open(config_file) as f:\n config_dict = json.load(f)\n except (IOError, KeyError, ValueError) as e:\n # Likely missing new Docker config file or it's in an\n # unknown format, continue to attempt to read old location\n # and format.\n log.debug(e)\n return _load_legacy_config(config_file)\n\n res = {}\n if config_dict.get('auths'):\n log.debug(\"Found 'auths' section\")\n res.update({\n 'auths': parse_auth(config_dict.pop('auths'), raise_on_error=True)\n })\n if config_dict.get('credsStore'):\n log.debug(\"Found 'credsStore' section\")\n res.update({'credsStore': config_dict.pop('credsStore')})\n if config_dict.get('credHelpers'):\n log.debug(\"Found 'credHelpers' section\")\n res.update({'credHelpers': config_dict.pop('credHelpers')})\n if res:\n return res\n\n log.debug(\n \"Couldn't find auth-related section ; attempting to interpret\"\n \"as auth-only file\"\n )\n return parse_auth(config_dict)\n\n\ndef _load_legacy_config(config_file):\n log.debug(\"Attempting to parse legacy auth file format\")\n try:\n data = []\n with open(config_file) as f:\n for line in f.readlines():\n data.append(line.strip().split(' = ')[1])\n if 
len(data) < 2:\n # Not enough data\n raise errors.InvalidConfigFile(\n 'Invalid or empty configuration file!'\n )\n\n username, password = decode_auth(data[0])\n return {\n INDEX_NAME: {\n 'username': username,\n 'password': password,\n 'email': data[1],\n 'serveraddress': INDEX_URL,\n }\n }\n except Exception as e:\n log.debug(e)\n pass\n\n log.debug(\"All parsing attempts failed - returning empty config\")\n return {}\n", "path": "docker/auth.py"}], "after_files": [{"content": "import base64\nimport json\nimport logging\n\nimport dockerpycreds\nimport six\n\nfrom . import errors\nfrom .utils import config\n\nINDEX_NAME = 'docker.io'\nINDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)\nTOKEN_USERNAME = '<token>'\n\nlog = logging.getLogger(__name__)\n\n\ndef resolve_repository_name(repo_name):\n if '://' in repo_name:\n raise errors.InvalidRepository(\n 'Repository name cannot contain a scheme ({0})'.format(repo_name)\n )\n\n index_name, remote_name = split_repo_name(repo_name)\n if index_name[0] == '-' or index_name[-1] == '-':\n raise errors.InvalidRepository(\n 'Invalid index name ({0}). Cannot begin or end with a'\n ' hyphen.'.format(index_name)\n )\n return resolve_index_name(index_name), remote_name\n\n\ndef resolve_index_name(index_name):\n index_name = convert_to_hostname(index_name)\n if index_name == 'index.' + INDEX_NAME:\n index_name = INDEX_NAME\n return index_name\n\n\ndef get_config_header(client, registry):\n log.debug('Looking for auth config')\n if not client._auth_configs:\n log.debug(\n \"No auth config in memory - loading from filesystem\"\n )\n client._auth_configs = load_config()\n authcfg = resolve_authconfig(client._auth_configs, registry)\n # Do not fail here if no authentication exists for this\n # specific registry as we can have a readonly pull. Just\n # put the header if we can.\n if authcfg:\n log.debug('Found auth config')\n # auth_config needs to be a dict in the format used by\n # auth.py username , password, serveraddress, email\n return encode_header(authcfg)\n log.debug('No auth config found')\n return None\n\n\ndef split_repo_name(repo_name):\n parts = repo_name.split('/', 1)\n if len(parts) == 1 or (\n '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'\n ):\n # This is a docker index repo (ex: username/foobar or ubuntu)\n return INDEX_NAME, repo_name\n return tuple(parts)\n\n\ndef get_credential_store(authconfig, registry):\n if not registry or registry == INDEX_NAME:\n registry = 'https://index.docker.io/v1/'\n\n return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(\n 'credsStore'\n )\n\n\ndef resolve_authconfig(authconfig, registry=None):\n \"\"\"\n Returns the authentication data from the given auth configuration for a\n specific registry. 
As with the Docker client, legacy entries in the config\n with full URLs are stripped down to hostnames before checking for a match.\n Returns None if no match was found.\n \"\"\"\n\n if 'credHelpers' in authconfig or 'credsStore' in authconfig:\n store_name = get_credential_store(authconfig, registry)\n if store_name is not None:\n log.debug(\n 'Using credentials store \"{0}\"'.format(store_name)\n )\n cfg = _resolve_authconfig_credstore(\n authconfig, registry, store_name\n )\n if cfg is not None:\n return cfg\n log.debug('No entry in credstore - fetching from auth dict')\n\n # Default to the public index server\n registry = resolve_index_name(registry) if registry else INDEX_NAME\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n\n authdict = authconfig.get('auths', {})\n if registry in authdict:\n log.debug(\"Found {0}\".format(repr(registry)))\n return authdict[registry]\n\n for key, conf in six.iteritems(authdict):\n if resolve_index_name(key) == registry:\n log.debug(\"Found {0}\".format(repr(key)))\n return conf\n\n log.debug(\"No entry found\")\n return None\n\n\ndef _resolve_authconfig_credstore(authconfig, registry, credstore_name):\n if not registry or registry == INDEX_NAME:\n # The ecosystem is a little schizophrenic with index.docker.io VS\n # docker.io - in that case, it seems the full URL is necessary.\n registry = INDEX_URL\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n store = dockerpycreds.Store(credstore_name)\n try:\n data = store.get(registry)\n res = {\n 'ServerAddress': registry,\n }\n if data['Username'] == TOKEN_USERNAME:\n res['IdentityToken'] = data['Secret']\n else:\n res.update({\n 'Username': data['Username'],\n 'Password': data['Secret'],\n })\n return res\n except dockerpycreds.CredentialsNotFound as e:\n log.debug('No entry found')\n return None\n except dockerpycreds.StoreError as e:\n raise errors.DockerException(\n 'Credentials store error: {0}'.format(repr(e))\n )\n\n\ndef convert_to_hostname(url):\n return url.replace('http://', '').replace('https://', '').split('/', 1)[0]\n\n\ndef decode_auth(auth):\n if isinstance(auth, six.string_types):\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n return login.decode('utf8'), pwd.decode('utf8')\n\n\ndef encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n return base64.urlsafe_b64encode(auth_json)\n\n\ndef parse_auth(entries, raise_on_error=False):\n \"\"\"\n Parses authentication entries\n\n Args:\n entries: Dict of authentication entries.\n raise_on_error: If set to true, an invalid format will raise\n InvalidConfigFile\n\n Returns:\n Authentication registry.\n \"\"\"\n\n conf = {}\n for registry, entry in six.iteritems(entries):\n if not isinstance(entry, dict):\n log.debug(\n 'Config entry for key {0} is not auth config'.format(registry)\n )\n # We sometimes fall back to parsing the whole config as if it was\n # the auth config by itself, for legacy purposes. 
In that case, we\n # fail silently and return an empty conf if any of the keys is not\n # formatted properly.\n if raise_on_error:\n raise errors.InvalidConfigFile(\n 'Invalid configuration for registry {0}'.format(registry)\n )\n return {}\n if 'identitytoken' in entry:\n log.debug('Found an IdentityToken entry for registry {0}'.format(\n registry\n ))\n conf[registry] = {\n 'IdentityToken': entry['identitytoken']\n }\n continue # Other values are irrelevant if we have a token, skip.\n\n if 'auth' not in entry:\n # Starting with engine v1.11 (API 1.23), an empty dictionary is\n # a valid value in the auths config.\n # https://github.com/docker/compose/issues/3265\n log.debug(\n 'Auth data for {0} is absent. Client might be using a '\n 'credentials store instead.'.format(registry)\n )\n conf[registry] = {}\n continue\n\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n .format(repr(registry), repr(username))\n )\n\n conf[registry] = {\n 'username': username,\n 'password': password,\n 'email': entry.get('email'),\n 'serveraddress': registry,\n }\n return conf\n\n\ndef load_config(config_path=None, config_dict=None):\n \"\"\"\n Loads authentication data from a Docker configuration file in the given\n root directory or if config_path is passed use given path.\n Lookup priority:\n explicit config_path parameter > DOCKER_CONFIG environment variable >\n ~/.docker/config.json > ~/.dockercfg\n \"\"\"\n\n if not config_dict:\n config_file = config.find_config_file(config_path)\n\n if not config_file:\n return {}\n try:\n with open(config_file) as f:\n config_dict = json.load(f)\n except (IOError, KeyError, ValueError) as e:\n # Likely missing new Docker config file or it's in an\n # unknown format, continue to attempt to read old location\n # and format.\n log.debug(e)\n return _load_legacy_config(config_file)\n\n res = {}\n if config_dict.get('auths'):\n log.debug(\"Found 'auths' section\")\n res.update({\n 'auths': parse_auth(config_dict.pop('auths'), raise_on_error=True)\n })\n if config_dict.get('credsStore'):\n log.debug(\"Found 'credsStore' section\")\n res.update({'credsStore': config_dict.pop('credsStore')})\n if config_dict.get('credHelpers'):\n log.debug(\"Found 'credHelpers' section\")\n res.update({'credHelpers': config_dict.pop('credHelpers')})\n if res:\n return res\n\n log.debug(\n \"Couldn't find auth-related section ; attempting to interpret\"\n \"as auth-only file\"\n )\n return parse_auth(config_dict)\n\n\ndef _load_legacy_config(config_file):\n log.debug(\"Attempting to parse legacy auth file format\")\n try:\n data = []\n with open(config_file) as f:\n for line in f.readlines():\n data.append(line.strip().split(' = ')[1])\n if len(data) < 2:\n # Not enough data\n raise errors.InvalidConfigFile(\n 'Invalid or empty configuration file!'\n )\n\n username, password = decode_auth(data[0])\n return {\n INDEX_NAME: {\n 'username': username,\n 'password': password,\n 'email': data[1],\n 'serveraddress': INDEX_URL,\n }\n }\n except Exception as e:\n log.debug(e)\n pass\n\n log.debug(\"All parsing attempts failed - returning empty config\")\n return {}\n", "path": "docker/auth.py"}]}
3,931
142
gh_patches_debug_35337
rasdani/github-patches
git_diff
MycroftAI__mycroft-core-2659
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- skill keep reloading indefinitely if there are files with a future date due to a temporary problem in my system, i ended up with a skill with __init.py modification time set in the future. this caused that skill to constantly be reloaded my mycroft core, and unloaded just after the loading was complted. took some hours of debug to understand this was actually the problem. perhaps skills with files with modification date from the future should just be stopped from loading and have a debug log about it? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mycroft/skills/skill_loader.py` Content: ``` 1 # Copyright 2019 Mycroft AI Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # 15 """Periodically run by skill manager to load skills into memory.""" 16 import gc 17 import importlib 18 import os 19 from os.path import dirname 20 import sys 21 from time import time 22 23 from mycroft.configuration import Configuration 24 from mycroft.messagebus import Message 25 from mycroft.skills.settings import save_settings 26 from mycroft.util.log import LOG 27 28 from .settings import SettingsMetaUploader 29 30 SKILL_MAIN_MODULE = '__init__.py' 31 32 33 def remove_submodule_refs(module_name): 34 """Ensure submodules are reloaded by removing the refs from sys.modules. 35 36 Python import system puts a reference for each module in the sys.modules 37 dictionary to bypass loading if a module is already in memory. To make 38 sure skills are completely reloaded these references are deleted. 39 40 Arguments: 41 module_name: name of skill module. 42 """ 43 submodules = [] 44 LOG.debug('Skill module'.format(module_name)) 45 # Collect found submodules 46 for m in sys.modules: 47 if m.startswith(module_name + '.'): 48 submodules.append(m) 49 # Remove all references them to in sys.modules 50 for m in submodules: 51 LOG.debug('Removing sys.modules ref for {}'.format(m)) 52 del(sys.modules[m]) 53 54 55 def load_skill_module(path, skill_id): 56 """Load a skill module 57 58 This function handles the differences between python 3.4 and 3.5+ as well 59 as makes sure the module is inserted into the sys.modules dict. 60 61 Arguments: 62 path: Path to the skill main file (__init__.py) 63 skill_id: skill_id used as skill identifier in the module list 64 """ 65 module_name = skill_id.replace('.', '_') 66 67 remove_submodule_refs(module_name) 68 69 spec = importlib.util.spec_from_file_location(module_name, path) 70 mod = importlib.util.module_from_spec(spec) 71 sys.modules[module_name] = mod 72 spec.loader.exec_module(mod) 73 return mod 74 75 76 def _get_last_modified_time(path): 77 """Get the last modified date of the most recently updated file in a path. 78 79 Exclude compiled python files, hidden directories and the settings.json 80 file. 
81 82 Arguments: 83 path: skill directory to check 84 85 Returns: 86 int: time of last change 87 """ 88 all_files = [] 89 for root_dir, dirs, files in os.walk(path): 90 dirs[:] = [d for d in dirs if not d.startswith('.')] 91 for f in files: 92 ignore_file = ( 93 f.endswith('.pyc') or 94 f == 'settings.json' or 95 f.startswith('.') or 96 f.endswith('.qmlc') 97 ) 98 if not ignore_file: 99 all_files.append(os.path.join(root_dir, f)) 100 101 # check files of interest in the skill root directory 102 if all_files: 103 return max(os.path.getmtime(f) for f in all_files) 104 else: 105 return 0 106 107 108 class SkillLoader: 109 def __init__(self, bus, skill_directory): 110 self.bus = bus 111 self.skill_directory = skill_directory 112 self.skill_id = os.path.basename(skill_directory) 113 self.load_attempted = False 114 self.loaded = False 115 self.last_modified = 0 116 self.last_loaded = 0 117 self.instance = None 118 self.active = True 119 self.config = Configuration.get() 120 121 @property 122 def is_blacklisted(self): 123 """Boolean value representing whether or not a skill is blacklisted.""" 124 blacklist = self.config['skills'].get('blacklisted_skills', []) 125 if self.skill_id in blacklist: 126 return True 127 else: 128 return False 129 130 def reload_needed(self): 131 """Load an unloaded skill or reload unloaded/changed skill. 132 133 Returns: 134 bool: if the skill was loaded/reloaded 135 """ 136 try: 137 self.last_modified = _get_last_modified_time(self.skill_directory) 138 except FileNotFoundError as e: 139 LOG.error('Failed to get last_modification time ' 140 '({})'.format(repr(e))) 141 self.last_modified = self.last_loaded 142 143 modified = self.last_modified > self.last_loaded 144 145 # create local reference to avoid threading issues 146 instance = self.instance 147 148 reload_allowed = ( 149 self.active and 150 (instance is None or instance.reload_skill) 151 ) 152 return modified and reload_allowed 153 154 def reload(self): 155 LOG.info('ATTEMPTING TO RELOAD SKILL: ' + self.skill_id) 156 if self.instance: 157 self._unload() 158 return self._load() 159 160 def load(self): 161 LOG.info('ATTEMPTING TO LOAD SKILL: ' + self.skill_id) 162 return self._load() 163 164 def _unload(self): 165 """Remove listeners and stop threads before loading""" 166 self._execute_instance_shutdown() 167 if self.config.get("debug", False): 168 self._garbage_collect() 169 self.loaded = False 170 self._emit_skill_shutdown_event() 171 172 def unload(self): 173 if self.instance: 174 self._execute_instance_shutdown() 175 self.loaded = False 176 177 def activate(self): 178 self.active = True 179 self.load() 180 181 def deactivate(self): 182 self.active = False 183 self.unload() 184 185 def _execute_instance_shutdown(self): 186 """Call the shutdown method of the skill being reloaded.""" 187 try: 188 self.instance.default_shutdown() 189 except Exception: 190 log_msg = 'An error occurred while shutting down {}' 191 LOG.exception(log_msg.format(self.instance.name)) 192 else: 193 LOG.info('Skill {} shut down successfully'.format(self.skill_id)) 194 195 def _garbage_collect(self): 196 """Invoke Python garbage collector to remove false references""" 197 gc.collect() 198 # Remove two local references that are known 199 refs = sys.getrefcount(self.instance) - 2 200 if refs > 0: 201 log_msg = ( 202 "After shutdown of {} there are still {} references " 203 "remaining. The skill won't be cleaned from memory." 
204 ) 205 LOG.warning(log_msg.format(self.instance.name, refs)) 206 207 def _emit_skill_shutdown_event(self): 208 message = Message( 209 "mycroft.skills.shutdown", 210 data=dict(path=self.skill_directory, id=self.skill_id) 211 ) 212 self.bus.emit(message) 213 214 def _load(self): 215 self._prepare_for_load() 216 if self.is_blacklisted: 217 self._skip_load() 218 else: 219 skill_module = self._load_skill_source() 220 if skill_module and self._create_skill_instance(skill_module): 221 self._check_for_first_run() 222 self.loaded = True 223 224 self.last_loaded = time() 225 self._communicate_load_status() 226 if self.loaded: 227 self._prepare_settings_meta() 228 return self.loaded 229 230 def _prepare_settings_meta(self): 231 settings_meta = SettingsMetaUploader(self.skill_directory, 232 self.instance.name) 233 self.instance.settings_meta = settings_meta 234 235 def _prepare_for_load(self): 236 self.load_attempted = True 237 self.loaded = False 238 self.instance = None 239 240 def _skip_load(self): 241 log_msg = 'Skill {} is blacklisted - it will not be loaded' 242 LOG.info(log_msg.format(self.skill_id)) 243 244 def _load_skill_source(self): 245 """Use Python's import library to load a skill's source code.""" 246 main_file_path = os.path.join(self.skill_directory, SKILL_MAIN_MODULE) 247 if not os.path.exists(main_file_path): 248 error_msg = 'Failed to load {} due to a missing file.' 249 LOG.error(error_msg.format(self.skill_id)) 250 else: 251 try: 252 skill_module = load_skill_module(main_file_path, self.skill_id) 253 except Exception as e: 254 LOG.exception('Failed to load skill: ' 255 '{} ({})'.format(self.skill_id, repr(e))) 256 else: 257 module_is_skill = ( 258 hasattr(skill_module, 'create_skill') and 259 callable(skill_module.create_skill) 260 ) 261 if module_is_skill: 262 return skill_module 263 return None # Module wasn't loaded 264 265 def _create_skill_instance(self, skill_module): 266 """Use v2 skills framework to create the skill.""" 267 try: 268 self.instance = skill_module.create_skill() 269 except Exception as e: 270 log_msg = 'Skill __init__ failed with {}' 271 LOG.exception(log_msg.format(repr(e))) 272 self.instance = None 273 274 if self.instance: 275 self.instance.skill_id = self.skill_id 276 self.instance.bind(self.bus) 277 try: 278 self.instance.load_data_files() 279 # Set up intent handlers 280 # TODO: can this be a public method? 
281 self.instance._register_decorated() 282 self.instance.register_resting_screen() 283 self.instance.initialize() 284 except Exception as e: 285 # If an exception occurs, make sure to clean up the skill 286 self.instance.default_shutdown() 287 self.instance = None 288 log_msg = 'Skill initialization failed with {}' 289 LOG.exception(log_msg.format(repr(e))) 290 291 return self.instance is not None 292 293 def _check_for_first_run(self): 294 """The very first time a skill is run, speak the intro.""" 295 first_run = self.instance.settings.get( 296 "__mycroft_skill_firstrun", 297 True 298 ) 299 if first_run: 300 LOG.info("First run of " + self.skill_id) 301 self.instance.settings["__mycroft_skill_firstrun"] = False 302 save_settings(self.instance.settings_write_path, 303 self.instance.settings) 304 intro = self.instance.get_intro_message() 305 if intro: 306 self.instance.speak(intro) 307 308 def _communicate_load_status(self): 309 if self.loaded: 310 message = Message( 311 'mycroft.skills.loaded', 312 data=dict( 313 path=self.skill_directory, 314 id=self.skill_id, 315 name=self.instance.name, 316 modified=self.last_modified 317 ) 318 ) 319 self.bus.emit(message) 320 LOG.info('Skill {} loaded successfully'.format(self.skill_id)) 321 else: 322 message = Message( 323 'mycroft.skills.loading_failure', 324 data=dict(path=self.skill_directory, id=self.skill_id) 325 ) 326 self.bus.emit(message) 327 LOG.error('Skill {} failed to load'.format(self.skill_id)) 328 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mycroft/skills/skill_loader.py b/mycroft/skills/skill_loader.py --- a/mycroft/skills/skill_loader.py +++ b/mycroft/skills/skill_loader.py @@ -73,6 +73,19 @@ return mod +def _bad_mod_times(mod_times): + """Return all entries with modification time in the future. + + Arguments: + mod_times (dict): dict mapping file paths to modification times. + + Returns: + List of files with bad modification times. + """ + current_time = time() + return [path for path in mod_times if mod_times[path] > current_time] + + def _get_last_modified_time(path): """Get the last modified date of the most recently updated file in a path. @@ -99,6 +112,11 @@ all_files.append(os.path.join(root_dir, f)) # check files of interest in the skill root directory + mod_times = {f: os.path.getmtime(f) for f in all_files} + # Ensure modification times are valid + bad_times = _bad_mod_times(mod_times) + if bad_times: + raise OSError('{} had bad modification times'.format(bad_times)) if all_files: return max(os.path.getmtime(f) for f in all_files) else: @@ -118,6 +136,8 @@ self.active = True self.config = Configuration.get() + self.modtime_error_log_written = False + @property def is_blacklisted(self): """Boolean value representing whether or not a skill is blacklisted.""" @@ -135,10 +155,14 @@ """ try: self.last_modified = _get_last_modified_time(self.skill_directory) - except FileNotFoundError as e: - LOG.error('Failed to get last_modification time ' - '({})'.format(repr(e))) + except OSError as err: self.last_modified = self.last_loaded + if not self.modtime_error_log_written: + self.modtime_error_log_written = True + LOG.error('Failed to get last_modification time ' + '({})'.format(repr(err))) + else: + self.modtime_error_log_written = False modified = self.last_modified > self.last_loaded
{"golden_diff": "diff --git a/mycroft/skills/skill_loader.py b/mycroft/skills/skill_loader.py\n--- a/mycroft/skills/skill_loader.py\n+++ b/mycroft/skills/skill_loader.py\n@@ -73,6 +73,19 @@\n return mod\n \n \n+def _bad_mod_times(mod_times):\n+ \"\"\"Return all entries with modification time in the future.\n+\n+ Arguments:\n+ mod_times (dict): dict mapping file paths to modification times.\n+\n+ Returns:\n+ List of files with bad modification times.\n+ \"\"\"\n+ current_time = time()\n+ return [path for path in mod_times if mod_times[path] > current_time]\n+\n+\n def _get_last_modified_time(path):\n \"\"\"Get the last modified date of the most recently updated file in a path.\n \n@@ -99,6 +112,11 @@\n all_files.append(os.path.join(root_dir, f))\n \n # check files of interest in the skill root directory\n+ mod_times = {f: os.path.getmtime(f) for f in all_files}\n+ # Ensure modification times are valid\n+ bad_times = _bad_mod_times(mod_times)\n+ if bad_times:\n+ raise OSError('{} had bad modification times'.format(bad_times))\n if all_files:\n return max(os.path.getmtime(f) for f in all_files)\n else:\n@@ -118,6 +136,8 @@\n self.active = True\n self.config = Configuration.get()\n \n+ self.modtime_error_log_written = False\n+\n @property\n def is_blacklisted(self):\n \"\"\"Boolean value representing whether or not a skill is blacklisted.\"\"\"\n@@ -135,10 +155,14 @@\n \"\"\"\n try:\n self.last_modified = _get_last_modified_time(self.skill_directory)\n- except FileNotFoundError as e:\n- LOG.error('Failed to get last_modification time '\n- '({})'.format(repr(e)))\n+ except OSError as err:\n self.last_modified = self.last_loaded\n+ if not self.modtime_error_log_written:\n+ self.modtime_error_log_written = True\n+ LOG.error('Failed to get last_modification time '\n+ '({})'.format(repr(err)))\n+ else:\n+ self.modtime_error_log_written = False\n \n modified = self.last_modified > self.last_loaded\n", "issue": "skill keep reloading indefinitely if there are files with a future date\ndue to a temporary problem in my system, i ended up with a skill with __init.py modification time set in the future. this caused that skill to constantly be reloaded my mycroft core, and unloaded just after the loading was complted. 
took some hours of debug to understand this was actually the problem.\r\n\r\nperhaps skills with files with modification date from the future should just be stopped from loading and have a debug log about it?\n", "before_files": [{"content": "# Copyright 2019 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"Periodically run by skill manager to load skills into memory.\"\"\"\nimport gc\nimport importlib\nimport os\nfrom os.path import dirname\nimport sys\nfrom time import time\n\nfrom mycroft.configuration import Configuration\nfrom mycroft.messagebus import Message\nfrom mycroft.skills.settings import save_settings\nfrom mycroft.util.log import LOG\n\nfrom .settings import SettingsMetaUploader\n\nSKILL_MAIN_MODULE = '__init__.py'\n\n\ndef remove_submodule_refs(module_name):\n \"\"\"Ensure submodules are reloaded by removing the refs from sys.modules.\n\n Python import system puts a reference for each module in the sys.modules\n dictionary to bypass loading if a module is already in memory. To make\n sure skills are completely reloaded these references are deleted.\n\n Arguments:\n module_name: name of skill module.\n \"\"\"\n submodules = []\n LOG.debug('Skill module'.format(module_name))\n # Collect found submodules\n for m in sys.modules:\n if m.startswith(module_name + '.'):\n submodules.append(m)\n # Remove all references them to in sys.modules\n for m in submodules:\n LOG.debug('Removing sys.modules ref for {}'.format(m))\n del(sys.modules[m])\n\n\ndef load_skill_module(path, skill_id):\n \"\"\"Load a skill module\n\n This function handles the differences between python 3.4 and 3.5+ as well\n as makes sure the module is inserted into the sys.modules dict.\n\n Arguments:\n path: Path to the skill main file (__init__.py)\n skill_id: skill_id used as skill identifier in the module list\n \"\"\"\n module_name = skill_id.replace('.', '_')\n\n remove_submodule_refs(module_name)\n\n spec = importlib.util.spec_from_file_location(module_name, path)\n mod = importlib.util.module_from_spec(spec)\n sys.modules[module_name] = mod\n spec.loader.exec_module(mod)\n return mod\n\n\ndef _get_last_modified_time(path):\n \"\"\"Get the last modified date of the most recently updated file in a path.\n\n Exclude compiled python files, hidden directories and the settings.json\n file.\n\n Arguments:\n path: skill directory to check\n\n Returns:\n int: time of last change\n \"\"\"\n all_files = []\n for root_dir, dirs, files in os.walk(path):\n dirs[:] = [d for d in dirs if not d.startswith('.')]\n for f in files:\n ignore_file = (\n f.endswith('.pyc') or\n f == 'settings.json' or\n f.startswith('.') or\n f.endswith('.qmlc')\n )\n if not ignore_file:\n all_files.append(os.path.join(root_dir, f))\n\n # check files of interest in the skill root directory\n if all_files:\n return max(os.path.getmtime(f) for f in all_files)\n else:\n return 0\n\n\nclass SkillLoader:\n def __init__(self, bus, skill_directory):\n self.bus = bus\n self.skill_directory = skill_directory\n self.skill_id = 
os.path.basename(skill_directory)\n self.load_attempted = False\n self.loaded = False\n self.last_modified = 0\n self.last_loaded = 0\n self.instance = None\n self.active = True\n self.config = Configuration.get()\n\n @property\n def is_blacklisted(self):\n \"\"\"Boolean value representing whether or not a skill is blacklisted.\"\"\"\n blacklist = self.config['skills'].get('blacklisted_skills', [])\n if self.skill_id in blacklist:\n return True\n else:\n return False\n\n def reload_needed(self):\n \"\"\"Load an unloaded skill or reload unloaded/changed skill.\n\n Returns:\n bool: if the skill was loaded/reloaded\n \"\"\"\n try:\n self.last_modified = _get_last_modified_time(self.skill_directory)\n except FileNotFoundError as e:\n LOG.error('Failed to get last_modification time '\n '({})'.format(repr(e)))\n self.last_modified = self.last_loaded\n\n modified = self.last_modified > self.last_loaded\n\n # create local reference to avoid threading issues\n instance = self.instance\n\n reload_allowed = (\n self.active and\n (instance is None or instance.reload_skill)\n )\n return modified and reload_allowed\n\n def reload(self):\n LOG.info('ATTEMPTING TO RELOAD SKILL: ' + self.skill_id)\n if self.instance:\n self._unload()\n return self._load()\n\n def load(self):\n LOG.info('ATTEMPTING TO LOAD SKILL: ' + self.skill_id)\n return self._load()\n\n def _unload(self):\n \"\"\"Remove listeners and stop threads before loading\"\"\"\n self._execute_instance_shutdown()\n if self.config.get(\"debug\", False):\n self._garbage_collect()\n self.loaded = False\n self._emit_skill_shutdown_event()\n\n def unload(self):\n if self.instance:\n self._execute_instance_shutdown()\n self.loaded = False\n\n def activate(self):\n self.active = True\n self.load()\n\n def deactivate(self):\n self.active = False\n self.unload()\n\n def _execute_instance_shutdown(self):\n \"\"\"Call the shutdown method of the skill being reloaded.\"\"\"\n try:\n self.instance.default_shutdown()\n except Exception:\n log_msg = 'An error occurred while shutting down {}'\n LOG.exception(log_msg.format(self.instance.name))\n else:\n LOG.info('Skill {} shut down successfully'.format(self.skill_id))\n\n def _garbage_collect(self):\n \"\"\"Invoke Python garbage collector to remove false references\"\"\"\n gc.collect()\n # Remove two local references that are known\n refs = sys.getrefcount(self.instance) - 2\n if refs > 0:\n log_msg = (\n \"After shutdown of {} there are still {} references \"\n \"remaining. 
The skill won't be cleaned from memory.\"\n )\n LOG.warning(log_msg.format(self.instance.name, refs))\n\n def _emit_skill_shutdown_event(self):\n message = Message(\n \"mycroft.skills.shutdown\",\n data=dict(path=self.skill_directory, id=self.skill_id)\n )\n self.bus.emit(message)\n\n def _load(self):\n self._prepare_for_load()\n if self.is_blacklisted:\n self._skip_load()\n else:\n skill_module = self._load_skill_source()\n if skill_module and self._create_skill_instance(skill_module):\n self._check_for_first_run()\n self.loaded = True\n\n self.last_loaded = time()\n self._communicate_load_status()\n if self.loaded:\n self._prepare_settings_meta()\n return self.loaded\n\n def _prepare_settings_meta(self):\n settings_meta = SettingsMetaUploader(self.skill_directory,\n self.instance.name)\n self.instance.settings_meta = settings_meta\n\n def _prepare_for_load(self):\n self.load_attempted = True\n self.loaded = False\n self.instance = None\n\n def _skip_load(self):\n log_msg = 'Skill {} is blacklisted - it will not be loaded'\n LOG.info(log_msg.format(self.skill_id))\n\n def _load_skill_source(self):\n \"\"\"Use Python's import library to load a skill's source code.\"\"\"\n main_file_path = os.path.join(self.skill_directory, SKILL_MAIN_MODULE)\n if not os.path.exists(main_file_path):\n error_msg = 'Failed to load {} due to a missing file.'\n LOG.error(error_msg.format(self.skill_id))\n else:\n try:\n skill_module = load_skill_module(main_file_path, self.skill_id)\n except Exception as e:\n LOG.exception('Failed to load skill: '\n '{} ({})'.format(self.skill_id, repr(e)))\n else:\n module_is_skill = (\n hasattr(skill_module, 'create_skill') and\n callable(skill_module.create_skill)\n )\n if module_is_skill:\n return skill_module\n return None # Module wasn't loaded\n\n def _create_skill_instance(self, skill_module):\n \"\"\"Use v2 skills framework to create the skill.\"\"\"\n try:\n self.instance = skill_module.create_skill()\n except Exception as e:\n log_msg = 'Skill __init__ failed with {}'\n LOG.exception(log_msg.format(repr(e)))\n self.instance = None\n\n if self.instance:\n self.instance.skill_id = self.skill_id\n self.instance.bind(self.bus)\n try:\n self.instance.load_data_files()\n # Set up intent handlers\n # TODO: can this be a public method?\n self.instance._register_decorated()\n self.instance.register_resting_screen()\n self.instance.initialize()\n except Exception as e:\n # If an exception occurs, make sure to clean up the skill\n self.instance.default_shutdown()\n self.instance = None\n log_msg = 'Skill initialization failed with {}'\n LOG.exception(log_msg.format(repr(e)))\n\n return self.instance is not None\n\n def _check_for_first_run(self):\n \"\"\"The very first time a skill is run, speak the intro.\"\"\"\n first_run = self.instance.settings.get(\n \"__mycroft_skill_firstrun\",\n True\n )\n if first_run:\n LOG.info(\"First run of \" + self.skill_id)\n self.instance.settings[\"__mycroft_skill_firstrun\"] = False\n save_settings(self.instance.settings_write_path,\n self.instance.settings)\n intro = self.instance.get_intro_message()\n if intro:\n self.instance.speak(intro)\n\n def _communicate_load_status(self):\n if self.loaded:\n message = Message(\n 'mycroft.skills.loaded',\n data=dict(\n path=self.skill_directory,\n id=self.skill_id,\n name=self.instance.name,\n modified=self.last_modified\n )\n )\n self.bus.emit(message)\n LOG.info('Skill {} loaded successfully'.format(self.skill_id))\n else:\n message = Message(\n 'mycroft.skills.loading_failure',\n 
data=dict(path=self.skill_directory, id=self.skill_id)\n )\n self.bus.emit(message)\n LOG.error('Skill {} failed to load'.format(self.skill_id))\n", "path": "mycroft/skills/skill_loader.py"}], "after_files": [{"content": "# Copyright 2019 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"Periodically run by skill manager to load skills into memory.\"\"\"\nimport gc\nimport importlib\nimport os\nfrom os.path import dirname\nimport sys\nfrom time import time\n\nfrom mycroft.configuration import Configuration\nfrom mycroft.messagebus import Message\nfrom mycroft.skills.settings import save_settings\nfrom mycroft.util.log import LOG\n\nfrom .settings import SettingsMetaUploader\n\nSKILL_MAIN_MODULE = '__init__.py'\n\n\ndef remove_submodule_refs(module_name):\n \"\"\"Ensure submodules are reloaded by removing the refs from sys.modules.\n\n Python import system puts a reference for each module in the sys.modules\n dictionary to bypass loading if a module is already in memory. To make\n sure skills are completely reloaded these references are deleted.\n\n Arguments:\n module_name: name of skill module.\n \"\"\"\n submodules = []\n LOG.debug('Skill module'.format(module_name))\n # Collect found submodules\n for m in sys.modules:\n if m.startswith(module_name + '.'):\n submodules.append(m)\n # Remove all references them to in sys.modules\n for m in submodules:\n LOG.debug('Removing sys.modules ref for {}'.format(m))\n del(sys.modules[m])\n\n\ndef load_skill_module(path, skill_id):\n \"\"\"Load a skill module\n\n This function handles the differences between python 3.4 and 3.5+ as well\n as makes sure the module is inserted into the sys.modules dict.\n\n Arguments:\n path: Path to the skill main file (__init__.py)\n skill_id: skill_id used as skill identifier in the module list\n \"\"\"\n module_name = skill_id.replace('.', '_')\n\n remove_submodule_refs(module_name)\n\n spec = importlib.util.spec_from_file_location(module_name, path)\n mod = importlib.util.module_from_spec(spec)\n sys.modules[module_name] = mod\n spec.loader.exec_module(mod)\n return mod\n\n\ndef _bad_mod_times(mod_times):\n \"\"\"Return all entries with modification time in the future.\n\n Arguments:\n mod_times (dict): dict mapping file paths to modification times.\n\n Returns:\n List of files with bad modification times.\n \"\"\"\n current_time = time()\n return [path for path in mod_times if mod_times[path] > current_time]\n\n\ndef _get_last_modified_time(path):\n \"\"\"Get the last modified date of the most recently updated file in a path.\n\n Exclude compiled python files, hidden directories and the settings.json\n file.\n\n Arguments:\n path: skill directory to check\n\n Returns:\n int: time of last change\n \"\"\"\n all_files = []\n for root_dir, dirs, files in os.walk(path):\n dirs[:] = [d for d in dirs if not d.startswith('.')]\n for f in files:\n ignore_file = (\n f.endswith('.pyc') or\n f == 'settings.json' or\n f.startswith('.') or\n f.endswith('.qmlc')\n )\n if not ignore_file:\n 
all_files.append(os.path.join(root_dir, f))\n\n # check files of interest in the skill root directory\n mod_times = {f: os.path.getmtime(f) for f in all_files}\n # Ensure modification times are valid\n bad_times = _bad_mod_times(mod_times)\n if bad_times:\n raise OSError('{} had bad modification times'.format(bad_times))\n if all_files:\n return max(os.path.getmtime(f) for f in all_files)\n else:\n return 0\n\n\nclass SkillLoader:\n def __init__(self, bus, skill_directory):\n self.bus = bus\n self.skill_directory = skill_directory\n self.skill_id = os.path.basename(skill_directory)\n self.load_attempted = False\n self.loaded = False\n self.last_modified = 0\n self.last_loaded = 0\n self.instance = None\n self.active = True\n self.config = Configuration.get()\n\n self.modtime_error_log_written = False\n\n @property\n def is_blacklisted(self):\n \"\"\"Boolean value representing whether or not a skill is blacklisted.\"\"\"\n blacklist = self.config['skills'].get('blacklisted_skills', [])\n if self.skill_id in blacklist:\n return True\n else:\n return False\n\n def reload_needed(self):\n \"\"\"Load an unloaded skill or reload unloaded/changed skill.\n\n Returns:\n bool: if the skill was loaded/reloaded\n \"\"\"\n try:\n self.last_modified = _get_last_modified_time(self.skill_directory)\n except OSError as err:\n self.last_modified = self.last_loaded\n if not self.modtime_error_log_written:\n self.modtime_error_log_written = True\n LOG.error('Failed to get last_modification time '\n '({})'.format(repr(err)))\n else:\n self.modtime_error_log_written = False\n\n modified = self.last_modified > self.last_loaded\n\n # create local reference to avoid threading issues\n instance = self.instance\n\n reload_allowed = (\n self.active and\n (instance is None or instance.reload_skill)\n )\n return modified and reload_allowed\n\n def reload(self):\n LOG.info('ATTEMPTING TO RELOAD SKILL: ' + self.skill_id)\n if self.instance:\n self._unload()\n return self._load()\n\n def load(self):\n LOG.info('ATTEMPTING TO LOAD SKILL: ' + self.skill_id)\n return self._load()\n\n def _unload(self):\n \"\"\"Remove listeners and stop threads before loading\"\"\"\n self._execute_instance_shutdown()\n if self.config.get(\"debug\", False):\n self._garbage_collect()\n self.loaded = False\n self._emit_skill_shutdown_event()\n\n def unload(self):\n if self.instance:\n self._execute_instance_shutdown()\n self.loaded = False\n\n def activate(self):\n self.active = True\n self.load()\n\n def deactivate(self):\n self.active = False\n self.unload()\n\n def _execute_instance_shutdown(self):\n \"\"\"Call the shutdown method of the skill being reloaded.\"\"\"\n try:\n self.instance.default_shutdown()\n except Exception:\n log_msg = 'An error occurred while shutting down {}'\n LOG.exception(log_msg.format(self.instance.name))\n else:\n LOG.info('Skill {} shut down successfully'.format(self.skill_id))\n\n def _garbage_collect(self):\n \"\"\"Invoke Python garbage collector to remove false references\"\"\"\n gc.collect()\n # Remove two local references that are known\n refs = sys.getrefcount(self.instance) - 2\n if refs > 0:\n log_msg = (\n \"After shutdown of {} there are still {} references \"\n \"remaining. 
The skill won't be cleaned from memory.\"\n )\n LOG.warning(log_msg.format(self.instance.name, refs))\n\n def _emit_skill_shutdown_event(self):\n message = Message(\n \"mycroft.skills.shutdown\",\n data=dict(path=self.skill_directory, id=self.skill_id)\n )\n self.bus.emit(message)\n\n def _load(self):\n self._prepare_for_load()\n if self.is_blacklisted:\n self._skip_load()\n else:\n skill_module = self._load_skill_source()\n if skill_module and self._create_skill_instance(skill_module):\n self._check_for_first_run()\n self.loaded = True\n\n self.last_loaded = time()\n self._communicate_load_status()\n if self.loaded:\n self._prepare_settings_meta()\n return self.loaded\n\n def _prepare_settings_meta(self):\n settings_meta = SettingsMetaUploader(self.skill_directory,\n self.instance.name)\n self.instance.settings_meta = settings_meta\n\n def _prepare_for_load(self):\n self.load_attempted = True\n self.loaded = False\n self.instance = None\n\n def _skip_load(self):\n log_msg = 'Skill {} is blacklisted - it will not be loaded'\n LOG.info(log_msg.format(self.skill_id))\n\n def _load_skill_source(self):\n \"\"\"Use Python's import library to load a skill's source code.\"\"\"\n main_file_path = os.path.join(self.skill_directory, SKILL_MAIN_MODULE)\n if not os.path.exists(main_file_path):\n error_msg = 'Failed to load {} due to a missing file.'\n LOG.error(error_msg.format(self.skill_id))\n else:\n try:\n skill_module = load_skill_module(main_file_path, self.skill_id)\n except Exception as e:\n LOG.exception('Failed to load skill: '\n '{} ({})'.format(self.skill_id, repr(e)))\n else:\n module_is_skill = (\n hasattr(skill_module, 'create_skill') and\n callable(skill_module.create_skill)\n )\n if module_is_skill:\n return skill_module\n return None # Module wasn't loaded\n\n def _create_skill_instance(self, skill_module):\n \"\"\"Use v2 skills framework to create the skill.\"\"\"\n try:\n self.instance = skill_module.create_skill()\n except Exception as e:\n log_msg = 'Skill __init__ failed with {}'\n LOG.exception(log_msg.format(repr(e)))\n self.instance = None\n\n if self.instance:\n self.instance.skill_id = self.skill_id\n self.instance.bind(self.bus)\n try:\n self.instance.load_data_files()\n # Set up intent handlers\n # TODO: can this be a public method?\n self.instance._register_decorated()\n self.instance.register_resting_screen()\n self.instance.initialize()\n except Exception as e:\n # If an exception occurs, make sure to clean up the skill\n self.instance.default_shutdown()\n self.instance = None\n log_msg = 'Skill initialization failed with {}'\n LOG.exception(log_msg.format(repr(e)))\n\n return self.instance is not None\n\n def _check_for_first_run(self):\n \"\"\"The very first time a skill is run, speak the intro.\"\"\"\n first_run = self.instance.settings.get(\n \"__mycroft_skill_firstrun\",\n True\n )\n if first_run:\n LOG.info(\"First run of \" + self.skill_id)\n self.instance.settings[\"__mycroft_skill_firstrun\"] = False\n save_settings(self.instance.settings_write_path,\n self.instance.settings)\n intro = self.instance.get_intro_message()\n if intro:\n self.instance.speak(intro)\n\n def _communicate_load_status(self):\n if self.loaded:\n message = Message(\n 'mycroft.skills.loaded',\n data=dict(\n path=self.skill_directory,\n id=self.skill_id,\n name=self.instance.name,\n modified=self.last_modified\n )\n )\n self.bus.emit(message)\n LOG.info('Skill {} loaded successfully'.format(self.skill_id))\n else:\n message = Message(\n 'mycroft.skills.loading_failure',\n 
data=dict(path=self.skill_directory, id=self.skill_id)\n )\n self.bus.emit(message)\n LOG.error('Skill {} failed to load'.format(self.skill_id))\n", "path": "mycroft/skills/skill_loader.py"}]}
num_tokens: 3,526
num_tokens_diff: 519
problem_id: gh_patches_debug_26059
source: rasdani/github-patches
task_type: git_diff
in_source_id: DDMAL__CantusDB-192
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- make a test case to test the permissions we have implemented a lot of different restrictions to views. create a unit test to automate the testing process. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `django/cantusdb_project/main_app/views/sequence.py` Content: ``` 1 from django.views.generic import DetailView, ListView, UpdateView 2 from main_app.models import Sequence 3 from django.db.models import Q 4 from main_app.forms import SequenceEditForm 5 from django.contrib.auth.mixins import LoginRequiredMixin 6 from django.contrib import messages 7 from django.contrib.auth.mixins import UserPassesTestMixin 8 from django.core.exceptions import PermissionDenied 9 from django.http import Http404 10 11 12 13 class SequenceDetailView(DetailView): 14 """ 15 Displays a single Sequence object. Accessed with ``sequences/<int:pk>`` 16 """ 17 18 model = Sequence 19 context_object_name = "sequence" 20 template_name = "sequence_detail.html" 21 22 def get_context_data(self, **kwargs): 23 24 # if the sequence's source isn't public, only logged-in users should be able to view the sequence's detail page 25 sequence = self.get_object() 26 source = sequence.source 27 if (source.public is False) and (not self.request.user.is_authenticated): 28 raise PermissionDenied() 29 30 context = super().get_context_data(**kwargs) 31 context["concordances"] = Sequence.objects.filter( 32 cantus_id=self.get_object().cantus_id 33 ).order_by("siglum") 34 return context 35 36 37 class SequenceListView(ListView): 38 """ 39 Displays a list of Sequence objects. Accessed with ``sequences/`` 40 """ 41 42 model = Sequence 43 paginate_by = 100 44 context_object_name = "sequences" 45 template_name = "sequence_list.html" 46 47 def get_queryset(self): 48 queryset = super().get_queryset() 49 q_obj_filter = Q(source__visible=True) 50 q_obj_filter &= Q(source__public=True) 51 52 if self.request.GET.get("incipit"): 53 incipit = self.request.GET.get("incipit") 54 q_obj_filter &= Q(incipit__icontains=incipit) 55 if self.request.GET.get("siglum"): 56 siglum = self.request.GET.get("siglum") 57 q_obj_filter &= Q(siglum__icontains=siglum) 58 if self.request.GET.get("cantus_id"): 59 cantus_id = self.request.GET.get("cantus_id") 60 q_obj_filter &= Q(cantus_id__icontains=cantus_id) 61 62 return queryset.filter(q_obj_filter).order_by("siglum", "sequence") 63 64 class SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView): 65 template_name = "sequence_edit.html" 66 model = Sequence 67 form_class = SequenceEditForm 68 pk_url_kwarg = "sequence_id" 69 70 def form_valid(self, form): 71 messages.success( 72 self.request, 73 "Sequence updated successfully!", 74 ) 75 return super().form_valid(form) 76 77 def test_func(self): 78 user = self.request.user 79 sequence_id = self.kwargs.get(self.pk_url_kwarg) 80 try: 81 sequence = Sequence.objects.get(id=sequence_id) 82 except: 83 raise Http404("This sequence does not exist") 84 # find the source of this sequence 85 source = sequence.source 86 # checks if the user is an editor or a proofreader, 87 # and if the user is given privilege to edit this source and thus, it's sequences 88 is_editor_proofreader = user.groups.filter(Q(name="editor")|Q(name="proofreader")).exists() 89 can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id) 90 # checks if the user is a project 
manager (they should have the privilege to edit any sequence) 91 is_project_manager = user.groups.filter(name="project manager").exists() 92 # checks if the user is a contributor, 93 # and if the user is the creator of this source 94 # (they should only have the privilege to edit sequences in a source they have created) 95 is_contributor = user.groups.filter(name="contributor").exists() 96 97 if ((is_editor_proofreader and can_edit_sequences_in_source) 98 or (is_project_manager) 99 or (is_contributor and source.created_by == user)): 100 return True 101 else: 102 return False 103 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/django/cantusdb_project/main_app/views/sequence.py b/django/cantusdb_project/main_app/views/sequence.py --- a/django/cantusdb_project/main_app/views/sequence.py +++ b/django/cantusdb_project/main_app/views/sequence.py @@ -76,27 +76,10 @@ def test_func(self): user = self.request.user - sequence_id = self.kwargs.get(self.pk_url_kwarg) - try: - sequence = Sequence.objects.get(id=sequence_id) - except: - raise Http404("This sequence does not exist") - # find the source of this sequence - source = sequence.source - # checks if the user is an editor or a proofreader, - # and if the user is given privilege to edit this source and thus, it's sequences - is_editor_proofreader = user.groups.filter(Q(name="editor")|Q(name="proofreader")).exists() - can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id) # checks if the user is a project manager (they should have the privilege to edit any sequence) is_project_manager = user.groups.filter(name="project manager").exists() - # checks if the user is a contributor, - # and if the user is the creator of this source - # (they should only have the privilege to edit sequences in a source they have created) - is_contributor = user.groups.filter(name="contributor").exists() - - if ((is_editor_proofreader and can_edit_sequences_in_source) - or (is_project_manager) - or (is_contributor and source.created_by == user)): + + if is_project_manager: return True else: return False
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/sequence.py b/django/cantusdb_project/main_app/views/sequence.py\n--- a/django/cantusdb_project/main_app/views/sequence.py\n+++ b/django/cantusdb_project/main_app/views/sequence.py\n@@ -76,27 +76,10 @@\n \n def test_func(self):\n user = self.request.user\n- sequence_id = self.kwargs.get(self.pk_url_kwarg)\n- try:\n- sequence = Sequence.objects.get(id=sequence_id)\n- except:\n- raise Http404(\"This sequence does not exist\")\n- # find the source of this sequence\n- source = sequence.source\n- # checks if the user is an editor or a proofreader,\n- # and if the user is given privilege to edit this source and thus, it's sequences\n- is_editor_proofreader = user.groups.filter(Q(name=\"editor\")|Q(name=\"proofreader\")).exists()\n- can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id)\n # checks if the user is a project manager (they should have the privilege to edit any sequence)\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n- # checks if the user is a contributor,\n- # and if the user is the creator of this source \n- # (they should only have the privilege to edit sequences in a source they have created)\n- is_contributor = user.groups.filter(name=\"contributor\").exists()\n-\n- if ((is_editor_proofreader and can_edit_sequences_in_source) \n- or (is_project_manager) \n- or (is_contributor and source.created_by == user)):\n+\n+ if is_project_manager:\n return True\n else:\n return False\n", "issue": "make a test case to test the permissions\nwe have implemented a lot of different restrictions to views. create a unit test to automate the testing process.\n", "before_files": [{"content": "from django.views.generic import DetailView, ListView, UpdateView\nfrom main_app.models import Sequence\nfrom django.db.models import Q\nfrom main_app.forms import SequenceEditForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\n\n\n\nclass SequenceDetailView(DetailView):\n \"\"\"\n Displays a single Sequence object. Accessed with ``sequences/<int:pk>``\n \"\"\"\n\n model = Sequence\n context_object_name = \"sequence\"\n template_name = \"sequence_detail.html\"\n\n def get_context_data(self, **kwargs):\n\n # if the sequence's source isn't public, only logged-in users should be able to view the sequence's detail page\n sequence = self.get_object()\n source = sequence.source\n if (source.public is False) and (not self.request.user.is_authenticated):\n raise PermissionDenied()\n \n context = super().get_context_data(**kwargs)\n context[\"concordances\"] = Sequence.objects.filter(\n cantus_id=self.get_object().cantus_id\n ).order_by(\"siglum\")\n return context\n\n\nclass SequenceListView(ListView):\n \"\"\"\n Displays a list of Sequence objects. 
Accessed with ``sequences/``\n \"\"\"\n\n model = Sequence\n paginate_by = 100\n context_object_name = \"sequences\"\n template_name = \"sequence_list.html\"\n\n def get_queryset(self):\n queryset = super().get_queryset()\n q_obj_filter = Q(source__visible=True)\n q_obj_filter &= Q(source__public=True)\n\n if self.request.GET.get(\"incipit\"):\n incipit = self.request.GET.get(\"incipit\")\n q_obj_filter &= Q(incipit__icontains=incipit)\n if self.request.GET.get(\"siglum\"):\n siglum = self.request.GET.get(\"siglum\")\n q_obj_filter &= Q(siglum__icontains=siglum)\n if self.request.GET.get(\"cantus_id\"):\n cantus_id = self.request.GET.get(\"cantus_id\")\n q_obj_filter &= Q(cantus_id__icontains=cantus_id)\n\n return queryset.filter(q_obj_filter).order_by(\"siglum\", \"sequence\")\n\nclass SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n template_name = \"sequence_edit.html\"\n model = Sequence\n form_class = SequenceEditForm\n pk_url_kwarg = \"sequence_id\"\n\n def form_valid(self, form):\n messages.success(\n self.request,\n \"Sequence updated successfully!\",\n )\n return super().form_valid(form)\n\n def test_func(self):\n user = self.request.user\n sequence_id = self.kwargs.get(self.pk_url_kwarg)\n try:\n sequence = Sequence.objects.get(id=sequence_id)\n except:\n raise Http404(\"This sequence does not exist\")\n # find the source of this sequence\n source = sequence.source\n # checks if the user is an editor or a proofreader,\n # and if the user is given privilege to edit this source and thus, it's sequences\n is_editor_proofreader = user.groups.filter(Q(name=\"editor\")|Q(name=\"proofreader\")).exists()\n can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id)\n # checks if the user is a project manager (they should have the privilege to edit any sequence)\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n # checks if the user is a contributor,\n # and if the user is the creator of this source \n # (they should only have the privilege to edit sequences in a source they have created)\n is_contributor = user.groups.filter(name=\"contributor\").exists()\n\n if ((is_editor_proofreader and can_edit_sequences_in_source) \n or (is_project_manager) \n or (is_contributor and source.created_by == user)):\n return True\n else:\n return False\n", "path": "django/cantusdb_project/main_app/views/sequence.py"}], "after_files": [{"content": "from django.views.generic import DetailView, ListView, UpdateView\nfrom main_app.models import Sequence\nfrom django.db.models import Q\nfrom main_app.forms import SequenceEditForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\n\n\n\nclass SequenceDetailView(DetailView):\n \"\"\"\n Displays a single Sequence object. 
Accessed with ``sequences/<int:pk>``\n \"\"\"\n\n model = Sequence\n context_object_name = \"sequence\"\n template_name = \"sequence_detail.html\"\n\n def get_context_data(self, **kwargs):\n\n # if the sequence's source isn't public, only logged-in users should be able to view the sequence's detail page\n sequence = self.get_object()\n source = sequence.source\n if (source.public is False) and (not self.request.user.is_authenticated):\n raise PermissionDenied()\n \n context = super().get_context_data(**kwargs)\n context[\"concordances\"] = Sequence.objects.filter(\n cantus_id=self.get_object().cantus_id\n ).order_by(\"siglum\")\n return context\n\n\nclass SequenceListView(ListView):\n \"\"\"\n Displays a list of Sequence objects. Accessed with ``sequences/``\n \"\"\"\n\n model = Sequence\n paginate_by = 100\n context_object_name = \"sequences\"\n template_name = \"sequence_list.html\"\n\n def get_queryset(self):\n queryset = super().get_queryset()\n q_obj_filter = Q(source__visible=True)\n q_obj_filter &= Q(source__public=True)\n\n if self.request.GET.get(\"incipit\"):\n incipit = self.request.GET.get(\"incipit\")\n q_obj_filter &= Q(incipit__icontains=incipit)\n if self.request.GET.get(\"siglum\"):\n siglum = self.request.GET.get(\"siglum\")\n q_obj_filter &= Q(siglum__icontains=siglum)\n if self.request.GET.get(\"cantus_id\"):\n cantus_id = self.request.GET.get(\"cantus_id\")\n q_obj_filter &= Q(cantus_id__icontains=cantus_id)\n\n return queryset.filter(q_obj_filter).order_by(\"siglum\", \"sequence\")\n\nclass SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n template_name = \"sequence_edit.html\"\n model = Sequence\n form_class = SequenceEditForm\n pk_url_kwarg = \"sequence_id\"\n\n def form_valid(self, form):\n messages.success(\n self.request,\n \"Sequence updated successfully!\",\n )\n return super().form_valid(form)\n\n def test_func(self):\n user = self.request.user\n # checks if the user is a project manager (they should have the privilege to edit any sequence)\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n\n if is_project_manager:\n return True\n else:\n return False\n", "path": "django/cantusdb_project/main_app/views/sequence.py"}]}
num_tokens: 1,360
num_tokens_diff: 392
problem_id: gh_patches_debug_17751
source: rasdani/github-patches
task_type: git_diff
in_source_id: lutris__lutris-3930
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Runners can be configured when not yet installed, but not after removal ### Bug description When you go into the Preferences dialog, Runners section, all the runners may be configured even if not installed. But if you remove one, it's configure button disappears. ### How to Reproduce Go into Preferences, select "Runners". Now, pick a runner and remove it. The configure button for that runner will disappear. I think that is correct- but notice that all the other uninstalled runners have configure buttons. See the media attached. That can't be right. ### Expected behavior I expect that the configure button will appear only for installed runners in the first place. ### Log output ```shell INFO 2021-12-24 07:12:16,550 [startup.check_driver:53]:Using NVIDIA drivers 470.86 for x86_64 INFO 2021-12-24 07:12:16,550 [startup.check_driver:57]:GPU: NVIDIA GeForce RTX 3060 INFO 2021-12-24 07:12:16,550 [startup.check_driver:73]:GPU: 10DE:2503 196E:1377 (nvidia drivers) DEBUG 2021-12-24 07:12:16,725 [lutriswindow.update_store:451]:Showing 25 games DEBUG 2021-12-24 07:12:20,443 [application.show_window:259]:Showing window <class 'lutris.gui.config.preferences_dialog.PreferencesDialog'>{} DEBUG 2021-12-24 07:12:26,667 [application.on_app_window_destroyed:276]:Removed window <class 'lutris.gui.config.preferences_dialog.PreferencesDialog'>{} INFO 2021-12-24 07:12:28,505 [application.do_shutdown:631]:Shutting down Lutris Hmm, using the master branch to repro produces less logging! ``` ### System Information ```shell [System] OS: Pop!_OS 21.10 impish Arch: x86_64 Kernel: 5.15.8-76051508-generic Desktop: GNOME Display Server: x11 [CPU] Vendor: AuthenticAMD Model: AMD Ryzen 7 5800X 8-Core Processor Physical cores: 8 Logical cores: 16 [Memory] RAM: 31.4 GB Swap: 0.0 GB [Graphics] Vendor: NVIDIA Corporation OpenGL Renderer: NVIDIA GeForce RTX 3060/PCIe/SSE2 OpenGL Version: 4.6.0 NVIDIA 470.86 OpenGL Core: 4.6.0 NVIDIA 470.86 OpenGL ES: OpenGL ES 3.2 NVIDIA 470.86 Vulkan: Supported ``` ### Media (optional) ![Screenshot from 2021-12-24 07-06-15](https://user-images.githubusercontent.com/6507403/147351426-da379253-2415-47bf-8ca3-711c33b63236.png) ### Checklist: - [X] I'm not asking for support with a game or the wine runner. - [X] I have followed the above mentioned guides and have all the graphics and wine dependencies installed. - [X] I have checked for existing issues that describe my problem prior to opening this one. - [X] I understand that improperly formatted bug reports may be closed without explanation. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `lutris/gui/config/runner_box.py` Content: ``` 1 from gettext import gettext as _ 2 3 from gi.repository import GObject, Gtk 4 5 from lutris import runners 6 from lutris.gui.config.runner import RunnerConfigDialog 7 from lutris.gui.dialogs import ErrorDialog, QuestionDialog 8 from lutris.gui.dialogs.download import simple_downloader 9 from lutris.gui.dialogs.runner_install import RunnerInstallDialog 10 from lutris.gui.widgets.utils import ICON_SIZE, get_icon 11 from lutris.util.log import logger 12 13 14 class RunnerBox(Gtk.Box): 15 __gsignals__ = { 16 "runner-installed": (GObject.SIGNAL_RUN_FIRST, None, ()), 17 "runner-removed": (GObject.SIGNAL_RUN_FIRST, None, ()), 18 } 19 20 def __init__(self, runner_name): 21 super().__init__(visible=True) 22 23 self.connect("runner-installed", self.on_runner_installed) 24 self.connect("runner-removed", self.on_runner_removed) 25 26 self.set_margin_bottom(12) 27 self.set_margin_top(12) 28 self.set_margin_left(12) 29 self.set_margin_right(12) 30 self.runner = runners.import_runner(runner_name)() 31 icon = get_icon(self.runner.name, icon_format='pixbuf', size=ICON_SIZE) 32 if icon: 33 runner_icon = Gtk.Image(visible=True) 34 runner_icon.set_from_pixbuf(icon) 35 else: 36 runner_icon = Gtk.Image.new_from_icon_name("package-x-generic-symbolic", Gtk.IconSize.DND) 37 runner_icon.show() 38 runner_icon.set_margin_right(12) 39 self.pack_start(runner_icon, False, True, 6) 40 41 self.runner_label_box = Gtk.VBox(visible=True) 42 self.runner_label_box.set_margin_top(12) 43 44 runner_label = Gtk.Label(visible=True) 45 runner_label.set_alignment(0, 0.5) 46 runner_label.set_markup("<b>%s</b>" % self.runner.human_name) 47 self.runner_label_box.pack_start(runner_label, False, False, 0) 48 49 desc_label = Gtk.Label(visible=True) 50 desc_label.set_alignment(0, 0.5) 51 desc_label.set_text(self.runner.description) 52 self.runner_label_box.pack_start(desc_label, False, False, 0) 53 54 self.pack_start(self.runner_label_box, True, True, 0) 55 56 self.configure_button = Gtk.Button.new_from_icon_name("preferences-system-symbolic", Gtk.IconSize.BUTTON) 57 self.configure_button.set_margin_right(12) 58 self.configure_button.connect("clicked", self.on_configure_clicked) 59 self.configure_button.show() 60 self.pack_start(self.configure_button, False, False, 0) 61 if not self.runner.is_installed(): 62 self.runner_label_box.set_sensitive(False) 63 self.action_alignment = Gtk.Alignment.new(0.5, 0.5, 0, 0) 64 self.action_alignment.show() 65 self.action_alignment.add(self.get_action_button()) 66 self.pack_start(self.action_alignment, False, False, 0) 67 68 def get_action_button(self): 69 """Return a install or remove button""" 70 if self.runner.multiple_versions: 71 _button = Gtk.Button.new_from_icon_name("preferences-other-symbolic", Gtk.IconSize.BUTTON) 72 _button.get_style_context().add_class("circular") 73 _button.connect("clicked", self.on_versions_clicked) 74 else: 75 if self.runner.is_installed(): 76 _button = Gtk.Button.new_from_icon_name("edit-delete-symbolic", Gtk.IconSize.BUTTON) 77 _button.get_style_context().add_class("circular") 78 _button.connect("clicked", self.on_remove_clicked) 79 else: 80 _button = Gtk.Button.new_from_icon_name("system-software-install-symbolic", Gtk.IconSize.BUTTON) 81 _button.get_style_context().add_class("circular") 82 _button.connect("clicked", self.on_install_clicked) 83 _button.show() 84 return _button 85 86 def on_versions_clicked(self, widget): 87 RunnerInstallDialog( 88 _("Manage %s versions") % self.runner.name, 89 
None, 90 self.runner.name 91 ) 92 # connect a runner-installed signal from the above dialog? 93 94 def on_install_clicked(self, widget): 95 """Install a runner.""" 96 logger.debug("Install of %s requested", self.runner) 97 try: 98 self.runner.install(downloader=simple_downloader) 99 except ( 100 runners.RunnerInstallationError, 101 runners.NonInstallableRunnerError, 102 ) as ex: 103 logger.error(ex) 104 ErrorDialog(ex.message) 105 return 106 if self.runner.is_installed(): 107 self.emit("runner-installed") 108 else: 109 logger.error("Runner failed to install") 110 111 def on_configure_clicked(self, widget): 112 RunnerConfigDialog(self.runner) 113 114 def on_remove_clicked(self, widget): 115 dialog = QuestionDialog( 116 { 117 "title": _("Do you want to uninstall %s?") % self.runner.human_name, 118 "question": _("This will remove <b>%s</b> and all associated data." % self.runner.human_name) 119 120 } 121 ) 122 if Gtk.ResponseType.YES == dialog.result: 123 self.runner.uninstall() 124 self.emit("runner-removed") 125 126 def on_runner_installed(self, widget): 127 """Called after the runnner is installed""" 128 self.runner_label_box.set_sensitive(True) 129 self.configure_button.show() 130 self.action_alignment.get_children()[0].destroy() 131 self.action_alignment.add(self.get_action_button()) 132 133 def on_runner_removed(self, widget): 134 """Called after the runner is removed""" 135 self.runner_label_box.set_sensitive(False) 136 self.configure_button.hide() 137 self.action_alignment.get_children()[0].destroy() 138 self.action_alignment.add(self.get_action_button()) 139 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lutris/gui/config/runner_box.py b/lutris/gui/config/runner_box.py --- a/lutris/gui/config/runner_box.py +++ b/lutris/gui/config/runner_box.py @@ -126,13 +126,11 @@ def on_runner_installed(self, widget): """Called after the runnner is installed""" self.runner_label_box.set_sensitive(True) - self.configure_button.show() self.action_alignment.get_children()[0].destroy() self.action_alignment.add(self.get_action_button()) def on_runner_removed(self, widget): """Called after the runner is removed""" self.runner_label_box.set_sensitive(False) - self.configure_button.hide() self.action_alignment.get_children()[0].destroy() self.action_alignment.add(self.get_action_button())
{"golden_diff": "diff --git a/lutris/gui/config/runner_box.py b/lutris/gui/config/runner_box.py\n--- a/lutris/gui/config/runner_box.py\n+++ b/lutris/gui/config/runner_box.py\n@@ -126,13 +126,11 @@\n def on_runner_installed(self, widget):\n \"\"\"Called after the runnner is installed\"\"\"\n self.runner_label_box.set_sensitive(True)\n- self.configure_button.show()\n self.action_alignment.get_children()[0].destroy()\n self.action_alignment.add(self.get_action_button())\n \n def on_runner_removed(self, widget):\n \"\"\"Called after the runner is removed\"\"\"\n self.runner_label_box.set_sensitive(False)\n- self.configure_button.hide()\n self.action_alignment.get_children()[0].destroy()\n self.action_alignment.add(self.get_action_button())\n", "issue": "Runners can be configured when not yet installed, but not after removal\n### Bug description\n\nWhen you go into the Preferences dialog, Runners section, all the runners may be configured even if not installed. But if you remove one, it's configure button disappears.\n\n### How to Reproduce\n\nGo into Preferences, select \"Runners\". Now, pick a runner and remove it. The configure button for that runner will disappear.\r\n\r\nI think that is correct- but notice that all the other uninstalled runners have configure buttons. See the media attached. That can't be right.\n\n### Expected behavior\n\nI expect that the configure button will appear only for installed runners in the first place.\n\n### Log output\n\n```shell\nINFO 2021-12-24 07:12:16,550 [startup.check_driver:53]:Using NVIDIA drivers 470.86 for x86_64\r\nINFO 2021-12-24 07:12:16,550 [startup.check_driver:57]:GPU: NVIDIA GeForce RTX 3060\r\nINFO 2021-12-24 07:12:16,550 [startup.check_driver:73]:GPU: 10DE:2503 196E:1377 (nvidia drivers)\r\nDEBUG 2021-12-24 07:12:16,725 [lutriswindow.update_store:451]:Showing 25 games\r\nDEBUG 2021-12-24 07:12:20,443 [application.show_window:259]:Showing window <class 'lutris.gui.config.preferences_dialog.PreferencesDialog'>{}\r\nDEBUG 2021-12-24 07:12:26,667 [application.on_app_window_destroyed:276]:Removed window <class 'lutris.gui.config.preferences_dialog.PreferencesDialog'>{}\r\nINFO 2021-12-24 07:12:28,505 [application.do_shutdown:631]:Shutting down Lutris\r\n\r\nHmm, using the master branch to repro produces less logging!\n```\n\n\n### System Information\n\n```shell\n[System]\r\nOS: Pop!_OS 21.10 impish\r\nArch: x86_64\r\nKernel: 5.15.8-76051508-generic\r\nDesktop: GNOME\r\nDisplay Server: x11\r\n\r\n[CPU]\r\nVendor: AuthenticAMD\r\nModel: AMD Ryzen 7 5800X 8-Core Processor\r\nPhysical cores: 8\r\nLogical cores: 16\r\n\r\n[Memory]\r\nRAM: 31.4 GB\r\nSwap: 0.0 GB\r\n\r\n[Graphics]\r\nVendor: NVIDIA Corporation\r\nOpenGL Renderer: NVIDIA GeForce RTX 3060/PCIe/SSE2\r\nOpenGL Version: 4.6.0 NVIDIA 470.86\r\nOpenGL Core: 4.6.0 NVIDIA 470.86\r\nOpenGL ES: OpenGL ES 3.2 NVIDIA 470.86\r\nVulkan: Supported\n```\n\n\n### Media (optional)\n\n![Screenshot from 2021-12-24 07-06-15](https://user-images.githubusercontent.com/6507403/147351426-da379253-2415-47bf-8ca3-711c33b63236.png)\r\n\n\n### Checklist:\n\n- [X] I'm not asking for support with a game or the wine runner.\n- [X] I have followed the above mentioned guides and have all the graphics and wine dependencies installed.\n- [X] I have checked for existing issues that describe my problem prior to opening this one.\n- [X] I understand that improperly formatted bug reports may be closed without explanation.\n", "before_files": [{"content": "from gettext import gettext as _\n\nfrom gi.repository import 
GObject, Gtk\n\nfrom lutris import runners\nfrom lutris.gui.config.runner import RunnerConfigDialog\nfrom lutris.gui.dialogs import ErrorDialog, QuestionDialog\nfrom lutris.gui.dialogs.download import simple_downloader\nfrom lutris.gui.dialogs.runner_install import RunnerInstallDialog\nfrom lutris.gui.widgets.utils import ICON_SIZE, get_icon\nfrom lutris.util.log import logger\n\n\nclass RunnerBox(Gtk.Box):\n __gsignals__ = {\n \"runner-installed\": (GObject.SIGNAL_RUN_FIRST, None, ()),\n \"runner-removed\": (GObject.SIGNAL_RUN_FIRST, None, ()),\n }\n\n def __init__(self, runner_name):\n super().__init__(visible=True)\n\n self.connect(\"runner-installed\", self.on_runner_installed)\n self.connect(\"runner-removed\", self.on_runner_removed)\n\n self.set_margin_bottom(12)\n self.set_margin_top(12)\n self.set_margin_left(12)\n self.set_margin_right(12)\n self.runner = runners.import_runner(runner_name)()\n icon = get_icon(self.runner.name, icon_format='pixbuf', size=ICON_SIZE)\n if icon:\n runner_icon = Gtk.Image(visible=True)\n runner_icon.set_from_pixbuf(icon)\n else:\n runner_icon = Gtk.Image.new_from_icon_name(\"package-x-generic-symbolic\", Gtk.IconSize.DND)\n runner_icon.show()\n runner_icon.set_margin_right(12)\n self.pack_start(runner_icon, False, True, 6)\n\n self.runner_label_box = Gtk.VBox(visible=True)\n self.runner_label_box.set_margin_top(12)\n\n runner_label = Gtk.Label(visible=True)\n runner_label.set_alignment(0, 0.5)\n runner_label.set_markup(\"<b>%s</b>\" % self.runner.human_name)\n self.runner_label_box.pack_start(runner_label, False, False, 0)\n\n desc_label = Gtk.Label(visible=True)\n desc_label.set_alignment(0, 0.5)\n desc_label.set_text(self.runner.description)\n self.runner_label_box.pack_start(desc_label, False, False, 0)\n\n self.pack_start(self.runner_label_box, True, True, 0)\n\n self.configure_button = Gtk.Button.new_from_icon_name(\"preferences-system-symbolic\", Gtk.IconSize.BUTTON)\n self.configure_button.set_margin_right(12)\n self.configure_button.connect(\"clicked\", self.on_configure_clicked)\n self.configure_button.show()\n self.pack_start(self.configure_button, False, False, 0)\n if not self.runner.is_installed():\n self.runner_label_box.set_sensitive(False)\n self.action_alignment = Gtk.Alignment.new(0.5, 0.5, 0, 0)\n self.action_alignment.show()\n self.action_alignment.add(self.get_action_button())\n self.pack_start(self.action_alignment, False, False, 0)\n\n def get_action_button(self):\n \"\"\"Return a install or remove button\"\"\"\n if self.runner.multiple_versions:\n _button = Gtk.Button.new_from_icon_name(\"preferences-other-symbolic\", Gtk.IconSize.BUTTON)\n _button.get_style_context().add_class(\"circular\")\n _button.connect(\"clicked\", self.on_versions_clicked)\n else:\n if self.runner.is_installed():\n _button = Gtk.Button.new_from_icon_name(\"edit-delete-symbolic\", Gtk.IconSize.BUTTON)\n _button.get_style_context().add_class(\"circular\")\n _button.connect(\"clicked\", self.on_remove_clicked)\n else:\n _button = Gtk.Button.new_from_icon_name(\"system-software-install-symbolic\", Gtk.IconSize.BUTTON)\n _button.get_style_context().add_class(\"circular\")\n _button.connect(\"clicked\", self.on_install_clicked)\n _button.show()\n return _button\n\n def on_versions_clicked(self, widget):\n RunnerInstallDialog(\n _(\"Manage %s versions\") % self.runner.name,\n None,\n self.runner.name\n )\n # connect a runner-installed signal from the above dialog?\n\n def on_install_clicked(self, widget):\n \"\"\"Install a runner.\"\"\"\n 
logger.debug(\"Install of %s requested\", self.runner)\n try:\n self.runner.install(downloader=simple_downloader)\n except (\n runners.RunnerInstallationError,\n runners.NonInstallableRunnerError,\n ) as ex:\n logger.error(ex)\n ErrorDialog(ex.message)\n return\n if self.runner.is_installed():\n self.emit(\"runner-installed\")\n else:\n logger.error(\"Runner failed to install\")\n\n def on_configure_clicked(self, widget):\n RunnerConfigDialog(self.runner)\n\n def on_remove_clicked(self, widget):\n dialog = QuestionDialog(\n {\n \"title\": _(\"Do you want to uninstall %s?\") % self.runner.human_name,\n \"question\": _(\"This will remove <b>%s</b> and all associated data.\" % self.runner.human_name)\n\n }\n )\n if Gtk.ResponseType.YES == dialog.result:\n self.runner.uninstall()\n self.emit(\"runner-removed\")\n\n def on_runner_installed(self, widget):\n \"\"\"Called after the runnner is installed\"\"\"\n self.runner_label_box.set_sensitive(True)\n self.configure_button.show()\n self.action_alignment.get_children()[0].destroy()\n self.action_alignment.add(self.get_action_button())\n\n def on_runner_removed(self, widget):\n \"\"\"Called after the runner is removed\"\"\"\n self.runner_label_box.set_sensitive(False)\n self.configure_button.hide()\n self.action_alignment.get_children()[0].destroy()\n self.action_alignment.add(self.get_action_button())\n", "path": "lutris/gui/config/runner_box.py"}], "after_files": [{"content": "from gettext import gettext as _\n\nfrom gi.repository import GObject, Gtk\n\nfrom lutris import runners\nfrom lutris.gui.config.runner import RunnerConfigDialog\nfrom lutris.gui.dialogs import ErrorDialog, QuestionDialog\nfrom lutris.gui.dialogs.download import simple_downloader\nfrom lutris.gui.dialogs.runner_install import RunnerInstallDialog\nfrom lutris.gui.widgets.utils import ICON_SIZE, get_icon\nfrom lutris.util.log import logger\n\n\nclass RunnerBox(Gtk.Box):\n __gsignals__ = {\n \"runner-installed\": (GObject.SIGNAL_RUN_FIRST, None, ()),\n \"runner-removed\": (GObject.SIGNAL_RUN_FIRST, None, ()),\n }\n\n def __init__(self, runner_name):\n super().__init__(visible=True)\n\n self.connect(\"runner-installed\", self.on_runner_installed)\n self.connect(\"runner-removed\", self.on_runner_removed)\n\n self.set_margin_bottom(12)\n self.set_margin_top(12)\n self.set_margin_left(12)\n self.set_margin_right(12)\n self.runner = runners.import_runner(runner_name)()\n icon = get_icon(self.runner.name, icon_format='pixbuf', size=ICON_SIZE)\n if icon:\n runner_icon = Gtk.Image(visible=True)\n runner_icon.set_from_pixbuf(icon)\n else:\n runner_icon = Gtk.Image.new_from_icon_name(\"package-x-generic-symbolic\", Gtk.IconSize.DND)\n runner_icon.show()\n runner_icon.set_margin_right(12)\n self.pack_start(runner_icon, False, True, 6)\n\n self.runner_label_box = Gtk.VBox(visible=True)\n self.runner_label_box.set_margin_top(12)\n\n runner_label = Gtk.Label(visible=True)\n runner_label.set_alignment(0, 0.5)\n runner_label.set_markup(\"<b>%s</b>\" % self.runner.human_name)\n self.runner_label_box.pack_start(runner_label, False, False, 0)\n\n desc_label = Gtk.Label(visible=True)\n desc_label.set_alignment(0, 0.5)\n desc_label.set_text(self.runner.description)\n self.runner_label_box.pack_start(desc_label, False, False, 0)\n\n self.pack_start(self.runner_label_box, True, True, 0)\n\n self.configure_button = Gtk.Button.new_from_icon_name(\"preferences-system-symbolic\", Gtk.IconSize.BUTTON)\n self.configure_button.set_margin_right(12)\n self.configure_button.connect(\"clicked\", 
self.on_configure_clicked)\n self.configure_button.show()\n self.pack_start(self.configure_button, False, False, 0)\n if not self.runner.is_installed():\n self.runner_label_box.set_sensitive(False)\n self.action_alignment = Gtk.Alignment.new(0.5, 0.5, 0, 0)\n self.action_alignment.show()\n self.action_alignment.add(self.get_action_button())\n self.pack_start(self.action_alignment, False, False, 0)\n\n def get_action_button(self):\n \"\"\"Return a install or remove button\"\"\"\n if self.runner.multiple_versions:\n _button = Gtk.Button.new_from_icon_name(\"preferences-other-symbolic\", Gtk.IconSize.BUTTON)\n _button.get_style_context().add_class(\"circular\")\n _button.connect(\"clicked\", self.on_versions_clicked)\n else:\n if self.runner.is_installed():\n _button = Gtk.Button.new_from_icon_name(\"edit-delete-symbolic\", Gtk.IconSize.BUTTON)\n _button.get_style_context().add_class(\"circular\")\n _button.connect(\"clicked\", self.on_remove_clicked)\n else:\n _button = Gtk.Button.new_from_icon_name(\"system-software-install-symbolic\", Gtk.IconSize.BUTTON)\n _button.get_style_context().add_class(\"circular\")\n _button.connect(\"clicked\", self.on_install_clicked)\n _button.show()\n return _button\n\n def on_versions_clicked(self, widget):\n RunnerInstallDialog(\n _(\"Manage %s versions\") % self.runner.name,\n None,\n self.runner.name\n )\n # connect a runner-installed signal from the above dialog?\n\n def on_install_clicked(self, widget):\n \"\"\"Install a runner.\"\"\"\n logger.debug(\"Install of %s requested\", self.runner)\n try:\n self.runner.install(downloader=simple_downloader)\n except (\n runners.RunnerInstallationError,\n runners.NonInstallableRunnerError,\n ) as ex:\n logger.error(ex)\n ErrorDialog(ex.message)\n return\n if self.runner.is_installed():\n self.emit(\"runner-installed\")\n else:\n logger.error(\"Runner failed to install\")\n\n def on_configure_clicked(self, widget):\n RunnerConfigDialog(self.runner)\n\n def on_remove_clicked(self, widget):\n dialog = QuestionDialog(\n {\n \"title\": _(\"Do you want to uninstall %s?\") % self.runner.human_name,\n \"question\": _(\"This will remove <b>%s</b> and all associated data.\" % self.runner.human_name)\n\n }\n )\n if Gtk.ResponseType.YES == dialog.result:\n self.runner.uninstall()\n self.emit(\"runner-removed\")\n\n def on_runner_installed(self, widget):\n \"\"\"Called after the runnner is installed\"\"\"\n self.runner_label_box.set_sensitive(True)\n self.action_alignment.get_children()[0].destroy()\n self.action_alignment.add(self.get_action_button())\n\n def on_runner_removed(self, widget):\n \"\"\"Called after the runner is removed\"\"\"\n self.runner_label_box.set_sensitive(False)\n self.action_alignment.get_children()[0].destroy()\n self.action_alignment.add(self.get_action_button())\n", "path": "lutris/gui/config/runner_box.py"}]}
num_tokens: 2,673
num_tokens_diff: 177
problem_id: gh_patches_debug_38134
source: rasdani/github-patches
task_type: git_diff
in_source_id: bridgecrewio__checkov-536
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- LaunchConfigurationEBSEncryption gives false-positives, due to not checking snapshot_id In the relevant `aws_launch_configuration` Terraform docs https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_configuration ``` encrypted - (Optional) Whether the volume should be encrypted or not. Do not use this option if you are using snapshot_id as the encrypted flag will be determined by the snapshot. (Default: false). ``` The relevant part is **Do not use this option if you are using snapshot_id as the encrypted flag will be determined by the snapshot.** `snapshot_id` is not taken into account in the current check: https://github.com/bridgecrewio/checkov/blob/d67ec380daf81b889e83d4d7c7d33f490525e899/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py#L16-L34 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py` Content: ``` 1 from checkov.common.models.enums import CheckResult, CheckCategories 2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 3 4 5 class SecurityGroupRuleDescription(BaseResourceCheck): 6 def __init__(self): 7 name = "Ensure every security groups rule has a description" 8 id = "CKV_AWS_23" 9 supported_resource = ['aws_security_group', 'aws_security_group_rule', 'aws_db_security_group', 10 'aws_elasticache_security_group', 'aws_redshift_security_group'] 11 categories = [CheckCategories.NETWORKING] 12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource) 13 14 def scan_resource_conf(self, conf): 15 """ 16 Looks for description at security group rules : 17 https://www.terraform.io/docs/providers/aws/r/security_group.html 18 :param conf: aws_security_group configuration 19 :return: <CheckResult> 20 """ 21 if 'description' in conf.keys(): 22 if conf['description']: 23 return CheckResult.PASSED 24 egress_result = self.check_rule(rule_type='egress', conf=conf) 25 ingress_result = self.check_rule(rule_type='ingress', conf=conf) 26 if egress_result == CheckResult.PASSED and ingress_result == CheckResult.PASSED: 27 return CheckResult.PASSED 28 return CheckResult.FAILED 29 30 def check_rule(self, rule_type, conf): 31 if rule_type in conf.keys(): 32 for rule in conf[rule_type]: 33 if isinstance(rule, dict): 34 if 'description' not in rule.keys() or not rule['description']: 35 return CheckResult.FAILED 36 return CheckResult.PASSED 37 38 39 check = SecurityGroupRuleDescription() 40 ``` Path: `checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py` Content: ``` 1 from checkov.common.models.enums import CheckResult, CheckCategories 2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 3 4 5 class LaunchConfigurationEBSEncryption(BaseResourceValueCheck): 6 def __init__(self): 7 name = "Ensure all data stored in the Launch configuration EBS is securely encrypted" 8 id = "CKV_AWS_8" 9 supported_resources = ['aws_launch_configuration', 'aws_instance'] 10 categories = [CheckCategories.ENCRYPTION] 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 12 13 def get_inspected_key(self): 14 return "*_block_device/[0]/encrypted" 15 16 def scan_resource_conf(self, conf): 17 """ 18 Looks for 
encryption configuration at launch configuration: 19 https://www.terraform.io/docs/providers/aws/r/launch_configuration.html or https://www.terraform.io/docs/providers/aws/d/instance.html 20 :param conf: aws_launch_configuration configuration 21 :return: <CheckResult> 22 """ 23 have_root_block = 0 24 for key in conf.keys(): 25 if "block_device" in key and "ephemeral" not in key: 26 if isinstance(conf[key][0], dict) and conf[key][0].get("encrypted") != [True]: 27 return CheckResult.FAILED 28 if "root_block_device" in key: 29 # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted. 30 have_root_block = 1 31 if have_root_block == 0: 32 return CheckResult.FAILED 33 34 return CheckResult.PASSED 35 36 37 check = LaunchConfigurationEBSEncryption() 38 ``` Path: `checkov/common/runners/runner_registry.py` Content: ``` 1 import json 2 import logging 3 from abc import abstractmethod 4 5 6 class RunnerRegistry(object): 7 runners = [] 8 scan_reports = [] 9 banner = "" 10 11 def __init__(self, banner, runner_filter, *runners): 12 self.logger = logging.getLogger(__name__) 13 self.runner_filter = runner_filter 14 self.runners = runners 15 self.banner = banner 16 self.filter_runner_framework() 17 self.scan_reports = [] 18 19 @abstractmethod 20 def extract_entity_details(self, entity): 21 raise NotImplementedError() 22 23 def run(self, root_folder=None, external_checks_dir=None, files=None, guidelines={}): 24 for runner in self.runners: 25 scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files, 26 runner_filter=self.runner_filter) 27 RunnerRegistry.enrich_report_with_guidelines(scan_report, guidelines) 28 self.scan_reports.append(scan_report) 29 return self.scan_reports 30 31 def print_reports(self, scan_reports, args): 32 if args.output != "json" and args.output != "junitxml" and args.output != "github_failed_only": 33 print(f"{self.banner}\n") 34 exit_codes = [] 35 report_jsons = [] 36 for report in scan_reports: 37 if not report.is_empty(): 38 if args.output == "json": 39 report_jsons.append(report.get_dict()) 40 elif args.output == "junitxml": 41 report.print_junit_xml() 42 elif args.output == 'github_failed_only': 43 report.print_failed_github_md() 44 else: 45 report.print_console(is_quiet=args.quiet) 46 exit_codes.append(report.get_exit_code(args.soft_fail)) 47 if args.output == "json": 48 if len(report_jsons) == 1: 49 print(json.dumps(report_jsons[0], indent=4)) 50 else: 51 print(json.dumps(report_jsons, indent=4)) 52 exit_code = 1 if 1 in exit_codes else 0 53 exit(exit_code) 54 55 def filter_runner_framework(self): 56 if self.runner_filter.framework == 'all': 57 return 58 for runner in self.runners: 59 if runner.check_type == self.runner_filter.framework: 60 self.runners = [runner] 61 return 62 63 @staticmethod 64 def enrich_report_with_guidelines(scan_report, guidelines): 65 for record in scan_report.failed_checks + scan_report.passed_checks + scan_report.skipped_checks: 66 if record.check_id in guidelines: 67 record.set_guideline(guidelines[record.check_id]) 68 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/common/runners/runner_registry.py b/checkov/common/runners/runner_registry.py --- a/checkov/common/runners/runner_registry.py +++ b/checkov/common/runners/runner_registry.py @@ -29,7 +29,11 @@ return self.scan_reports def print_reports(self, scan_reports, args): - if args.output != "json" and args.output != "junitxml" and args.output != "github_failed_only": + if args.output not in ( + "github_failed_only", + "json", + "junitxml", + ): print(f"{self.banner}\n") exit_codes = [] report_jsons = [] diff --git a/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py b/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py --- a/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py +++ b/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py @@ -20,15 +20,25 @@ :param conf: aws_launch_configuration configuration :return: <CheckResult> """ - have_root_block = 0 for key in conf.keys(): - if "block_device" in key and "ephemeral" not in key: - if isinstance(conf[key][0], dict) and conf[key][0].get("encrypted") != [True]: + if ( + "block_device" in key + and + "ephemeral" not in key + ): + if ( + isinstance(conf[key][0], dict) + and + conf[key][0].get("encrypted") != [True] + and + # If present, the encrypted flag will be determined by the snapshot + # Note: checkov does not know if snapshot is encrypted, so we default to PASSED + not conf[key][0].get("snapshot_id") + ): return CheckResult.FAILED - if "root_block_device" in key: - # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted. - have_root_block = 1 - if have_root_block == 0: + + # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted. + if "root_block_device" not in conf.keys(): return CheckResult.FAILED return CheckResult.PASSED diff --git a/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py b/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py --- a/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py +++ b/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py @@ -6,8 +6,13 @@ def __init__(self): name = "Ensure every security groups rule has a description" id = "CKV_AWS_23" - supported_resource = ['aws_security_group', 'aws_security_group_rule', 'aws_db_security_group', - 'aws_elasticache_security_group', 'aws_redshift_security_group'] + supported_resource = [ + 'aws_security_group', + 'aws_security_group_rule', + 'aws_db_security_group', + 'aws_elasticache_security_group', + 'aws_redshift_security_group', + ] categories = [CheckCategories.NETWORKING] super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)
{"golden_diff": "diff --git a/checkov/common/runners/runner_registry.py b/checkov/common/runners/runner_registry.py\n--- a/checkov/common/runners/runner_registry.py\n+++ b/checkov/common/runners/runner_registry.py\n@@ -29,7 +29,11 @@\n return self.scan_reports\n \n def print_reports(self, scan_reports, args):\n- if args.output != \"json\" and args.output != \"junitxml\" and args.output != \"github_failed_only\":\n+ if args.output not in (\n+ \"github_failed_only\",\n+ \"json\",\n+ \"junitxml\",\n+ ):\n print(f\"{self.banner}\\n\")\n exit_codes = []\n report_jsons = []\ndiff --git a/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py b/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py\n--- a/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py\n+++ b/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py\n@@ -20,15 +20,25 @@\n :param conf: aws_launch_configuration configuration\n :return: <CheckResult>\n \"\"\"\n- have_root_block = 0\n for key in conf.keys():\n- if \"block_device\" in key and \"ephemeral\" not in key:\n- if isinstance(conf[key][0], dict) and conf[key][0].get(\"encrypted\") != [True]:\n+ if (\n+ \"block_device\" in key\n+ and\n+ \"ephemeral\" not in key\n+ ):\n+ if (\n+ isinstance(conf[key][0], dict)\n+ and\n+ conf[key][0].get(\"encrypted\") != [True]\n+ and\n+ # If present, the encrypted flag will be determined by the snapshot\n+ # Note: checkov does not know if snapshot is encrypted, so we default to PASSED\n+ not conf[key][0].get(\"snapshot_id\")\n+ ):\n return CheckResult.FAILED\n- if \"root_block_device\" in key:\n- # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.\n- have_root_block = 1\n- if have_root_block == 0: \n+\n+ # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.\n+ if \"root_block_device\" not in conf.keys():\n return CheckResult.FAILED\n \n return CheckResult.PASSED\ndiff --git a/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py b/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py\n--- a/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py\n+++ b/checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py\n@@ -6,8 +6,13 @@\n def __init__(self):\n name = \"Ensure every security groups rule has a description\"\n id = \"CKV_AWS_23\"\n- supported_resource = ['aws_security_group', 'aws_security_group_rule', 'aws_db_security_group',\n- 'aws_elasticache_security_group', 'aws_redshift_security_group']\n+ supported_resource = [\n+ 'aws_security_group',\n+ 'aws_security_group_rule',\n+ 'aws_db_security_group',\n+ 'aws_elasticache_security_group',\n+ 'aws_redshift_security_group',\n+ ]\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)\n", "issue": "LaunchConfigurationEBSEncryption gives false-positives, due to not checking snapshot_id\nIn the relevant `aws_launch_configuration` Terraform docs https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_configuration\r\n\r\n```\r\nencrypted - (Optional) Whether the volume should be encrypted or not. 
\r\nDo not use this option if you are using snapshot_id as the encrypted flag will be determined by the snapshot.\r\n(Default: false).\r\n```\r\nThe relevant part is **Do not use this option if you are using snapshot_id as the encrypted flag will be determined by the snapshot.** `snapshot_id` is not taken into account in the current check:\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/d67ec380daf81b889e83d4d7c7d33f490525e899/checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py#L16-L34\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass SecurityGroupRuleDescription(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure every security groups rule has a description\"\n id = \"CKV_AWS_23\"\n supported_resource = ['aws_security_group', 'aws_security_group_rule', 'aws_db_security_group',\n 'aws_elasticache_security_group', 'aws_redshift_security_group']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for description at security group rules :\n https://www.terraform.io/docs/providers/aws/r/security_group.html\n :param conf: aws_security_group configuration\n :return: <CheckResult>\n \"\"\"\n if 'description' in conf.keys():\n if conf['description']:\n return CheckResult.PASSED\n egress_result = self.check_rule(rule_type='egress', conf=conf)\n ingress_result = self.check_rule(rule_type='ingress', conf=conf)\n if egress_result == CheckResult.PASSED and ingress_result == CheckResult.PASSED:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n def check_rule(self, rule_type, conf):\n if rule_type in conf.keys():\n for rule in conf[rule_type]:\n if isinstance(rule, dict):\n if 'description' not in rule.keys() or not rule['description']:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = SecurityGroupRuleDescription()\n", "path": "checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py"}, {"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass LaunchConfigurationEBSEncryption(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure all data stored in the Launch configuration EBS is securely encrypted\"\n id = \"CKV_AWS_8\"\n supported_resources = ['aws_launch_configuration', 'aws_instance']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"*_block_device/[0]/encrypted\"\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for encryption configuration at launch configuration:\n https://www.terraform.io/docs/providers/aws/r/launch_configuration.html or https://www.terraform.io/docs/providers/aws/d/instance.html\n :param conf: aws_launch_configuration configuration\n :return: <CheckResult>\n \"\"\"\n have_root_block = 0\n for key in conf.keys():\n if \"block_device\" in key and \"ephemeral\" not in key:\n if isinstance(conf[key][0], dict) and conf[key][0].get(\"encrypted\") != [True]:\n return CheckResult.FAILED\n if \"root_block_device\" in key:\n # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.\n 
have_root_block = 1\n if have_root_block == 0: \n return CheckResult.FAILED\n\n return CheckResult.PASSED\n\n\ncheck = LaunchConfigurationEBSEncryption()\n", "path": "checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py"}, {"content": "import json\nimport logging\nfrom abc import abstractmethod\n\n\nclass RunnerRegistry(object):\n runners = []\n scan_reports = []\n banner = \"\"\n\n def __init__(self, banner, runner_filter, *runners):\n self.logger = logging.getLogger(__name__)\n self.runner_filter = runner_filter\n self.runners = runners\n self.banner = banner\n self.filter_runner_framework()\n self.scan_reports = []\n\n @abstractmethod\n def extract_entity_details(self, entity):\n raise NotImplementedError()\n\n def run(self, root_folder=None, external_checks_dir=None, files=None, guidelines={}):\n for runner in self.runners:\n scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,\n runner_filter=self.runner_filter)\n RunnerRegistry.enrich_report_with_guidelines(scan_report, guidelines)\n self.scan_reports.append(scan_report)\n return self.scan_reports\n\n def print_reports(self, scan_reports, args):\n if args.output != \"json\" and args.output != \"junitxml\" and args.output != \"github_failed_only\":\n print(f\"{self.banner}\\n\")\n exit_codes = []\n report_jsons = []\n for report in scan_reports:\n if not report.is_empty():\n if args.output == \"json\":\n report_jsons.append(report.get_dict())\n elif args.output == \"junitxml\":\n report.print_junit_xml()\n elif args.output == 'github_failed_only':\n report.print_failed_github_md()\n else:\n report.print_console(is_quiet=args.quiet)\n exit_codes.append(report.get_exit_code(args.soft_fail))\n if args.output == \"json\":\n if len(report_jsons) == 1:\n print(json.dumps(report_jsons[0], indent=4))\n else:\n print(json.dumps(report_jsons, indent=4))\n exit_code = 1 if 1 in exit_codes else 0\n exit(exit_code)\n\n def filter_runner_framework(self):\n if self.runner_filter.framework == 'all':\n return\n for runner in self.runners:\n if runner.check_type == self.runner_filter.framework:\n self.runners = [runner]\n return\n\n @staticmethod\n def enrich_report_with_guidelines(scan_report, guidelines):\n for record in scan_report.failed_checks + scan_report.passed_checks + scan_report.skipped_checks:\n if record.check_id in guidelines:\n record.set_guideline(guidelines[record.check_id])\n", "path": "checkov/common/runners/runner_registry.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass SecurityGroupRuleDescription(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure every security groups rule has a description\"\n id = \"CKV_AWS_23\"\n supported_resource = [\n 'aws_security_group',\n 'aws_security_group_rule',\n 'aws_db_security_group',\n 'aws_elasticache_security_group',\n 'aws_redshift_security_group',\n ]\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for description at security group rules :\n https://www.terraform.io/docs/providers/aws/r/security_group.html\n :param conf: aws_security_group configuration\n :return: <CheckResult>\n \"\"\"\n if 'description' in conf.keys():\n if conf['description']:\n return CheckResult.PASSED\n egress_result = 
self.check_rule(rule_type='egress', conf=conf)\n ingress_result = self.check_rule(rule_type='ingress', conf=conf)\n if egress_result == CheckResult.PASSED and ingress_result == CheckResult.PASSED:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n def check_rule(self, rule_type, conf):\n if rule_type in conf.keys():\n for rule in conf[rule_type]:\n if isinstance(rule, dict):\n if 'description' not in rule.keys() or not rule['description']:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = SecurityGroupRuleDescription()\n", "path": "checkov/terraform/checks/resource/aws/SecurityGroupRuleDescription.py"}, {"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass LaunchConfigurationEBSEncryption(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure all data stored in the Launch configuration EBS is securely encrypted\"\n id = \"CKV_AWS_8\"\n supported_resources = ['aws_launch_configuration', 'aws_instance']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"*_block_device/[0]/encrypted\"\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for encryption configuration at launch configuration:\n https://www.terraform.io/docs/providers/aws/r/launch_configuration.html or https://www.terraform.io/docs/providers/aws/d/instance.html\n :param conf: aws_launch_configuration configuration\n :return: <CheckResult>\n \"\"\"\n for key in conf.keys():\n if (\n \"block_device\" in key\n and\n \"ephemeral\" not in key\n ):\n if (\n isinstance(conf[key][0], dict)\n and\n conf[key][0].get(\"encrypted\") != [True]\n and\n # If present, the encrypted flag will be determined by the snapshot\n # Note: checkov does not know if snapshot is encrypted, so we default to PASSED\n not conf[key][0].get(\"snapshot_id\")\n ):\n return CheckResult.FAILED\n\n # Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.\n if \"root_block_device\" not in conf.keys():\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n\n\ncheck = LaunchConfigurationEBSEncryption()\n", "path": "checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py"}, {"content": "import json\nimport logging\nfrom abc import abstractmethod\n\n\nclass RunnerRegistry(object):\n runners = []\n scan_reports = []\n banner = \"\"\n\n def __init__(self, banner, runner_filter, *runners):\n self.logger = logging.getLogger(__name__)\n self.runner_filter = runner_filter\n self.runners = runners\n self.banner = banner\n self.filter_runner_framework()\n self.scan_reports = []\n\n @abstractmethod\n def extract_entity_details(self, entity):\n raise NotImplementedError()\n\n def run(self, root_folder=None, external_checks_dir=None, files=None, guidelines={}):\n for runner in self.runners:\n scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,\n runner_filter=self.runner_filter)\n RunnerRegistry.enrich_report_with_guidelines(scan_report, guidelines)\n self.scan_reports.append(scan_report)\n return self.scan_reports\n\n def print_reports(self, scan_reports, args):\n if args.output not in (\n \"github_failed_only\",\n \"json\",\n \"junitxml\",\n ):\n print(f\"{self.banner}\\n\")\n exit_codes = []\n report_jsons = []\n for report in scan_reports:\n if not 
report.is_empty():\n if args.output == \"json\":\n report_jsons.append(report.get_dict())\n elif args.output == \"junitxml\":\n report.print_junit_xml()\n elif args.output == 'github_failed_only':\n report.print_failed_github_md()\n else:\n report.print_console(is_quiet=args.quiet)\n exit_codes.append(report.get_exit_code(args.soft_fail))\n if args.output == \"json\":\n if len(report_jsons) == 1:\n print(json.dumps(report_jsons[0], indent=4))\n else:\n print(json.dumps(report_jsons, indent=4))\n exit_code = 1 if 1 in exit_codes else 0\n exit(exit_code)\n\n def filter_runner_framework(self):\n if self.runner_filter.framework == 'all':\n return\n for runner in self.runners:\n if runner.check_type == self.runner_filter.framework:\n self.runners = [runner]\n return\n\n @staticmethod\n def enrich_report_with_guidelines(scan_report, guidelines):\n for record in scan_report.failed_checks + scan_report.passed_checks + scan_report.skipped_checks:\n if record.check_id in guidelines:\n record.set_guideline(guidelines[record.check_id])\n", "path": "checkov/common/runners/runner_registry.py"}]}
2,038
804
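The checkov record above patches `LaunchConfigurationEBSEncryption` so that block devices backed by a `snapshot_id` are no longer flagged (their encryption state is decided by the snapshot) and so a configuration that omits `root_block_device` entirely still fails. As a reading aid, the sketch below re-implements that patched decision logic as a standalone function. It is an illustration only: `CheckResult` is stubbed locally so the snippet runs without checkov installed, and the sample `conf` dictionary is a made-up parsed-Terraform fragment, not data from the record.

```python
from enum import Enum


class CheckResult(Enum):  # local stand-in for checkov.common.models.enums.CheckResult
    PASSED = "passed"
    FAILED = "failed"


def scan_launch_configuration(conf):
    """Mirror of the patched scan_resource_conf logic from the diff above."""
    for key in conf.keys():
        if "block_device" in key and "ephemeral" not in key:
            if (
                isinstance(conf[key][0], dict)
                and conf[key][0].get("encrypted") != [True]
                # encryption is inherited from the snapshot, so do not flag it
                and not conf[key][0].get("snapshot_id")
            ):
                return CheckResult.FAILED
    # omitting root_block_device entirely produces an unencrypted root volume
    if "root_block_device" not in conf.keys():
        return CheckResult.FAILED
    return CheckResult.PASSED


# Hypothetical example: a root volume restored from a snapshot now passes,
# because its encryption state comes from the snapshot, not the flag.
conf = {"root_block_device": [{"snapshot_id": ["snap-0123"], "encrypted": [False]}]}
print(scan_launch_configuration(conf).name)  # PASSED
```

The same input fed to the pre-patch logic would have returned FAILED, which is exactly the false positive the quoted issue describes.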
gh_patches_debug_12143
rasdani/github-patches
git_diff
google__turbinia-294
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Programatically setting config file I am trying to write tests for the dftimewolf turbinia module. Loading the configuration on a system that doesn't have one will fail, so I am trying to manually feed in a test config data to see if the module behaves as expected. I tried setting the `TURBINIA_CONFIG_PATH` environment variable, but this just *adds* the path the list of possible config paths. This would work in a pristine test environment, but it will break in my dev setup where I already have a production turbinia config file set up. What do you think of giving `TURBINIA_CONFIG_PATH` environment variable precedence over the other potential config locations? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `turbinia/config/__init__.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Copyright 2016 Google Inc. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 """Basic Turbinia config.""" 16 17 from __future__ import unicode_literals 18 19 import imp 20 import itertools 21 import logging 22 import os 23 import sys 24 25 log = logging.getLogger('turbinia') 26 27 # Look for config files with these names 28 CONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config.py'] 29 # Look in homedir first, then /etc/turbinia, and finally in the source 30 # config dir for config files 31 CONFIGPATH = [ 32 os.path.expanduser('~'), 33 '/etc/turbinia', 34 os.path.dirname(os.path.abspath(__file__))] 35 # Config vars that we expect to exist in the configuration 36 CONFIGVARS = [ 37 # Turbinia Config 38 'TASK_MANAGER', 39 'LOG_FILE', 40 'LOCK_FILE', 41 'OUTPUT_DIR', 42 'SLEEP_TIME', 43 'SINGLE_RUN', 44 'MOUNT_DIR_PREFIX', 45 'SHARED_FILESYSTEM', 46 # TODO(aarontp): Move this to the recipe config when it's available. 47 'DEBUG_TASKS', 48 # GCE CONFIG 49 'PROJECT', 50 'ZONE', 51 'TURBINIA_REGION', 52 'BUCKET_NAME', 53 'PSQ_TOPIC', 54 'PUBSUB_TOPIC', 55 'GCS_OUTPUT_PATH', 56 'STATE_MANAGER', 57 'INSTANCE_ID', 58 # REDIS CONFIG 59 'REDIS_HOST', 60 'REDIS_PORT', 61 'REDIS_DB', 62 # Celery config 63 'CELERY_BROKER', 64 'CELERY_BACKEND', 65 'KOMBU_BROKER', 66 'KOMBU_CHANNEL', 67 'KOMBU_DURABLE',] 68 # Environment variable to look for path data in 69 ENVCONFIGVAR = 'TURBINIA_CONFIG_PATH' 70 71 CONFIG = None 72 73 74 class TurbiniaConfigException(Exception): 75 """Exception for Turbinia configuration.""" 76 pass 77 78 79 def LoadConfig(): 80 """Finds Turbinia config file and loads it.""" 81 # TODO(aarontp): Find way to not require global var here. Maybe a singleton 82 # pattern on the config class. 
83 # pylint: disable=global-statement 84 global CONFIG 85 if CONFIG: 86 return CONFIG 87 88 if ENVCONFIGVAR in os.environ: 89 CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':')) 90 91 config_file = None 92 # Load first file found 93 for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES): 94 if os.path.exists(os.path.join(_dir, _file)): 95 config_file = os.path.join(_dir, _file) 96 break 97 98 if config_file is None: 99 raise TurbiniaConfigException('No config files found') 100 101 log.info('Loading config from {0:s}'.format(config_file)) 102 _config = imp.load_source('config', config_file) 103 _config.configSource = config_file 104 ValidateAndSetConfig(_config) 105 CONFIG = _config 106 return _config 107 108 109 def ValidateAndSetConfig(_config): 110 """Makes sure that the config has the vars loaded and set in the module.""" 111 # TODO(aarontp): Allow for non-mandatory config options 112 for var in CONFIGVARS: 113 if not hasattr(_config, var): 114 raise TurbiniaConfigException( 115 'No config attribute {0:s}:{1:s}'.format(_config.configSource, var)) 116 if getattr(_config, var) is None: 117 raise TurbiniaConfigException( 118 'Config attribute {0:s}:{1:s} is not set'.format( 119 _config.configSource, var)) 120 121 # Set the attribute in the current module 122 setattr(sys.modules[__name__], var, getattr(_config, var)) 123 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/turbinia/config/__init__.py b/turbinia/config/__init__.py --- a/turbinia/config/__init__.py +++ b/turbinia/config/__init__.py @@ -85,12 +85,16 @@ if CONFIG: return CONFIG + # If the environment variable is set, take precedence over the pre-defined + # CONFIGPATHs. + configpath = CONFIGPATH if ENVCONFIGVAR in os.environ: - CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':')) + configpath = os.environ[ENVCONFIGVAR].split(':') + config_file = None # Load first file found - for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES): + for _dir, _file in itertools.product(configpath, CONFIGFILES): if os.path.exists(os.path.join(_dir, _file)): config_file = os.path.join(_dir, _file) break
{"golden_diff": "diff --git a/turbinia/config/__init__.py b/turbinia/config/__init__.py\n--- a/turbinia/config/__init__.py\n+++ b/turbinia/config/__init__.py\n@@ -85,12 +85,16 @@\n if CONFIG:\n return CONFIG\n \n+ # If the environment variable is set, take precedence over the pre-defined\n+ # CONFIGPATHs.\n+ configpath = CONFIGPATH\n if ENVCONFIGVAR in os.environ:\n- CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':'))\n+ configpath = os.environ[ENVCONFIGVAR].split(':')\n+\n \n config_file = None\n # Load first file found\n- for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES):\n+ for _dir, _file in itertools.product(configpath, CONFIGFILES):\n if os.path.exists(os.path.join(_dir, _file)):\n config_file = os.path.join(_dir, _file)\n break\n", "issue": "Programatically setting config file\nI am trying to write tests for the dftimewolf turbinia module. Loading the configuration on a system that doesn't have one will fail, so I am trying to manually feed in a test config data to see if the module behaves as expected.\r\n\r\nI tried setting the `TURBINIA_CONFIG_PATH` environment variable, but this just *adds* the path the list of possible config paths. This would work in a pristine test environment, but it will break in my dev setup where I already have a production turbinia config file set up.\r\n\r\nWhat do you think of giving `TURBINIA_CONFIG_PATH` environment variable precedence over the other potential config locations?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Basic Turbinia config.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport imp\nimport itertools\nimport logging\nimport os\nimport sys\n\nlog = logging.getLogger('turbinia')\n\n# Look for config files with these names\nCONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config.py']\n# Look in homedir first, then /etc/turbinia, and finally in the source\n# config dir for config files\nCONFIGPATH = [\n os.path.expanduser('~'),\n '/etc/turbinia',\n os.path.dirname(os.path.abspath(__file__))]\n# Config vars that we expect to exist in the configuration\nCONFIGVARS = [\n # Turbinia Config\n 'TASK_MANAGER',\n 'LOG_FILE',\n 'LOCK_FILE',\n 'OUTPUT_DIR',\n 'SLEEP_TIME',\n 'SINGLE_RUN',\n 'MOUNT_DIR_PREFIX',\n 'SHARED_FILESYSTEM',\n # TODO(aarontp): Move this to the recipe config when it's available.\n 'DEBUG_TASKS',\n # GCE CONFIG\n 'PROJECT',\n 'ZONE',\n 'TURBINIA_REGION',\n 'BUCKET_NAME',\n 'PSQ_TOPIC',\n 'PUBSUB_TOPIC',\n 'GCS_OUTPUT_PATH',\n 'STATE_MANAGER',\n 'INSTANCE_ID',\n # REDIS CONFIG\n 'REDIS_HOST',\n 'REDIS_PORT',\n 'REDIS_DB',\n # Celery config\n 'CELERY_BROKER',\n 'CELERY_BACKEND',\n 'KOMBU_BROKER',\n 'KOMBU_CHANNEL',\n 'KOMBU_DURABLE',]\n# Environment variable to look for path data in\nENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'\n\nCONFIG = None\n\n\nclass TurbiniaConfigException(Exception):\n \"\"\"Exception for Turbinia configuration.\"\"\"\n pass\n\n\ndef LoadConfig():\n \"\"\"Finds Turbinia config file 
and loads it.\"\"\"\n # TODO(aarontp): Find way to not require global var here. Maybe a singleton\n # pattern on the config class.\n # pylint: disable=global-statement\n global CONFIG\n if CONFIG:\n return CONFIG\n\n if ENVCONFIGVAR in os.environ:\n CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':'))\n\n config_file = None\n # Load first file found\n for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES):\n if os.path.exists(os.path.join(_dir, _file)):\n config_file = os.path.join(_dir, _file)\n break\n\n if config_file is None:\n raise TurbiniaConfigException('No config files found')\n\n log.info('Loading config from {0:s}'.format(config_file))\n _config = imp.load_source('config', config_file)\n _config.configSource = config_file\n ValidateAndSetConfig(_config)\n CONFIG = _config\n return _config\n\n\ndef ValidateAndSetConfig(_config):\n \"\"\"Makes sure that the config has the vars loaded and set in the module.\"\"\"\n # TODO(aarontp): Allow for non-mandatory config options\n for var in CONFIGVARS:\n if not hasattr(_config, var):\n raise TurbiniaConfigException(\n 'No config attribute {0:s}:{1:s}'.format(_config.configSource, var))\n if getattr(_config, var) is None:\n raise TurbiniaConfigException(\n 'Config attribute {0:s}:{1:s} is not set'.format(\n _config.configSource, var))\n\n # Set the attribute in the current module\n setattr(sys.modules[__name__], var, getattr(_config, var))\n", "path": "turbinia/config/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Basic Turbinia config.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport imp\nimport itertools\nimport logging\nimport os\nimport sys\n\nlog = logging.getLogger('turbinia')\n\n# Look for config files with these names\nCONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config.py']\n# Look in homedir first, then /etc/turbinia, and finally in the source\n# config dir for config files\nCONFIGPATH = [\n os.path.expanduser('~'),\n '/etc/turbinia',\n os.path.dirname(os.path.abspath(__file__))]\n# Config vars that we expect to exist in the configuration\nCONFIGVARS = [\n # Turbinia Config\n 'TASK_MANAGER',\n 'LOG_FILE',\n 'LOCK_FILE',\n 'OUTPUT_DIR',\n 'SLEEP_TIME',\n 'SINGLE_RUN',\n 'MOUNT_DIR_PREFIX',\n 'SHARED_FILESYSTEM',\n # TODO(aarontp): Move this to the recipe config when it's available.\n 'DEBUG_TASKS',\n # GCE CONFIG\n 'PROJECT',\n 'ZONE',\n 'TURBINIA_REGION',\n 'BUCKET_NAME',\n 'PSQ_TOPIC',\n 'PUBSUB_TOPIC',\n 'GCS_OUTPUT_PATH',\n 'STATE_MANAGER',\n 'INSTANCE_ID',\n # REDIS CONFIG\n 'REDIS_HOST',\n 'REDIS_PORT',\n 'REDIS_DB',\n # Celery config\n 'CELERY_BROKER',\n 'CELERY_BACKEND',\n 'KOMBU_BROKER',\n 'KOMBU_CHANNEL',\n 'KOMBU_DURABLE',]\n# Environment variable to look for path data in\nENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'\n\nCONFIG = None\n\n\nclass TurbiniaConfigException(Exception):\n \"\"\"Exception for Turbinia configuration.\"\"\"\n pass\n\n\ndef LoadConfig():\n \"\"\"Finds Turbinia config 
file and loads it.\"\"\"\n # TODO(aarontp): Find way to not require global var here. Maybe a singleton\n # pattern on the config class.\n # pylint: disable=global-statement\n global CONFIG\n if CONFIG:\n return CONFIG\n\n # If the environment variable is set, take precedence over the pre-defined\n # CONFIGPATHs.\n configpath = CONFIGPATH\n if ENVCONFIGVAR in os.environ:\n configpath = os.environ[ENVCONFIGVAR].split(':')\n\n\n config_file = None\n # Load first file found\n for _dir, _file in itertools.product(configpath, CONFIGFILES):\n if os.path.exists(os.path.join(_dir, _file)):\n config_file = os.path.join(_dir, _file)\n break\n\n if config_file is None:\n raise TurbiniaConfigException('No config files found')\n\n log.info('Loading config from {0:s}'.format(config_file))\n _config = imp.load_source('config', config_file)\n _config.configSource = config_file\n ValidateAndSetConfig(_config)\n CONFIG = _config\n return _config\n\n\ndef ValidateAndSetConfig(_config):\n \"\"\"Makes sure that the config has the vars loaded and set in the module.\"\"\"\n # TODO(aarontp): Allow for non-mandatory config options\n for var in CONFIGVARS:\n if not hasattr(_config, var):\n raise TurbiniaConfigException(\n 'No config attribute {0:s}:{1:s}'.format(_config.configSource, var))\n if getattr(_config, var) is None:\n raise TurbiniaConfigException(\n 'Config attribute {0:s}:{1:s} is not set'.format(\n _config.configSource, var))\n\n # Set the attribute in the current module\n setattr(sys.modules[__name__], var, getattr(_config, var))\n", "path": "turbinia/config/__init__.py"}]}
1,598
220
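The turbinia record above makes `TURBINIA_CONFIG_PATH` take precedence: when the variable is set, its directories replace the built-in search path instead of being appended to it. The helper below is a hypothetical extraction of that rule so the behaviour can be tried without a Turbinia checkout; the directory names in the example are invented.

```python
import os

ENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'
# Source config dir omitted here for brevity; the real CONFIGPATH has a third entry.
DEFAULT_CONFIGPATH = [os.path.expanduser('~'), '/etc/turbinia']


def candidate_config_dirs(environ=None):
    """If the env var is set it wins outright; otherwise fall back to defaults."""
    environ = os.environ if environ is None else environ
    if ENVCONFIGVAR in environ:
        return environ[ENVCONFIGVAR].split(':')
    return list(DEFAULT_CONFIGPATH)


# With the variable set, only the test location is searched.
print(candidate_config_dirs({ENVCONFIGVAR: '/tmp/turbinia-test'}))
# ['/tmp/turbinia-test']
print(candidate_config_dirs({}))  # defaults when the variable is unset
```

Replacing rather than extending the path is what lets a test config win even on a machine that also has a production config in the home directory, which is the scenario the issue describes.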
gh_patches_debug_171
rasdani/github-patches
git_diff
HypothesisWorks__hypothesis-563
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- External pull requests currently fail the deploy task The build on #536 is currently failing because the decryption is trying to run and it doesn't have access to the decryption environment variables because it comes from @Zac-HD's fork rather than the main repo. The solution is just to have that task skip for external pull requests I think. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/deploy.py` Content: ``` 1 #!/usr/bin/env python 2 3 # coding=utf-8 4 # 5 # This file is part of Hypothesis, which may be found at 6 # https://github.com/HypothesisWorks/hypothesis-python 7 # 8 # Most of this work is copyright (C) 2013-2017 David R. MacIver 9 # ([email protected]), but it contains contributions by others. See 10 # CONTRIBUTING.rst for a full list of people who may hold copyright, and 11 # consult the git log if you need to determine who owns an individual 12 # contribution. 13 # 14 # This Source Code Form is subject to the terms of the Mozilla Public License, 15 # v. 2.0. If a copy of the MPL was not distributed with this file, You can 16 # obtain one at http://mozilla.org/MPL/2.0/. 17 # 18 # END HEADER 19 20 from __future__ import division, print_function, absolute_import 21 22 import os 23 import sys 24 import random 25 import shutil 26 import subprocess 27 from time import time, sleep 28 29 import hypothesistooling as tools 30 31 sys.path.append(os.path.dirname(__file__)) # noqa 32 33 34 DIST = os.path.join(tools.ROOT, 'dist') 35 36 37 PENDING_STATUS = ('started', 'created') 38 39 40 if __name__ == '__main__': 41 42 print('Decrypting secrets') 43 44 # We'd normally avoid the use of shell=True, but this is more or less 45 # intended as an opaque string that was given to us by Travis that happens 46 # to be a shell command that we run, and there are a number of good reasons 47 # this particular instance is harmless and would be high effort to 48 # convert (principally: Lack of programmatic generation of the string and 49 # extensive use of environment variables in it), so we're making an 50 # exception here. 51 subprocess.check_call( 52 'openssl aes-256-cbc -K $encrypted_39cb4cc39a80_key ' 53 '-iv $encrypted_39cb4cc39a80_iv -in secrets.tar.enc ' 54 '-out secrets.tar -d', 55 shell=True 56 ) 57 58 subprocess.check_call([ 59 'tar', '-xvf', 'secrets.tar', 60 ]) 61 62 last_release = tools.latest_version() 63 64 print('Current version: %s. Latest released version: %s' % ( 65 tools.__version__, last_release 66 )) 67 68 print('Building an sdist...') 69 70 if os.path.exists(DIST): 71 shutil.rmtree(DIST) 72 73 subprocess.check_output([ 74 sys.executable, 'setup.py', 'sdist', '--dist-dir', DIST, 75 ]) 76 77 if not tools.on_master(): 78 print('Not deploying due to not being on master') 79 sys.exit(0) 80 81 if not tools.has_source_changes(last_release): 82 print('Not deploying due to no source changes') 83 sys.exit(0) 84 85 start_time = time() 86 87 prev_pending = None 88 89 # We time out after an hour, which is a stupidly long time and it should 90 # never actually take that long: A full Travis run only takes about 20-30 91 # minutes! This is really just here as a guard in case something goes 92 # wrong and we're not paying attention so as to not be too mean to Travis.. 
93 while time() <= start_time + 60 * 60: 94 jobs = tools.build_jobs() 95 96 failed_jobs = [ 97 (k, v) 98 for k, vs in jobs.items() 99 if k not in PENDING_STATUS + ('passed',) 100 for v in vs 101 ] 102 103 if failed_jobs: 104 print('Failing this due to failure of jobs %s' % ( 105 ', '.join('%s(%s)' % (s, j) for j, s in failed_jobs), 106 )) 107 sys.exit(1) 108 else: 109 pending = [j for s in PENDING_STATUS for j in jobs.get(s, ())] 110 try: 111 # This allows us to test the deploy job for a build locally. 112 pending.remove('deploy') 113 except ValueError: 114 pass 115 if pending: 116 still_pending = set(pending) 117 if prev_pending is None: 118 print('Waiting for the following jobs to complete:') 119 for p in sorted(still_pending): 120 print(' * %s' % (p,)) 121 print() 122 else: 123 completed = prev_pending - still_pending 124 if completed: 125 print('%s completed since last check.' % ( 126 ', '.join(sorted(completed)),)) 127 prev_pending = still_pending 128 naptime = 10.0 * (2 + random.random()) 129 print('Waiting %.2fs for %d more job%s to complete' % ( 130 naptime, len(pending), 's' if len(pending) > 1 else '',)) 131 sleep(naptime) 132 else: 133 break 134 else: 135 print("We've been waiting for an hour. That seems bad. Failing now.") 136 sys.exit(1) 137 138 print('Looks good to release!') 139 print('Now uploading to pypi.') 140 141 subprocess.check_output([ 142 sys.executable, '-m', 'twine', 'upload', 143 '--config-file', './.pypirc', 144 os.path.join(DIST, '*'), 145 ]) 146 147 print('Release seems good. Pushing the tag now.') 148 149 tools.create_tag() 150 sys.exit(0) 151 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/deploy.py b/scripts/deploy.py --- a/scripts/deploy.py +++ b/scripts/deploy.py @@ -38,6 +38,8 @@ if __name__ == '__main__': + if os.environ.get('TRAVIS_SECURE_ENV_VARS', None) != 'true': + sys.exit(0) print('Decrypting secrets')
{"golden_diff": "diff --git a/scripts/deploy.py b/scripts/deploy.py\n--- a/scripts/deploy.py\n+++ b/scripts/deploy.py\n@@ -38,6 +38,8 @@\n \n \n if __name__ == '__main__':\n+ if os.environ.get('TRAVIS_SECURE_ENV_VARS', None) != 'true':\n+ sys.exit(0)\n \n print('Decrypting secrets')\n", "issue": "External pull requests currently fail the deploy task\nThe build on #536 is currently failing because the decryption is trying to run and it doesn't have access to the decryption environment variables because it comes from @Zac-HD's fork rather than the main repo.\r\n\r\nThe solution is just to have that task skip for external pull requests I think.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2017 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\nimport random\nimport shutil\nimport subprocess\nfrom time import time, sleep\n\nimport hypothesistooling as tools\n\nsys.path.append(os.path.dirname(__file__)) # noqa\n\n\nDIST = os.path.join(tools.ROOT, 'dist')\n\n\nPENDING_STATUS = ('started', 'created')\n\n\nif __name__ == '__main__':\n\n print('Decrypting secrets')\n\n # We'd normally avoid the use of shell=True, but this is more or less\n # intended as an opaque string that was given to us by Travis that happens\n # to be a shell command that we run, and there are a number of good reasons\n # this particular instance is harmless and would be high effort to\n # convert (principally: Lack of programmatic generation of the string and\n # extensive use of environment variables in it), so we're making an\n # exception here.\n subprocess.check_call(\n 'openssl aes-256-cbc -K $encrypted_39cb4cc39a80_key '\n '-iv $encrypted_39cb4cc39a80_iv -in secrets.tar.enc '\n '-out secrets.tar -d',\n shell=True\n )\n\n subprocess.check_call([\n 'tar', '-xvf', 'secrets.tar',\n ])\n\n last_release = tools.latest_version()\n\n print('Current version: %s. Latest released version: %s' % (\n tools.__version__, last_release\n ))\n\n print('Building an sdist...')\n\n if os.path.exists(DIST):\n shutil.rmtree(DIST)\n\n subprocess.check_output([\n sys.executable, 'setup.py', 'sdist', '--dist-dir', DIST,\n ])\n\n if not tools.on_master():\n print('Not deploying due to not being on master')\n sys.exit(0)\n\n if not tools.has_source_changes(last_release):\n print('Not deploying due to no source changes')\n sys.exit(0)\n\n start_time = time()\n\n prev_pending = None\n\n # We time out after an hour, which is a stupidly long time and it should\n # never actually take that long: A full Travis run only takes about 20-30\n # minutes! 
This is really just here as a guard in case something goes\n # wrong and we're not paying attention so as to not be too mean to Travis..\n while time() <= start_time + 60 * 60:\n jobs = tools.build_jobs()\n\n failed_jobs = [\n (k, v)\n for k, vs in jobs.items()\n if k not in PENDING_STATUS + ('passed',)\n for v in vs\n ]\n\n if failed_jobs:\n print('Failing this due to failure of jobs %s' % (\n ', '.join('%s(%s)' % (s, j) for j, s in failed_jobs),\n ))\n sys.exit(1)\n else:\n pending = [j for s in PENDING_STATUS for j in jobs.get(s, ())]\n try:\n # This allows us to test the deploy job for a build locally.\n pending.remove('deploy')\n except ValueError:\n pass\n if pending:\n still_pending = set(pending)\n if prev_pending is None:\n print('Waiting for the following jobs to complete:')\n for p in sorted(still_pending):\n print(' * %s' % (p,))\n print()\n else:\n completed = prev_pending - still_pending\n if completed:\n print('%s completed since last check.' % (\n ', '.join(sorted(completed)),))\n prev_pending = still_pending\n naptime = 10.0 * (2 + random.random())\n print('Waiting %.2fs for %d more job%s to complete' % (\n naptime, len(pending), 's' if len(pending) > 1 else '',))\n sleep(naptime)\n else:\n break\n else:\n print(\"We've been waiting for an hour. That seems bad. Failing now.\")\n sys.exit(1)\n\n print('Looks good to release!')\n print('Now uploading to pypi.')\n\n subprocess.check_output([\n sys.executable, '-m', 'twine', 'upload',\n '--config-file', './.pypirc',\n os.path.join(DIST, '*'),\n ])\n\n print('Release seems good. Pushing the tag now.')\n\n tools.create_tag()\n sys.exit(0)\n", "path": "scripts/deploy.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2017 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\nimport random\nimport shutil\nimport subprocess\nfrom time import time, sleep\n\nimport hypothesistooling as tools\n\nsys.path.append(os.path.dirname(__file__)) # noqa\n\n\nDIST = os.path.join(tools.ROOT, 'dist')\n\n\nPENDING_STATUS = ('started', 'created')\n\n\nif __name__ == '__main__':\n if os.environ.get('TRAVIS_SECURE_ENV_VARS', None) != 'true':\n sys.exit(0)\n\n print('Decrypting secrets')\n\n # We'd normally avoid the use of shell=True, but this is more or less\n # intended as an opaque string that was given to us by Travis that happens\n # to be a shell command that we run, and there are a number of good reasons\n # this particular instance is harmless and would be high effort to\n # convert (principally: Lack of programmatic generation of the string and\n # extensive use of environment variables in it), so we're making an\n # exception here.\n subprocess.check_call(\n 'openssl aes-256-cbc -K $encrypted_39cb4cc39a80_key '\n '-iv $encrypted_39cb4cc39a80_iv -in secrets.tar.enc '\n '-out secrets.tar -d',\n shell=True\n )\n\n subprocess.check_call([\n 'tar', '-xvf', 'secrets.tar',\n ])\n\n last_release = tools.latest_version()\n\n print('Current version: %s. Latest released version: %s' % (\n tools.__version__, last_release\n ))\n\n print('Building an sdist...')\n\n if os.path.exists(DIST):\n shutil.rmtree(DIST)\n\n subprocess.check_output([\n sys.executable, 'setup.py', 'sdist', '--dist-dir', DIST,\n ])\n\n if not tools.on_master():\n print('Not deploying due to not being on master')\n sys.exit(0)\n\n if not tools.has_source_changes(last_release):\n print('Not deploying due to no source changes')\n sys.exit(0)\n\n start_time = time()\n\n prev_pending = None\n\n # We time out after an hour, which is a stupidly long time and it should\n # never actually take that long: A full Travis run only takes about 20-30\n # minutes! This is really just here as a guard in case something goes\n # wrong and we're not paying attention so as to not be too mean to Travis..\n while time() <= start_time + 60 * 60:\n jobs = tools.build_jobs()\n\n failed_jobs = [\n (k, v)\n for k, vs in jobs.items()\n if k not in PENDING_STATUS + ('passed',)\n for v in vs\n ]\n\n if failed_jobs:\n print('Failing this due to failure of jobs %s' % (\n ', '.join('%s(%s)' % (s, j) for j, s in failed_jobs),\n ))\n sys.exit(1)\n else:\n pending = [j for s in PENDING_STATUS for j in jobs.get(s, ())]\n try:\n # This allows us to test the deploy job for a build locally.\n pending.remove('deploy')\n except ValueError:\n pass\n if pending:\n still_pending = set(pending)\n if prev_pending is None:\n print('Waiting for the following jobs to complete:')\n for p in sorted(still_pending):\n print(' * %s' % (p,))\n print()\n else:\n completed = prev_pending - still_pending\n if completed:\n print('%s completed since last check.' % (\n ', '.join(sorted(completed)),))\n prev_pending = still_pending\n naptime = 10.0 * (2 + random.random())\n print('Waiting %.2fs for %d more job%s to complete' % (\n naptime, len(pending), 's' if len(pending) > 1 else '',))\n sleep(naptime)\n else:\n break\n else:\n print(\"We've been waiting for an hour. That seems bad. 
Failing now.\")\n sys.exit(1)\n\n print('Looks good to release!')\n print('Now uploading to pypi.')\n\n subprocess.check_output([\n sys.executable, '-m', 'twine', 'upload',\n '--config-file', './.pypirc',\n os.path.join(DIST, '*'),\n ])\n\n print('Release seems good. Pushing the tag now.')\n\n tools.create_tag()\n sys.exit(0)\n", "path": "scripts/deploy.py"}]}
1,853
83
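The hypothesis record above adds an early exit to the deploy script for builds that do not have the encrypted secrets, which is the situation for pull requests coming from forks. The fragment below shows just that guard in isolation; wrapping it in a small function is this annotation's choice, not part of the original patch. On Travis CI, `TRAVIS_SECURE_ENV_VARS` is the string `'true'` only when secure variables were actually injected.

```python
import os
import sys


def exit_unless_secrets_available():
    # Fork PRs never receive the encrypted variables, so there is nothing to
    # decrypt or upload; exit 0 so the build stays green instead of failing.
    if os.environ.get('TRAVIS_SECURE_ENV_VARS', None) != 'true':
        sys.exit(0)
```

Exiting with status 0 rather than raising keeps external contributions green even though the deploy stage cannot run for them.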
gh_patches_debug_5678
rasdani/github-patches
git_diff
stephenmcd__mezzanine-1954
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Small typo in mezzanine/core/auth_backends.py Should read verification rather than verficiation. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mezzanine/core/auth_backends.py` Content: ``` 1 from __future__ import unicode_literals 2 3 from django.contrib.auth import get_user_model 4 from django.contrib.auth.backends import ModelBackend 5 from django.contrib.auth.tokens import default_token_generator 6 from django.db.models import Q 7 from django.utils.http import base36_to_int 8 9 10 User = get_user_model() 11 12 13 class MezzanineBackend(ModelBackend): 14 """ 15 Extends Django's ``ModelBackend`` to allow login via username, 16 email, or verification token. 17 18 Args are either ``username`` and ``password``, or ``uidb36`` 19 and ``token``. In either case, ``is_active`` can also be given. 20 21 For login, is_active is not given, so that the login form can 22 raise a specific error for inactive users. 23 For password reset, True is given for is_active. 24 For signup verficiation, False is given for is_active. 25 """ 26 27 def authenticate(self, *args, **kwargs): 28 if kwargs: 29 username = kwargs.pop("username", None) 30 if username: 31 username_or_email = Q(username=username) | Q(email=username) 32 password = kwargs.pop("password", None) 33 try: 34 user = User.objects.get(username_or_email, **kwargs) 35 except User.DoesNotExist: 36 pass 37 else: 38 if user.check_password(password): 39 return user 40 else: 41 if 'uidb36' not in kwargs: 42 return 43 kwargs["id"] = base36_to_int(kwargs.pop("uidb36")) 44 token = kwargs.pop("token") 45 try: 46 user = User.objects.get(**kwargs) 47 except User.DoesNotExist: 48 pass 49 else: 50 if default_token_generator.check_token(user, token): 51 return user 52 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mezzanine/core/auth_backends.py b/mezzanine/core/auth_backends.py --- a/mezzanine/core/auth_backends.py +++ b/mezzanine/core/auth_backends.py @@ -21,7 +21,7 @@ For login, is_active is not given, so that the login form can raise a specific error for inactive users. For password reset, True is given for is_active. - For signup verficiation, False is given for is_active. + For signup verification, False is given for is_active. """ def authenticate(self, *args, **kwargs):
{"golden_diff": "diff --git a/mezzanine/core/auth_backends.py b/mezzanine/core/auth_backends.py\n--- a/mezzanine/core/auth_backends.py\n+++ b/mezzanine/core/auth_backends.py\n@@ -21,7 +21,7 @@\n For login, is_active is not given, so that the login form can\n raise a specific error for inactive users.\n For password reset, True is given for is_active.\n- For signup verficiation, False is given for is_active.\n+ For signup verification, False is given for is_active.\n \"\"\"\n \n def authenticate(self, *args, **kwargs):\n", "issue": "Small typo in mezzanine/core/auth_backends.py\nShould read verification rather than verficiation.\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.backends import ModelBackend\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.db.models import Q\nfrom django.utils.http import base36_to_int\n\n\nUser = get_user_model()\n\n\nclass MezzanineBackend(ModelBackend):\n \"\"\"\n Extends Django's ``ModelBackend`` to allow login via username,\n email, or verification token.\n\n Args are either ``username`` and ``password``, or ``uidb36``\n and ``token``. In either case, ``is_active`` can also be given.\n\n For login, is_active is not given, so that the login form can\n raise a specific error for inactive users.\n For password reset, True is given for is_active.\n For signup verficiation, False is given for is_active.\n \"\"\"\n\n def authenticate(self, *args, **kwargs):\n if kwargs:\n username = kwargs.pop(\"username\", None)\n if username:\n username_or_email = Q(username=username) | Q(email=username)\n password = kwargs.pop(\"password\", None)\n try:\n user = User.objects.get(username_or_email, **kwargs)\n except User.DoesNotExist:\n pass\n else:\n if user.check_password(password):\n return user\n else:\n if 'uidb36' not in kwargs:\n return\n kwargs[\"id\"] = base36_to_int(kwargs.pop(\"uidb36\"))\n token = kwargs.pop(\"token\")\n try:\n user = User.objects.get(**kwargs)\n except User.DoesNotExist:\n pass\n else:\n if default_token_generator.check_token(user, token):\n return user\n", "path": "mezzanine/core/auth_backends.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.backends import ModelBackend\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.db.models import Q\nfrom django.utils.http import base36_to_int\n\n\nUser = get_user_model()\n\n\nclass MezzanineBackend(ModelBackend):\n \"\"\"\n Extends Django's ``ModelBackend`` to allow login via username,\n email, or verification token.\n\n Args are either ``username`` and ``password``, or ``uidb36``\n and ``token``. 
In either case, ``is_active`` can also be given.\n\n For login, is_active is not given, so that the login form can\n raise a specific error for inactive users.\n For password reset, True is given for is_active.\n For signup verification, False is given for is_active.\n \"\"\"\n\n def authenticate(self, *args, **kwargs):\n if kwargs:\n username = kwargs.pop(\"username\", None)\n if username:\n username_or_email = Q(username=username) | Q(email=username)\n password = kwargs.pop(\"password\", None)\n try:\n user = User.objects.get(username_or_email, **kwargs)\n except User.DoesNotExist:\n pass\n else:\n if user.check_password(password):\n return user\n else:\n if 'uidb36' not in kwargs:\n return\n kwargs[\"id\"] = base36_to_int(kwargs.pop(\"uidb36\"))\n token = kwargs.pop(\"token\")\n try:\n user = User.objects.get(**kwargs)\n except User.DoesNotExist:\n pass\n else:\n if default_token_generator.check_token(user, token):\n return user\n", "path": "mezzanine/core/auth_backends.py"}]}
745
137
gh_patches_debug_30239
rasdani/github-patches
git_diff
Lightning-AI__pytorch-lightning-3261
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- auto_scale_batch_size won't reset current_epoch ## 🐛 Bug When `auto_scale_batch_size` is enabled, the model is initially trained with varying batch sizes. When training begins, `trainer.current_epoch` equals 1 instead of 0. ### To Reproduce Either observe the progress bar or use a simple callback to track the epoch number, once with `auto_scale_batch_size` enabled and once with `auto_scale_batch_size` disabled. ``` from pytorch_lightning import Callback class PrintCallback(Callback): def __init__(self): self.observed_epochs = [] def on_train_epoch_start(self, trainer, pl_module): print(f'Current Epoch: {trainer.current_epoch}') self.observed_epochs.append(trainer.current_epoch) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pytorch_lightning/tuner/batch_size_scaling.py` Content: ``` 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License 14 import os 15 from pytorch_lightning.core.lightning import LightningModule 16 from pytorch_lightning.utilities.data import has_len 17 from pytorch_lightning.utilities.parsing import lightning_hasattr, lightning_getattr, lightning_setattr 18 from pytorch_lightning.utilities import rank_zero_warn 19 from pytorch_lightning.utilities.exceptions import MisconfigurationException 20 from pytorch_lightning.utilities.memory import is_oom_error, garbage_collection_cuda 21 from pytorch_lightning.loggers.base import DummyLogger 22 from pytorch_lightning import _logger as log 23 from typing import Optional, Tuple 24 25 26 def scale_batch_size(trainer, 27 model: LightningModule, 28 mode: str = 'power', 29 steps_per_trial: int = 3, 30 init_val: int = 2, 31 max_trials: int = 25, 32 batch_arg_name: str = 'batch_size', 33 **fit_kwargs): 34 r""" 35 Will iteratively try to find the largest batch size for a given model 36 that does not give an out of memory (OOM) error. 37 38 Args: 39 trainer: The Trainer 40 model: Model to fit. 41 42 mode: string setting the search mode. Either `power` or `binsearch`. 43 If mode is `power` we keep multiplying the batch size by 2, until 44 we get an OOM error. If mode is 'binsearch', we will initially 45 also keep multiplying by 2 and after encountering an OOM error 46 do a binary search between the last successful batch size and the 47 batch size that failed. 48 49 steps_per_trial: number of steps to run with a given batch size. 50 Idealy 1 should be enough to test if a OOM error occurs, 51 however in practise a few are needed 52 53 init_val: initial batch size to start the search with 54 55 max_trials: max number of increase in batch size done before 56 algorithm is terminated 57 58 batch_arg_name: name of the attribute that stores the batch size. 59 It is expected that the user has provided a model or datamodule that has a hyperparameter 60 with that name. 
We will look for this attribute name in the following places 61 62 - `model` 63 - `model.hparams` 64 - `model.datamodule` 65 - `trainer.datamodule` (the datamodule passed to the tune method) 66 67 **fit_kwargs: remaining arguments to be passed to .fit(), e.g., dataloader 68 or datamodule. 69 """ 70 if not lightning_hasattr(model, batch_arg_name): 71 raise MisconfigurationException( 72 f'Field {batch_arg_name} not found in both `model` and `model.hparams`') 73 if hasattr(model, batch_arg_name) and hasattr(model, "hparams") and batch_arg_name in model.hparams: 74 rank_zero_warn( 75 f'Field `model.{batch_arg_name}` and `model.hparams.{batch_arg_name}` are mutually exclusive!' 76 f' `model.{batch_arg_name}` will be used as the initial batch size for scaling.' 77 f' If this is not the intended behavior, please remove either one.' 78 ) 79 80 if hasattr(model.train_dataloader, 'patch_loader_code'): 81 raise MisconfigurationException('The batch scaling feature cannot be used with dataloaders' 82 ' passed directly to `.fit()`. Please disable the feature or' 83 ' incorporate the dataloader into the model.') 84 85 # Arguments we adjust during the batch size finder, save for restoring 86 __scale_batch_dump_params(trainer) 87 88 # Set to values that are required by the algorithm 89 __scale_batch_reset_params(trainer, model, steps_per_trial) 90 91 # Save initial model, that is loaded after batch size is found 92 save_path = os.path.join(trainer.default_root_dir, 'temp_model.ckpt') 93 trainer.save_checkpoint(str(save_path)) 94 95 if trainer.progress_bar_callback: 96 trainer.progress_bar_callback.disable() 97 98 # Initially we just double in size until an OOM is encountered 99 new_size = _adjust_batch_size(trainer, value=init_val) # initially set to init_val 100 if mode == 'power': 101 new_size = _run_power_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs) 102 elif mode == 'binsearch': 103 new_size = _run_binsearch_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs) 104 else: 105 raise ValueError('mode in method `scale_batch_size` can only be `power` or `binsearch') 106 107 garbage_collection_cuda() 108 log.info(f'Finished batch size finder, will continue with full run using batch size {new_size}') 109 110 # Restore initial state of model 111 trainer.checkpoint_connector.restore(str(save_path), on_gpu=trainer.on_gpu) 112 os.remove(save_path) 113 114 # Finish by resetting variables so trainer is ready to fit model 115 __scale_batch_restore_params(trainer) 116 if trainer.progress_bar_callback: 117 trainer.progress_bar_callback.enable() 118 119 return new_size 120 121 122 def __scale_batch_dump_params(trainer): 123 # Prevent going into infinite loop 124 trainer.__dumped_params = { 125 'auto_lr_find': trainer.auto_lr_find, 126 'max_steps': trainer.max_steps, 127 'weights_summary': trainer.weights_summary, 128 'logger': trainer.logger, 129 'callbacks': trainer.callbacks, 130 'checkpoint_callback': trainer.checkpoint_callback, 131 'early_stop_callback': trainer.early_stop_callback, 132 'auto_scale_batch_size': trainer.auto_scale_batch_size, 133 'limit_train_batches': trainer.limit_train_batches, 134 'model': trainer.model, 135 } 136 137 138 def __scale_batch_reset_params(trainer, model, steps_per_trial): 139 trainer.auto_scale_batch_size = None # prevent recursion 140 trainer.auto_lr_find = False # avoid lr find being called multiple times 141 trainer.max_steps = steps_per_trial # take few steps 142 trainer.weights_summary = None # not needed before full run 143 
trainer.logger = DummyLogger() 144 trainer.callbacks = [] # not needed before full run 145 trainer.checkpoint_callback = False # required for saving 146 trainer.early_stop_callback = None 147 trainer.limit_train_batches = 1.0 148 trainer.optimizers, trainer.schedulers = [], [] # required for saving 149 trainer.model = model # required for saving 150 151 152 def __scale_batch_restore_params(trainer): 153 trainer.auto_lr_find = trainer.__dumped_params['auto_lr_find'] 154 trainer.max_steps = trainer.__dumped_params['max_steps'] 155 trainer.weights_summary = trainer.__dumped_params['weights_summary'] 156 trainer.logger = trainer.__dumped_params['logger'] 157 trainer.callbacks = trainer.__dumped_params['callbacks'] 158 trainer.checkpoint_callback = trainer.__dumped_params['checkpoint_callback'] 159 trainer.auto_scale_batch_size = trainer.__dumped_params['auto_scale_batch_size'] 160 trainer.early_stop_callback = trainer.__dumped_params['early_stop_callback'] 161 trainer.limit_train_batches = trainer.__dumped_params['limit_train_batches'] 162 trainer.model = trainer.__dumped_params['model'] 163 del trainer.__dumped_params 164 165 166 def _run_power_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs): 167 """ Batch scaling mode where the size is doubled at each iteration until an 168 OOM error is encountered. """ 169 for _ in range(max_trials): 170 garbage_collection_cuda() 171 trainer.global_step = 0 # reset after each try 172 try: 173 # Try fit 174 trainer.fit(model, **fit_kwargs) 175 # Double in size 176 new_size, changed = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc='succeeded') 177 except RuntimeError as exception: 178 # Only these errors should trigger an adjustment 179 if is_oom_error(exception): 180 # If we fail in power mode, half the size and return 181 garbage_collection_cuda() 182 new_size, _ = _adjust_batch_size(trainer, batch_arg_name, factor=0.5, desc='failed') 183 break 184 else: 185 raise # some other error not memory related 186 187 if not changed: 188 break 189 return new_size 190 191 192 def _run_binsearch_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs): 193 """ Batch scaling mode where the size is initially is doubled at each iteration 194 until an OOM error is encountered. 
Hereafter, the batch size is further 195 refined using a binary search """ 196 high = None 197 count = 0 198 while True: 199 garbage_collection_cuda() 200 trainer.global_step = 0 # reset after each try 201 try: 202 # Try fit 203 trainer.fit(model, **fit_kwargs) 204 count += 1 205 if count > max_trials: 206 break 207 # Double in size 208 low = new_size 209 if high: 210 if high - low <= 1: 211 break 212 midval = (high + low) // 2 213 new_size, changed = _adjust_batch_size(trainer, batch_arg_name, value=midval, desc='succeeded') 214 else: 215 new_size, changed = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc='succeeded') 216 217 if not changed: 218 break 219 220 except RuntimeError as exception: 221 # Only these errors should trigger an adjustment 222 if is_oom_error(exception): 223 # If we fail in power mode, half the size and return 224 garbage_collection_cuda() 225 high = new_size 226 midval = (high + low) // 2 227 new_size, _ = _adjust_batch_size(trainer, value=midval, desc='failed') 228 if high - low <= 1: 229 break 230 else: 231 raise # some other error not memory related 232 233 return new_size 234 235 236 def _adjust_batch_size(trainer, 237 batch_arg_name: str = 'batch_size', 238 factor: float = 1.0, 239 value: Optional[int] = None, 240 desc: str = None) -> Tuple[int, bool]: 241 """ Helper function for adjusting the batch size. 242 243 Args: 244 trainer: instance of pytorch_lightning.Trainer 245 246 batch_arg_name: name of the field where batch_size is stored. 247 248 factor: value which the old batch size is multiplied by to get the 249 new batch size 250 251 value: if a value is given, will override the batch size with this value. 252 Note that the value of `factor` will not have an effect in this case 253 254 desc: either `succeeded` or `failed`. Used purely for logging 255 256 Returns: 257 The new batch size for the next trial and a bool that signals whether the 258 new value is different than the previous batch size. 259 """ 260 model = trainer.get_model() 261 batch_size = lightning_getattr(model, batch_arg_name) 262 new_size = value if value is not None else int(batch_size * factor) 263 if desc: 264 log.info(f'Batch size {batch_size} {desc}, trying batch size {new_size}') 265 266 if not _is_valid_batch_size(new_size, trainer.train_dataloader): 267 new_size = min(new_size, len(trainer.train_dataloader.dataset)) 268 269 changed = new_size != batch_size 270 lightning_setattr(model, batch_arg_name, new_size) 271 return new_size, changed 272 273 274 def _is_valid_batch_size(current_size, dataloader): 275 return not has_len(dataloader) or current_size <= len(dataloader) 276 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pytorch_lightning/tuner/batch_size_scaling.py b/pytorch_lightning/tuner/batch_size_scaling.py --- a/pytorch_lightning/tuner/batch_size_scaling.py +++ b/pytorch_lightning/tuner/batch_size_scaling.py @@ -123,6 +123,7 @@ # Prevent going into infinite loop trainer.__dumped_params = { 'auto_lr_find': trainer.auto_lr_find, + 'current_epoch': trainer.current_epoch, 'max_steps': trainer.max_steps, 'weights_summary': trainer.weights_summary, 'logger': trainer.logger, @@ -138,6 +139,7 @@ def __scale_batch_reset_params(trainer, model, steps_per_trial): trainer.auto_scale_batch_size = None # prevent recursion trainer.auto_lr_find = False # avoid lr find being called multiple times + trainer.current_epoch = 0 trainer.max_steps = steps_per_trial # take few steps trainer.weights_summary = None # not needed before full run trainer.logger = DummyLogger() @@ -151,6 +153,7 @@ def __scale_batch_restore_params(trainer): trainer.auto_lr_find = trainer.__dumped_params['auto_lr_find'] + trainer.current_epoch = trainer.__dumped_params['current_epoch'] trainer.max_steps = trainer.__dumped_params['max_steps'] trainer.weights_summary = trainer.__dumped_params['weights_summary'] trainer.logger = trainer.__dumped_params['logger']
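The patch above threads `current_epoch` through the same dump/reset/restore cycle that the tuner already applies to `max_steps`, `logger`, and the other trainer attributes it temporarily overrides. A minimal sketch of how the behavior could be checked, adapted from the callback in the issue text; the Trainer arguments and the commented-out `model` calls are assumptions for illustration, not part of the patch:

```python
# Sketch: confirm that training starts at epoch 0 once the batch size finder
# restores trainer state. Assumes a pytorch_lightning version contemporary with
# this patch; the Trainer arguments and model calls are illustrative only.
from pytorch_lightning import Trainer, Callback

class EpochTracker(Callback):
    def __init__(self):
        self.observed_epochs = []

    def on_train_epoch_start(self, trainer, pl_module):
        # record the epoch number reported at the start of each training epoch
        self.observed_epochs.append(trainer.current_epoch)

tracker = EpochTracker()
trainer = Trainer(auto_scale_batch_size='power', max_epochs=2, callbacks=[tracker])
# trainer.tune(model)  # runs scale_batch_size(), which advances current_epoch internally
# trainer.fit(model)   # with the patch applied, tracker.observed_epochs starts at 0
```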
{"golden_diff": "diff --git a/pytorch_lightning/tuner/batch_size_scaling.py b/pytorch_lightning/tuner/batch_size_scaling.py\n--- a/pytorch_lightning/tuner/batch_size_scaling.py\n+++ b/pytorch_lightning/tuner/batch_size_scaling.py\n@@ -123,6 +123,7 @@\n # Prevent going into infinite loop\n trainer.__dumped_params = {\n 'auto_lr_find': trainer.auto_lr_find,\n+ 'current_epoch': trainer.current_epoch,\n 'max_steps': trainer.max_steps,\n 'weights_summary': trainer.weights_summary,\n 'logger': trainer.logger,\n@@ -138,6 +139,7 @@\n def __scale_batch_reset_params(trainer, model, steps_per_trial):\n trainer.auto_scale_batch_size = None # prevent recursion\n trainer.auto_lr_find = False # avoid lr find being called multiple times\n+ trainer.current_epoch = 0\n trainer.max_steps = steps_per_trial # take few steps\n trainer.weights_summary = None # not needed before full run\n trainer.logger = DummyLogger()\n@@ -151,6 +153,7 @@\n \n def __scale_batch_restore_params(trainer):\n trainer.auto_lr_find = trainer.__dumped_params['auto_lr_find']\n+ trainer.current_epoch = trainer.__dumped_params['current_epoch']\n trainer.max_steps = trainer.__dumped_params['max_steps']\n trainer.weights_summary = trainer.__dumped_params['weights_summary']\n trainer.logger = trainer.__dumped_params['logger']\n", "issue": "auto_scale_batch_size won't reset current_epoch\n## \ud83d\udc1b Bug\r\n\r\nWhen `auto_scale_batch_size` is enabled, the model is initially trained with varying batch sizes. When training begins, `trainer.current_epoch` equals 1 instead of 0.\r\n\r\n### To Reproduce\r\n\r\nEither observe the progress bar or use a simple callback to track the epoch number, once with `auto_scale_batch_size` enabled and once with `auto_scale_batch_size` disabled.\r\n\r\n```\r\nfrom pytorch_lightning import Callback\r\n\r\nclass PrintCallback(Callback):\r\n \r\n def __init__(self):\r\n self.observed_epochs = []\r\n \r\n def on_train_epoch_start(self, trainer, pl_module):\r\n print(f'Current Epoch: {trainer.current_epoch}')\r\n self.observed_epochs.append(trainer.current_epoch)\r\n\r\n```\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\nimport os\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_lightning.utilities.data import has_len\nfrom pytorch_lightning.utilities.parsing import lightning_hasattr, lightning_getattr, lightning_setattr\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.memory import is_oom_error, garbage_collection_cuda\nfrom pytorch_lightning.loggers.base import DummyLogger\nfrom pytorch_lightning import _logger as log\nfrom typing import Optional, Tuple\n\n\ndef scale_batch_size(trainer,\n model: LightningModule,\n mode: str = 'power',\n steps_per_trial: int = 3,\n init_val: int = 2,\n max_trials: int = 25,\n batch_arg_name: str = 'batch_size',\n **fit_kwargs):\n r\"\"\"\n Will iteratively try 
to find the largest batch size for a given model\n that does not give an out of memory (OOM) error.\n\n Args:\n trainer: The Trainer\n model: Model to fit.\n\n mode: string setting the search mode. Either `power` or `binsearch`.\n If mode is `power` we keep multiplying the batch size by 2, until\n we get an OOM error. If mode is 'binsearch', we will initially\n also keep multiplying by 2 and after encountering an OOM error\n do a binary search between the last successful batch size and the\n batch size that failed.\n\n steps_per_trial: number of steps to run with a given batch size.\n Idealy 1 should be enough to test if a OOM error occurs,\n however in practise a few are needed\n\n init_val: initial batch size to start the search with\n\n max_trials: max number of increase in batch size done before\n algorithm is terminated\n\n batch_arg_name: name of the attribute that stores the batch size.\n It is expected that the user has provided a model or datamodule that has a hyperparameter\n with that name. We will look for this attribute name in the following places\n\n - `model`\n - `model.hparams`\n - `model.datamodule`\n - `trainer.datamodule` (the datamodule passed to the tune method)\n\n **fit_kwargs: remaining arguments to be passed to .fit(), e.g., dataloader\n or datamodule.\n \"\"\"\n if not lightning_hasattr(model, batch_arg_name):\n raise MisconfigurationException(\n f'Field {batch_arg_name} not found in both `model` and `model.hparams`')\n if hasattr(model, batch_arg_name) and hasattr(model, \"hparams\") and batch_arg_name in model.hparams:\n rank_zero_warn(\n f'Field `model.{batch_arg_name}` and `model.hparams.{batch_arg_name}` are mutually exclusive!'\n f' `model.{batch_arg_name}` will be used as the initial batch size for scaling.'\n f' If this is not the intended behavior, please remove either one.'\n )\n\n if hasattr(model.train_dataloader, 'patch_loader_code'):\n raise MisconfigurationException('The batch scaling feature cannot be used with dataloaders'\n ' passed directly to `.fit()`. 
Please disable the feature or'\n ' incorporate the dataloader into the model.')\n\n # Arguments we adjust during the batch size finder, save for restoring\n __scale_batch_dump_params(trainer)\n\n # Set to values that are required by the algorithm\n __scale_batch_reset_params(trainer, model, steps_per_trial)\n\n # Save initial model, that is loaded after batch size is found\n save_path = os.path.join(trainer.default_root_dir, 'temp_model.ckpt')\n trainer.save_checkpoint(str(save_path))\n\n if trainer.progress_bar_callback:\n trainer.progress_bar_callback.disable()\n\n # Initially we just double in size until an OOM is encountered\n new_size = _adjust_batch_size(trainer, value=init_val) # initially set to init_val\n if mode == 'power':\n new_size = _run_power_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs)\n elif mode == 'binsearch':\n new_size = _run_binsearch_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs)\n else:\n raise ValueError('mode in method `scale_batch_size` can only be `power` or `binsearch')\n\n garbage_collection_cuda()\n log.info(f'Finished batch size finder, will continue with full run using batch size {new_size}')\n\n # Restore initial state of model\n trainer.checkpoint_connector.restore(str(save_path), on_gpu=trainer.on_gpu)\n os.remove(save_path)\n\n # Finish by resetting variables so trainer is ready to fit model\n __scale_batch_restore_params(trainer)\n if trainer.progress_bar_callback:\n trainer.progress_bar_callback.enable()\n\n return new_size\n\n\ndef __scale_batch_dump_params(trainer):\n # Prevent going into infinite loop\n trainer.__dumped_params = {\n 'auto_lr_find': trainer.auto_lr_find,\n 'max_steps': trainer.max_steps,\n 'weights_summary': trainer.weights_summary,\n 'logger': trainer.logger,\n 'callbacks': trainer.callbacks,\n 'checkpoint_callback': trainer.checkpoint_callback,\n 'early_stop_callback': trainer.early_stop_callback,\n 'auto_scale_batch_size': trainer.auto_scale_batch_size,\n 'limit_train_batches': trainer.limit_train_batches,\n 'model': trainer.model,\n }\n\n\ndef __scale_batch_reset_params(trainer, model, steps_per_trial):\n trainer.auto_scale_batch_size = None # prevent recursion\n trainer.auto_lr_find = False # avoid lr find being called multiple times\n trainer.max_steps = steps_per_trial # take few steps\n trainer.weights_summary = None # not needed before full run\n trainer.logger = DummyLogger()\n trainer.callbacks = [] # not needed before full run\n trainer.checkpoint_callback = False # required for saving\n trainer.early_stop_callback = None\n trainer.limit_train_batches = 1.0\n trainer.optimizers, trainer.schedulers = [], [] # required for saving\n trainer.model = model # required for saving\n\n\ndef __scale_batch_restore_params(trainer):\n trainer.auto_lr_find = trainer.__dumped_params['auto_lr_find']\n trainer.max_steps = trainer.__dumped_params['max_steps']\n trainer.weights_summary = trainer.__dumped_params['weights_summary']\n trainer.logger = trainer.__dumped_params['logger']\n trainer.callbacks = trainer.__dumped_params['callbacks']\n trainer.checkpoint_callback = trainer.__dumped_params['checkpoint_callback']\n trainer.auto_scale_batch_size = trainer.__dumped_params['auto_scale_batch_size']\n trainer.early_stop_callback = trainer.__dumped_params['early_stop_callback']\n trainer.limit_train_batches = trainer.__dumped_params['limit_train_batches']\n trainer.model = trainer.__dumped_params['model']\n del trainer.__dumped_params\n\n\ndef _run_power_scaling(trainer, model, 
new_size, batch_arg_name, max_trials, **fit_kwargs):\n \"\"\" Batch scaling mode where the size is doubled at each iteration until an\n OOM error is encountered. \"\"\"\n for _ in range(max_trials):\n garbage_collection_cuda()\n trainer.global_step = 0 # reset after each try\n try:\n # Try fit\n trainer.fit(model, **fit_kwargs)\n # Double in size\n new_size, changed = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc='succeeded')\n except RuntimeError as exception:\n # Only these errors should trigger an adjustment\n if is_oom_error(exception):\n # If we fail in power mode, half the size and return\n garbage_collection_cuda()\n new_size, _ = _adjust_batch_size(trainer, batch_arg_name, factor=0.5, desc='failed')\n break\n else:\n raise # some other error not memory related\n\n if not changed:\n break\n return new_size\n\n\ndef _run_binsearch_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs):\n \"\"\" Batch scaling mode where the size is initially is doubled at each iteration\n until an OOM error is encountered. Hereafter, the batch size is further\n refined using a binary search \"\"\"\n high = None\n count = 0\n while True:\n garbage_collection_cuda()\n trainer.global_step = 0 # reset after each try\n try:\n # Try fit\n trainer.fit(model, **fit_kwargs)\n count += 1\n if count > max_trials:\n break\n # Double in size\n low = new_size\n if high:\n if high - low <= 1:\n break\n midval = (high + low) // 2\n new_size, changed = _adjust_batch_size(trainer, batch_arg_name, value=midval, desc='succeeded')\n else:\n new_size, changed = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc='succeeded')\n\n if not changed:\n break\n\n except RuntimeError as exception:\n # Only these errors should trigger an adjustment\n if is_oom_error(exception):\n # If we fail in power mode, half the size and return\n garbage_collection_cuda()\n high = new_size\n midval = (high + low) // 2\n new_size, _ = _adjust_batch_size(trainer, value=midval, desc='failed')\n if high - low <= 1:\n break\n else:\n raise # some other error not memory related\n\n return new_size\n\n\ndef _adjust_batch_size(trainer,\n batch_arg_name: str = 'batch_size',\n factor: float = 1.0,\n value: Optional[int] = None,\n desc: str = None) -> Tuple[int, bool]:\n \"\"\" Helper function for adjusting the batch size.\n\n Args:\n trainer: instance of pytorch_lightning.Trainer\n\n batch_arg_name: name of the field where batch_size is stored.\n\n factor: value which the old batch size is multiplied by to get the\n new batch size\n\n value: if a value is given, will override the batch size with this value.\n Note that the value of `factor` will not have an effect in this case\n\n desc: either `succeeded` or `failed`. 
Used purely for logging\n\n Returns:\n The new batch size for the next trial and a bool that signals whether the\n new value is different than the previous batch size.\n \"\"\"\n model = trainer.get_model()\n batch_size = lightning_getattr(model, batch_arg_name)\n new_size = value if value is not None else int(batch_size * factor)\n if desc:\n log.info(f'Batch size {batch_size} {desc}, trying batch size {new_size}')\n\n if not _is_valid_batch_size(new_size, trainer.train_dataloader):\n new_size = min(new_size, len(trainer.train_dataloader.dataset))\n\n changed = new_size != batch_size\n lightning_setattr(model, batch_arg_name, new_size)\n return new_size, changed\n\n\ndef _is_valid_batch_size(current_size, dataloader):\n return not has_len(dataloader) or current_size <= len(dataloader)\n", "path": "pytorch_lightning/tuner/batch_size_scaling.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\nimport os\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_lightning.utilities.data import has_len\nfrom pytorch_lightning.utilities.parsing import lightning_hasattr, lightning_getattr, lightning_setattr\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.memory import is_oom_error, garbage_collection_cuda\nfrom pytorch_lightning.loggers.base import DummyLogger\nfrom pytorch_lightning import _logger as log\nfrom typing import Optional, Tuple\n\n\ndef scale_batch_size(trainer,\n model: LightningModule,\n mode: str = 'power',\n steps_per_trial: int = 3,\n init_val: int = 2,\n max_trials: int = 25,\n batch_arg_name: str = 'batch_size',\n **fit_kwargs):\n r\"\"\"\n Will iteratively try to find the largest batch size for a given model\n that does not give an out of memory (OOM) error.\n\n Args:\n trainer: The Trainer\n model: Model to fit.\n\n mode: string setting the search mode. Either `power` or `binsearch`.\n If mode is `power` we keep multiplying the batch size by 2, until\n we get an OOM error. If mode is 'binsearch', we will initially\n also keep multiplying by 2 and after encountering an OOM error\n do a binary search between the last successful batch size and the\n batch size that failed.\n\n steps_per_trial: number of steps to run with a given batch size.\n Idealy 1 should be enough to test if a OOM error occurs,\n however in practise a few are needed\n\n init_val: initial batch size to start the search with\n\n max_trials: max number of increase in batch size done before\n algorithm is terminated\n\n batch_arg_name: name of the attribute that stores the batch size.\n It is expected that the user has provided a model or datamodule that has a hyperparameter\n with that name. 
We will look for this attribute name in the following places\n\n - `model`\n - `model.hparams`\n - `model.datamodule`\n - `trainer.datamodule` (the datamodule passed to the tune method)\n\n **fit_kwargs: remaining arguments to be passed to .fit(), e.g., dataloader\n or datamodule.\n \"\"\"\n if not lightning_hasattr(model, batch_arg_name):\n raise MisconfigurationException(\n f'Field {batch_arg_name} not found in both `model` and `model.hparams`')\n if hasattr(model, batch_arg_name) and hasattr(model, \"hparams\") and batch_arg_name in model.hparams:\n rank_zero_warn(\n f'Field `model.{batch_arg_name}` and `model.hparams.{batch_arg_name}` are mutually exclusive!'\n f' `model.{batch_arg_name}` will be used as the initial batch size for scaling.'\n f' If this is not the intended behavior, please remove either one.'\n )\n\n if hasattr(model.train_dataloader, 'patch_loader_code'):\n raise MisconfigurationException('The batch scaling feature cannot be used with dataloaders'\n ' passed directly to `.fit()`. Please disable the feature or'\n ' incorporate the dataloader into the model.')\n\n # Arguments we adjust during the batch size finder, save for restoring\n __scale_batch_dump_params(trainer)\n\n # Set to values that are required by the algorithm\n __scale_batch_reset_params(trainer, model, steps_per_trial)\n\n # Save initial model, that is loaded after batch size is found\n save_path = os.path.join(trainer.default_root_dir, 'temp_model.ckpt')\n trainer.save_checkpoint(str(save_path))\n\n if trainer.progress_bar_callback:\n trainer.progress_bar_callback.disable()\n\n # Initially we just double in size until an OOM is encountered\n new_size = _adjust_batch_size(trainer, value=init_val) # initially set to init_val\n if mode == 'power':\n new_size = _run_power_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs)\n elif mode == 'binsearch':\n new_size = _run_binsearch_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs)\n else:\n raise ValueError('mode in method `scale_batch_size` can only be `power` or `binsearch')\n\n garbage_collection_cuda()\n log.info(f'Finished batch size finder, will continue with full run using batch size {new_size}')\n\n # Restore initial state of model\n trainer.checkpoint_connector.restore(str(save_path), on_gpu=trainer.on_gpu)\n os.remove(save_path)\n\n # Finish by resetting variables so trainer is ready to fit model\n __scale_batch_restore_params(trainer)\n if trainer.progress_bar_callback:\n trainer.progress_bar_callback.enable()\n\n return new_size\n\n\ndef __scale_batch_dump_params(trainer):\n # Prevent going into infinite loop\n trainer.__dumped_params = {\n 'auto_lr_find': trainer.auto_lr_find,\n 'current_epoch': trainer.current_epoch,\n 'max_steps': trainer.max_steps,\n 'weights_summary': trainer.weights_summary,\n 'logger': trainer.logger,\n 'callbacks': trainer.callbacks,\n 'checkpoint_callback': trainer.checkpoint_callback,\n 'early_stop_callback': trainer.early_stop_callback,\n 'auto_scale_batch_size': trainer.auto_scale_batch_size,\n 'limit_train_batches': trainer.limit_train_batches,\n 'model': trainer.model,\n }\n\n\ndef __scale_batch_reset_params(trainer, model, steps_per_trial):\n trainer.auto_scale_batch_size = None # prevent recursion\n trainer.auto_lr_find = False # avoid lr find being called multiple times\n trainer.current_epoch = 0\n trainer.max_steps = steps_per_trial # take few steps\n trainer.weights_summary = None # not needed before full run\n trainer.logger = DummyLogger()\n trainer.callbacks = 
[] # not needed before full run\n trainer.checkpoint_callback = False # required for saving\n trainer.early_stop_callback = None\n trainer.limit_train_batches = 1.0\n trainer.optimizers, trainer.schedulers = [], [] # required for saving\n trainer.model = model # required for saving\n\n\ndef __scale_batch_restore_params(trainer):\n trainer.auto_lr_find = trainer.__dumped_params['auto_lr_find']\n trainer.current_epoch = trainer.__dumped_params['current_epoch']\n trainer.max_steps = trainer.__dumped_params['max_steps']\n trainer.weights_summary = trainer.__dumped_params['weights_summary']\n trainer.logger = trainer.__dumped_params['logger']\n trainer.callbacks = trainer.__dumped_params['callbacks']\n trainer.checkpoint_callback = trainer.__dumped_params['checkpoint_callback']\n trainer.auto_scale_batch_size = trainer.__dumped_params['auto_scale_batch_size']\n trainer.early_stop_callback = trainer.__dumped_params['early_stop_callback']\n trainer.limit_train_batches = trainer.__dumped_params['limit_train_batches']\n trainer.model = trainer.__dumped_params['model']\n del trainer.__dumped_params\n\n\ndef _run_power_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs):\n \"\"\" Batch scaling mode where the size is doubled at each iteration until an\n OOM error is encountered. \"\"\"\n for _ in range(max_trials):\n garbage_collection_cuda()\n trainer.global_step = 0 # reset after each try\n try:\n # Try fit\n trainer.fit(model, **fit_kwargs)\n # Double in size\n new_size, changed = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc='succeeded')\n except RuntimeError as exception:\n # Only these errors should trigger an adjustment\n if is_oom_error(exception):\n # If we fail in power mode, half the size and return\n garbage_collection_cuda()\n new_size, _ = _adjust_batch_size(trainer, batch_arg_name, factor=0.5, desc='failed')\n break\n else:\n raise # some other error not memory related\n\n if not changed:\n break\n return new_size\n\n\ndef _run_binsearch_scaling(trainer, model, new_size, batch_arg_name, max_trials, **fit_kwargs):\n \"\"\" Batch scaling mode where the size is initially is doubled at each iteration\n until an OOM error is encountered. 
Hereafter, the batch size is further\n refined using a binary search \"\"\"\n high = None\n count = 0\n while True:\n garbage_collection_cuda()\n trainer.global_step = 0 # reset after each try\n try:\n # Try fit\n trainer.fit(model, **fit_kwargs)\n count += 1\n if count > max_trials:\n break\n # Double in size\n low = new_size\n if high:\n if high - low <= 1:\n break\n midval = (high + low) // 2\n new_size, changed = _adjust_batch_size(trainer, batch_arg_name, value=midval, desc='succeeded')\n else:\n new_size, changed = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc='succeeded')\n\n if not changed:\n break\n\n except RuntimeError as exception:\n # Only these errors should trigger an adjustment\n if is_oom_error(exception):\n # If we fail in power mode, half the size and return\n garbage_collection_cuda()\n high = new_size\n midval = (high + low) // 2\n new_size, _ = _adjust_batch_size(trainer, value=midval, desc='failed')\n if high - low <= 1:\n break\n else:\n raise # some other error not memory related\n\n return new_size\n\n\ndef _adjust_batch_size(trainer,\n batch_arg_name: str = 'batch_size',\n factor: float = 1.0,\n value: Optional[int] = None,\n desc: str = None) -> Tuple[int, bool]:\n \"\"\" Helper function for adjusting the batch size.\n\n Args:\n trainer: instance of pytorch_lightning.Trainer\n\n batch_arg_name: name of the field where batch_size is stored.\n\n factor: value which the old batch size is multiplied by to get the\n new batch size\n\n value: if a value is given, will override the batch size with this value.\n Note that the value of `factor` will not have an effect in this case\n\n desc: either `succeeded` or `failed`. Used purely for logging\n\n Returns:\n The new batch size for the next trial and a bool that signals whether the\n new value is different than the previous batch size.\n \"\"\"\n model = trainer.get_model()\n batch_size = lightning_getattr(model, batch_arg_name)\n new_size = value if value is not None else int(batch_size * factor)\n if desc:\n log.info(f'Batch size {batch_size} {desc}, trying batch size {new_size}')\n\n if not _is_valid_batch_size(new_size, trainer.train_dataloader):\n new_size = min(new_size, len(trainer.train_dataloader.dataset))\n\n changed = new_size != batch_size\n lightning_setattr(model, batch_arg_name, new_size)\n return new_size, changed\n\n\ndef _is_valid_batch_size(current_size, dataloader):\n return not has_len(dataloader) or current_size <= len(dataloader)\n", "path": "pytorch_lightning/tuner/batch_size_scaling.py"}]}
3753
330
gh_patches_debug_40037
rasdani/github-patches
git_diff
LMFDB__lmfdb-1760
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- many api json downloads fail with internal server errors The following pages all give internal server errors; the problem appears to occur on collections whose documents contain attributes that are object ids (other than _id, which is handled separately), which causes errors like: TypeError: ObjectId('4f7052dc5009fb18ef000811') is not JSON serializable The following pages all have this problem: http://www.lmfdb.org/api/MaassWaveForms/Coefficients?_format=json http://www.lmfdb.org/api/MaassWaveForms/Coefficients.chunks?_format=json http://www.lmfdb.org/api/MaassWaveForms/Coefficients.files?_format=json http://www.lmfdb.org/api/MaassWaveForms/FS?_format=json http://www.lmfdb.org/api/MaassWaveForms/Table.chunks?_format=json http://www.lmfdb.org/api/MaassWaveForms/maassform_plots?_format=json http://www.lmfdb.org/api/SL2Zsubgroups/groups?_format=json http://www.lmfdb.org/api/modularforms2/webchar.chunks?_format=json http://www.lmfdb.org/api/modularforms2/webeigenvalues.chunks?_format=json http://www.lmfdb.org/api/modularforms2/webmodformspace.chunks?_format=json http://www.lmfdb.org/api/modularforms2/webnewforms.chunks?_format=json http://www.lmfdb.org/api/siegel_modular_forms/samples?_format=json --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lmfdb/api/api.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 import pymongo 4 import urllib2 5 ASC = pymongo.ASCENDING 6 DESC = pymongo.DESCENDING 7 import flask 8 import yaml 9 import lmfdb.base as base 10 from lmfdb.utils import flash_error 11 from datetime import datetime 12 from flask import render_template, request, url_for 13 from lmfdb.api import api_page, api_logger 14 from bson.objectid import ObjectId 15 16 # caches the database information 17 _databases = None 18 19 def pluck(n, list): 20 return [_[n] for _ in list] 21 22 def quote_string(value): 23 if isinstance(value,unicode) or isinstance(value,str): 24 return repr(value) 25 elif isinstance(value,ObjectId): 26 return "\"ObjectId('%s')\""%value 27 return value 28 29 def pretty_document(rec,sep=", ",id=True): 30 # sort keys and remove _id for html display 31 attrs = sorted([(key,quote_string(rec[key])) for key in rec.keys() if (id or key != '_id')]) 32 return "{"+sep.join(["'%s': %s"%attr for attr in attrs])+"}" 33 34 35 def censor(entries): 36 """ 37 hide some of the databases and collection from the public 38 """ 39 dontstart = ["system.", "test", "upload", "admin", "contrib"] 40 censor = ["local", "userdb"] 41 for entry in entries: 42 if any(entry == x for x in censor) or \ 43 any(entry.startswith(x) for x in dontstart): 44 continue 45 yield entry 46 47 def init_database_info(): 48 global _databases 49 if _databases is None: 50 C = base.getDBConnection() 51 _databases = {} 52 for db in censor(C.database_names()): 53 colls = list(censor(C[db].collection_names())) 54 _databases[db] = sorted([(c, C[db][c].count()) for c in colls]) 55 56 @api_page.route("/") 57 def index(): 58 init_database_info() 59 databases = _databases 60 title = "API" 61 return render_template("api.html", **locals()) 62 63 @api_page.route("/stats") 64 def stats(): 65 def mb(x): 66 return int(round(x/1000000.0)) 67 info={} 68 info['minsizes'] = ['0','1','10','100','1000','10000','100000'] 69 info['minsize'] = request.args.get('minsize','1').strip() 70 if not info['minsize'] in info['minsizes']: 71 info['minsizes'] 
= '1' 72 info['groupby'] = 'db' if request.args.get('groupby','').strip().lower() == 'db' else '' 73 info['sortby'] = request.args.get('sortby','size').strip().lower() 74 if not info['sortby'] in ['size', 'objects']: 75 info['sortby'] = 'size' 76 init_database_info() 77 C = base.getDBConnection() 78 dbstats = {db:C[db].command("dbstats") for db in _databases} 79 info['dbs'] = len(dbstats.keys()) 80 collections = objects = 0 81 size = dataSize = indexSize = 0 82 stats = {} 83 for db in dbstats: 84 dbsize = dbstats[db]['dataSize']+dbstats[db]['indexSize'] 85 size += dbsize 86 dataSize += dbstats[db]['dataSize'] 87 indexSize += dbstats[db]['indexSize'] 88 dbsize = mb(dbsize) 89 dbobjects = dbstats[db]['objects'] 90 for c in pluck(0,_databases[db]): 91 if C[db][c].count(): 92 collections += 1 93 coll = '<a href = "' + url_for (".api_query", db=db, collection = c) + '">'+c+'</a>' 94 cstats = C[db].command("collstats",c) 95 objects += cstats['count'] 96 csize = mb(cstats['size']+cstats['totalIndexSize']) 97 if csize >= int(info['minsize']): 98 stats[cstats['ns']] = {'db':db, 'coll':coll, 'dbSize': dbsize, 'size':csize, 'dbObjects':dbobjects, 99 'dataSize':mb(cstats['size']), 'indexSize':mb(cstats['totalIndexSize']), 'avgObjSize':int(round(cstats['avgObjSize'])), 'objects':cstats['count'], 'indexes':cstats['nindexes']} 100 info['collections'] = collections 101 info['objects'] = objects 102 info['size'] = mb(size) 103 info['dataSize'] = mb(dataSize) 104 info['indexSize'] = mb(indexSize) 105 if info['sortby'] == 'objects' and info['groupby'] == 'db': 106 sortedkeys = sorted([db for db in stats],key=lambda x: (-stats[x]['dbObjects'],stats[x]['db'],-stats[x]['objects'],stats[x]['coll'])) 107 elif info['sortby'] == 'objects': 108 sortedkeys = sorted([db for db in stats],key=lambda x: (-stats[x]['objects'],stats[x]['db'],stats[x]['coll'])) 109 elif info['sortby'] == 'size' and info['groupby'] == 'db': 110 sortedkeys = sorted([db for db in stats],key=lambda x: (-stats[x]['dbSize'],stats[x]['db'],-stats[x]['size'],stats[x]['coll'])) 111 else: 112 sortedkeys = sorted([db for db in stats],key=lambda x: (-stats[x]['size'],stats[x]['db'],stats[x]['coll'])) 113 info['stats'] = [stats[key] for key in sortedkeys] 114 return render_template('api-stats.html', info=info) 115 116 @api_page.route("/<db>/<collection>/<id>") 117 def api_query_id(db, collection, id): 118 return api_query(db, collection, id = id) 119 120 121 @api_page.route("/<db>/<collection>") 122 def api_query(db, collection, id = None): 123 init_database_info() 124 125 # check what is queried for 126 if db not in _databases or collection not in pluck(0, _databases[db]): 127 return flask.abort(404) 128 129 # parsing the meta parameters _format and _offset 130 format = request.args.get("_format", "html") 131 offset = int(request.args.get("_offset", 0)) 132 DELIM = request.args.get("_delim", ",") 133 fields = request.args.get("_fields", None) 134 sortby = request.args.get("_sort", None) 135 136 if fields: 137 fields = fields.split(DELIM) 138 139 if sortby: 140 sortby = sortby.split(DELIM) 141 142 if offset > 10000: 143 if format != "html": 144 flask.abort(404) 145 else: 146 flash_error("offset %s too large, please refine your query.", offset) 147 return flask.redirect(url_for(".api_query", db=db, collection=collection)) 148 149 # sort = [('fieldname1', ASC/DESC), ...] 
150 if sortby is not None: 151 sort = [] 152 for key in sortby: 153 if key.startswith("-"): 154 sort.append((key[1:], DESC)) 155 else: 156 sort.append((key, ASC)) 157 else: 158 sort = None 159 160 # preparing the actual database query q 161 C = base.getDBConnection() 162 q = {} 163 164 if id is not None: 165 if id.startswith('ObjectId('): 166 q["_id"] = ObjectId(id[10:-2]) 167 else: 168 q["_id"] = id 169 single_object = True 170 else: 171 single_object = False 172 173 for qkey, qval in request.args.iteritems(): 174 from ast import literal_eval 175 try: 176 if qkey.startswith("_"): 177 continue 178 if qval.startswith("s"): 179 qval = qval[1:] 180 if qval.startswith("i"): 181 qval = int(qval[1:]) 182 elif qval.startswith("f"): 183 qval = float(qval[1:]) 184 elif qval.startswith("ls"): # indicator, that it might be a list of strings 185 qval = qval[2:].split(DELIM) 186 elif qval.startswith("li"): 187 qval = [int(_) for _ in qval[2:].split(DELIM)] 188 elif qval.startswith("lf"): 189 qval = [float(_) for _ in qval[2:].split(DELIM)] 190 elif qval.startswith("py"): # literal evaluation 191 qval = literal_eval(qval[2:]) 192 elif qval.startswith("cs"): # containing string in list 193 qval = { "$in" : [qval[2:]] } 194 elif qval.startswith("ci"): 195 qval = { "$in" : [int(qval[2:])] } 196 elif qval.startswith("cf"): 197 qval = { "$in" : [float(qval[2:])] } 198 elif qval.startswith("cpy"): 199 qval = { "$in" : [literal_eval(qval[3:])] } 200 except: 201 # no suitable conversion for the value, keep it as string 202 pass 203 204 # update the query 205 q[qkey] = qval 206 207 # executing the query "q" and replacing the _id in the result list 208 api_logger.info("API query: q = '%s', fields = '%s', sort = '%s', offset = %s" % (q, fields, sort, offset)) 209 data = list(C[db][collection].find(q, projection = fields, sort=sort).skip(offset).limit(100)) 210 211 if single_object and not data: 212 if format != 'html': 213 flask.abort(404) 214 else: 215 flash_error("no document with id %s found in collection %s.%s.", id, db, collection) 216 return flask.redirect(url_for(".api_query", db=db, collection=collection)) 217 218 for document in data: 219 oid = document["_id"] 220 if type(oid) == ObjectId: 221 document["_id"] = "ObjectId('%s')" % oid 222 elif isinstance(oid, basestring): 223 document["_id"] = str(oid) 224 225 # preparing the datastructure 226 start = offset 227 next_req = dict(request.args) 228 next_req["_offset"] = offset 229 url_args = next_req.copy() 230 query = url_for(".api_query", db=db, collection=collection, **next_req) 231 offset += len(data) 232 next_req["_offset"] = offset 233 next = url_for(".api_query", db=db, collection=collection, **next_req) 234 235 # the collected result 236 data = { 237 "database": db, 238 "collection": collection, 239 "timestamp": datetime.utcnow().isoformat(), 240 "data": data, 241 "start": start, 242 "offset": offset, 243 "query": query, 244 "next": next 245 } 246 247 # display of the result (default html) 248 if format.lower() == "json": 249 return flask.jsonify(**data) 250 elif format.lower() == "yaml": 251 y = yaml.dump(data, 252 default_flow_style=False, 253 canonical=False, 254 allow_unicode=True) 255 return flask.Response(y, mimetype='text/plain') 256 else: 257 # sort displayed records by key (as json and yaml do) 258 data["pretty"] = pretty_document 259 location = "%s/%s" % (db, collection) 260 title = "API - " + location 261 bc = [("API", url_for(".index")), (location, query)] 262 query_unquote = urllib2.unquote(data["query"]) 263 return 
render_template("collection.html", 264 title=title, 265 single_object=single_object, 266 query_unquote = query_unquote, 267 url_args = url_args, 268 bread=bc, 269 **data) 270 271 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lmfdb/api/api.py b/lmfdb/api/api.py --- a/lmfdb/api/api.py +++ b/lmfdb/api/api.py @@ -4,13 +4,15 @@ import urllib2 ASC = pymongo.ASCENDING DESC = pymongo.DESCENDING -import flask import yaml +import json +import flask import lmfdb.base as base from lmfdb.utils import flash_error from datetime import datetime -from flask import render_template, request, url_for +from flask import render_template, request, url_for, app, current_app from lmfdb.api import api_page, api_logger +from bson import json_util from bson.objectid import ObjectId # caches the database information @@ -26,6 +28,14 @@ return "\"ObjectId('%s')\""%value return value +def oids_to_strings(doc): + """ recursively replace all ObjectId values in dictionary doc with strings encoding the ObjectId values""" + for k,v in doc.items(): + if isinstance(v,ObjectId): + doc[k] = "ObjectId('%s')" % v + elif isinstance(v,dict): + oids_to_strings(doc[k]) + def pretty_document(rec,sep=", ",id=True): # sort keys and remove _id for html display attrs = sorted([(key,quote_string(rec[key])) for key in rec.keys() if (id or key != '_id')]) @@ -215,12 +225,9 @@ flash_error("no document with id %s found in collection %s.%s.", id, db, collection) return flask.redirect(url_for(".api_query", db=db, collection=collection)) + # fixup object ids for display and json/yaml encoding for document in data: - oid = document["_id"] - if type(oid) == ObjectId: - document["_id"] = "ObjectId('%s')" % oid - elif isinstance(oid, basestring): - document["_id"] = str(oid) + oids_to_strings(document) # preparing the datastructure start = offset @@ -244,9 +251,9 @@ "next": next } - # display of the result (default html) if format.lower() == "json": - return flask.jsonify(**data) + #return flask.jsonify(**data) # can't handle binary data + return current_app.response_class(json.dumps(data, encoding='ISO-8859-1', indent=2, default=json_util.default), mimetype='application/json') elif format.lower() == "yaml": y = yaml.dump(data, default_flow_style=False, @@ -254,7 +261,7 @@ allow_unicode=True) return flask.Response(y, mimetype='text/plain') else: - # sort displayed records by key (as json and yaml do) + # sort displayed records by key (as jsonify and yaml_dump do) data["pretty"] = pretty_document location = "%s/%s" % (db, collection) title = "API - " + location
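The fix above works at two levels: query results are walked recursively so nested `ObjectId` values become display strings before rendering, and the JSON route switches from `flask.jsonify` to `json.dumps(..., default=json_util.default)` so any remaining BSON types are encoded instead of raising `TypeError`. A small standalone sketch of both approaches; the sample document is invented for illustration and only the `bson` package bundled with `pymongo` is assumed:

```python
# Sketch: serializing MongoDB documents that contain ObjectId values.
# Requires pymongo (which provides bson); the sample document is invented.
import json
from bson import json_util
from bson.objectid import ObjectId

doc = {"_id": ObjectId("4f7052dc5009fb18ef000811"),
       "owner": ObjectId("507f1f77bcf86cd799439011"),
       "label": "example"}

# Option 1: let bson's json_util encode BSON types ({"$oid": "..."} style).
print(json.dumps(doc, default=json_util.default))

# Option 2: rewrite ObjectId values into plain strings first, as the patch does.
def oids_to_strings(d):
    for k, v in d.items():
        if isinstance(v, ObjectId):
            d[k] = "ObjectId('%s')" % v
        elif isinstance(v, dict):
            oids_to_strings(d[k])

oids_to_strings(doc)
print(json.dumps(doc))
```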
{"golden_diff": "diff --git a/lmfdb/api/api.py b/lmfdb/api/api.py\n--- a/lmfdb/api/api.py\n+++ b/lmfdb/api/api.py\n@@ -4,13 +4,15 @@\n import urllib2\n ASC = pymongo.ASCENDING\n DESC = pymongo.DESCENDING\n-import flask\n import yaml\n+import json\n+import flask\n import lmfdb.base as base\n from lmfdb.utils import flash_error\n from datetime import datetime\n-from flask import render_template, request, url_for\n+from flask import render_template, request, url_for, app, current_app\n from lmfdb.api import api_page, api_logger\n+from bson import json_util\n from bson.objectid import ObjectId\n \n # caches the database information\n@@ -26,6 +28,14 @@\n return \"\\\"ObjectId('%s')\\\"\"%value\n return value\n \n+def oids_to_strings(doc):\n+ \"\"\" recursively replace all ObjectId values in dictionary doc with strings encoding the ObjectId values\"\"\"\n+ for k,v in doc.items():\n+ if isinstance(v,ObjectId):\n+ doc[k] = \"ObjectId('%s')\" % v\n+ elif isinstance(v,dict):\n+ oids_to_strings(doc[k])\n+\n def pretty_document(rec,sep=\", \",id=True):\n # sort keys and remove _id for html display\n attrs = sorted([(key,quote_string(rec[key])) for key in rec.keys() if (id or key != '_id')])\n@@ -215,12 +225,9 @@\n flash_error(\"no document with id %s found in collection %s.%s.\", id, db, collection)\n return flask.redirect(url_for(\".api_query\", db=db, collection=collection))\n \n+ # fixup object ids for display and json/yaml encoding\n for document in data:\n- oid = document[\"_id\"]\n- if type(oid) == ObjectId:\n- document[\"_id\"] = \"ObjectId('%s')\" % oid\n- elif isinstance(oid, basestring):\n- document[\"_id\"] = str(oid)\n+ oids_to_strings(document)\n \n # preparing the datastructure\n start = offset\n@@ -244,9 +251,9 @@\n \"next\": next\n }\n \n- # display of the result (default html)\n if format.lower() == \"json\":\n- return flask.jsonify(**data)\n+ #return flask.jsonify(**data) # can't handle binary data\n+ return current_app.response_class(json.dumps(data, encoding='ISO-8859-1', indent=2, default=json_util.default), mimetype='application/json')\n elif format.lower() == \"yaml\":\n y = yaml.dump(data,\n default_flow_style=False,\n@@ -254,7 +261,7 @@\n allow_unicode=True)\n return flask.Response(y, mimetype='text/plain')\n else:\n- # sort displayed records by key (as json and yaml do)\n+ # sort displayed records by key (as jsonify and yaml_dump do)\n data[\"pretty\"] = pretty_document\n location = \"%s/%s\" % (db, collection)\n title = \"API - \" + location\n", "issue": "many api json downloads fail with internal server errors\nThe following pages all give internal server errors; the problem appears to occur on collections whose documents contain attributes that are object ids (other than _id, which is handled separately), which causes errors like:\n\nTypeError: ObjectId('4f7052dc5009fb18ef000811') is not JSON serializable\n\nThe following pages all have this 
problem:\n\nhttp://www.lmfdb.org/api/MaassWaveForms/Coefficients?_format=json\nhttp://www.lmfdb.org/api/MaassWaveForms/Coefficients.chunks?_format=json\nhttp://www.lmfdb.org/api/MaassWaveForms/Coefficients.files?_format=json\nhttp://www.lmfdb.org/api/MaassWaveForms/FS?_format=json\nhttp://www.lmfdb.org/api/MaassWaveForms/Table.chunks?_format=json\nhttp://www.lmfdb.org/api/MaassWaveForms/maassform_plots?_format=json\nhttp://www.lmfdb.org/api/SL2Zsubgroups/groups?_format=json\nhttp://www.lmfdb.org/api/modularforms2/webchar.chunks?_format=json\nhttp://www.lmfdb.org/api/modularforms2/webeigenvalues.chunks?_format=json\nhttp://www.lmfdb.org/api/modularforms2/webmodformspace.chunks?_format=json\nhttp://www.lmfdb.org/api/modularforms2/webnewforms.chunks?_format=json\nhttp://www.lmfdb.org/api/siegel_modular_forms/samples?_format=json\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport pymongo\nimport urllib2\nASC = pymongo.ASCENDING\nDESC = pymongo.DESCENDING\nimport flask\nimport yaml\nimport lmfdb.base as base\nfrom lmfdb.utils import flash_error\nfrom datetime import datetime\nfrom flask import render_template, request, url_for\nfrom lmfdb.api import api_page, api_logger\nfrom bson.objectid import ObjectId\n\n# caches the database information\n_databases = None\n\ndef pluck(n, list):\n return [_[n] for _ in list]\n\ndef quote_string(value):\n if isinstance(value,unicode) or isinstance(value,str):\n return repr(value)\n elif isinstance(value,ObjectId):\n return \"\\\"ObjectId('%s')\\\"\"%value\n return value\n\ndef pretty_document(rec,sep=\", \",id=True):\n # sort keys and remove _id for html display\n attrs = sorted([(key,quote_string(rec[key])) for key in rec.keys() if (id or key != '_id')])\n return \"{\"+sep.join([\"'%s': %s\"%attr for attr in attrs])+\"}\"\n\n\ndef censor(entries):\n \"\"\"\n hide some of the databases and collection from the public\n \"\"\"\n dontstart = [\"system.\", \"test\", \"upload\", \"admin\", \"contrib\"]\n censor = [\"local\", \"userdb\"]\n for entry in entries:\n if any(entry == x for x in censor) or \\\n any(entry.startswith(x) for x in dontstart):\n continue\n yield entry\n\ndef init_database_info():\n global _databases\n if _databases is None:\n C = base.getDBConnection()\n _databases = {}\n for db in censor(C.database_names()):\n colls = list(censor(C[db].collection_names()))\n _databases[db] = sorted([(c, C[db][c].count()) for c in colls])\n\n@api_page.route(\"/\")\ndef index():\n init_database_info()\n databases = _databases\n title = \"API\"\n return render_template(\"api.html\", **locals())\n\n@api_page.route(\"/stats\")\ndef stats():\n def mb(x):\n return int(round(x/1000000.0))\n info={}\n info['minsizes'] = ['0','1','10','100','1000','10000','100000']\n info['minsize'] = request.args.get('minsize','1').strip()\n if not info['minsize'] in info['minsizes']:\n info['minsizes'] = '1'\n info['groupby'] = 'db' if request.args.get('groupby','').strip().lower() == 'db' else ''\n info['sortby'] = request.args.get('sortby','size').strip().lower()\n if not info['sortby'] in ['size', 'objects']:\n info['sortby'] = 'size'\n init_database_info()\n C = base.getDBConnection()\n dbstats = {db:C[db].command(\"dbstats\") for db in _databases}\n info['dbs'] = len(dbstats.keys())\n collections = objects = 0\n size = dataSize = indexSize = 0\n stats = {}\n for db in dbstats:\n dbsize = dbstats[db]['dataSize']+dbstats[db]['indexSize']\n size += dbsize\n dataSize += dbstats[db]['dataSize']\n indexSize += dbstats[db]['indexSize']\n dbsize = mb(dbsize)\n 
dbobjects = dbstats[db]['objects']\n for c in pluck(0,_databases[db]):\n if C[db][c].count():\n collections += 1\n coll = '<a href = \"' + url_for (\".api_query\", db=db, collection = c) + '\">'+c+'</a>'\n cstats = C[db].command(\"collstats\",c)\n objects += cstats['count']\n csize = mb(cstats['size']+cstats['totalIndexSize'])\n if csize >= int(info['minsize']):\n stats[cstats['ns']] = {'db':db, 'coll':coll, 'dbSize': dbsize, 'size':csize, 'dbObjects':dbobjects,\n 'dataSize':mb(cstats['size']), 'indexSize':mb(cstats['totalIndexSize']), 'avgObjSize':int(round(cstats['avgObjSize'])), 'objects':cstats['count'], 'indexes':cstats['nindexes']}\n info['collections'] = collections\n info['objects'] = objects\n info['size'] = mb(size)\n info['dataSize'] = mb(dataSize)\n info['indexSize'] = mb(indexSize)\n if info['sortby'] == 'objects' and info['groupby'] == 'db':\n sortedkeys = sorted([db for db in stats],key=lambda x: (-stats[x]['dbObjects'],stats[x]['db'],-stats[x]['objects'],stats[x]['coll']))\n elif info['sortby'] == 'objects':\n sortedkeys = sorted([db for db in stats],key=lambda x: (-stats[x]['objects'],stats[x]['db'],stats[x]['coll']))\n elif info['sortby'] == 'size' and info['groupby'] == 'db':\n sortedkeys = sorted([db for db in stats],key=lambda x: (-stats[x]['dbSize'],stats[x]['db'],-stats[x]['size'],stats[x]['coll']))\n else:\n sortedkeys = sorted([db for db in stats],key=lambda x: (-stats[x]['size'],stats[x]['db'],stats[x]['coll']))\n info['stats'] = [stats[key] for key in sortedkeys]\n return render_template('api-stats.html', info=info)\n\n@api_page.route(\"/<db>/<collection>/<id>\")\ndef api_query_id(db, collection, id):\n return api_query(db, collection, id = id)\n\n\n@api_page.route(\"/<db>/<collection>\")\ndef api_query(db, collection, id = None):\n init_database_info()\n\n # check what is queried for\n if db not in _databases or collection not in pluck(0, _databases[db]):\n return flask.abort(404)\n\n # parsing the meta parameters _format and _offset\n format = request.args.get(\"_format\", \"html\")\n offset = int(request.args.get(\"_offset\", 0))\n DELIM = request.args.get(\"_delim\", \",\")\n fields = request.args.get(\"_fields\", None)\n sortby = request.args.get(\"_sort\", None)\n\n if fields:\n fields = fields.split(DELIM)\n\n if sortby:\n sortby = sortby.split(DELIM)\n\n if offset > 10000:\n if format != \"html\":\n flask.abort(404)\n else:\n flash_error(\"offset %s too large, please refine your query.\", offset)\n return flask.redirect(url_for(\".api_query\", db=db, collection=collection))\n\n # sort = [('fieldname1', ASC/DESC), ...]\n if sortby is not None:\n sort = []\n for key in sortby:\n if key.startswith(\"-\"):\n sort.append((key[1:], DESC))\n else:\n sort.append((key, ASC))\n else:\n sort = None\n\n # preparing the actual database query q\n C = base.getDBConnection()\n q = {}\n\n if id is not None:\n if id.startswith('ObjectId('):\n q[\"_id\"] = ObjectId(id[10:-2])\n else:\n q[\"_id\"] = id\n single_object = True\n else:\n single_object = False\n\n for qkey, qval in request.args.iteritems():\n from ast import literal_eval\n try:\n if qkey.startswith(\"_\"):\n continue\n if qval.startswith(\"s\"):\n qval = qval[1:]\n if qval.startswith(\"i\"):\n qval = int(qval[1:])\n elif qval.startswith(\"f\"):\n qval = float(qval[1:])\n elif qval.startswith(\"ls\"): # indicator, that it might be a list of strings\n qval = qval[2:].split(DELIM)\n elif qval.startswith(\"li\"):\n qval = [int(_) for _ in qval[2:].split(DELIM)]\n elif qval.startswith(\"lf\"):\n qval = [float(_) for 
_ in qval[2:].split(DELIM)]\n elif qval.startswith(\"py\"): # literal evaluation\n qval = literal_eval(qval[2:])\n elif qval.startswith(\"cs\"): # containing string in list\n qval = { \"$in\" : [qval[2:]] }\n elif qval.startswith(\"ci\"):\n qval = { \"$in\" : [int(qval[2:])] }\n elif qval.startswith(\"cf\"):\n qval = { \"$in\" : [float(qval[2:])] }\n elif qval.startswith(\"cpy\"):\n qval = { \"$in\" : [literal_eval(qval[3:])] }\n except:\n # no suitable conversion for the value, keep it as string\n pass\n\n # update the query\n q[qkey] = qval\n\n # executing the query \"q\" and replacing the _id in the result list\n api_logger.info(\"API query: q = '%s', fields = '%s', sort = '%s', offset = %s\" % (q, fields, sort, offset))\n data = list(C[db][collection].find(q, projection = fields, sort=sort).skip(offset).limit(100))\n \n if single_object and not data:\n if format != 'html':\n flask.abort(404)\n else:\n flash_error(\"no document with id %s found in collection %s.%s.\", id, db, collection)\n return flask.redirect(url_for(\".api_query\", db=db, collection=collection))\n \n for document in data:\n oid = document[\"_id\"]\n if type(oid) == ObjectId:\n document[\"_id\"] = \"ObjectId('%s')\" % oid\n elif isinstance(oid, basestring):\n document[\"_id\"] = str(oid)\n\n # preparing the datastructure\n start = offset\n next_req = dict(request.args)\n next_req[\"_offset\"] = offset\n url_args = next_req.copy()\n query = url_for(\".api_query\", db=db, collection=collection, **next_req)\n offset += len(data)\n next_req[\"_offset\"] = offset\n next = url_for(\".api_query\", db=db, collection=collection, **next_req)\n\n # the collected result\n data = {\n \"database\": db,\n \"collection\": collection,\n \"timestamp\": datetime.utcnow().isoformat(),\n \"data\": data,\n \"start\": start,\n \"offset\": offset,\n \"query\": query,\n \"next\": next\n }\n\n # display of the result (default html)\n if format.lower() == \"json\":\n return flask.jsonify(**data)\n elif format.lower() == \"yaml\":\n y = yaml.dump(data,\n default_flow_style=False,\n canonical=False,\n allow_unicode=True)\n return flask.Response(y, mimetype='text/plain')\n else:\n # sort displayed records by key (as json and yaml do)\n data[\"pretty\"] = pretty_document\n location = \"%s/%s\" % (db, collection)\n title = \"API - \" + location\n bc = [(\"API\", url_for(\".index\")), (location, query)]\n query_unquote = urllib2.unquote(data[\"query\"])\n return render_template(\"collection.html\",\n title=title,\n single_object=single_object,\n query_unquote = query_unquote,\n url_args = url_args,\n bread=bc,\n **data)\n\n", "path": "lmfdb/api/api.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport pymongo\nimport urllib2\nASC = pymongo.ASCENDING\nDESC = pymongo.DESCENDING\nimport yaml\nimport json\nimport flask\nimport lmfdb.base as base\nfrom lmfdb.utils import flash_error\nfrom datetime import datetime\nfrom flask import render_template, request, url_for, app, current_app\nfrom lmfdb.api import api_page, api_logger\nfrom bson import json_util\nfrom bson.objectid import ObjectId\n\n# caches the database information\n_databases = None\n\ndef pluck(n, list):\n return [_[n] for _ in list]\n\ndef quote_string(value):\n if isinstance(value,unicode) or isinstance(value,str):\n return repr(value)\n elif isinstance(value,ObjectId):\n return \"\\\"ObjectId('%s')\\\"\"%value\n return value\n\ndef oids_to_strings(doc):\n \"\"\" recursively replace all ObjectId values in dictionary doc with strings encoding the ObjectId values\"\"\"\n for 
k,v in doc.items():\n if isinstance(v,ObjectId):\n doc[k] = \"ObjectId('%s')\" % v\n elif isinstance(v,dict):\n oids_to_strings(doc[k])\n\ndef pretty_document(rec,sep=\", \",id=True):\n # sort keys and remove _id for html display\n attrs = sorted([(key,quote_string(rec[key])) for key in rec.keys() if (id or key != '_id')])\n return \"{\"+sep.join([\"'%s': %s\"%attr for attr in attrs])+\"}\"\n\n\ndef censor(entries):\n \"\"\"\n hide some of the databases and collection from the public\n \"\"\"\n dontstart = [\"system.\", \"test\", \"upload\", \"admin\", \"contrib\"]\n censor = [\"local\", \"userdb\"]\n for entry in entries:\n if any(entry == x for x in censor) or \\\n any(entry.startswith(x) for x in dontstart):\n continue\n yield entry\n\ndef init_database_info():\n global _databases\n if _databases is None:\n C = base.getDBConnection()\n _databases = {}\n for db in censor(C.database_names()):\n colls = list(censor(C[db].collection_names()))\n _databases[db] = sorted([(c, C[db][c].count()) for c in colls])\n\n@api_page.route(\"/\")\ndef index():\n init_database_info()\n databases = _databases\n title = \"API\"\n return render_template(\"api.html\", **locals())\n\n@api_page.route(\"/stats\")\ndef stats():\n def mb(x):\n return int(round(x/1000000.0))\n info={}\n info['minsizes'] = ['0','1','10','100','1000','10000','100000']\n info['minsize'] = request.args.get('minsize','1').strip()\n if not info['minsize'] in info['minsizes']:\n info['minsizes'] = '1'\n info['groupby'] = 'db' if request.args.get('groupby','').strip().lower() == 'db' else ''\n info['sortby'] = request.args.get('sortby','size').strip().lower()\n if not info['sortby'] in ['size', 'objects']:\n info['sortby'] = 'size'\n init_database_info()\n C = base.getDBConnection()\n dbstats = {db:C[db].command(\"dbstats\") for db in _databases}\n info['dbs'] = len(dbstats.keys())\n collections = objects = 0\n size = dataSize = indexSize = 0\n stats = {}\n for db in dbstats:\n dbsize = dbstats[db]['dataSize']+dbstats[db]['indexSize']\n size += dbsize\n dataSize += dbstats[db]['dataSize']\n indexSize += dbstats[db]['indexSize']\n dbsize = mb(dbsize)\n dbobjects = dbstats[db]['objects']\n for c in pluck(0,_databases[db]):\n if C[db][c].count():\n collections += 1\n coll = '<a href = \"' + url_for (\".api_query\", db=db, collection = c) + '\">'+c+'</a>'\n cstats = C[db].command(\"collstats\",c)\n objects += cstats['count']\n csize = mb(cstats['size']+cstats['totalIndexSize'])\n if csize >= int(info['minsize']):\n stats[cstats['ns']] = {'db':db, 'coll':coll, 'dbSize': dbsize, 'size':csize, 'dbObjects':dbobjects,\n 'dataSize':mb(cstats['size']), 'indexSize':mb(cstats['totalIndexSize']), 'avgObjSize':int(round(cstats['avgObjSize'])), 'objects':cstats['count'], 'indexes':cstats['nindexes']}\n info['collections'] = collections\n info['objects'] = objects\n info['size'] = mb(size)\n info['dataSize'] = mb(dataSize)\n info['indexSize'] = mb(indexSize)\n if info['sortby'] == 'objects' and info['groupby'] == 'db':\n sortedkeys = sorted([db for db in stats],key=lambda x: (-stats[x]['dbObjects'],stats[x]['db'],-stats[x]['objects'],stats[x]['coll']))\n elif info['sortby'] == 'objects':\n sortedkeys = sorted([db for db in stats],key=lambda x: (-stats[x]['objects'],stats[x]['db'],stats[x]['coll']))\n elif info['sortby'] == 'size' and info['groupby'] == 'db':\n sortedkeys = sorted([db for db in stats],key=lambda x: (-stats[x]['dbSize'],stats[x]['db'],-stats[x]['size'],stats[x]['coll']))\n else:\n sortedkeys = sorted([db for db in stats],key=lambda x: 
(-stats[x]['size'],stats[x]['db'],stats[x]['coll']))\n info['stats'] = [stats[key] for key in sortedkeys]\n return render_template('api-stats.html', info=info)\n\n@api_page.route(\"/<db>/<collection>/<id>\")\ndef api_query_id(db, collection, id):\n return api_query(db, collection, id = id)\n\n\n@api_page.route(\"/<db>/<collection>\")\ndef api_query(db, collection, id = None):\n init_database_info()\n\n # check what is queried for\n if db not in _databases or collection not in pluck(0, _databases[db]):\n return flask.abort(404)\n\n # parsing the meta parameters _format and _offset\n format = request.args.get(\"_format\", \"html\")\n offset = int(request.args.get(\"_offset\", 0))\n DELIM = request.args.get(\"_delim\", \",\")\n fields = request.args.get(\"_fields\", None)\n sortby = request.args.get(\"_sort\", None)\n\n if fields:\n fields = fields.split(DELIM)\n\n if sortby:\n sortby = sortby.split(DELIM)\n\n if offset > 10000:\n if format != \"html\":\n flask.abort(404)\n else:\n flash_error(\"offset %s too large, please refine your query.\", offset)\n return flask.redirect(url_for(\".api_query\", db=db, collection=collection))\n\n # sort = [('fieldname1', ASC/DESC), ...]\n if sortby is not None:\n sort = []\n for key in sortby:\n if key.startswith(\"-\"):\n sort.append((key[1:], DESC))\n else:\n sort.append((key, ASC))\n else:\n sort = None\n\n # preparing the actual database query q\n C = base.getDBConnection()\n q = {}\n\n if id is not None:\n if id.startswith('ObjectId('):\n q[\"_id\"] = ObjectId(id[10:-2])\n else:\n q[\"_id\"] = id\n single_object = True\n else:\n single_object = False\n\n for qkey, qval in request.args.iteritems():\n from ast import literal_eval\n try:\n if qkey.startswith(\"_\"):\n continue\n if qval.startswith(\"s\"):\n qval = qval[1:]\n if qval.startswith(\"i\"):\n qval = int(qval[1:])\n elif qval.startswith(\"f\"):\n qval = float(qval[1:])\n elif qval.startswith(\"ls\"): # indicator, that it might be a list of strings\n qval = qval[2:].split(DELIM)\n elif qval.startswith(\"li\"):\n qval = [int(_) for _ in qval[2:].split(DELIM)]\n elif qval.startswith(\"lf\"):\n qval = [float(_) for _ in qval[2:].split(DELIM)]\n elif qval.startswith(\"py\"): # literal evaluation\n qval = literal_eval(qval[2:])\n elif qval.startswith(\"cs\"): # containing string in list\n qval = { \"$in\" : [qval[2:]] }\n elif qval.startswith(\"ci\"):\n qval = { \"$in\" : [int(qval[2:])] }\n elif qval.startswith(\"cf\"):\n qval = { \"$in\" : [float(qval[2:])] }\n elif qval.startswith(\"cpy\"):\n qval = { \"$in\" : [literal_eval(qval[3:])] }\n except:\n # no suitable conversion for the value, keep it as string\n pass\n\n # update the query\n q[qkey] = qval\n\n # executing the query \"q\" and replacing the _id in the result list\n api_logger.info(\"API query: q = '%s', fields = '%s', sort = '%s', offset = %s\" % (q, fields, sort, offset))\n data = list(C[db][collection].find(q, projection = fields, sort=sort).skip(offset).limit(100))\n \n if single_object and not data:\n if format != 'html':\n flask.abort(404)\n else:\n flash_error(\"no document with id %s found in collection %s.%s.\", id, db, collection)\n return flask.redirect(url_for(\".api_query\", db=db, collection=collection))\n \n # fixup object ids for display and json/yaml encoding\n for document in data:\n oids_to_strings(document)\n\n # preparing the datastructure\n start = offset\n next_req = dict(request.args)\n next_req[\"_offset\"] = offset\n url_args = next_req.copy()\n query = url_for(\".api_query\", db=db, collection=collection, 
**next_req)\n offset += len(data)\n next_req[\"_offset\"] = offset\n next = url_for(\".api_query\", db=db, collection=collection, **next_req)\n\n # the collected result\n data = {\n \"database\": db,\n \"collection\": collection,\n \"timestamp\": datetime.utcnow().isoformat(),\n \"data\": data,\n \"start\": start,\n \"offset\": offset,\n \"query\": query,\n \"next\": next\n }\n\n if format.lower() == \"json\":\n #return flask.jsonify(**data) # can't handle binary data\n return current_app.response_class(json.dumps(data, encoding='ISO-8859-1', indent=2, default=json_util.default), mimetype='application/json')\n elif format.lower() == \"yaml\":\n y = yaml.dump(data,\n default_flow_style=False,\n canonical=False,\n allow_unicode=True)\n return flask.Response(y, mimetype='text/plain')\n else:\n # sort displayed records by key (as jsonify and yaml_dump do)\n data[\"pretty\"] = pretty_document\n location = \"%s/%s\" % (db, collection)\n title = \"API - \" + location\n bc = [(\"API\", url_for(\".index\")), (location, query)]\n query_unquote = urllib2.unquote(data[\"query\"])\n return render_template(\"collection.html\",\n title=title,\n single_object=single_object,\n query_unquote = query_unquote,\n url_args = url_args,\n bread=bc,\n **data)\n\n", "path": "lmfdb/api/api.py"}]}
3,902
691
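The lmfdb patch recorded in the row above replaces flask.jsonify with a plain json.dumps call that passes bson.json_util.default, so ObjectId and other BSON types serialize cleanly. A minimal, self-contained sketch of that encoder pattern, assuming only a standard pymongo install (the sample document is made up):

```python
import json
from bson import ObjectId, json_util

# A document as it might come back from pymongo, holding a type that
# flask.jsonify cannot serialize on its own.
doc = {"_id": ObjectId("5f1d3b2e9c1f4b0007a1b2c3"), "name": "example"}

# json.dumps raises TypeError on ObjectId unless a default encoder is given;
# bson.json_util.default knows how to render ObjectId, datetime, Binary, etc.
encoded = json.dumps(doc, default=json_util.default)
print(encoded)  # {"_id": {"$oid": "5f1d3b2e9c1f4b0007a1b2c3"}, "name": "example"}
```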
gh_patches_debug_794
rasdani/github-patches
git_diff
scikit-image__scikit-image-3650
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- tifffile: try to use the one in the user's install first Should we try importing tifffile before using the one we versionned it? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `skimage/io/_plugins/tifffile_plugin.py` Content: ``` 1 from ...external.tifffile import TiffFile, imsave, parse_kwargs 2 3 4 def imread(fname, dtype=None, **kwargs): 5 """Load a tiff image from file. 6 7 Parameters 8 ---------- 9 fname : str or file 10 File name or file-like-object. 11 dtype : numpy dtype object or string specifier 12 Specifies data type of array elements (Not currently used). 13 kwargs : keyword pairs, optional 14 Additional keyword arguments to pass through (see ``tifffile``'s 15 ``imread`` function). 16 17 Notes 18 ----- 19 Provided by Christophe Golhke's tifffile.py [1]_, and supports many 20 advanced image types including multi-page and floating point. 21 22 References 23 ---------- 24 .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py 25 26 """ 27 28 if 'img_num' in kwargs: 29 kwargs['key'] = kwargs.pop('img_num') 30 31 # parse_kwargs will extract keyword arguments intended for the TiffFile 32 # class and remove them from the kwargs dictionary in-place 33 tiff_keys = ['multifile', 'multifile_close', 'pages', 'fastij', 'is_ome'] 34 kwargs_tiff = parse_kwargs(kwargs, *tiff_keys) 35 36 # read and return tiff as numpy array 37 with TiffFile(fname, **kwargs_tiff) as tif: 38 return tif.asarray(**kwargs) 39 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py --- a/skimage/io/_plugins/tifffile_plugin.py +++ b/skimage/io/_plugins/tifffile_plugin.py @@ -1,4 +1,7 @@ -from ...external.tifffile import TiffFile, imsave, parse_kwargs +try: + from tifffile import TiffFile, imsave, parse_kwargs +except ImportError: + from ...external.tifffile import TiffFile, imsave, parse_kwargs def imread(fname, dtype=None, **kwargs):
{"golden_diff": "diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py\n--- a/skimage/io/_plugins/tifffile_plugin.py\n+++ b/skimage/io/_plugins/tifffile_plugin.py\n@@ -1,4 +1,7 @@\n-from ...external.tifffile import TiffFile, imsave, parse_kwargs\n+try:\n+ from tifffile import TiffFile, imsave, parse_kwargs\n+except ImportError:\n+ from ...external.tifffile import TiffFile, imsave, parse_kwargs\n \n \n def imread(fname, dtype=None, **kwargs):\n", "issue": "tifffile: try to use the one in the user's install first\nShould we try importing tifffile before using the one we versionned it?\n", "before_files": [{"content": "from ...external.tifffile import TiffFile, imsave, parse_kwargs\n\n\ndef imread(fname, dtype=None, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n dtype : numpy dtype object or string specifier\n Specifies data type of array elements (Not currently used).\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n\n Notes\n -----\n Provided by Christophe Golhke's tifffile.py [1]_, and supports many\n advanced image types including multi-page and floating point.\n\n References\n ----------\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py\n\n \"\"\"\n\n if 'img_num' in kwargs:\n kwargs['key'] = kwargs.pop('img_num')\n\n # parse_kwargs will extract keyword arguments intended for the TiffFile \n # class and remove them from the kwargs dictionary in-place\n tiff_keys = ['multifile', 'multifile_close', 'pages', 'fastij', 'is_ome']\n kwargs_tiff = parse_kwargs(kwargs, *tiff_keys)\n\n # read and return tiff as numpy array\n with TiffFile(fname, **kwargs_tiff) as tif:\n return tif.asarray(**kwargs)\n", "path": "skimage/io/_plugins/tifffile_plugin.py"}], "after_files": [{"content": "try:\n from tifffile import TiffFile, imsave, parse_kwargs\nexcept ImportError:\n from ...external.tifffile import TiffFile, imsave, parse_kwargs\n\n\ndef imread(fname, dtype=None, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n dtype : numpy dtype object or string specifier\n Specifies data type of array elements (Not currently used).\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n\n Notes\n -----\n Provided by Christophe Golhke's tifffile.py [1]_, and supports many\n advanced image types including multi-page and floating point.\n\n References\n ----------\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py\n\n \"\"\"\n\n if 'img_num' in kwargs:\n kwargs['key'] = kwargs.pop('img_num')\n\n # parse_kwargs will extract keyword arguments intended for the TiffFile \n # class and remove them from the kwargs dictionary in-place\n tiff_keys = ['multifile', 'multifile_close', 'pages', 'fastij', 'is_ome']\n kwargs_tiff = parse_kwargs(kwargs, *tiff_keys)\n\n # read and return tiff as numpy array\n with TiffFile(fname, **kwargs_tiff) as tif:\n return tif.asarray(**kwargs)\n", "path": "skimage/io/_plugins/tifffile_plugin.py"}]}
682
142
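The scikit-image fix above is the familiar prefer-installed, fall-back-to-vendored import pattern: try the user's own tifffile first and only fall back to the copy bundled with the package. A standalone sketch of the same idea, with `mypackage.external` standing in as a placeholder for wherever a project vendors its snapshot:

```python
# Prefer the separately installed library; fall back to the vendored snapshot.
try:
    import tifffile  # the user's install, typically newer
except ImportError:
    from mypackage.external import tifffile  # bundled copy shipped with the package


def read_tiff(path, **kwargs):
    """Read a TIFF file with whichever tifffile implementation was found."""
    with tifffile.TiffFile(path) as tif:
        return tif.asarray(**kwargs)
```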
gh_patches_debug_6145
rasdani/github-patches
git_diff
cisagov__manage.get.gov-1743
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Submission dates for fixture domain requests are blank ### Current Behavior For applications generated by fixtures, submission dates are not showing because they are not set to begin with ![image](https://github.com/cisagov/manage.get.gov/assets/144387994/46ca040a-3b72-4ff4-be4b-5094eb604bcd) ### Expected Behavior Submission dates are shown ### Steps to Reproduce 1. Logon as Domain Manager with an account associated with fixtures 2. View Domain request table 3. Date submitted column is blank (for any requests from fixtures) ### Environment Development sandbox ### Additional Context Per Alysia, submission date should be added to fixtures for the fake data ### Issue Links _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/registrar/fixtures_applications.py` Content: ``` 1 import logging 2 import random 3 from faker import Faker 4 5 from registrar.models import ( 6 User, 7 DomainApplication, 8 DraftDomain, 9 Contact, 10 Website, 11 ) 12 13 fake = Faker() 14 logger = logging.getLogger(__name__) 15 16 17 class DomainApplicationFixture: 18 """ 19 Load domain applications into the database. 20 21 Make sure this class' `load` method is called from `handle` 22 in management/commands/load.py, then use `./manage.py load` 23 to run this code. 24 """ 25 26 # any fields not specified here will be filled in with fake data or defaults 27 # NOTE BENE: each fixture must have `organization_name` for uniqueness! 28 # Here is a more complete example as a template: 29 # { 30 # "status": "started", 31 # "organization_name": "Example - Just started", 32 # "organization_type": "federal", 33 # "federal_agency": None, 34 # "federal_type": None, 35 # "address_line1": None, 36 # "address_line2": None, 37 # "city": None, 38 # "state_territory": None, 39 # "zipcode": None, 40 # "urbanization": None, 41 # "purpose": None, 42 # "anything_else": None, 43 # "is_policy_acknowledged": None, 44 # "authorizing_official": None, 45 # "submitter": None, 46 # "other_contacts": [], 47 # "current_websites": [], 48 # "alternative_domains": [], 49 # }, 50 DA = [ 51 { 52 "status": DomainApplication.ApplicationStatus.STARTED, 53 "organization_name": "Example - Finished but not submitted", 54 }, 55 { 56 "status": DomainApplication.ApplicationStatus.SUBMITTED, 57 "organization_name": "Example - Submitted but pending investigation", 58 }, 59 { 60 "status": DomainApplication.ApplicationStatus.IN_REVIEW, 61 "organization_name": "Example - In investigation", 62 }, 63 { 64 "status": DomainApplication.ApplicationStatus.IN_REVIEW, 65 "organization_name": "Example - Approved", 66 }, 67 { 68 "status": DomainApplication.ApplicationStatus.WITHDRAWN, 69 "organization_name": "Example - Withdrawn", 70 }, 71 { 72 "status": DomainApplication.ApplicationStatus.ACTION_NEEDED, 73 "organization_name": "Example - Action needed", 74 }, 75 { 76 "status": "rejected", 77 "organization_name": "Example - Rejected", 78 }, 79 ] 80 81 @classmethod 82 def fake_contact(cls): 83 return { 84 "first_name": fake.first_name(), 85 "middle_name": None, 86 "last_name": fake.last_name(), 87 "title": fake.job(), 88 "email": fake.ascii_safe_email(), 89 "phone": "201-555-5555", 90 } 91 92 @classmethod 93 def fake_dot_gov(cls): 94 return f"{fake.slug()}.gov" 95 96 @classmethod 97 def _set_non_foreign_key_fields(cls, da: DomainApplication, app: dict): 98 
"""Helper method used by `load`.""" 99 da.status = app["status"] if "status" in app else "started" 100 da.organization_type = app["organization_type"] if "organization_type" in app else "federal" 101 da.federal_agency = ( 102 app["federal_agency"] 103 if "federal_agency" in app 104 # Random choice of agency for selects, used as placeholders for testing. 105 else random.choice(DomainApplication.AGENCIES) # nosec 106 ) 107 108 da.federal_type = ( 109 app["federal_type"] 110 if "federal_type" in app 111 else random.choice(["executive", "judicial", "legislative"]) # nosec 112 ) 113 da.address_line1 = app["address_line1"] if "address_line1" in app else fake.street_address() 114 da.address_line2 = app["address_line2"] if "address_line2" in app else None 115 da.city = app["city"] if "city" in app else fake.city() 116 da.state_territory = app["state_territory"] if "state_territory" in app else fake.state_abbr() 117 da.zipcode = app["zipcode"] if "zipcode" in app else fake.postalcode() 118 da.urbanization = app["urbanization"] if "urbanization" in app else None 119 da.purpose = app["purpose"] if "purpose" in app else fake.paragraph() 120 da.anything_else = app["anything_else"] if "anything_else" in app else None 121 da.is_policy_acknowledged = app["is_policy_acknowledged"] if "is_policy_acknowledged" in app else True 122 123 @classmethod 124 def _set_foreign_key_fields(cls, da: DomainApplication, app: dict, user: User): 125 """Helper method used by `load`.""" 126 if not da.investigator: 127 da.investigator = User.objects.get(username=user.username) if "investigator" in app else None 128 129 if not da.authorizing_official: 130 if "authorizing_official" in app and app["authorizing_official"] is not None: 131 da.authorizing_official, _ = Contact.objects.get_or_create(**app["authorizing_official"]) 132 else: 133 da.authorizing_official = Contact.objects.create(**cls.fake_contact()) 134 135 if not da.submitter: 136 if "submitter" in app and app["submitter"] is not None: 137 da.submitter, _ = Contact.objects.get_or_create(**app["submitter"]) 138 else: 139 da.submitter = Contact.objects.create(**cls.fake_contact()) 140 141 if not da.requested_domain: 142 if "requested_domain" in app and app["requested_domain"] is not None: 143 da.requested_domain, _ = DraftDomain.objects.get_or_create(name=app["requested_domain"]) 144 else: 145 da.requested_domain = DraftDomain.objects.create(name=cls.fake_dot_gov()) 146 147 @classmethod 148 def _set_many_to_many_relations(cls, da: DomainApplication, app: dict): 149 """Helper method used by `load`.""" 150 if "other_contacts" in app: 151 for contact in app["other_contacts"]: 152 da.other_contacts.add(Contact.objects.get_or_create(**contact)[0]) 153 elif not da.other_contacts.exists(): 154 other_contacts = [ 155 Contact.objects.create(**cls.fake_contact()) for _ in range(random.randint(0, 3)) # nosec 156 ] 157 da.other_contacts.add(*other_contacts) 158 159 if "current_websites" in app: 160 for website in app["current_websites"]: 161 da.current_websites.add(Website.objects.get_or_create(website=website)[0]) 162 elif not da.current_websites.exists(): 163 current_websites = [ 164 Website.objects.create(website=fake.uri()) for _ in range(random.randint(0, 3)) # nosec 165 ] 166 da.current_websites.add(*current_websites) 167 168 if "alternative_domains" in app: 169 for domain in app["alternative_domains"]: 170 da.alternative_domains.add(Website.objects.get_or_create(website=domain)[0]) 171 elif not da.alternative_domains.exists(): 172 alternative_domains = [ 173 
Website.objects.create(website=cls.fake_dot_gov()) for _ in range(random.randint(0, 3)) # nosec 174 ] 175 da.alternative_domains.add(*alternative_domains) 176 177 @classmethod 178 def load(cls): 179 """Creates domain applications for each user in the database.""" 180 logger.info("Going to load %s domain applications" % len(cls.DA)) 181 try: 182 users = list(User.objects.all()) # force evaluation to catch db errors 183 except Exception as e: 184 logger.warning(e) 185 return 186 187 for user in users: 188 logger.debug("Loading domain applications for %s" % user) 189 for app in cls.DA: 190 try: 191 da, _ = DomainApplication.objects.get_or_create( 192 creator=user, 193 organization_name=app["organization_name"], 194 ) 195 cls._set_non_foreign_key_fields(da, app) 196 cls._set_foreign_key_fields(da, app, user) 197 da.save() 198 cls._set_many_to_many_relations(da, app) 199 except Exception as e: 200 logger.warning(e) 201 202 203 class DomainFixture(DomainApplicationFixture): 204 205 """Create one domain and permissions on it for each user.""" 206 207 @classmethod 208 def load(cls): 209 try: 210 users = list(User.objects.all()) # force evaluation to catch db errors 211 except Exception as e: 212 logger.warning(e) 213 return 214 215 for user in users: 216 # approve one of each users in review status domains 217 application = DomainApplication.objects.filter( 218 creator=user, status=DomainApplication.ApplicationStatus.IN_REVIEW 219 ).last() 220 logger.debug(f"Approving {application} for {user}") 221 222 # We don't want fixtures sending out real emails to 223 # fake email addresses, so we just skip that and log it instead 224 application.approve(send_email=False) 225 application.save() 226 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/registrar/fixtures_applications.py b/src/registrar/fixtures_applications.py --- a/src/registrar/fixtures_applications.py +++ b/src/registrar/fixtures_applications.py @@ -104,7 +104,7 @@ # Random choice of agency for selects, used as placeholders for testing. else random.choice(DomainApplication.AGENCIES) # nosec ) - + da.submission_date = fake.date() da.federal_type = ( app["federal_type"] if "federal_type" in app
{"golden_diff": "diff --git a/src/registrar/fixtures_applications.py b/src/registrar/fixtures_applications.py\n--- a/src/registrar/fixtures_applications.py\n+++ b/src/registrar/fixtures_applications.py\n@@ -104,7 +104,7 @@\n # Random choice of agency for selects, used as placeholders for testing.\n else random.choice(DomainApplication.AGENCIES) # nosec\n )\n-\n+ da.submission_date = fake.date()\n da.federal_type = (\n app[\"federal_type\"]\n if \"federal_type\" in app\n", "issue": "Submission dates for fixture domain requests are blank\n### Current Behavior\r\n\r\nFor applications generated by fixtures, submission dates are not showing because they are not set to begin with\r\n\r\n![image](https://github.com/cisagov/manage.get.gov/assets/144387994/46ca040a-3b72-4ff4-be4b-5094eb604bcd)\r\n\r\n\r\n### Expected Behavior\r\n\r\nSubmission dates are shown\r\n\r\n### Steps to Reproduce\r\n\r\n1. Logon as Domain Manager with an account associated with fixtures\r\n2. View Domain request table\r\n3. Date submitted column is blank (for any requests from fixtures)\r\n\r\n\r\n### Environment\r\n\r\nDevelopment sandbox\r\n\r\n### Additional Context\r\n\r\nPer Alysia, submission date should be added to fixtures for the fake data\r\n\r\n### Issue Links\r\n\r\n_No response_\n", "before_files": [{"content": "import logging\nimport random\nfrom faker import Faker\n\nfrom registrar.models import (\n User,\n DomainApplication,\n DraftDomain,\n Contact,\n Website,\n)\n\nfake = Faker()\nlogger = logging.getLogger(__name__)\n\n\nclass DomainApplicationFixture:\n \"\"\"\n Load domain applications into the database.\n\n Make sure this class' `load` method is called from `handle`\n in management/commands/load.py, then use `./manage.py load`\n to run this code.\n \"\"\"\n\n # any fields not specified here will be filled in with fake data or defaults\n # NOTE BENE: each fixture must have `organization_name` for uniqueness!\n # Here is a more complete example as a template:\n # {\n # \"status\": \"started\",\n # \"organization_name\": \"Example - Just started\",\n # \"organization_type\": \"federal\",\n # \"federal_agency\": None,\n # \"federal_type\": None,\n # \"address_line1\": None,\n # \"address_line2\": None,\n # \"city\": None,\n # \"state_territory\": None,\n # \"zipcode\": None,\n # \"urbanization\": None,\n # \"purpose\": None,\n # \"anything_else\": None,\n # \"is_policy_acknowledged\": None,\n # \"authorizing_official\": None,\n # \"submitter\": None,\n # \"other_contacts\": [],\n # \"current_websites\": [],\n # \"alternative_domains\": [],\n # },\n DA = [\n {\n \"status\": DomainApplication.ApplicationStatus.STARTED,\n \"organization_name\": \"Example - Finished but not submitted\",\n },\n {\n \"status\": DomainApplication.ApplicationStatus.SUBMITTED,\n \"organization_name\": \"Example - Submitted but pending investigation\",\n },\n {\n \"status\": DomainApplication.ApplicationStatus.IN_REVIEW,\n \"organization_name\": \"Example - In investigation\",\n },\n {\n \"status\": DomainApplication.ApplicationStatus.IN_REVIEW,\n \"organization_name\": \"Example - Approved\",\n },\n {\n \"status\": DomainApplication.ApplicationStatus.WITHDRAWN,\n \"organization_name\": \"Example - Withdrawn\",\n },\n {\n \"status\": DomainApplication.ApplicationStatus.ACTION_NEEDED,\n \"organization_name\": \"Example - Action needed\",\n },\n {\n \"status\": \"rejected\",\n \"organization_name\": \"Example - Rejected\",\n },\n ]\n\n @classmethod\n def fake_contact(cls):\n return {\n \"first_name\": fake.first_name(),\n 
\"middle_name\": None,\n \"last_name\": fake.last_name(),\n \"title\": fake.job(),\n \"email\": fake.ascii_safe_email(),\n \"phone\": \"201-555-5555\",\n }\n\n @classmethod\n def fake_dot_gov(cls):\n return f\"{fake.slug()}.gov\"\n\n @classmethod\n def _set_non_foreign_key_fields(cls, da: DomainApplication, app: dict):\n \"\"\"Helper method used by `load`.\"\"\"\n da.status = app[\"status\"] if \"status\" in app else \"started\"\n da.organization_type = app[\"organization_type\"] if \"organization_type\" in app else \"federal\"\n da.federal_agency = (\n app[\"federal_agency\"]\n if \"federal_agency\" in app\n # Random choice of agency for selects, used as placeholders for testing.\n else random.choice(DomainApplication.AGENCIES) # nosec\n )\n\n da.federal_type = (\n app[\"federal_type\"]\n if \"federal_type\" in app\n else random.choice([\"executive\", \"judicial\", \"legislative\"]) # nosec\n )\n da.address_line1 = app[\"address_line1\"] if \"address_line1\" in app else fake.street_address()\n da.address_line2 = app[\"address_line2\"] if \"address_line2\" in app else None\n da.city = app[\"city\"] if \"city\" in app else fake.city()\n da.state_territory = app[\"state_territory\"] if \"state_territory\" in app else fake.state_abbr()\n da.zipcode = app[\"zipcode\"] if \"zipcode\" in app else fake.postalcode()\n da.urbanization = app[\"urbanization\"] if \"urbanization\" in app else None\n da.purpose = app[\"purpose\"] if \"purpose\" in app else fake.paragraph()\n da.anything_else = app[\"anything_else\"] if \"anything_else\" in app else None\n da.is_policy_acknowledged = app[\"is_policy_acknowledged\"] if \"is_policy_acknowledged\" in app else True\n\n @classmethod\n def _set_foreign_key_fields(cls, da: DomainApplication, app: dict, user: User):\n \"\"\"Helper method used by `load`.\"\"\"\n if not da.investigator:\n da.investigator = User.objects.get(username=user.username) if \"investigator\" in app else None\n\n if not da.authorizing_official:\n if \"authorizing_official\" in app and app[\"authorizing_official\"] is not None:\n da.authorizing_official, _ = Contact.objects.get_or_create(**app[\"authorizing_official\"])\n else:\n da.authorizing_official = Contact.objects.create(**cls.fake_contact())\n\n if not da.submitter:\n if \"submitter\" in app and app[\"submitter\"] is not None:\n da.submitter, _ = Contact.objects.get_or_create(**app[\"submitter\"])\n else:\n da.submitter = Contact.objects.create(**cls.fake_contact())\n\n if not da.requested_domain:\n if \"requested_domain\" in app and app[\"requested_domain\"] is not None:\n da.requested_domain, _ = DraftDomain.objects.get_or_create(name=app[\"requested_domain\"])\n else:\n da.requested_domain = DraftDomain.objects.create(name=cls.fake_dot_gov())\n\n @classmethod\n def _set_many_to_many_relations(cls, da: DomainApplication, app: dict):\n \"\"\"Helper method used by `load`.\"\"\"\n if \"other_contacts\" in app:\n for contact in app[\"other_contacts\"]:\n da.other_contacts.add(Contact.objects.get_or_create(**contact)[0])\n elif not da.other_contacts.exists():\n other_contacts = [\n Contact.objects.create(**cls.fake_contact()) for _ in range(random.randint(0, 3)) # nosec\n ]\n da.other_contacts.add(*other_contacts)\n\n if \"current_websites\" in app:\n for website in app[\"current_websites\"]:\n da.current_websites.add(Website.objects.get_or_create(website=website)[0])\n elif not da.current_websites.exists():\n current_websites = [\n Website.objects.create(website=fake.uri()) for _ in range(random.randint(0, 3)) # nosec\n ]\n 
da.current_websites.add(*current_websites)\n\n if \"alternative_domains\" in app:\n for domain in app[\"alternative_domains\"]:\n da.alternative_domains.add(Website.objects.get_or_create(website=domain)[0])\n elif not da.alternative_domains.exists():\n alternative_domains = [\n Website.objects.create(website=cls.fake_dot_gov()) for _ in range(random.randint(0, 3)) # nosec\n ]\n da.alternative_domains.add(*alternative_domains)\n\n @classmethod\n def load(cls):\n \"\"\"Creates domain applications for each user in the database.\"\"\"\n logger.info(\"Going to load %s domain applications\" % len(cls.DA))\n try:\n users = list(User.objects.all()) # force evaluation to catch db errors\n except Exception as e:\n logger.warning(e)\n return\n\n for user in users:\n logger.debug(\"Loading domain applications for %s\" % user)\n for app in cls.DA:\n try:\n da, _ = DomainApplication.objects.get_or_create(\n creator=user,\n organization_name=app[\"organization_name\"],\n )\n cls._set_non_foreign_key_fields(da, app)\n cls._set_foreign_key_fields(da, app, user)\n da.save()\n cls._set_many_to_many_relations(da, app)\n except Exception as e:\n logger.warning(e)\n\n\nclass DomainFixture(DomainApplicationFixture):\n\n \"\"\"Create one domain and permissions on it for each user.\"\"\"\n\n @classmethod\n def load(cls):\n try:\n users = list(User.objects.all()) # force evaluation to catch db errors\n except Exception as e:\n logger.warning(e)\n return\n\n for user in users:\n # approve one of each users in review status domains\n application = DomainApplication.objects.filter(\n creator=user, status=DomainApplication.ApplicationStatus.IN_REVIEW\n ).last()\n logger.debug(f\"Approving {application} for {user}\")\n\n # We don't want fixtures sending out real emails to\n # fake email addresses, so we just skip that and log it instead\n application.approve(send_email=False)\n application.save()\n", "path": "src/registrar/fixtures_applications.py"}], "after_files": [{"content": "import logging\nimport random\nfrom faker import Faker\n\nfrom registrar.models import (\n User,\n DomainApplication,\n DraftDomain,\n Contact,\n Website,\n)\n\nfake = Faker()\nlogger = logging.getLogger(__name__)\n\n\nclass DomainApplicationFixture:\n \"\"\"\n Load domain applications into the database.\n\n Make sure this class' `load` method is called from `handle`\n in management/commands/load.py, then use `./manage.py load`\n to run this code.\n \"\"\"\n\n # any fields not specified here will be filled in with fake data or defaults\n # NOTE BENE: each fixture must have `organization_name` for uniqueness!\n # Here is a more complete example as a template:\n # {\n # \"status\": \"started\",\n # \"organization_name\": \"Example - Just started\",\n # \"organization_type\": \"federal\",\n # \"federal_agency\": None,\n # \"federal_type\": None,\n # \"address_line1\": None,\n # \"address_line2\": None,\n # \"city\": None,\n # \"state_territory\": None,\n # \"zipcode\": None,\n # \"urbanization\": None,\n # \"purpose\": None,\n # \"anything_else\": None,\n # \"is_policy_acknowledged\": None,\n # \"authorizing_official\": None,\n # \"submitter\": None,\n # \"other_contacts\": [],\n # \"current_websites\": [],\n # \"alternative_domains\": [],\n # },\n DA = [\n {\n \"status\": DomainApplication.ApplicationStatus.STARTED,\n \"organization_name\": \"Example - Finished but not submitted\",\n },\n {\n \"status\": DomainApplication.ApplicationStatus.SUBMITTED,\n \"organization_name\": \"Example - Submitted but pending investigation\",\n },\n {\n 
\"status\": DomainApplication.ApplicationStatus.IN_REVIEW,\n \"organization_name\": \"Example - In investigation\",\n },\n {\n \"status\": DomainApplication.ApplicationStatus.IN_REVIEW,\n \"organization_name\": \"Example - Approved\",\n },\n {\n \"status\": DomainApplication.ApplicationStatus.WITHDRAWN,\n \"organization_name\": \"Example - Withdrawn\",\n },\n {\n \"status\": DomainApplication.ApplicationStatus.ACTION_NEEDED,\n \"organization_name\": \"Example - Action needed\",\n },\n {\n \"status\": \"rejected\",\n \"organization_name\": \"Example - Rejected\",\n },\n ]\n\n @classmethod\n def fake_contact(cls):\n return {\n \"first_name\": fake.first_name(),\n \"middle_name\": None,\n \"last_name\": fake.last_name(),\n \"title\": fake.job(),\n \"email\": fake.ascii_safe_email(),\n \"phone\": \"201-555-5555\",\n }\n\n @classmethod\n def fake_dot_gov(cls):\n return f\"{fake.slug()}.gov\"\n\n @classmethod\n def _set_non_foreign_key_fields(cls, da: DomainApplication, app: dict):\n \"\"\"Helper method used by `load`.\"\"\"\n da.status = app[\"status\"] if \"status\" in app else \"started\"\n da.organization_type = app[\"organization_type\"] if \"organization_type\" in app else \"federal\"\n da.federal_agency = (\n app[\"federal_agency\"]\n if \"federal_agency\" in app\n # Random choice of agency for selects, used as placeholders for testing.\n else random.choice(DomainApplication.AGENCIES) # nosec\n )\n da.submission_date = fake.date()\n da.federal_type = (\n app[\"federal_type\"]\n if \"federal_type\" in app\n else random.choice([\"executive\", \"judicial\", \"legislative\"]) # nosec\n )\n da.address_line1 = app[\"address_line1\"] if \"address_line1\" in app else fake.street_address()\n da.address_line2 = app[\"address_line2\"] if \"address_line2\" in app else None\n da.city = app[\"city\"] if \"city\" in app else fake.city()\n da.state_territory = app[\"state_territory\"] if \"state_territory\" in app else fake.state_abbr()\n da.zipcode = app[\"zipcode\"] if \"zipcode\" in app else fake.postalcode()\n da.urbanization = app[\"urbanization\"] if \"urbanization\" in app else None\n da.purpose = app[\"purpose\"] if \"purpose\" in app else fake.paragraph()\n da.anything_else = app[\"anything_else\"] if \"anything_else\" in app else None\n da.is_policy_acknowledged = app[\"is_policy_acknowledged\"] if \"is_policy_acknowledged\" in app else True\n\n @classmethod\n def _set_foreign_key_fields(cls, da: DomainApplication, app: dict, user: User):\n \"\"\"Helper method used by `load`.\"\"\"\n if not da.investigator:\n da.investigator = User.objects.get(username=user.username) if \"investigator\" in app else None\n\n if not da.authorizing_official:\n if \"authorizing_official\" in app and app[\"authorizing_official\"] is not None:\n da.authorizing_official, _ = Contact.objects.get_or_create(**app[\"authorizing_official\"])\n else:\n da.authorizing_official = Contact.objects.create(**cls.fake_contact())\n\n if not da.submitter:\n if \"submitter\" in app and app[\"submitter\"] is not None:\n da.submitter, _ = Contact.objects.get_or_create(**app[\"submitter\"])\n else:\n da.submitter = Contact.objects.create(**cls.fake_contact())\n\n if not da.requested_domain:\n if \"requested_domain\" in app and app[\"requested_domain\"] is not None:\n da.requested_domain, _ = DraftDomain.objects.get_or_create(name=app[\"requested_domain\"])\n else:\n da.requested_domain = DraftDomain.objects.create(name=cls.fake_dot_gov())\n\n @classmethod\n def _set_many_to_many_relations(cls, da: DomainApplication, app: dict):\n 
\"\"\"Helper method used by `load`.\"\"\"\n if \"other_contacts\" in app:\n for contact in app[\"other_contacts\"]:\n da.other_contacts.add(Contact.objects.get_or_create(**contact)[0])\n elif not da.other_contacts.exists():\n other_contacts = [\n Contact.objects.create(**cls.fake_contact()) for _ in range(random.randint(0, 3)) # nosec\n ]\n da.other_contacts.add(*other_contacts)\n\n if \"current_websites\" in app:\n for website in app[\"current_websites\"]:\n da.current_websites.add(Website.objects.get_or_create(website=website)[0])\n elif not da.current_websites.exists():\n current_websites = [\n Website.objects.create(website=fake.uri()) for _ in range(random.randint(0, 3)) # nosec\n ]\n da.current_websites.add(*current_websites)\n\n if \"alternative_domains\" in app:\n for domain in app[\"alternative_domains\"]:\n da.alternative_domains.add(Website.objects.get_or_create(website=domain)[0])\n elif not da.alternative_domains.exists():\n alternative_domains = [\n Website.objects.create(website=cls.fake_dot_gov()) for _ in range(random.randint(0, 3)) # nosec\n ]\n da.alternative_domains.add(*alternative_domains)\n\n @classmethod\n def load(cls):\n \"\"\"Creates domain applications for each user in the database.\"\"\"\n logger.info(\"Going to load %s domain applications\" % len(cls.DA))\n try:\n users = list(User.objects.all()) # force evaluation to catch db errors\n except Exception as e:\n logger.warning(e)\n return\n\n for user in users:\n logger.debug(\"Loading domain applications for %s\" % user)\n for app in cls.DA:\n try:\n da, _ = DomainApplication.objects.get_or_create(\n creator=user,\n organization_name=app[\"organization_name\"],\n )\n cls._set_non_foreign_key_fields(da, app)\n cls._set_foreign_key_fields(da, app, user)\n da.save()\n cls._set_many_to_many_relations(da, app)\n except Exception as e:\n logger.warning(e)\n\n\nclass DomainFixture(DomainApplicationFixture):\n\n \"\"\"Create one domain and permissions on it for each user.\"\"\"\n\n @classmethod\n def load(cls):\n try:\n users = list(User.objects.all()) # force evaluation to catch db errors\n except Exception as e:\n logger.warning(e)\n return\n\n for user in users:\n # approve one of each users in review status domains\n application = DomainApplication.objects.filter(\n creator=user, status=DomainApplication.ApplicationStatus.IN_REVIEW\n ).last()\n logger.debug(f\"Approving {application} for {user}\")\n\n # We don't want fixtures sending out real emails to\n # fake email addresses, so we just skip that and log it instead\n application.approve(send_email=False)\n application.save()\n", "path": "src/registrar/fixtures_applications.py"}]}
2,959
123
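The get.gov fix above adds `da.submission_date = fake.date()` next to the other Faker-generated placeholder fields so fixture requests no longer show a blank "Date submitted" column. A small illustration of what that call yields, assuming a stock Faker install (the trailing assignment mirrors the fixture but is shown only as a comment):

```python
from faker import Faker

fake = Faker()

# Faker's date() returns an ISO-formatted string such as '2014-07-22',
# which Django coerces into a DateField value when the model is saved.
submission_date = fake.date()
print(submission_date)  # e.g. '2014-07-22'

# In the fixture helper this becomes, before da.save():
# da.submission_date = fake.date()
```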
gh_patches_debug_54541
rasdani/github-patches
git_diff
dbt-labs__dbt-core-1148
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support jinja expression statements ## Feature ### Feature description http://jinja.pocoo.org/docs/2.10/extensions/#expression-statement Presently, we hack expressions with: ``` {% set _ = my_dict.update({"a": 1, "b": 2}) %} ``` Instead, the jinja expression statement will make it possible to write: ``` {% do my_dict.update({"a": 1, "b": 2}) %} ``` This is a minor difference, but it will make jinja sql more readable and idiomatic. ### Who will this benefit? jinja writers --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dbt/clients/jinja.py` Content: ``` 1 import codecs 2 import linecache 3 import os 4 5 import jinja2 6 import jinja2._compat 7 import jinja2.ext 8 import jinja2.nodes 9 import jinja2.parser 10 import jinja2.sandbox 11 12 import dbt.compat 13 import dbt.exceptions 14 15 from dbt.node_types import NodeType 16 from dbt.utils import AttrDict 17 18 from dbt.logger import GLOBAL_LOGGER as logger # noqa 19 20 21 class MacroFuzzParser(jinja2.parser.Parser): 22 def parse_macro(self): 23 node = jinja2.nodes.Macro(lineno=next(self.stream).lineno) 24 25 # modified to fuzz macros defined in the same file. this way 26 # dbt can understand the stack of macros being called. 27 # - @cmcarthur 28 node.name = dbt.utils.get_dbt_macro_name( 29 self.parse_assign_target(name_only=True).name) 30 31 self.parse_signature(node) 32 node.body = self.parse_statements(('name:endmacro',), 33 drop_needle=True) 34 return node 35 36 37 class MacroFuzzEnvironment(jinja2.sandbox.SandboxedEnvironment): 38 def _parse(self, source, name, filename): 39 return MacroFuzzParser( 40 self, source, name, 41 jinja2._compat.encode_filename(filename) 42 ).parse() 43 44 def _compile(self, source, filename): 45 """Override jinja's compilation to stash the rendered source inside 46 the python linecache for debugging. 
47 """ 48 if filename == '<template>': 49 # make a better filename 50 filename = 'dbt-{}'.format( 51 codecs.encode(os.urandom(12), 'hex').decode('ascii') 52 ) 53 # encode, though I don't think this matters 54 filename = jinja2._compat.encode_filename(filename) 55 # put ourselves in the cache using the 'lazycache' method 56 linecache.cache[filename] = (lambda: source,) 57 58 return super(MacroFuzzEnvironment, self)._compile(source, filename) 59 60 61 class TemplateCache(object): 62 63 def __init__(self): 64 self.file_cache = {} 65 66 def get_node_template(self, node): 67 key = (node['package_name'], node['original_file_path']) 68 69 if key in self.file_cache: 70 return self.file_cache[key] 71 72 template = get_template( 73 string=node.get('raw_sql'), 74 ctx={}, 75 node=node 76 ) 77 self.file_cache[key] = template 78 79 return template 80 81 def clear(self): 82 self.file_cache.clear() 83 84 85 template_cache = TemplateCache() 86 87 88 def macro_generator(node): 89 def apply_context(context): 90 def call(*args, **kwargs): 91 name = node.get('name') 92 template = template_cache.get_node_template(node) 93 module = template.make_module(context, False, context) 94 95 if node['resource_type'] == NodeType.Operation: 96 macro = module.__dict__[dbt.utils.get_dbt_operation_name(name)] 97 else: 98 macro = module.__dict__[dbt.utils.get_dbt_macro_name(name)] 99 module.__dict__.update(context) 100 101 try: 102 return macro(*args, **kwargs) 103 except dbt.exceptions.MacroReturn as e: 104 return e.value 105 except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e: 106 dbt.exceptions.raise_compiler_error(str(e), node) 107 except dbt.exceptions.CompilationException as e: 108 e.stack.append(node) 109 raise e 110 111 return call 112 return apply_context 113 114 115 class MaterializationExtension(jinja2.ext.Extension): 116 tags = ['materialization'] 117 118 def parse(self, parser): 119 node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno) 120 materialization_name = \ 121 parser.parse_assign_target(name_only=True).name 122 123 adapter_name = 'default' 124 node.args = [] 125 node.defaults = [] 126 127 while parser.stream.skip_if('comma'): 128 target = parser.parse_assign_target(name_only=True) 129 130 if target.name == 'default': 131 pass 132 133 elif target.name == 'adapter': 134 parser.stream.expect('assign') 135 value = parser.parse_expression() 136 adapter_name = value.value 137 138 else: 139 dbt.exceptions.invalid_materialization_argument( 140 materialization_name, target.name) 141 142 node.name = dbt.utils.get_materialization_macro_name( 143 materialization_name, adapter_name) 144 145 node.body = parser.parse_statements(('name:endmaterialization',), 146 drop_needle=True) 147 148 return node 149 150 151 class OperationExtension(jinja2.ext.Extension): 152 tags = ['operation'] 153 154 def parse(self, parser): 155 node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno) 156 operation_name = \ 157 parser.parse_assign_target(name_only=True).name 158 159 node.args = [] 160 node.defaults = [] 161 162 while parser.stream.skip_if('comma'): 163 target = parser.parse_assign_target(name_only=True) 164 165 node.name = dbt.utils.get_operation_macro_name(operation_name) 166 167 node.body = parser.parse_statements(('name:endoperation',), 168 drop_needle=True) 169 170 return node 171 172 173 class DocumentationExtension(jinja2.ext.Extension): 174 tags = ['docs'] 175 176 def parse(self, parser): 177 node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno) 178 docs_name = 
parser.parse_assign_target(name_only=True).name 179 180 node.args = [] 181 node.defaults = [] 182 node.name = dbt.utils.get_docs_macro_name(docs_name) 183 node.body = parser.parse_statements(('name:enddocs',), 184 drop_needle=True) 185 return node 186 187 188 def _is_dunder_name(name): 189 return name.startswith('__') and name.endswith('__') 190 191 192 def create_macro_capture_env(node): 193 194 class ParserMacroCapture(jinja2.Undefined): 195 """ 196 This class sets up the parser to capture macros. 197 """ 198 def __init__(self, hint=None, obj=None, name=None, exc=None): 199 super(ParserMacroCapture, self).__init__(hint=hint, name=name) 200 self.node = node 201 self.name = name 202 self.package_name = node.get('package_name') 203 # jinja uses these for safety, so we have to override them. 204 # see https://github.com/pallets/jinja/blob/master/jinja2/sandbox.py#L332-L339 # noqa 205 self.unsafe_callable = False 206 self.alters_data = False 207 208 def __deepcopy__(self, memo): 209 path = os.path.join(self.node.get('root_path'), 210 self.node.get('original_file_path')) 211 212 logger.debug( 213 'dbt encountered an undefined variable, "{}" in node {}.{} ' 214 '(source path: {})' 215 .format(self.name, self.node.get('package_name'), 216 self.node.get('name'), path)) 217 218 # match jinja's message 219 dbt.exceptions.raise_compiler_error( 220 "{!r} is undefined".format(self.name), 221 node=self.node 222 ) 223 224 def __getitem__(self, name): 225 # Propagate the undefined value if a caller accesses this as if it 226 # were a dictionary 227 return self 228 229 def __getattr__(self, name): 230 if name == 'name' or _is_dunder_name(name): 231 raise AttributeError( 232 "'{}' object has no attribute '{}'" 233 .format(type(self).__name__, name) 234 ) 235 236 self.package_name = self.name 237 self.name = name 238 239 return self 240 241 def __call__(self, *args, **kwargs): 242 return True 243 244 return ParserMacroCapture 245 246 247 def get_environment(node=None, capture_macros=False): 248 args = { 249 'extensions': [] 250 } 251 252 if capture_macros: 253 args['undefined'] = create_macro_capture_env(node) 254 255 args['extensions'].append(MaterializationExtension) 256 args['extensions'].append(OperationExtension) 257 args['extensions'].append(DocumentationExtension) 258 259 return MacroFuzzEnvironment(**args) 260 261 262 def parse(string): 263 try: 264 return get_environment().parse(dbt.compat.to_string(string)) 265 266 except (jinja2.exceptions.TemplateSyntaxError, 267 jinja2.exceptions.UndefinedError) as e: 268 e.translated = False 269 dbt.exceptions.raise_compiler_error(str(e)) 270 271 272 def get_template(string, ctx, node=None, capture_macros=False): 273 try: 274 env = get_environment(node, capture_macros) 275 276 template_source = dbt.compat.to_string(string) 277 return env.from_string(template_source, globals=ctx) 278 279 except (jinja2.exceptions.TemplateSyntaxError, 280 jinja2.exceptions.UndefinedError) as e: 281 e.translated = False 282 dbt.exceptions.raise_compiler_error(str(e), node) 283 284 285 def render_template(template, ctx, node=None): 286 try: 287 return template.render(ctx) 288 289 except (jinja2.exceptions.TemplateSyntaxError, 290 jinja2.exceptions.UndefinedError) as e: 291 e.translated = False 292 dbt.exceptions.raise_compiler_error(str(e), node) 293 294 295 def get_rendered(string, ctx, node=None, 296 capture_macros=False): 297 template = get_template(string, ctx, node, 298 capture_macros=capture_macros) 299 300 return render_template(template, ctx, node) 301 302 303 def 
undefined_error(msg): 304 raise jinja2.exceptions.UndefinedError(msg) 305 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dbt/clients/jinja.py b/dbt/clients/jinja.py --- a/dbt/clients/jinja.py +++ b/dbt/clients/jinja.py @@ -246,7 +246,7 @@ def get_environment(node=None, capture_macros=False): args = { - 'extensions': [] + 'extensions': ['jinja2.ext.do'] } if capture_macros:
{"golden_diff": "diff --git a/dbt/clients/jinja.py b/dbt/clients/jinja.py\n--- a/dbt/clients/jinja.py\n+++ b/dbt/clients/jinja.py\n@@ -246,7 +246,7 @@\n \n def get_environment(node=None, capture_macros=False):\n args = {\n- 'extensions': []\n+ 'extensions': ['jinja2.ext.do']\n }\n \n if capture_macros:\n", "issue": "Support jinja expression statements\n## Feature\r\n\r\n### Feature description\r\nhttp://jinja.pocoo.org/docs/2.10/extensions/#expression-statement\r\n\r\nPresently, we hack expressions with:\r\n```\r\n{% set _ = my_dict.update({\"a\": 1, \"b\": 2}) %}\r\n```\r\n\r\nInstead, the jinja expression statement will make it possible to write:\r\n\r\n```\r\n{% do my_dict.update({\"a\": 1, \"b\": 2}) %}\r\n```\r\n\r\nThis is a minor difference, but it will make jinja sql more readable and idiomatic.\r\n\r\n### Who will this benefit?\r\njinja writers\n", "before_files": [{"content": "import codecs\nimport linecache\nimport os\n\nimport jinja2\nimport jinja2._compat\nimport jinja2.ext\nimport jinja2.nodes\nimport jinja2.parser\nimport jinja2.sandbox\n\nimport dbt.compat\nimport dbt.exceptions\n\nfrom dbt.node_types import NodeType\nfrom dbt.utils import AttrDict\n\nfrom dbt.logger import GLOBAL_LOGGER as logger # noqa\n\n\nclass MacroFuzzParser(jinja2.parser.Parser):\n def parse_macro(self):\n node = jinja2.nodes.Macro(lineno=next(self.stream).lineno)\n\n # modified to fuzz macros defined in the same file. this way\n # dbt can understand the stack of macros being called.\n # - @cmcarthur\n node.name = dbt.utils.get_dbt_macro_name(\n self.parse_assign_target(name_only=True).name)\n\n self.parse_signature(node)\n node.body = self.parse_statements(('name:endmacro',),\n drop_needle=True)\n return node\n\n\nclass MacroFuzzEnvironment(jinja2.sandbox.SandboxedEnvironment):\n def _parse(self, source, name, filename):\n return MacroFuzzParser(\n self, source, name,\n jinja2._compat.encode_filename(filename)\n ).parse()\n\n def _compile(self, source, filename):\n \"\"\"Override jinja's compilation to stash the rendered source inside\n the python linecache for debugging.\n \"\"\"\n if filename == '<template>':\n # make a better filename\n filename = 'dbt-{}'.format(\n codecs.encode(os.urandom(12), 'hex').decode('ascii')\n )\n # encode, though I don't think this matters\n filename = jinja2._compat.encode_filename(filename)\n # put ourselves in the cache using the 'lazycache' method\n linecache.cache[filename] = (lambda: source,)\n\n return super(MacroFuzzEnvironment, self)._compile(source, filename)\n\n\nclass TemplateCache(object):\n\n def __init__(self):\n self.file_cache = {}\n\n def get_node_template(self, node):\n key = (node['package_name'], node['original_file_path'])\n\n if key in self.file_cache:\n return self.file_cache[key]\n\n template = get_template(\n string=node.get('raw_sql'),\n ctx={},\n node=node\n )\n self.file_cache[key] = template\n\n return template\n\n def clear(self):\n self.file_cache.clear()\n\n\ntemplate_cache = TemplateCache()\n\n\ndef macro_generator(node):\n def apply_context(context):\n def call(*args, **kwargs):\n name = node.get('name')\n template = template_cache.get_node_template(node)\n module = template.make_module(context, False, context)\n\n if node['resource_type'] == NodeType.Operation:\n macro = module.__dict__[dbt.utils.get_dbt_operation_name(name)]\n else:\n macro = module.__dict__[dbt.utils.get_dbt_macro_name(name)]\n module.__dict__.update(context)\n\n try:\n return macro(*args, **kwargs)\n except dbt.exceptions.MacroReturn as e:\n return e.value\n 
except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e:\n dbt.exceptions.raise_compiler_error(str(e), node)\n except dbt.exceptions.CompilationException as e:\n e.stack.append(node)\n raise e\n\n return call\n return apply_context\n\n\nclass MaterializationExtension(jinja2.ext.Extension):\n tags = ['materialization']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n materialization_name = \\\n parser.parse_assign_target(name_only=True).name\n\n adapter_name = 'default'\n node.args = []\n node.defaults = []\n\n while parser.stream.skip_if('comma'):\n target = parser.parse_assign_target(name_only=True)\n\n if target.name == 'default':\n pass\n\n elif target.name == 'adapter':\n parser.stream.expect('assign')\n value = parser.parse_expression()\n adapter_name = value.value\n\n else:\n dbt.exceptions.invalid_materialization_argument(\n materialization_name, target.name)\n\n node.name = dbt.utils.get_materialization_macro_name(\n materialization_name, adapter_name)\n\n node.body = parser.parse_statements(('name:endmaterialization',),\n drop_needle=True)\n\n return node\n\n\nclass OperationExtension(jinja2.ext.Extension):\n tags = ['operation']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n operation_name = \\\n parser.parse_assign_target(name_only=True).name\n\n node.args = []\n node.defaults = []\n\n while parser.stream.skip_if('comma'):\n target = parser.parse_assign_target(name_only=True)\n\n node.name = dbt.utils.get_operation_macro_name(operation_name)\n\n node.body = parser.parse_statements(('name:endoperation',),\n drop_needle=True)\n\n return node\n\n\nclass DocumentationExtension(jinja2.ext.Extension):\n tags = ['docs']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n docs_name = parser.parse_assign_target(name_only=True).name\n\n node.args = []\n node.defaults = []\n node.name = dbt.utils.get_docs_macro_name(docs_name)\n node.body = parser.parse_statements(('name:enddocs',),\n drop_needle=True)\n return node\n\n\ndef _is_dunder_name(name):\n return name.startswith('__') and name.endswith('__')\n\n\ndef create_macro_capture_env(node):\n\n class ParserMacroCapture(jinja2.Undefined):\n \"\"\"\n This class sets up the parser to capture macros.\n \"\"\"\n def __init__(self, hint=None, obj=None, name=None, exc=None):\n super(ParserMacroCapture, self).__init__(hint=hint, name=name)\n self.node = node\n self.name = name\n self.package_name = node.get('package_name')\n # jinja uses these for safety, so we have to override them.\n # see https://github.com/pallets/jinja/blob/master/jinja2/sandbox.py#L332-L339 # noqa\n self.unsafe_callable = False\n self.alters_data = False\n\n def __deepcopy__(self, memo):\n path = os.path.join(self.node.get('root_path'),\n self.node.get('original_file_path'))\n\n logger.debug(\n 'dbt encountered an undefined variable, \"{}\" in node {}.{} '\n '(source path: {})'\n .format(self.name, self.node.get('package_name'),\n self.node.get('name'), path))\n\n # match jinja's message\n dbt.exceptions.raise_compiler_error(\n \"{!r} is undefined\".format(self.name),\n node=self.node\n )\n\n def __getitem__(self, name):\n # Propagate the undefined value if a caller accesses this as if it\n # were a dictionary\n return self\n\n def __getattr__(self, name):\n if name == 'name' or _is_dunder_name(name):\n raise AttributeError(\n \"'{}' object has no attribute '{}'\"\n .format(type(self).__name__, name)\n )\n\n self.package_name = 
self.name\n self.name = name\n\n return self\n\n def __call__(self, *args, **kwargs):\n return True\n\n return ParserMacroCapture\n\n\ndef get_environment(node=None, capture_macros=False):\n args = {\n 'extensions': []\n }\n\n if capture_macros:\n args['undefined'] = create_macro_capture_env(node)\n\n args['extensions'].append(MaterializationExtension)\n args['extensions'].append(OperationExtension)\n args['extensions'].append(DocumentationExtension)\n\n return MacroFuzzEnvironment(**args)\n\n\ndef parse(string):\n try:\n return get_environment().parse(dbt.compat.to_string(string))\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e))\n\n\ndef get_template(string, ctx, node=None, capture_macros=False):\n try:\n env = get_environment(node, capture_macros)\n\n template_source = dbt.compat.to_string(string)\n return env.from_string(template_source, globals=ctx)\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e), node)\n\n\ndef render_template(template, ctx, node=None):\n try:\n return template.render(ctx)\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e), node)\n\n\ndef get_rendered(string, ctx, node=None,\n capture_macros=False):\n template = get_template(string, ctx, node,\n capture_macros=capture_macros)\n\n return render_template(template, ctx, node)\n\n\ndef undefined_error(msg):\n raise jinja2.exceptions.UndefinedError(msg)\n", "path": "dbt/clients/jinja.py"}], "after_files": [{"content": "import codecs\nimport linecache\nimport os\n\nimport jinja2\nimport jinja2._compat\nimport jinja2.ext\nimport jinja2.nodes\nimport jinja2.parser\nimport jinja2.sandbox\n\nimport dbt.compat\nimport dbt.exceptions\n\nfrom dbt.node_types import NodeType\nfrom dbt.utils import AttrDict\n\nfrom dbt.logger import GLOBAL_LOGGER as logger # noqa\n\n\nclass MacroFuzzParser(jinja2.parser.Parser):\n def parse_macro(self):\n node = jinja2.nodes.Macro(lineno=next(self.stream).lineno)\n\n # modified to fuzz macros defined in the same file. 
this way\n # dbt can understand the stack of macros being called.\n # - @cmcarthur\n node.name = dbt.utils.get_dbt_macro_name(\n self.parse_assign_target(name_only=True).name)\n\n self.parse_signature(node)\n node.body = self.parse_statements(('name:endmacro',),\n drop_needle=True)\n return node\n\n\nclass MacroFuzzEnvironment(jinja2.sandbox.SandboxedEnvironment):\n def _parse(self, source, name, filename):\n return MacroFuzzParser(\n self, source, name,\n jinja2._compat.encode_filename(filename)\n ).parse()\n\n def _compile(self, source, filename):\n \"\"\"Override jinja's compilation to stash the rendered source inside\n the python linecache for debugging.\n \"\"\"\n if filename == '<template>':\n # make a better filename\n filename = 'dbt-{}'.format(\n codecs.encode(os.urandom(12), 'hex').decode('ascii')\n )\n # encode, though I don't think this matters\n filename = jinja2._compat.encode_filename(filename)\n # put ourselves in the cache using the 'lazycache' method\n linecache.cache[filename] = (lambda: source,)\n\n return super(MacroFuzzEnvironment, self)._compile(source, filename)\n\n\nclass TemplateCache(object):\n\n def __init__(self):\n self.file_cache = {}\n\n def get_node_template(self, node):\n key = (node['package_name'], node['original_file_path'])\n\n if key in self.file_cache:\n return self.file_cache[key]\n\n template = get_template(\n string=node.get('raw_sql'),\n ctx={},\n node=node\n )\n self.file_cache[key] = template\n\n return template\n\n def clear(self):\n self.file_cache.clear()\n\n\ntemplate_cache = TemplateCache()\n\n\ndef macro_generator(node):\n def apply_context(context):\n def call(*args, **kwargs):\n name = node.get('name')\n template = template_cache.get_node_template(node)\n module = template.make_module(context, False, context)\n\n if node['resource_type'] == NodeType.Operation:\n macro = module.__dict__[dbt.utils.get_dbt_operation_name(name)]\n else:\n macro = module.__dict__[dbt.utils.get_dbt_macro_name(name)]\n module.__dict__.update(context)\n\n try:\n return macro(*args, **kwargs)\n except dbt.exceptions.MacroReturn as e:\n return e.value\n except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e:\n dbt.exceptions.raise_compiler_error(str(e), node)\n except dbt.exceptions.CompilationException as e:\n e.stack.append(node)\n raise e\n\n return call\n return apply_context\n\n\nclass MaterializationExtension(jinja2.ext.Extension):\n tags = ['materialization']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n materialization_name = \\\n parser.parse_assign_target(name_only=True).name\n\n adapter_name = 'default'\n node.args = []\n node.defaults = []\n\n while parser.stream.skip_if('comma'):\n target = parser.parse_assign_target(name_only=True)\n\n if target.name == 'default':\n pass\n\n elif target.name == 'adapter':\n parser.stream.expect('assign')\n value = parser.parse_expression()\n adapter_name = value.value\n\n else:\n dbt.exceptions.invalid_materialization_argument(\n materialization_name, target.name)\n\n node.name = dbt.utils.get_materialization_macro_name(\n materialization_name, adapter_name)\n\n node.body = parser.parse_statements(('name:endmaterialization',),\n drop_needle=True)\n\n return node\n\n\nclass OperationExtension(jinja2.ext.Extension):\n tags = ['operation']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n operation_name = \\\n parser.parse_assign_target(name_only=True).name\n\n node.args = []\n node.defaults = []\n\n while 
parser.stream.skip_if('comma'):\n target = parser.parse_assign_target(name_only=True)\n\n node.name = dbt.utils.get_operation_macro_name(operation_name)\n\n node.body = parser.parse_statements(('name:endoperation',),\n drop_needle=True)\n\n return node\n\n\nclass DocumentationExtension(jinja2.ext.Extension):\n tags = ['docs']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n docs_name = parser.parse_assign_target(name_only=True).name\n\n node.args = []\n node.defaults = []\n node.name = dbt.utils.get_docs_macro_name(docs_name)\n node.body = parser.parse_statements(('name:enddocs',),\n drop_needle=True)\n return node\n\n\ndef _is_dunder_name(name):\n return name.startswith('__') and name.endswith('__')\n\n\ndef create_macro_capture_env(node):\n\n class ParserMacroCapture(jinja2.Undefined):\n \"\"\"\n This class sets up the parser to capture macros.\n \"\"\"\n def __init__(self, hint=None, obj=None, name=None, exc=None):\n super(ParserMacroCapture, self).__init__(hint=hint, name=name)\n self.node = node\n self.name = name\n self.package_name = node.get('package_name')\n # jinja uses these for safety, so we have to override them.\n # see https://github.com/pallets/jinja/blob/master/jinja2/sandbox.py#L332-L339 # noqa\n self.unsafe_callable = False\n self.alters_data = False\n\n def __deepcopy__(self, memo):\n path = os.path.join(self.node.get('root_path'),\n self.node.get('original_file_path'))\n\n logger.debug(\n 'dbt encountered an undefined variable, \"{}\" in node {}.{} '\n '(source path: {})'\n .format(self.name, self.node.get('package_name'),\n self.node.get('name'), path))\n\n # match jinja's message\n dbt.exceptions.raise_compiler_error(\n \"{!r} is undefined\".format(self.name),\n node=self.node\n )\n\n def __getitem__(self, name):\n # Propagate the undefined value if a caller accesses this as if it\n # were a dictionary\n return self\n\n def __getattr__(self, name):\n if name == 'name' or _is_dunder_name(name):\n raise AttributeError(\n \"'{}' object has no attribute '{}'\"\n .format(type(self).__name__, name)\n )\n\n self.package_name = self.name\n self.name = name\n\n return self\n\n def __call__(self, *args, **kwargs):\n return True\n\n return ParserMacroCapture\n\n\ndef get_environment(node=None, capture_macros=False):\n args = {\n 'extensions': ['jinja2.ext.do']\n }\n\n if capture_macros:\n args['undefined'] = create_macro_capture_env(node)\n\n args['extensions'].append(MaterializationExtension)\n args['extensions'].append(OperationExtension)\n args['extensions'].append(DocumentationExtension)\n\n return MacroFuzzEnvironment(**args)\n\n\ndef parse(string):\n try:\n return get_environment().parse(dbt.compat.to_string(string))\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e))\n\n\ndef get_template(string, ctx, node=None, capture_macros=False):\n try:\n env = get_environment(node, capture_macros)\n\n template_source = dbt.compat.to_string(string)\n return env.from_string(template_source, globals=ctx)\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e), node)\n\n\ndef render_template(template, ctx, node=None):\n try:\n return template.render(ctx)\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e), node)\n\n\ndef 
get_rendered(string, ctx, node=None,\n capture_macros=False):\n template = get_template(string, ctx, node,\n capture_macros=capture_macros)\n\n return render_template(template, ctx, node)\n\n\ndef undefined_error(msg):\n raise jinja2.exceptions.UndefinedError(msg)\n", "path": "dbt/clients/jinja.py"}]}
3,205
95
gh_patches_debug_1461
rasdani/github-patches
git_diff
kartoza__prj.app-346
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Display thumbnails in a modal window when we click on fullscreen We can see a lot of GIF in the QGIS changelog. These thumbnails are too small to see so I have to click on the button to see it fullscreen. For now, it redirects to the GIF url like http://changelog.qgis.org/media/images/entries/53f72a9cf1bf32d73eb5174c37e54c60002b9707.gif The user needs to use the "previous" button in the web browser to come back to the changelog. It would be better to implement a javascript modal window to show the GIF and to stay on the URL http://changelog.qgis.org/en/qgis/version/2.16.0/ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `django_project/core/settings/project.py` Content: ``` 1 # coding=utf-8 2 3 """Project level settings. 4 5 Adjust these values as needed but don't commit passwords etc. to any public 6 repository! 7 """ 8 9 import os # noqa 10 from django.utils.translation import ugettext_lazy as _ 11 from .utils import absolute_path 12 from .contrib import * # noqa 13 14 # Project apps 15 INSTALLED_APPS += ( 16 'base', 17 'changes', 18 'github_issue', 19 'vota', 20 ) 21 22 # Due to profile page does not available, 23 # this will redirect to home page after login 24 LOGIN_REDIRECT_URL = '/' 25 26 # How many versions to list in each project box 27 PROJECT_VERSION_LIST_SIZE = 10 28 29 # Set debug to false for production 30 DEBUG = TEMPLATE_DEBUG = False 31 32 SOUTH_TESTS_MIGRATE = False 33 34 35 # Set languages which want to be translated 36 LANGUAGES = ( 37 ('en', _('English')), 38 ('af', _('Afrikaans')), 39 ('id', _('Indonesian')), 40 ('ko', _('Korean')), 41 ) 42 43 # Set storage path for the translation files 44 LOCALE_PATHS = (absolute_path('locale'),) 45 46 47 MIDDLEWARE_CLASSES = ( 48 # For nav bar generation 49 'core.custom_middleware.NavContextMiddleware', 50 ) + MIDDLEWARE_CLASSES 51 52 # Project specific javascript files to be pipelined 53 # For third party libs like jquery should go in contrib.py 54 PIPELINE_JS['project'] = { 55 'source_filenames': ( 56 'js/csrf-ajax.js', 57 'js/changelog.js', 58 'js/github-issue.js' 59 ), 60 'output_filename': 'js/project.js', 61 } 62 63 # Project specific css files to be pipelined 64 # For third party libs like bootstrap should go in contrib.py 65 PIPELINE_CSS['project'] = { 66 'source_filenames': ( 67 'css/changelog.css', 68 'css/form.css', 69 'css/fonts.css' 70 ), 71 'output_filename': 'css/project.css', 72 'extra_context': { 73 'media': 'screen, projection', 74 }, 75 } 76 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/django_project/core/settings/project.py b/django_project/core/settings/project.py --- a/django_project/core/settings/project.py +++ b/django_project/core/settings/project.py @@ -55,7 +55,8 @@ 'source_filenames': ( 'js/csrf-ajax.js', 'js/changelog.js', - 'js/github-issue.js' + 'js/github-issue.js', + 'js/entry.js', ), 'output_filename': 'js/project.js', }
{"golden_diff": "diff --git a/django_project/core/settings/project.py b/django_project/core/settings/project.py\n--- a/django_project/core/settings/project.py\n+++ b/django_project/core/settings/project.py\n@@ -55,7 +55,8 @@\n 'source_filenames': (\n 'js/csrf-ajax.js',\n 'js/changelog.js',\n- 'js/github-issue.js'\n+ 'js/github-issue.js',\n+ 'js/entry.js',\n ),\n 'output_filename': 'js/project.js',\n }\n", "issue": "Display thumbnails in a modal window when we click on fullscreen\nWe can see a lot of GIF in the QGIS changelog. These thumbnails are too small to see so I have to click on the button to see it fullscreen. For now, it redirects to the GIF url like http://changelog.qgis.org/media/images/entries/53f72a9cf1bf32d73eb5174c37e54c60002b9707.gif\nThe user needs to use the \"previous\" button in the web browser to come back to the changelog.\n\nIt would be better to implement a javascript modal window to show the GIF and to stay on the URL http://changelog.qgis.org/en/qgis/version/2.16.0/\n\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Project level settings.\n\nAdjust these values as needed but don't commit passwords etc. to any public\nrepository!\n\"\"\"\n\nimport os # noqa\nfrom django.utils.translation import ugettext_lazy as _\nfrom .utils import absolute_path\nfrom .contrib import * # noqa\n\n# Project apps\nINSTALLED_APPS += (\n 'base',\n 'changes',\n 'github_issue',\n 'vota',\n)\n\n# Due to profile page does not available,\n# this will redirect to home page after login\nLOGIN_REDIRECT_URL = '/'\n\n# How many versions to list in each project box\nPROJECT_VERSION_LIST_SIZE = 10\n\n# Set debug to false for production\nDEBUG = TEMPLATE_DEBUG = False\n\nSOUTH_TESTS_MIGRATE = False\n\n\n# Set languages which want to be translated\nLANGUAGES = (\n ('en', _('English')),\n ('af', _('Afrikaans')),\n ('id', _('Indonesian')),\n ('ko', _('Korean')),\n)\n\n# Set storage path for the translation files\nLOCALE_PATHS = (absolute_path('locale'),)\n\n\nMIDDLEWARE_CLASSES = (\n # For nav bar generation\n 'core.custom_middleware.NavContextMiddleware',\n) + MIDDLEWARE_CLASSES\n\n# Project specific javascript files to be pipelined\n# For third party libs like jquery should go in contrib.py\nPIPELINE_JS['project'] = {\n 'source_filenames': (\n 'js/csrf-ajax.js',\n 'js/changelog.js',\n 'js/github-issue.js'\n ),\n 'output_filename': 'js/project.js',\n}\n\n# Project specific css files to be pipelined\n# For third party libs like bootstrap should go in contrib.py\nPIPELINE_CSS['project'] = {\n 'source_filenames': (\n 'css/changelog.css',\n 'css/form.css',\n 'css/fonts.css'\n ),\n 'output_filename': 'css/project.css',\n 'extra_context': {\n 'media': 'screen, projection',\n },\n}\n", "path": "django_project/core/settings/project.py"}], "after_files": [{"content": "# coding=utf-8\n\n\"\"\"Project level settings.\n\nAdjust these values as needed but don't commit passwords etc. 
to any public\nrepository!\n\"\"\"\n\nimport os # noqa\nfrom django.utils.translation import ugettext_lazy as _\nfrom .utils import absolute_path\nfrom .contrib import * # noqa\n\n# Project apps\nINSTALLED_APPS += (\n 'base',\n 'changes',\n 'github_issue',\n 'vota',\n)\n\n# Due to profile page does not available,\n# this will redirect to home page after login\nLOGIN_REDIRECT_URL = '/'\n\n# How many versions to list in each project box\nPROJECT_VERSION_LIST_SIZE = 10\n\n# Set debug to false for production\nDEBUG = TEMPLATE_DEBUG = False\n\nSOUTH_TESTS_MIGRATE = False\n\n\n# Set languages which want to be translated\nLANGUAGES = (\n ('en', _('English')),\n ('af', _('Afrikaans')),\n ('id', _('Indonesian')),\n ('ko', _('Korean')),\n)\n\n# Set storage path for the translation files\nLOCALE_PATHS = (absolute_path('locale'),)\n\n\nMIDDLEWARE_CLASSES = (\n # For nav bar generation\n 'core.custom_middleware.NavContextMiddleware',\n) + MIDDLEWARE_CLASSES\n\n# Project specific javascript files to be pipelined\n# For third party libs like jquery should go in contrib.py\nPIPELINE_JS['project'] = {\n 'source_filenames': (\n 'js/csrf-ajax.js',\n 'js/changelog.js',\n 'js/github-issue.js',\n 'js/entry.js',\n ),\n 'output_filename': 'js/project.js',\n}\n\n# Project specific css files to be pipelined\n# For third party libs like bootstrap should go in contrib.py\nPIPELINE_CSS['project'] = {\n 'source_filenames': (\n 'css/changelog.css',\n 'css/form.css',\n 'css/fonts.css'\n ),\n 'output_filename': 'css/project.css',\n 'extra_context': {\n 'media': 'screen, projection',\n },\n}\n", "path": "django_project/core/settings/project.py"}]}
1,002
110
gh_patches_debug_50751
rasdani/github-patches
git_diff
pantsbuild__pants-16113
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Pants poetry-based lockfiles fail to include hashes. This was detected in a unit test in the Pants repo, but is a wider problem for all versions of Pants that support generating lockfiles using Poetry. The proximal cause is this announcement from PyPI: https://discuss.python.org/t/backwards-incompatible-change-to-pypi-json-api/17154 And the root cause is this Poetry code: https://github.com/python-poetry/poetry/blob/bce13c14f73060b3abbb791dea585d8fde26eaef/poetry/repositories/pypi_repository.py#L272-L283 There was a Poetry fix released and backported to the 1.1. branch here: https://github.com/python-poetry/poetry/pull/5973 Users can fix with 2 steps: 1. Update Pants config ```toml [poetry] # N.B.: Works around issue described at https://github.com/pantsbuild/pants/issues/16111 # Undo once on a Pants with this version or greater as the default. version = "poetry==1.1.14" ``` 2. Clear Poetry caches with `rm -rf ~/.cache/pypoetry` on Linux and `rm -rf ~/Library/Caches/pypoetry` on Mac. This issue tracks bumping Pants default to this fixed Poetry version. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/python/pants/backend/python/subsystems/poetry.py` Content: ``` 1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 from __future__ import annotations 5 6 from collections import defaultdict 7 from dataclasses import dataclass 8 from textwrap import dedent 9 from typing import Any, Iterable, Sequence 10 11 import toml 12 from pkg_resources import Requirement 13 14 from pants.backend.python.subsystems.python_tool_base import PythonToolRequirementsBase 15 from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints 16 from pants.engine.fs import FileContent 17 18 # ---------------------------------------------------------------------------------------- 19 # Subsystem 20 # ---------------------------------------------------------------------------------------- 21 22 23 class PoetrySubsystem(PythonToolRequirementsBase): 24 options_scope = "poetry" 25 help = "Used to generate lockfiles for third-party Python dependencies." 26 27 default_version = "poetry==1.1.8" 28 29 register_interpreter_constraints = True 30 default_interpreter_constraints = ["CPython>=3.7,<4"] 31 32 33 # We must monkeypatch Poetry to include `setuptools` and `wheel` in the lockfile. This was fixed 34 # in Poetry 1.2. See https://github.com/python-poetry/poetry/issues/1584. 35 # WONTFIX(#12314): only use this custom launcher if using Poetry 1.1.. 
36 POETRY_LAUNCHER = FileContent( 37 "__pants_poetry_launcher.py", 38 dedent( 39 """\ 40 from poetry.console import main 41 from poetry.puzzle.provider import Provider 42 43 Provider.UNSAFE_PACKAGES = set() 44 main() 45 """ 46 ).encode(), 47 ) 48 49 50 # ---------------------------------------------------------------------------------------- 51 # Parsing 52 # ---------------------------------------------------------------------------------------- 53 54 _HEADER = { 55 "name": "pants-lockfile-generation", 56 "version": "0.1.0", 57 "description": "", 58 "authors": ["pantsbuild"], 59 } 60 61 62 def create_pyproject_toml( 63 requirements: Iterable[str], interpreter_constraints: InterpreterConstraints 64 ) -> str: 65 return toml.dumps(create_pyproject_toml_as_dict(requirements, interpreter_constraints)) 66 67 68 def create_pyproject_toml_as_dict( 69 raw_requirements: Iterable[str], interpreter_constraints: InterpreterConstraints 70 ) -> dict: 71 python_constraint = {"python": interpreter_constraints.to_poetry_constraint()} 72 project_name_to_poetry_deps = defaultdict(list) 73 for raw_req in raw_requirements: 74 # WONTFIX(#12314): add error handling. 75 req = Requirement.parse(raw_req) 76 poetry_dep = PoetryDependency.from_requirement(req) 77 project_name_to_poetry_deps[req.project_name].append(poetry_dep) 78 79 deps = { 80 project_name: PoetryDependency.to_pyproject_toml_metadata(poetry_deps) 81 for project_name, poetry_deps in project_name_to_poetry_deps.items() 82 } 83 return {"tool": {"poetry": {**_HEADER, "dependencies": {**python_constraint, **deps}}}} 84 85 86 @dataclass(frozen=True) 87 class PoetryDependency: 88 name: str 89 version: str | None 90 extras: tuple[str, ...] = () 91 markers: str | None = None 92 93 @classmethod 94 def from_requirement(cls, requirement: Requirement) -> PoetryDependency: 95 return PoetryDependency( 96 requirement.project_name, 97 version=str(requirement.specifier) or None, # type: ignore[attr-defined] 98 extras=tuple(sorted(requirement.extras)), 99 markers=str(requirement.marker) if requirement.marker else None, 100 ) 101 102 @classmethod 103 def to_pyproject_toml_metadata( 104 cls, deps: Sequence[PoetryDependency] 105 ) -> dict[str, Any] | list[dict[str, Any]]: 106 def convert_dep(dep: PoetryDependency) -> dict[str, Any]: 107 metadata: dict[str, Any] = {"version": dep.version or "*"} 108 if dep.extras: 109 metadata["extras"] = dep.extras 110 if dep.markers: 111 metadata["markers"] = dep.markers 112 return metadata 113 114 if not deps: 115 raise AssertionError("Must have at least one element!") 116 if len(deps) == 1: 117 return convert_dep(deps[0]) 118 119 entries = [] 120 name = deps[0].name 121 for dep in deps: 122 if dep.name != name: 123 raise AssertionError(f"All elements must have the same project name. Given: {deps}") 124 entries.append(convert_dep(dep)) 125 return entries 126 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/python/pants/backend/python/subsystems/poetry.py b/src/python/pants/backend/python/subsystems/poetry.py --- a/src/python/pants/backend/python/subsystems/poetry.py +++ b/src/python/pants/backend/python/subsystems/poetry.py @@ -24,7 +24,7 @@ options_scope = "poetry" help = "Used to generate lockfiles for third-party Python dependencies." - default_version = "poetry==1.1.8" + default_version = "poetry==1.1.14" register_interpreter_constraints = True default_interpreter_constraints = ["CPython>=3.7,<4"]
{"golden_diff": "diff --git a/src/python/pants/backend/python/subsystems/poetry.py b/src/python/pants/backend/python/subsystems/poetry.py\n--- a/src/python/pants/backend/python/subsystems/poetry.py\n+++ b/src/python/pants/backend/python/subsystems/poetry.py\n@@ -24,7 +24,7 @@\n options_scope = \"poetry\"\n help = \"Used to generate lockfiles for third-party Python dependencies.\"\n \n- default_version = \"poetry==1.1.8\"\n+ default_version = \"poetry==1.1.14\"\n \n register_interpreter_constraints = True\n default_interpreter_constraints = [\"CPython>=3.7,<4\"]\n", "issue": "Pants poetry-based lockfiles fail to include hashes.\nThis was detected in a unit test in the Pants repo, but is a wider problem for all versions of Pants that support generating lockfiles using Poetry.\r\n\r\nThe proximal cause is this announcement from PyPI:\r\n https://discuss.python.org/t/backwards-incompatible-change-to-pypi-json-api/17154\r\n\r\nAnd the root cause is this Poetry code:\r\n https://github.com/python-poetry/poetry/blob/bce13c14f73060b3abbb791dea585d8fde26eaef/poetry/repositories/pypi_repository.py#L272-L283\r\n\r\nThere was a Poetry fix released and backported to the 1.1. branch here:\r\n https://github.com/python-poetry/poetry/pull/5973\r\n\r\nUsers can fix with 2 steps:\r\n1. Update Pants config\r\n```toml\r\n[poetry]\r\n# N.B.: Works around issue described at https://github.com/pantsbuild/pants/issues/16111\r\n# Undo once on a Pants with this version or greater as the default.\r\nversion = \"poetry==1.1.14\"\r\n```\r\n2. Clear Poetry caches with `rm -rf ~/.cache/pypoetry` on Linux and `rm -rf ~/Library/Caches/pypoetry` on Mac.\r\n\r\nThis issue tracks bumping Pants default to this fixed Poetry version.\r\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom textwrap import dedent\nfrom typing import Any, Iterable, Sequence\n\nimport toml\nfrom pkg_resources import Requirement\n\nfrom pants.backend.python.subsystems.python_tool_base import PythonToolRequirementsBase\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.engine.fs import FileContent\n\n# ----------------------------------------------------------------------------------------\n# Subsystem\n# ----------------------------------------------------------------------------------------\n\n\nclass PoetrySubsystem(PythonToolRequirementsBase):\n options_scope = \"poetry\"\n help = \"Used to generate lockfiles for third-party Python dependencies.\"\n\n default_version = \"poetry==1.1.8\"\n\n register_interpreter_constraints = True\n default_interpreter_constraints = [\"CPython>=3.7,<4\"]\n\n\n# We must monkeypatch Poetry to include `setuptools` and `wheel` in the lockfile. This was fixed\n# in Poetry 1.2. 
See https://github.com/python-poetry/poetry/issues/1584.\n# WONTFIX(#12314): only use this custom launcher if using Poetry 1.1..\nPOETRY_LAUNCHER = FileContent(\n \"__pants_poetry_launcher.py\",\n dedent(\n \"\"\"\\\n from poetry.console import main\n from poetry.puzzle.provider import Provider\n\n Provider.UNSAFE_PACKAGES = set()\n main()\n \"\"\"\n ).encode(),\n)\n\n\n# ----------------------------------------------------------------------------------------\n# Parsing\n# ----------------------------------------------------------------------------------------\n\n_HEADER = {\n \"name\": \"pants-lockfile-generation\",\n \"version\": \"0.1.0\",\n \"description\": \"\",\n \"authors\": [\"pantsbuild\"],\n}\n\n\ndef create_pyproject_toml(\n requirements: Iterable[str], interpreter_constraints: InterpreterConstraints\n) -> str:\n return toml.dumps(create_pyproject_toml_as_dict(requirements, interpreter_constraints))\n\n\ndef create_pyproject_toml_as_dict(\n raw_requirements: Iterable[str], interpreter_constraints: InterpreterConstraints\n) -> dict:\n python_constraint = {\"python\": interpreter_constraints.to_poetry_constraint()}\n project_name_to_poetry_deps = defaultdict(list)\n for raw_req in raw_requirements:\n # WONTFIX(#12314): add error handling.\n req = Requirement.parse(raw_req)\n poetry_dep = PoetryDependency.from_requirement(req)\n project_name_to_poetry_deps[req.project_name].append(poetry_dep)\n\n deps = {\n project_name: PoetryDependency.to_pyproject_toml_metadata(poetry_deps)\n for project_name, poetry_deps in project_name_to_poetry_deps.items()\n }\n return {\"tool\": {\"poetry\": {**_HEADER, \"dependencies\": {**python_constraint, **deps}}}}\n\n\n@dataclass(frozen=True)\nclass PoetryDependency:\n name: str\n version: str | None\n extras: tuple[str, ...] = ()\n markers: str | None = None\n\n @classmethod\n def from_requirement(cls, requirement: Requirement) -> PoetryDependency:\n return PoetryDependency(\n requirement.project_name,\n version=str(requirement.specifier) or None, # type: ignore[attr-defined]\n extras=tuple(sorted(requirement.extras)),\n markers=str(requirement.marker) if requirement.marker else None,\n )\n\n @classmethod\n def to_pyproject_toml_metadata(\n cls, deps: Sequence[PoetryDependency]\n ) -> dict[str, Any] | list[dict[str, Any]]:\n def convert_dep(dep: PoetryDependency) -> dict[str, Any]:\n metadata: dict[str, Any] = {\"version\": dep.version or \"*\"}\n if dep.extras:\n metadata[\"extras\"] = dep.extras\n if dep.markers:\n metadata[\"markers\"] = dep.markers\n return metadata\n\n if not deps:\n raise AssertionError(\"Must have at least one element!\")\n if len(deps) == 1:\n return convert_dep(deps[0])\n\n entries = []\n name = deps[0].name\n for dep in deps:\n if dep.name != name:\n raise AssertionError(f\"All elements must have the same project name. 
Given: {deps}\")\n entries.append(convert_dep(dep))\n return entries\n", "path": "src/python/pants/backend/python/subsystems/poetry.py"}], "after_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom textwrap import dedent\nfrom typing import Any, Iterable, Sequence\n\nimport toml\nfrom pkg_resources import Requirement\n\nfrom pants.backend.python.subsystems.python_tool_base import PythonToolRequirementsBase\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.engine.fs import FileContent\n\n# ----------------------------------------------------------------------------------------\n# Subsystem\n# ----------------------------------------------------------------------------------------\n\n\nclass PoetrySubsystem(PythonToolRequirementsBase):\n options_scope = \"poetry\"\n help = \"Used to generate lockfiles for third-party Python dependencies.\"\n\n default_version = \"poetry==1.1.14\"\n\n register_interpreter_constraints = True\n default_interpreter_constraints = [\"CPython>=3.7,<4\"]\n\n\n# We must monkeypatch Poetry to include `setuptools` and `wheel` in the lockfile. This was fixed\n# in Poetry 1.2. See https://github.com/python-poetry/poetry/issues/1584.\n# WONTFIX(#12314): only use this custom launcher if using Poetry 1.1..\nPOETRY_LAUNCHER = FileContent(\n \"__pants_poetry_launcher.py\",\n dedent(\n \"\"\"\\\n from poetry.console import main\n from poetry.puzzle.provider import Provider\n\n Provider.UNSAFE_PACKAGES = set()\n main()\n \"\"\"\n ).encode(),\n)\n\n\n# ----------------------------------------------------------------------------------------\n# Parsing\n# ----------------------------------------------------------------------------------------\n\n_HEADER = {\n \"name\": \"pants-lockfile-generation\",\n \"version\": \"0.1.0\",\n \"description\": \"\",\n \"authors\": [\"pantsbuild\"],\n}\n\n\ndef create_pyproject_toml(\n requirements: Iterable[str], interpreter_constraints: InterpreterConstraints\n) -> str:\n return toml.dumps(create_pyproject_toml_as_dict(requirements, interpreter_constraints))\n\n\ndef create_pyproject_toml_as_dict(\n raw_requirements: Iterable[str], interpreter_constraints: InterpreterConstraints\n) -> dict:\n python_constraint = {\"python\": interpreter_constraints.to_poetry_constraint()}\n project_name_to_poetry_deps = defaultdict(list)\n for raw_req in raw_requirements:\n # WONTFIX(#12314): add error handling.\n req = Requirement.parse(raw_req)\n poetry_dep = PoetryDependency.from_requirement(req)\n project_name_to_poetry_deps[req.project_name].append(poetry_dep)\n\n deps = {\n project_name: PoetryDependency.to_pyproject_toml_metadata(poetry_deps)\n for project_name, poetry_deps in project_name_to_poetry_deps.items()\n }\n return {\"tool\": {\"poetry\": {**_HEADER, \"dependencies\": {**python_constraint, **deps}}}}\n\n\n@dataclass(frozen=True)\nclass PoetryDependency:\n name: str\n version: str | None\n extras: tuple[str, ...] 
= ()\n markers: str | None = None\n\n @classmethod\n def from_requirement(cls, requirement: Requirement) -> PoetryDependency:\n return PoetryDependency(\n requirement.project_name,\n version=str(requirement.specifier) or None, # type: ignore[attr-defined]\n extras=tuple(sorted(requirement.extras)),\n markers=str(requirement.marker) if requirement.marker else None,\n )\n\n @classmethod\n def to_pyproject_toml_metadata(\n cls, deps: Sequence[PoetryDependency]\n ) -> dict[str, Any] | list[dict[str, Any]]:\n def convert_dep(dep: PoetryDependency) -> dict[str, Any]:\n metadata: dict[str, Any] = {\"version\": dep.version or \"*\"}\n if dep.extras:\n metadata[\"extras\"] = dep.extras\n if dep.markers:\n metadata[\"markers\"] = dep.markers\n return metadata\n\n if not deps:\n raise AssertionError(\"Must have at least one element!\")\n if len(deps) == 1:\n return convert_dep(deps[0])\n\n entries = []\n name = deps[0].name\n for dep in deps:\n if dep.name != name:\n raise AssertionError(f\"All elements must have the same project name. Given: {deps}\")\n entries.append(convert_dep(dep))\n return entries\n", "path": "src/python/pants/backend/python/subsystems/poetry.py"}]}
1,785
152
gh_patches_debug_13638
rasdani/github-patches
git_diff
googleapis__google-cloud-python-2503
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Current `tox` configuration ends up testing old code For example: ``` bash $ rm -r .tox/ $ tox -e system-tests --notest GLOB sdist-make: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/setup.py system-tests create: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/.tox/system-tests system-tests inst: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/.tox/dist/google-cloud-0.20.0.zip system-tests installed: -f file:///home/tseaver/.pip/wheels,enum34==1.1.6,future==0.15.2,futures==3.0.5,gapic-google-logging-v2==0.10.1,gapic-google-pubsub-v1==0.10.1,google-cloud==0.20.0,google-cloud-bigquery==0.20.0,google-cloud-bigtable==0.20.0,google-cloud-core==0.20.0,google-cloud-datastore==0.20.0,google-cloud-dns==0.20.0,google-cloud-error-reporting==0.20.0,google-cloud-happybase==0.20.0,google-cloud-language==0.20.0,google-cloud-logging==0.20.0,google-cloud-monitoring==0.20.0,google-cloud-pubsub==0.20.0,google-cloud-resource-manager==0.20.0,google-cloud-storage==0.20.0,google-cloud-translate==0.20.0,google-cloud-vision==0.20.0,google-gax==0.14.1,googleapis-common-protos==1.3.5,grpc-google-iam-v1==0.10.1,grpc-google-logging-v2==0.10.1,grpc-google-pubsub-v1==0.10.1,grpcio==1.0.0,httplib2==0.9.2,oauth2client==3.0.0,ply==3.8,protobuf==3.1.0.post1,pyasn1==0.1.9,pyasn1-modules==0.0.8,rsa==3.4.2,six==1.10.0 ___________________________________ summary ____________________________________ system-tests: skipped tests congratulations :) $ diff -ru .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/ storage/google/cloud/storage/ --exclude="*.pyc" diff -ru '--exclude=*.pyc' .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/blob.py storage/google/cloud/storage/blob.py --- .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/blob.py 2016-10-05 18:15:48.724796000 -0400 +++ storage/google/cloud/storage/blob.py 2016-10-05 18:02:55.872830411 -0400 @@ -655,6 +655,32 @@ self.acl.all().grant_read() self.acl.save(client=client) + def compose(self, sources, client=None): + """Concatenate source blobs into this one. + + :type sources: list of :class:`Blob` + :param sources: blobs whose contents will be composed into this blob. + + :type client: :class:`~google.cloud.storage.client.Client` or + ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the blob's bucket. + + :raises: :exc:`ValueError` if this blob does not have its + :attr:`content_type` set. + """ + if self.content_type is None: + raise ValueError("Destination 'content_type' not set.") + client = self._require_client(client) + request = { + 'sourceObjects': [{'name': source.name} for source in sources], + 'destination': self._properties.copy(), + } + api_response = client.connection.api_request( + method='POST', path=self.path + '/compose', data=request, + _target_object=self) + self._set_properties(api_response) + cache_control = _scalar_property('cacheControl') """HTTP 'Cache-Control' header for this object. ``` Somehow, the tarball / wheel is being cached. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/pycodestyle_on_repo.py` Content: ``` 1 # Copyright 2016 Google Inc. 
2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Custom script to run pycodestyle on google-cloud codebase. 16 17 This runs pycodestyle as a script via subprocess but only runs it on the 18 .py files that are checked in to the repository. 19 """ 20 21 22 import os 23 import subprocess 24 import sys 25 26 from script_utils import get_affected_files 27 28 29 def main(): 30 """Run pycodestyle on all Python files in the repository.""" 31 git_root = subprocess.check_output( 32 ['git', 'rev-parse', '--show-toplevel']).strip() 33 os.chdir(git_root) 34 candidates, _ = get_affected_files() 35 python_files = [ 36 candidate for candidate in candidates if candidate.endswith('.py')] 37 38 pycodestyle_command = ['pycodestyle'] + python_files 39 status_code = subprocess.call(pycodestyle_command) 40 sys.exit(status_code) 41 42 43 if __name__ == '__main__': 44 main() 45 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/pycodestyle_on_repo.py b/scripts/pycodestyle_on_repo.py --- a/scripts/pycodestyle_on_repo.py +++ b/scripts/pycodestyle_on_repo.py @@ -19,6 +19,8 @@ """ +from __future__ import print_function + import os import subprocess import sys @@ -35,9 +37,12 @@ python_files = [ candidate for candidate in candidates if candidate.endswith('.py')] - pycodestyle_command = ['pycodestyle'] + python_files - status_code = subprocess.call(pycodestyle_command) - sys.exit(status_code) + if not python_files: + print('No Python files to lint, exiting.') + else: + pycodestyle_command = ['pycodestyle'] + python_files + status_code = subprocess.call(pycodestyle_command) + sys.exit(status_code) if __name__ == '__main__':
{"golden_diff": "diff --git a/scripts/pycodestyle_on_repo.py b/scripts/pycodestyle_on_repo.py\n--- a/scripts/pycodestyle_on_repo.py\n+++ b/scripts/pycodestyle_on_repo.py\n@@ -19,6 +19,8 @@\n \"\"\"\n \n \n+from __future__ import print_function\n+\n import os\n import subprocess\n import sys\n@@ -35,9 +37,12 @@\n python_files = [\n candidate for candidate in candidates if candidate.endswith('.py')]\n \n- pycodestyle_command = ['pycodestyle'] + python_files\n- status_code = subprocess.call(pycodestyle_command)\n- sys.exit(status_code)\n+ if not python_files:\n+ print('No Python files to lint, exiting.')\n+ else:\n+ pycodestyle_command = ['pycodestyle'] + python_files\n+ status_code = subprocess.call(pycodestyle_command)\n+ sys.exit(status_code)\n \n \n if __name__ == '__main__':\n", "issue": "Current `tox` configuration ends up testing old code\nFor example:\n\n``` bash\n$ rm -r .tox/\n$ tox -e system-tests --notest\nGLOB sdist-make: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/setup.py\nsystem-tests create: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/.tox/system-tests\nsystem-tests inst: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/.tox/dist/google-cloud-0.20.0.zip\nsystem-tests installed: -f file:///home/tseaver/.pip/wheels,enum34==1.1.6,future==0.15.2,futures==3.0.5,gapic-google-logging-v2==0.10.1,gapic-google-pubsub-v1==0.10.1,google-cloud==0.20.0,google-cloud-bigquery==0.20.0,google-cloud-bigtable==0.20.0,google-cloud-core==0.20.0,google-cloud-datastore==0.20.0,google-cloud-dns==0.20.0,google-cloud-error-reporting==0.20.0,google-cloud-happybase==0.20.0,google-cloud-language==0.20.0,google-cloud-logging==0.20.0,google-cloud-monitoring==0.20.0,google-cloud-pubsub==0.20.0,google-cloud-resource-manager==0.20.0,google-cloud-storage==0.20.0,google-cloud-translate==0.20.0,google-cloud-vision==0.20.0,google-gax==0.14.1,googleapis-common-protos==1.3.5,grpc-google-iam-v1==0.10.1,grpc-google-logging-v2==0.10.1,grpc-google-pubsub-v1==0.10.1,grpcio==1.0.0,httplib2==0.9.2,oauth2client==3.0.0,ply==3.8,protobuf==3.1.0.post1,pyasn1==0.1.9,pyasn1-modules==0.0.8,rsa==3.4.2,six==1.10.0\n___________________________________ summary ____________________________________\n system-tests: skipped tests\n congratulations :)\n$ diff -ru .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/ storage/google/cloud/storage/ --exclude=\"*.pyc\"\ndiff -ru '--exclude=*.pyc' .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/blob.py storage/google/cloud/storage/blob.py\n--- .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/blob.py 2016-10-05 18:15:48.724796000 -0400\n+++ storage/google/cloud/storage/blob.py 2016-10-05 18:02:55.872830411 -0400\n@@ -655,6 +655,32 @@\n self.acl.all().grant_read()\n self.acl.save(client=client)\n\n+ def compose(self, sources, client=None):\n+ \"\"\"Concatenate source blobs into this one.\n+\n+ :type sources: list of :class:`Blob`\n+ :param sources: blobs whose contents will be composed into this blob.\n+\n+ :type client: :class:`~google.cloud.storage.client.Client` or\n+ ``NoneType``\n+ :param client: Optional. The client to use. 
If not passed, falls back\n+ to the ``client`` stored on the blob's bucket.\n+\n+ :raises: :exc:`ValueError` if this blob does not have its\n+ :attr:`content_type` set.\n+ \"\"\"\n+ if self.content_type is None:\n+ raise ValueError(\"Destination 'content_type' not set.\")\n+ client = self._require_client(client)\n+ request = {\n+ 'sourceObjects': [{'name': source.name} for source in sources],\n+ 'destination': self._properties.copy(),\n+ }\n+ api_response = client.connection.api_request(\n+ method='POST', path=self.path + '/compose', data=request,\n+ _target_object=self)\n+ self._set_properties(api_response)\n+\n cache_control = _scalar_property('cacheControl')\n \"\"\"HTTP 'Cache-Control' header for this object.\n```\n\nSomehow, the tarball / wheel is being cached.\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Custom script to run pycodestyle on google-cloud codebase.\n\nThis runs pycodestyle as a script via subprocess but only runs it on the\n.py files that are checked in to the repository.\n\"\"\"\n\n\nimport os\nimport subprocess\nimport sys\n\nfrom script_utils import get_affected_files\n\n\ndef main():\n \"\"\"Run pycodestyle on all Python files in the repository.\"\"\"\n git_root = subprocess.check_output(\n ['git', 'rev-parse', '--show-toplevel']).strip()\n os.chdir(git_root)\n candidates, _ = get_affected_files()\n python_files = [\n candidate for candidate in candidates if candidate.endswith('.py')]\n\n pycodestyle_command = ['pycodestyle'] + python_files\n status_code = subprocess.call(pycodestyle_command)\n sys.exit(status_code)\n\n\nif __name__ == '__main__':\n main()\n", "path": "scripts/pycodestyle_on_repo.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Custom script to run pycodestyle on google-cloud codebase.\n\nThis runs pycodestyle as a script via subprocess but only runs it on the\n.py files that are checked in to the repository.\n\"\"\"\n\n\nfrom __future__ import print_function\n\nimport os\nimport subprocess\nimport sys\n\nfrom script_utils import get_affected_files\n\n\ndef main():\n \"\"\"Run pycodestyle on all Python files in the repository.\"\"\"\n git_root = subprocess.check_output(\n ['git', 'rev-parse', '--show-toplevel']).strip()\n os.chdir(git_root)\n candidates, _ = get_affected_files()\n python_files = [\n candidate for candidate in candidates if candidate.endswith('.py')]\n\n if not python_files:\n print('No 
Python files to lint, exiting.')\n else:\n pycodestyle_command = ['pycodestyle'] + python_files\n status_code = subprocess.call(pycodestyle_command)\n sys.exit(status_code)\n\n\nif __name__ == '__main__':\n main()\n", "path": "scripts/pycodestyle_on_repo.py"}]}
1,702
210
gh_patches_debug_8101
rasdani/github-patches
git_diff
scrapy__scrapy-1983
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- empty WARNING message in scrapy.core.downloader.tls (1.1.0rc4/master) Sometimes I'm getting empty warnings now, on 1.1.0rc4 and master branch. (at least on rc3 as well) ``` 2016-05-07 00:33:46 [scrapy.core.downloader.tls] WARNING: 2016-05-07 00:33:47 [scrapy.core.downloader.tls] WARNING: 2016-05-07 00:33:48 [scrapy.core.downloader.tls] WARNING: ``` It happens in a broad linkcheck crawl; so I couldn't pinpoint what URLs might be responsible for that, at this time. The only other observation so far is, that it doesn't happen on a cache-replayed run (which might be obvious, as there is no TLS there). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/core/downloader/tls.py` Content: ``` 1 import logging 2 from OpenSSL import SSL 3 4 5 logger = logging.getLogger(__name__) 6 7 METHOD_SSLv3 = 'SSLv3' 8 METHOD_TLS = 'TLS' 9 METHOD_TLSv10 = 'TLSv1.0' 10 METHOD_TLSv11 = 'TLSv1.1' 11 METHOD_TLSv12 = 'TLSv1.2' 12 13 openssl_methods = { 14 METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended) 15 METHOD_SSLv3: SSL.SSLv3_METHOD, # SSL 3 (NOT recommended) 16 METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only 17 METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only 18 METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only 19 } 20 21 # ClientTLSOptions requires a recent-enough version of Twisted 22 try: 23 24 # taken from twisted/twisted/internet/_sslverify.py 25 try: 26 from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START 27 except ImportError: 28 SSL_CB_HANDSHAKE_START = 0x10 29 SSL_CB_HANDSHAKE_DONE = 0x20 30 31 from twisted.internet._sslverify import (ClientTLSOptions, 32 _maybeSetHostNameIndication, 33 verifyHostname, 34 VerificationError) 35 36 class ScrapyClientTLSOptions(ClientTLSOptions): 37 # same as Twisted's ClientTLSOptions, 38 # except that VerificationError is caught 39 # and doesn't close the connection 40 def _identityVerifyingInfoCallback(self, connection, where, ret): 41 if where & SSL_CB_HANDSHAKE_START: 42 _maybeSetHostNameIndication(connection, self._hostnameBytes) 43 elif where & SSL_CB_HANDSHAKE_DONE: 44 try: 45 verifyHostname(connection, self._hostnameASCII) 46 except VerificationError as e: 47 logger.warning(e) 48 49 except ImportError: 50 # ImportError should not matter for older Twisted versions 51 # as the above is not used in the fallback ScrapyClientContextFactory 52 pass 53 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/core/downloader/tls.py b/scrapy/core/downloader/tls.py --- a/scrapy/core/downloader/tls.py +++ b/scrapy/core/downloader/tls.py @@ -44,7 +44,9 @@ try: verifyHostname(connection, self._hostnameASCII) except VerificationError as e: - logger.warning(e) + logger.warning( + 'Remote certificate is not valid for hostname "{}"; {}'.format( + self._hostnameASCII, e)) except ImportError: # ImportError should not matter for older Twisted versions
{"golden_diff": "diff --git a/scrapy/core/downloader/tls.py b/scrapy/core/downloader/tls.py\n--- a/scrapy/core/downloader/tls.py\n+++ b/scrapy/core/downloader/tls.py\n@@ -44,7 +44,9 @@\n try:\n verifyHostname(connection, self._hostnameASCII)\n except VerificationError as e:\n- logger.warning(e)\n+ logger.warning(\n+ 'Remote certificate is not valid for hostname \"{}\"; {}'.format(\n+ self._hostnameASCII, e))\n \n except ImportError:\n # ImportError should not matter for older Twisted versions\n", "issue": "empty WARNING message in scrapy.core.downloader.tls (1.1.0rc4/master)\nSometimes I'm getting empty warnings now, on 1.1.0rc4 and master branch.\n(at least on rc3 as well)\n\n```\n2016-05-07 00:33:46 [scrapy.core.downloader.tls] WARNING: \n2016-05-07 00:33:47 [scrapy.core.downloader.tls] WARNING: \n2016-05-07 00:33:48 [scrapy.core.downloader.tls] WARNING: \n```\n\nIt happens in a broad linkcheck crawl; so I couldn't pinpoint what URLs might be responsible for that, at this time. The only other observation so far is, that it doesn't happen on a cache-replayed run (which might be obvious, as there is no TLS there).\n\n", "before_files": [{"content": "import logging\nfrom OpenSSL import SSL\n\n\nlogger = logging.getLogger(__name__)\n\nMETHOD_SSLv3 = 'SSLv3'\nMETHOD_TLS = 'TLS'\nMETHOD_TLSv10 = 'TLSv1.0'\nMETHOD_TLSv11 = 'TLSv1.1'\nMETHOD_TLSv12 = 'TLSv1.2'\n\nopenssl_methods = {\n METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)\n METHOD_SSLv3: SSL.SSLv3_METHOD, # SSL 3 (NOT recommended)\n METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only\n METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only\n METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only\n}\n\n# ClientTLSOptions requires a recent-enough version of Twisted\ntry:\n\n # taken from twisted/twisted/internet/_sslverify.py\n try:\n from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START\n except ImportError:\n SSL_CB_HANDSHAKE_START = 0x10\n SSL_CB_HANDSHAKE_DONE = 0x20\n\n from twisted.internet._sslverify import (ClientTLSOptions,\n _maybeSetHostNameIndication,\n verifyHostname,\n VerificationError)\n\n class ScrapyClientTLSOptions(ClientTLSOptions):\n # same as Twisted's ClientTLSOptions,\n # except that VerificationError is caught\n # and doesn't close the connection\n def _identityVerifyingInfoCallback(self, connection, where, ret):\n if where & SSL_CB_HANDSHAKE_START:\n _maybeSetHostNameIndication(connection, self._hostnameBytes)\n elif where & SSL_CB_HANDSHAKE_DONE:\n try:\n verifyHostname(connection, self._hostnameASCII)\n except VerificationError as e:\n logger.warning(e)\n\nexcept ImportError:\n # ImportError should not matter for older Twisted versions\n # as the above is not used in the fallback ScrapyClientContextFactory\n pass\n", "path": "scrapy/core/downloader/tls.py"}], "after_files": [{"content": "import logging\nfrom OpenSSL import SSL\n\n\nlogger = logging.getLogger(__name__)\n\nMETHOD_SSLv3 = 'SSLv3'\nMETHOD_TLS = 'TLS'\nMETHOD_TLSv10 = 'TLSv1.0'\nMETHOD_TLSv11 = 'TLSv1.1'\nMETHOD_TLSv12 = 'TLSv1.2'\n\nopenssl_methods = {\n METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)\n METHOD_SSLv3: SSL.SSLv3_METHOD, # SSL 3 (NOT recommended)\n METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only\n METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only\n METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only\n}\n\n# ClientTLSOptions requires a recent-enough version of Twisted\ntry:\n\n # taken from twisted/twisted/internet/_sslverify.py\n 
try:\n from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START\n except ImportError:\n SSL_CB_HANDSHAKE_START = 0x10\n SSL_CB_HANDSHAKE_DONE = 0x20\n\n from twisted.internet._sslverify import (ClientTLSOptions,\n _maybeSetHostNameIndication,\n verifyHostname,\n VerificationError)\n\n class ScrapyClientTLSOptions(ClientTLSOptions):\n # same as Twisted's ClientTLSOptions,\n # except that VerificationError is caught\n # and doesn't close the connection\n def _identityVerifyingInfoCallback(self, connection, where, ret):\n if where & SSL_CB_HANDSHAKE_START:\n _maybeSetHostNameIndication(connection, self._hostnameBytes)\n elif where & SSL_CB_HANDSHAKE_DONE:\n try:\n verifyHostname(connection, self._hostnameASCII)\n except VerificationError as e:\n logger.warning(\n 'Remote certificate is not valid for hostname \"{}\"; {}'.format(\n self._hostnameASCII, e))\n\nexcept ImportError:\n # ImportError should not matter for older Twisted versions\n # as the above is not used in the fallback ScrapyClientContextFactory\n pass\n", "path": "scrapy/core/downloader/tls.py"}]}
1042
127
gh_patches_debug_7730
rasdani/github-patches
git_diff
freedomofpress__securedrop-3737
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- SecureDrop backups from previous versions don't work if database migration has occurred ## Description Backup restore (https://github.com/freedomofpress/securedrop/blob/develop/install_files/ansible-base/roles/restore/files/restore.py) script does not apply database migrations, and as such breaks the application upon backup restore. Manual workaround that appears to have no side-effects is to run `sudo dpkg-reconfigure securedrop-app-code` on the app server. ## Steps to Reproduce 1. Install SecureDrop 0.8.0 2. `securedrop-admin backup` 3. Upgrade to 0.9.0 (or higher) 4. `securedrop-admin restore` 5. Observe source and journalist interface return error 500 ## Expected Behavior The application should be operational. ## Actual Behavior The source and journalist interfaces return error 500s. ## Comments Running `sudo dpkg-reconfigure securedrop-app-code` calls the postinst script which will apply migration. Based on my testing, this seems to work reliably. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `install_files/ansible-base/roles/restore/files/restore.py` Content: ``` 1 #!/usr/bin/python2.7 2 """ 3 This script and backup archive should be copied to the App server and run by 4 the Ansible playbook. When run (as root), it restores the contents of the 0.3 5 backup file to the machine it's run on. 6 7 python restore.py sd-backup-TIMESTAMP.tar.gz 8 """ 9 10 import os 11 import subprocess 12 import sys 13 import tarfile 14 15 16 def verify_args(): 17 usage = """ 18 Usage: restore.py <backup file> 19 20 <backup file> Path to a SecureDrop 0.3 backup created by backup.py" 21 """ 22 if len(sys.argv) != 2: 23 print(usage) 24 sys.exit(1) 25 26 if not os.path.exists(sys.argv[1]): 27 print("<backup file> '{}' not found".format(sys.argv[1])) 28 sys.exit(1) 29 30 if os.geteuid() != 0: 31 print("This program must be run as root!") 32 sys.exit(1) 33 34 35 def main(): 36 verify_args() 37 38 with tarfile.open(sys.argv[1], 'r:*') as backup: 39 # This assumes that both the old installation (source of the backup) 40 # and the new installation (destination of the restore) used the 41 # default paths for various locations. 42 backup.extractall(path='/') 43 44 # Reload Tor and the web server so they pick up the new configuration 45 # If the process exits with a non-zero return code, raises an exception. 46 subprocess.check_call(['service', 'apache2', 'restart']) 47 subprocess.check_call(['service', 'tor', 'reload']) 48 49 50 if __name__ == "__main__": 51 main() 52 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/install_files/ansible-base/roles/restore/files/restore.py b/install_files/ansible-base/roles/restore/files/restore.py --- a/install_files/ansible-base/roles/restore/files/restore.py +++ b/install_files/ansible-base/roles/restore/files/restore.py @@ -45,6 +45,8 @@ # If the process exits with a non-zero return code, raises an exception. subprocess.check_call(['service', 'apache2', 'restart']) subprocess.check_call(['service', 'tor', 'reload']) + # Apply database migrations (if backed-up version < version to restore) + subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code']) if __name__ == "__main__":
{"golden_diff": "diff --git a/install_files/ansible-base/roles/restore/files/restore.py b/install_files/ansible-base/roles/restore/files/restore.py\n--- a/install_files/ansible-base/roles/restore/files/restore.py\n+++ b/install_files/ansible-base/roles/restore/files/restore.py\n@@ -45,6 +45,8 @@\n # If the process exits with a non-zero return code, raises an exception.\n subprocess.check_call(['service', 'apache2', 'restart'])\n subprocess.check_call(['service', 'tor', 'reload'])\n+ # Apply database migrations (if backed-up version < version to restore)\n+ subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code'])\n \n \n if __name__ == \"__main__\":\n", "issue": "SecureDrop backups from previous versions don't work if database migration has occurred\n## Description\r\n\r\nBackup restore (https://github.com/freedomofpress/securedrop/blob/develop/install_files/ansible-base/roles/restore/files/restore.py) script does not apply database migrations, and as such breaks the application upon backup restore. Manual workaround that appears to have no side-effects is to run `sudo dpkg-reconfigure securedrop-app-code` on the app server.\r\n\r\n## Steps to Reproduce\r\n\r\n1. Install SecureDrop 0.8.0\r\n2. `securedrop-admin backup`\r\n3. Upgrade to 0.9.0 (or higher)\r\n4. `securedrop-admin restore`\r\n5. Observe source and journalist interface return error 500\r\n\r\n## Expected Behavior\r\n\r\nThe application should be operational.\r\n\r\n## Actual Behavior\r\n\r\nThe source and journalist interfaces return error 500s.\r\n\r\n## Comments\r\n\r\nRunning `sudo dpkg-reconfigure securedrop-app-code` calls the postinst script which will apply migration. Based on my testing, this seems to work reliably.\n", "before_files": [{"content": "#!/usr/bin/python2.7\n\"\"\"\nThis script and backup archive should be copied to the App server and run by\nthe Ansible playbook. When run (as root), it restores the contents of the 0.3\nbackup file to the machine it's run on.\n\npython restore.py sd-backup-TIMESTAMP.tar.gz\n\"\"\"\n\nimport os\nimport subprocess\nimport sys\nimport tarfile\n\n\ndef verify_args():\n usage = \"\"\"\nUsage: restore.py <backup file>\n\n <backup file> Path to a SecureDrop 0.3 backup created by backup.py\"\n \"\"\"\n if len(sys.argv) != 2:\n print(usage)\n sys.exit(1)\n\n if not os.path.exists(sys.argv[1]):\n print(\"<backup file> '{}' not found\".format(sys.argv[1]))\n sys.exit(1)\n\n if os.geteuid() != 0:\n print(\"This program must be run as root!\")\n sys.exit(1)\n\n\ndef main():\n verify_args()\n\n with tarfile.open(sys.argv[1], 'r:*') as backup:\n # This assumes that both the old installation (source of the backup)\n # and the new installation (destination of the restore) used the\n # default paths for various locations.\n backup.extractall(path='/')\n\n # Reload Tor and the web server so they pick up the new configuration\n # If the process exits with a non-zero return code, raises an exception.\n subprocess.check_call(['service', 'apache2', 'restart'])\n subprocess.check_call(['service', 'tor', 'reload'])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install_files/ansible-base/roles/restore/files/restore.py"}], "after_files": [{"content": "#!/usr/bin/python2.7\n\"\"\"\nThis script and backup archive should be copied to the App server and run by\nthe Ansible playbook. 
When run (as root), it restores the contents of the 0.3\nbackup file to the machine it's run on.\n\npython restore.py sd-backup-TIMESTAMP.tar.gz\n\"\"\"\n\nimport os\nimport subprocess\nimport sys\nimport tarfile\n\n\ndef verify_args():\n usage = \"\"\"\nUsage: restore.py <backup file>\n\n <backup file> Path to a SecureDrop 0.3 backup created by backup.py\"\n \"\"\"\n if len(sys.argv) != 2:\n print(usage)\n sys.exit(1)\n\n if not os.path.exists(sys.argv[1]):\n print(\"<backup file> '{}' not found\".format(sys.argv[1]))\n sys.exit(1)\n\n if os.geteuid() != 0:\n print(\"This program must be run as root!\")\n sys.exit(1)\n\n\ndef main():\n verify_args()\n\n with tarfile.open(sys.argv[1], 'r:*') as backup:\n # This assumes that both the old installation (source of the backup)\n # and the new installation (destination of the restore) used the\n # default paths for various locations.\n backup.extractall(path='/')\n\n # Reload Tor and the web server so they pick up the new configuration\n # If the process exits with a non-zero return code, raises an exception.\n subprocess.check_call(['service', 'apache2', 'restart'])\n subprocess.check_call(['service', 'tor', 'reload'])\n # Apply database migrations (if backed-up version < version to restore)\n subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code'])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install_files/ansible-base/roles/restore/files/restore.py"}]}
941
162
gh_patches_debug_5171
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-2581
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Missing type annotation in `strawberry.fastapi.BaseContext` causes mypy to trip ## Describe the Bug I built a custom context based on [the guide in the docs](https://strawberry.rocks/docs/guides/authentication): ```python from strawberry.fastapi import BaseContext class CustomContext(BaseContext): @property def user(self) -> User: ... async def get_context() -> CustomContext: return CustomContext() ``` With that I receive the following mypy error: ```shell error: Call to untyped function "CustomContext" in typed context [no-untyped-call] ``` For now, I added the following workaround to my code: ```python class CustomContext(BaseContext): if typing.TYPE_CHECKING: def __init__(self) -> None: pass ... ``` ## System Information - Operating system: macOS Monterey - Strawberry version (if applicable): `0.158.1` - FastAPI version (if applicable): `0.92.0` - mypy version (if applicable): `0.991` (also tested with `1.0.1`) ## Additional Context I'm happy to provide a PR to address the issue myself. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `strawberry/fastapi/context.py` Content: ``` 1 from typing import Any, Dict, Optional, Union 2 3 from starlette.background import BackgroundTasks 4 from starlette.requests import Request 5 from starlette.responses import Response 6 from starlette.websockets import WebSocket 7 8 CustomContext = Union["BaseContext", Dict[str, Any]] 9 MergedContext = Union[ 10 "BaseContext", Dict[str, Union[Any, BackgroundTasks, Request, Response, WebSocket]] 11 ] 12 13 14 class BaseContext: 15 connection_params: Optional[Any] = None 16 17 def __init__(self): 18 self.request: Optional[Union[Request, WebSocket]] = None 19 self.background_tasks: Optional[BackgroundTasks] = None 20 self.response: Optional[Response] = None 21 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/strawberry/fastapi/context.py b/strawberry/fastapi/context.py --- a/strawberry/fastapi/context.py +++ b/strawberry/fastapi/context.py @@ -14,7 +14,7 @@ class BaseContext: connection_params: Optional[Any] = None - def __init__(self): + def __init__(self) -> None: self.request: Optional[Union[Request, WebSocket]] = None self.background_tasks: Optional[BackgroundTasks] = None self.response: Optional[Response] = None
{"golden_diff": "diff --git a/strawberry/fastapi/context.py b/strawberry/fastapi/context.py\n--- a/strawberry/fastapi/context.py\n+++ b/strawberry/fastapi/context.py\n@@ -14,7 +14,7 @@\n class BaseContext:\n connection_params: Optional[Any] = None\n \n- def __init__(self):\n+ def __init__(self) -> None:\n self.request: Optional[Union[Request, WebSocket]] = None\n self.background_tasks: Optional[BackgroundTasks] = None\n self.response: Optional[Response] = None\n", "issue": "Missing type annotation in `strawberry.fastapi.BaseContext` causes mypy to trip\n## Describe the Bug\r\n\r\nI built a custom context based on [the guide in the docs](https://strawberry.rocks/docs/guides/authentication):\r\n\r\n```python\r\nfrom strawberry.fastapi import BaseContext\r\n\r\nclass CustomContext(BaseContext):\r\n @property\r\n def user(self) -> User:\r\n ...\r\n\r\nasync def get_context() -> CustomContext:\r\n return CustomContext()\r\n```\r\n\r\nWith that I receive the following mypy error:\r\n```shell\r\n error: Call to untyped function \"CustomContext\" in typed context [no-untyped-call]\r\n```\r\n\r\nFor now, I added the following workaround to my code:\r\n```python\r\nclass CustomContext(BaseContext):\r\n if typing.TYPE_CHECKING:\r\n def __init__(self) -> None:\r\n pass\r\n \r\n ...\r\n```\r\n\r\n## System Information\r\n\r\n - Operating system: macOS Monterey\r\n - Strawberry version (if applicable): `0.158.1`\r\n - FastAPI version (if applicable): `0.92.0`\r\n - mypy version (if applicable): `0.991` (also tested with `1.0.1`)\r\n\r\n## Additional Context\r\n\r\nI'm happy to provide a PR to address the issue myself.\r\n\n", "before_files": [{"content": "from typing import Any, Dict, Optional, Union\n\nfrom starlette.background import BackgroundTasks\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.websockets import WebSocket\n\nCustomContext = Union[\"BaseContext\", Dict[str, Any]]\nMergedContext = Union[\n \"BaseContext\", Dict[str, Union[Any, BackgroundTasks, Request, Response, WebSocket]]\n]\n\n\nclass BaseContext:\n connection_params: Optional[Any] = None\n\n def __init__(self):\n self.request: Optional[Union[Request, WebSocket]] = None\n self.background_tasks: Optional[BackgroundTasks] = None\n self.response: Optional[Response] = None\n", "path": "strawberry/fastapi/context.py"}], "after_files": [{"content": "from typing import Any, Dict, Optional, Union\n\nfrom starlette.background import BackgroundTasks\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.websockets import WebSocket\n\nCustomContext = Union[\"BaseContext\", Dict[str, Any]]\nMergedContext = Union[\n \"BaseContext\", Dict[str, Union[Any, BackgroundTasks, Request, Response, WebSocket]]\n]\n\n\nclass BaseContext:\n connection_params: Optional[Any] = None\n\n def __init__(self) -> None:\n self.request: Optional[Union[Request, WebSocket]] = None\n self.background_tasks: Optional[BackgroundTasks] = None\n self.response: Optional[Response] = None\n", "path": "strawberry/fastapi/context.py"}]}
707
128
gh_patches_debug_11668
rasdani/github-patches
git_diff
boto__boto-1776
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- attrs parameter to Item.__init__ and table.new_item don't work as documented In dynamodb/table.py, `new_item` is documented to take an `attrs` parameter which will be used to populate the new item. However, this doesn't work: ` In [3]: item = table.new_item('key', attrs={"a":"test"}) In [4]: item.save() Out[4]: {'ConsumedCapacityUnits': 1.0} In [5]: read = table.get_item('key') In [6]: print read {'key': 'key'} ` The bug is in item.py in `__init__` - `self._updates` is `None` until after the `attrs` dictionary is copied - but `__setitem__` doesn't update the `_updates` dictionary if it is None. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `boto/dynamodb/item.py` Content: ``` 1 # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ 2 # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining a 5 # copy of this software and associated documentation files (the 6 # "Software"), to deal in the Software without restriction, including 7 # without limitation the rights to use, copy, modify, merge, publish, dis- 8 # tribute, sublicense, and/or sell copies of the Software, and to permit 9 # persons to whom the Software is furnished to do so, subject to the fol- 10 # lowing conditions: 11 # 12 # The above copyright notice and this permission notice shall be included 13 # in all copies or substantial portions of the Software. 14 # 15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 16 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- 17 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 18 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 19 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 # IN THE SOFTWARE. 22 # 23 24 from boto.dynamodb.exceptions import DynamoDBItemError 25 26 27 class Item(dict): 28 """ 29 An item in Amazon DynamoDB. 30 31 :ivar hash_key: The HashKey of this item. 32 :ivar range_key: The RangeKey of this item or None if no RangeKey 33 is defined. 34 :ivar hash_key_name: The name of the HashKey associated with this item. 35 :ivar range_key_name: The name of the RangeKey associated with this item. 36 :ivar table: The Table this item belongs to. 
37 """ 38 39 def __init__(self, table, hash_key=None, range_key=None, attrs=None): 40 self.table = table 41 self._updates = None 42 self._hash_key_name = self.table.schema.hash_key_name 43 self._range_key_name = self.table.schema.range_key_name 44 if attrs == None: 45 attrs = {} 46 if hash_key == None: 47 hash_key = attrs.get(self._hash_key_name, None) 48 self[self._hash_key_name] = hash_key 49 if self._range_key_name: 50 if range_key == None: 51 range_key = attrs.get(self._range_key_name, None) 52 self[self._range_key_name] = range_key 53 for key, value in attrs.items(): 54 if key != self._hash_key_name and key != self._range_key_name: 55 self[key] = value 56 self.consumed_units = 0 57 self._updates = {} 58 59 @property 60 def hash_key(self): 61 return self[self._hash_key_name] 62 63 @property 64 def range_key(self): 65 return self.get(self._range_key_name) 66 67 @property 68 def hash_key_name(self): 69 return self._hash_key_name 70 71 @property 72 def range_key_name(self): 73 return self._range_key_name 74 75 def add_attribute(self, attr_name, attr_value): 76 """ 77 Queue the addition of an attribute to an item in DynamoDB. 78 This will eventually result in an UpdateItem request being issued 79 with an update action of ADD when the save method is called. 80 81 :type attr_name: str 82 :param attr_name: Name of the attribute you want to alter. 83 84 :type attr_value: int|long|float|set 85 :param attr_value: Value which is to be added to the attribute. 86 """ 87 self._updates[attr_name] = ("ADD", attr_value) 88 89 def delete_attribute(self, attr_name, attr_value=None): 90 """ 91 Queue the deletion of an attribute from an item in DynamoDB. 92 This call will result in a UpdateItem request being issued 93 with update action of DELETE when the save method is called. 94 95 :type attr_name: str 96 :param attr_name: Name of the attribute you want to alter. 97 98 :type attr_value: set 99 :param attr_value: A set of values to be removed from the attribute. 100 This parameter is optional. If None, the whole attribute is 101 removed from the item. 102 """ 103 self._updates[attr_name] = ("DELETE", attr_value) 104 105 def put_attribute(self, attr_name, attr_value): 106 """ 107 Queue the putting of an attribute to an item in DynamoDB. 108 This call will result in an UpdateItem request being issued 109 with the update action of PUT when the save method is called. 110 111 :type attr_name: str 112 :param attr_name: Name of the attribute you want to alter. 113 114 :type attr_value: int|long|float|str|set 115 :param attr_value: New value of the attribute. 116 """ 117 self._updates[attr_name] = ("PUT", attr_value) 118 119 def save(self, expected_value=None, return_values=None): 120 """ 121 Commits pending updates to Amazon DynamoDB. 122 123 :type expected_value: dict 124 :param expected_value: A dictionary of name/value pairs that 125 you expect. This dictionary should have name/value pairs 126 where the name is the name of the attribute and the value is 127 either the value you are expecting or False if you expect 128 the attribute not to exist. 129 130 :type return_values: str 131 :param return_values: Controls the return of attribute name/value pairs 132 before they were updated. Possible values are: None, 'ALL_OLD', 133 'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is 134 specified and the item is overwritten, the content of the old item 135 is returned. If 'ALL_NEW' is specified, then all the attributes of 136 the new version of the item are returned. 
If 'UPDATED_NEW' is 137 specified, the new versions of only the updated attributes are 138 returned. 139 """ 140 return self.table.layer2.update_item(self, expected_value, 141 return_values) 142 143 def delete(self, expected_value=None, return_values=None): 144 """ 145 Delete the item from DynamoDB. 146 147 :type expected_value: dict 148 :param expected_value: A dictionary of name/value pairs that 149 you expect. This dictionary should have name/value pairs 150 where the name is the name of the attribute and the value 151 is either the value you are expecting or False if you expect 152 the attribute not to exist. 153 154 :type return_values: str 155 :param return_values: Controls the return of attribute 156 name-value pairs before then were changed. Possible 157 values are: None or 'ALL_OLD'. If 'ALL_OLD' is 158 specified and the item is overwritten, the content 159 of the old item is returned. 160 """ 161 return self.table.layer2.delete_item(self, expected_value, 162 return_values) 163 164 def put(self, expected_value=None, return_values=None): 165 """ 166 Store a new item or completely replace an existing item 167 in Amazon DynamoDB. 168 169 :type expected_value: dict 170 :param expected_value: A dictionary of name/value pairs that 171 you expect. This dictionary should have name/value pairs 172 where the name is the name of the attribute and the value 173 is either the value you are expecting or False if you expect 174 the attribute not to exist. 175 176 :type return_values: str 177 :param return_values: Controls the return of attribute 178 name-value pairs before then were changed. Possible 179 values are: None or 'ALL_OLD'. If 'ALL_OLD' is 180 specified and the item is overwritten, the content 181 of the old item is returned. 182 """ 183 return self.table.layer2.put_item(self, expected_value, return_values) 184 185 def __setitem__(self, key, value): 186 """Overrwrite the setter to instead update the _updates 187 method so this can act like a normal dict""" 188 if self._updates is not None: 189 self.put_attribute(key, value) 190 dict.__setitem__(self, key, value) 191 192 def __delitem__(self, key): 193 """Remove this key from the items""" 194 if self._updates is not None: 195 self.delete_attribute(key) 196 dict.__delitem__(self, key) 197 198 # Allow this item to still be pickled 199 def __getstate__(self): 200 return self.__dict__ 201 def __setstate__(self, d): 202 self.__dict__.update(d) 203 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/boto/dynamodb/item.py b/boto/dynamodb/item.py --- a/boto/dynamodb/item.py +++ b/boto/dynamodb/item.py @@ -50,11 +50,11 @@ if range_key == None: range_key = attrs.get(self._range_key_name, None) self[self._range_key_name] = range_key + self._updates = {} for key, value in attrs.items(): if key != self._hash_key_name and key != self._range_key_name: self[key] = value self.consumed_units = 0 - self._updates = {} @property def hash_key(self):
{"golden_diff": "diff --git a/boto/dynamodb/item.py b/boto/dynamodb/item.py\n--- a/boto/dynamodb/item.py\n+++ b/boto/dynamodb/item.py\n@@ -50,11 +50,11 @@\n if range_key == None:\n range_key = attrs.get(self._range_key_name, None)\n self[self._range_key_name] = range_key\n+ self._updates = {}\n for key, value in attrs.items():\n if key != self._hash_key_name and key != self._range_key_name:\n self[key] = value\n self.consumed_units = 0\n- self._updates = {}\n \n @property\n def hash_key(self):\n", "issue": "attrs parameter to Item.__init__ and table.new_item don't work as documented\nIn dynamodb/table.py, `new_item` is documented to take an `attrs` parameter which will be used to populate the new item.\n\nHowever, this doesn't work:\n`\n In [3]: item = table.new_item('key', attrs={\"a\":\"test\"})\n In [4]: item.save()\n Out[4]: {'ConsumedCapacityUnits': 1.0}\n In [5]: read = table.get_item('key')\n In [6]: print read\n {'key': 'key'}\n`\n\nThe bug is in item.py in `__init__` - `self._updates` is `None` until after the `attrs` dictionary is copied - but `__setitem__` doesn't update the `_updates` dictionary if it is None.\n\n", "before_files": [{"content": "# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n\nfrom boto.dynamodb.exceptions import DynamoDBItemError\n\n\nclass Item(dict):\n \"\"\"\n An item in Amazon DynamoDB.\n\n :ivar hash_key: The HashKey of this item.\n :ivar range_key: The RangeKey of this item or None if no RangeKey\n is defined.\n :ivar hash_key_name: The name of the HashKey associated with this item.\n :ivar range_key_name: The name of the RangeKey associated with this item.\n :ivar table: The Table this item belongs to.\n \"\"\"\n \n def __init__(self, table, hash_key=None, range_key=None, attrs=None):\n self.table = table\n self._updates = None\n self._hash_key_name = self.table.schema.hash_key_name\n self._range_key_name = self.table.schema.range_key_name\n if attrs == None:\n attrs = {}\n if hash_key == None:\n hash_key = attrs.get(self._hash_key_name, None)\n self[self._hash_key_name] = hash_key\n if self._range_key_name:\n if range_key == None:\n range_key = attrs.get(self._range_key_name, None)\n self[self._range_key_name] = range_key\n for key, value in attrs.items():\n if key != self._hash_key_name and key != self._range_key_name:\n self[key] = value\n self.consumed_units = 0\n self._updates = {}\n\n @property\n def hash_key(self):\n return self[self._hash_key_name]\n\n @property\n def range_key(self):\n return self.get(self._range_key_name)\n\n @property\n def hash_key_name(self):\n return self._hash_key_name\n\n @property\n def range_key_name(self):\n return self._range_key_name\n\n def add_attribute(self, attr_name, attr_value):\n \"\"\"\n Queue the addition of an attribute to an item in DynamoDB.\n This will eventually result in an UpdateItem request being issued\n with an update action of ADD when the save method is called.\n\n :type attr_name: str\n :param attr_name: Name of the attribute you want to alter.\n\n :type attr_value: int|long|float|set\n :param attr_value: Value which is to be added to the attribute.\n \"\"\"\n self._updates[attr_name] = (\"ADD\", attr_value)\n\n def delete_attribute(self, attr_name, attr_value=None):\n \"\"\"\n Queue the deletion of an attribute from an item in DynamoDB.\n This call will result in a UpdateItem request being issued\n with update action of DELETE when the save method is called.\n\n :type attr_name: str\n :param attr_name: Name of the attribute you want to alter.\n\n :type attr_value: set\n :param attr_value: A set of values to be removed from the attribute.\n This parameter is optional. If None, the whole attribute is\n removed from the item.\n \"\"\"\n self._updates[attr_name] = (\"DELETE\", attr_value)\n\n def put_attribute(self, attr_name, attr_value):\n \"\"\"\n Queue the putting of an attribute to an item in DynamoDB.\n This call will result in an UpdateItem request being issued\n with the update action of PUT when the save method is called.\n\n :type attr_name: str\n :param attr_name: Name of the attribute you want to alter.\n\n :type attr_value: int|long|float|str|set\n :param attr_value: New value of the attribute.\n \"\"\"\n self._updates[attr_name] = (\"PUT\", attr_value)\n\n def save(self, expected_value=None, return_values=None):\n \"\"\"\n Commits pending updates to Amazon DynamoDB.\n\n :type expected_value: dict\n :param expected_value: A dictionary of name/value pairs that\n you expect. 
This dictionary should have name/value pairs\n where the name is the name of the attribute and the value is\n either the value you are expecting or False if you expect\n the attribute not to exist.\n\n :type return_values: str\n :param return_values: Controls the return of attribute name/value pairs\n before they were updated. Possible values are: None, 'ALL_OLD',\n 'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is\n specified and the item is overwritten, the content of the old item\n is returned. If 'ALL_NEW' is specified, then all the attributes of\n the new version of the item are returned. If 'UPDATED_NEW' is\n specified, the new versions of only the updated attributes are\n returned.\n \"\"\"\n return self.table.layer2.update_item(self, expected_value,\n return_values)\n\n def delete(self, expected_value=None, return_values=None):\n \"\"\"\n Delete the item from DynamoDB.\n\n :type expected_value: dict\n :param expected_value: A dictionary of name/value pairs that\n you expect. This dictionary should have name/value pairs\n where the name is the name of the attribute and the value\n is either the value you are expecting or False if you expect\n the attribute not to exist.\n\n :type return_values: str\n :param return_values: Controls the return of attribute\n name-value pairs before then were changed. Possible\n values are: None or 'ALL_OLD'. If 'ALL_OLD' is\n specified and the item is overwritten, the content\n of the old item is returned.\n \"\"\"\n return self.table.layer2.delete_item(self, expected_value,\n return_values)\n\n def put(self, expected_value=None, return_values=None):\n \"\"\"\n Store a new item or completely replace an existing item\n in Amazon DynamoDB.\n\n :type expected_value: dict\n :param expected_value: A dictionary of name/value pairs that\n you expect. This dictionary should have name/value pairs\n where the name is the name of the attribute and the value\n is either the value you are expecting or False if you expect\n the attribute not to exist.\n\n :type return_values: str\n :param return_values: Controls the return of attribute\n name-value pairs before then were changed. Possible\n values are: None or 'ALL_OLD'. If 'ALL_OLD' is\n specified and the item is overwritten, the content\n of the old item is returned.\n \"\"\"\n return self.table.layer2.put_item(self, expected_value, return_values)\n\n def __setitem__(self, key, value):\n \"\"\"Overrwrite the setter to instead update the _updates\n method so this can act like a normal dict\"\"\"\n if self._updates is not None:\n self.put_attribute(key, value)\n dict.__setitem__(self, key, value)\n\n def __delitem__(self, key):\n \"\"\"Remove this key from the items\"\"\"\n if self._updates is not None:\n self.delete_attribute(key)\n dict.__delitem__(self, key)\n\n # Allow this item to still be pickled\n def __getstate__(self):\n return self.__dict__\n def __setstate__(self, d):\n self.__dict__.update(d)\n", "path": "boto/dynamodb/item.py"}], "after_files": [{"content": "# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
All Rights Reserved\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n\nfrom boto.dynamodb.exceptions import DynamoDBItemError\n\n\nclass Item(dict):\n \"\"\"\n An item in Amazon DynamoDB.\n\n :ivar hash_key: The HashKey of this item.\n :ivar range_key: The RangeKey of this item or None if no RangeKey\n is defined.\n :ivar hash_key_name: The name of the HashKey associated with this item.\n :ivar range_key_name: The name of the RangeKey associated with this item.\n :ivar table: The Table this item belongs to.\n \"\"\"\n \n def __init__(self, table, hash_key=None, range_key=None, attrs=None):\n self.table = table\n self._updates = None\n self._hash_key_name = self.table.schema.hash_key_name\n self._range_key_name = self.table.schema.range_key_name\n if attrs == None:\n attrs = {}\n if hash_key == None:\n hash_key = attrs.get(self._hash_key_name, None)\n self[self._hash_key_name] = hash_key\n if self._range_key_name:\n if range_key == None:\n range_key = attrs.get(self._range_key_name, None)\n self[self._range_key_name] = range_key\n self._updates = {}\n for key, value in attrs.items():\n if key != self._hash_key_name and key != self._range_key_name:\n self[key] = value\n self.consumed_units = 0\n\n @property\n def hash_key(self):\n return self[self._hash_key_name]\n\n @property\n def range_key(self):\n return self.get(self._range_key_name)\n\n @property\n def hash_key_name(self):\n return self._hash_key_name\n\n @property\n def range_key_name(self):\n return self._range_key_name\n\n def add_attribute(self, attr_name, attr_value):\n \"\"\"\n Queue the addition of an attribute to an item in DynamoDB.\n This will eventually result in an UpdateItem request being issued\n with an update action of ADD when the save method is called.\n\n :type attr_name: str\n :param attr_name: Name of the attribute you want to alter.\n\n :type attr_value: int|long|float|set\n :param attr_value: Value which is to be added to the attribute.\n \"\"\"\n self._updates[attr_name] = (\"ADD\", attr_value)\n\n def delete_attribute(self, attr_name, attr_value=None):\n \"\"\"\n Queue the deletion of an attribute from an item in DynamoDB.\n This call will result in a UpdateItem request being issued\n with update action of DELETE when the save method is called.\n\n :type attr_name: str\n :param attr_name: Name of the attribute you want to alter.\n\n :type attr_value: set\n :param attr_value: A set of values to be removed from the attribute.\n This parameter is optional. 
If None, the whole attribute is\n removed from the item.\n \"\"\"\n self._updates[attr_name] = (\"DELETE\", attr_value)\n\n def put_attribute(self, attr_name, attr_value):\n \"\"\"\n Queue the putting of an attribute to an item in DynamoDB.\n This call will result in an UpdateItem request being issued\n with the update action of PUT when the save method is called.\n\n :type attr_name: str\n :param attr_name: Name of the attribute you want to alter.\n\n :type attr_value: int|long|float|str|set\n :param attr_value: New value of the attribute.\n \"\"\"\n self._updates[attr_name] = (\"PUT\", attr_value)\n\n def save(self, expected_value=None, return_values=None):\n \"\"\"\n Commits pending updates to Amazon DynamoDB.\n\n :type expected_value: dict\n :param expected_value: A dictionary of name/value pairs that\n you expect. This dictionary should have name/value pairs\n where the name is the name of the attribute and the value is\n either the value you are expecting or False if you expect\n the attribute not to exist.\n\n :type return_values: str\n :param return_values: Controls the return of attribute name/value pairs\n before they were updated. Possible values are: None, 'ALL_OLD',\n 'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is\n specified and the item is overwritten, the content of the old item\n is returned. If 'ALL_NEW' is specified, then all the attributes of\n the new version of the item are returned. If 'UPDATED_NEW' is\n specified, the new versions of only the updated attributes are\n returned.\n \"\"\"\n return self.table.layer2.update_item(self, expected_value,\n return_values)\n\n def delete(self, expected_value=None, return_values=None):\n \"\"\"\n Delete the item from DynamoDB.\n\n :type expected_value: dict\n :param expected_value: A dictionary of name/value pairs that\n you expect. This dictionary should have name/value pairs\n where the name is the name of the attribute and the value\n is either the value you are expecting or False if you expect\n the attribute not to exist.\n\n :type return_values: str\n :param return_values: Controls the return of attribute\n name-value pairs before then were changed. Possible\n values are: None or 'ALL_OLD'. If 'ALL_OLD' is\n specified and the item is overwritten, the content\n of the old item is returned.\n \"\"\"\n return self.table.layer2.delete_item(self, expected_value,\n return_values)\n\n def put(self, expected_value=None, return_values=None):\n \"\"\"\n Store a new item or completely replace an existing item\n in Amazon DynamoDB.\n\n :type expected_value: dict\n :param expected_value: A dictionary of name/value pairs that\n you expect. This dictionary should have name/value pairs\n where the name is the name of the attribute and the value\n is either the value you are expecting or False if you expect\n the attribute not to exist.\n\n :type return_values: str\n :param return_values: Controls the return of attribute\n name-value pairs before then were changed. Possible\n values are: None or 'ALL_OLD'. 
If 'ALL_OLD' is\n specified and the item is overwritten, the content\n of the old item is returned.\n \"\"\"\n return self.table.layer2.put_item(self, expected_value, return_values)\n\n def __setitem__(self, key, value):\n \"\"\"Overrwrite the setter to instead update the _updates\n method so this can act like a normal dict\"\"\"\n if self._updates is not None:\n self.put_attribute(key, value)\n dict.__setitem__(self, key, value)\n\n def __delitem__(self, key):\n \"\"\"Remove this key from the items\"\"\"\n if self._updates is not None:\n self.delete_attribute(key)\n dict.__delitem__(self, key)\n\n # Allow this item to still be pickled\n def __getstate__(self):\n return self.__dict__\n def __setstate__(self, d):\n self.__dict__.update(d)\n", "path": "boto/dynamodb/item.py"}]}
2771
152
gh_patches_debug_21603
rasdani/github-patches
git_diff
deepchecks__deepchecks-1190
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG][CV] Model Error Analysis - the exception for task OTHER is different than other checks **Describe the bug** When running a OTHER task, the Model Error Analysis exception is different from the rest of the checks. Need to standardize it. **To Reproduce** Run a "OTHER" task in the model performance suite. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `deepchecks/vision/checks/performance/model_error_analysis.py` Content: ``` 1 # ---------------------------------------------------------------------------- 2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com) 3 # 4 # This file is part of Deepchecks. 5 # Deepchecks is distributed under the terms of the GNU Affero General 6 # Public License (version 3 or later). 7 # You should have received a copy of the GNU Affero General Public License 8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>. 9 # ---------------------------------------------------------------------------- 10 # 11 """Module containing class performance check.""" 12 import typing as t 13 from collections import defaultdict 14 15 import pandas as pd 16 import torch 17 18 from deepchecks.core import CheckResult, DatasetKind 19 from deepchecks.core.errors import DeepchecksValueError 20 from deepchecks.utils.performance.error_model import error_model_display_dataframe, model_error_contribution 21 from deepchecks.utils.single_sample_metrics import per_sample_cross_entropy 22 from deepchecks.vision.utils.image_properties import default_image_properties, validate_properties 23 from deepchecks.vision import TrainTestCheck, Context, Batch 24 from deepchecks.vision.vision_data import TaskType 25 from deepchecks.vision.metrics_utils.iou_utils import per_sample_mean_iou 26 27 __all__ = ['ModelErrorAnalysis'] 28 29 30 class ModelErrorAnalysis(TrainTestCheck): 31 """Find the properties that best split the data into segments of high and low model error. 32 33 The check trains a regression model to predict the error of the user's model. Then, the properties scoring the 34 highest feature importance for the error regression model are selected and the distribution of the error vs the 35 property values is plotted. The check results are shown only if the error regression model manages to predict the 36 error well enough. 37 38 Parameters 39 ---------- 40 image_properties : List[Dict[str, Any]], default: None 41 List of properties. Replaces the default deepchecks properties. 42 Each property is dictionary with keys 'name' (str), 'method' (Callable) and 'output_type' (str), 43 representing attributes of said method. 'output_type' must be one of 'continuous'/'discrete' 44 max_properties_to_show : int , default: 3 45 maximal number of properties to show error distribution for. 46 min_property_contribution : float , default: 0.15 47 minimum feature importance of a property to the error regression model 48 in order to show the property. 49 min_error_model_score : float , default: 0.5 50 minimum r^2 score of the error regression model for displaying the check. 51 min_segment_size : float , default: 0.05 52 minimal fraction of data that can comprise a weak segment. 53 n_display_samples : int , default: 5_000 54 number of samples to display in scatter plot. 55 random_state : int, default: 42 56 random seed for all check internals. 
57 """ 58 59 def __init__(self, 60 image_properties: t.List[t.Dict[str, t.Any]] = None, 61 max_properties_to_show: int = 20, 62 min_property_contribution: float = 0.15, 63 min_error_model_score: float = 0.5, 64 min_segment_size: float = 0.05, 65 n_display_samples: int = 5_000, 66 random_state: int = 42, 67 **kwargs): 68 super().__init__(**kwargs) 69 self.random_state = random_state 70 self.min_error_model_score = min_error_model_score 71 self.min_segment_size = min_segment_size 72 self.max_properties_to_show = max_properties_to_show 73 self.min_property_contribution = min_property_contribution 74 self.n_display_samples = n_display_samples 75 self._train_properties = None 76 self._test_properties = None 77 self._train_scores = None 78 self._test_scores = None 79 80 if image_properties is None: 81 self.image_properties = default_image_properties 82 else: 83 validate_properties(image_properties) 84 self.image_properties = image_properties 85 86 def initialize_run(self, context: Context): 87 """Initialize property and score lists.""" 88 self._train_properties = defaultdict(list) 89 self._test_properties = defaultdict(list) 90 self._train_scores = [] 91 self._test_scores = [] 92 93 def update(self, context: Context, batch: Batch, dataset_kind): 94 """Accumulate property data of images and scores.""" 95 if dataset_kind == DatasetKind.TRAIN: 96 dataset = context.train 97 properties = self._train_properties 98 scores = self._train_scores 99 elif dataset_kind == DatasetKind.TEST: 100 dataset = context.test 101 properties = self._test_properties 102 scores = self._test_scores 103 else: 104 raise RuntimeError( 105 'Internal Error! Part of code that must ' 106 'be unreacheable was reached.' 107 ) 108 109 images = batch.images 110 predictions = batch.predictions 111 labels = batch.labels 112 113 for single_property in self.image_properties: 114 properties[single_property['name']].extend(single_property['method'](images)) 115 116 if dataset.task_type == TaskType.CLASSIFICATION: 117 def scoring_func(predictions, labels): 118 return per_sample_cross_entropy(labels, predictions) 119 elif dataset.task_type == TaskType.OBJECT_DETECTION: 120 def scoring_func(predictions, labels): 121 return per_sample_mean_iou(predictions, labels) 122 else: 123 raise DeepchecksValueError(f'Unsupported task type {dataset.task_type}') 124 125 if isinstance(predictions, torch.Tensor): 126 predictions = predictions.cpu().detach().numpy() 127 if isinstance(labels, torch.Tensor): 128 labels = labels.cpu().detach().numpy() 129 130 # get score using scoring_function 131 scores.extend(scoring_func(predictions, labels)) 132 133 def compute(self, context: Context) -> CheckResult: 134 """Find segments that contribute to model error. 
135 136 Returns 137 ------- 138 CheckResult: 139 value: dictionary of details for each property segment that split the effect on the error of the model 140 display: plots of results 141 """ 142 # build dataframe of properties and scores 143 train_property_df = pd.DataFrame(self._train_properties).dropna(axis=1, how='all') 144 test_property_df = pd.DataFrame(self._test_properties)[train_property_df.columns] 145 146 error_fi, error_model_predicted = \ 147 model_error_contribution(train_property_df, 148 self._train_scores, 149 test_property_df, 150 self._test_scores, 151 train_property_df.columns.to_list(), 152 [], 153 min_error_model_score=self.min_error_model_score, 154 random_state=self.random_state) 155 156 display, value = error_model_display_dataframe(error_fi, 157 error_model_predicted, 158 test_property_df, 159 [], 160 self.max_properties_to_show, 161 self.min_property_contribution, 162 self.n_display_samples, 163 self.min_segment_size, 164 self.random_state) 165 166 headnote = """<span> 167 The following graphs show the distribution of error for top properties that are most useful for 168 distinguishing high error samples from low error samples. 169 </span>""" 170 display = [headnote] + display if display else None 171 172 return CheckResult(value, display=display) 173 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/deepchecks/vision/checks/performance/model_error_analysis.py b/deepchecks/vision/checks/performance/model_error_analysis.py --- a/deepchecks/vision/checks/performance/model_error_analysis.py +++ b/deepchecks/vision/checks/performance/model_error_analysis.py @@ -85,6 +85,7 @@ def initialize_run(self, context: Context): """Initialize property and score lists.""" + context.assert_task_type(TaskType.CLASSIFICATION, TaskType.OBJECT_DETECTION) self._train_properties = defaultdict(list) self._test_properties = defaultdict(list) self._train_scores = [] @@ -120,7 +121,7 @@ def scoring_func(predictions, labels): return per_sample_mean_iou(predictions, labels) else: - raise DeepchecksValueError(f'Unsupported task type {dataset.task_type}') + raise DeepchecksValueError(f'Should not reach here! Unsupported task type {dataset.task_type}') if isinstance(predictions, torch.Tensor): predictions = predictions.cpu().detach().numpy()
{"golden_diff": "diff --git a/deepchecks/vision/checks/performance/model_error_analysis.py b/deepchecks/vision/checks/performance/model_error_analysis.py\n--- a/deepchecks/vision/checks/performance/model_error_analysis.py\n+++ b/deepchecks/vision/checks/performance/model_error_analysis.py\n@@ -85,6 +85,7 @@\n \n def initialize_run(self, context: Context):\n \"\"\"Initialize property and score lists.\"\"\"\n+ context.assert_task_type(TaskType.CLASSIFICATION, TaskType.OBJECT_DETECTION)\n self._train_properties = defaultdict(list)\n self._test_properties = defaultdict(list)\n self._train_scores = []\n@@ -120,7 +121,7 @@\n def scoring_func(predictions, labels):\n return per_sample_mean_iou(predictions, labels)\n else:\n- raise DeepchecksValueError(f'Unsupported task type {dataset.task_type}')\n+ raise DeepchecksValueError(f'Should not reach here! Unsupported task type {dataset.task_type}')\n \n if isinstance(predictions, torch.Tensor):\n predictions = predictions.cpu().detach().numpy()\n", "issue": "[BUG][CV] Model Error Analysis - the exception for task OTHER is different than other checks\n**Describe the bug**\r\nWhen running a OTHER task, the Model Error Analysis exception is different from the rest of the checks. Need to standardize it.\r\n\r\n**To Reproduce**\r\nRun a \"OTHER\" task in the model performance suite. \n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing class performance check.\"\"\"\nimport typing as t\nfrom collections import defaultdict\n\nimport pandas as pd\nimport torch\n\nfrom deepchecks.core import CheckResult, DatasetKind\nfrom deepchecks.core.errors import DeepchecksValueError\nfrom deepchecks.utils.performance.error_model import error_model_display_dataframe, model_error_contribution\nfrom deepchecks.utils.single_sample_metrics import per_sample_cross_entropy\nfrom deepchecks.vision.utils.image_properties import default_image_properties, validate_properties\nfrom deepchecks.vision import TrainTestCheck, Context, Batch\nfrom deepchecks.vision.vision_data import TaskType\nfrom deepchecks.vision.metrics_utils.iou_utils import per_sample_mean_iou\n\n__all__ = ['ModelErrorAnalysis']\n\n\nclass ModelErrorAnalysis(TrainTestCheck):\n \"\"\"Find the properties that best split the data into segments of high and low model error.\n\n The check trains a regression model to predict the error of the user's model. Then, the properties scoring the\n highest feature importance for the error regression model are selected and the distribution of the error vs the\n property values is plotted. The check results are shown only if the error regression model manages to predict the\n error well enough.\n\n Parameters\n ----------\n image_properties : List[Dict[str, Any]], default: None\n List of properties. Replaces the default deepchecks properties.\n Each property is dictionary with keys 'name' (str), 'method' (Callable) and 'output_type' (str),\n representing attributes of said method. 
'output_type' must be one of 'continuous'/'discrete'\n max_properties_to_show : int , default: 3\n maximal number of properties to show error distribution for.\n min_property_contribution : float , default: 0.15\n minimum feature importance of a property to the error regression model\n in order to show the property.\n min_error_model_score : float , default: 0.5\n minimum r^2 score of the error regression model for displaying the check.\n min_segment_size : float , default: 0.05\n minimal fraction of data that can comprise a weak segment.\n n_display_samples : int , default: 5_000\n number of samples to display in scatter plot.\n random_state : int, default: 42\n random seed for all check internals.\n \"\"\"\n\n def __init__(self,\n image_properties: t.List[t.Dict[str, t.Any]] = None,\n max_properties_to_show: int = 20,\n min_property_contribution: float = 0.15,\n min_error_model_score: float = 0.5,\n min_segment_size: float = 0.05,\n n_display_samples: int = 5_000,\n random_state: int = 42,\n **kwargs):\n super().__init__(**kwargs)\n self.random_state = random_state\n self.min_error_model_score = min_error_model_score\n self.min_segment_size = min_segment_size\n self.max_properties_to_show = max_properties_to_show\n self.min_property_contribution = min_property_contribution\n self.n_display_samples = n_display_samples\n self._train_properties = None\n self._test_properties = None\n self._train_scores = None\n self._test_scores = None\n\n if image_properties is None:\n self.image_properties = default_image_properties\n else:\n validate_properties(image_properties)\n self.image_properties = image_properties\n\n def initialize_run(self, context: Context):\n \"\"\"Initialize property and score lists.\"\"\"\n self._train_properties = defaultdict(list)\n self._test_properties = defaultdict(list)\n self._train_scores = []\n self._test_scores = []\n\n def update(self, context: Context, batch: Batch, dataset_kind):\n \"\"\"Accumulate property data of images and scores.\"\"\"\n if dataset_kind == DatasetKind.TRAIN:\n dataset = context.train\n properties = self._train_properties\n scores = self._train_scores\n elif dataset_kind == DatasetKind.TEST:\n dataset = context.test\n properties = self._test_properties\n scores = self._test_scores\n else:\n raise RuntimeError(\n 'Internal Error! 
Part of code that must '\n 'be unreacheable was reached.'\n )\n\n images = batch.images\n predictions = batch.predictions\n labels = batch.labels\n\n for single_property in self.image_properties:\n properties[single_property['name']].extend(single_property['method'](images))\n\n if dataset.task_type == TaskType.CLASSIFICATION:\n def scoring_func(predictions, labels):\n return per_sample_cross_entropy(labels, predictions)\n elif dataset.task_type == TaskType.OBJECT_DETECTION:\n def scoring_func(predictions, labels):\n return per_sample_mean_iou(predictions, labels)\n else:\n raise DeepchecksValueError(f'Unsupported task type {dataset.task_type}')\n\n if isinstance(predictions, torch.Tensor):\n predictions = predictions.cpu().detach().numpy()\n if isinstance(labels, torch.Tensor):\n labels = labels.cpu().detach().numpy()\n\n # get score using scoring_function\n scores.extend(scoring_func(predictions, labels))\n\n def compute(self, context: Context) -> CheckResult:\n \"\"\"Find segments that contribute to model error.\n\n Returns\n -------\n CheckResult:\n value: dictionary of details for each property segment that split the effect on the error of the model\n display: plots of results\n \"\"\"\n # build dataframe of properties and scores\n train_property_df = pd.DataFrame(self._train_properties).dropna(axis=1, how='all')\n test_property_df = pd.DataFrame(self._test_properties)[train_property_df.columns]\n\n error_fi, error_model_predicted = \\\n model_error_contribution(train_property_df,\n self._train_scores,\n test_property_df,\n self._test_scores,\n train_property_df.columns.to_list(),\n [],\n min_error_model_score=self.min_error_model_score,\n random_state=self.random_state)\n\n display, value = error_model_display_dataframe(error_fi,\n error_model_predicted,\n test_property_df,\n [],\n self.max_properties_to_show,\n self.min_property_contribution,\n self.n_display_samples,\n self.min_segment_size,\n self.random_state)\n\n headnote = \"\"\"<span>\n The following graphs show the distribution of error for top properties that are most useful for\n distinguishing high error samples from low error samples.\n </span>\"\"\"\n display = [headnote] + display if display else None\n\n return CheckResult(value, display=display)\n", "path": "deepchecks/vision/checks/performance/model_error_analysis.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing class performance check.\"\"\"\nimport typing as t\nfrom collections import defaultdict\n\nimport pandas as pd\nimport torch\n\nfrom deepchecks.core import CheckResult, DatasetKind\nfrom deepchecks.core.errors import DeepchecksValueError\nfrom deepchecks.utils.performance.error_model import error_model_display_dataframe, model_error_contribution\nfrom deepchecks.utils.single_sample_metrics import per_sample_cross_entropy\nfrom deepchecks.vision.utils.image_properties import default_image_properties, validate_properties\nfrom deepchecks.vision import TrainTestCheck, Context, Batch\nfrom deepchecks.vision.vision_data import TaskType\nfrom deepchecks.vision.metrics_utils.iou_utils import per_sample_mean_iou\n\n__all__ = ['ModelErrorAnalysis']\n\n\nclass ModelErrorAnalysis(TrainTestCheck):\n \"\"\"Find the properties that best split the data into segments of high and low model error.\n\n The check trains a regression model to predict the error of the user's model. Then, the properties scoring the\n highest feature importance for the error regression model are selected and the distribution of the error vs the\n property values is plotted. The check results are shown only if the error regression model manages to predict the\n error well enough.\n\n Parameters\n ----------\n image_properties : List[Dict[str, Any]], default: None\n List of properties. Replaces the default deepchecks properties.\n Each property is dictionary with keys 'name' (str), 'method' (Callable) and 'output_type' (str),\n representing attributes of said method. 'output_type' must be one of 'continuous'/'discrete'\n max_properties_to_show : int , default: 3\n maximal number of properties to show error distribution for.\n min_property_contribution : float , default: 0.15\n minimum feature importance of a property to the error regression model\n in order to show the property.\n min_error_model_score : float , default: 0.5\n minimum r^2 score of the error regression model for displaying the check.\n min_segment_size : float , default: 0.05\n minimal fraction of data that can comprise a weak segment.\n n_display_samples : int , default: 5_000\n number of samples to display in scatter plot.\n random_state : int, default: 42\n random seed for all check internals.\n \"\"\"\n\n def __init__(self,\n image_properties: t.List[t.Dict[str, t.Any]] = None,\n max_properties_to_show: int = 20,\n min_property_contribution: float = 0.15,\n min_error_model_score: float = 0.5,\n min_segment_size: float = 0.05,\n n_display_samples: int = 5_000,\n random_state: int = 42,\n **kwargs):\n super().__init__(**kwargs)\n self.random_state = random_state\n self.min_error_model_score = min_error_model_score\n self.min_segment_size = min_segment_size\n self.max_properties_to_show = max_properties_to_show\n self.min_property_contribution = min_property_contribution\n self.n_display_samples = n_display_samples\n self._train_properties = None\n self._test_properties = None\n self._train_scores = None\n self._test_scores = None\n\n if image_properties is None:\n self.image_properties = default_image_properties\n else:\n validate_properties(image_properties)\n self.image_properties = image_properties\n\n def initialize_run(self, context: Context):\n \"\"\"Initialize property and score lists.\"\"\"\n context.assert_task_type(TaskType.CLASSIFICATION, TaskType.OBJECT_DETECTION)\n self._train_properties = 
defaultdict(list)\n self._test_properties = defaultdict(list)\n self._train_scores = []\n self._test_scores = []\n\n def update(self, context: Context, batch: Batch, dataset_kind):\n \"\"\"Accumulate property data of images and scores.\"\"\"\n if dataset_kind == DatasetKind.TRAIN:\n dataset = context.train\n properties = self._train_properties\n scores = self._train_scores\n elif dataset_kind == DatasetKind.TEST:\n dataset = context.test\n properties = self._test_properties\n scores = self._test_scores\n else:\n raise RuntimeError(\n 'Internal Error! Part of code that must '\n 'be unreacheable was reached.'\n )\n\n images = batch.images\n predictions = batch.predictions\n labels = batch.labels\n\n for single_property in self.image_properties:\n properties[single_property['name']].extend(single_property['method'](images))\n\n if dataset.task_type == TaskType.CLASSIFICATION:\n def scoring_func(predictions, labels):\n return per_sample_cross_entropy(labels, predictions)\n elif dataset.task_type == TaskType.OBJECT_DETECTION:\n def scoring_func(predictions, labels):\n return per_sample_mean_iou(predictions, labels)\n else:\n raise DeepchecksValueError(f'Should not reach here! Unsupported task type {dataset.task_type}')\n\n if isinstance(predictions, torch.Tensor):\n predictions = predictions.cpu().detach().numpy()\n if isinstance(labels, torch.Tensor):\n labels = labels.cpu().detach().numpy()\n\n # get score using scoring_function\n scores.extend(scoring_func(predictions, labels))\n\n def compute(self, context: Context) -> CheckResult:\n \"\"\"Find segments that contribute to model error.\n\n Returns\n -------\n CheckResult:\n value: dictionary of details for each property segment that split the effect on the error of the model\n display: plots of results\n \"\"\"\n # build dataframe of properties and scores\n train_property_df = pd.DataFrame(self._train_properties).dropna(axis=1, how='all')\n test_property_df = pd.DataFrame(self._test_properties)[train_property_df.columns]\n\n error_fi, error_model_predicted = \\\n model_error_contribution(train_property_df,\n self._train_scores,\n test_property_df,\n self._test_scores,\n train_property_df.columns.to_list(),\n [],\n min_error_model_score=self.min_error_model_score,\n random_state=self.random_state)\n\n display, value = error_model_display_dataframe(error_fi,\n error_model_predicted,\n test_property_df,\n [],\n self.max_properties_to_show,\n self.min_property_contribution,\n self.n_display_samples,\n self.min_segment_size,\n self.random_state)\n\n headnote = \"\"\"<span>\n The following graphs show the distribution of error for top properties that are most useful for\n distinguishing high error samples from low error samples.\n </span>\"\"\"\n display = [headnote] + display if display else None\n\n return CheckResult(value, display=display)\n", "path": "deepchecks/vision/checks/performance/model_error_analysis.py"}]}
2,242
233
gh_patches_debug_11212
rasdani/github-patches
git_diff
conda__conda-build-2420
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- noarch: python packages strip data_files in setup.py, `data_files` can be used to deliver support files, especially to `$PREFIX/share`. `noarch: python` [doesn't know how to handle these](https://github.com/conda/conda-build/blob/3.0.23/conda_build/noarch_python.py#L90) and ends up omitting them. It seems like 'unknown' files in the prefix should be included as-is relative to $PREFIX (exactly as in a non-noarch package) rather than excluded. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conda_build/noarch_python.py` Content: ``` 1 import io 2 import json 3 import locale 4 import logging 5 import os 6 from os.path import basename, dirname, isdir, join, isfile 7 import shutil 8 import sys 9 10 ISWIN = sys.platform.startswith('win') 11 12 13 def _force_dir(dirname): 14 if not isdir(dirname): 15 os.makedirs(dirname) 16 17 18 def _error_exit(exit_message): 19 sys.exit("[noarch_python] %s" % exit_message) 20 21 22 def rewrite_script(fn, prefix): 23 """Take a file from the bin directory and rewrite it into the python-scripts 24 directory with the same permissions after it passes some sanity checks for 25 noarch pacakges""" 26 27 # Load and check the source file for not being a binary 28 src = join(prefix, 'Scripts' if ISWIN else 'bin', fn) 29 with io.open(src, encoding=locale.getpreferredencoding()) as fi: 30 try: 31 data = fi.read() 32 except UnicodeDecodeError: # file is binary 33 _error_exit("Noarch package contains binary script: %s" % fn) 34 src_mode = os.stat(src).st_mode 35 os.unlink(src) 36 37 # Get rid of '-script.py' suffix on Windows 38 if ISWIN and fn.endswith('-script.py'): 39 fn = fn[:-10] 40 41 # Rewrite the file to the python-scripts directory 42 dst_dir = join(prefix, 'python-scripts') 43 _force_dir(dst_dir) 44 dst = join(dst_dir, fn) 45 with open(dst, 'w') as fo: 46 fo.write(data) 47 os.chmod(dst, src_mode) 48 return fn 49 50 51 def handle_file(f, d, prefix): 52 """Process a file for inclusion in a noarch python package. 53 """ 54 path = join(prefix, f) 55 56 # Ignore egg-info and pyc files. 57 if f.endswith(('.egg-info', '.pyc', '.pyo')): 58 os.unlink(path) 59 60 # The presence of .so indicated this is not a noarch package 61 elif f.endswith(('.so', '.dll', '.pyd', '.exe', '.dylib')): 62 if f.endswith('.exe') and (isfile(os.path.join(prefix, f[:-4] + '-script.py')) or 63 basename(f[:-4]) in d['python-scripts']): 64 os.unlink(path) # this is an entry point with a matching xx-script.py 65 return 66 _error_exit("Error: Binary library or executable found: %s" % f) 67 68 elif 'site-packages' in f: 69 nsp = join(prefix, 'site-packages') 70 _force_dir(nsp) 71 72 g = f[f.find('site-packages'):] 73 dst = join(prefix, g) 74 dst_dir = dirname(dst) 75 _force_dir(dst_dir) 76 os.rename(path, dst) 77 d['site-packages'].append(g[14:]) 78 79 # Treat scripts specially with the logic from above 80 elif f.startswith(('bin/', 'Scripts')): 81 fn = basename(path) 82 fn = rewrite_script(fn, prefix) 83 d['python-scripts'].append(fn) 84 85 # Include examples in the metadata doc 86 elif f.startswith(('Examples/', 'Examples\\')): 87 d['Examples'].append(f[9:]) 88 else: 89 log = logging.getLogger(__name__) 90 log.warn("Don't know how to handle file: %s. Omitting it from package." 
% f) 91 os.unlink(path) 92 93 94 def populate_files(m, files, prefix, entry_point_scripts=None): 95 d = {'dist': m.dist(), 96 'site-packages': [], 97 'python-scripts': [], 98 'Examples': []} 99 100 # Populate site-package, python-scripts, and Examples into above 101 for f in files: 102 handle_file(f, d, prefix) 103 104 # Windows path conversion 105 if ISWIN: 106 for fns in (d['site-packages'], d['Examples']): 107 for i, fn in enumerate(fns): 108 fns[i] = fn.replace('\\', '/') 109 110 if entry_point_scripts: 111 for entry_point in entry_point_scripts: 112 src = join(prefix, entry_point) 113 if os.path.isfile(src): 114 os.unlink(src) 115 116 return d 117 118 119 def transform(m, files, prefix): 120 bin_dir = join(prefix, 'bin') 121 _force_dir(bin_dir) 122 123 scripts_dir = join(prefix, 'Scripts') 124 _force_dir(scripts_dir) 125 126 name = m.name() 127 128 # Create *nix prelink script 129 # Note: it's important to use LF newlines or it wont work if we build on Win 130 with open(join(bin_dir, '.%s-pre-link.sh' % name), 'wb') as fo: 131 fo.write('''\ 132 #!/bin/bash 133 $PREFIX/bin/python $SOURCE_DIR/link.py 134 '''.encode('utf-8')) 135 136 # Create windows prelink script (be nice and use Windows newlines) 137 with open(join(scripts_dir, '.%s-pre-link.bat' % name), 'wb') as fo: 138 fo.write('''\ 139 @echo off 140 "%PREFIX%\\python.exe" "%SOURCE_DIR%\\link.py" 141 '''.replace('\n', '\r\n').encode('utf-8')) 142 143 d = populate_files(m, files, prefix) 144 145 # Find our way to this directory 146 this_dir = dirname(__file__) 147 148 # copy in windows exe shims if there are any python-scripts 149 if d['python-scripts']: 150 for fn in 'cli-32.exe', 'cli-64.exe': 151 shutil.copyfile(join(this_dir, fn), join(prefix, fn)) 152 153 # Read the local _link.py 154 with open(join(this_dir, '_link.py')) as fi: 155 link_code = fi.read() 156 157 # Write the package metadata, and bumper with code for linking 158 with open(join(prefix, 'link.py'), 'w') as fo: 159 fo.write('DATA = ') 160 json.dump(d, fo, indent=2, sort_keys=True) 161 fo.write('\n## END DATA\n\n') 162 fo.write(link_code) 163 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conda_build/noarch_python.py b/conda_build/noarch_python.py --- a/conda_build/noarch_python.py +++ b/conda_build/noarch_python.py @@ -85,10 +85,11 @@ # Include examples in the metadata doc elif f.startswith(('Examples/', 'Examples\\')): d['Examples'].append(f[9:]) + # No special treatment for other files + # leave them as-is else: log = logging.getLogger(__name__) - log.warn("Don't know how to handle file: %s. Omitting it from package." % f) - os.unlink(path) + log.debug("Don't know how to handle file: %s. Including it as-is." % f) def populate_files(m, files, prefix, entry_point_scripts=None):
{"golden_diff": "diff --git a/conda_build/noarch_python.py b/conda_build/noarch_python.py\n--- a/conda_build/noarch_python.py\n+++ b/conda_build/noarch_python.py\n@@ -85,10 +85,11 @@\n # Include examples in the metadata doc\n elif f.startswith(('Examples/', 'Examples\\\\')):\n d['Examples'].append(f[9:])\n+ # No special treatment for other files\n+ # leave them as-is\n else:\n log = logging.getLogger(__name__)\n- log.warn(\"Don't know how to handle file: %s. Omitting it from package.\" % f)\n- os.unlink(path)\n+ log.debug(\"Don't know how to handle file: %s. Including it as-is.\" % f)\n \n \n def populate_files(m, files, prefix, entry_point_scripts=None):\n", "issue": "noarch: python packages strip data_files\nin setup.py, `data_files` can be used to deliver support files, especially to `$PREFIX/share`. `noarch: python` [doesn't know how to handle these](https://github.com/conda/conda-build/blob/3.0.23/conda_build/noarch_python.py#L90) and ends up omitting them.\r\n\r\nIt seems like 'unknown' files in the prefix should be included as-is relative to $PREFIX (exactly as in a non-noarch package) rather than excluded.\n", "before_files": [{"content": "import io\nimport json\nimport locale\nimport logging\nimport os\nfrom os.path import basename, dirname, isdir, join, isfile\nimport shutil\nimport sys\n\nISWIN = sys.platform.startswith('win')\n\n\ndef _force_dir(dirname):\n if not isdir(dirname):\n os.makedirs(dirname)\n\n\ndef _error_exit(exit_message):\n sys.exit(\"[noarch_python] %s\" % exit_message)\n\n\ndef rewrite_script(fn, prefix):\n \"\"\"Take a file from the bin directory and rewrite it into the python-scripts\n directory with the same permissions after it passes some sanity checks for\n noarch pacakges\"\"\"\n\n # Load and check the source file for not being a binary\n src = join(prefix, 'Scripts' if ISWIN else 'bin', fn)\n with io.open(src, encoding=locale.getpreferredencoding()) as fi:\n try:\n data = fi.read()\n except UnicodeDecodeError: # file is binary\n _error_exit(\"Noarch package contains binary script: %s\" % fn)\n src_mode = os.stat(src).st_mode\n os.unlink(src)\n\n # Get rid of '-script.py' suffix on Windows\n if ISWIN and fn.endswith('-script.py'):\n fn = fn[:-10]\n\n # Rewrite the file to the python-scripts directory\n dst_dir = join(prefix, 'python-scripts')\n _force_dir(dst_dir)\n dst = join(dst_dir, fn)\n with open(dst, 'w') as fo:\n fo.write(data)\n os.chmod(dst, src_mode)\n return fn\n\n\ndef handle_file(f, d, prefix):\n \"\"\"Process a file for inclusion in a noarch python package.\n \"\"\"\n path = join(prefix, f)\n\n # Ignore egg-info and pyc files.\n if f.endswith(('.egg-info', '.pyc', '.pyo')):\n os.unlink(path)\n\n # The presence of .so indicated this is not a noarch package\n elif f.endswith(('.so', '.dll', '.pyd', '.exe', '.dylib')):\n if f.endswith('.exe') and (isfile(os.path.join(prefix, f[:-4] + '-script.py')) or\n basename(f[:-4]) in d['python-scripts']):\n os.unlink(path) # this is an entry point with a matching xx-script.py\n return\n _error_exit(\"Error: Binary library or executable found: %s\" % f)\n\n elif 'site-packages' in f:\n nsp = join(prefix, 'site-packages')\n _force_dir(nsp)\n\n g = f[f.find('site-packages'):]\n dst = join(prefix, g)\n dst_dir = dirname(dst)\n _force_dir(dst_dir)\n os.rename(path, dst)\n d['site-packages'].append(g[14:])\n\n # Treat scripts specially with the logic from above\n elif f.startswith(('bin/', 'Scripts')):\n fn = basename(path)\n fn = rewrite_script(fn, prefix)\n d['python-scripts'].append(fn)\n\n # Include 
examples in the metadata doc\n elif f.startswith(('Examples/', 'Examples\\\\')):\n d['Examples'].append(f[9:])\n else:\n log = logging.getLogger(__name__)\n log.warn(\"Don't know how to handle file: %s. Omitting it from package.\" % f)\n os.unlink(path)\n\n\ndef populate_files(m, files, prefix, entry_point_scripts=None):\n d = {'dist': m.dist(),\n 'site-packages': [],\n 'python-scripts': [],\n 'Examples': []}\n\n # Populate site-package, python-scripts, and Examples into above\n for f in files:\n handle_file(f, d, prefix)\n\n # Windows path conversion\n if ISWIN:\n for fns in (d['site-packages'], d['Examples']):\n for i, fn in enumerate(fns):\n fns[i] = fn.replace('\\\\', '/')\n\n if entry_point_scripts:\n for entry_point in entry_point_scripts:\n src = join(prefix, entry_point)\n if os.path.isfile(src):\n os.unlink(src)\n\n return d\n\n\ndef transform(m, files, prefix):\n bin_dir = join(prefix, 'bin')\n _force_dir(bin_dir)\n\n scripts_dir = join(prefix, 'Scripts')\n _force_dir(scripts_dir)\n\n name = m.name()\n\n # Create *nix prelink script\n # Note: it's important to use LF newlines or it wont work if we build on Win\n with open(join(bin_dir, '.%s-pre-link.sh' % name), 'wb') as fo:\n fo.write('''\\\n #!/bin/bash\n $PREFIX/bin/python $SOURCE_DIR/link.py\n '''.encode('utf-8'))\n\n # Create windows prelink script (be nice and use Windows newlines)\n with open(join(scripts_dir, '.%s-pre-link.bat' % name), 'wb') as fo:\n fo.write('''\\\n @echo off\n \"%PREFIX%\\\\python.exe\" \"%SOURCE_DIR%\\\\link.py\"\n '''.replace('\\n', '\\r\\n').encode('utf-8'))\n\n d = populate_files(m, files, prefix)\n\n # Find our way to this directory\n this_dir = dirname(__file__)\n\n # copy in windows exe shims if there are any python-scripts\n if d['python-scripts']:\n for fn in 'cli-32.exe', 'cli-64.exe':\n shutil.copyfile(join(this_dir, fn), join(prefix, fn))\n\n # Read the local _link.py\n with open(join(this_dir, '_link.py')) as fi:\n link_code = fi.read()\n\n # Write the package metadata, and bumper with code for linking\n with open(join(prefix, 'link.py'), 'w') as fo:\n fo.write('DATA = ')\n json.dump(d, fo, indent=2, sort_keys=True)\n fo.write('\\n## END DATA\\n\\n')\n fo.write(link_code)\n", "path": "conda_build/noarch_python.py"}], "after_files": [{"content": "import io\nimport json\nimport locale\nimport logging\nimport os\nfrom os.path import basename, dirname, isdir, join, isfile\nimport shutil\nimport sys\n\nISWIN = sys.platform.startswith('win')\n\n\ndef _force_dir(dirname):\n if not isdir(dirname):\n os.makedirs(dirname)\n\n\ndef _error_exit(exit_message):\n sys.exit(\"[noarch_python] %s\" % exit_message)\n\n\ndef rewrite_script(fn, prefix):\n \"\"\"Take a file from the bin directory and rewrite it into the python-scripts\n directory with the same permissions after it passes some sanity checks for\n noarch pacakges\"\"\"\n\n # Load and check the source file for not being a binary\n src = join(prefix, 'Scripts' if ISWIN else 'bin', fn)\n with io.open(src, encoding=locale.getpreferredencoding()) as fi:\n try:\n data = fi.read()\n except UnicodeDecodeError: # file is binary\n _error_exit(\"Noarch package contains binary script: %s\" % fn)\n src_mode = os.stat(src).st_mode\n os.unlink(src)\n\n # Get rid of '-script.py' suffix on Windows\n if ISWIN and fn.endswith('-script.py'):\n fn = fn[:-10]\n\n # Rewrite the file to the python-scripts directory\n dst_dir = join(prefix, 'python-scripts')\n _force_dir(dst_dir)\n dst = join(dst_dir, fn)\n with open(dst, 'w') as fo:\n fo.write(data)\n os.chmod(dst, 
src_mode)\n return fn\n\n\ndef handle_file(f, d, prefix):\n \"\"\"Process a file for inclusion in a noarch python package.\n \"\"\"\n path = join(prefix, f)\n\n # Ignore egg-info and pyc files.\n if f.endswith(('.egg-info', '.pyc', '.pyo')):\n os.unlink(path)\n\n # The presence of .so indicated this is not a noarch package\n elif f.endswith(('.so', '.dll', '.pyd', '.exe', '.dylib')):\n if f.endswith('.exe') and (isfile(os.path.join(prefix, f[:-4] + '-script.py')) or\n basename(f[:-4]) in d['python-scripts']):\n os.unlink(path) # this is an entry point with a matching xx-script.py\n return\n _error_exit(\"Error: Binary library or executable found: %s\" % f)\n\n elif 'site-packages' in f:\n nsp = join(prefix, 'site-packages')\n _force_dir(nsp)\n\n g = f[f.find('site-packages'):]\n dst = join(prefix, g)\n dst_dir = dirname(dst)\n _force_dir(dst_dir)\n os.rename(path, dst)\n d['site-packages'].append(g[14:])\n\n # Treat scripts specially with the logic from above\n elif f.startswith(('bin/', 'Scripts')):\n fn = basename(path)\n fn = rewrite_script(fn, prefix)\n d['python-scripts'].append(fn)\n\n # Include examples in the metadata doc\n elif f.startswith(('Examples/', 'Examples\\\\')):\n d['Examples'].append(f[9:])\n # No special treatment for other files\n # leave them as-is\n else:\n log = logging.getLogger(__name__)\n log.debug(\"Don't know how to handle file: %s. Including it as-is.\" % f)\n\n\ndef populate_files(m, files, prefix, entry_point_scripts=None):\n d = {'dist': m.dist(),\n 'site-packages': [],\n 'python-scripts': [],\n 'Examples': []}\n\n # Populate site-package, python-scripts, and Examples into above\n for f in files:\n handle_file(f, d, prefix)\n\n # Windows path conversion\n if ISWIN:\n for fns in (d['site-packages'], d['Examples']):\n for i, fn in enumerate(fns):\n fns[i] = fn.replace('\\\\', '/')\n\n if entry_point_scripts:\n for entry_point in entry_point_scripts:\n src = join(prefix, entry_point)\n if os.path.isfile(src):\n os.unlink(src)\n\n return d\n\n\ndef transform(m, files, prefix):\n bin_dir = join(prefix, 'bin')\n _force_dir(bin_dir)\n\n scripts_dir = join(prefix, 'Scripts')\n _force_dir(scripts_dir)\n\n name = m.name()\n\n # Create *nix prelink script\n # Note: it's important to use LF newlines or it wont work if we build on Win\n with open(join(bin_dir, '.%s-pre-link.sh' % name), 'wb') as fo:\n fo.write('''\\\n #!/bin/bash\n $PREFIX/bin/python $SOURCE_DIR/link.py\n '''.encode('utf-8'))\n\n # Create windows prelink script (be nice and use Windows newlines)\n with open(join(scripts_dir, '.%s-pre-link.bat' % name), 'wb') as fo:\n fo.write('''\\\n @echo off\n \"%PREFIX%\\\\python.exe\" \"%SOURCE_DIR%\\\\link.py\"\n '''.replace('\\n', '\\r\\n').encode('utf-8'))\n\n d = populate_files(m, files, prefix)\n\n # Find our way to this directory\n this_dir = dirname(__file__)\n\n # copy in windows exe shims if there are any python-scripts\n if d['python-scripts']:\n for fn in 'cli-32.exe', 'cli-64.exe':\n shutil.copyfile(join(this_dir, fn), join(prefix, fn))\n\n # Read the local _link.py\n with open(join(this_dir, '_link.py')) as fi:\n link_code = fi.read()\n\n # Write the package metadata, and bumper with code for linking\n with open(join(prefix, 'link.py'), 'w') as fo:\n fo.write('DATA = ')\n json.dump(d, fo, indent=2, sort_keys=True)\n fo.write('\\n## END DATA\\n\\n')\n fo.write(link_code)\n", "path": "conda_build/noarch_python.py"}]}
2,061
186
gh_patches_debug_18850
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-1066
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- opentelemetry-instrument command can cause recursive creation of subprocesses **Describe your environment** Python3.9, linux. **Steps to reproduce** Using `opentelemetry-instrument` with any exporter or instrumentation which invokes a python subprocess **during initialization**. For example, the `opentelemetry-exporter-gcp-trace` exporter may invoke the `gcloud` (written in python) command in a subprocess to get project information and authentication tokens. The subprocess will then try to autoinstrument, creating a recursive loop of subprocesses being created. **What is the expected behavior?** Auto-instrumentation should not apply to subprocesses created in the `initialize()` phase of auto-instrumentation. The `PYTHONPATH` environment variable should have the `sitecustomize.py` dirname stripped out at the beginning of `sitecustomize.py`. This would prevent subprocesses from being autoinstrumented during setup, which can cause a loop. **What is the actual behavior?** `PYTHONPATH` is correctly stripped later on to avoid this https://github.com/open-telemetry/opentelemetry-python-contrib/blob/e9f83e1292b0fe5f3478c9b23f3b5a5508481e68/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py#L120-L125 However, any subprocesses created in [these lines](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/e9f83e1292b0fe5f3478c9b23f3b5a5508481e68/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py#L114-L117) will cause a loop. **Additional context** I can write a repro if necessary. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from logging import getLogger 16 from os import environ 17 from os.path import abspath, dirname, pathsep 18 from re import sub 19 20 from pkg_resources import iter_entry_points 21 22 from opentelemetry.instrumentation.dependencies import ( 23 get_dist_dependency_conflicts, 24 ) 25 from opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro 26 from opentelemetry.instrumentation.environment_variables import ( 27 OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, 28 ) 29 from opentelemetry.instrumentation.version import __version__ 30 31 logger = getLogger(__name__) 32 33 34 def _load_distros() -> BaseDistro: 35 for entry_point in iter_entry_points("opentelemetry_distro"): 36 try: 37 distro = entry_point.load()() 38 if not isinstance(distro, BaseDistro): 39 logger.debug( 40 "%s is not an OpenTelemetry Distro. 
Skipping", 41 entry_point.name, 42 ) 43 continue 44 logger.debug( 45 "Distribution %s will be configured", entry_point.name 46 ) 47 return distro 48 except Exception as exc: # pylint: disable=broad-except 49 logger.exception( 50 "Distribution %s configuration failed", entry_point.name 51 ) 52 raise exc 53 return DefaultDistro() 54 55 56 def _load_instrumentors(distro): 57 package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, []) 58 if isinstance(package_to_exclude, str): 59 package_to_exclude = package_to_exclude.split(",") 60 # to handle users entering "requests , flask" or "requests, flask" with spaces 61 package_to_exclude = [x.strip() for x in package_to_exclude] 62 63 for entry_point in iter_entry_points("opentelemetry_pre_instrument"): 64 entry_point.load()() 65 66 for entry_point in iter_entry_points("opentelemetry_instrumentor"): 67 if entry_point.name in package_to_exclude: 68 logger.debug( 69 "Instrumentation skipped for library %s", entry_point.name 70 ) 71 continue 72 73 try: 74 conflict = get_dist_dependency_conflicts(entry_point.dist) 75 if conflict: 76 logger.debug( 77 "Skipping instrumentation %s: %s", 78 entry_point.name, 79 conflict, 80 ) 81 continue 82 83 # tell instrumentation to not run dep checks again as we already did it above 84 distro.load_instrumentor(entry_point, skip_dep_check=True) 85 logger.debug("Instrumented %s", entry_point.name) 86 except Exception as exc: # pylint: disable=broad-except 87 logger.exception("Instrumenting of %s failed", entry_point.name) 88 raise exc 89 90 for entry_point in iter_entry_points("opentelemetry_post_instrument"): 91 entry_point.load()() 92 93 94 def _load_configurators(): 95 configured = None 96 for entry_point in iter_entry_points("opentelemetry_configurator"): 97 if configured is not None: 98 logger.warning( 99 "Configuration of %s not loaded, %s already loaded", 100 entry_point.name, 101 configured, 102 ) 103 continue 104 try: 105 entry_point.load()().configure(auto_instrumentation_version=__version__) # type: ignore 106 configured = entry_point.name 107 except Exception as exc: # pylint: disable=broad-except 108 logger.exception("Configuration of %s failed", entry_point.name) 109 raise exc 110 111 112 def initialize(): 113 try: 114 distro = _load_distros() 115 distro.configure() 116 _load_configurators() 117 _load_instrumentors(distro) 118 except Exception: # pylint: disable=broad-except 119 logger.exception("Failed to auto initialize opentelemetry") 120 finally: 121 environ["PYTHONPATH"] = sub( 122 rf"{dirname(abspath(__file__))}{pathsep}?", 123 "", 124 environ["PYTHONPATH"], 125 ) 126 127 128 initialize() 129 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py @@ -110,6 +110,13 @@ def initialize(): + # prevents auto-instrumentation of subprocesses if code execs another python process + environ["PYTHONPATH"] = sub( + rf"{dirname(abspath(__file__))}{pathsep}?", + "", + environ["PYTHONPATH"], + ) + try: distro = _load_distros() distro.configure() @@ -117,12 +124,6 @@ _load_instrumentors(distro) except Exception: # pylint: disable=broad-except logger.exception("Failed to auto initialize opentelemetry") - finally: - environ["PYTHONPATH"] = sub( - rf"{dirname(abspath(__file__))}{pathsep}?", - "", - environ["PYTHONPATH"], - ) initialize()
{"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n@@ -110,6 +110,13 @@\n \n \n def initialize():\n+ # prevents auto-instrumentation of subprocesses if code execs another python process\n+ environ[\"PYTHONPATH\"] = sub(\n+ rf\"{dirname(abspath(__file__))}{pathsep}?\",\n+ \"\",\n+ environ[\"PYTHONPATH\"],\n+ )\n+\n try:\n distro = _load_distros()\n distro.configure()\n@@ -117,12 +124,6 @@\n _load_instrumentors(distro)\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to auto initialize opentelemetry\")\n- finally:\n- environ[\"PYTHONPATH\"] = sub(\n- rf\"{dirname(abspath(__file__))}{pathsep}?\",\n- \"\",\n- environ[\"PYTHONPATH\"],\n- )\n \n \n initialize()\n", "issue": "opentelemetry-instrument command can cause recursive creation of subprocesses\n**Describe your environment**\r\n\r\nPython3.9, linux.\r\n\r\n**Steps to reproduce**\r\nUsing `opentelemetry-instrument` with any exporter or instrumentation which invokes a python subprocess **during initialization**. For example, the `opentelemetry-exporter-gcp-trace` exporter may invoke the `gcloud` (written in python) command in a subprocess to get project information and authentication tokens. The subprocess will then try to autoinstrument, creating a recursive loop of subprocesses being created.\r\n\r\n**What is the expected behavior?**\r\nAuto-instrumentation should not apply to subprocesses created in the `initialize()` phase of auto-instrumentation. The `PYTHONPATH` environment variable should have the `sitecustomize.py` dirname stripped out at the beginning of `sitecustomize.py`. 
This would prevent subprocesses from being autoinstrumented during setup, which can cause a loop.\r\n\r\n**What is the actual behavior?**\r\n`PYTHONPATH` is correctly stripped later on to avoid this https://github.com/open-telemetry/opentelemetry-python-contrib/blob/e9f83e1292b0fe5f3478c9b23f3b5a5508481e68/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py#L120-L125\r\n\r\nHowever, any subprocesses created in [these lines](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/e9f83e1292b0fe5f3478c9b23f3b5a5508481e68/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py#L114-L117) will cause a loop.\r\n\r\n**Additional context**\r\nI can write a repro if necessary.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom logging import getLogger\nfrom os import environ\nfrom os.path import abspath, dirname, pathsep\nfrom re import sub\n\nfrom pkg_resources import iter_entry_points\n\nfrom opentelemetry.instrumentation.dependencies import (\n get_dist_dependency_conflicts,\n)\nfrom opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro\nfrom opentelemetry.instrumentation.environment_variables import (\n OTEL_PYTHON_DISABLED_INSTRUMENTATIONS,\n)\nfrom opentelemetry.instrumentation.version import __version__\n\nlogger = getLogger(__name__)\n\n\ndef _load_distros() -> BaseDistro:\n for entry_point in iter_entry_points(\"opentelemetry_distro\"):\n try:\n distro = entry_point.load()()\n if not isinstance(distro, BaseDistro):\n logger.debug(\n \"%s is not an OpenTelemetry Distro. 
Skipping\",\n entry_point.name,\n )\n continue\n logger.debug(\n \"Distribution %s will be configured\", entry_point.name\n )\n return distro\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\n \"Distribution %s configuration failed\", entry_point.name\n )\n raise exc\n return DefaultDistro()\n\n\ndef _load_instrumentors(distro):\n package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, [])\n if isinstance(package_to_exclude, str):\n package_to_exclude = package_to_exclude.split(\",\")\n # to handle users entering \"requests , flask\" or \"requests, flask\" with spaces\n package_to_exclude = [x.strip() for x in package_to_exclude]\n\n for entry_point in iter_entry_points(\"opentelemetry_pre_instrument\"):\n entry_point.load()()\n\n for entry_point in iter_entry_points(\"opentelemetry_instrumentor\"):\n if entry_point.name in package_to_exclude:\n logger.debug(\n \"Instrumentation skipped for library %s\", entry_point.name\n )\n continue\n\n try:\n conflict = get_dist_dependency_conflicts(entry_point.dist)\n if conflict:\n logger.debug(\n \"Skipping instrumentation %s: %s\",\n entry_point.name,\n conflict,\n )\n continue\n\n # tell instrumentation to not run dep checks again as we already did it above\n distro.load_instrumentor(entry_point, skip_dep_check=True)\n logger.debug(\"Instrumented %s\", entry_point.name)\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Instrumenting of %s failed\", entry_point.name)\n raise exc\n\n for entry_point in iter_entry_points(\"opentelemetry_post_instrument\"):\n entry_point.load()()\n\n\ndef _load_configurators():\n configured = None\n for entry_point in iter_entry_points(\"opentelemetry_configurator\"):\n if configured is not None:\n logger.warning(\n \"Configuration of %s not loaded, %s already loaded\",\n entry_point.name,\n configured,\n )\n continue\n try:\n entry_point.load()().configure(auto_instrumentation_version=__version__) # type: ignore\n configured = entry_point.name\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Configuration of %s failed\", entry_point.name)\n raise exc\n\n\ndef initialize():\n try:\n distro = _load_distros()\n distro.configure()\n _load_configurators()\n _load_instrumentors(distro)\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to auto initialize opentelemetry\")\n finally:\n environ[\"PYTHONPATH\"] = sub(\n rf\"{dirname(abspath(__file__))}{pathsep}?\",\n \"\",\n environ[\"PYTHONPATH\"],\n )\n\n\ninitialize()\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom logging import getLogger\nfrom os import environ\nfrom os.path import abspath, dirname, pathsep\nfrom re import sub\n\nfrom pkg_resources import iter_entry_points\n\nfrom opentelemetry.instrumentation.dependencies import (\n 
get_dist_dependency_conflicts,\n)\nfrom opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro\nfrom opentelemetry.instrumentation.environment_variables import (\n OTEL_PYTHON_DISABLED_INSTRUMENTATIONS,\n)\nfrom opentelemetry.instrumentation.version import __version__\n\nlogger = getLogger(__name__)\n\n\ndef _load_distros() -> BaseDistro:\n for entry_point in iter_entry_points(\"opentelemetry_distro\"):\n try:\n distro = entry_point.load()()\n if not isinstance(distro, BaseDistro):\n logger.debug(\n \"%s is not an OpenTelemetry Distro. Skipping\",\n entry_point.name,\n )\n continue\n logger.debug(\n \"Distribution %s will be configured\", entry_point.name\n )\n return distro\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\n \"Distribution %s configuration failed\", entry_point.name\n )\n raise exc\n return DefaultDistro()\n\n\ndef _load_instrumentors(distro):\n package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, [])\n if isinstance(package_to_exclude, str):\n package_to_exclude = package_to_exclude.split(\",\")\n # to handle users entering \"requests , flask\" or \"requests, flask\" with spaces\n package_to_exclude = [x.strip() for x in package_to_exclude]\n\n for entry_point in iter_entry_points(\"opentelemetry_pre_instrument\"):\n entry_point.load()()\n\n for entry_point in iter_entry_points(\"opentelemetry_instrumentor\"):\n if entry_point.name in package_to_exclude:\n logger.debug(\n \"Instrumentation skipped for library %s\", entry_point.name\n )\n continue\n\n try:\n conflict = get_dist_dependency_conflicts(entry_point.dist)\n if conflict:\n logger.debug(\n \"Skipping instrumentation %s: %s\",\n entry_point.name,\n conflict,\n )\n continue\n\n # tell instrumentation to not run dep checks again as we already did it above\n distro.load_instrumentor(entry_point, skip_dep_check=True)\n logger.debug(\"Instrumented %s\", entry_point.name)\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Instrumenting of %s failed\", entry_point.name)\n raise exc\n\n for entry_point in iter_entry_points(\"opentelemetry_post_instrument\"):\n entry_point.load()()\n\n\ndef _load_configurators():\n configured = None\n for entry_point in iter_entry_points(\"opentelemetry_configurator\"):\n if configured is not None:\n logger.warning(\n \"Configuration of %s not loaded, %s already loaded\",\n entry_point.name,\n configured,\n )\n continue\n try:\n entry_point.load()().configure(auto_instrumentation_version=__version__) # type: ignore\n configured = entry_point.name\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Configuration of %s failed\", entry_point.name)\n raise exc\n\n\ndef initialize():\n # prevents auto-instrumentation of subprocesses if code execs another python process\n environ[\"PYTHONPATH\"] = sub(\n rf\"{dirname(abspath(__file__))}{pathsep}?\",\n \"\",\n environ[\"PYTHONPATH\"],\n )\n\n try:\n distro = _load_distros()\n distro.configure()\n _load_configurators()\n _load_instrumentors(distro)\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to auto initialize opentelemetry\")\n\n\ninitialize()\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py"}]}
1,888
281
gh_patches_debug_6822
rasdani/github-patches
git_diff
getnikola__nikola-2163
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- NIkola does not see plugins listed under DISABLED_PLUGINS First noticed while running `nikola build`: `ERROR: _switch to py3:please!. Task dependency 'sitemap' does not exist.` > polyzen> there's no longer a way to disable the sitemap? i don't see it under `nikola plugin --list-installed` nor `nikola list` unless it's part of render_site > +ralsina> sitemap at /home/ralsina/Desktop/proyectos/nikola/master/nikola/plugins/task/sitemap/ > +ralsina> I suspect disabled plugins are not listed anymore, try that in some other folder > polyzen> ah yes --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nikola/plugins/command/plugin.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # Copyright © 2012-2015 Roberto Alsina and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 27 """Manage plugins.""" 28 29 from __future__ import print_function 30 import io 31 import os 32 import shutil 33 import subprocess 34 import time 35 import requests 36 37 import pygments 38 from pygments.lexers import PythonLexer 39 from pygments.formatters import TerminalFormatter 40 41 from nikola.plugin_categories import Command 42 from nikola import utils 43 44 LOGGER = utils.get_logger('plugin', utils.STDERR_HANDLER) 45 46 47 class CommandPlugin(Command): 48 """Manage plugins.""" 49 50 json = None 51 name = "plugin" 52 doc_usage = "[[-u][--user] --install name] | [[-u] [-l |--upgrade|--list-installed] | [--uninstall name]]" 53 doc_purpose = "manage plugins" 54 output_dir = None 55 needs_config = False 56 cmd_options = [ 57 { 58 'name': 'install', 59 'short': 'i', 60 'long': 'install', 61 'type': str, 62 'default': '', 63 'help': 'Install a plugin.', 64 }, 65 { 66 'name': 'uninstall', 67 'long': 'uninstall', 68 'short': 'r', 69 'type': str, 70 'default': '', 71 'help': 'Uninstall a plugin.' 72 }, 73 { 74 'name': 'list', 75 'short': 'l', 76 'long': 'list', 77 'type': bool, 78 'default': False, 79 'help': 'Show list of available plugins.' 
80 }, 81 { 82 'name': 'url', 83 'short': 'u', 84 'long': 'url', 85 'type': str, 86 'help': "URL for the plugin repository (default: " 87 "https://plugins.getnikola.com/v7/plugins.json)", 88 'default': 'https://plugins.getnikola.com/v7/plugins.json' 89 }, 90 { 91 'name': 'user', 92 'long': 'user', 93 'type': bool, 94 'help': "Install user-wide, available for all sites.", 95 'default': False 96 }, 97 { 98 'name': 'upgrade', 99 'long': 'upgrade', 100 'type': bool, 101 'help': "Upgrade all installed plugins.", 102 'default': False 103 }, 104 { 105 'name': 'list_installed', 106 'long': 'list-installed', 107 'type': bool, 108 'help': "List the installed plugins with their location.", 109 'default': False 110 }, 111 ] 112 113 def _execute(self, options, args): 114 """Install plugin into current site.""" 115 url = options['url'] 116 user_mode = options['user'] 117 118 # See the "mode" we need to operate in 119 install = options.get('install') 120 uninstall = options.get('uninstall') 121 upgrade = options.get('upgrade') 122 list_available = options.get('list') 123 list_installed = options.get('list_installed') 124 show_install_notes = options.get('show_install_notes', True) 125 command_count = [bool(x) for x in ( 126 install, 127 uninstall, 128 upgrade, 129 list_available, 130 list_installed)].count(True) 131 if command_count > 1 or command_count == 0: 132 print(self.help()) 133 return 2 134 135 if options.get('output_dir') is not None: 136 self.output_dir = options.get('output_dir') 137 else: 138 if not self.site.configured and not user_mode and install: 139 LOGGER.notice('No site found, assuming --user') 140 user_mode = True 141 142 if user_mode: 143 self.output_dir = os.path.expanduser('~/.nikola/plugins') 144 else: 145 self.output_dir = 'plugins' 146 147 if list_available: 148 return self.list_available(url) 149 elif list_installed: 150 return self.list_installed() 151 elif upgrade: 152 return self.do_upgrade(url) 153 elif uninstall: 154 return self.do_uninstall(uninstall) 155 elif install: 156 return self.do_install(url, install, show_install_notes) 157 158 def list_available(self, url): 159 """List all available plugins.""" 160 data = self.get_json(url) 161 print("Available Plugins:") 162 print("------------------") 163 for plugin in sorted(data.keys()): 164 print(plugin) 165 return 0 166 167 def list_installed(self): 168 """List installed plugins.""" 169 plugins = [] 170 for plugin in self.site.plugin_manager.getAllPlugins(): 171 p = plugin.path 172 if os.path.isdir(p): 173 p = p + os.sep 174 else: 175 p = p + '.py' 176 plugins.append([plugin.name, p]) 177 178 plugins.sort() 179 for name, path in plugins: 180 print('{0} at {1}'.format(name, path)) 181 return 0 182 183 def do_upgrade(self, url): 184 """Upgrade all installed plugins.""" 185 LOGGER.warning('This is not very smart, it just reinstalls some plugins and hopes for the best') 186 data = self.get_json(url) 187 plugins = [] 188 for plugin in self.site.plugin_manager.getAllPlugins(): 189 p = plugin.path 190 if os.path.isdir(p): 191 p = p + os.sep 192 else: 193 p = p + '.py' 194 if plugin.name in data: 195 plugins.append([plugin.name, p]) 196 print('Will upgrade {0} plugins: {1}'.format(len(plugins), ', '.join(n for n, _ in plugins))) 197 for name, path in plugins: 198 print('Upgrading {0}'.format(name)) 199 p = path 200 while True: 201 tail, head = os.path.split(path) 202 if head == 'plugins': 203 self.output_dir = path 204 break 205 elif tail == '': 206 LOGGER.error("Can't find the plugins folder for path: {0}".format(p)) 207 return 1 
208 else: 209 path = tail 210 self.do_install(url, name) 211 return 0 212 213 def do_install(self, url, name, show_install_notes=True): 214 """Download and install a plugin.""" 215 data = self.get_json(url) 216 if name in data: 217 utils.makedirs(self.output_dir) 218 url = data[name] 219 LOGGER.info("Downloading '{0}'".format(url)) 220 try: 221 zip_data = requests.get(url).content 222 except requests.exceptions.SSLError: 223 LOGGER.warning("SSL error, using http instead of https (press ^C to abort)") 224 time.sleep(1) 225 url = url.replace('https', 'http', 1) 226 zip_data = requests.get(url).content 227 228 zip_file = io.BytesIO() 229 zip_file.write(zip_data) 230 LOGGER.info('Extracting: {0} into {1}/'.format(name, self.output_dir)) 231 utils.extract_all(zip_file, self.output_dir) 232 dest_path = os.path.join(self.output_dir, name) 233 else: 234 try: 235 plugin_path = utils.get_plugin_path(name) 236 except: 237 LOGGER.error("Can't find plugin " + name) 238 return 1 239 240 utils.makedirs(self.output_dir) 241 dest_path = os.path.join(self.output_dir, name) 242 if os.path.exists(dest_path): 243 LOGGER.error("{0} is already installed".format(name)) 244 return 1 245 246 LOGGER.info('Copying {0} into plugins'.format(plugin_path)) 247 shutil.copytree(plugin_path, dest_path) 248 249 reqpath = os.path.join(dest_path, 'requirements.txt') 250 if os.path.exists(reqpath): 251 LOGGER.notice('This plugin has Python dependencies.') 252 LOGGER.info('Installing dependencies with pip...') 253 try: 254 subprocess.check_call(('pip', 'install', '-r', reqpath)) 255 except subprocess.CalledProcessError: 256 LOGGER.error('Could not install the dependencies.') 257 print('Contents of the requirements.txt file:\n') 258 with io.open(reqpath, 'r', encoding='utf-8') as fh: 259 print(utils.indent(fh.read(), 4 * ' ')) 260 print('You have to install those yourself or through a ' 261 'package manager.') 262 else: 263 LOGGER.info('Dependency installation succeeded.') 264 reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt') 265 if os.path.exists(reqnpypath): 266 LOGGER.notice('This plugin has third-party ' 267 'dependencies you need to install ' 268 'manually.') 269 print('Contents of the requirements-nonpy.txt file:\n') 270 with io.open(reqnpypath, 'r', encoding='utf-8') as fh: 271 for l in fh.readlines(): 272 i, j = l.split('::') 273 print(utils.indent(i.strip(), 4 * ' ')) 274 print(utils.indent(j.strip(), 8 * ' ')) 275 print() 276 277 print('You have to install those yourself or through a package ' 278 'manager.') 279 confpypath = os.path.join(dest_path, 'conf.py.sample') 280 if os.path.exists(confpypath) and show_install_notes: 281 LOGGER.notice('This plugin has a sample config file. 
Integrate it with yours in order to make this plugin work!') 282 print('Contents of the conf.py.sample file:\n') 283 with io.open(confpypath, 'r', encoding='utf-8') as fh: 284 if self.site.colorful: 285 print(utils.indent(pygments.highlight( 286 fh.read(), PythonLexer(), TerminalFormatter()), 287 4 * ' ')) 288 else: 289 print(utils.indent(fh.read(), 4 * ' ')) 290 return 0 291 292 def do_uninstall(self, name): 293 """Uninstall a plugin.""" 294 for plugin in self.site.plugin_manager.getAllPlugins(): # FIXME: this is repeated thrice 295 p = plugin.path 296 if os.path.isdir(p): 297 p = p + os.sep 298 else: 299 p = os.path.dirname(p) 300 if name == plugin.name: # Uninstall this one 301 LOGGER.warning('About to uninstall plugin: {0}'.format(name)) 302 LOGGER.warning('This will delete {0}'.format(p)) 303 sure = utils.ask_yesno('Are you sure?') 304 if sure: 305 LOGGER.warning('Removing {0}'.format(p)) 306 shutil.rmtree(p) 307 return 0 308 return 1 309 LOGGER.error('Unknown plugin: {0}'.format(name)) 310 return 1 311 312 def get_json(self, url): 313 """Download the JSON file with all plugins.""" 314 if self.json is None: 315 try: 316 self.json = requests.get(url).json() 317 except requests.exceptions.SSLError: 318 LOGGER.warning("SSL error, using http instead of https (press ^C to abort)") 319 time.sleep(1) 320 url = url.replace('https', 'http', 1) 321 self.json = requests.get(url).json() 322 return self.json 323 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nikola/plugins/command/plugin.py b/nikola/plugins/command/plugin.py --- a/nikola/plugins/command/plugin.py +++ b/nikola/plugins/command/plugin.py @@ -176,8 +176,11 @@ plugins.append([plugin.name, p]) plugins.sort() + print('Installed Plugins') + print('-----------------') for name, path in plugins: print('{0} at {1}'.format(name, path)) + print('\n\nAlso, you have disabled these plugins: {}'.format(self.site.config['DISABLED_PLUGINS'])) return 0 def do_upgrade(self, url):
{"golden_diff": "diff --git a/nikola/plugins/command/plugin.py b/nikola/plugins/command/plugin.py\n--- a/nikola/plugins/command/plugin.py\n+++ b/nikola/plugins/command/plugin.py\n@@ -176,8 +176,11 @@\n plugins.append([plugin.name, p])\n \n plugins.sort()\n+ print('Installed Plugins')\n+ print('-----------------')\n for name, path in plugins:\n print('{0} at {1}'.format(name, path))\n+ print('\\n\\nAlso, you have disabled these plugins: {}'.format(self.site.config['DISABLED_PLUGINS']))\n return 0\n \n def do_upgrade(self, url):\n", "issue": "NIkola does not see plugins listed under DISABLED_PLUGINS\nFirst noticed while running `nikola build`:\n`ERROR: _switch to py3:please!. Task dependency 'sitemap' does not exist.`\n\n> polyzen> there's no longer a way to disable the sitemap? i don't see it under `nikola plugin --list-installed` nor `nikola list` unless it's part of render_site\n> +ralsina> sitemap at /home/ralsina/Desktop/proyectos/nikola/master/nikola/plugins/task/sitemap/\n> +ralsina> I suspect disabled plugins are not listed anymore, try that in some other folder\n> polyzen> ah yes\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Manage plugins.\"\"\"\n\nfrom __future__ import print_function\nimport io\nimport os\nimport shutil\nimport subprocess\nimport time\nimport requests\n\nimport pygments\nfrom pygments.lexers import PythonLexer\nfrom pygments.formatters import TerminalFormatter\n\nfrom nikola.plugin_categories import Command\nfrom nikola import utils\n\nLOGGER = utils.get_logger('plugin', utils.STDERR_HANDLER)\n\n\nclass CommandPlugin(Command):\n \"\"\"Manage plugins.\"\"\"\n\n json = None\n name = \"plugin\"\n doc_usage = \"[[-u][--user] --install name] | [[-u] [-l |--upgrade|--list-installed] | [--uninstall name]]\"\n doc_purpose = \"manage plugins\"\n output_dir = None\n needs_config = False\n cmd_options = [\n {\n 'name': 'install',\n 'short': 'i',\n 'long': 'install',\n 'type': str,\n 'default': '',\n 'help': 'Install a plugin.',\n },\n {\n 'name': 'uninstall',\n 'long': 'uninstall',\n 'short': 'r',\n 'type': str,\n 'default': '',\n 'help': 'Uninstall a plugin.'\n },\n {\n 'name': 'list',\n 'short': 'l',\n 'long': 'list',\n 'type': bool,\n 'default': False,\n 'help': 'Show list of available plugins.'\n },\n {\n 'name': 'url',\n 'short': 'u',\n 'long': 'url',\n 'type': str,\n 'help': \"URL for the plugin repository (default: \"\n \"https://plugins.getnikola.com/v7/plugins.json)\",\n 'default': 'https://plugins.getnikola.com/v7/plugins.json'\n },\n {\n 'name': 'user',\n 'long': 'user',\n 'type': bool,\n 'help': \"Install user-wide, available for all sites.\",\n 'default': False\n },\n {\n 'name': 'upgrade',\n 'long': 'upgrade',\n 'type': bool,\n 'help': \"Upgrade all installed plugins.\",\n 'default': False\n },\n {\n 'name': 'list_installed',\n 'long': 'list-installed',\n 'type': bool,\n 'help': \"List the installed plugins with their location.\",\n 'default': False\n },\n ]\n\n def _execute(self, options, args):\n \"\"\"Install plugin into current site.\"\"\"\n url = options['url']\n user_mode = options['user']\n\n # See the \"mode\" we need to operate in\n install = options.get('install')\n uninstall = options.get('uninstall')\n upgrade = options.get('upgrade')\n list_available = options.get('list')\n list_installed = options.get('list_installed')\n show_install_notes = options.get('show_install_notes', True)\n command_count = [bool(x) for x in (\n install,\n uninstall,\n upgrade,\n list_available,\n list_installed)].count(True)\n if command_count > 1 or command_count == 0:\n print(self.help())\n return 2\n\n if options.get('output_dir') is not None:\n self.output_dir = options.get('output_dir')\n else:\n if not self.site.configured and not user_mode and install:\n LOGGER.notice('No site found, assuming --user')\n user_mode = True\n\n if user_mode:\n self.output_dir = os.path.expanduser('~/.nikola/plugins')\n else:\n self.output_dir = 'plugins'\n\n if list_available:\n return self.list_available(url)\n elif list_installed:\n return self.list_installed()\n elif upgrade:\n return self.do_upgrade(url)\n elif uninstall:\n return self.do_uninstall(uninstall)\n elif install:\n return self.do_install(url, install, show_install_notes)\n\n def list_available(self, url):\n \"\"\"List all available plugins.\"\"\"\n data = self.get_json(url)\n print(\"Available Plugins:\")\n print(\"------------------\")\n for plugin in 
sorted(data.keys()):\n print(plugin)\n return 0\n\n def list_installed(self):\n \"\"\"List installed plugins.\"\"\"\n plugins = []\n for plugin in self.site.plugin_manager.getAllPlugins():\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = p + '.py'\n plugins.append([plugin.name, p])\n\n plugins.sort()\n for name, path in plugins:\n print('{0} at {1}'.format(name, path))\n return 0\n\n def do_upgrade(self, url):\n \"\"\"Upgrade all installed plugins.\"\"\"\n LOGGER.warning('This is not very smart, it just reinstalls some plugins and hopes for the best')\n data = self.get_json(url)\n plugins = []\n for plugin in self.site.plugin_manager.getAllPlugins():\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = p + '.py'\n if plugin.name in data:\n plugins.append([plugin.name, p])\n print('Will upgrade {0} plugins: {1}'.format(len(plugins), ', '.join(n for n, _ in plugins)))\n for name, path in plugins:\n print('Upgrading {0}'.format(name))\n p = path\n while True:\n tail, head = os.path.split(path)\n if head == 'plugins':\n self.output_dir = path\n break\n elif tail == '':\n LOGGER.error(\"Can't find the plugins folder for path: {0}\".format(p))\n return 1\n else:\n path = tail\n self.do_install(url, name)\n return 0\n\n def do_install(self, url, name, show_install_notes=True):\n \"\"\"Download and install a plugin.\"\"\"\n data = self.get_json(url)\n if name in data:\n utils.makedirs(self.output_dir)\n url = data[name]\n LOGGER.info(\"Downloading '{0}'\".format(url))\n try:\n zip_data = requests.get(url).content\n except requests.exceptions.SSLError:\n LOGGER.warning(\"SSL error, using http instead of https (press ^C to abort)\")\n time.sleep(1)\n url = url.replace('https', 'http', 1)\n zip_data = requests.get(url).content\n\n zip_file = io.BytesIO()\n zip_file.write(zip_data)\n LOGGER.info('Extracting: {0} into {1}/'.format(name, self.output_dir))\n utils.extract_all(zip_file, self.output_dir)\n dest_path = os.path.join(self.output_dir, name)\n else:\n try:\n plugin_path = utils.get_plugin_path(name)\n except:\n LOGGER.error(\"Can't find plugin \" + name)\n return 1\n\n utils.makedirs(self.output_dir)\n dest_path = os.path.join(self.output_dir, name)\n if os.path.exists(dest_path):\n LOGGER.error(\"{0} is already installed\".format(name))\n return 1\n\n LOGGER.info('Copying {0} into plugins'.format(plugin_path))\n shutil.copytree(plugin_path, dest_path)\n\n reqpath = os.path.join(dest_path, 'requirements.txt')\n if os.path.exists(reqpath):\n LOGGER.notice('This plugin has Python dependencies.')\n LOGGER.info('Installing dependencies with pip...')\n try:\n subprocess.check_call(('pip', 'install', '-r', reqpath))\n except subprocess.CalledProcessError:\n LOGGER.error('Could not install the dependencies.')\n print('Contents of the requirements.txt file:\\n')\n with io.open(reqpath, 'r', encoding='utf-8') as fh:\n print(utils.indent(fh.read(), 4 * ' '))\n print('You have to install those yourself or through a '\n 'package manager.')\n else:\n LOGGER.info('Dependency installation succeeded.')\n reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt')\n if os.path.exists(reqnpypath):\n LOGGER.notice('This plugin has third-party '\n 'dependencies you need to install '\n 'manually.')\n print('Contents of the requirements-nonpy.txt file:\\n')\n with io.open(reqnpypath, 'r', encoding='utf-8') as fh:\n for l in fh.readlines():\n i, j = l.split('::')\n print(utils.indent(i.strip(), 4 * ' '))\n print(utils.indent(j.strip(), 8 * ' '))\n print()\n\n print('You 
have to install those yourself or through a package '\n 'manager.')\n confpypath = os.path.join(dest_path, 'conf.py.sample')\n if os.path.exists(confpypath) and show_install_notes:\n LOGGER.notice('This plugin has a sample config file. Integrate it with yours in order to make this plugin work!')\n print('Contents of the conf.py.sample file:\\n')\n with io.open(confpypath, 'r', encoding='utf-8') as fh:\n if self.site.colorful:\n print(utils.indent(pygments.highlight(\n fh.read(), PythonLexer(), TerminalFormatter()),\n 4 * ' '))\n else:\n print(utils.indent(fh.read(), 4 * ' '))\n return 0\n\n def do_uninstall(self, name):\n \"\"\"Uninstall a plugin.\"\"\"\n for plugin in self.site.plugin_manager.getAllPlugins(): # FIXME: this is repeated thrice\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = os.path.dirname(p)\n if name == plugin.name: # Uninstall this one\n LOGGER.warning('About to uninstall plugin: {0}'.format(name))\n LOGGER.warning('This will delete {0}'.format(p))\n sure = utils.ask_yesno('Are you sure?')\n if sure:\n LOGGER.warning('Removing {0}'.format(p))\n shutil.rmtree(p)\n return 0\n return 1\n LOGGER.error('Unknown plugin: {0}'.format(name))\n return 1\n\n def get_json(self, url):\n \"\"\"Download the JSON file with all plugins.\"\"\"\n if self.json is None:\n try:\n self.json = requests.get(url).json()\n except requests.exceptions.SSLError:\n LOGGER.warning(\"SSL error, using http instead of https (press ^C to abort)\")\n time.sleep(1)\n url = url.replace('https', 'http', 1)\n self.json = requests.get(url).json()\n return self.json\n", "path": "nikola/plugins/command/plugin.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Manage plugins.\"\"\"\n\nfrom __future__ import print_function\nimport io\nimport os\nimport shutil\nimport subprocess\nimport time\nimport requests\n\nimport pygments\nfrom pygments.lexers import PythonLexer\nfrom pygments.formatters import TerminalFormatter\n\nfrom nikola.plugin_categories import Command\nfrom nikola import utils\n\nLOGGER = utils.get_logger('plugin', utils.STDERR_HANDLER)\n\n\nclass CommandPlugin(Command):\n \"\"\"Manage plugins.\"\"\"\n\n json = None\n name = \"plugin\"\n doc_usage = \"[[-u][--user] --install name] | [[-u] [-l |--upgrade|--list-installed] | [--uninstall name]]\"\n doc_purpose = \"manage plugins\"\n output_dir = None\n needs_config = False\n cmd_options = [\n {\n 'name': 'install',\n 'short': 'i',\n 'long': 'install',\n 'type': str,\n 'default': '',\n 'help': 'Install a plugin.',\n },\n {\n 'name': 'uninstall',\n 'long': 'uninstall',\n 'short': 'r',\n 'type': str,\n 'default': '',\n 'help': 'Uninstall a plugin.'\n },\n {\n 'name': 'list',\n 'short': 'l',\n 'long': 'list',\n 'type': bool,\n 'default': False,\n 'help': 'Show list of available plugins.'\n },\n {\n 'name': 'url',\n 'short': 'u',\n 'long': 'url',\n 'type': str,\n 'help': \"URL for the plugin repository (default: \"\n \"https://plugins.getnikola.com/v7/plugins.json)\",\n 'default': 'https://plugins.getnikola.com/v7/plugins.json'\n },\n {\n 'name': 'user',\n 'long': 'user',\n 'type': bool,\n 'help': \"Install user-wide, available for all sites.\",\n 'default': False\n },\n {\n 'name': 'upgrade',\n 'long': 'upgrade',\n 'type': bool,\n 'help': \"Upgrade all installed plugins.\",\n 'default': False\n },\n {\n 'name': 'list_installed',\n 'long': 'list-installed',\n 'type': bool,\n 'help': \"List the installed plugins with their location.\",\n 'default': False\n },\n ]\n\n def _execute(self, options, args):\n \"\"\"Install plugin into current site.\"\"\"\n url = options['url']\n user_mode = options['user']\n\n # See the \"mode\" we need to operate in\n install = options.get('install')\n uninstall = options.get('uninstall')\n upgrade = options.get('upgrade')\n list_available = options.get('list')\n list_installed = options.get('list_installed')\n show_install_notes = options.get('show_install_notes', True)\n command_count = [bool(x) for x in (\n install,\n uninstall,\n upgrade,\n list_available,\n list_installed)].count(True)\n if command_count > 1 or command_count == 0:\n print(self.help())\n return 2\n\n if options.get('output_dir') is not None:\n self.output_dir = options.get('output_dir')\n else:\n if not self.site.configured and not user_mode and install:\n LOGGER.notice('No site found, assuming --user')\n user_mode = True\n\n if user_mode:\n self.output_dir = os.path.expanduser('~/.nikola/plugins')\n else:\n self.output_dir = 'plugins'\n\n if list_available:\n return self.list_available(url)\n elif list_installed:\n return self.list_installed()\n elif upgrade:\n return self.do_upgrade(url)\n elif uninstall:\n return self.do_uninstall(uninstall)\n elif install:\n return self.do_install(url, install, show_install_notes)\n\n def list_available(self, url):\n \"\"\"List all available plugins.\"\"\"\n data = self.get_json(url)\n print(\"Available Plugins:\")\n print(\"------------------\")\n for plugin in 
sorted(data.keys()):\n print(plugin)\n return 0\n\n def list_installed(self):\n \"\"\"List installed plugins.\"\"\"\n plugins = []\n for plugin in self.site.plugin_manager.getAllPlugins():\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = p + '.py'\n plugins.append([plugin.name, p])\n\n plugins.sort()\n print('Installed Plugins')\n print('-----------------')\n for name, path in plugins:\n print('{0} at {1}'.format(name, path))\n print('\\n\\nAlso, you have disabled these plugins: {}'.format(self.site.config['DISABLED_PLUGINS']))\n return 0\n\n def do_upgrade(self, url):\n \"\"\"Upgrade all installed plugins.\"\"\"\n LOGGER.warning('This is not very smart, it just reinstalls some plugins and hopes for the best')\n data = self.get_json(url)\n plugins = []\n for plugin in self.site.plugin_manager.getAllPlugins():\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = p + '.py'\n if plugin.name in data:\n plugins.append([plugin.name, p])\n print('Will upgrade {0} plugins: {1}'.format(len(plugins), ', '.join(n for n, _ in plugins)))\n for name, path in plugins:\n print('Upgrading {0}'.format(name))\n p = path\n while True:\n tail, head = os.path.split(path)\n if head == 'plugins':\n self.output_dir = path\n break\n elif tail == '':\n LOGGER.error(\"Can't find the plugins folder for path: {0}\".format(p))\n return 1\n else:\n path = tail\n self.do_install(url, name)\n return 0\n\n def do_install(self, url, name, show_install_notes=True):\n \"\"\"Download and install a plugin.\"\"\"\n data = self.get_json(url)\n if name in data:\n utils.makedirs(self.output_dir)\n url = data[name]\n LOGGER.info(\"Downloading '{0}'\".format(url))\n try:\n zip_data = requests.get(url).content\n except requests.exceptions.SSLError:\n LOGGER.warning(\"SSL error, using http instead of https (press ^C to abort)\")\n time.sleep(1)\n url = url.replace('https', 'http', 1)\n zip_data = requests.get(url).content\n\n zip_file = io.BytesIO()\n zip_file.write(zip_data)\n LOGGER.info('Extracting: {0} into {1}/'.format(name, self.output_dir))\n utils.extract_all(zip_file, self.output_dir)\n dest_path = os.path.join(self.output_dir, name)\n else:\n try:\n plugin_path = utils.get_plugin_path(name)\n except:\n LOGGER.error(\"Can't find plugin \" + name)\n return 1\n\n utils.makedirs(self.output_dir)\n dest_path = os.path.join(self.output_dir, name)\n if os.path.exists(dest_path):\n LOGGER.error(\"{0} is already installed\".format(name))\n return 1\n\n LOGGER.info('Copying {0} into plugins'.format(plugin_path))\n shutil.copytree(plugin_path, dest_path)\n\n reqpath = os.path.join(dest_path, 'requirements.txt')\n if os.path.exists(reqpath):\n LOGGER.notice('This plugin has Python dependencies.')\n LOGGER.info('Installing dependencies with pip...')\n try:\n subprocess.check_call(('pip', 'install', '-r', reqpath))\n except subprocess.CalledProcessError:\n LOGGER.error('Could not install the dependencies.')\n print('Contents of the requirements.txt file:\\n')\n with io.open(reqpath, 'r', encoding='utf-8') as fh:\n print(utils.indent(fh.read(), 4 * ' '))\n print('You have to install those yourself or through a '\n 'package manager.')\n else:\n LOGGER.info('Dependency installation succeeded.')\n reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt')\n if os.path.exists(reqnpypath):\n LOGGER.notice('This plugin has third-party '\n 'dependencies you need to install '\n 'manually.')\n print('Contents of the requirements-nonpy.txt file:\\n')\n with io.open(reqnpypath, 'r', encoding='utf-8') as 
fh:\n for l in fh.readlines():\n i, j = l.split('::')\n print(utils.indent(i.strip(), 4 * ' '))\n print(utils.indent(j.strip(), 8 * ' '))\n print()\n\n print('You have to install those yourself or through a package '\n 'manager.')\n confpypath = os.path.join(dest_path, 'conf.py.sample')\n if os.path.exists(confpypath) and show_install_notes:\n LOGGER.notice('This plugin has a sample config file. Integrate it with yours in order to make this plugin work!')\n print('Contents of the conf.py.sample file:\\n')\n with io.open(confpypath, 'r', encoding='utf-8') as fh:\n if self.site.colorful:\n print(utils.indent(pygments.highlight(\n fh.read(), PythonLexer(), TerminalFormatter()),\n 4 * ' '))\n else:\n print(utils.indent(fh.read(), 4 * ' '))\n return 0\n\n def do_uninstall(self, name):\n \"\"\"Uninstall a plugin.\"\"\"\n for plugin in self.site.plugin_manager.getAllPlugins(): # FIXME: this is repeated thrice\n p = plugin.path\n if os.path.isdir(p):\n p = p + os.sep\n else:\n p = os.path.dirname(p)\n if name == plugin.name: # Uninstall this one\n LOGGER.warning('About to uninstall plugin: {0}'.format(name))\n LOGGER.warning('This will delete {0}'.format(p))\n sure = utils.ask_yesno('Are you sure?')\n if sure:\n LOGGER.warning('Removing {0}'.format(p))\n shutil.rmtree(p)\n return 0\n return 1\n LOGGER.error('Unknown plugin: {0}'.format(name))\n return 1\n\n def get_json(self, url):\n \"\"\"Download the JSON file with all plugins.\"\"\"\n if self.json is None:\n try:\n self.json = requests.get(url).json()\n except requests.exceptions.SSLError:\n LOGGER.warning(\"SSL error, using http instead of https (press ^C to abort)\")\n time.sleep(1)\n url = url.replace('https', 'http', 1)\n self.json = requests.get(url).json()\n return self.json\n", "path": "nikola/plugins/command/plugin.py"}]}
num_tokens: 3,781
num_tokens_diff: 143
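The Nikola row above patches `plugin --list-installed` so that plugins disabled through `DISABLED_PLUGINS` are at least reported. A minimal sketch of the printing logic taken from that row's golden_diff — here `site` stands in for the Nikola site object and `plugins` for the `[name, path]` pairs the command already collects:

```python
def report_plugins(site, plugins):
    """Print installed plugins, then mention the ones disabled in conf.py."""
    plugins.sort()
    print('Installed Plugins')
    print('-----------------')
    for name, path in plugins:
        print('{0} at {1}'.format(name, path))
    # Disabled plugins are not in `plugins`, so they are reported from the config instead.
    print('\n\nAlso, you have disabled these plugins: {}'.format(site.config['DISABLED_PLUGINS']))
    return 0
```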
problem_id: gh_patches_debug_19558
source: rasdani/github-patches
task_type: git_diff
in_source_id: open-mmlab__mmcv-97
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- mmcv error My environment is macOS Mojave 10.14.4, Anaconda 4.4.0,Python 3.6.1. I directly use "pip install mmcv and got: "Running setup.py clean for mmcv Failed to build mmcv Installing collected packages: mmcv Running setup.py install for mmcv ... error" and : "In file included from ./mmcv/video/optflow_warp/flow_warp.cpp:1: ./mmcv/video/optflow_warp/flow_warp.hpp:3:10: fatal error: 'iostream' file not found #include <iostream>" Anybody help? Thank you very much. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import sys 2 from io import open # for Python 2 (identical to builtin in Python 3) 3 4 from setuptools import Extension, find_packages, setup 5 6 import numpy 7 from Cython.Distutils import build_ext 8 9 install_requires = [ 10 'numpy>=1.11.1', 'pyyaml', 'six', 'addict', 'requests', 'opencv-python', 11 'Cython' 12 ] 13 if sys.version_info < (3, 3): 14 install_requires.append('backports.shutil_get_terminal_size') 15 if sys.version_info < (3, 4): 16 install_requires.extend(['enum34', 'pathlib']) 17 18 19 def readme(): 20 with open('README.rst', encoding='utf-8') as f: 21 content = f.read() 22 return content 23 24 25 def get_version(): 26 version_file = 'mmcv/version.py' 27 with open(version_file, 'r', encoding='utf-8') as f: 28 exec(compile(f.read(), version_file, 'exec')) 29 return locals()['__version__'] 30 31 32 EXT_MODULES = [ 33 Extension( 34 name='mmcv._ext', 35 sources=[ 36 './mmcv/video/optflow_warp/flow_warp.cpp', 37 './mmcv/video/optflow_warp/flow_warp_module.pyx' 38 ], 39 include_dirs=[numpy.get_include()], 40 language="c++", 41 ), 42 ] 43 44 setup( 45 name='mmcv', 46 version=get_version(), 47 description='Open MMLab Computer Vision Foundation', 48 long_description=readme(), 49 keywords='computer vision', 50 packages=find_packages(), 51 classifiers=[ 52 'Development Status :: 4 - Beta', 53 'License :: OSI Approved :: Apache Software License', 54 'Operating System :: OS Independent', 55 'Programming Language :: Python :: 2', 56 'Programming Language :: Python :: 2.7', 57 'Programming Language :: Python :: 3', 58 'Programming Language :: Python :: 3.4', 59 'Programming Language :: Python :: 3.5', 60 'Programming Language :: Python :: 3.6', 61 'Programming Language :: Python :: 3.7', 62 'Topic :: Utilities', 63 ], 64 url='https://github.com/open-mmlab/mmcv', 65 author='Kai Chen', 66 author_email='[email protected]', 67 setup_requires=['pytest-runner'], 68 tests_require=['pytest'], 69 install_requires=install_requires, 70 ext_modules=EXT_MODULES, 71 cmdclass={'build_ext': build_ext}, 72 zip_safe=False) 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ +import platform import sys from io import open # for Python 2 (identical to builtin in Python 3) - from setuptools import Extension, find_packages, setup import numpy @@ -29,6 +29,13 @@ return locals()['__version__'] +if platform.system() == 'Darwin': + extra_compile_args = ['-stdlib=libc++'] + extra_link_args = ['-stdlib=libc++'] +else: + extra_compile_args = [] + extra_link_args = [] + EXT_MODULES = [ Extension( name='mmcv._ext', @@ -37,7 +44,9 @@ './mmcv/video/optflow_warp/flow_warp_module.pyx' ], include_dirs=[numpy.get_include()], - language="c++", + language='c++', + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args, ), ]
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,6 @@\n+import platform\n import sys\n from io import open # for Python 2 (identical to builtin in Python 3)\n-\n from setuptools import Extension, find_packages, setup\n \n import numpy\n@@ -29,6 +29,13 @@\n return locals()['__version__']\n \n \n+if platform.system() == 'Darwin':\n+ extra_compile_args = ['-stdlib=libc++']\n+ extra_link_args = ['-stdlib=libc++']\n+else:\n+ extra_compile_args = []\n+ extra_link_args = []\n+\n EXT_MODULES = [\n Extension(\n name='mmcv._ext',\n@@ -37,7 +44,9 @@\n './mmcv/video/optflow_warp/flow_warp_module.pyx'\n ],\n include_dirs=[numpy.get_include()],\n- language=\"c++\",\n+ language='c++',\n+ extra_compile_args=extra_compile_args,\n+ extra_link_args=extra_link_args,\n ),\n ]\n", "issue": "mmcv error\nMy environment is macOS Mojave 10.14.4, Anaconda 4.4.0,Python 3.6.1.\r\n I directly use \"pip install mmcv and got:\r\n\"Running setup.py clean for mmcv\r\nFailed to build mmcv\r\nInstalling collected packages: mmcv\r\nRunning setup.py install for mmcv ... error\" and :\r\n\"In file included from ./mmcv/video/optflow_warp/flow_warp.cpp:1:\r\n./mmcv/video/optflow_warp/flow_warp.hpp:3:10: fatal error: 'iostream' file not found\r\n#include <iostream>\"\r\nAnybody help? Thank you very much.\n", "before_files": [{"content": "import sys\nfrom io import open # for Python 2 (identical to builtin in Python 3)\n\nfrom setuptools import Extension, find_packages, setup\n\nimport numpy\nfrom Cython.Distutils import build_ext\n\ninstall_requires = [\n 'numpy>=1.11.1', 'pyyaml', 'six', 'addict', 'requests', 'opencv-python',\n 'Cython'\n]\nif sys.version_info < (3, 3):\n install_requires.append('backports.shutil_get_terminal_size')\nif sys.version_info < (3, 4):\n install_requires.extend(['enum34', 'pathlib'])\n\n\ndef readme():\n with open('README.rst', encoding='utf-8') as f:\n content = f.read()\n return content\n\n\ndef get_version():\n version_file = 'mmcv/version.py'\n with open(version_file, 'r', encoding='utf-8') as f:\n exec(compile(f.read(), version_file, 'exec'))\n return locals()['__version__']\n\n\nEXT_MODULES = [\n Extension(\n name='mmcv._ext',\n sources=[\n './mmcv/video/optflow_warp/flow_warp.cpp',\n './mmcv/video/optflow_warp/flow_warp_module.pyx'\n ],\n include_dirs=[numpy.get_include()],\n language=\"c++\",\n ),\n]\n\nsetup(\n name='mmcv',\n version=get_version(),\n description='Open MMLab Computer Vision Foundation',\n long_description=readme(),\n keywords='computer vision',\n packages=find_packages(),\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Utilities',\n ],\n url='https://github.com/open-mmlab/mmcv',\n author='Kai Chen',\n author_email='[email protected]',\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=install_requires,\n ext_modules=EXT_MODULES,\n cmdclass={'build_ext': build_ext},\n zip_safe=False)\n", "path": "setup.py"}], "after_files": [{"content": "import platform\nimport sys\nfrom io import open # for Python 2 (identical to builtin in Python 3)\nfrom setuptools import Extension, find_packages, setup\n\nimport 
numpy\nfrom Cython.Distutils import build_ext\n\ninstall_requires = [\n 'numpy>=1.11.1', 'pyyaml', 'six', 'addict', 'requests', 'opencv-python',\n 'Cython'\n]\nif sys.version_info < (3, 3):\n install_requires.append('backports.shutil_get_terminal_size')\nif sys.version_info < (3, 4):\n install_requires.extend(['enum34', 'pathlib'])\n\n\ndef readme():\n with open('README.rst', encoding='utf-8') as f:\n content = f.read()\n return content\n\n\ndef get_version():\n version_file = 'mmcv/version.py'\n with open(version_file, 'r', encoding='utf-8') as f:\n exec(compile(f.read(), version_file, 'exec'))\n return locals()['__version__']\n\n\nif platform.system() == 'Darwin':\n extra_compile_args = ['-stdlib=libc++']\n extra_link_args = ['-stdlib=libc++']\nelse:\n extra_compile_args = []\n extra_link_args = []\n\nEXT_MODULES = [\n Extension(\n name='mmcv._ext',\n sources=[\n './mmcv/video/optflow_warp/flow_warp.cpp',\n './mmcv/video/optflow_warp/flow_warp_module.pyx'\n ],\n include_dirs=[numpy.get_include()],\n language='c++',\n extra_compile_args=extra_compile_args,\n extra_link_args=extra_link_args,\n ),\n]\n\nsetup(\n name='mmcv',\n version=get_version(),\n description='Open MMLab Computer Vision Foundation',\n long_description=readme(),\n keywords='computer vision',\n packages=find_packages(),\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Utilities',\n ],\n url='https://github.com/open-mmlab/mmcv',\n author='Kai Chen',\n author_email='[email protected]',\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=install_requires,\n ext_modules=EXT_MODULES,\n cmdclass={'build_ext': build_ext},\n zip_safe=False)\n", "path": "setup.py"}]}
num_tokens: 1,067
num_tokens_diff: 236
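The mmcv row above is a build-configuration fix: the accepted patch adds `-stdlib=libc++` when building on macOS so that clang can resolve C++ standard headers such as `<iostream>`. The pattern from that diff, sketched as a standalone snippet (the sources and module name are the ones from the row; another project would substitute its own):

```python
import platform

import numpy
from setuptools import Extension

# macOS (Darwin) needs an explicit libc++ flag; other platforms keep the defaults.
if platform.system() == 'Darwin':
    extra_compile_args = ['-stdlib=libc++']
    extra_link_args = ['-stdlib=libc++']
else:
    extra_compile_args = []
    extra_link_args = []

ext_modules = [
    Extension(
        name='mmcv._ext',
        sources=[
            './mmcv/video/optflow_warp/flow_warp.cpp',
            './mmcv/video/optflow_warp/flow_warp_module.pyx',
        ],
        include_dirs=[numpy.get_include()],
        language='c++',
        extra_compile_args=extra_compile_args,
        extra_link_args=extra_link_args,
    ),
]
```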
problem_id: gh_patches_debug_19740
source: rasdani/github-patches
task_type: git_diff
in_source_id: tough-dev-school__education-backend-180
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Кастомные теги в мейлчимпе Чтобы можно было в лид-магните указать теги, которые пробрасываются в аудиторию мейлчимпа --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/magnets/creator.py` Content: ``` 1 from magnets.models import EmailLeadMagnetCampaign, LeadCampaignLogEntry 2 from users.creator import UserCreator 3 from users.models import User 4 5 6 class LeadCreator: 7 def __init__(self, campaign: EmailLeadMagnetCampaign, email: str, name: str = None): 8 self.data = { 9 'name': name, 10 'email': email, 11 } 12 13 self.campaign = campaign 14 15 def __call__(self): 16 self.user = self._create_user() 17 self._create_log_entry() 18 19 self.campaign.execute(self.user) 20 21 def _create_user(self) -> User: 22 return UserCreator( 23 name=self.data['name'], 24 email=self.data['email'], 25 subscribe=True, 26 )() 27 28 def _create_log_entry(self): 29 LeadCampaignLogEntry.objects.create( 30 user=self.user, 31 campaign=self.campaign, 32 ) 33 ``` Path: `src/shipping/shipments/course.py` Content: ``` 1 from typing import Optional 2 3 from app.tasks import invite_to_clickmeeting, invite_to_zoomus, send_mail, subscribe_to_mailchimp 4 from products.models import Course 5 from shipping import factory 6 from shipping.shipments.base import BaseShipment 7 8 9 @factory.register(Course) 10 class CourseShipment(BaseShipment): 11 @property 12 def course(self): 13 return self.stuff_to_ship 14 15 def ship(self): 16 self.invite_to_clickmeeting() 17 self.invite_to_zoomus() 18 self.subscribe_to_mailchimp() 19 20 self.send_welcome_letter() 21 22 def subscribe_to_mailchimp(self): 23 if self.course.mailchimp_list_id is not None: 24 subscribe_to_mailchimp.delay( 25 list_id=self.course.mailchimp_list_id, 26 user_id=self.user.pk, 27 tags=[self.course.slug], 28 ) 29 30 def invite_to_clickmeeting(self): 31 if self.course.clickmeeting_room_url is not None: 32 invite_to_clickmeeting.delay( 33 room_url=self.course.clickmeeting_room_url, 34 email=self.user.email, 35 ) 36 37 def invite_to_zoomus(self): 38 if self.course.zoomus_webinar_id is not None and len(self.course.zoomus_webinar_id): 39 invite_to_zoomus.delay( 40 webinar_id=self.course.zoomus_webinar_id, 41 user_id=self.user.id, 42 ) 43 44 def send_welcome_letter(self): 45 if self.welcome_letter_template_id is not None: 46 send_mail.delay( 47 to=self.user.email, 48 template_id=self.welcome_letter_template_id, 49 ctx=self.get_template_context(), 50 disable_antispam=True, 51 ) 52 53 def get_template_context(self) -> dict: 54 return { 55 'name': self.course.name, 56 'slug': self.course.slug, 57 'name_genitive': self.course.name_genitive, 58 **self.get_gift_template_context(), 59 } 60 61 @property 62 def welcome_letter_template_id(self) -> Optional[str]: 63 """Get special gift template letter id if order is a gift and it is present""" 64 template_id = self.course.welcome_letter_template_id 65 66 if self.order is not None and self.order.giver is not None: # this is a gift 67 template_id = self.course.gift_welcome_letter_template_id or self.course.welcome_letter_template_id 68 69 if template_id is None or not len(template_id): # fuck this null=True in CharFields 70 return None 71 72 return template_id 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/magnets/creator.py b/src/magnets/creator.py --- a/src/magnets/creator.py +++ b/src/magnets/creator.py @@ -23,6 +23,7 @@ name=self.data['name'], email=self.data['email'], subscribe=True, + tags=self.tags, )() def _create_log_entry(self): @@ -30,3 +31,7 @@ user=self.user, campaign=self.campaign, ) + + @property + def tags(self): + return [f'{self.campaign.slug}-lead-magnet'] diff --git a/src/shipping/shipments/course.py b/src/shipping/shipments/course.py --- a/src/shipping/shipments/course.py +++ b/src/shipping/shipments/course.py @@ -24,7 +24,7 @@ subscribe_to_mailchimp.delay( list_id=self.course.mailchimp_list_id, user_id=self.user.pk, - tags=[self.course.slug], + tags=[self.course.slug, f'{self.course.slug}-purchased'], ) def invite_to_clickmeeting(self):
{"golden_diff": "diff --git a/src/magnets/creator.py b/src/magnets/creator.py\n--- a/src/magnets/creator.py\n+++ b/src/magnets/creator.py\n@@ -23,6 +23,7 @@\n name=self.data['name'],\n email=self.data['email'],\n subscribe=True,\n+ tags=self.tags,\n )()\n \n def _create_log_entry(self):\n@@ -30,3 +31,7 @@\n user=self.user,\n campaign=self.campaign,\n )\n+\n+ @property\n+ def tags(self):\n+ return [f'{self.campaign.slug}-lead-magnet']\ndiff --git a/src/shipping/shipments/course.py b/src/shipping/shipments/course.py\n--- a/src/shipping/shipments/course.py\n+++ b/src/shipping/shipments/course.py\n@@ -24,7 +24,7 @@\n subscribe_to_mailchimp.delay(\n list_id=self.course.mailchimp_list_id,\n user_id=self.user.pk,\n- tags=[self.course.slug],\n+ tags=[self.course.slug, f'{self.course.slug}-purchased'],\n )\n \n def invite_to_clickmeeting(self):\n", "issue": "\u041a\u0430\u0441\u0442\u043e\u043c\u043d\u044b\u0435 \u0442\u0435\u0433\u0438 \u0432 \u043c\u0435\u0439\u043b\u0447\u0438\u043c\u043f\u0435\n\u0427\u0442\u043e\u0431\u044b \u043c\u043e\u0436\u043d\u043e \u0431\u044b\u043b\u043e \u0432 \u043b\u0438\u0434-\u043c\u0430\u0433\u043d\u0438\u0442\u0435 \u0443\u043a\u0430\u0437\u0430\u0442\u044c \u0442\u0435\u0433\u0438, \u043a\u043e\u0442\u043e\u0440\u044b\u0435 \u043f\u0440\u043e\u0431\u0440\u0430\u0441\u044b\u0432\u0430\u044e\u0442\u0441\u044f \u0432 \u0430\u0443\u0434\u0438\u0442\u043e\u0440\u0438\u044e \u043c\u0435\u0439\u043b\u0447\u0438\u043c\u043f\u0430\n", "before_files": [{"content": "from magnets.models import EmailLeadMagnetCampaign, LeadCampaignLogEntry\nfrom users.creator import UserCreator\nfrom users.models import User\n\n\nclass LeadCreator:\n def __init__(self, campaign: EmailLeadMagnetCampaign, email: str, name: str = None):\n self.data = {\n 'name': name,\n 'email': email,\n }\n\n self.campaign = campaign\n\n def __call__(self):\n self.user = self._create_user()\n self._create_log_entry()\n\n self.campaign.execute(self.user)\n\n def _create_user(self) -> User:\n return UserCreator(\n name=self.data['name'],\n email=self.data['email'],\n subscribe=True,\n )()\n\n def _create_log_entry(self):\n LeadCampaignLogEntry.objects.create(\n user=self.user,\n campaign=self.campaign,\n )\n", "path": "src/magnets/creator.py"}, {"content": "from typing import Optional\n\nfrom app.tasks import invite_to_clickmeeting, invite_to_zoomus, send_mail, subscribe_to_mailchimp\nfrom products.models import Course\nfrom shipping import factory\nfrom shipping.shipments.base import BaseShipment\n\n\[email protected](Course)\nclass CourseShipment(BaseShipment):\n @property\n def course(self):\n return self.stuff_to_ship\n\n def ship(self):\n self.invite_to_clickmeeting()\n self.invite_to_zoomus()\n self.subscribe_to_mailchimp()\n\n self.send_welcome_letter()\n\n def subscribe_to_mailchimp(self):\n if self.course.mailchimp_list_id is not None:\n subscribe_to_mailchimp.delay(\n list_id=self.course.mailchimp_list_id,\n user_id=self.user.pk,\n tags=[self.course.slug],\n )\n\n def invite_to_clickmeeting(self):\n if self.course.clickmeeting_room_url is not None:\n invite_to_clickmeeting.delay(\n room_url=self.course.clickmeeting_room_url,\n email=self.user.email,\n )\n\n def invite_to_zoomus(self):\n if self.course.zoomus_webinar_id is not None and len(self.course.zoomus_webinar_id):\n invite_to_zoomus.delay(\n webinar_id=self.course.zoomus_webinar_id,\n user_id=self.user.id,\n )\n\n def send_welcome_letter(self):\n if self.welcome_letter_template_id is not None:\n send_mail.delay(\n to=self.user.email,\n 
template_id=self.welcome_letter_template_id,\n ctx=self.get_template_context(),\n disable_antispam=True,\n )\n\n def get_template_context(self) -> dict:\n return {\n 'name': self.course.name,\n 'slug': self.course.slug,\n 'name_genitive': self.course.name_genitive,\n **self.get_gift_template_context(),\n }\n\n @property\n def welcome_letter_template_id(self) -> Optional[str]:\n \"\"\"Get special gift template letter id if order is a gift and it is present\"\"\"\n template_id = self.course.welcome_letter_template_id\n\n if self.order is not None and self.order.giver is not None: # this is a gift\n template_id = self.course.gift_welcome_letter_template_id or self.course.welcome_letter_template_id\n\n if template_id is None or not len(template_id): # fuck this null=True in CharFields\n return None\n\n return template_id\n", "path": "src/shipping/shipments/course.py"}], "after_files": [{"content": "from magnets.models import EmailLeadMagnetCampaign, LeadCampaignLogEntry\nfrom users.creator import UserCreator\nfrom users.models import User\n\n\nclass LeadCreator:\n def __init__(self, campaign: EmailLeadMagnetCampaign, email: str, name: str = None):\n self.data = {\n 'name': name,\n 'email': email,\n }\n\n self.campaign = campaign\n\n def __call__(self):\n self.user = self._create_user()\n self._create_log_entry()\n\n self.campaign.execute(self.user)\n\n def _create_user(self) -> User:\n return UserCreator(\n name=self.data['name'],\n email=self.data['email'],\n subscribe=True,\n tags=self.tags,\n )()\n\n def _create_log_entry(self):\n LeadCampaignLogEntry.objects.create(\n user=self.user,\n campaign=self.campaign,\n )\n\n @property\n def tags(self):\n return [f'{self.campaign.slug}-lead-magnet']\n", "path": "src/magnets/creator.py"}, {"content": "from typing import Optional\n\nfrom app.tasks import invite_to_clickmeeting, invite_to_zoomus, send_mail, subscribe_to_mailchimp\nfrom products.models import Course\nfrom shipping import factory\nfrom shipping.shipments.base import BaseShipment\n\n\[email protected](Course)\nclass CourseShipment(BaseShipment):\n @property\n def course(self):\n return self.stuff_to_ship\n\n def ship(self):\n self.invite_to_clickmeeting()\n self.invite_to_zoomus()\n self.subscribe_to_mailchimp()\n\n self.send_welcome_letter()\n\n def subscribe_to_mailchimp(self):\n if self.course.mailchimp_list_id is not None:\n subscribe_to_mailchimp.delay(\n list_id=self.course.mailchimp_list_id,\n user_id=self.user.pk,\n tags=[self.course.slug, f'{self.course.slug}-purchased'],\n )\n\n def invite_to_clickmeeting(self):\n if self.course.clickmeeting_room_url is not None:\n invite_to_clickmeeting.delay(\n room_url=self.course.clickmeeting_room_url,\n email=self.user.email,\n )\n\n def invite_to_zoomus(self):\n if self.course.zoomus_webinar_id is not None and len(self.course.zoomus_webinar_id):\n invite_to_zoomus.delay(\n webinar_id=self.course.zoomus_webinar_id,\n user_id=self.user.id,\n )\n\n def send_welcome_letter(self):\n if self.welcome_letter_template_id is not None:\n send_mail.delay(\n to=self.user.email,\n template_id=self.welcome_letter_template_id,\n ctx=self.get_template_context(),\n disable_antispam=True,\n )\n\n def get_template_context(self) -> dict:\n return {\n 'name': self.course.name,\n 'slug': self.course.slug,\n 'name_genitive': self.course.name_genitive,\n **self.get_gift_template_context(),\n }\n\n @property\n def welcome_letter_template_id(self) -> Optional[str]:\n \"\"\"Get special gift template letter id if order is a gift and it is present\"\"\"\n template_id 
= self.course.welcome_letter_template_id\n\n if self.order is not None and self.order.giver is not None: # this is a gift\n template_id = self.course.gift_welcome_letter_template_id or self.course.welcome_letter_template_id\n\n if template_id is None or not len(template_id): # fuck this null=True in CharFields\n return None\n\n return template_id\n", "path": "src/shipping/shipments/course.py"}]}
num_tokens: 1,223
num_tokens_diff: 256
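The row above addresses a Russian-language issue asking for lead-magnet tags to be forwarded to the Mailchimp audience; the patch derives those tags from slugs. A tiny illustration of the tagging scheme the diff introduces ('python-101' is a made-up slug used only for this example):

```python
# Lead magnets: a user captured by campaign 'python-101' is created with
# tags=['python-101-lead-magnet'] (the LeadCreator.tags property in the patch).
campaign_slug = 'python-101'
lead_magnet_tags = [f'{campaign_slug}-lead-magnet']

# Course purchases: CourseShipment now subscribes the buyer with both the
# plain course slug and a '-purchased' tag.
course_slug = 'python-101'
purchase_tags = [course_slug, f'{course_slug}-purchased']

assert lead_magnet_tags == ['python-101-lead-magnet']
assert purchase_tags == ['python-101', 'python-101-purchased']
```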
problem_id: gh_patches_debug_23292
source: rasdani/github-patches
task_type: git_diff
in_source_id: PennyLaneAI__pennylane-327
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Missing plot lines in VQE tutorial #### Issue description The second and third plots in the VQE tutorial do not show the plot line of the gradient descent. The legends are shown, however. Is this problem coming from making this file a "run" file? https://pennylane.readthedocs.io/en/latest/tutorials/pennylane_run_variational_quantum_eigensolver.html#vqe * *Expected behavior:* The plots should show the gradient descent on top of the optimization landscape. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/pennylane_run_variational_quantum_eigensolver.py` Content: ``` 1 r""" 2 .. _vqe: 3 4 Variational quantum eigensolver 5 =============================== 6 7 This example demonstrates the principle of a variational quantum 8 eigensolver (VQE), originally proposed in `Peruzzo et al. 9 (2014) <https://www.nature.com/articles/ncomms5213>`__. To showcase the 10 hybrid computational capabilities of PennyLane, we first train a quantum 11 circuit to minimize the squared energy expectation for a Hamiltonian 12 :math:`H`, 13 14 .. math:: 15 16 \langle \psi_v | H | \psi_v \rangle^2 =( 0.1 \langle \psi_{v} | X_2 | 17 \psi_v \rangle + 0.5 \langle \psi_v | Y_2 | \psi_v \rangle )^2. 18 19 Here, :math:`|\psi_v\rangle` is the state 20 obtained after applying a quantum circuit to an initial state 21 :math:`|0\rangle`. The quantum circuit depends on trainable variables 22 :math:`v = \{v_1, v_2\}`, and :math:`X_2`, :math:`Y_2` denote the 23 Pauli-X and Pauli-Y operator acting on the second qubit (*Note: We apply 24 the square to make the optimization landscapes more interesting, but in 25 common applications the cost is directly the energy expectation value*). 26 27 After doing this, we will then turn things around and use a fixed 28 quantum circuit to prepare a state :math:`|\psi\rangle`, but train the coefficients of 29 the Hamiltonian to minimize 30 31 .. math:: 32 33 \langle \psi | H | \psi \rangle^2 = (v_1 \langle \psi | X_2 | \psi 34 \rangle + v_2 \langle \psi | Y_2 | \psi \rangle )^2 . 35 """ 36 37 ############################################################################## 38 # 1. Optimizing the quantum circuit 39 # --------------------------------- 40 # 41 # Imports 42 # ~~~~~~~ 43 # 44 # We begin by importing PennyLane, the PennyLane-wrapped version of NumPy, 45 # and the GradientDescentOptimizer. 46 47 import pennylane as qml 48 from pennylane import numpy as np 49 from pennylane.optimize import GradientDescentOptimizer 50 51 ############################################################################## 52 # We use the default qubit simulator as a device. 53 54 dev = qml.device("default.qubit", wires=2) 55 56 ############################################################################## 57 # Quantum nodes 58 # ~~~~~~~~~~~~~ 59 # 60 # The quantum circuit of the variational eigensolver is an ansatz that 61 # defines a manifold of possible quantum states. We use a Hadamard, two 62 # rotations and a CNOT gate to construct our circuit. 63 64 65 def ansatz(var): 66 qml.Rot(0.3, 1.8, 5.4, wires=1) 67 qml.RX(var[0], wires=0) 68 qml.RY(var[1], wires=1) 69 qml.CNOT(wires=[0, 1]) 70 71 72 ############################################################################## 73 # A variational eigensolver requires us to evaluate expectations of 74 # different Pauli operators. 
In this example, the Hamiltonian is expressed 75 # by only two single-qubit Pauli operators, namely the X and Y operator 76 # applied to the first qubit. 77 # 78 # Since these operators will be measured on the same wire, we will need to 79 # create two quantum nodes (one for each operator whose expectation value 80 # we measure), but we can reuse the same device. 81 # 82 # .. note:: 83 # 84 # If the Pauli observables were evaluated on different wires, we 85 # could use one quantum node and return a tuple of expectations in only 86 # one quantum node: 87 # ``return qml.expectation.PauliX(0), qml.expectation.PauliY(1)`` 88 89 90 @qml.qnode(dev) 91 def circuit_X(var): 92 ansatz(var) 93 return qml.expval(qml.PauliX(1)) 94 95 96 @qml.qnode(dev) 97 def circuit_Y(var): 98 ansatz(var) 99 return qml.expval(qml.PauliY(1)) 100 101 102 ############################################################################## 103 # Objective 104 # ~~~~~~~~~ 105 106 # The cost function to be optimized in VQE is simply a linear combination 107 # of the expectations, which defines the expectation of the Hamiltonian we 108 # are interested in. In our case, we square this cost function to provide 109 # a more interesting landscape with the same minima. 110 111 112 def cost(var): 113 expX = circuit_X(var) 114 expY = circuit_Y(var) 115 return (0.1 * expX + 0.5 * expY) ** 2 116 117 118 ############################################################################## 119 # This cost defines the following landscape: 120 # 121 # *Note: To run the following cell you need the matplotlib library.* 122 123 import matplotlib.pyplot as plt 124 from mpl_toolkits.mplot3d import Axes3D 125 from matplotlib import cm 126 from matplotlib.ticker import MaxNLocator 127 128 fig = plt.figure(figsize=(6, 4)) 129 ax = fig.gca(projection="3d") 130 131 X = np.linspace(-3.0, 3.0, 20) 132 Y = np.linspace(-3.0, 3.0, 20) 133 xx, yy = np.meshgrid(X, Y) 134 Z = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X)) 135 surf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False) 136 137 ax.set_xlabel("v1") 138 ax.set_ylabel("v2") 139 ax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune="lower")) 140 141 plt.show() 142 143 ############################################################################## 144 # Optimization 145 # ~~~~~~~~~~~~ 146 # 147 # We create a GradientDescentOptimizer and use it to optimize the cost 148 # function. 149 150 opt = GradientDescentOptimizer(0.5) 151 152 var = [0.3, 2.5] 153 var_gd = [var] 154 for it in range(20): 155 var = opt.step(cost, var) 156 var_gd.append(var) 157 158 print( 159 "Cost after step {:5d}: {: .7f} | Variables: [{: .5f},{: .5f}]".format( 160 it + 1, cost(var), var[0], var[1] 161 ) 162 ) 163 164 ############################################################################## 165 # We can plot the path that the variables took during gradient descent. To 166 # make the plot more clear, we will shorten the range for :math:`v_2`. 
167 168 fig = plt.figure(figsize=(6, 4)) 169 ax = fig.gca(projection="3d") 170 171 X = np.linspace(-3, np.pi / 2, 20) 172 Y = np.linspace(-3, 3, 20) 173 xx, yy = np.meshgrid(X, Y) 174 Z = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X)) 175 surf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False) 176 177 path_z = [cost(var) + 1e-8 for var in var_gd] 178 path_x = [v[0] for v in var_gd] 179 path_y = [v[1] for v in var_gd] 180 ax.plot(path_x, path_y, path_z, c="green", marker=".", label="graddesc") 181 182 ax.set_xlabel("v1") 183 ax.set_ylabel("v2") 184 ax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune="lower")) 185 186 plt.legend() 187 plt.show() 188 189 190 ############################################################################## 191 # 2. Optimizing the Hamiltonian coefficients 192 # ------------------------------------------ 193 # 194 # Instead of optimizing the circuit parameters, we can also use a fixed 195 # circuit, 196 197 198 def ansatz(): 199 qml.Rot(0.3, 1.8, 5.4, wires=1) 200 qml.RX(-0.5, wires=0) 201 qml.RY(0.5, wires=1) 202 qml.CNOT(wires=[0, 1]) 203 204 205 @qml.qnode(dev) 206 def circuit_X(): 207 ansatz() 208 return qml.expval(qml.PauliX(1)) 209 210 211 @qml.qnode(dev) 212 def circuit_Y(): 213 ansatz() 214 return qml.expval(qml.PauliY(1)) 215 216 217 ############################################################################## 218 # and make the classical coefficients that appear in the Hamiltonian the 219 # trainable variables. 220 221 222 def cost(var): 223 expX = circuit_X() 224 expY = circuit_Y() 225 return (var[0] * expX + var[1] * expY) ** 2 226 227 228 opt = GradientDescentOptimizer(0.5) 229 230 var = [0.3, 2.5] 231 var_gd = [var] 232 for it in range(20): 233 var = opt.step(cost, var) 234 var_gd.append(var) 235 236 print( 237 "Cost after step {:5d}: {: .7f} | Variables: [{: .5f},{: .5f}]".format( 238 it + 1, cost(var), var[0], var[1] 239 ) 240 ) 241 242 ############################################################################## 243 # The landscape has a quadratic shape. 244 245 fig = plt.figure(figsize=(6, 4)) 246 ax = fig.gca(projection="3d") 247 248 X = np.linspace(-3, np.pi / 2, 20) 249 Y = np.linspace(-3, 3, 20) 250 xx, yy = np.meshgrid(X, Y) 251 Z = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X)) 252 surf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False) 253 254 path_z = [cost(var) + 1e-8 for var in var_gd] 255 path_x = [v[0] for v in var_gd] 256 path_y = [v[1] for v in var_gd] 257 ax.plot(path_x, path_y, path_z, c="pink", marker=".", label="graddesc") 258 259 ax.set_xlabel("v1") 260 ax.set_ylabel("v2") 261 ax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune="lower")) 262 263 plt.legend() 264 plt.show() 265 266 267 ############################################################################## 268 # 3. Optimizing classical and quantum parameters 269 # ---------------------------------------------- 270 271 # Finally, we can optimize *classical* and *quantum* weights together by 272 # combining the two approaches from above. 
273 274 275 def ansatz(var): 276 277 qml.Rot(0.3, 1.8, 5.4, wires=1) 278 qml.RX(var[0], wires=0) 279 qml.RY(var[1], wires=1) 280 qml.CNOT(wires=[0, 1]) 281 282 283 @qml.qnode(dev) 284 def circuit_X(var): 285 ansatz(var) 286 return qml.expval(qml.PauliX(1)) 287 288 289 @qml.qnode(dev) 290 def circuit_Y(var): 291 ansatz(var) 292 return qml.expval(qml.PauliY(1)) 293 294 295 def cost(var): 296 297 expX = circuit_X(var) 298 expY = circuit_Y(var) 299 300 return (var[2] * expX + var[3] * expY) ** 2 301 302 303 opt = GradientDescentOptimizer(0.5) 304 var = [0.3, 2.5, 0.3, 2.5] 305 306 for it in range(10): 307 var = opt.step(cost, var) 308 print("Cost after step {:5d}: {: 0.7f}".format(it + 1, cost(var))) 309 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/pennylane_run_variational_quantum_eigensolver.py b/examples/pennylane_run_variational_quantum_eigensolver.py --- a/examples/pennylane_run_variational_quantum_eigensolver.py +++ b/examples/pennylane_run_variational_quantum_eigensolver.py @@ -172,12 +172,13 @@ Y = np.linspace(-3, 3, 20) xx, yy = np.meshgrid(X, Y) Z = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X)) + surf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False) path_z = [cost(var) + 1e-8 for var in var_gd] path_x = [v[0] for v in var_gd] path_y = [v[1] for v in var_gd] -ax.plot(path_x, path_y, path_z, c="green", marker=".", label="graddesc") +ax.plot(path_x, path_y, path_z, c="green", marker=".", label="graddesc", zorder=10) ax.set_xlabel("v1") ax.set_ylabel("v2") @@ -254,7 +255,7 @@ path_z = [cost(var) + 1e-8 for var in var_gd] path_x = [v[0] for v in var_gd] path_y = [v[1] for v in var_gd] -ax.plot(path_x, path_y, path_z, c="pink", marker=".", label="graddesc") +ax.plot(path_x, path_y, path_z, c="pink", marker=".", label="graddesc", zorder=10) ax.set_xlabel("v1") ax.set_ylabel("v2")
{"golden_diff": "diff --git a/examples/pennylane_run_variational_quantum_eigensolver.py b/examples/pennylane_run_variational_quantum_eigensolver.py\n--- a/examples/pennylane_run_variational_quantum_eigensolver.py\n+++ b/examples/pennylane_run_variational_quantum_eigensolver.py\n@@ -172,12 +172,13 @@\n Y = np.linspace(-3, 3, 20)\n xx, yy = np.meshgrid(X, Y)\n Z = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))\n+\n surf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)\n \n path_z = [cost(var) + 1e-8 for var in var_gd]\n path_x = [v[0] for v in var_gd]\n path_y = [v[1] for v in var_gd]\n-ax.plot(path_x, path_y, path_z, c=\"green\", marker=\".\", label=\"graddesc\")\n+ax.plot(path_x, path_y, path_z, c=\"green\", marker=\".\", label=\"graddesc\", zorder=10)\n \n ax.set_xlabel(\"v1\")\n ax.set_ylabel(\"v2\")\n@@ -254,7 +255,7 @@\n path_z = [cost(var) + 1e-8 for var in var_gd]\n path_x = [v[0] for v in var_gd]\n path_y = [v[1] for v in var_gd]\n-ax.plot(path_x, path_y, path_z, c=\"pink\", marker=\".\", label=\"graddesc\")\n+ax.plot(path_x, path_y, path_z, c=\"pink\", marker=\".\", label=\"graddesc\", zorder=10)\n \n ax.set_xlabel(\"v1\")\n ax.set_ylabel(\"v2\")\n", "issue": "Missing plot lines in VQE tutorial \n#### Issue description\r\n\r\nThe second and third plots in the VQE tutorial do not show the plot line of the gradient descent. The legends are shown, however. \r\n\r\nIs this problem coming from making this file a \"run\" file? \r\n\r\nhttps://pennylane.readthedocs.io/en/latest/tutorials/pennylane_run_variational_quantum_eigensolver.html#vqe\r\n\r\n* *Expected behavior:*\r\n\r\nThe plots should show the gradient descent on top of the optimization landscape. \r\n\r\n\n", "before_files": [{"content": "r\"\"\"\n.. _vqe:\n\nVariational quantum eigensolver\n===============================\n\nThis example demonstrates the principle of a variational quantum\neigensolver (VQE), originally proposed in `Peruzzo et al.\n(2014) <https://www.nature.com/articles/ncomms5213>`__. To showcase the\nhybrid computational capabilities of PennyLane, we first train a quantum\ncircuit to minimize the squared energy expectation for a Hamiltonian\n:math:`H`,\n\n.. math::\n\n \\langle \\psi_v | H | \\psi_v \\rangle^2 =( 0.1 \\langle \\psi_{v} | X_2 |\n \\psi_v \\rangle + 0.5 \\langle \\psi_v | Y_2 | \\psi_v \\rangle )^2.\n\nHere, :math:`|\\psi_v\\rangle` is the state\nobtained after applying a quantum circuit to an initial state\n:math:`|0\\rangle`. The quantum circuit depends on trainable variables\n:math:`v = \\{v_1, v_2\\}`, and :math:`X_2`, :math:`Y_2` denote the\nPauli-X and Pauli-Y operator acting on the second qubit (*Note: We apply\nthe square to make the optimization landscapes more interesting, but in\ncommon applications the cost is directly the energy expectation value*).\n\nAfter doing this, we will then turn things around and use a fixed\nquantum circuit to prepare a state :math:`|\\psi\\rangle`, but train the coefficients of\nthe Hamiltonian to minimize\n\n.. math::\n\n \\langle \\psi | H | \\psi \\rangle^2 = (v_1 \\langle \\psi | X_2 | \\psi\n \\rangle + v_2 \\langle \\psi | Y_2 | \\psi \\rangle )^2 .\n\"\"\"\n\n##############################################################################\n# 1. 
Optimizing the quantum circuit\n# ---------------------------------\n#\n# Imports\n# ~~~~~~~\n#\n# We begin by importing PennyLane, the PennyLane-wrapped version of NumPy,\n# and the GradientDescentOptimizer.\n\nimport pennylane as qml\nfrom pennylane import numpy as np\nfrom pennylane.optimize import GradientDescentOptimizer\n\n##############################################################################\n# We use the default qubit simulator as a device.\n\ndev = qml.device(\"default.qubit\", wires=2)\n\n##############################################################################\n# Quantum nodes\n# ~~~~~~~~~~~~~\n#\n# The quantum circuit of the variational eigensolver is an ansatz that\n# defines a manifold of possible quantum states. We use a Hadamard, two\n# rotations and a CNOT gate to construct our circuit.\n\n\ndef ansatz(var):\n qml.Rot(0.3, 1.8, 5.4, wires=1)\n qml.RX(var[0], wires=0)\n qml.RY(var[1], wires=1)\n qml.CNOT(wires=[0, 1])\n\n\n##############################################################################\n# A variational eigensolver requires us to evaluate expectations of\n# different Pauli operators. In this example, the Hamiltonian is expressed\n# by only two single-qubit Pauli operators, namely the X and Y operator\n# applied to the first qubit.\n#\n# Since these operators will be measured on the same wire, we will need to\n# create two quantum nodes (one for each operator whose expectation value\n# we measure), but we can reuse the same device.\n#\n# .. note::\n#\n# If the Pauli observables were evaluated on different wires, we\n# could use one quantum node and return a tuple of expectations in only\n# one quantum node:\n# ``return qml.expectation.PauliX(0), qml.expectation.PauliY(1)``\n\n\[email protected](dev)\ndef circuit_X(var):\n ansatz(var)\n return qml.expval(qml.PauliX(1))\n\n\[email protected](dev)\ndef circuit_Y(var):\n ansatz(var)\n return qml.expval(qml.PauliY(1))\n\n\n##############################################################################\n# Objective\n# ~~~~~~~~~\n\n# The cost function to be optimized in VQE is simply a linear combination\n# of the expectations, which defines the expectation of the Hamiltonian we\n# are interested in. 
In our case, we square this cost function to provide\n# a more interesting landscape with the same minima.\n\n\ndef cost(var):\n expX = circuit_X(var)\n expY = circuit_Y(var)\n return (0.1 * expX + 0.5 * expY) ** 2\n\n\n##############################################################################\n# This cost defines the following landscape:\n#\n# *Note: To run the following cell you need the matplotlib library.*\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import MaxNLocator\n\nfig = plt.figure(figsize=(6, 4))\nax = fig.gca(projection=\"3d\")\n\nX = np.linspace(-3.0, 3.0, 20)\nY = np.linspace(-3.0, 3.0, 20)\nxx, yy = np.meshgrid(X, Y)\nZ = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))\nsurf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)\n\nax.set_xlabel(\"v1\")\nax.set_ylabel(\"v2\")\nax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune=\"lower\"))\n\nplt.show()\n\n##############################################################################\n# Optimization\n# ~~~~~~~~~~~~\n#\n# We create a GradientDescentOptimizer and use it to optimize the cost\n# function.\n\nopt = GradientDescentOptimizer(0.5)\n\nvar = [0.3, 2.5]\nvar_gd = [var]\nfor it in range(20):\n var = opt.step(cost, var)\n var_gd.append(var)\n\n print(\n \"Cost after step {:5d}: {: .7f} | Variables: [{: .5f},{: .5f}]\".format(\n it + 1, cost(var), var[0], var[1]\n )\n )\n\n##############################################################################\n# We can plot the path that the variables took during gradient descent. To\n# make the plot more clear, we will shorten the range for :math:`v_2`.\n\nfig = plt.figure(figsize=(6, 4))\nax = fig.gca(projection=\"3d\")\n\nX = np.linspace(-3, np.pi / 2, 20)\nY = np.linspace(-3, 3, 20)\nxx, yy = np.meshgrid(X, Y)\nZ = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))\nsurf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)\n\npath_z = [cost(var) + 1e-8 for var in var_gd]\npath_x = [v[0] for v in var_gd]\npath_y = [v[1] for v in var_gd]\nax.plot(path_x, path_y, path_z, c=\"green\", marker=\".\", label=\"graddesc\")\n\nax.set_xlabel(\"v1\")\nax.set_ylabel(\"v2\")\nax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune=\"lower\"))\n\nplt.legend()\nplt.show()\n\n\n##############################################################################\n# 2. 
Optimizing the Hamiltonian coefficients\n# ------------------------------------------\n#\n# Instead of optimizing the circuit parameters, we can also use a fixed\n# circuit,\n\n\ndef ansatz():\n qml.Rot(0.3, 1.8, 5.4, wires=1)\n qml.RX(-0.5, wires=0)\n qml.RY(0.5, wires=1)\n qml.CNOT(wires=[0, 1])\n\n\[email protected](dev)\ndef circuit_X():\n ansatz()\n return qml.expval(qml.PauliX(1))\n\n\[email protected](dev)\ndef circuit_Y():\n ansatz()\n return qml.expval(qml.PauliY(1))\n\n\n##############################################################################\n# and make the classical coefficients that appear in the Hamiltonian the\n# trainable variables.\n\n\ndef cost(var):\n expX = circuit_X()\n expY = circuit_Y()\n return (var[0] * expX + var[1] * expY) ** 2\n\n\nopt = GradientDescentOptimizer(0.5)\n\nvar = [0.3, 2.5]\nvar_gd = [var]\nfor it in range(20):\n var = opt.step(cost, var)\n var_gd.append(var)\n\n print(\n \"Cost after step {:5d}: {: .7f} | Variables: [{: .5f},{: .5f}]\".format(\n it + 1, cost(var), var[0], var[1]\n )\n )\n\n##############################################################################\n# The landscape has a quadratic shape.\n\nfig = plt.figure(figsize=(6, 4))\nax = fig.gca(projection=\"3d\")\n\nX = np.linspace(-3, np.pi / 2, 20)\nY = np.linspace(-3, 3, 20)\nxx, yy = np.meshgrid(X, Y)\nZ = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))\nsurf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)\n\npath_z = [cost(var) + 1e-8 for var in var_gd]\npath_x = [v[0] for v in var_gd]\npath_y = [v[1] for v in var_gd]\nax.plot(path_x, path_y, path_z, c=\"pink\", marker=\".\", label=\"graddesc\")\n\nax.set_xlabel(\"v1\")\nax.set_ylabel(\"v2\")\nax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune=\"lower\"))\n\nplt.legend()\nplt.show()\n\n\n##############################################################################\n# 3. Optimizing classical and quantum parameters\n# ----------------------------------------------\n\n# Finally, we can optimize *classical* and *quantum* weights together by\n# combining the two approaches from above.\n\n\ndef ansatz(var):\n\n qml.Rot(0.3, 1.8, 5.4, wires=1)\n qml.RX(var[0], wires=0)\n qml.RY(var[1], wires=1)\n qml.CNOT(wires=[0, 1])\n\n\[email protected](dev)\ndef circuit_X(var):\n ansatz(var)\n return qml.expval(qml.PauliX(1))\n\n\[email protected](dev)\ndef circuit_Y(var):\n ansatz(var)\n return qml.expval(qml.PauliY(1))\n\n\ndef cost(var):\n\n expX = circuit_X(var)\n expY = circuit_Y(var)\n\n return (var[2] * expX + var[3] * expY) ** 2\n\n\nopt = GradientDescentOptimizer(0.5)\nvar = [0.3, 2.5, 0.3, 2.5]\n\nfor it in range(10):\n var = opt.step(cost, var)\n print(\"Cost after step {:5d}: {: 0.7f}\".format(it + 1, cost(var)))\n", "path": "examples/pennylane_run_variational_quantum_eigensolver.py"}], "after_files": [{"content": "r\"\"\"\n.. _vqe:\n\nVariational quantum eigensolver\n===============================\n\nThis example demonstrates the principle of a variational quantum\neigensolver (VQE), originally proposed in `Peruzzo et al.\n(2014) <https://www.nature.com/articles/ncomms5213>`__. To showcase the\nhybrid computational capabilities of PennyLane, we first train a quantum\ncircuit to minimize the squared energy expectation for a Hamiltonian\n:math:`H`,\n\n.. 
math::\n\n \\langle \\psi_v | H | \\psi_v \\rangle^2 =( 0.1 \\langle \\psi_{v} | X_2 |\n \\psi_v \\rangle + 0.5 \\langle \\psi_v | Y_2 | \\psi_v \\rangle )^2.\n\nHere, :math:`|\\psi_v\\rangle` is the state\nobtained after applying a quantum circuit to an initial state\n:math:`|0\\rangle`. The quantum circuit depends on trainable variables\n:math:`v = \\{v_1, v_2\\}`, and :math:`X_2`, :math:`Y_2` denote the\nPauli-X and Pauli-Y operator acting on the second qubit (*Note: We apply\nthe square to make the optimization landscapes more interesting, but in\ncommon applications the cost is directly the energy expectation value*).\n\nAfter doing this, we will then turn things around and use a fixed\nquantum circuit to prepare a state :math:`|\\psi\\rangle`, but train the coefficients of\nthe Hamiltonian to minimize\n\n.. math::\n\n \\langle \\psi | H | \\psi \\rangle^2 = (v_1 \\langle \\psi | X_2 | \\psi\n \\rangle + v_2 \\langle \\psi | Y_2 | \\psi \\rangle )^2 .\n\"\"\"\n\n##############################################################################\n# 1. Optimizing the quantum circuit\n# ---------------------------------\n#\n# Imports\n# ~~~~~~~\n#\n# We begin by importing PennyLane, the PennyLane-wrapped version of NumPy,\n# and the GradientDescentOptimizer.\n\nimport pennylane as qml\nfrom pennylane import numpy as np\nfrom pennylane.optimize import GradientDescentOptimizer\n\n##############################################################################\n# We use the default qubit simulator as a device.\n\ndev = qml.device(\"default.qubit\", wires=2)\n\n##############################################################################\n# Quantum nodes\n# ~~~~~~~~~~~~~\n#\n# The quantum circuit of the variational eigensolver is an ansatz that\n# defines a manifold of possible quantum states. We use a Hadamard, two\n# rotations and a CNOT gate to construct our circuit.\n\n\ndef ansatz(var):\n qml.Rot(0.3, 1.8, 5.4, wires=1)\n qml.RX(var[0], wires=0)\n qml.RY(var[1], wires=1)\n qml.CNOT(wires=[0, 1])\n\n\n##############################################################################\n# A variational eigensolver requires us to evaluate expectations of\n# different Pauli operators. In this example, the Hamiltonian is expressed\n# by only two single-qubit Pauli operators, namely the X and Y operator\n# applied to the first qubit.\n#\n# Since these operators will be measured on the same wire, we will need to\n# create two quantum nodes (one for each operator whose expectation value\n# we measure), but we can reuse the same device.\n#\n# .. note::\n#\n# If the Pauli observables were evaluated on different wires, we\n# could use one quantum node and return a tuple of expectations in only\n# one quantum node:\n# ``return qml.expectation.PauliX(0), qml.expectation.PauliY(1)``\n\n\[email protected](dev)\ndef circuit_X(var):\n ansatz(var)\n return qml.expval(qml.PauliX(1))\n\n\[email protected](dev)\ndef circuit_Y(var):\n ansatz(var)\n return qml.expval(qml.PauliY(1))\n\n\n##############################################################################\n# Objective\n# ~~~~~~~~~\n\n# The cost function to be optimized in VQE is simply a linear combination\n# of the expectations, which defines the expectation of the Hamiltonian we\n# are interested in. 
In our case, we square this cost function to provide\n# a more interesting landscape with the same minima.\n\n\ndef cost(var):\n expX = circuit_X(var)\n expY = circuit_Y(var)\n return (0.1 * expX + 0.5 * expY) ** 2\n\n\n##############################################################################\n# This cost defines the following landscape:\n#\n# *Note: To run the following cell you need the matplotlib library.*\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import MaxNLocator\n\nfig = plt.figure(figsize=(6, 4))\nax = fig.gca(projection=\"3d\")\n\nX = np.linspace(-3.0, 3.0, 20)\nY = np.linspace(-3.0, 3.0, 20)\nxx, yy = np.meshgrid(X, Y)\nZ = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))\nsurf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)\n\nax.set_xlabel(\"v1\")\nax.set_ylabel(\"v2\")\nax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune=\"lower\"))\n\nplt.show()\n\n##############################################################################\n# Optimization\n# ~~~~~~~~~~~~\n#\n# We create a GradientDescentOptimizer and use it to optimize the cost\n# function.\n\nopt = GradientDescentOptimizer(0.5)\n\nvar = [0.3, 2.5]\nvar_gd = [var]\nfor it in range(20):\n var = opt.step(cost, var)\n var_gd.append(var)\n\n print(\n \"Cost after step {:5d}: {: .7f} | Variables: [{: .5f},{: .5f}]\".format(\n it + 1, cost(var), var[0], var[1]\n )\n )\n\n##############################################################################\n# We can plot the path that the variables took during gradient descent. To\n# make the plot more clear, we will shorten the range for :math:`v_2`.\n\nfig = plt.figure(figsize=(6, 4))\nax = fig.gca(projection=\"3d\")\n\nX = np.linspace(-3, np.pi / 2, 20)\nY = np.linspace(-3, 3, 20)\nxx, yy = np.meshgrid(X, Y)\nZ = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))\n\nsurf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)\n\npath_z = [cost(var) + 1e-8 for var in var_gd]\npath_x = [v[0] for v in var_gd]\npath_y = [v[1] for v in var_gd]\nax.plot(path_x, path_y, path_z, c=\"green\", marker=\".\", label=\"graddesc\", zorder=10)\n\nax.set_xlabel(\"v1\")\nax.set_ylabel(\"v2\")\nax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune=\"lower\"))\n\nplt.legend()\nplt.show()\n\n\n##############################################################################\n# 2. 
Optimizing the Hamiltonian coefficients\n# ------------------------------------------\n#\n# Instead of optimizing the circuit parameters, we can also use a fixed\n# circuit,\n\n\ndef ansatz():\n qml.Rot(0.3, 1.8, 5.4, wires=1)\n qml.RX(-0.5, wires=0)\n qml.RY(0.5, wires=1)\n qml.CNOT(wires=[0, 1])\n\n\[email protected](dev)\ndef circuit_X():\n ansatz()\n return qml.expval(qml.PauliX(1))\n\n\[email protected](dev)\ndef circuit_Y():\n ansatz()\n return qml.expval(qml.PauliY(1))\n\n\n##############################################################################\n# and make the classical coefficients that appear in the Hamiltonian the\n# trainable variables.\n\n\ndef cost(var):\n expX = circuit_X()\n expY = circuit_Y()\n return (var[0] * expX + var[1] * expY) ** 2\n\n\nopt = GradientDescentOptimizer(0.5)\n\nvar = [0.3, 2.5]\nvar_gd = [var]\nfor it in range(20):\n var = opt.step(cost, var)\n var_gd.append(var)\n\n print(\n \"Cost after step {:5d}: {: .7f} | Variables: [{: .5f},{: .5f}]\".format(\n it + 1, cost(var), var[0], var[1]\n )\n )\n\n##############################################################################\n# The landscape has a quadratic shape.\n\nfig = plt.figure(figsize=(6, 4))\nax = fig.gca(projection=\"3d\")\n\nX = np.linspace(-3, np.pi / 2, 20)\nY = np.linspace(-3, 3, 20)\nxx, yy = np.meshgrid(X, Y)\nZ = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))\nsurf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)\n\npath_z = [cost(var) + 1e-8 for var in var_gd]\npath_x = [v[0] for v in var_gd]\npath_y = [v[1] for v in var_gd]\nax.plot(path_x, path_y, path_z, c=\"pink\", marker=\".\", label=\"graddesc\", zorder=10)\n\nax.set_xlabel(\"v1\")\nax.set_ylabel(\"v2\")\nax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune=\"lower\"))\n\nplt.legend()\nplt.show()\n\n\n##############################################################################\n# 3. Optimizing classical and quantum parameters\n# ----------------------------------------------\n\n# Finally, we can optimize *classical* and *quantum* weights together by\n# combining the two approaches from above.\n\n\ndef ansatz(var):\n\n qml.Rot(0.3, 1.8, 5.4, wires=1)\n qml.RX(var[0], wires=0)\n qml.RY(var[1], wires=1)\n qml.CNOT(wires=[0, 1])\n\n\[email protected](dev)\ndef circuit_X(var):\n ansatz(var)\n return qml.expval(qml.PauliX(1))\n\n\[email protected](dev)\ndef circuit_Y(var):\n ansatz(var)\n return qml.expval(qml.PauliY(1))\n\n\ndef cost(var):\n\n expX = circuit_X(var)\n expY = circuit_Y(var)\n\n return (var[2] * expX + var[3] * expY) ** 2\n\n\nopt = GradientDescentOptimizer(0.5)\nvar = [0.3, 2.5, 0.3, 2.5]\n\nfor it in range(10):\n var = opt.step(cost, var)\n print(\"Cost after step {:5d}: {: 0.7f}\".format(it + 1, cost(var)))\n", "path": "examples/pennylane_run_variational_quantum_eigensolver.py"}]}
3,728
426
gh_patches_debug_4029
rasdani/github-patches
git_diff
saleor__saleor-723
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add checkout steps navigation Now there's no explicit navigation. Using browser back button can be dangerous in some cases and it's not a common behavior in ecommerce --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/order/views.py` Content: ``` 1 import logging 2 3 from django.conf import settings 4 from django.contrib import messages, auth 5 from django.db import transaction 6 from django.http import Http404, HttpResponseForbidden 7 from django.shortcuts import get_object_or_404, redirect 8 from django.utils.translation import ugettext as _ 9 from django.template.response import TemplateResponse 10 from payments import RedirectNeeded 11 12 from .forms import PaymentDeleteForm, PaymentMethodsForm, PasswordForm 13 from .models import Order, Payment 14 from ..core.utils import get_client_ip 15 from .utils import check_order_status 16 17 logger = logging.getLogger(__name__) 18 19 20 def details(request, token): 21 orders = Order.objects.prefetch_related('groups__items') 22 order = get_object_or_404(orders, token=token) 23 groups = order.groups.all() 24 return TemplateResponse(request, 'order/details.html', 25 {'order': order, 'groups': groups}) 26 27 28 def payment(request, token): 29 orders = Order.objects.prefetch_related('groups__items') 30 order = get_object_or_404(orders, token=token) 31 groups = order.groups.all() 32 payments = order.payments.all() 33 form_data = request.POST or None 34 try: 35 waiting_payment = order.payments.get(status='waiting') 36 except Payment.DoesNotExist: 37 waiting_payment = None 38 waiting_payment_form = None 39 else: 40 form_data = None 41 waiting_payment_form = PaymentDeleteForm( 42 None, order=order, initial={'payment_id': waiting_payment.id}) 43 if order.is_fully_paid(): 44 form_data = None 45 payment_form = None 46 if not order.is_pre_authorized(): 47 payment_form = PaymentMethodsForm(form_data) 48 # FIXME: redirect if there is only one payment method 49 if payment_form.is_valid(): 50 payment_method = payment_form.cleaned_data['method'] 51 return redirect('order:payment', token=order.token, 52 variant=payment_method) 53 return TemplateResponse(request, 'order/payment.html', 54 {'order': order, 'groups': groups, 55 'payment_form': payment_form, 56 'waiting_payment': waiting_payment, 57 'waiting_payment_form': waiting_payment_form, 58 'payments': payments}) 59 60 61 @check_order_status 62 def start_payment(request, order, variant): 63 waiting_payments = order.payments.filter(status='waiting').exists() 64 if waiting_payments: 65 return redirect('order:payment', token=order.token) 66 billing = order.billing_address 67 total = order.get_total() 68 defaults = {'total': total.gross, 69 'tax': total.tax, 'currency': total.currency, 70 'delivery': order.get_delivery_total().gross, 71 'billing_first_name': billing.first_name, 72 'billing_last_name': billing.last_name, 73 'billing_address_1': billing.street_address_1, 74 'billing_address_2': billing.street_address_2, 75 'billing_city': billing.city, 76 'billing_postcode': billing.postal_code, 77 'billing_country_code': billing.country, 78 'billing_email': order.user_email, 79 'description': _('Order %(order_number)s') % { 80 'order_number': order}, 81 'billing_country_area': billing.country_area, 82 'customer_ip_address': get_client_ip(request)} 83 variant_choices = settings.CHECKOUT_PAYMENT_CHOICES 84 
if variant not in [code for code, dummy_name in variant_choices]: 85 raise Http404('%r is not a valid payment variant' % (variant,)) 86 with transaction.atomic(): 87 order.change_status('payment-pending') 88 payment, dummy_created = Payment.objects.get_or_create( 89 variant=variant, status='waiting', order=order, defaults=defaults) 90 try: 91 form = payment.get_form(data=request.POST or None) 92 except RedirectNeeded as redirect_to: 93 return redirect(str(redirect_to)) 94 except Exception: 95 logger.exception('Error communicating with the payment gateway') 96 messages.error( 97 request, 98 _('Oops, it looks like we were unable to contact the selected' 99 ' payment service')) 100 payment.change_status('error') 101 return redirect('order:payment', token=order.token) 102 template = 'order/payment/%s.html' % variant 103 return TemplateResponse(request, [template, 'order/payment/default.html'], 104 {'form': form, 'payment': payment}) 105 106 107 @check_order_status 108 def cancel_payment(request, order): 109 form = PaymentDeleteForm(request.POST or None, order=order) 110 if form.is_valid(): 111 with transaction.atomic(): 112 form.save() 113 return redirect('order:payment', token=order.token) 114 return HttpResponseForbidden() 115 116 117 def create_password(request, token): 118 if request.user.is_authenticated(): 119 return redirect('order:details', token=token) 120 order = get_object_or_404(Order, token=token) 121 email = order.user_email 122 form_data = request.POST.copy() 123 if form_data: 124 form_data.update({'email': email}) 125 form = PasswordForm(form_data or None) 126 127 if form.is_valid(): 128 user = form.save(request) 129 order.user = user 130 order.save(update_fields=['user']) 131 password = form_data.get('password1') 132 auth_user = auth.authenticate(email=email, password=password) 133 if auth_user is not None: 134 auth.login(request, auth_user) 135 return redirect('order:details', token=token) 136 ctx = {'form': form, 'email': email} 137 return TemplateResponse(request, 'order/create_password.html', ctx) 138 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/order/views.py b/saleor/order/views.py --- a/saleor/order/views.py +++ b/saleor/order/views.py @@ -133,5 +133,5 @@ if auth_user is not None: auth.login(request, auth_user) return redirect('order:details', token=token) - ctx = {'form': form, 'email': email} + ctx = {'form': form, 'email': email, 'order': order} return TemplateResponse(request, 'order/create_password.html', ctx)
{"golden_diff": "diff --git a/saleor/order/views.py b/saleor/order/views.py\n--- a/saleor/order/views.py\n+++ b/saleor/order/views.py\n@@ -133,5 +133,5 @@\n if auth_user is not None:\n auth.login(request, auth_user)\n return redirect('order:details', token=token)\n- ctx = {'form': form, 'email': email}\n+ ctx = {'form': form, 'email': email, 'order': order}\n return TemplateResponse(request, 'order/create_password.html', ctx)\n", "issue": "Add checkout steps navigation\nNow there's no explicit navigation. Using browser back button can be dangerous in some cases and it's not a common behavior in ecommerce\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import messages, auth\nfrom django.db import transaction\nfrom django.http import Http404, HttpResponseForbidden\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.translation import ugettext as _\nfrom django.template.response import TemplateResponse\nfrom payments import RedirectNeeded\n\nfrom .forms import PaymentDeleteForm, PaymentMethodsForm, PasswordForm\nfrom .models import Order, Payment\nfrom ..core.utils import get_client_ip\nfrom .utils import check_order_status\n\nlogger = logging.getLogger(__name__)\n\n\ndef details(request, token):\n orders = Order.objects.prefetch_related('groups__items')\n order = get_object_or_404(orders, token=token)\n groups = order.groups.all()\n return TemplateResponse(request, 'order/details.html',\n {'order': order, 'groups': groups})\n\n\ndef payment(request, token):\n orders = Order.objects.prefetch_related('groups__items')\n order = get_object_or_404(orders, token=token)\n groups = order.groups.all()\n payments = order.payments.all()\n form_data = request.POST or None\n try:\n waiting_payment = order.payments.get(status='waiting')\n except Payment.DoesNotExist:\n waiting_payment = None\n waiting_payment_form = None\n else:\n form_data = None\n waiting_payment_form = PaymentDeleteForm(\n None, order=order, initial={'payment_id': waiting_payment.id})\n if order.is_fully_paid():\n form_data = None\n payment_form = None\n if not order.is_pre_authorized():\n payment_form = PaymentMethodsForm(form_data)\n # FIXME: redirect if there is only one payment method\n if payment_form.is_valid():\n payment_method = payment_form.cleaned_data['method']\n return redirect('order:payment', token=order.token,\n variant=payment_method)\n return TemplateResponse(request, 'order/payment.html',\n {'order': order, 'groups': groups,\n 'payment_form': payment_form,\n 'waiting_payment': waiting_payment,\n 'waiting_payment_form': waiting_payment_form,\n 'payments': payments})\n\n\n@check_order_status\ndef start_payment(request, order, variant):\n waiting_payments = order.payments.filter(status='waiting').exists()\n if waiting_payments:\n return redirect('order:payment', token=order.token)\n billing = order.billing_address\n total = order.get_total()\n defaults = {'total': total.gross,\n 'tax': total.tax, 'currency': total.currency,\n 'delivery': order.get_delivery_total().gross,\n 'billing_first_name': billing.first_name,\n 'billing_last_name': billing.last_name,\n 'billing_address_1': billing.street_address_1,\n 'billing_address_2': billing.street_address_2,\n 'billing_city': billing.city,\n 'billing_postcode': billing.postal_code,\n 'billing_country_code': billing.country,\n 'billing_email': order.user_email,\n 'description': _('Order %(order_number)s') % {\n 'order_number': order},\n 'billing_country_area': billing.country_area,\n 
'customer_ip_address': get_client_ip(request)}\n variant_choices = settings.CHECKOUT_PAYMENT_CHOICES\n if variant not in [code for code, dummy_name in variant_choices]:\n raise Http404('%r is not a valid payment variant' % (variant,))\n with transaction.atomic():\n order.change_status('payment-pending')\n payment, dummy_created = Payment.objects.get_or_create(\n variant=variant, status='waiting', order=order, defaults=defaults)\n try:\n form = payment.get_form(data=request.POST or None)\n except RedirectNeeded as redirect_to:\n return redirect(str(redirect_to))\n except Exception:\n logger.exception('Error communicating with the payment gateway')\n messages.error(\n request,\n _('Oops, it looks like we were unable to contact the selected'\n ' payment service'))\n payment.change_status('error')\n return redirect('order:payment', token=order.token)\n template = 'order/payment/%s.html' % variant\n return TemplateResponse(request, [template, 'order/payment/default.html'],\n {'form': form, 'payment': payment})\n\n\n@check_order_status\ndef cancel_payment(request, order):\n form = PaymentDeleteForm(request.POST or None, order=order)\n if form.is_valid():\n with transaction.atomic():\n form.save()\n return redirect('order:payment', token=order.token)\n return HttpResponseForbidden()\n\n\ndef create_password(request, token):\n if request.user.is_authenticated():\n return redirect('order:details', token=token)\n order = get_object_or_404(Order, token=token)\n email = order.user_email\n form_data = request.POST.copy()\n if form_data:\n form_data.update({'email': email})\n form = PasswordForm(form_data or None)\n\n if form.is_valid():\n user = form.save(request)\n order.user = user\n order.save(update_fields=['user'])\n password = form_data.get('password1')\n auth_user = auth.authenticate(email=email, password=password)\n if auth_user is not None:\n auth.login(request, auth_user)\n return redirect('order:details', token=token)\n ctx = {'form': form, 'email': email}\n return TemplateResponse(request, 'order/create_password.html', ctx)\n", "path": "saleor/order/views.py"}], "after_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import messages, auth\nfrom django.db import transaction\nfrom django.http import Http404, HttpResponseForbidden\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.translation import ugettext as _\nfrom django.template.response import TemplateResponse\nfrom payments import RedirectNeeded\n\nfrom .forms import PaymentDeleteForm, PaymentMethodsForm, PasswordForm\nfrom .models import Order, Payment\nfrom ..core.utils import get_client_ip\nfrom .utils import check_order_status\n\nlogger = logging.getLogger(__name__)\n\n\ndef details(request, token):\n orders = Order.objects.prefetch_related('groups__items')\n order = get_object_or_404(orders, token=token)\n groups = order.groups.all()\n return TemplateResponse(request, 'order/details.html',\n {'order': order, 'groups': groups})\n\n\ndef payment(request, token):\n orders = Order.objects.prefetch_related('groups__items')\n order = get_object_or_404(orders, token=token)\n groups = order.groups.all()\n payments = order.payments.all()\n form_data = request.POST or None\n try:\n waiting_payment = order.payments.get(status='waiting')\n except Payment.DoesNotExist:\n waiting_payment = None\n waiting_payment_form = None\n else:\n form_data = None\n waiting_payment_form = PaymentDeleteForm(\n None, order=order, initial={'payment_id': waiting_payment.id})\n if 
order.is_fully_paid():\n form_data = None\n payment_form = None\n if not order.is_pre_authorized():\n payment_form = PaymentMethodsForm(form_data)\n # FIXME: redirect if there is only one payment method\n if payment_form.is_valid():\n payment_method = payment_form.cleaned_data['method']\n return redirect('order:payment', token=order.token,\n variant=payment_method)\n return TemplateResponse(request, 'order/payment.html',\n {'order': order, 'groups': groups,\n 'payment_form': payment_form,\n 'waiting_payment': waiting_payment,\n 'waiting_payment_form': waiting_payment_form,\n 'payments': payments})\n\n\n@check_order_status\ndef start_payment(request, order, variant):\n waiting_payments = order.payments.filter(status='waiting').exists()\n if waiting_payments:\n return redirect('order:payment', token=order.token)\n billing = order.billing_address\n total = order.get_total()\n defaults = {'total': total.gross,\n 'tax': total.tax, 'currency': total.currency,\n 'delivery': order.get_delivery_total().gross,\n 'billing_first_name': billing.first_name,\n 'billing_last_name': billing.last_name,\n 'billing_address_1': billing.street_address_1,\n 'billing_address_2': billing.street_address_2,\n 'billing_city': billing.city,\n 'billing_postcode': billing.postal_code,\n 'billing_country_code': billing.country,\n 'billing_email': order.user_email,\n 'description': _('Order %(order_number)s') % {\n 'order_number': order},\n 'billing_country_area': billing.country_area,\n 'customer_ip_address': get_client_ip(request)}\n variant_choices = settings.CHECKOUT_PAYMENT_CHOICES\n if variant not in [code for code, dummy_name in variant_choices]:\n raise Http404('%r is not a valid payment variant' % (variant,))\n with transaction.atomic():\n order.change_status('payment-pending')\n payment, dummy_created = Payment.objects.get_or_create(\n variant=variant, status='waiting', order=order, defaults=defaults)\n try:\n form = payment.get_form(data=request.POST or None)\n except RedirectNeeded as redirect_to:\n return redirect(str(redirect_to))\n except Exception:\n logger.exception('Error communicating with the payment gateway')\n messages.error(\n request,\n _('Oops, it looks like we were unable to contact the selected'\n ' payment service'))\n payment.change_status('error')\n return redirect('order:payment', token=order.token)\n template = 'order/payment/%s.html' % variant\n return TemplateResponse(request, [template, 'order/payment/default.html'],\n {'form': form, 'payment': payment})\n\n\n@check_order_status\ndef cancel_payment(request, order):\n form = PaymentDeleteForm(request.POST or None, order=order)\n if form.is_valid():\n with transaction.atomic():\n form.save()\n return redirect('order:payment', token=order.token)\n return HttpResponseForbidden()\n\n\ndef create_password(request, token):\n if request.user.is_authenticated():\n return redirect('order:details', token=token)\n order = get_object_or_404(Order, token=token)\n email = order.user_email\n form_data = request.POST.copy()\n if form_data:\n form_data.update({'email': email})\n form = PasswordForm(form_data or None)\n\n if form.is_valid():\n user = form.save(request)\n order.user = user\n order.save(update_fields=['user'])\n password = form_data.get('password1')\n auth_user = auth.authenticate(email=email, password=password)\n if auth_user is not None:\n auth.login(request, auth_user)\n return redirect('order:details', token=token)\n ctx = {'form': form, 'email': email, 'order': order}\n return TemplateResponse(request, 'order/create_password.html', 
ctx)\n", "path": "saleor/order/views.py"}]}
1,750
125
gh_patches_debug_5719
rasdani/github-patches
git_diff
NVIDIA-Merlin__NVTabular-1587
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] configure_tensorflow raises TypeError: expected string or bytes-like object in Horovod **Describe the bug** While trying to integrate Horovod's KerasEstimator w/ NVTabular, I'm seeing this error: ``` [1,0]<stderr>:/nvtabular/nvtabular/loader/tf_utils.py:53: UserWarning: TensorFlow runtime already initialized, may not be enough memory for cudf [1,0]<stderr>: warnings.warn( [1,0]<stderr>:Traceback (most recent call last): [1,0]<stderr>: File "/nvtabular/nvtabular/loader/tf_utils.py", line 57, in configure_tensorflow [1,0]<stderr>: tf.config.experimental.set_virtual_device_configuration( [1,0]<stderr>: File "/opt/conda/envs/rapids/lib/python3.8/site-packages/tensorflow/python/framework/config.py", line 871, in set_logical_device_configuration [1,0]<stderr>: context.context().set_logical_device_configuration(device, logical_devices) [1,0]<stderr>: File "/opt/conda/envs/rapids/lib/python3.8/site-packages/tensorflow/python/eager/context.py", line 1629, in set_logical_device_configuration [1,0]<stderr>: raise RuntimeError( [1,0]<stderr>:RuntimeError: Virtual devices cannot be modified after being initialized[1,0]<stderr>: [1,0]<stderr>: [1,0]<stderr>:During handling of the above exception, another exception occurred: [1,0]<stderr>: [1,0]<stderr>:Traceback (most recent call last): [1,0]<stderr>: File "/opt/conda/envs/rapids/lib/python3.8/runpy.py", line 194, in _run_module_as_main [1,0]<stderr>: return _run_code(code, main_globals, None, [1,0]<stderr>: File "/opt/conda/envs/rapids/lib/python3.8/runpy.py", line 87, in _run_code [1,0]<stderr>: exec(code, run_globals) [1,0]<stderr>: File "/horovod/horovod/spark/task/mpirun_exec_fn.py", line 52, in <module> [1,0]<stderr>: main(codec.loads_base64(sys.argv[1]), codec.loads_base64(sys.argv[2])) [1,0]<stderr>: File "/horovod/horovod/spark/task/mpirun_exec_fn.py", line 45, in main [1,0]<stderr>: task_exec(driver_addresses, settings, 'OMPI_COMM_WORLD_RANK', 'OMPI_COMM_WORLD_LOCAL_RANK') [1,0]<stderr>: File "/horovod/horovod/spark/task/__init__.py", line 61, in task_exec [1,0]<stderr>: result = fn(*args, **kwargs) [1,0]<stderr>: File "/horovod/horovod/spark/keras/remote.py", line 261, in train [1,0]<stderr>: history = fit(model, dm, steps_per_epoch, [1,0]<stderr>: File "/horovod/horovod/spark/keras/util.py", line 41, in fn [1,0]<stderr>: train_data = data_module.train_data(train_reader) [1,0]<stderr>: File "/horovod/horovod/spark/keras/datamodule.py", line 110, in train_data [1,0]<stderr>: from nvtabular.loader.tensorflow import KerasSequenceLoader [1,0]<stderr>: File "/nvtabular/nvtabular/loader/tensorflow.py", line 28, in <module> [1,0]<stderr>: from_dlpack = configure_tensorflow() [1,0]<stderr>: File "/nvtabular/nvtabular/loader/tf_utils.py", line 63, in configure_tensorflow [1,0]<stderr>: warnings.warn(e) [1,0]<stderr>:TypeError: expected string or bytes-like object ``` This is resulting in a fatal error instead of a non-fatal warning. My current workaround is to just use `warnings.warn(str(e))` here, although I'm not sure why the `RuntimeError` isn't automatically cast to string (when it normally would cast to string). **Steps/Code to reproduce bug** Unfortunately, since this involves integration with horovod, so the setup to reproduce is non-trivial. **Expected behavior** A non-fatal warning, and continued execution. 
**Environment details (please complete the following information):** - Environment location: Docker on Linux with single GPU. - Method of NVTabular install: custom docker image based on `nvcr.io/nvidia/merlin/merlin-tensorflow-training:22.05` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nvtabular/loader/tf_utils.py` Content: ``` 1 # 2 # Copyright (c) 2021, NVIDIA CORPORATION. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 # 16 17 import os 18 import warnings 19 20 import tensorflow as tf 21 from packaging import version 22 from tensorflow.python.feature_column import feature_column_v2 as fc 23 24 from merlin.core.dispatch import HAS_GPU 25 from merlin.core.utils import device_mem_size 26 27 28 def configure_tensorflow(memory_allocation=None, device=None): 29 total_gpu_mem_mb = device_mem_size(kind="total", cpu=(not HAS_GPU)) / (1024 ** 2) 30 31 if memory_allocation is None: 32 memory_allocation = os.environ.get("TF_MEMORY_ALLOCATION", 0.5) 33 34 if float(memory_allocation) < 1: 35 memory_allocation = total_gpu_mem_mb * float(memory_allocation) 36 memory_allocation = int(memory_allocation) 37 assert memory_allocation < total_gpu_mem_mb 38 39 # TODO: what will this look like in any sort 40 # of distributed set up? 41 if device is None: 42 device = int(os.environ.get("TF_VISIBLE_DEVICE", 0)) 43 tf_devices = tf.config.list_physical_devices("GPU") 44 if HAS_GPU and len(tf_devices) == 0: 45 raise ImportError("TensorFlow is not configured for GPU") 46 if HAS_GPU: 47 try: 48 tf.config.set_logical_device_configuration( 49 tf_devices[device], 50 [tf.config.LogicalDeviceConfiguration(memory_limit=memory_allocation)], 51 ) 52 except RuntimeError: 53 warnings.warn( 54 "TensorFlow runtime already initialized, may not be enough memory for cudf" 55 ) 56 try: 57 tf.config.experimental.set_virtual_device_configuration( 58 tf_devices[device], 59 [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_allocation)], 60 ) 61 except RuntimeError as e: 62 # Virtual devices must be set before GPUs have been initialized 63 warnings.warn(e) 64 65 # versions using TF earlier than 2.3.0 need to use extension 66 # library for dlpack support to avoid memory leak issue 67 __TF_DLPACK_STABLE_VERSION = "2.3.0" 68 if version.parse(tf.__version__) < version.parse(__TF_DLPACK_STABLE_VERSION): 69 try: 70 from tfdlpack import from_dlpack 71 except ModuleNotFoundError as e: 72 message = "If using TensorFlow < 2.3.0, you must install tfdlpack-gpu extension library" 73 raise ModuleNotFoundError(message) from e 74 75 else: 76 from tensorflow.experimental.dlpack import from_dlpack 77 78 return from_dlpack 79 80 81 def _get_parents(column): 82 """ 83 recursive function for finding the feature columns 84 that supply inputs for a given `column`. If there are 85 none, returns the column. Uses sets so is not 86 deterministic. 
87 """ 88 if isinstance(column.parents[0], str): 89 return set([column]) 90 parents = set() 91 for parent in column.parents: 92 parents |= _get_parents(parent) 93 return parents 94 95 96 def get_dataset_schema_from_feature_columns(feature_columns): 97 """ 98 maps from a list of TensorFlow `feature_column`s to 99 lists giving the categorical and continuous feature 100 names for a dataset. Useful for constructing NVTabular 101 Workflows from feature columns 102 """ 103 base_columns = set() 104 for column in feature_columns: 105 base_columns |= _get_parents(column) 106 107 cat_names, cont_names = [], [] 108 for column in base_columns: 109 if isinstance(column, fc.CategoricalColumn): 110 cat_names.append(column.name) 111 else: 112 cont_names.append(column.name) 113 114 cat_names = sorted(cat_names) 115 cont_names = sorted(cont_names) 116 return cat_names, cont_names 117 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nvtabular/loader/tf_utils.py b/nvtabular/loader/tf_utils.py --- a/nvtabular/loader/tf_utils.py +++ b/nvtabular/loader/tf_utils.py @@ -60,7 +60,7 @@ ) except RuntimeError as e: # Virtual devices must be set before GPUs have been initialized - warnings.warn(e) + warnings.warn(str(e)) # versions using TF earlier than 2.3.0 need to use extension # library for dlpack support to avoid memory leak issue
{"golden_diff": "diff --git a/nvtabular/loader/tf_utils.py b/nvtabular/loader/tf_utils.py\n--- a/nvtabular/loader/tf_utils.py\n+++ b/nvtabular/loader/tf_utils.py\n@@ -60,7 +60,7 @@\n )\n except RuntimeError as e:\n # Virtual devices must be set before GPUs have been initialized\n- warnings.warn(e)\n+ warnings.warn(str(e))\n \n # versions using TF earlier than 2.3.0 need to use extension\n # library for dlpack support to avoid memory leak issue\n", "issue": "[BUG] configure_tensorflow raises TypeError: expected string or bytes-like object in Horovod\n**Describe the bug**\r\nWhile trying to integrate Horovod's KerasEstimator w/ NVTabular, I'm seeing this error:\r\n```\r\n[1,0]<stderr>:/nvtabular/nvtabular/loader/tf_utils.py:53: UserWarning: TensorFlow runtime already initialized, may not be enough memory for cudf\r\n[1,0]<stderr>: warnings.warn(\r\n[1,0]<stderr>:Traceback (most recent call last):\r\n[1,0]<stderr>: File \"/nvtabular/nvtabular/loader/tf_utils.py\", line 57, in configure_tensorflow\r\n[1,0]<stderr>: tf.config.experimental.set_virtual_device_configuration(\r\n[1,0]<stderr>: File \"/opt/conda/envs/rapids/lib/python3.8/site-packages/tensorflow/python/framework/config.py\", line 871, in set_logical_device_configuration\r\n[1,0]<stderr>: context.context().set_logical_device_configuration(device, logical_devices)\r\n[1,0]<stderr>: File \"/opt/conda/envs/rapids/lib/python3.8/site-packages/tensorflow/python/eager/context.py\", line 1629, in set_logical_device_configuration\r\n[1,0]<stderr>: raise RuntimeError(\r\n[1,0]<stderr>:RuntimeError: Virtual devices cannot be modified after being initialized[1,0]<stderr>:\r\n[1,0]<stderr>:\r\n[1,0]<stderr>:During handling of the above exception, another exception occurred:\r\n[1,0]<stderr>:\r\n[1,0]<stderr>:Traceback (most recent call last):\r\n[1,0]<stderr>: File \"/opt/conda/envs/rapids/lib/python3.8/runpy.py\", line 194, in _run_module_as_main\r\n[1,0]<stderr>: return _run_code(code, main_globals, None,\r\n[1,0]<stderr>: File \"/opt/conda/envs/rapids/lib/python3.8/runpy.py\", line 87, in _run_code\r\n[1,0]<stderr>: exec(code, run_globals)\r\n[1,0]<stderr>: File \"/horovod/horovod/spark/task/mpirun_exec_fn.py\", line 52, in <module>\r\n[1,0]<stderr>: main(codec.loads_base64(sys.argv[1]), codec.loads_base64(sys.argv[2]))\r\n[1,0]<stderr>: File \"/horovod/horovod/spark/task/mpirun_exec_fn.py\", line 45, in main\r\n[1,0]<stderr>: task_exec(driver_addresses, settings, 'OMPI_COMM_WORLD_RANK', 'OMPI_COMM_WORLD_LOCAL_RANK')\r\n[1,0]<stderr>: File \"/horovod/horovod/spark/task/__init__.py\", line 61, in task_exec\r\n[1,0]<stderr>: result = fn(*args, **kwargs)\r\n[1,0]<stderr>: File \"/horovod/horovod/spark/keras/remote.py\", line 261, in train\r\n[1,0]<stderr>: history = fit(model, dm, steps_per_epoch,\r\n[1,0]<stderr>: File \"/horovod/horovod/spark/keras/util.py\", line 41, in fn\r\n[1,0]<stderr>: train_data = data_module.train_data(train_reader)\r\n[1,0]<stderr>: File \"/horovod/horovod/spark/keras/datamodule.py\", line 110, in train_data\r\n[1,0]<stderr>: from nvtabular.loader.tensorflow import KerasSequenceLoader\r\n[1,0]<stderr>: File \"/nvtabular/nvtabular/loader/tensorflow.py\", line 28, in <module>\r\n[1,0]<stderr>: from_dlpack = configure_tensorflow()\r\n[1,0]<stderr>: File \"/nvtabular/nvtabular/loader/tf_utils.py\", line 63, in configure_tensorflow\r\n[1,0]<stderr>: warnings.warn(e)\r\n[1,0]<stderr>:TypeError: expected string or bytes-like object\r\n```\r\n\r\nThis is resulting in a fatal error instead of a non-fatal warning. 
My current workaround is to just use `warnings.warn(str(e))` here, although I'm not sure why the `RuntimeError` isn't automatically cast to string (when it normally would cast to string).\r\n\r\n**Steps/Code to reproduce bug**\r\nUnfortunately, since this involves integration with horovod, so the setup to reproduce is non-trivial.\r\n\r\n**Expected behavior**\r\nA non-fatal warning, and continued execution.\r\n\r\n**Environment details (please complete the following information):**\r\n - Environment location: Docker on Linux with single GPU.\r\n - Method of NVTabular install: custom docker image based on `nvcr.io/nvidia/merlin/merlin-tensorflow-training:22.05` \n", "before_files": [{"content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport warnings\n\nimport tensorflow as tf\nfrom packaging import version\nfrom tensorflow.python.feature_column import feature_column_v2 as fc\n\nfrom merlin.core.dispatch import HAS_GPU\nfrom merlin.core.utils import device_mem_size\n\n\ndef configure_tensorflow(memory_allocation=None, device=None):\n total_gpu_mem_mb = device_mem_size(kind=\"total\", cpu=(not HAS_GPU)) / (1024 ** 2)\n\n if memory_allocation is None:\n memory_allocation = os.environ.get(\"TF_MEMORY_ALLOCATION\", 0.5)\n\n if float(memory_allocation) < 1:\n memory_allocation = total_gpu_mem_mb * float(memory_allocation)\n memory_allocation = int(memory_allocation)\n assert memory_allocation < total_gpu_mem_mb\n\n # TODO: what will this look like in any sort\n # of distributed set up?\n if device is None:\n device = int(os.environ.get(\"TF_VISIBLE_DEVICE\", 0))\n tf_devices = tf.config.list_physical_devices(\"GPU\")\n if HAS_GPU and len(tf_devices) == 0:\n raise ImportError(\"TensorFlow is not configured for GPU\")\n if HAS_GPU:\n try:\n tf.config.set_logical_device_configuration(\n tf_devices[device],\n [tf.config.LogicalDeviceConfiguration(memory_limit=memory_allocation)],\n )\n except RuntimeError:\n warnings.warn(\n \"TensorFlow runtime already initialized, may not be enough memory for cudf\"\n )\n try:\n tf.config.experimental.set_virtual_device_configuration(\n tf_devices[device],\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_allocation)],\n )\n except RuntimeError as e:\n # Virtual devices must be set before GPUs have been initialized\n warnings.warn(e)\n\n # versions using TF earlier than 2.3.0 need to use extension\n # library for dlpack support to avoid memory leak issue\n __TF_DLPACK_STABLE_VERSION = \"2.3.0\"\n if version.parse(tf.__version__) < version.parse(__TF_DLPACK_STABLE_VERSION):\n try:\n from tfdlpack import from_dlpack\n except ModuleNotFoundError as e:\n message = \"If using TensorFlow < 2.3.0, you must install tfdlpack-gpu extension library\"\n raise ModuleNotFoundError(message) from e\n\n else:\n from tensorflow.experimental.dlpack import from_dlpack\n\n return from_dlpack\n\n\ndef _get_parents(column):\n \"\"\"\n recursive function for finding the feature columns\n that 
supply inputs for a given `column`. If there are\n none, returns the column. Uses sets so is not\n deterministic.\n \"\"\"\n if isinstance(column.parents[0], str):\n return set([column])\n parents = set()\n for parent in column.parents:\n parents |= _get_parents(parent)\n return parents\n\n\ndef get_dataset_schema_from_feature_columns(feature_columns):\n \"\"\"\n maps from a list of TensorFlow `feature_column`s to\n lists giving the categorical and continuous feature\n names for a dataset. Useful for constructing NVTabular\n Workflows from feature columns\n \"\"\"\n base_columns = set()\n for column in feature_columns:\n base_columns |= _get_parents(column)\n\n cat_names, cont_names = [], []\n for column in base_columns:\n if isinstance(column, fc.CategoricalColumn):\n cat_names.append(column.name)\n else:\n cont_names.append(column.name)\n\n cat_names = sorted(cat_names)\n cont_names = sorted(cont_names)\n return cat_names, cont_names\n", "path": "nvtabular/loader/tf_utils.py"}], "after_files": [{"content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport warnings\n\nimport tensorflow as tf\nfrom packaging import version\nfrom tensorflow.python.feature_column import feature_column_v2 as fc\n\nfrom merlin.core.dispatch import HAS_GPU\nfrom merlin.core.utils import device_mem_size\n\n\ndef configure_tensorflow(memory_allocation=None, device=None):\n total_gpu_mem_mb = device_mem_size(kind=\"total\", cpu=(not HAS_GPU)) / (1024 ** 2)\n\n if memory_allocation is None:\n memory_allocation = os.environ.get(\"TF_MEMORY_ALLOCATION\", 0.5)\n\n if float(memory_allocation) < 1:\n memory_allocation = total_gpu_mem_mb * float(memory_allocation)\n memory_allocation = int(memory_allocation)\n assert memory_allocation < total_gpu_mem_mb\n\n # TODO: what will this look like in any sort\n # of distributed set up?\n if device is None:\n device = int(os.environ.get(\"TF_VISIBLE_DEVICE\", 0))\n tf_devices = tf.config.list_physical_devices(\"GPU\")\n if HAS_GPU and len(tf_devices) == 0:\n raise ImportError(\"TensorFlow is not configured for GPU\")\n if HAS_GPU:\n try:\n tf.config.set_logical_device_configuration(\n tf_devices[device],\n [tf.config.LogicalDeviceConfiguration(memory_limit=memory_allocation)],\n )\n except RuntimeError:\n warnings.warn(\n \"TensorFlow runtime already initialized, may not be enough memory for cudf\"\n )\n try:\n tf.config.experimental.set_virtual_device_configuration(\n tf_devices[device],\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_allocation)],\n )\n except RuntimeError as e:\n # Virtual devices must be set before GPUs have been initialized\n warnings.warn(str(e))\n\n # versions using TF earlier than 2.3.0 need to use extension\n # library for dlpack support to avoid memory leak issue\n __TF_DLPACK_STABLE_VERSION = \"2.3.0\"\n if version.parse(tf.__version__) < version.parse(__TF_DLPACK_STABLE_VERSION):\n try:\n from tfdlpack import from_dlpack\n except ModuleNotFoundError as e:\n 
message = \"If using TensorFlow < 2.3.0, you must install tfdlpack-gpu extension library\"\n raise ModuleNotFoundError(message) from e\n\n else:\n from tensorflow.experimental.dlpack import from_dlpack\n\n return from_dlpack\n\n\ndef _get_parents(column):\n \"\"\"\n recursive function for finding the feature columns\n that supply inputs for a given `column`. If there are\n none, returns the column. Uses sets so is not\n deterministic.\n \"\"\"\n if isinstance(column.parents[0], str):\n return set([column])\n parents = set()\n for parent in column.parents:\n parents |= _get_parents(parent)\n return parents\n\n\ndef get_dataset_schema_from_feature_columns(feature_columns):\n \"\"\"\n maps from a list of TensorFlow `feature_column`s to\n lists giving the categorical and continuous feature\n names for a dataset. Useful for constructing NVTabular\n Workflows from feature columns\n \"\"\"\n base_columns = set()\n for column in feature_columns:\n base_columns |= _get_parents(column)\n\n cat_names, cont_names = [], []\n for column in base_columns:\n if isinstance(column, fc.CategoricalColumn):\n cat_names.append(column.name)\n else:\n cont_names.append(column.name)\n\n cat_names = sorted(cat_names)\n cont_names = sorted(cont_names)\n return cat_names, cont_names\n", "path": "nvtabular/loader/tf_utils.py"}]}
2,480
126
gh_patches_debug_2865
rasdani/github-patches
git_diff
rootpy__rootpy-511
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 'TCanvas' object has no attribute 'name' Hi, I am seeing weird issues with the interactive module. It looks like the TCanvas is not 'decorated' when loading rootpy.interactive. ``` >>> from ROOT import * >>> t = TCanvas() >>> from rootpy.interactive import wait /usr/local/lib/python2.7/site-packages/IPython/frontend.py:30: UserWarning: The top-level `frontend` package has been deprecated. All its subpackages have been moved to the top `IPython` level. warn("The top-level `frontend` package has been deprecated. " w>>> wait() Traceback (most recent call last): File "<console>", line 1, in <module> File "/usr/local/Cellar/python/2.7.5/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/rootpy-dev-py2.7.egg/rootpy/interactive/rootwait.py", line 206, in wait_for_zero_canvases log.debug("waiting for canvas {0} to close".format(canvas.name)) AttributeError: 'TCanvas' object has no attribute 'name' ``` Albert --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rootpy/interactive/rootwait.py` Content: ``` 1 # Copyright 2012 the rootpy developers 2 # distributed under the terms of the GNU General Public License 3 """ 4 The functions in this module provide a way of pausing code execution until 5 canvases are closed. This can be useful when testing code and you don't want to 6 keep the objects alive outside of your function. 7 8 The wait function can be called repeatedly to pause multiple times. 9 10 wait_for_zero_canvases() 11 Keeps root alive until CTRL-c is pressed or all canvases are closed 12 13 wait_for_zero_canvases(middle_mouse_close=True) 14 allows canvases to be closed with the middle mouse button (see below) 15 16 wait is shorthand for wait_for_zero_canvases 17 18 Examples 19 -------- 20 21 from rootpy.plotting import Canvas 22 from rootpy.interactive import wait 23 24 c = Canvas() 25 c.Update() 26 wait() 27 28 c2 = Canvas() 29 c2.Update() 30 wait(True) 31 # This canvas can be killed by middle clicking on it or hitting 32 # escape whilst it has focus 33 34 """ 35 from __future__ import absolute_import 36 37 import threading 38 from atexit import register 39 40 import ROOT 41 42 from . import log; log = log[__name__] 43 from ..defaults import extra_initialization 44 from ..memory.keepalive import keepalive 45 from .canvas_events import attach_event_handler 46 47 __all__ = [ 48 'wait_for_zero_canvases', 49 'wait_for_browser_close', 50 'wait', 51 ] 52 53 _processRootEvents = None 54 _finishSchedule = None 55 __ACTIVE = False 56 57 58 @extra_initialization 59 def fetch_vars(): 60 global _processRootEvents, _finishSchedule, __ACTIVE 61 PyGUIThread = getattr(ROOT, 'PyGUIThread', None) 62 if PyGUIThread is not None: 63 _processRootEvents = getattr(PyGUIThread, "_Thread__target", None) 64 _finishSchedule = getattr(PyGUIThread, "finishSchedule", None) 65 if _processRootEvents is None: 66 log.warning( 67 "unable to access ROOT's GUI thread either because " 68 "PyROOT's finalSetup() was called while in batch mode " 69 "or because PyROOT is using the new PyOS_InputHook " 70 "based mechanism that is not yet supported in rootpy " 71 "(PyConfig.StartGuiThread == 'inputhook' or " 72 "gSystem.InheritsFrom('TMacOSXSystem')). wait() etc. 
will " 73 "instead call raw_input() and wait for [Enter]") 74 else: 75 __ACTIVE = True 76 77 78 def wait_failover(caller): 79 if not ROOT.gROOT.IsBatch(): 80 log.warning( 81 "{0} is failing over to raw_input()".format(caller.__name__)) 82 raw_input("press [Enter] to continue") 83 84 85 def start_new_gui_thread(): 86 """ 87 Attempt to start a new GUI thread, if possible. 88 89 It is only possible to start one if there was one running on module import. 90 """ 91 PyGUIThread = getattr(ROOT, 'PyGUIThread', None) 92 93 if PyGUIThread is not None: 94 assert not PyGUIThread.isAlive(), "GUI thread already running!" 95 96 assert _processRootEvents, ( 97 "GUI thread wasn't started when rootwait was imported, " 98 "so it can't be restarted") 99 100 ROOT.keeppolling = 1 101 ROOT.PyGUIThread = threading.Thread( 102 None, _processRootEvents, None, (ROOT,)) 103 104 ROOT.PyGUIThread.finishSchedule = _finishSchedule 105 ROOT.PyGUIThread.setDaemon(1) 106 ROOT.PyGUIThread.start() 107 log.debug("successfully started a new GUI thread") 108 109 110 def stop_gui_thread(): 111 """ 112 Try to stop the GUI thread. If it was running returns True, 113 otherwise False. 114 """ 115 PyGUIThread = getattr(ROOT, 'PyGUIThread', None) 116 117 if PyGUIThread is None or not PyGUIThread.isAlive(): 118 log.debug("no existing GUI thread is runnng") 119 return False 120 121 ROOT.keeppolling = 0 122 try: 123 PyGUIThread.finishSchedule() 124 except AttributeError: 125 log.debug("unable to call finishSchedule() on PyGUIThread") 126 pass 127 PyGUIThread.join() 128 log.debug("successfully stopped the existing GUI thread") 129 return True 130 131 132 def get_visible_canvases(): 133 """ 134 Return a list of active GUI canvases 135 (as opposed to invisible Batch canvases) 136 """ 137 try: 138 return [c for c in ROOT.gROOT.GetListOfCanvases() if not c.IsBatch()] 139 except AttributeError: 140 # We might be exiting and ROOT.gROOT will raise an AttributeError 141 return [] 142 143 144 def run_application_until_done(): 145 146 had_gui_thread = stop_gui_thread() 147 148 ROOT.gApplication._threaded = True 149 ROOT.gApplication.Run(True) 150 151 if had_gui_thread: 152 start_new_gui_thread() 153 154 155 def dispatcher(f): 156 disp = ROOT.TPyDispatcher(f) 157 keepalive(disp, f) 158 return disp 159 160 161 def wait_for_zero_canvases(middle_mouse_close=False): 162 """ 163 Wait for all canvases to be closed, or CTRL-c. 164 165 If `middle_mouse_close`, middle click will shut the canvas. 166 167 incpy.ignore 168 """ 169 if not __ACTIVE: 170 wait_failover(wait_for_zero_canvases) 171 return 172 173 @dispatcher 174 def count_canvases(): 175 """ 176 Count the number of active canvases and finish gApplication.Run() 177 if there are none remaining. 178 179 incpy.ignore 180 """ 181 if not get_visible_canvases(): 182 try: 183 ROOT.gSystem.ExitLoop() 184 except AttributeError: 185 # We might be exiting and ROOT.gROOT will raise an AttributeError 186 pass 187 188 @dispatcher 189 def exit_application_loop(): 190 """ 191 Signal handler for CTRL-c to cause gApplication.Run() to finish. 
192 193 incpy.ignore 194 """ 195 ROOT.gSystem.ExitLoop() 196 197 # Handle CTRL-c 198 sh = ROOT.TSignalHandler(ROOT.kSigInterrupt, True) 199 sh.Add() 200 sh.Connect("Notified()", "TPyDispatcher", 201 exit_application_loop, "Dispatch()") 202 203 visible_canvases = get_visible_canvases() 204 205 for canvas in visible_canvases: 206 log.debug("waiting for canvas {0} to close".format(canvas.name)) 207 canvas.Update() 208 209 if middle_mouse_close: 210 attach_event_handler(canvas) 211 212 if not getattr(canvas, "_py_close_dispatcher_attached", False): 213 # Attach a handler only once to each canvas 214 canvas._py_close_dispatcher_attached = True 215 canvas.Connect("Closed()", "TPyDispatcher", 216 count_canvases, "Dispatch()") 217 keepalive(canvas, count_canvases) 218 219 if visible_canvases and not ROOT.gROOT.IsBatch(): 220 run_application_until_done() 221 222 # Disconnect from canvases 223 for canvas in visible_canvases: 224 if getattr(canvas, "_py_close_dispatcher_attached", False): 225 canvas._py_close_dispatcher_attached = False 226 canvas.Disconnect("Closed()", count_canvases, "Dispatch()") 227 228 wait = wait_for_zero_canvases 229 230 231 def wait_for_frame(frame): 232 """ 233 wait until a TGMainFrame is closed or ctrl-c 234 """ 235 if not frame: 236 # It's already closed or maybe we're in batch mode 237 return 238 239 @dispatcher 240 def close(): 241 ROOT.gSystem.ExitLoop() 242 243 if not getattr(frame, "_py_close_dispatcher_attached", False): 244 frame._py_close_dispatcher_attached = True 245 frame.Connect("CloseWindow()", "TPyDispatcher", close, "Dispatch()") 246 247 @dispatcher 248 def exit_application_loop(): 249 """ 250 Signal handler for CTRL-c to cause gApplication.Run() to finish. 251 252 incpy.ignore 253 """ 254 ROOT.gSystem.ExitLoop() 255 256 # Handle CTRL-c 257 sh = ROOT.TSignalHandler(ROOT.kSigInterrupt, True) 258 sh.Add() 259 sh.Connect("Notified()", "TPyDispatcher", 260 exit_application_loop, "Dispatch()") 261 262 if not ROOT.gROOT.IsBatch(): 263 run_application_until_done() 264 # Need to disconnect to prevent close handler from running when python 265 # teardown has already commenced. 266 frame.Disconnect("CloseWindow()", close, "Dispatch()") 267 268 269 def wait_for_browser_close(b): 270 """ 271 Can be used to wait until a TBrowser is closed 272 """ 273 if b: 274 if not __ACTIVE: 275 wait_failover(wait_for_browser_close) 276 return 277 wait_for_frame(b.GetBrowserImp().GetMainFrame()) 278 279 280 def prevent_close_with_canvases(): 281 """ 282 Register a handler which prevents python from exiting until 283 all canvases are closed 284 """ 285 register(wait_for_zero_canvases) 286 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rootpy/interactive/rootwait.py b/rootpy/interactive/rootwait.py --- a/rootpy/interactive/rootwait.py +++ b/rootpy/interactive/rootwait.py @@ -203,7 +203,7 @@ visible_canvases = get_visible_canvases() for canvas in visible_canvases: - log.debug("waiting for canvas {0} to close".format(canvas.name)) + log.debug("waiting for canvas {0} to close".format(canvas.GetName())) canvas.Update() if middle_mouse_close:
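The golden diff swaps the rootpy-style `canvas.name` attribute for ROOT's own `TCanvas.GetName()` accessor, which is available on every canvas whether or not rootpy has decorated it. The sketch below is illustrative only (a bare PyROOT session with no rootpy import; object names are arbitrary) and shows why the snake_case property cannot be relied on for canvases created directly through `ROOT.TCanvas`, as in the issue report.

```python
# Minimal sketch, assuming a working PyROOT installation; rootpy is never imported.
import ROOT

canvas = ROOT.TCanvas("c1", "demo")

# GetName() is the underlying C++ accessor and works on any TCanvas.
print(canvas.GetName())  # -> "c1"

# The snake_case 'name' property only exists on canvases wrapped by rootpy
# (e.g. rootpy.plotting.Canvas); a bare ROOT.TCanvas has no such attribute.
print(getattr(canvas, "name", "<no rootpy decoration>"))
```

Calling the C++ accessor keeps `wait_for_zero_canvases()` usable for both plain ROOT canvases and rootpy `Canvas` objects, which is presumably why the patch prefers it over trying to decorate every canvas on the fly.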
{"golden_diff": "diff --git a/rootpy/interactive/rootwait.py b/rootpy/interactive/rootwait.py\n--- a/rootpy/interactive/rootwait.py\n+++ b/rootpy/interactive/rootwait.py\n@@ -203,7 +203,7 @@\n visible_canvases = get_visible_canvases()\n \n for canvas in visible_canvases:\n- log.debug(\"waiting for canvas {0} to close\".format(canvas.name))\n+ log.debug(\"waiting for canvas {0} to close\".format(canvas.GetName()))\n canvas.Update()\n \n if middle_mouse_close:\n", "issue": "'TCanvas' object has no attribute 'name'\nHi,\n\nI am seeing weird issues with the interactive module. It looks like the TCanvas is not 'decorated' when loading rootpy.interactive.\n\n```\n>>> from ROOT import *\n>>> t = TCanvas()\n>>> from rootpy.interactive import wait\n/usr/local/lib/python2.7/site-packages/IPython/frontend.py:30: UserWarning: The top-level `frontend` package has been deprecated. All its subpackages have been moved to the top `IPython` level.\n warn(\"The top-level `frontend` package has been deprecated. \"\nw>>> wait()\nTraceback (most recent call last):\n File \"<console>\", line 1, in <module>\n File \"/usr/local/Cellar/python/2.7.5/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/rootpy-dev-py2.7.egg/rootpy/interactive/rootwait.py\", line 206, in wait_for_zero_canvases\n log.debug(\"waiting for canvas {0} to close\".format(canvas.name))\nAttributeError: 'TCanvas' object has no attribute 'name'\n```\n\nAlbert\n\n", "before_files": [{"content": "# Copyright 2012 the rootpy developers\n# distributed under the terms of the GNU General Public License\n\"\"\"\nThe functions in this module provide a way of pausing code execution until\ncanvases are closed. This can be useful when testing code and you don't want to\nkeep the objects alive outside of your function.\n\nThe wait function can be called repeatedly to pause multiple times.\n\nwait_for_zero_canvases()\n Keeps root alive until CTRL-c is pressed or all canvases are closed\n\nwait_for_zero_canvases(middle_mouse_close=True)\n allows canvases to be closed with the middle mouse button (see below)\n\nwait is shorthand for wait_for_zero_canvases\n\nExamples\n--------\n\n from rootpy.plotting import Canvas\n from rootpy.interactive import wait\n\n c = Canvas()\n c.Update()\n wait()\n\n c2 = Canvas()\n c2.Update()\n wait(True)\n # This canvas can be killed by middle clicking on it or hitting\n # escape whilst it has focus\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport threading\nfrom atexit import register\n\nimport ROOT\n\nfrom . 
import log; log = log[__name__]\nfrom ..defaults import extra_initialization\nfrom ..memory.keepalive import keepalive\nfrom .canvas_events import attach_event_handler\n\n__all__ = [\n 'wait_for_zero_canvases',\n 'wait_for_browser_close',\n 'wait',\n]\n\n_processRootEvents = None\n_finishSchedule = None\n__ACTIVE = False\n\n\n@extra_initialization\ndef fetch_vars():\n global _processRootEvents, _finishSchedule, __ACTIVE\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n if PyGUIThread is not None:\n _processRootEvents = getattr(PyGUIThread, \"_Thread__target\", None)\n _finishSchedule = getattr(PyGUIThread, \"finishSchedule\", None)\n if _processRootEvents is None:\n log.warning(\n \"unable to access ROOT's GUI thread either because \"\n \"PyROOT's finalSetup() was called while in batch mode \"\n \"or because PyROOT is using the new PyOS_InputHook \"\n \"based mechanism that is not yet supported in rootpy \"\n \"(PyConfig.StartGuiThread == 'inputhook' or \"\n \"gSystem.InheritsFrom('TMacOSXSystem')). wait() etc. will \"\n \"instead call raw_input() and wait for [Enter]\")\n else:\n __ACTIVE = True\n\n\ndef wait_failover(caller):\n if not ROOT.gROOT.IsBatch():\n log.warning(\n \"{0} is failing over to raw_input()\".format(caller.__name__))\n raw_input(\"press [Enter] to continue\")\n\n\ndef start_new_gui_thread():\n \"\"\"\n Attempt to start a new GUI thread, if possible.\n\n It is only possible to start one if there was one running on module import.\n \"\"\"\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n\n if PyGUIThread is not None:\n assert not PyGUIThread.isAlive(), \"GUI thread already running!\"\n\n assert _processRootEvents, (\n \"GUI thread wasn't started when rootwait was imported, \"\n \"so it can't be restarted\")\n\n ROOT.keeppolling = 1\n ROOT.PyGUIThread = threading.Thread(\n None, _processRootEvents, None, (ROOT,))\n\n ROOT.PyGUIThread.finishSchedule = _finishSchedule\n ROOT.PyGUIThread.setDaemon(1)\n ROOT.PyGUIThread.start()\n log.debug(\"successfully started a new GUI thread\")\n\n\ndef stop_gui_thread():\n \"\"\"\n Try to stop the GUI thread. 
If it was running returns True,\n otherwise False.\n \"\"\"\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n\n if PyGUIThread is None or not PyGUIThread.isAlive():\n log.debug(\"no existing GUI thread is runnng\")\n return False\n\n ROOT.keeppolling = 0\n try:\n PyGUIThread.finishSchedule()\n except AttributeError:\n log.debug(\"unable to call finishSchedule() on PyGUIThread\")\n pass\n PyGUIThread.join()\n log.debug(\"successfully stopped the existing GUI thread\")\n return True\n\n\ndef get_visible_canvases():\n \"\"\"\n Return a list of active GUI canvases\n (as opposed to invisible Batch canvases)\n \"\"\"\n try:\n return [c for c in ROOT.gROOT.GetListOfCanvases() if not c.IsBatch()]\n except AttributeError:\n # We might be exiting and ROOT.gROOT will raise an AttributeError\n return []\n\n\ndef run_application_until_done():\n\n had_gui_thread = stop_gui_thread()\n\n ROOT.gApplication._threaded = True\n ROOT.gApplication.Run(True)\n\n if had_gui_thread:\n start_new_gui_thread()\n\n\ndef dispatcher(f):\n disp = ROOT.TPyDispatcher(f)\n keepalive(disp, f)\n return disp\n\n\ndef wait_for_zero_canvases(middle_mouse_close=False):\n \"\"\"\n Wait for all canvases to be closed, or CTRL-c.\n\n If `middle_mouse_close`, middle click will shut the canvas.\n\n incpy.ignore\n \"\"\"\n if not __ACTIVE:\n wait_failover(wait_for_zero_canvases)\n return\n\n @dispatcher\n def count_canvases():\n \"\"\"\n Count the number of active canvases and finish gApplication.Run()\n if there are none remaining.\n\n incpy.ignore\n \"\"\"\n if not get_visible_canvases():\n try:\n ROOT.gSystem.ExitLoop()\n except AttributeError:\n # We might be exiting and ROOT.gROOT will raise an AttributeError\n pass\n\n @dispatcher\n def exit_application_loop():\n \"\"\"\n Signal handler for CTRL-c to cause gApplication.Run() to finish.\n\n incpy.ignore\n \"\"\"\n ROOT.gSystem.ExitLoop()\n\n # Handle CTRL-c\n sh = ROOT.TSignalHandler(ROOT.kSigInterrupt, True)\n sh.Add()\n sh.Connect(\"Notified()\", \"TPyDispatcher\",\n exit_application_loop, \"Dispatch()\")\n\n visible_canvases = get_visible_canvases()\n\n for canvas in visible_canvases:\n log.debug(\"waiting for canvas {0} to close\".format(canvas.name))\n canvas.Update()\n\n if middle_mouse_close:\n attach_event_handler(canvas)\n\n if not getattr(canvas, \"_py_close_dispatcher_attached\", False):\n # Attach a handler only once to each canvas\n canvas._py_close_dispatcher_attached = True\n canvas.Connect(\"Closed()\", \"TPyDispatcher\",\n count_canvases, \"Dispatch()\")\n keepalive(canvas, count_canvases)\n\n if visible_canvases and not ROOT.gROOT.IsBatch():\n run_application_until_done()\n\n # Disconnect from canvases\n for canvas in visible_canvases:\n if getattr(canvas, \"_py_close_dispatcher_attached\", False):\n canvas._py_close_dispatcher_attached = False\n canvas.Disconnect(\"Closed()\", count_canvases, \"Dispatch()\")\n\nwait = wait_for_zero_canvases\n\n\ndef wait_for_frame(frame):\n \"\"\"\n wait until a TGMainFrame is closed or ctrl-c\n \"\"\"\n if not frame:\n # It's already closed or maybe we're in batch mode\n return\n\n @dispatcher\n def close():\n ROOT.gSystem.ExitLoop()\n\n if not getattr(frame, \"_py_close_dispatcher_attached\", False):\n frame._py_close_dispatcher_attached = True\n frame.Connect(\"CloseWindow()\", \"TPyDispatcher\", close, \"Dispatch()\")\n\n @dispatcher\n def exit_application_loop():\n \"\"\"\n Signal handler for CTRL-c to cause gApplication.Run() to finish.\n\n incpy.ignore\n \"\"\"\n ROOT.gSystem.ExitLoop()\n\n # Handle CTRL-c\n sh = 
ROOT.TSignalHandler(ROOT.kSigInterrupt, True)\n sh.Add()\n sh.Connect(\"Notified()\", \"TPyDispatcher\",\n exit_application_loop, \"Dispatch()\")\n\n if not ROOT.gROOT.IsBatch():\n run_application_until_done()\n # Need to disconnect to prevent close handler from running when python\n # teardown has already commenced.\n frame.Disconnect(\"CloseWindow()\", close, \"Dispatch()\")\n\n\ndef wait_for_browser_close(b):\n \"\"\"\n Can be used to wait until a TBrowser is closed\n \"\"\"\n if b:\n if not __ACTIVE:\n wait_failover(wait_for_browser_close)\n return\n wait_for_frame(b.GetBrowserImp().GetMainFrame())\n\n\ndef prevent_close_with_canvases():\n \"\"\"\n Register a handler which prevents python from exiting until\n all canvases are closed\n \"\"\"\n register(wait_for_zero_canvases)\n", "path": "rootpy/interactive/rootwait.py"}], "after_files": [{"content": "# Copyright 2012 the rootpy developers\n# distributed under the terms of the GNU General Public License\n\"\"\"\nThe functions in this module provide a way of pausing code execution until\ncanvases are closed. This can be useful when testing code and you don't want to\nkeep the objects alive outside of your function.\n\nThe wait function can be called repeatedly to pause multiple times.\n\nwait_for_zero_canvases()\n Keeps root alive until CTRL-c is pressed or all canvases are closed\n\nwait_for_zero_canvases(middle_mouse_close=True)\n allows canvases to be closed with the middle mouse button (see below)\n\nwait is shorthand for wait_for_zero_canvases\n\nExamples\n--------\n\n from rootpy.plotting import Canvas\n from rootpy.interactive import wait\n\n c = Canvas()\n c.Update()\n wait()\n\n c2 = Canvas()\n c2.Update()\n wait(True)\n # This canvas can be killed by middle clicking on it or hitting\n # escape whilst it has focus\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport threading\nfrom atexit import register\n\nimport ROOT\n\nfrom . import log; log = log[__name__]\nfrom ..defaults import extra_initialization\nfrom ..memory.keepalive import keepalive\nfrom .canvas_events import attach_event_handler\n\n__all__ = [\n 'wait_for_zero_canvases',\n 'wait_for_browser_close',\n 'wait',\n]\n\n_processRootEvents = None\n_finishSchedule = None\n__ACTIVE = False\n\n\n@extra_initialization\ndef fetch_vars():\n global _processRootEvents, _finishSchedule, __ACTIVE\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n if PyGUIThread is not None:\n _processRootEvents = getattr(PyGUIThread, \"_Thread__target\", None)\n _finishSchedule = getattr(PyGUIThread, \"finishSchedule\", None)\n if _processRootEvents is None:\n log.warning(\n \"unable to access ROOT's GUI thread either because \"\n \"PyROOT's finalSetup() was called while in batch mode \"\n \"or because PyROOT is using the new PyOS_InputHook \"\n \"based mechanism that is not yet supported in rootpy \"\n \"(PyConfig.StartGuiThread == 'inputhook' or \"\n \"gSystem.InheritsFrom('TMacOSXSystem')). wait() etc. 
will \"\n \"instead call raw_input() and wait for [Enter]\")\n else:\n __ACTIVE = True\n\n\ndef wait_failover(caller):\n if not ROOT.gROOT.IsBatch():\n log.warning(\n \"{0} is failing over to raw_input()\".format(caller.__name__))\n raw_input(\"press [Enter] to continue\")\n\n\ndef start_new_gui_thread():\n \"\"\"\n Attempt to start a new GUI thread, if possible.\n\n It is only possible to start one if there was one running on module import.\n \"\"\"\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n\n if PyGUIThread is not None:\n assert not PyGUIThread.isAlive(), \"GUI thread already running!\"\n\n assert _processRootEvents, (\n \"GUI thread wasn't started when rootwait was imported, \"\n \"so it can't be restarted\")\n\n ROOT.keeppolling = 1\n ROOT.PyGUIThread = threading.Thread(\n None, _processRootEvents, None, (ROOT,))\n\n ROOT.PyGUIThread.finishSchedule = _finishSchedule\n ROOT.PyGUIThread.setDaemon(1)\n ROOT.PyGUIThread.start()\n log.debug(\"successfully started a new GUI thread\")\n\n\ndef stop_gui_thread():\n \"\"\"\n Try to stop the GUI thread. If it was running returns True,\n otherwise False.\n \"\"\"\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n\n if PyGUIThread is None or not PyGUIThread.isAlive():\n log.debug(\"no existing GUI thread is runnng\")\n return False\n\n ROOT.keeppolling = 0\n try:\n PyGUIThread.finishSchedule()\n except AttributeError:\n log.debug(\"unable to call finishSchedule() on PyGUIThread\")\n pass\n PyGUIThread.join()\n log.debug(\"successfully stopped the existing GUI thread\")\n return True\n\n\ndef get_visible_canvases():\n \"\"\"\n Return a list of active GUI canvases\n (as opposed to invisible Batch canvases)\n \"\"\"\n try:\n return [c for c in ROOT.gROOT.GetListOfCanvases() if not c.IsBatch()]\n except AttributeError:\n # We might be exiting and ROOT.gROOT will raise an AttributeError\n return []\n\n\ndef run_application_until_done():\n\n had_gui_thread = stop_gui_thread()\n\n ROOT.gApplication._threaded = True\n ROOT.gApplication.Run(True)\n\n if had_gui_thread:\n start_new_gui_thread()\n\n\ndef dispatcher(f):\n disp = ROOT.TPyDispatcher(f)\n keepalive(disp, f)\n return disp\n\n\ndef wait_for_zero_canvases(middle_mouse_close=False):\n \"\"\"\n Wait for all canvases to be closed, or CTRL-c.\n\n If `middle_mouse_close`, middle click will shut the canvas.\n\n incpy.ignore\n \"\"\"\n if not __ACTIVE:\n wait_failover(wait_for_zero_canvases)\n return\n\n @dispatcher\n def count_canvases():\n \"\"\"\n Count the number of active canvases and finish gApplication.Run()\n if there are none remaining.\n\n incpy.ignore\n \"\"\"\n if not get_visible_canvases():\n try:\n ROOT.gSystem.ExitLoop()\n except AttributeError:\n # We might be exiting and ROOT.gROOT will raise an AttributeError\n pass\n\n @dispatcher\n def exit_application_loop():\n \"\"\"\n Signal handler for CTRL-c to cause gApplication.Run() to finish.\n\n incpy.ignore\n \"\"\"\n ROOT.gSystem.ExitLoop()\n\n # Handle CTRL-c\n sh = ROOT.TSignalHandler(ROOT.kSigInterrupt, True)\n sh.Add()\n sh.Connect(\"Notified()\", \"TPyDispatcher\",\n exit_application_loop, \"Dispatch()\")\n\n visible_canvases = get_visible_canvases()\n\n for canvas in visible_canvases:\n log.debug(\"waiting for canvas {0} to close\".format(canvas.GetName()))\n canvas.Update()\n\n if middle_mouse_close:\n attach_event_handler(canvas)\n\n if not getattr(canvas, \"_py_close_dispatcher_attached\", False):\n # Attach a handler only once to each canvas\n canvas._py_close_dispatcher_attached = True\n canvas.Connect(\"Closed()\", 
\"TPyDispatcher\",\n count_canvases, \"Dispatch()\")\n keepalive(canvas, count_canvases)\n\n if visible_canvases and not ROOT.gROOT.IsBatch():\n run_application_until_done()\n\n # Disconnect from canvases\n for canvas in visible_canvases:\n if getattr(canvas, \"_py_close_dispatcher_attached\", False):\n canvas._py_close_dispatcher_attached = False\n canvas.Disconnect(\"Closed()\", count_canvases, \"Dispatch()\")\n\nwait = wait_for_zero_canvases\n\n\ndef wait_for_frame(frame):\n \"\"\"\n wait until a TGMainFrame is closed or ctrl-c\n \"\"\"\n if not frame:\n # It's already closed or maybe we're in batch mode\n return\n\n @dispatcher\n def close():\n ROOT.gSystem.ExitLoop()\n\n if not getattr(frame, \"_py_close_dispatcher_attached\", False):\n frame._py_close_dispatcher_attached = True\n frame.Connect(\"CloseWindow()\", \"TPyDispatcher\", close, \"Dispatch()\")\n\n @dispatcher\n def exit_application_loop():\n \"\"\"\n Signal handler for CTRL-c to cause gApplication.Run() to finish.\n\n incpy.ignore\n \"\"\"\n ROOT.gSystem.ExitLoop()\n\n # Handle CTRL-c\n sh = ROOT.TSignalHandler(ROOT.kSigInterrupt, True)\n sh.Add()\n sh.Connect(\"Notified()\", \"TPyDispatcher\",\n exit_application_loop, \"Dispatch()\")\n\n if not ROOT.gROOT.IsBatch():\n run_application_until_done()\n # Need to disconnect to prevent close handler from running when python\n # teardown has already commenced.\n frame.Disconnect(\"CloseWindow()\", close, \"Dispatch()\")\n\n\ndef wait_for_browser_close(b):\n \"\"\"\n Can be used to wait until a TBrowser is closed\n \"\"\"\n if b:\n if not __ACTIVE:\n wait_failover(wait_for_browser_close)\n return\n wait_for_frame(b.GetBrowserImp().GetMainFrame())\n\n\ndef prevent_close_with_canvases():\n \"\"\"\n Register a handler which prevents python from exiting until\n all canvases are closed\n \"\"\"\n register(wait_for_zero_canvases)\n", "path": "rootpy/interactive/rootwait.py"}]}
3,159
123
gh_patches_debug_5398
rasdani/github-patches
git_diff
jazzband__pip-tools-2038
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Allow http(s) as constraint file <!--- Describe the changes here. ---> Alternative to #2038. In my opinion, this is probably overshooting. ##### Contributor checklist - [ ] Included tests for the changes. - [ ] PR title is short, clear, and ready to be included in the user-facing changelog. ##### Maintainer checklist - [ ] Verified one of these labels is present: `backwards incompatible`, `feature`, `enhancement`, `deprecation`, `bug`, `dependency`, `docs` or `skip-changelog` as they determine changelog listing. - [ ] Assign the PR to an existing or new milestone for the target version (following [Semantic Versioning](https://blog.versioneye.com/2014/01/16/semantic-versioning/)). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `piptools/scripts/options.py` Content: ``` 1 from __future__ import annotations 2 3 from typing import Any, Literal 4 5 import click 6 from pip._internal.commands import create_command 7 from pip._internal.utils.misc import redact_auth_from_url 8 9 from piptools.locations import CACHE_DIR, DEFAULT_CONFIG_FILE_NAMES 10 from piptools.utils import UNSAFE_PACKAGES, override_defaults_from_config_file 11 12 BuildTargetT = Literal["sdist", "wheel", "editable"] 13 ALL_BUILD_TARGETS: tuple[BuildTargetT, ...] = ( 14 "editable", 15 "sdist", 16 "wheel", 17 ) 18 19 20 def _get_default_option(option_name: str) -> Any: 21 """ 22 Get default value of the pip's option (including option from pip.conf) 23 by a given option name. 24 """ 25 install_command = create_command("install") 26 default_values = install_command.parser.get_default_values() 27 return getattr(default_values, option_name) 28 29 30 help_option_names = ("-h", "--help") 31 32 # The options used by pip-compile and pip-sync are presented in no specific order. 
33 34 version = click.version_option(package_name="pip-tools") 35 36 color = click.option( 37 "--color/--no-color", 38 default=None, 39 help="Force output to be colorized or not, instead of auto-detecting color support", 40 ) 41 42 verbose = click.option( 43 "-v", 44 "--verbose", 45 count=True, 46 help="Show more output", 47 ) 48 quiet = click.option( 49 "-q", 50 "--quiet", 51 count=True, 52 help="Give less output", 53 ) 54 55 dry_run = click.option( 56 "-n", 57 "--dry-run", 58 is_flag=True, 59 help="Only show what would happen, don't change anything", 60 ) 61 62 pre = click.option( 63 "-p", 64 "--pre", 65 is_flag=True, 66 default=None, 67 help="Allow resolving to prereleases (default is not)", 68 ) 69 70 rebuild = click.option( 71 "-r", 72 "--rebuild", 73 is_flag=True, 74 help="Clear any caches upfront, rebuild from scratch", 75 ) 76 77 extra = click.option( 78 "--extra", 79 "extras", 80 multiple=True, 81 help="Name of an extras_require group to install; may be used more than once", 82 ) 83 84 all_extras = click.option( 85 "--all-extras", 86 is_flag=True, 87 default=False, 88 help="Install all extras_require groups", 89 ) 90 91 find_links = click.option( 92 "-f", 93 "--find-links", 94 multiple=True, 95 help="Look for archives in this directory or on this HTML page; may be used more than once", 96 ) 97 98 index_url = click.option( 99 "-i", 100 "--index-url", 101 help="Change index URL (defaults to {index_url})".format( 102 index_url=redact_auth_from_url(_get_default_option("index_url")) 103 ), 104 ) 105 106 no_index = click.option( 107 "--no-index", 108 is_flag=True, 109 help="Ignore package index (only looking at --find-links URLs instead).", 110 ) 111 112 extra_index_url = click.option( 113 "--extra-index-url", 114 multiple=True, 115 help="Add another index URL to search; may be used more than once", 116 ) 117 118 cert = click.option("--cert", help="Path to alternate CA bundle.") 119 120 client_cert = click.option( 121 "--client-cert", 122 help=( 123 "Path to SSL client certificate, a single file containing " 124 "the private key and the certificate in PEM format." 
125 ), 126 ) 127 128 trusted_host = click.option( 129 "--trusted-host", 130 multiple=True, 131 help=( 132 "Mark this host as trusted, even though it does not have " 133 "valid or any HTTPS; may be used more than once" 134 ), 135 ) 136 137 header = click.option( 138 "--header/--no-header", 139 is_flag=True, 140 default=True, 141 help="Add header to generated file", 142 ) 143 144 emit_trusted_host = click.option( 145 "--emit-trusted-host/--no-emit-trusted-host", 146 is_flag=True, 147 default=True, 148 help="Add trusted host option to generated file", 149 ) 150 151 annotate = click.option( 152 "--annotate/--no-annotate", 153 is_flag=True, 154 default=True, 155 help="Annotate results, indicating where dependencies come from", 156 ) 157 158 annotation_style = click.option( 159 "--annotation-style", 160 type=click.Choice(("line", "split")), 161 default="split", 162 help="Choose the format of annotation comments", 163 ) 164 165 upgrade = click.option( 166 "-U", 167 "--upgrade/--no-upgrade", 168 is_flag=True, 169 default=False, 170 help="Try to upgrade all dependencies to their latest versions", 171 ) 172 173 upgrade_package = click.option( 174 "-P", 175 "--upgrade-package", 176 "upgrade_packages", 177 nargs=1, 178 multiple=True, 179 help="Specify a particular package to upgrade; may be used more than once", 180 ) 181 182 output_file = click.option( 183 "-o", 184 "--output-file", 185 nargs=1, 186 default=None, 187 type=click.File("w+b", atomic=True, lazy=True), 188 help=( 189 "Output file name. Required if more than one input file is given. " 190 "Will be derived from input file otherwise." 191 ), 192 ) 193 194 newline = click.option( 195 "--newline", 196 type=click.Choice(("LF", "CRLF", "native", "preserve"), case_sensitive=False), 197 default="preserve", 198 help="Override the newline control characters used", 199 ) 200 201 allow_unsafe = click.option( 202 "--allow-unsafe/--no-allow-unsafe", 203 is_flag=True, 204 default=False, 205 help=( 206 "Pin packages considered unsafe: {}.\n\n" 207 "WARNING: Future versions of pip-tools will enable this behavior by default. " 208 "Use --no-allow-unsafe to keep the old behavior. It is recommended to pass the " 209 "--allow-unsafe now to adapt to the upcoming change.".format( 210 ", ".join(sorted(UNSAFE_PACKAGES)) 211 ) 212 ), 213 ) 214 215 strip_extras = click.option( 216 "--strip-extras/--no-strip-extras", 217 is_flag=True, 218 default=None, 219 help="Assure output file is constraints compatible, avoiding use of extras.", 220 ) 221 222 generate_hashes = click.option( 223 "--generate-hashes", 224 is_flag=True, 225 default=False, 226 help="Generate pip 8 style hashes in the resulting requirements file.", 227 ) 228 229 reuse_hashes = click.option( 230 "--reuse-hashes/--no-reuse-hashes", 231 is_flag=True, 232 default=True, 233 help=( 234 "Improve the speed of --generate-hashes by reusing the hashes from an " 235 "existing output file." 236 ), 237 ) 238 239 max_rounds = click.option( 240 "--max-rounds", 241 default=10, 242 help="Maximum number of rounds before resolving the requirements aborts.", 243 ) 244 245 src_files = click.argument( 246 "src_files", 247 nargs=-1, 248 type=click.Path(exists=True, allow_dash=True), 249 ) 250 251 build_isolation = click.option( 252 "--build-isolation/--no-build-isolation", 253 is_flag=True, 254 default=True, 255 help=( 256 "Enable isolation when building a modern source distribution. " 257 "Build dependencies specified by PEP 518 must be already installed " 258 "if build isolation is disabled." 
259 ), 260 ) 261 262 emit_find_links = click.option( 263 "--emit-find-links/--no-emit-find-links", 264 is_flag=True, 265 default=True, 266 help="Add the find-links option to generated file", 267 ) 268 269 cache_dir = click.option( 270 "--cache-dir", 271 help="Store the cache data in DIRECTORY.", 272 default=CACHE_DIR, 273 envvar="PIP_TOOLS_CACHE_DIR", 274 show_default=True, 275 show_envvar=True, 276 type=click.Path(file_okay=False, writable=True), 277 ) 278 279 pip_args = click.option( 280 "--pip-args", 281 "pip_args_str", 282 help="Arguments to pass directly to the pip command.", 283 ) 284 285 resolver = click.option( 286 "--resolver", 287 "resolver_name", 288 type=click.Choice(("legacy", "backtracking")), 289 default="backtracking", 290 envvar="PIP_TOOLS_RESOLVER", 291 help="Choose the dependency resolver.", 292 ) 293 294 emit_index_url = click.option( 295 "--emit-index-url/--no-emit-index-url", 296 is_flag=True, 297 default=True, 298 help="Add index URL to generated file", 299 ) 300 301 emit_options = click.option( 302 "--emit-options/--no-emit-options", 303 is_flag=True, 304 default=True, 305 help="Add options to generated file", 306 ) 307 308 unsafe_package = click.option( 309 "--unsafe-package", 310 multiple=True, 311 help=( 312 "Specify a package to consider unsafe; may be used more than once. " 313 f"Replaces default unsafe packages: {', '.join(sorted(UNSAFE_PACKAGES))}" 314 ), 315 ) 316 317 config = click.option( 318 "--config", 319 type=click.Path( 320 exists=True, 321 file_okay=True, 322 dir_okay=False, 323 readable=True, 324 allow_dash=False, 325 path_type=str, 326 ), 327 help=( 328 f"Read configuration from TOML file. By default, looks for the following " 329 f"files in the given order: {', '.join(DEFAULT_CONFIG_FILE_NAMES)}." 330 ), 331 is_eager=True, 332 callback=override_defaults_from_config_file, 333 ) 334 335 no_config = click.option( 336 "--no-config", 337 is_flag=True, 338 default=False, 339 help="Do not read any config file.", 340 is_eager=True, 341 ) 342 343 constraint = click.option( 344 "-c", 345 "--constraint", 346 type=click.Path( 347 exists=True, 348 file_okay=True, 349 dir_okay=False, 350 readable=True, 351 allow_dash=False, 352 path_type=str, 353 ), 354 multiple=True, 355 help="Constrain versions using the given constraints file; may be used more than once.", 356 ) 357 358 ask = click.option( 359 "-a", 360 "--ask", 361 is_flag=True, 362 help="Show what would happen, then ask whether to continue", 363 ) 364 365 force = click.option( 366 "--force", is_flag=True, help="Proceed even if conflicts are found" 367 ) 368 369 python_executable = click.option( 370 "--python-executable", 371 help="Custom python executable path if targeting an environment other than current.", 372 ) 373 374 user = click.option( 375 "--user", 376 "user_only", 377 is_flag=True, 378 help="Restrict attention to user directory", 379 ) 380 381 build_deps_for = click.option( 382 "--build-deps-for", 383 "build_deps_targets", 384 multiple=True, 385 type=click.Choice(ALL_BUILD_TARGETS), 386 help="Name of a build target to extract dependencies for. " 387 "Static dependencies declared in 'pyproject.toml::build-system.requires' will be included as " 388 "well; may be used more than once.", 389 ) 390 391 all_build_deps = click.option( 392 "--all-build-deps", 393 is_flag=True, 394 default=False, 395 help="Extract dependencies for all build targets. 
" 396 "Static dependencies declared in 'pyproject.toml::build-system.requires' will be included as " 397 "well.", 398 ) 399 400 only_build_deps = click.option( 401 "--only-build-deps", 402 is_flag=True, 403 default=False, 404 help="Extract a package only if it is a build dependency.", 405 ) 406 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/piptools/scripts/options.py b/piptools/scripts/options.py --- a/piptools/scripts/options.py +++ b/piptools/scripts/options.py @@ -343,14 +343,6 @@ constraint = click.option( "-c", "--constraint", - type=click.Path( - exists=True, - file_okay=True, - dir_okay=False, - readable=True, - allow_dash=False, - path_type=str, - ), multiple=True, help="Constrain versions using the given constraints file; may be used more than once.", )
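The patch above simply drops the `click.Path(exists=True, ...)` type from `--constraint`, so the option collects plain strings and leaves validation to pip. The snippet below is a sketch of the resulting option definition (it mirrors the patched code rather than adding behaviour); with it, an invocation such as `pip-compile -c https://example.com/constraints.txt requirements.in` (placeholder URL) is no longer rejected by click before pip ever sees the value, assuming the pip version in use accepts URL-based constraint files.

```python
# Sketch of the patched option: no click.Path type, so values pass through as
# strings and pip decides later whether they are local paths or URLs.
import click

constraint = click.option(
    "-c",
    "--constraint",
    multiple=True,
    help="Constrain versions using the given constraints file; may be used more than once.",
)
```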
{"golden_diff": "diff --git a/piptools/scripts/options.py b/piptools/scripts/options.py\n--- a/piptools/scripts/options.py\n+++ b/piptools/scripts/options.py\n@@ -343,14 +343,6 @@\n constraint = click.option(\n \"-c\",\n \"--constraint\",\n- type=click.Path(\n- exists=True,\n- file_okay=True,\n- dir_okay=False,\n- readable=True,\n- allow_dash=False,\n- path_type=str,\n- ),\n multiple=True,\n help=\"Constrain versions using the given constraints file; may be used more than once.\",\n )\n", "issue": "Allow http(s) as constraint file\n<!--- Describe the changes here. --->\r\n\r\nAlternative to #2038.\r\n\r\nIn my opinion, this is probably overshooting.\r\n\r\n##### Contributor checklist\r\n\r\n- [ ] Included tests for the changes.\r\n- [ ] PR title is short, clear, and ready to be included in the user-facing changelog.\r\n\r\n##### Maintainer checklist\r\n\r\n- [ ] Verified one of these labels is present: `backwards incompatible`, `feature`, `enhancement`, `deprecation`, `bug`, `dependency`, `docs` or `skip-changelog` as they determine changelog listing.\r\n- [ ] Assign the PR to an existing or new milestone for the target version (following [Semantic Versioning](https://blog.versioneye.com/2014/01/16/semantic-versioning/)).\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Literal\n\nimport click\nfrom pip._internal.commands import create_command\nfrom pip._internal.utils.misc import redact_auth_from_url\n\nfrom piptools.locations import CACHE_DIR, DEFAULT_CONFIG_FILE_NAMES\nfrom piptools.utils import UNSAFE_PACKAGES, override_defaults_from_config_file\n\nBuildTargetT = Literal[\"sdist\", \"wheel\", \"editable\"]\nALL_BUILD_TARGETS: tuple[BuildTargetT, ...] = (\n \"editable\",\n \"sdist\",\n \"wheel\",\n)\n\n\ndef _get_default_option(option_name: str) -> Any:\n \"\"\"\n Get default value of the pip's option (including option from pip.conf)\n by a given option name.\n \"\"\"\n install_command = create_command(\"install\")\n default_values = install_command.parser.get_default_values()\n return getattr(default_values, option_name)\n\n\nhelp_option_names = (\"-h\", \"--help\")\n\n# The options used by pip-compile and pip-sync are presented in no specific order.\n\nversion = click.version_option(package_name=\"pip-tools\")\n\ncolor = click.option(\n \"--color/--no-color\",\n default=None,\n help=\"Force output to be colorized or not, instead of auto-detecting color support\",\n)\n\nverbose = click.option(\n \"-v\",\n \"--verbose\",\n count=True,\n help=\"Show more output\",\n)\nquiet = click.option(\n \"-q\",\n \"--quiet\",\n count=True,\n help=\"Give less output\",\n)\n\ndry_run = click.option(\n \"-n\",\n \"--dry-run\",\n is_flag=True,\n help=\"Only show what would happen, don't change anything\",\n)\n\npre = click.option(\n \"-p\",\n \"--pre\",\n is_flag=True,\n default=None,\n help=\"Allow resolving to prereleases (default is not)\",\n)\n\nrebuild = click.option(\n \"-r\",\n \"--rebuild\",\n is_flag=True,\n help=\"Clear any caches upfront, rebuild from scratch\",\n)\n\nextra = click.option(\n \"--extra\",\n \"extras\",\n multiple=True,\n help=\"Name of an extras_require group to install; may be used more than once\",\n)\n\nall_extras = click.option(\n \"--all-extras\",\n is_flag=True,\n default=False,\n help=\"Install all extras_require groups\",\n)\n\nfind_links = click.option(\n \"-f\",\n \"--find-links\",\n multiple=True,\n help=\"Look for archives in this directory or on this HTML page; may be used more than once\",\n)\n\nindex_url = 
click.option(\n \"-i\",\n \"--index-url\",\n help=\"Change index URL (defaults to {index_url})\".format(\n index_url=redact_auth_from_url(_get_default_option(\"index_url\"))\n ),\n)\n\nno_index = click.option(\n \"--no-index\",\n is_flag=True,\n help=\"Ignore package index (only looking at --find-links URLs instead).\",\n)\n\nextra_index_url = click.option(\n \"--extra-index-url\",\n multiple=True,\n help=\"Add another index URL to search; may be used more than once\",\n)\n\ncert = click.option(\"--cert\", help=\"Path to alternate CA bundle.\")\n\nclient_cert = click.option(\n \"--client-cert\",\n help=(\n \"Path to SSL client certificate, a single file containing \"\n \"the private key and the certificate in PEM format.\"\n ),\n)\n\ntrusted_host = click.option(\n \"--trusted-host\",\n multiple=True,\n help=(\n \"Mark this host as trusted, even though it does not have \"\n \"valid or any HTTPS; may be used more than once\"\n ),\n)\n\nheader = click.option(\n \"--header/--no-header\",\n is_flag=True,\n default=True,\n help=\"Add header to generated file\",\n)\n\nemit_trusted_host = click.option(\n \"--emit-trusted-host/--no-emit-trusted-host\",\n is_flag=True,\n default=True,\n help=\"Add trusted host option to generated file\",\n)\n\nannotate = click.option(\n \"--annotate/--no-annotate\",\n is_flag=True,\n default=True,\n help=\"Annotate results, indicating where dependencies come from\",\n)\n\nannotation_style = click.option(\n \"--annotation-style\",\n type=click.Choice((\"line\", \"split\")),\n default=\"split\",\n help=\"Choose the format of annotation comments\",\n)\n\nupgrade = click.option(\n \"-U\",\n \"--upgrade/--no-upgrade\",\n is_flag=True,\n default=False,\n help=\"Try to upgrade all dependencies to their latest versions\",\n)\n\nupgrade_package = click.option(\n \"-P\",\n \"--upgrade-package\",\n \"upgrade_packages\",\n nargs=1,\n multiple=True,\n help=\"Specify a particular package to upgrade; may be used more than once\",\n)\n\noutput_file = click.option(\n \"-o\",\n \"--output-file\",\n nargs=1,\n default=None,\n type=click.File(\"w+b\", atomic=True, lazy=True),\n help=(\n \"Output file name. Required if more than one input file is given. \"\n \"Will be derived from input file otherwise.\"\n ),\n)\n\nnewline = click.option(\n \"--newline\",\n type=click.Choice((\"LF\", \"CRLF\", \"native\", \"preserve\"), case_sensitive=False),\n default=\"preserve\",\n help=\"Override the newline control characters used\",\n)\n\nallow_unsafe = click.option(\n \"--allow-unsafe/--no-allow-unsafe\",\n is_flag=True,\n default=False,\n help=(\n \"Pin packages considered unsafe: {}.\\n\\n\"\n \"WARNING: Future versions of pip-tools will enable this behavior by default. \"\n \"Use --no-allow-unsafe to keep the old behavior. 
It is recommended to pass the \"\n \"--allow-unsafe now to adapt to the upcoming change.\".format(\n \", \".join(sorted(UNSAFE_PACKAGES))\n )\n ),\n)\n\nstrip_extras = click.option(\n \"--strip-extras/--no-strip-extras\",\n is_flag=True,\n default=None,\n help=\"Assure output file is constraints compatible, avoiding use of extras.\",\n)\n\ngenerate_hashes = click.option(\n \"--generate-hashes\",\n is_flag=True,\n default=False,\n help=\"Generate pip 8 style hashes in the resulting requirements file.\",\n)\n\nreuse_hashes = click.option(\n \"--reuse-hashes/--no-reuse-hashes\",\n is_flag=True,\n default=True,\n help=(\n \"Improve the speed of --generate-hashes by reusing the hashes from an \"\n \"existing output file.\"\n ),\n)\n\nmax_rounds = click.option(\n \"--max-rounds\",\n default=10,\n help=\"Maximum number of rounds before resolving the requirements aborts.\",\n)\n\nsrc_files = click.argument(\n \"src_files\",\n nargs=-1,\n type=click.Path(exists=True, allow_dash=True),\n)\n\nbuild_isolation = click.option(\n \"--build-isolation/--no-build-isolation\",\n is_flag=True,\n default=True,\n help=(\n \"Enable isolation when building a modern source distribution. \"\n \"Build dependencies specified by PEP 518 must be already installed \"\n \"if build isolation is disabled.\"\n ),\n)\n\nemit_find_links = click.option(\n \"--emit-find-links/--no-emit-find-links\",\n is_flag=True,\n default=True,\n help=\"Add the find-links option to generated file\",\n)\n\ncache_dir = click.option(\n \"--cache-dir\",\n help=\"Store the cache data in DIRECTORY.\",\n default=CACHE_DIR,\n envvar=\"PIP_TOOLS_CACHE_DIR\",\n show_default=True,\n show_envvar=True,\n type=click.Path(file_okay=False, writable=True),\n)\n\npip_args = click.option(\n \"--pip-args\",\n \"pip_args_str\",\n help=\"Arguments to pass directly to the pip command.\",\n)\n\nresolver = click.option(\n \"--resolver\",\n \"resolver_name\",\n type=click.Choice((\"legacy\", \"backtracking\")),\n default=\"backtracking\",\n envvar=\"PIP_TOOLS_RESOLVER\",\n help=\"Choose the dependency resolver.\",\n)\n\nemit_index_url = click.option(\n \"--emit-index-url/--no-emit-index-url\",\n is_flag=True,\n default=True,\n help=\"Add index URL to generated file\",\n)\n\nemit_options = click.option(\n \"--emit-options/--no-emit-options\",\n is_flag=True,\n default=True,\n help=\"Add options to generated file\",\n)\n\nunsafe_package = click.option(\n \"--unsafe-package\",\n multiple=True,\n help=(\n \"Specify a package to consider unsafe; may be used more than once. \"\n f\"Replaces default unsafe packages: {', '.join(sorted(UNSAFE_PACKAGES))}\"\n ),\n)\n\nconfig = click.option(\n \"--config\",\n type=click.Path(\n exists=True,\n file_okay=True,\n dir_okay=False,\n readable=True,\n allow_dash=False,\n path_type=str,\n ),\n help=(\n f\"Read configuration from TOML file. 
By default, looks for the following \"\n f\"files in the given order: {', '.join(DEFAULT_CONFIG_FILE_NAMES)}.\"\n ),\n is_eager=True,\n callback=override_defaults_from_config_file,\n)\n\nno_config = click.option(\n \"--no-config\",\n is_flag=True,\n default=False,\n help=\"Do not read any config file.\",\n is_eager=True,\n)\n\nconstraint = click.option(\n \"-c\",\n \"--constraint\",\n type=click.Path(\n exists=True,\n file_okay=True,\n dir_okay=False,\n readable=True,\n allow_dash=False,\n path_type=str,\n ),\n multiple=True,\n help=\"Constrain versions using the given constraints file; may be used more than once.\",\n)\n\nask = click.option(\n \"-a\",\n \"--ask\",\n is_flag=True,\n help=\"Show what would happen, then ask whether to continue\",\n)\n\nforce = click.option(\n \"--force\", is_flag=True, help=\"Proceed even if conflicts are found\"\n)\n\npython_executable = click.option(\n \"--python-executable\",\n help=\"Custom python executable path if targeting an environment other than current.\",\n)\n\nuser = click.option(\n \"--user\",\n \"user_only\",\n is_flag=True,\n help=\"Restrict attention to user directory\",\n)\n\nbuild_deps_for = click.option(\n \"--build-deps-for\",\n \"build_deps_targets\",\n multiple=True,\n type=click.Choice(ALL_BUILD_TARGETS),\n help=\"Name of a build target to extract dependencies for. \"\n \"Static dependencies declared in 'pyproject.toml::build-system.requires' will be included as \"\n \"well; may be used more than once.\",\n)\n\nall_build_deps = click.option(\n \"--all-build-deps\",\n is_flag=True,\n default=False,\n help=\"Extract dependencies for all build targets. \"\n \"Static dependencies declared in 'pyproject.toml::build-system.requires' will be included as \"\n \"well.\",\n)\n\nonly_build_deps = click.option(\n \"--only-build-deps\",\n is_flag=True,\n default=False,\n help=\"Extract a package only if it is a build dependency.\",\n)\n", "path": "piptools/scripts/options.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Literal\n\nimport click\nfrom pip._internal.commands import create_command\nfrom pip._internal.utils.misc import redact_auth_from_url\n\nfrom piptools.locations import CACHE_DIR, DEFAULT_CONFIG_FILE_NAMES\nfrom piptools.utils import UNSAFE_PACKAGES, override_defaults_from_config_file\n\nBuildTargetT = Literal[\"sdist\", \"wheel\", \"editable\"]\nALL_BUILD_TARGETS: tuple[BuildTargetT, ...] 
= (\n \"editable\",\n \"sdist\",\n \"wheel\",\n)\n\n\ndef _get_default_option(option_name: str) -> Any:\n \"\"\"\n Get default value of the pip's option (including option from pip.conf)\n by a given option name.\n \"\"\"\n install_command = create_command(\"install\")\n default_values = install_command.parser.get_default_values()\n return getattr(default_values, option_name)\n\n\nhelp_option_names = (\"-h\", \"--help\")\n\n# The options used by pip-compile and pip-sync are presented in no specific order.\n\nversion = click.version_option(package_name=\"pip-tools\")\n\ncolor = click.option(\n \"--color/--no-color\",\n default=None,\n help=\"Force output to be colorized or not, instead of auto-detecting color support\",\n)\n\nverbose = click.option(\n \"-v\",\n \"--verbose\",\n count=True,\n help=\"Show more output\",\n)\nquiet = click.option(\n \"-q\",\n \"--quiet\",\n count=True,\n help=\"Give less output\",\n)\n\ndry_run = click.option(\n \"-n\",\n \"--dry-run\",\n is_flag=True,\n help=\"Only show what would happen, don't change anything\",\n)\n\npre = click.option(\n \"-p\",\n \"--pre\",\n is_flag=True,\n default=None,\n help=\"Allow resolving to prereleases (default is not)\",\n)\n\nrebuild = click.option(\n \"-r\",\n \"--rebuild\",\n is_flag=True,\n help=\"Clear any caches upfront, rebuild from scratch\",\n)\n\nextra = click.option(\n \"--extra\",\n \"extras\",\n multiple=True,\n help=\"Name of an extras_require group to install; may be used more than once\",\n)\n\nall_extras = click.option(\n \"--all-extras\",\n is_flag=True,\n default=False,\n help=\"Install all extras_require groups\",\n)\n\nfind_links = click.option(\n \"-f\",\n \"--find-links\",\n multiple=True,\n help=\"Look for archives in this directory or on this HTML page; may be used more than once\",\n)\n\nindex_url = click.option(\n \"-i\",\n \"--index-url\",\n help=\"Change index URL (defaults to {index_url})\".format(\n index_url=redact_auth_from_url(_get_default_option(\"index_url\"))\n ),\n)\n\nno_index = click.option(\n \"--no-index\",\n is_flag=True,\n help=\"Ignore package index (only looking at --find-links URLs instead).\",\n)\n\nextra_index_url = click.option(\n \"--extra-index-url\",\n multiple=True,\n help=\"Add another index URL to search; may be used more than once\",\n)\n\ncert = click.option(\"--cert\", help=\"Path to alternate CA bundle.\")\n\nclient_cert = click.option(\n \"--client-cert\",\n help=(\n \"Path to SSL client certificate, a single file containing \"\n \"the private key and the certificate in PEM format.\"\n ),\n)\n\ntrusted_host = click.option(\n \"--trusted-host\",\n multiple=True,\n help=(\n \"Mark this host as trusted, even though it does not have \"\n \"valid or any HTTPS; may be used more than once\"\n ),\n)\n\nheader = click.option(\n \"--header/--no-header\",\n is_flag=True,\n default=True,\n help=\"Add header to generated file\",\n)\n\nemit_trusted_host = click.option(\n \"--emit-trusted-host/--no-emit-trusted-host\",\n is_flag=True,\n default=True,\n help=\"Add trusted host option to generated file\",\n)\n\nannotate = click.option(\n \"--annotate/--no-annotate\",\n is_flag=True,\n default=True,\n help=\"Annotate results, indicating where dependencies come from\",\n)\n\nannotation_style = click.option(\n \"--annotation-style\",\n type=click.Choice((\"line\", \"split\")),\n default=\"split\",\n help=\"Choose the format of annotation comments\",\n)\n\nupgrade = click.option(\n \"-U\",\n \"--upgrade/--no-upgrade\",\n is_flag=True,\n default=False,\n help=\"Try to upgrade all dependencies 
to their latest versions\",\n)\n\nupgrade_package = click.option(\n \"-P\",\n \"--upgrade-package\",\n \"upgrade_packages\",\n nargs=1,\n multiple=True,\n help=\"Specify a particular package to upgrade; may be used more than once\",\n)\n\noutput_file = click.option(\n \"-o\",\n \"--output-file\",\n nargs=1,\n default=None,\n type=click.File(\"w+b\", atomic=True, lazy=True),\n help=(\n \"Output file name. Required if more than one input file is given. \"\n \"Will be derived from input file otherwise.\"\n ),\n)\n\nnewline = click.option(\n \"--newline\",\n type=click.Choice((\"LF\", \"CRLF\", \"native\", \"preserve\"), case_sensitive=False),\n default=\"preserve\",\n help=\"Override the newline control characters used\",\n)\n\nallow_unsafe = click.option(\n \"--allow-unsafe/--no-allow-unsafe\",\n is_flag=True,\n default=False,\n help=(\n \"Pin packages considered unsafe: {}.\\n\\n\"\n \"WARNING: Future versions of pip-tools will enable this behavior by default. \"\n \"Use --no-allow-unsafe to keep the old behavior. It is recommended to pass the \"\n \"--allow-unsafe now to adapt to the upcoming change.\".format(\n \", \".join(sorted(UNSAFE_PACKAGES))\n )\n ),\n)\n\nstrip_extras = click.option(\n \"--strip-extras/--no-strip-extras\",\n is_flag=True,\n default=None,\n help=\"Assure output file is constraints compatible, avoiding use of extras.\",\n)\n\ngenerate_hashes = click.option(\n \"--generate-hashes\",\n is_flag=True,\n default=False,\n help=\"Generate pip 8 style hashes in the resulting requirements file.\",\n)\n\nreuse_hashes = click.option(\n \"--reuse-hashes/--no-reuse-hashes\",\n is_flag=True,\n default=True,\n help=(\n \"Improve the speed of --generate-hashes by reusing the hashes from an \"\n \"existing output file.\"\n ),\n)\n\nmax_rounds = click.option(\n \"--max-rounds\",\n default=10,\n help=\"Maximum number of rounds before resolving the requirements aborts.\",\n)\n\nsrc_files = click.argument(\n \"src_files\",\n nargs=-1,\n type=click.Path(exists=True, allow_dash=True),\n)\n\nbuild_isolation = click.option(\n \"--build-isolation/--no-build-isolation\",\n is_flag=True,\n default=True,\n help=(\n \"Enable isolation when building a modern source distribution. 
\"\n \"Build dependencies specified by PEP 518 must be already installed \"\n \"if build isolation is disabled.\"\n ),\n)\n\nemit_find_links = click.option(\n \"--emit-find-links/--no-emit-find-links\",\n is_flag=True,\n default=True,\n help=\"Add the find-links option to generated file\",\n)\n\ncache_dir = click.option(\n \"--cache-dir\",\n help=\"Store the cache data in DIRECTORY.\",\n default=CACHE_DIR,\n envvar=\"PIP_TOOLS_CACHE_DIR\",\n show_default=True,\n show_envvar=True,\n type=click.Path(file_okay=False, writable=True),\n)\n\npip_args = click.option(\n \"--pip-args\",\n \"pip_args_str\",\n help=\"Arguments to pass directly to the pip command.\",\n)\n\nresolver = click.option(\n \"--resolver\",\n \"resolver_name\",\n type=click.Choice((\"legacy\", \"backtracking\")),\n default=\"backtracking\",\n envvar=\"PIP_TOOLS_RESOLVER\",\n help=\"Choose the dependency resolver.\",\n)\n\nemit_index_url = click.option(\n \"--emit-index-url/--no-emit-index-url\",\n is_flag=True,\n default=True,\n help=\"Add index URL to generated file\",\n)\n\nemit_options = click.option(\n \"--emit-options/--no-emit-options\",\n is_flag=True,\n default=True,\n help=\"Add options to generated file\",\n)\n\nunsafe_package = click.option(\n \"--unsafe-package\",\n multiple=True,\n help=(\n \"Specify a package to consider unsafe; may be used more than once. \"\n f\"Replaces default unsafe packages: {', '.join(sorted(UNSAFE_PACKAGES))}\"\n ),\n)\n\nconfig = click.option(\n \"--config\",\n type=click.Path(\n exists=True,\n file_okay=True,\n dir_okay=False,\n readable=True,\n allow_dash=False,\n path_type=str,\n ),\n help=(\n f\"Read configuration from TOML file. By default, looks for the following \"\n f\"files in the given order: {', '.join(DEFAULT_CONFIG_FILE_NAMES)}.\"\n ),\n is_eager=True,\n callback=override_defaults_from_config_file,\n)\n\nno_config = click.option(\n \"--no-config\",\n is_flag=True,\n default=False,\n help=\"Do not read any config file.\",\n is_eager=True,\n)\n\nconstraint = click.option(\n \"-c\",\n \"--constraint\",\n multiple=True,\n help=\"Constrain versions using the given constraints file; may be used more than once.\",\n)\n\nask = click.option(\n \"-a\",\n \"--ask\",\n is_flag=True,\n help=\"Show what would happen, then ask whether to continue\",\n)\n\nforce = click.option(\n \"--force\", is_flag=True, help=\"Proceed even if conflicts are found\"\n)\n\npython_executable = click.option(\n \"--python-executable\",\n help=\"Custom python executable path if targeting an environment other than current.\",\n)\n\nuser = click.option(\n \"--user\",\n \"user_only\",\n is_flag=True,\n help=\"Restrict attention to user directory\",\n)\n\nbuild_deps_for = click.option(\n \"--build-deps-for\",\n \"build_deps_targets\",\n multiple=True,\n type=click.Choice(ALL_BUILD_TARGETS),\n help=\"Name of a build target to extract dependencies for. \"\n \"Static dependencies declared in 'pyproject.toml::build-system.requires' will be included as \"\n \"well; may be used more than once.\",\n)\n\nall_build_deps = click.option(\n \"--all-build-deps\",\n is_flag=True,\n default=False,\n help=\"Extract dependencies for all build targets. \"\n \"Static dependencies declared in 'pyproject.toml::build-system.requires' will be included as \"\n \"well.\",\n)\n\nonly_build_deps = click.option(\n \"--only-build-deps\",\n is_flag=True,\n default=False,\n help=\"Extract a package only if it is a build dependency.\",\n)\n", "path": "piptools/scripts/options.py"}]}
3,925
134
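As a quick way to see the effect of dropping the `click.Path` type, the following hypothetical check (not taken from the pip-tools test suite; the command and URL are made up) builds a minimal command around an un-typed `--constraint` option and confirms that an `https://` value reaches the command body instead of failing click's `exists=True` check.

```python
# Hypothetical check, not project code: a URL constraint should now reach the
# command body rather than triggering click's "does not exist" error.
import click
from click.testing import CliRunner

constraint = click.option("-c", "--constraint", multiple=True)

@click.command()
@constraint
def cli(constraint):
    click.echo(";".join(constraint))

result = CliRunner().invoke(cli, ["-c", "https://example.com/constraints.txt"])
assert result.exit_code == 0
assert "https://example.com/constraints.txt" in result.output
```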
gh_patches_debug_15822
rasdani/github-patches
git_diff
goauthentik__authentik-7315
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Login fails at GET /api/v3/flows/executor/default-authentication-flow/ **Describe the bug** Authentication fails. I've tried my own auth flow, which includes a passwordless option. This log is from switching back to the default auth flow. **To Reproduce** Steps to reproduce the behavior: 1. Enter Username 2. Enter Password 3. Press Enter 4. See error **Expected behavior** This stage would normally be to select the authenticator to use. (Key or TOTP, in my case) **Screenshots** If applicable, add screenshots to help explain your problem. **Logs** <details> <summary>Stacktrace from authentik</summary> ``` Traceback (most recent call last): File "/authentik/flows/views/executor.py", line 287, in get stage_response = self.current_stage_view.dispatch(request) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/ak-root/venv/lib/python3.11/site-packages/django/views/generic/base.py", line 143, in dispatch return handler(request, *args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/authentik/stages/authenticator_validate/stage.py", line 222, in get challenges = self.get_device_challenges() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/authentik/stages/authenticator_validate/stage.py", line 157, in get_device_challenges user_devices = list(devices_for_user(self.get_pending_user())) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/authentik/stages/authenticator/__init__.py", line 93, in devices_for_user yield from device_set File "/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py", line 398, in __iter__ self._fetch_all() File "/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py", line 1881, in _fetch_all self._result_cache = list(self._iterable_class(self)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py", line 91, in __iter__ results = compiler.execute_sql( ^^^^^^^^^^^^^^^^^^^^^ File "/ak-root/venv/lib/python3.11/site-packages/django/db/models/sql/compiler.py", line 1562, in execute_sql cursor.execute(sql, params) File "/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py", line 67, in execute return self._execute_with_wrappers( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py", line 80, in _execute_with_wrappers return executor(sql, params, many, context) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py", line 84, in _execute with self.db.wrap_database_errors: File "/ak-root/venv/lib/python3.11/site-packages/django/db/utils.py", line 91, in __exit__ raise dj_exc_value.with_traceback(traceback) from exc_value File "/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py", line 89, in _execute return self.cursor.execute(sql, params) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/ak-root/venv/lib/python3.11/site-packages/django_prometheus/db/common.py", line 69, in execute return super().execute(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/ak-root/venv/lib/python3.11/site-packages/psycopg/cursor.py", line 737, in execute raise ex.with_traceback(None) django.db.utils.ProgrammingError: relation "authentik_stages_authenticator_static_staticdevice" does not exist LINE 1: ...tic_staticdevice"."throttling_failure_count" FROM "authentik... 
^ ``` </details> **Version and Deployment (please complete the following information):** - authentik version: gh-next as of 10/21/2023 - Deployment: docker-compose **Additional context** Add any other context about the problem here. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lifecycle/system_migrations/otp_merge.py` Content: ``` 1 # flake8: noqa 2 from lifecycle.migrate import BaseMigration 3 4 SQL_STATEMENT = """ 5 DELETE FROM django_migrations WHERE app = 'otp_static'; 6 DELETE FROM django_migrations WHERE app = 'otp_totp'; 7 -- Rename tables (static) 8 ALTER TABLE otp_static_staticdevice RENAME TO authentik_stages_authenticator_static_staticdevice; 9 ALTER TABLE otp_static_statictoken RENAME TO authentik_stages_authenticator_static_statictoken; 10 ALTER SEQUENCE otp_static_statictoken_id_seq RENAME TO authentik_stages_authenticator_static_statictoken_id_seq; 11 ALTER SEQUENCE otp_static_staticdevice_id_seq RENAME TO authentik_stages_authenticator_static_staticdevice_id_seq; 12 -- Rename tables (totp) 13 ALTER TABLE otp_totp_totpdevice RENAME TO authentik_stages_authenticator_totp_totpdevice; 14 ALTER SEQUENCE otp_totp_totpdevice_id_seq RENAME TO authentik_stages_authenticator_totp_totpdevice_id_seq; 15 """ 16 17 18 class Migration(BaseMigration): 19 def needs_migration(self) -> bool: 20 self.cur.execute( 21 "select * from information_schema.tables WHERE table_name='otp_static_staticdevice'" 22 ) 23 return bool(self.cur.rowcount) 24 25 def run(self): 26 self.cur.execute(SQL_STATEMENT) 27 self.fake_migration( 28 ( 29 "authentik_stages_authenticator_static", 30 "0008_initial", 31 ), 32 ( 33 "authentik_stages_authenticator_static", 34 "0009_throttling", 35 ), 36 ( 37 "authentik_stages_authenticator_totp", 38 "0008_initial", 39 ), 40 ( 41 "authentik_stages_authenticator_totp", 42 "0009_auto_20190420_0723", 43 ), 44 ) 45 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lifecycle/system_migrations/otp_merge.py b/lifecycle/system_migrations/otp_merge.py --- a/lifecycle/system_migrations/otp_merge.py +++ b/lifecycle/system_migrations/otp_merge.py @@ -2,6 +2,7 @@ from lifecycle.migrate import BaseMigration SQL_STATEMENT = """ +BEGIN TRANSACTION; DELETE FROM django_migrations WHERE app = 'otp_static'; DELETE FROM django_migrations WHERE app = 'otp_totp'; -- Rename tables (static) @@ -12,6 +13,7 @@ -- Rename tables (totp) ALTER TABLE otp_totp_totpdevice RENAME TO authentik_stages_authenticator_totp_totpdevice; ALTER SEQUENCE otp_totp_totpdevice_id_seq RENAME TO authentik_stages_authenticator_totp_totpdevice_id_seq; +COMMIT; """
{"golden_diff": "diff --git a/lifecycle/system_migrations/otp_merge.py b/lifecycle/system_migrations/otp_merge.py\n--- a/lifecycle/system_migrations/otp_merge.py\n+++ b/lifecycle/system_migrations/otp_merge.py\n@@ -2,6 +2,7 @@\n from lifecycle.migrate import BaseMigration\n \n SQL_STATEMENT = \"\"\"\n+BEGIN TRANSACTION;\n DELETE FROM django_migrations WHERE app = 'otp_static';\n DELETE FROM django_migrations WHERE app = 'otp_totp';\n -- Rename tables (static)\n@@ -12,6 +13,7 @@\n -- Rename tables (totp)\n ALTER TABLE otp_totp_totpdevice RENAME TO authentik_stages_authenticator_totp_totpdevice;\n ALTER SEQUENCE otp_totp_totpdevice_id_seq RENAME TO authentik_stages_authenticator_totp_totpdevice_id_seq;\n+COMMIT;\n \"\"\"\n", "issue": "Login fails at GET /api/v3/flows/executor/default-authentication-flow/ \n**Describe the bug**\r\nAuthentication fails. I've tried my own auth flow, which includes a passwordless option. This log is from switching back to the default auth flow.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Enter Username\r\n2. Enter Password\r\n3. Press Enter\r\n4. See error\r\n\r\n**Expected behavior**\r\nThis stage would normally be to select the authenticator to use. (Key or TOTP, in my case)\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Logs**\r\n<details>\r\n <summary>Stacktrace from authentik</summary>\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/authentik/flows/views/executor.py\", line 287, in get\r\n stage_response = self.current_stage_view.dispatch(request)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/views/generic/base.py\", line 143, in dispatch\r\n return handler(request, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/stages/authenticator_validate/stage.py\", line 222, in get\r\n challenges = self.get_device_challenges()\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/stages/authenticator_validate/stage.py\", line 157, in get_device_challenges\r\n user_devices = list(devices_for_user(self.get_pending_user()))\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/stages/authenticator/__init__.py\", line 93, in devices_for_user\r\n yield from device_set\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py\", line 398, in __iter__\r\n self._fetch_all()\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py\", line 1881, in _fetch_all\r\n self._result_cache = list(self._iterable_class(self))\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py\", line 91, in __iter__\r\n results = compiler.execute_sql(\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/models/sql/compiler.py\", line 1562, in execute_sql\r\n cursor.execute(sql, params)\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py\", line 67, in execute\r\n return self._execute_with_wrappers(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py\", line 80, in _execute_with_wrappers\r\n return executor(sql, params, many, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py\", line 84, in _execute\r\n with self.db.wrap_database_errors:\r\n File 
\"/ak-root/venv/lib/python3.11/site-packages/django/db/utils.py\", line 91, in __exit__\r\n raise dj_exc_value.with_traceback(traceback) from exc_value\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py\", line 89, in _execute\r\n return self.cursor.execute(sql, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django_prometheus/db/common.py\", line 69, in execute\r\n return super().execute(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/psycopg/cursor.py\", line 737, in execute\r\n raise ex.with_traceback(None)\r\ndjango.db.utils.ProgrammingError: relation \"authentik_stages_authenticator_static_staticdevice\" does not exist\r\nLINE 1: ...tic_staticdevice\".\"throttling_failure_count\" FROM \"authentik...\r\n ^\r\n```\r\n</details>\r\n\r\n\r\n**Version and Deployment (please complete the following information):**\r\n- authentik version: gh-next as of 10/21/2023\r\n- Deployment: docker-compose\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n \n", "before_files": [{"content": "# flake8: noqa\nfrom lifecycle.migrate import BaseMigration\n\nSQL_STATEMENT = \"\"\"\nDELETE FROM django_migrations WHERE app = 'otp_static';\nDELETE FROM django_migrations WHERE app = 'otp_totp';\n-- Rename tables (static)\nALTER TABLE otp_static_staticdevice RENAME TO authentik_stages_authenticator_static_staticdevice;\nALTER TABLE otp_static_statictoken RENAME TO authentik_stages_authenticator_static_statictoken;\nALTER SEQUENCE otp_static_statictoken_id_seq RENAME TO authentik_stages_authenticator_static_statictoken_id_seq;\nALTER SEQUENCE otp_static_staticdevice_id_seq RENAME TO authentik_stages_authenticator_static_staticdevice_id_seq;\n-- Rename tables (totp)\nALTER TABLE otp_totp_totpdevice RENAME TO authentik_stages_authenticator_totp_totpdevice;\nALTER SEQUENCE otp_totp_totpdevice_id_seq RENAME TO authentik_stages_authenticator_totp_totpdevice_id_seq;\n\"\"\"\n\n\nclass Migration(BaseMigration):\n def needs_migration(self) -> bool:\n self.cur.execute(\n \"select * from information_schema.tables WHERE table_name='otp_static_staticdevice'\"\n )\n return bool(self.cur.rowcount)\n\n def run(self):\n self.cur.execute(SQL_STATEMENT)\n self.fake_migration(\n (\n \"authentik_stages_authenticator_static\",\n \"0008_initial\",\n ),\n (\n \"authentik_stages_authenticator_static\",\n \"0009_throttling\",\n ),\n (\n \"authentik_stages_authenticator_totp\",\n \"0008_initial\",\n ),\n (\n \"authentik_stages_authenticator_totp\",\n \"0009_auto_20190420_0723\",\n ),\n )\n", "path": "lifecycle/system_migrations/otp_merge.py"}], "after_files": [{"content": "# flake8: noqa\nfrom lifecycle.migrate import BaseMigration\n\nSQL_STATEMENT = \"\"\"\nBEGIN TRANSACTION;\nDELETE FROM django_migrations WHERE app = 'otp_static';\nDELETE FROM django_migrations WHERE app = 'otp_totp';\n-- Rename tables (static)\nALTER TABLE otp_static_staticdevice RENAME TO authentik_stages_authenticator_static_staticdevice;\nALTER TABLE otp_static_statictoken RENAME TO authentik_stages_authenticator_static_statictoken;\nALTER SEQUENCE otp_static_statictoken_id_seq RENAME TO authentik_stages_authenticator_static_statictoken_id_seq;\nALTER SEQUENCE otp_static_staticdevice_id_seq RENAME TO authentik_stages_authenticator_static_staticdevice_id_seq;\n-- Rename tables (totp)\nALTER TABLE otp_totp_totpdevice RENAME TO authentik_stages_authenticator_totp_totpdevice;\nALTER SEQUENCE 
otp_totp_totpdevice_id_seq RENAME TO authentik_stages_authenticator_totp_totpdevice_id_seq;\nCOMMIT;\n\"\"\"\n\n\nclass Migration(BaseMigration):\n def needs_migration(self) -> bool:\n self.cur.execute(\n \"select * from information_schema.tables WHERE table_name='otp_static_staticdevice'\"\n )\n return bool(self.cur.rowcount)\n\n def run(self):\n self.cur.execute(SQL_STATEMENT)\n self.fake_migration(\n (\n \"authentik_stages_authenticator_static\",\n \"0008_initial\",\n ),\n (\n \"authentik_stages_authenticator_static\",\n \"0009_throttling\",\n ),\n (\n \"authentik_stages_authenticator_totp\",\n \"0008_initial\",\n ),\n (\n \"authentik_stages_authenticator_totp\",\n \"0009_auto_20190420_0723\",\n ),\n )\n", "path": "lifecycle/system_migrations/otp_merge.py"}]}
1,760
183
gh_patches_debug_18048
rasdani/github-patches
git_diff
Textualize__textual-2747
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- docs: `Message` not exported from `textual.messages` Just something minor I spotted while playing with the [Messages up](https://textual.textualize.io/guide/widgets/#messages-up) example in the docs. The code still works but my editor complains that: > "Message" is not exported from module "textual.messages" > Import from "textual.message" instead [reportPrivateImportUsage] Happy to submit a quick PR if deemed worth fixing! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/examples/guide/compound/byte02.py` Content: ``` 1 from __future__ import annotations 2 3 from textual.app import App, ComposeResult 4 from textual.containers import Container 5 from textual.messages import Message 6 from textual.reactive import reactive 7 from textual.widget import Widget 8 from textual.widgets import Input, Label, Switch 9 10 11 class BitSwitch(Widget): 12 """A Switch with a numeric label above it.""" 13 14 DEFAULT_CSS = """ 15 BitSwitch { 16 layout: vertical; 17 width: auto; 18 height: auto; 19 } 20 BitSwitch > Label { 21 text-align: center; 22 width: 100%; 23 } 24 """ 25 26 class BitChanged(Message): 27 """Sent when the 'bit' changes.""" 28 29 def __init__(self, bit: int, value: bool) -> None: 30 super().__init__() 31 self.bit = bit 32 self.value = value 33 34 value = reactive(0) # (1)! 35 36 def __init__(self, bit: int) -> None: 37 self.bit = bit 38 super().__init__() 39 40 def compose(self) -> ComposeResult: 41 yield Label(str(self.bit)) 42 yield Switch() 43 44 def on_switch_changed(self, event: Switch.Changed) -> None: # (2)! 45 """When the switch changes, notify the parent via a message.""" 46 event.stop() # (3)! 47 self.value = event.value # (4)! 
48 self.post_message(self.BitChanged(self.bit, event.value)) 49 50 51 class ByteInput(Widget): 52 """A compound widget with 8 switches.""" 53 54 DEFAULT_CSS = """ 55 ByteInput { 56 width: auto; 57 height: auto; 58 border: blank; 59 layout: horizontal; 60 } 61 ByteInput:focus-within { 62 border: heavy $secondary; 63 } 64 """ 65 66 def compose(self) -> ComposeResult: 67 for bit in reversed(range(8)): 68 yield BitSwitch(bit) 69 70 71 class ByteEditor(Widget): 72 DEFAULT_CSS = """ 73 ByteEditor > Container { 74 height: 1fr; 75 align: center middle; 76 } 77 ByteEditor > Container.top { 78 background: $boost; 79 } 80 ByteEditor Input { 81 width: 16; 82 } 83 """ 84 85 def compose(self) -> ComposeResult: 86 with Container(classes="top"): 87 yield Input(placeholder="byte") 88 with Container(): 89 yield ByteInput() 90 91 def on_bit_switch_bit_changed(self, event: BitSwitch.BitChanged) -> None: 92 """When a switch changes, update the value.""" 93 value = 0 94 for switch in self.query(BitSwitch): 95 value |= switch.value << switch.bit 96 self.query_one(Input).value = str(value) 97 98 99 class ByteInputApp(App): 100 def compose(self) -> ComposeResult: 101 yield ByteEditor() 102 103 104 if __name__ == "__main__": 105 app = ByteInputApp() 106 app.run() 107 ``` Path: `docs/examples/guide/compound/byte03.py` Content: ``` 1 from __future__ import annotations 2 3 from textual.app import App, ComposeResult 4 from textual.containers import Container 5 from textual.geometry import clamp 6 from textual.messages import Message 7 from textual.reactive import reactive 8 from textual.widget import Widget 9 from textual.widgets import Input, Label, Switch 10 11 12 class BitSwitch(Widget): 13 """A Switch with a numeric label above it.""" 14 15 DEFAULT_CSS = """ 16 BitSwitch { 17 layout: vertical; 18 width: auto; 19 height: auto; 20 } 21 BitSwitch > Label { 22 text-align: center; 23 width: 100%; 24 } 25 """ 26 27 class BitChanged(Message): 28 """Sent when the 'bit' changes.""" 29 30 def __init__(self, bit: int, value: bool) -> None: 31 super().__init__() 32 self.bit = bit 33 self.value = value 34 35 value = reactive(0) 36 37 def __init__(self, bit: int) -> None: 38 self.bit = bit 39 super().__init__() 40 41 def compose(self) -> ComposeResult: 42 yield Label(str(self.bit)) 43 yield Switch() 44 45 def watch_value(self, value: bool) -> None: # (1)! 46 """When the value changes we want to set the switch accordingly.""" 47 self.query_one(Switch).value = value 48 49 def on_switch_changed(self, event: Switch.Changed) -> None: 50 """When the switch changes, notify the parent via a message.""" 51 event.stop() 52 self.value = event.value 53 self.post_message(self.BitChanged(self.bit, event.value)) 54 55 56 class ByteInput(Widget): 57 """A compound widget with 8 switches.""" 58 59 DEFAULT_CSS = """ 60 ByteInput { 61 width: auto; 62 height: auto; 63 border: blank; 64 layout: horizontal; 65 } 66 ByteInput:focus-within { 67 border: heavy $secondary; 68 } 69 """ 70 71 def compose(self) -> ComposeResult: 72 for bit in reversed(range(8)): 73 yield BitSwitch(bit) 74 75 76 class ByteEditor(Widget): 77 DEFAULT_CSS = """ 78 ByteEditor > Container { 79 height: 1fr; 80 align: center middle; 81 } 82 ByteEditor > Container.top { 83 background: $boost; 84 } 85 ByteEditor Input { 86 width: 16; 87 } 88 """ 89 90 value = reactive(0) 91 92 def validate_value(self, value: int) -> int: # (2)! 
93 """Ensure value is between 0 and 255.""" 94 return clamp(value, 0, 255) 95 96 def compose(self) -> ComposeResult: 97 with Container(classes="top"): 98 yield Input(placeholder="byte") 99 with Container(): 100 yield ByteInput() 101 102 def on_bit_switch_bit_changed(self, event: BitSwitch.BitChanged) -> None: 103 """When a switch changes, update the value.""" 104 value = 0 105 for switch in self.query(BitSwitch): 106 value |= switch.value << switch.bit 107 self.query_one(Input).value = str(value) 108 109 def on_input_changed(self, event: Input.Changed) -> None: # (3)! 110 """When the text changes, set the value of the byte.""" 111 try: 112 self.value = int(event.value or "0") 113 except ValueError: 114 pass 115 116 def watch_value(self, value: int) -> None: # (4)! 117 """When self.value changes, update switches.""" 118 for switch in self.query(BitSwitch): 119 with switch.prevent(BitSwitch.BitChanged): # (5)! 120 switch.value = bool(value & (1 << switch.bit)) # (6)! 121 122 123 class ByteInputApp(App): 124 def compose(self) -> ComposeResult: 125 yield ByteEditor() 126 127 128 if __name__ == "__main__": 129 app = ByteInputApp() 130 app.run() 131 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/examples/guide/compound/byte02.py b/docs/examples/guide/compound/byte02.py --- a/docs/examples/guide/compound/byte02.py +++ b/docs/examples/guide/compound/byte02.py @@ -2,7 +2,7 @@ from textual.app import App, ComposeResult from textual.containers import Container -from textual.messages import Message +from textual.message import Message from textual.reactive import reactive from textual.widget import Widget from textual.widgets import Input, Label, Switch diff --git a/docs/examples/guide/compound/byte03.py b/docs/examples/guide/compound/byte03.py --- a/docs/examples/guide/compound/byte03.py +++ b/docs/examples/guide/compound/byte03.py @@ -3,7 +3,7 @@ from textual.app import App, ComposeResult from textual.containers import Container from textual.geometry import clamp -from textual.messages import Message +from textual.message import Message from textual.reactive import reactive from textual.widget import Widget from textual.widgets import Input, Label, Switch
{"golden_diff": "diff --git a/docs/examples/guide/compound/byte02.py b/docs/examples/guide/compound/byte02.py\n--- a/docs/examples/guide/compound/byte02.py\n+++ b/docs/examples/guide/compound/byte02.py\n@@ -2,7 +2,7 @@\n \n from textual.app import App, ComposeResult\n from textual.containers import Container\n-from textual.messages import Message\n+from textual.message import Message\n from textual.reactive import reactive\n from textual.widget import Widget\n from textual.widgets import Input, Label, Switch\ndiff --git a/docs/examples/guide/compound/byte03.py b/docs/examples/guide/compound/byte03.py\n--- a/docs/examples/guide/compound/byte03.py\n+++ b/docs/examples/guide/compound/byte03.py\n@@ -3,7 +3,7 @@\n from textual.app import App, ComposeResult\n from textual.containers import Container\n from textual.geometry import clamp\n-from textual.messages import Message\n+from textual.message import Message\n from textual.reactive import reactive\n from textual.widget import Widget\n from textual.widgets import Input, Label, Switch\n", "issue": "docs: `Message` not exported from `textual.messages`\nJust something minor I spotted while playing with the [Messages up](https://textual.textualize.io/guide/widgets/#messages-up) example in the docs. The code still works but my editor complains that:\r\n\r\n> \"Message\" is not exported from module \"textual.messages\"\r\n> Import from \"textual.message\" instead [reportPrivateImportUsage]\r\n\r\nHappy to submit a quick PR if deemed worth fixing!\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom textual.app import App, ComposeResult\nfrom textual.containers import Container\nfrom textual.messages import Message\nfrom textual.reactive import reactive\nfrom textual.widget import Widget\nfrom textual.widgets import Input, Label, Switch\n\n\nclass BitSwitch(Widget):\n \"\"\"A Switch with a numeric label above it.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n BitSwitch {\n layout: vertical;\n width: auto;\n height: auto;\n }\n BitSwitch > Label {\n text-align: center;\n width: 100%;\n }\n \"\"\"\n\n class BitChanged(Message):\n \"\"\"Sent when the 'bit' changes.\"\"\"\n\n def __init__(self, bit: int, value: bool) -> None:\n super().__init__()\n self.bit = bit\n self.value = value\n\n value = reactive(0) # (1)!\n\n def __init__(self, bit: int) -> None:\n self.bit = bit\n super().__init__()\n\n def compose(self) -> ComposeResult:\n yield Label(str(self.bit))\n yield Switch()\n\n def on_switch_changed(self, event: Switch.Changed) -> None: # (2)!\n \"\"\"When the switch changes, notify the parent via a message.\"\"\"\n event.stop() # (3)!\n self.value = event.value # (4)!\n self.post_message(self.BitChanged(self.bit, event.value))\n\n\nclass ByteInput(Widget):\n \"\"\"A compound widget with 8 switches.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n ByteInput {\n width: auto;\n height: auto;\n border: blank;\n layout: horizontal;\n }\n ByteInput:focus-within {\n border: heavy $secondary;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n for bit in reversed(range(8)):\n yield BitSwitch(bit)\n\n\nclass ByteEditor(Widget):\n DEFAULT_CSS = \"\"\"\n ByteEditor > Container {\n height: 1fr;\n align: center middle;\n }\n ByteEditor > Container.top {\n background: $boost;\n }\n ByteEditor Input {\n width: 16;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n with Container(classes=\"top\"):\n yield Input(placeholder=\"byte\")\n with Container():\n yield ByteInput()\n\n def on_bit_switch_bit_changed(self, event: BitSwitch.BitChanged) -> None:\n \"\"\"When a 
switch changes, update the value.\"\"\"\n value = 0\n for switch in self.query(BitSwitch):\n value |= switch.value << switch.bit\n self.query_one(Input).value = str(value)\n\n\nclass ByteInputApp(App):\n def compose(self) -> ComposeResult:\n yield ByteEditor()\n\n\nif __name__ == \"__main__\":\n app = ByteInputApp()\n app.run()\n", "path": "docs/examples/guide/compound/byte02.py"}, {"content": "from __future__ import annotations\n\nfrom textual.app import App, ComposeResult\nfrom textual.containers import Container\nfrom textual.geometry import clamp\nfrom textual.messages import Message\nfrom textual.reactive import reactive\nfrom textual.widget import Widget\nfrom textual.widgets import Input, Label, Switch\n\n\nclass BitSwitch(Widget):\n \"\"\"A Switch with a numeric label above it.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n BitSwitch {\n layout: vertical;\n width: auto;\n height: auto;\n }\n BitSwitch > Label {\n text-align: center;\n width: 100%;\n }\n \"\"\"\n\n class BitChanged(Message):\n \"\"\"Sent when the 'bit' changes.\"\"\"\n\n def __init__(self, bit: int, value: bool) -> None:\n super().__init__()\n self.bit = bit\n self.value = value\n\n value = reactive(0)\n\n def __init__(self, bit: int) -> None:\n self.bit = bit\n super().__init__()\n\n def compose(self) -> ComposeResult:\n yield Label(str(self.bit))\n yield Switch()\n\n def watch_value(self, value: bool) -> None: # (1)!\n \"\"\"When the value changes we want to set the switch accordingly.\"\"\"\n self.query_one(Switch).value = value\n\n def on_switch_changed(self, event: Switch.Changed) -> None:\n \"\"\"When the switch changes, notify the parent via a message.\"\"\"\n event.stop()\n self.value = event.value\n self.post_message(self.BitChanged(self.bit, event.value))\n\n\nclass ByteInput(Widget):\n \"\"\"A compound widget with 8 switches.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n ByteInput {\n width: auto;\n height: auto;\n border: blank;\n layout: horizontal;\n }\n ByteInput:focus-within {\n border: heavy $secondary;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n for bit in reversed(range(8)):\n yield BitSwitch(bit)\n\n\nclass ByteEditor(Widget):\n DEFAULT_CSS = \"\"\"\n ByteEditor > Container {\n height: 1fr;\n align: center middle;\n }\n ByteEditor > Container.top {\n background: $boost;\n }\n ByteEditor Input {\n width: 16;\n }\n \"\"\"\n\n value = reactive(0)\n\n def validate_value(self, value: int) -> int: # (2)!\n \"\"\"Ensure value is between 0 and 255.\"\"\"\n return clamp(value, 0, 255)\n\n def compose(self) -> ComposeResult:\n with Container(classes=\"top\"):\n yield Input(placeholder=\"byte\")\n with Container():\n yield ByteInput()\n\n def on_bit_switch_bit_changed(self, event: BitSwitch.BitChanged) -> None:\n \"\"\"When a switch changes, update the value.\"\"\"\n value = 0\n for switch in self.query(BitSwitch):\n value |= switch.value << switch.bit\n self.query_one(Input).value = str(value)\n\n def on_input_changed(self, event: Input.Changed) -> None: # (3)!\n \"\"\"When the text changes, set the value of the byte.\"\"\"\n try:\n self.value = int(event.value or \"0\")\n except ValueError:\n pass\n\n def watch_value(self, value: int) -> None: # (4)!\n \"\"\"When self.value changes, update switches.\"\"\"\n for switch in self.query(BitSwitch):\n with switch.prevent(BitSwitch.BitChanged): # (5)!\n switch.value = bool(value & (1 << switch.bit)) # (6)!\n\n\nclass ByteInputApp(App):\n def compose(self) -> ComposeResult:\n yield ByteEditor()\n\n\nif __name__ == \"__main__\":\n app = ByteInputApp()\n app.run()\n", "path": 
"docs/examples/guide/compound/byte03.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom textual.app import App, ComposeResult\nfrom textual.containers import Container\nfrom textual.message import Message\nfrom textual.reactive import reactive\nfrom textual.widget import Widget\nfrom textual.widgets import Input, Label, Switch\n\n\nclass BitSwitch(Widget):\n \"\"\"A Switch with a numeric label above it.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n BitSwitch {\n layout: vertical;\n width: auto;\n height: auto;\n }\n BitSwitch > Label {\n text-align: center;\n width: 100%;\n }\n \"\"\"\n\n class BitChanged(Message):\n \"\"\"Sent when the 'bit' changes.\"\"\"\n\n def __init__(self, bit: int, value: bool) -> None:\n super().__init__()\n self.bit = bit\n self.value = value\n\n value = reactive(0) # (1)!\n\n def __init__(self, bit: int) -> None:\n self.bit = bit\n super().__init__()\n\n def compose(self) -> ComposeResult:\n yield Label(str(self.bit))\n yield Switch()\n\n def on_switch_changed(self, event: Switch.Changed) -> None: # (2)!\n \"\"\"When the switch changes, notify the parent via a message.\"\"\"\n event.stop() # (3)!\n self.value = event.value # (4)!\n self.post_message(self.BitChanged(self.bit, event.value))\n\n\nclass ByteInput(Widget):\n \"\"\"A compound widget with 8 switches.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n ByteInput {\n width: auto;\n height: auto;\n border: blank;\n layout: horizontal;\n }\n ByteInput:focus-within {\n border: heavy $secondary;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n for bit in reversed(range(8)):\n yield BitSwitch(bit)\n\n\nclass ByteEditor(Widget):\n DEFAULT_CSS = \"\"\"\n ByteEditor > Container {\n height: 1fr;\n align: center middle;\n }\n ByteEditor > Container.top {\n background: $boost;\n }\n ByteEditor Input {\n width: 16;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n with Container(classes=\"top\"):\n yield Input(placeholder=\"byte\")\n with Container():\n yield ByteInput()\n\n def on_bit_switch_bit_changed(self, event: BitSwitch.BitChanged) -> None:\n \"\"\"When a switch changes, update the value.\"\"\"\n value = 0\n for switch in self.query(BitSwitch):\n value |= switch.value << switch.bit\n self.query_one(Input).value = str(value)\n\n\nclass ByteInputApp(App):\n def compose(self) -> ComposeResult:\n yield ByteEditor()\n\n\nif __name__ == \"__main__\":\n app = ByteInputApp()\n app.run()\n", "path": "docs/examples/guide/compound/byte02.py"}, {"content": "from __future__ import annotations\n\nfrom textual.app import App, ComposeResult\nfrom textual.containers import Container\nfrom textual.geometry import clamp\nfrom textual.message import Message\nfrom textual.reactive import reactive\nfrom textual.widget import Widget\nfrom textual.widgets import Input, Label, Switch\n\n\nclass BitSwitch(Widget):\n \"\"\"A Switch with a numeric label above it.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n BitSwitch {\n layout: vertical;\n width: auto;\n height: auto;\n }\n BitSwitch > Label {\n text-align: center;\n width: 100%;\n }\n \"\"\"\n\n class BitChanged(Message):\n \"\"\"Sent when the 'bit' changes.\"\"\"\n\n def __init__(self, bit: int, value: bool) -> None:\n super().__init__()\n self.bit = bit\n self.value = value\n\n value = reactive(0)\n\n def __init__(self, bit: int) -> None:\n self.bit = bit\n super().__init__()\n\n def compose(self) -> ComposeResult:\n yield Label(str(self.bit))\n yield Switch()\n\n def watch_value(self, value: bool) -> None: # (1)!\n \"\"\"When the value changes we want to set the switch 
accordingly.\"\"\"\n self.query_one(Switch).value = value\n\n def on_switch_changed(self, event: Switch.Changed) -> None:\n \"\"\"When the switch changes, notify the parent via a message.\"\"\"\n event.stop()\n self.value = event.value\n self.post_message(self.BitChanged(self.bit, event.value))\n\n\nclass ByteInput(Widget):\n \"\"\"A compound widget with 8 switches.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n ByteInput {\n width: auto;\n height: auto;\n border: blank;\n layout: horizontal;\n }\n ByteInput:focus-within {\n border: heavy $secondary;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n for bit in reversed(range(8)):\n yield BitSwitch(bit)\n\n\nclass ByteEditor(Widget):\n DEFAULT_CSS = \"\"\"\n ByteEditor > Container {\n height: 1fr;\n align: center middle;\n }\n ByteEditor > Container.top {\n background: $boost;\n }\n ByteEditor Input {\n width: 16;\n }\n \"\"\"\n\n value = reactive(0)\n\n def validate_value(self, value: int) -> int: # (2)!\n \"\"\"Ensure value is between 0 and 255.\"\"\"\n return clamp(value, 0, 255)\n\n def compose(self) -> ComposeResult:\n with Container(classes=\"top\"):\n yield Input(placeholder=\"byte\")\n with Container():\n yield ByteInput()\n\n def on_bit_switch_bit_changed(self, event: BitSwitch.BitChanged) -> None:\n \"\"\"When a switch changes, update the value.\"\"\"\n value = 0\n for switch in self.query(BitSwitch):\n value |= switch.value << switch.bit\n self.query_one(Input).value = str(value)\n\n def on_input_changed(self, event: Input.Changed) -> None: # (3)!\n \"\"\"When the text changes, set the value of the byte.\"\"\"\n try:\n self.value = int(event.value or \"0\")\n except ValueError:\n pass\n\n def watch_value(self, value: int) -> None: # (4)!\n \"\"\"When self.value changes, update switches.\"\"\"\n for switch in self.query(BitSwitch):\n with switch.prevent(BitSwitch.BitChanged): # (5)!\n switch.value = bool(value & (1 << switch.bit)) # (6)!\n\n\nclass ByteInputApp(App):\n def compose(self) -> ComposeResult:\n yield ByteEditor()\n\n\nif __name__ == \"__main__\":\n app = ByteInputApp()\n app.run()\n", "path": "docs/examples/guide/compound/byte03.py"}]}
2,315
245
gh_patches_debug_35030
rasdani/github-patches
git_diff
pfnet__pytorch-pfn-extras-582
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Align terminology (`option` v.s. `config`) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pytorch_pfn_extras/runtime/_to.py` Content: ``` 1 from typing import Any, Dict, Optional, Type, TypeVar 2 3 import torch 4 5 import pytorch_pfn_extras as ppe 6 from pytorch_pfn_extras.runtime._runtime import DeviceLike, BaseRuntime 7 8 9 ModuleOrTensor = TypeVar('ModuleOrTensor', torch.nn.Module, torch.Tensor) 10 11 12 def to( 13 module_or_tensor: ModuleOrTensor, 14 device: DeviceLike, 15 *, 16 config: Optional[Dict[str, Any]] = None, 17 runtime_class: Optional[Type[BaseRuntime]] = None, 18 ) -> ModuleOrTensor: 19 """A function to transfer the given object to the given device. 20 21 If PyTorch's device type is given as the ``device`` argument, 22 the behavior of this function is equivalent to 23 ``module_or_tensor.to(module_or_tensor, device)``. 24 25 Otherwise, this function uses the **Runtime** mechanism. 26 This function looks for the Runtime for the device from the RuntimeRegistry 27 and delegates the actual transfer operation to it. 28 29 See also the documentation of ``ppe.runtime.BaseRuntime`` for details. 30 31 Args: 32 module_or_tensor (torch.nn.Module or torch.Tensor): 33 An object to be transferred. 34 device (torch.device or str): 35 The device that the input object is transferred to. 36 config (dict, optional): 37 A config of dictionary type that is passed to 38 ``runtime_class.__init__`` as an argument. 39 runtime_class: 40 A runtime class inherited from `BaseRuntime` class. 41 If ``None``, a runtime class is automatically selected 42 based on the ``device`` argument from the runtime registry. 43 44 Returns: 45 A `torch.Tensor` with the specified device. 46 """ 47 if config is None: 48 config = {} 49 if runtime_class is None: 50 registry = ppe.runtime.runtime_registry 51 runtime_class = registry.get_runtime_class_for_device_spec(device) 52 runtime = runtime_class(device, config) 53 obj = module_or_tensor 54 if isinstance(obj, torch.nn.Module): 55 ppe.runtime._runtime._set_module_runtime_tag(obj, runtime) 56 return runtime.move_module(obj) 57 elif isinstance(obj, torch.Tensor): 58 return runtime.move_tensor(obj) 59 else: 60 raise ValueError('Unsupported type for module_or_tensor') 61 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pytorch_pfn_extras/runtime/_to.py b/pytorch_pfn_extras/runtime/_to.py --- a/pytorch_pfn_extras/runtime/_to.py +++ b/pytorch_pfn_extras/runtime/_to.py @@ -13,8 +13,9 @@ module_or_tensor: ModuleOrTensor, device: DeviceLike, *, - config: Optional[Dict[str, Any]] = None, + options: Optional[Dict[str, Any]] = None, runtime_class: Optional[Type[BaseRuntime]] = None, + config: Optional[Dict[str, Any]] = None, ) -> ModuleOrTensor: """A function to transfer the given object to the given device. @@ -33,23 +34,30 @@ An object to be transferred. device (torch.device or str): The device that the input object is transferred to. - config (dict, optional): - A config of dictionary type that is passed to + options (dict, optional): + An options of dictionary type that is passed to ``runtime_class.__init__`` as an argument. runtime_class: A runtime class inherited from `BaseRuntime` class. If ``None``, a runtime class is automatically selected based on the ``device`` argument from the runtime registry. + config (dict, optional): + DEPRECATED. Use `options`. Returns: A `torch.Tensor` with the specified device. """ - if config is None: - config = {} + if options is None: + options = {} + if config is not None: + options = config + elif config is not None: + raise ValueError('options and config cannot be specified together') + if runtime_class is None: registry = ppe.runtime.runtime_registry runtime_class = registry.get_runtime_class_for_device_spec(device) - runtime = runtime_class(device, config) + runtime = runtime_class(device, options) obj = module_or_tensor if isinstance(obj, torch.nn.Module): ppe.runtime._runtime._set_module_runtime_tag(obj, runtime)
{"golden_diff": "diff --git a/pytorch_pfn_extras/runtime/_to.py b/pytorch_pfn_extras/runtime/_to.py\n--- a/pytorch_pfn_extras/runtime/_to.py\n+++ b/pytorch_pfn_extras/runtime/_to.py\n@@ -13,8 +13,9 @@\n module_or_tensor: ModuleOrTensor,\n device: DeviceLike,\n *,\n- config: Optional[Dict[str, Any]] = None,\n+ options: Optional[Dict[str, Any]] = None,\n runtime_class: Optional[Type[BaseRuntime]] = None,\n+ config: Optional[Dict[str, Any]] = None,\n ) -> ModuleOrTensor:\n \"\"\"A function to transfer the given object to the given device.\n \n@@ -33,23 +34,30 @@\n An object to be transferred.\n device (torch.device or str):\n The device that the input object is transferred to.\n- config (dict, optional):\n- A config of dictionary type that is passed to\n+ options (dict, optional):\n+ An options of dictionary type that is passed to\n ``runtime_class.__init__`` as an argument.\n runtime_class:\n A runtime class inherited from `BaseRuntime` class.\n If ``None``, a runtime class is automatically selected\n based on the ``device`` argument from the runtime registry.\n+ config (dict, optional):\n+ DEPRECATED. Use `options`.\n \n Returns:\n A `torch.Tensor` with the specified device.\n \"\"\"\n- if config is None:\n- config = {}\n+ if options is None:\n+ options = {}\n+ if config is not None:\n+ options = config\n+ elif config is not None:\n+ raise ValueError('options and config cannot be specified together')\n+\n if runtime_class is None:\n registry = ppe.runtime.runtime_registry\n runtime_class = registry.get_runtime_class_for_device_spec(device)\n- runtime = runtime_class(device, config)\n+ runtime = runtime_class(device, options)\n obj = module_or_tensor\n if isinstance(obj, torch.nn.Module):\n ppe.runtime._runtime._set_module_runtime_tag(obj, runtime)\n", "issue": "Align terminology (`option` v.s. 
`config`)\n\n", "before_files": [{"content": "from typing import Any, Dict, Optional, Type, TypeVar\n\nimport torch\n\nimport pytorch_pfn_extras as ppe\nfrom pytorch_pfn_extras.runtime._runtime import DeviceLike, BaseRuntime\n\n\nModuleOrTensor = TypeVar('ModuleOrTensor', torch.nn.Module, torch.Tensor)\n\n\ndef to(\n module_or_tensor: ModuleOrTensor,\n device: DeviceLike,\n *,\n config: Optional[Dict[str, Any]] = None,\n runtime_class: Optional[Type[BaseRuntime]] = None,\n) -> ModuleOrTensor:\n \"\"\"A function to transfer the given object to the given device.\n\n If PyTorch's device type is given as the ``device`` argument,\n the behavior of this function is equivalent to\n ``module_or_tensor.to(module_or_tensor, device)``.\n\n Otherwise, this function uses the **Runtime** mechanism.\n This function looks for the Runtime for the device from the RuntimeRegistry\n and delegates the actual transfer operation to it.\n\n See also the documentation of ``ppe.runtime.BaseRuntime`` for details.\n\n Args:\n module_or_tensor (torch.nn.Module or torch.Tensor):\n An object to be transferred.\n device (torch.device or str):\n The device that the input object is transferred to.\n config (dict, optional):\n A config of dictionary type that is passed to\n ``runtime_class.__init__`` as an argument.\n runtime_class:\n A runtime class inherited from `BaseRuntime` class.\n If ``None``, a runtime class is automatically selected\n based on the ``device`` argument from the runtime registry.\n\n Returns:\n A `torch.Tensor` with the specified device.\n \"\"\"\n if config is None:\n config = {}\n if runtime_class is None:\n registry = ppe.runtime.runtime_registry\n runtime_class = registry.get_runtime_class_for_device_spec(device)\n runtime = runtime_class(device, config)\n obj = module_or_tensor\n if isinstance(obj, torch.nn.Module):\n ppe.runtime._runtime._set_module_runtime_tag(obj, runtime)\n return runtime.move_module(obj)\n elif isinstance(obj, torch.Tensor):\n return runtime.move_tensor(obj)\n else:\n raise ValueError('Unsupported type for module_or_tensor')\n", "path": "pytorch_pfn_extras/runtime/_to.py"}], "after_files": [{"content": "from typing import Any, Dict, Optional, Type, TypeVar\n\nimport torch\n\nimport pytorch_pfn_extras as ppe\nfrom pytorch_pfn_extras.runtime._runtime import DeviceLike, BaseRuntime\n\n\nModuleOrTensor = TypeVar('ModuleOrTensor', torch.nn.Module, torch.Tensor)\n\n\ndef to(\n module_or_tensor: ModuleOrTensor,\n device: DeviceLike,\n *,\n options: Optional[Dict[str, Any]] = None,\n runtime_class: Optional[Type[BaseRuntime]] = None,\n config: Optional[Dict[str, Any]] = None,\n) -> ModuleOrTensor:\n \"\"\"A function to transfer the given object to the given device.\n\n If PyTorch's device type is given as the ``device`` argument,\n the behavior of this function is equivalent to\n ``module_or_tensor.to(module_or_tensor, device)``.\n\n Otherwise, this function uses the **Runtime** mechanism.\n This function looks for the Runtime for the device from the RuntimeRegistry\n and delegates the actual transfer operation to it.\n\n See also the documentation of ``ppe.runtime.BaseRuntime`` for details.\n\n Args:\n module_or_tensor (torch.nn.Module or torch.Tensor):\n An object to be transferred.\n device (torch.device or str):\n The device that the input object is transferred to.\n options (dict, optional):\n An options of dictionary type that is passed to\n ``runtime_class.__init__`` as an argument.\n runtime_class:\n A runtime class inherited from `BaseRuntime` class.\n If ``None``, a 
runtime class is automatically selected\n based on the ``device`` argument from the runtime registry.\n config (dict, optional):\n DEPRECATED. Use `options`.\n\n Returns:\n A `torch.Tensor` with the specified device.\n \"\"\"\n if options is None:\n options = {}\n if config is not None:\n options = config\n elif config is not None:\n raise ValueError('options and config cannot be specified together')\n\n if runtime_class is None:\n registry = ppe.runtime.runtime_registry\n runtime_class = registry.get_runtime_class_for_device_spec(device)\n runtime = runtime_class(device, options)\n obj = module_or_tensor\n if isinstance(obj, torch.nn.Module):\n ppe.runtime._runtime._set_module_runtime_tag(obj, runtime)\n return runtime.move_module(obj)\n elif isinstance(obj, torch.Tensor):\n return runtime.move_tensor(obj)\n else:\n raise ValueError('Unsupported type for module_or_tensor')\n", "path": "pytorch_pfn_extras/runtime/_to.py"}]}
867
465
gh_patches_debug_37046
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-3053
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Deprecation: `imghdr` module is being deprecated in Python 3.11 The `imghdr` module is being deprected in `3.11` and will be removed in `3.13`. See [PEP 594](https://peps.python.org/pep-0594/#imghdr) for details. We currently use it only in one area: https://github.com/python-telegram-bot/python-telegram-bot/blob/be8f4f7aad9c3ded333950d00ecde57dbdda59c2/telegram/_files/inputfile.py#L108 Alternatives would be: 1. Just use the `mimetype` module? We were using only the `mimetype` module until [@b83a659](https://github.com/python-telegram-bot/python-telegram-bot/commit/b83a659) where we used a 'hack' to detect the type from the byte stream. This was then [replaced](https://github.com/python-telegram-bot/python-telegram-bot/commit/5dc1e4cac19cdd34d7cad4688afa0277a0c8a436) by the `imghdr` module which did the same thing better. 2. Use a library called [filetype](https://github.com/h2non/filetype.py) to detect images. This was [recommended](https://peps.python.org/pep-0594/#deprecated-modules) by the PEP. 3. Just copy the python [source](https://github.com/python/cpython/blob/3.11/Lib/imghdr.py) of the `imghdr` module into ours? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `telegram/_files/inputfile.py` Content: ``` 1 #!/usr/bin/env python 2 # 3 # A library that provides a Python interface to the Telegram Bot API 4 # Copyright (C) 2015-2022 5 # Leandro Toledo de Souza <[email protected]> 6 # 7 # This program is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU Lesser Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # This program is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU Lesser Public License for more details. 16 # 17 # You should have received a copy of the GNU Lesser Public License 18 # along with this program. If not, see [http://www.gnu.org/licenses/]. 19 """This module contains an object that represents a Telegram InputFile.""" 20 21 import imghdr 22 import logging 23 import mimetypes 24 from pathlib import Path 25 from typing import IO, Optional, Union 26 from uuid import uuid4 27 28 from telegram._utils.types import FieldTuple 29 30 _DEFAULT_MIME_TYPE = "application/octet-stream" 31 logger = logging.getLogger(__name__) 32 33 34 class InputFile: 35 """This object represents a Telegram InputFile. 36 37 .. versionchanged:: 20.0 38 The former attribute ``attach`` was renamed to :attr:`attach_name`. 39 40 Args: 41 obj (:term:`file object` | :obj:`bytes` | :obj:`str`): An open file descriptor or the files 42 content as bytes or string. 43 44 Note: 45 If :paramref:`obj` is a string, it will be encoded as bytes via 46 :external:obj:`obj.encode('utf-8') <str.encode>`. 47 48 .. versionchanged:: 20.0 49 Accept string input. 50 filename (:obj:`str`, optional): Filename for this InputFile. 51 attach (:obj:`bool`, optional): Pass :obj:`True` if the parameter this file belongs to in 52 the request to Telegram should point to the multipart data via an ``attach://`` URI. 53 Defaults to `False`. 54 55 Attributes: 56 input_file_content (:obj:`bytes`): The binary content of the file to send. 
57 attach_name (:obj:`str`): Optional. If present, the parameter this file belongs to in 58 the request to Telegram should point to the multipart data via a an URI of the form 59 ``attach://<attach_name>`` URI. 60 filename (:obj:`str`): Filename for the file to be sent. 61 mimetype (:obj:`str`): The mimetype inferred from the file to be sent. 62 63 """ 64 65 __slots__ = ("filename", "attach_name", "input_file_content", "mimetype") 66 67 def __init__( 68 self, obj: Union[IO[bytes], bytes, str], filename: str = None, attach: bool = False 69 ): 70 if isinstance(obj, bytes): 71 self.input_file_content = obj 72 elif isinstance(obj, str): 73 self.input_file_content = obj.encode("utf-8") 74 else: 75 self.input_file_content = obj.read() 76 self.attach_name: Optional[str] = "attached" + uuid4().hex if attach else None 77 78 if ( 79 not filename 80 and hasattr(obj, "name") 81 and not isinstance(obj.name, int) # type: ignore[union-attr] 82 ): 83 filename = Path(obj.name).name # type: ignore[union-attr] 84 85 image_mime_type = self.is_image(self.input_file_content) 86 if image_mime_type: 87 self.mimetype = image_mime_type 88 elif filename: 89 self.mimetype = mimetypes.guess_type(filename)[0] or _DEFAULT_MIME_TYPE 90 else: 91 self.mimetype = _DEFAULT_MIME_TYPE 92 93 self.filename = filename or self.mimetype.replace("/", ".") 94 95 @staticmethod 96 def is_image(stream: bytes) -> Optional[str]: 97 """Check if the content file is an image by analyzing its headers. 98 99 Args: 100 stream (:obj:`bytes`): A byte stream representing the content of a file. 101 102 Returns: 103 :obj:`str` | :obj:`None`: The mime-type of an image, if the input is an image, or 104 :obj:`None` else. 105 106 """ 107 try: 108 image = imghdr.what(None, stream) 109 if image: 110 return f"image/{image}" 111 return None 112 except Exception: 113 logger.debug( 114 "Could not parse file content. Assuming that file is not an image.", exc_info=True 115 ) 116 return None 117 118 @property 119 def field_tuple(self) -> FieldTuple: 120 """Field tuple representing the contents of the file for upload to the Telegram servers. 121 122 Returns: 123 Tuple[:obj:`str`, :obj:`bytes`, :obj:`str`]: 124 """ 125 return self.filename, self.input_file_content, self.mimetype 126 127 @property 128 def attach_uri(self) -> Optional[str]: 129 """URI to insert into the JSON data for uploading the file. Returns :obj:`None`, if 130 :attr:`attach_name` is :obj:`None`. 131 """ 132 return f"attach://{self.attach_name}" if self.attach_name else None 133 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/telegram/_files/inputfile.py b/telegram/_files/inputfile.py --- a/telegram/_files/inputfile.py +++ b/telegram/_files/inputfile.py @@ -18,7 +18,6 @@ # along with this program. If not, see [http://www.gnu.org/licenses/]. """This module contains an object that represents a Telegram InputFile.""" -import imghdr import logging import mimetypes from pathlib import Path @@ -35,7 +34,11 @@ """This object represents a Telegram InputFile. .. versionchanged:: 20.0 - The former attribute ``attach`` was renamed to :attr:`attach_name`. + + * The former attribute ``attach`` was renamed to :attr:`attach_name`. + * Method ``is_image`` was removed. If you pass :obj:`bytes` to :paramref:`obj` and would + like to have the mime type automatically guessed, please pass :paramref:`filename` + in addition. Args: obj (:term:`file object` | :obj:`bytes` | :obj:`str`): An open file descriptor or the files @@ -82,39 +85,13 @@ ): filename = Path(obj.name).name # type: ignore[union-attr] - image_mime_type = self.is_image(self.input_file_content) - if image_mime_type: - self.mimetype = image_mime_type - elif filename: - self.mimetype = mimetypes.guess_type(filename)[0] or _DEFAULT_MIME_TYPE + if filename: + self.mimetype = mimetypes.guess_type(filename, strict=False)[0] or _DEFAULT_MIME_TYPE else: self.mimetype = _DEFAULT_MIME_TYPE self.filename = filename or self.mimetype.replace("/", ".") - @staticmethod - def is_image(stream: bytes) -> Optional[str]: - """Check if the content file is an image by analyzing its headers. - - Args: - stream (:obj:`bytes`): A byte stream representing the content of a file. - - Returns: - :obj:`str` | :obj:`None`: The mime-type of an image, if the input is an image, or - :obj:`None` else. - - """ - try: - image = imghdr.what(None, stream) - if image: - return f"image/{image}" - return None - except Exception: - logger.debug( - "Could not parse file content. Assuming that file is not an image.", exc_info=True - ) - return None - @property def field_tuple(self) -> FieldTuple: """Field tuple representing the contents of the file for upload to the Telegram servers.
{"golden_diff": "diff --git a/telegram/_files/inputfile.py b/telegram/_files/inputfile.py\n--- a/telegram/_files/inputfile.py\n+++ b/telegram/_files/inputfile.py\n@@ -18,7 +18,6 @@\n # along with this program. If not, see [http://www.gnu.org/licenses/].\n \"\"\"This module contains an object that represents a Telegram InputFile.\"\"\"\n \n-import imghdr\n import logging\n import mimetypes\n from pathlib import Path\n@@ -35,7 +34,11 @@\n \"\"\"This object represents a Telegram InputFile.\n \n .. versionchanged:: 20.0\n- The former attribute ``attach`` was renamed to :attr:`attach_name`.\n+\n+ * The former attribute ``attach`` was renamed to :attr:`attach_name`.\n+ * Method ``is_image`` was removed. If you pass :obj:`bytes` to :paramref:`obj` and would\n+ like to have the mime type automatically guessed, please pass :paramref:`filename`\n+ in addition.\n \n Args:\n obj (:term:`file object` | :obj:`bytes` | :obj:`str`): An open file descriptor or the files\n@@ -82,39 +85,13 @@\n ):\n filename = Path(obj.name).name # type: ignore[union-attr]\n \n- image_mime_type = self.is_image(self.input_file_content)\n- if image_mime_type:\n- self.mimetype = image_mime_type\n- elif filename:\n- self.mimetype = mimetypes.guess_type(filename)[0] or _DEFAULT_MIME_TYPE\n+ if filename:\n+ self.mimetype = mimetypes.guess_type(filename, strict=False)[0] or _DEFAULT_MIME_TYPE\n else:\n self.mimetype = _DEFAULT_MIME_TYPE\n \n self.filename = filename or self.mimetype.replace(\"/\", \".\")\n \n- @staticmethod\n- def is_image(stream: bytes) -> Optional[str]:\n- \"\"\"Check if the content file is an image by analyzing its headers.\n-\n- Args:\n- stream (:obj:`bytes`): A byte stream representing the content of a file.\n-\n- Returns:\n- :obj:`str` | :obj:`None`: The mime-type of an image, if the input is an image, or\n- :obj:`None` else.\n-\n- \"\"\"\n- try:\n- image = imghdr.what(None, stream)\n- if image:\n- return f\"image/{image}\"\n- return None\n- except Exception:\n- logger.debug(\n- \"Could not parse file content. Assuming that file is not an image.\", exc_info=True\n- )\n- return None\n-\n @property\n def field_tuple(self) -> FieldTuple:\n \"\"\"Field tuple representing the contents of the file for upload to the Telegram servers.\n", "issue": "Deprecation: `imghdr` module is being deprecated in Python 3.11\nThe `imghdr` module is being deprected in `3.11` and will be removed in `3.13`. See [PEP 594](https://peps.python.org/pep-0594/#imghdr) for details. \r\n\r\nWe currently use it only in one area: https://github.com/python-telegram-bot/python-telegram-bot/blob/be8f4f7aad9c3ded333950d00ecde57dbdda59c2/telegram/_files/inputfile.py#L108\r\n\r\nAlternatives would be:\r\n1. Just use the `mimetype` module? We were using only the `mimetype` module until [@b83a659](https://github.com/python-telegram-bot/python-telegram-bot/commit/b83a659) where we used a 'hack' to detect the type from the byte stream. This was then [replaced](https://github.com/python-telegram-bot/python-telegram-bot/commit/5dc1e4cac19cdd34d7cad4688afa0277a0c8a436) by the `imghdr` module which did the same thing better.\r\n2. Use a library called [filetype](https://github.com/h2non/filetype.py) to detect images. This was [recommended](https://peps.python.org/pep-0594/#deprecated-modules) by the PEP.\r\n3. 
Just copy the python [source](https://github.com/python/cpython/blob/3.11/Lib/imghdr.py) of the `imghdr` module into ours?\r\n\r\n \n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2022\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram InputFile.\"\"\"\n\nimport imghdr\nimport logging\nimport mimetypes\nfrom pathlib import Path\nfrom typing import IO, Optional, Union\nfrom uuid import uuid4\n\nfrom telegram._utils.types import FieldTuple\n\n_DEFAULT_MIME_TYPE = \"application/octet-stream\"\nlogger = logging.getLogger(__name__)\n\n\nclass InputFile:\n \"\"\"This object represents a Telegram InputFile.\n\n .. versionchanged:: 20.0\n The former attribute ``attach`` was renamed to :attr:`attach_name`.\n\n Args:\n obj (:term:`file object` | :obj:`bytes` | :obj:`str`): An open file descriptor or the files\n content as bytes or string.\n\n Note:\n If :paramref:`obj` is a string, it will be encoded as bytes via\n :external:obj:`obj.encode('utf-8') <str.encode>`.\n\n .. versionchanged:: 20.0\n Accept string input.\n filename (:obj:`str`, optional): Filename for this InputFile.\n attach (:obj:`bool`, optional): Pass :obj:`True` if the parameter this file belongs to in\n the request to Telegram should point to the multipart data via an ``attach://`` URI.\n Defaults to `False`.\n\n Attributes:\n input_file_content (:obj:`bytes`): The binary content of the file to send.\n attach_name (:obj:`str`): Optional. 
If present, the parameter this file belongs to in\n the request to Telegram should point to the multipart data via a an URI of the form\n ``attach://<attach_name>`` URI.\n filename (:obj:`str`): Filename for the file to be sent.\n mimetype (:obj:`str`): The mimetype inferred from the file to be sent.\n\n \"\"\"\n\n __slots__ = (\"filename\", \"attach_name\", \"input_file_content\", \"mimetype\")\n\n def __init__(\n self, obj: Union[IO[bytes], bytes, str], filename: str = None, attach: bool = False\n ):\n if isinstance(obj, bytes):\n self.input_file_content = obj\n elif isinstance(obj, str):\n self.input_file_content = obj.encode(\"utf-8\")\n else:\n self.input_file_content = obj.read()\n self.attach_name: Optional[str] = \"attached\" + uuid4().hex if attach else None\n\n if (\n not filename\n and hasattr(obj, \"name\")\n and not isinstance(obj.name, int) # type: ignore[union-attr]\n ):\n filename = Path(obj.name).name # type: ignore[union-attr]\n\n image_mime_type = self.is_image(self.input_file_content)\n if image_mime_type:\n self.mimetype = image_mime_type\n elif filename:\n self.mimetype = mimetypes.guess_type(filename)[0] or _DEFAULT_MIME_TYPE\n else:\n self.mimetype = _DEFAULT_MIME_TYPE\n\n self.filename = filename or self.mimetype.replace(\"/\", \".\")\n\n @staticmethod\n def is_image(stream: bytes) -> Optional[str]:\n \"\"\"Check if the content file is an image by analyzing its headers.\n\n Args:\n stream (:obj:`bytes`): A byte stream representing the content of a file.\n\n Returns:\n :obj:`str` | :obj:`None`: The mime-type of an image, if the input is an image, or\n :obj:`None` else.\n\n \"\"\"\n try:\n image = imghdr.what(None, stream)\n if image:\n return f\"image/{image}\"\n return None\n except Exception:\n logger.debug(\n \"Could not parse file content. Assuming that file is not an image.\", exc_info=True\n )\n return None\n\n @property\n def field_tuple(self) -> FieldTuple:\n \"\"\"Field tuple representing the contents of the file for upload to the Telegram servers.\n\n Returns:\n Tuple[:obj:`str`, :obj:`bytes`, :obj:`str`]:\n \"\"\"\n return self.filename, self.input_file_content, self.mimetype\n\n @property\n def attach_uri(self) -> Optional[str]:\n \"\"\"URI to insert into the JSON data for uploading the file. Returns :obj:`None`, if\n :attr:`attach_name` is :obj:`None`.\n \"\"\"\n return f\"attach://{self.attach_name}\" if self.attach_name else None\n", "path": "telegram/_files/inputfile.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2022\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram InputFile.\"\"\"\n\nimport logging\nimport mimetypes\nfrom pathlib import Path\nfrom typing import IO, Optional, Union\nfrom uuid import uuid4\n\nfrom telegram._utils.types import FieldTuple\n\n_DEFAULT_MIME_TYPE = \"application/octet-stream\"\nlogger = logging.getLogger(__name__)\n\n\nclass InputFile:\n \"\"\"This object represents a Telegram InputFile.\n\n .. versionchanged:: 20.0\n\n * The former attribute ``attach`` was renamed to :attr:`attach_name`.\n * Method ``is_image`` was removed. If you pass :obj:`bytes` to :paramref:`obj` and would\n like to have the mime type automatically guessed, please pass :paramref:`filename`\n in addition.\n\n Args:\n obj (:term:`file object` | :obj:`bytes` | :obj:`str`): An open file descriptor or the files\n content as bytes or string.\n\n Note:\n If :paramref:`obj` is a string, it will be encoded as bytes via\n :external:obj:`obj.encode('utf-8') <str.encode>`.\n\n .. versionchanged:: 20.0\n Accept string input.\n filename (:obj:`str`, optional): Filename for this InputFile.\n attach (:obj:`bool`, optional): Pass :obj:`True` if the parameter this file belongs to in\n the request to Telegram should point to the multipart data via an ``attach://`` URI.\n Defaults to `False`.\n\n Attributes:\n input_file_content (:obj:`bytes`): The binary content of the file to send.\n attach_name (:obj:`str`): Optional. If present, the parameter this file belongs to in\n the request to Telegram should point to the multipart data via a an URI of the form\n ``attach://<attach_name>`` URI.\n filename (:obj:`str`): Filename for the file to be sent.\n mimetype (:obj:`str`): The mimetype inferred from the file to be sent.\n\n \"\"\"\n\n __slots__ = (\"filename\", \"attach_name\", \"input_file_content\", \"mimetype\")\n\n def __init__(\n self, obj: Union[IO[bytes], bytes, str], filename: str = None, attach: bool = False\n ):\n if isinstance(obj, bytes):\n self.input_file_content = obj\n elif isinstance(obj, str):\n self.input_file_content = obj.encode(\"utf-8\")\n else:\n self.input_file_content = obj.read()\n self.attach_name: Optional[str] = \"attached\" + uuid4().hex if attach else None\n\n if (\n not filename\n and hasattr(obj, \"name\")\n and not isinstance(obj.name, int) # type: ignore[union-attr]\n ):\n filename = Path(obj.name).name # type: ignore[union-attr]\n\n if filename:\n self.mimetype = mimetypes.guess_type(filename, strict=False)[0] or _DEFAULT_MIME_TYPE\n else:\n self.mimetype = _DEFAULT_MIME_TYPE\n\n self.filename = filename or self.mimetype.replace(\"/\", \".\")\n\n @property\n def field_tuple(self) -> FieldTuple:\n \"\"\"Field tuple representing the contents of the file for upload to the Telegram servers.\n\n Returns:\n Tuple[:obj:`str`, :obj:`bytes`, :obj:`str`]:\n \"\"\"\n return self.filename, self.input_file_content, self.mimetype\n\n @property\n def attach_uri(self) -> Optional[str]:\n \"\"\"URI to insert into the JSON data for uploading the file. Returns :obj:`None`, if\n :attr:`attach_name` is :obj:`None`.\n \"\"\"\n return f\"attach://{self.attach_name}\" if self.attach_name else None\n", "path": "telegram/_files/inputfile.py"}]}
num_tokens: 2,092
num_tokens_diff: 620

problem_id: gh_patches_debug_15176
source: rasdani/github-patches
task_type: git_diff
in_source_id: Cog-Creators__Red-DiscordBot-1185
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TypeError: Only messages, members or roles may be passed https://sentry.io/will-tekulve/bot-development/issues/419244737/ ``` TypeError: Only messages, members or roles may be passed File "redbot/core/utils/mod.py", line 132, in is_mod_or_superior raise TypeError('Only messages, members or roles may be passed') Exception in on_message ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `redbot/core/utils/mod.py` Content: ``` 1 import asyncio 2 from datetime import timedelta 3 from typing import List, Iterable, Union 4 5 import discord 6 7 from redbot.core import Config 8 from redbot.core.bot import Red 9 10 11 async def mass_purge(messages: List[discord.Message], 12 channel: discord.TextChannel): 13 """Bulk delete messages from a channel. 14 15 If more than 100 messages are supplied, the bot will delete 100 messages at 16 a time, sleeping between each action. 17 18 Note 19 ---- 20 Messages must not be older than 14 days, and the bot must not be a user 21 account. 22 23 Parameters 24 ---------- 25 messages : `list` of `discord.Message` 26 The messages to bulk delete. 27 channel : discord.TextChannel 28 The channel to delete messages from. 29 30 Raises 31 ------ 32 discord.Forbidden 33 You do not have proper permissions to delete the messages or you’re not 34 using a bot account. 35 discord.HTTPException 36 Deleting the messages failed. 37 38 """ 39 while messages: 40 if len(messages) > 1: 41 await channel.delete_messages(messages[:100]) 42 messages = messages[100:] 43 else: 44 await messages[0].delete() 45 messages = [] 46 await asyncio.sleep(1.5) 47 48 49 async def slow_deletion(messages: Iterable[discord.Message]): 50 """Delete a list of messages one at a time. 51 52 Any exceptions raised when trying to delete the message will be silenced. 53 54 Parameters 55 ---------- 56 messages : `iterable` of `discord.Message` 57 The messages to delete. 58 59 """ 60 for message in messages: 61 try: 62 await message.delete() 63 except discord.HTTPException: 64 pass 65 66 67 def get_audit_reason(author: discord.Member, reason: str = None): 68 """Construct a reason to appear in the audit log. 69 70 Parameters 71 ---------- 72 author : discord.Member 73 The author behind the audit log action. 74 reason : str 75 The reason behidn the audit log action. 76 77 Returns 78 ------- 79 str 80 The formatted audit log reason. 81 82 """ 83 return \ 84 "Action requested by {} (ID {}). Reason: {}".format(author, author.id, reason) if reason else \ 85 "Action requested by {} (ID {}).".format(author, author.id) 86 87 88 async def is_allowed_by_hierarchy(bot: Red, 89 settings: Config, 90 guild: discord.Guild, 91 mod: discord.Member, 92 user: discord.Member): 93 if not await settings.guild(guild).respect_hierarchy(): 94 return True 95 is_special = mod == guild.owner or await bot.is_owner(mod) 96 return mod.top_role.position > user.top_role.position or is_special 97 98 99 async def is_mod_or_superior( 100 bot: Red, obj: Union[discord.Message, discord.Member, discord.Role]): 101 """Check if an object has mod or superior permissions. 102 103 If a message is passed, its author's permissions are checked. If a role is 104 passed, it simply checks if it is one of either the admin or mod roles. 105 106 Parameters 107 ---------- 108 bot : redbot.core.bot.Red 109 The bot object. 
110 obj : `discord.Message` or `discord.Member` or `discord.Role` 111 The object to check permissions for. 112 113 Returns 114 ------- 115 bool 116 :code:`True` if the object has mod permissions. 117 118 Raises 119 ------ 120 TypeError 121 If the wrong type of ``obj`` was passed. 122 123 """ 124 user = None 125 if isinstance(obj, discord.Message): 126 user = obj.author 127 elif isinstance(obj, discord.Member): 128 user = obj 129 elif isinstance(obj, discord.Role): 130 pass 131 else: 132 raise TypeError('Only messages, members or roles may be passed') 133 134 server = obj.guild 135 admin_role_id = await bot.db.guild(server).admin_role() 136 mod_role_id = await bot.db.guild(server).mod_role() 137 138 if isinstance(obj, discord.Role): 139 return obj.id in [admin_role_id, mod_role_id] 140 mod_roles = [r for r in server.roles if r.id == mod_role_id] 141 mod_role = mod_roles[0] if len(mod_roles) > 0 else None 142 admin_roles = [r for r in server.roles if r.id == admin_role_id] 143 admin_role = admin_roles[0] if len(admin_roles) > 0 else None 144 145 if user and user == await bot.is_owner(user): 146 return True 147 elif admin_role and discord.utils.get(user.roles, name=admin_role): 148 return True 149 elif mod_role and discord.utils.get(user.roles, name=mod_role): 150 return True 151 else: 152 return False 153 154 155 def strfdelta(delta: timedelta): 156 """Format a timedelta object to a message with time units. 157 158 Parameters 159 ---------- 160 delta : datetime.timedelta 161 The duration to parse. 162 163 Returns 164 ------- 165 str 166 A message representing the timedelta with units. 167 168 """ 169 s = [] 170 if delta.days: 171 ds = '%i day' % delta.days 172 if delta.days > 1: 173 ds += 's' 174 s.append(ds) 175 hrs, rem = divmod(delta.seconds, 60*60) 176 if hrs: 177 hs = '%i hr' % hrs 178 if hrs > 1: 179 hs += 's' 180 s.append(hs) 181 mins, secs = divmod(rem, 60) 182 if mins: 183 s.append('%i min' % mins) 184 if secs: 185 s.append('%i sec' % secs) 186 return ' '.join(s) 187 188 189 async def is_admin_or_superior( 190 bot: Red, obj: Union[discord.Message, discord.Member, discord.Role]): 191 """Same as `is_mod_or_superior` except for admin permissions. 192 193 If a message is passed, its author's permissions are checked. If a role is 194 passed, it simply checks if it is the admin role. 195 196 Parameters 197 ---------- 198 bot : redbot.core.bot.Red 199 The bot object. 200 obj : `discord.Message` or `discord.Member` or `discord.Role` 201 The object to check permissions for. 202 203 Returns 204 ------- 205 bool 206 :code:`True` if the object has admin permissions. 207 208 Raises 209 ------ 210 TypeError 211 If the wrong type of ``obj`` was passed. 
212 213 """ 214 user = None 215 if isinstance(obj, discord.Message): 216 user = obj.author 217 elif isinstance(obj, discord.Member): 218 user = obj 219 elif isinstance(obj, discord.Role): 220 pass 221 else: 222 raise TypeError('Only messages, members or roles may be passed') 223 224 server = obj.guild 225 admin_role_id = await bot.db.guild(server).admin_role() 226 227 if isinstance(obj, discord.Role): 228 return obj.id == admin_role_id 229 admin_roles = [r for r in server.roles if r.id == admin_role_id] 230 admin_role = admin_roles[0] if len(admin_roles) > 0 else None 231 232 if user and await bot.is_owner(user): 233 return True 234 elif admin_roles and discord.utils.get(user.roles, name=admin_role): 235 return True 236 else: 237 return False 238 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/redbot/core/utils/mod.py b/redbot/core/utils/mod.py --- a/redbot/core/utils/mod.py +++ b/redbot/core/utils/mod.py @@ -124,7 +124,7 @@ user = None if isinstance(obj, discord.Message): user = obj.author - elif isinstance(obj, discord.Member): + elif isinstance(obj, discord.Member) or isinstance(obj, discord.User): user = obj elif isinstance(obj, discord.Role): pass @@ -214,7 +214,7 @@ user = None if isinstance(obj, discord.Message): user = obj.author - elif isinstance(obj, discord.Member): + elif isinstance(obj, discord.Member) or isinstance(obj, discord.User): user = obj elif isinstance(obj, discord.Role): pass
{"golden_diff": "diff --git a/redbot/core/utils/mod.py b/redbot/core/utils/mod.py\n--- a/redbot/core/utils/mod.py\n+++ b/redbot/core/utils/mod.py\n@@ -124,7 +124,7 @@\n user = None\n if isinstance(obj, discord.Message):\n user = obj.author\n- elif isinstance(obj, discord.Member):\n+ elif isinstance(obj, discord.Member) or isinstance(obj, discord.User):\n user = obj\n elif isinstance(obj, discord.Role):\n pass\n@@ -214,7 +214,7 @@\n user = None\n if isinstance(obj, discord.Message):\n user = obj.author\n- elif isinstance(obj, discord.Member):\n+ elif isinstance(obj, discord.Member) or isinstance(obj, discord.User):\n user = obj\n elif isinstance(obj, discord.Role):\n pass\n", "issue": "TypeError: Only messages, members or roles may be passed\nhttps://sentry.io/will-tekulve/bot-development/issues/419244737/\n\n```\nTypeError: Only messages, members or roles may be passed\n File \"redbot/core/utils/mod.py\", line 132, in is_mod_or_superior\n raise TypeError('Only messages, members or roles may be passed')\n\nException in on_message\n```\n", "before_files": [{"content": "import asyncio\nfrom datetime import timedelta\nfrom typing import List, Iterable, Union\n\nimport discord\n\nfrom redbot.core import Config\nfrom redbot.core.bot import Red\n\n\nasync def mass_purge(messages: List[discord.Message],\n channel: discord.TextChannel):\n \"\"\"Bulk delete messages from a channel.\n\n If more than 100 messages are supplied, the bot will delete 100 messages at\n a time, sleeping between each action.\n\n Note\n ----\n Messages must not be older than 14 days, and the bot must not be a user\n account.\n\n Parameters\n ----------\n messages : `list` of `discord.Message`\n The messages to bulk delete.\n channel : discord.TextChannel\n The channel to delete messages from.\n\n Raises\n ------\n discord.Forbidden\n You do not have proper permissions to delete the messages or you\u2019re not\n using a bot account.\n discord.HTTPException\n Deleting the messages failed.\n\n \"\"\"\n while messages:\n if len(messages) > 1:\n await channel.delete_messages(messages[:100])\n messages = messages[100:]\n else:\n await messages[0].delete()\n messages = []\n await asyncio.sleep(1.5)\n\n\nasync def slow_deletion(messages: Iterable[discord.Message]):\n \"\"\"Delete a list of messages one at a time.\n\n Any exceptions raised when trying to delete the message will be silenced.\n\n Parameters\n ----------\n messages : `iterable` of `discord.Message`\n The messages to delete.\n\n \"\"\"\n for message in messages:\n try:\n await message.delete()\n except discord.HTTPException:\n pass\n\n\ndef get_audit_reason(author: discord.Member, reason: str = None):\n \"\"\"Construct a reason to appear in the audit log.\n\n Parameters\n ----------\n author : discord.Member\n The author behind the audit log action.\n reason : str\n The reason behidn the audit log action.\n\n Returns\n -------\n str\n The formatted audit log reason.\n\n \"\"\"\n return \\\n \"Action requested by {} (ID {}). 
Reason: {}\".format(author, author.id, reason) if reason else \\\n \"Action requested by {} (ID {}).\".format(author, author.id)\n\n\nasync def is_allowed_by_hierarchy(bot: Red,\n settings: Config,\n guild: discord.Guild,\n mod: discord.Member,\n user: discord.Member):\n if not await settings.guild(guild).respect_hierarchy():\n return True\n is_special = mod == guild.owner or await bot.is_owner(mod)\n return mod.top_role.position > user.top_role.position or is_special\n\n\nasync def is_mod_or_superior(\n bot: Red, obj: Union[discord.Message, discord.Member, discord.Role]):\n \"\"\"Check if an object has mod or superior permissions.\n\n If a message is passed, its author's permissions are checked. If a role is\n passed, it simply checks if it is one of either the admin or mod roles.\n\n Parameters\n ----------\n bot : redbot.core.bot.Red\n The bot object.\n obj : `discord.Message` or `discord.Member` or `discord.Role`\n The object to check permissions for.\n\n Returns\n -------\n bool\n :code:`True` if the object has mod permissions.\n\n Raises\n ------\n TypeError\n If the wrong type of ``obj`` was passed.\n\n \"\"\"\n user = None\n if isinstance(obj, discord.Message):\n user = obj.author\n elif isinstance(obj, discord.Member):\n user = obj\n elif isinstance(obj, discord.Role):\n pass\n else:\n raise TypeError('Only messages, members or roles may be passed')\n\n server = obj.guild\n admin_role_id = await bot.db.guild(server).admin_role()\n mod_role_id = await bot.db.guild(server).mod_role()\n\n if isinstance(obj, discord.Role):\n return obj.id in [admin_role_id, mod_role_id]\n mod_roles = [r for r in server.roles if r.id == mod_role_id]\n mod_role = mod_roles[0] if len(mod_roles) > 0 else None\n admin_roles = [r for r in server.roles if r.id == admin_role_id]\n admin_role = admin_roles[0] if len(admin_roles) > 0 else None\n\n if user and user == await bot.is_owner(user):\n return True\n elif admin_role and discord.utils.get(user.roles, name=admin_role):\n return True\n elif mod_role and discord.utils.get(user.roles, name=mod_role):\n return True\n else:\n return False\n\n\ndef strfdelta(delta: timedelta):\n \"\"\"Format a timedelta object to a message with time units.\n\n Parameters\n ----------\n delta : datetime.timedelta\n The duration to parse.\n\n Returns\n -------\n str\n A message representing the timedelta with units.\n\n \"\"\"\n s = []\n if delta.days:\n ds = '%i day' % delta.days\n if delta.days > 1:\n ds += 's'\n s.append(ds)\n hrs, rem = divmod(delta.seconds, 60*60)\n if hrs:\n hs = '%i hr' % hrs\n if hrs > 1:\n hs += 's'\n s.append(hs)\n mins, secs = divmod(rem, 60)\n if mins:\n s.append('%i min' % mins)\n if secs:\n s.append('%i sec' % secs)\n return ' '.join(s)\n\n\nasync def is_admin_or_superior(\n bot: Red, obj: Union[discord.Message, discord.Member, discord.Role]):\n \"\"\"Same as `is_mod_or_superior` except for admin permissions.\n\n If a message is passed, its author's permissions are checked. 
If a role is\n passed, it simply checks if it is the admin role.\n\n Parameters\n ----------\n bot : redbot.core.bot.Red\n The bot object.\n obj : `discord.Message` or `discord.Member` or `discord.Role`\n The object to check permissions for.\n\n Returns\n -------\n bool\n :code:`True` if the object has admin permissions.\n\n Raises\n ------\n TypeError\n If the wrong type of ``obj`` was passed.\n\n \"\"\"\n user = None\n if isinstance(obj, discord.Message):\n user = obj.author\n elif isinstance(obj, discord.Member):\n user = obj\n elif isinstance(obj, discord.Role):\n pass\n else:\n raise TypeError('Only messages, members or roles may be passed')\n\n server = obj.guild\n admin_role_id = await bot.db.guild(server).admin_role()\n\n if isinstance(obj, discord.Role):\n return obj.id == admin_role_id\n admin_roles = [r for r in server.roles if r.id == admin_role_id]\n admin_role = admin_roles[0] if len(admin_roles) > 0 else None\n\n if user and await bot.is_owner(user):\n return True\n elif admin_roles and discord.utils.get(user.roles, name=admin_role):\n return True\n else:\n return False\n", "path": "redbot/core/utils/mod.py"}], "after_files": [{"content": "import asyncio\nfrom datetime import timedelta\nfrom typing import List, Iterable, Union\n\nimport discord\n\nfrom redbot.core import Config\nfrom redbot.core.bot import Red\n\n\nasync def mass_purge(messages: List[discord.Message],\n channel: discord.TextChannel):\n \"\"\"Bulk delete messages from a channel.\n\n If more than 100 messages are supplied, the bot will delete 100 messages at\n a time, sleeping between each action.\n\n Note\n ----\n Messages must not be older than 14 days, and the bot must not be a user\n account.\n\n Parameters\n ----------\n messages : `list` of `discord.Message`\n The messages to bulk delete.\n channel : discord.TextChannel\n The channel to delete messages from.\n\n Raises\n ------\n discord.Forbidden\n You do not have proper permissions to delete the messages or you\u2019re not\n using a bot account.\n discord.HTTPException\n Deleting the messages failed.\n\n \"\"\"\n while messages:\n if len(messages) > 1:\n await channel.delete_messages(messages[:100])\n messages = messages[100:]\n else:\n await messages[0].delete()\n messages = []\n await asyncio.sleep(1.5)\n\n\nasync def slow_deletion(messages: Iterable[discord.Message]):\n \"\"\"Delete a list of messages one at a time.\n\n Any exceptions raised when trying to delete the message will be silenced.\n\n Parameters\n ----------\n messages : `iterable` of `discord.Message`\n The messages to delete.\n\n \"\"\"\n for message in messages:\n try:\n await message.delete()\n except discord.HTTPException:\n pass\n\n\ndef get_audit_reason(author: discord.Member, reason: str = None):\n \"\"\"Construct a reason to appear in the audit log.\n\n Parameters\n ----------\n author : discord.Member\n The author behind the audit log action.\n reason : str\n The reason behidn the audit log action.\n\n Returns\n -------\n str\n The formatted audit log reason.\n\n \"\"\"\n return \\\n \"Action requested by {} (ID {}). 
Reason: {}\".format(author, author.id, reason) if reason else \\\n \"Action requested by {} (ID {}).\".format(author, author.id)\n\n\nasync def is_allowed_by_hierarchy(bot: Red,\n settings: Config,\n guild: discord.Guild,\n mod: discord.Member,\n user: discord.Member):\n if not await settings.guild(guild).respect_hierarchy():\n return True\n is_special = mod == guild.owner or await bot.is_owner(mod)\n return mod.top_role.position > user.top_role.position or is_special\n\n\nasync def is_mod_or_superior(\n bot: Red, obj: Union[discord.Message, discord.Member, discord.Role]):\n \"\"\"Check if an object has mod or superior permissions.\n\n If a message is passed, its author's permissions are checked. If a role is\n passed, it simply checks if it is one of either the admin or mod roles.\n\n Parameters\n ----------\n bot : redbot.core.bot.Red\n The bot object.\n obj : `discord.Message` or `discord.Member` or `discord.Role`\n The object to check permissions for.\n\n Returns\n -------\n bool\n :code:`True` if the object has mod permissions.\n\n Raises\n ------\n TypeError\n If the wrong type of ``obj`` was passed.\n\n \"\"\"\n user = None\n if isinstance(obj, discord.Message):\n user = obj.author\n elif isinstance(obj, discord.Member) or isinstance(obj, discord.User):\n user = obj\n elif isinstance(obj, discord.Role):\n pass\n else:\n raise TypeError('Only messages, members or roles may be passed')\n\n server = obj.guild\n admin_role_id = await bot.db.guild(server).admin_role()\n mod_role_id = await bot.db.guild(server).mod_role()\n\n if isinstance(obj, discord.Role):\n return obj.id in [admin_role_id, mod_role_id]\n mod_roles = [r for r in server.roles if r.id == mod_role_id]\n mod_role = mod_roles[0] if len(mod_roles) > 0 else None\n admin_roles = [r for r in server.roles if r.id == admin_role_id]\n admin_role = admin_roles[0] if len(admin_roles) > 0 else None\n\n if user and user == await bot.is_owner(user):\n return True\n elif admin_role and discord.utils.get(user.roles, name=admin_role):\n return True\n elif mod_role and discord.utils.get(user.roles, name=mod_role):\n return True\n else:\n return False\n\n\ndef strfdelta(delta: timedelta):\n \"\"\"Format a timedelta object to a message with time units.\n\n Parameters\n ----------\n delta : datetime.timedelta\n The duration to parse.\n\n Returns\n -------\n str\n A message representing the timedelta with units.\n\n \"\"\"\n s = []\n if delta.days:\n ds = '%i day' % delta.days\n if delta.days > 1:\n ds += 's'\n s.append(ds)\n hrs, rem = divmod(delta.seconds, 60*60)\n if hrs:\n hs = '%i hr' % hrs\n if hrs > 1:\n hs += 's'\n s.append(hs)\n mins, secs = divmod(rem, 60)\n if mins:\n s.append('%i min' % mins)\n if secs:\n s.append('%i sec' % secs)\n return ' '.join(s)\n\n\nasync def is_admin_or_superior(\n bot: Red, obj: Union[discord.Message, discord.Member, discord.Role]):\n \"\"\"Same as `is_mod_or_superior` except for admin permissions.\n\n If a message is passed, its author's permissions are checked. 
If a role is\n passed, it simply checks if it is the admin role.\n\n Parameters\n ----------\n bot : redbot.core.bot.Red\n The bot object.\n obj : `discord.Message` or `discord.Member` or `discord.Role`\n The object to check permissions for.\n\n Returns\n -------\n bool\n :code:`True` if the object has admin permissions.\n\n Raises\n ------\n TypeError\n If the wrong type of ``obj`` was passed.\n\n \"\"\"\n user = None\n if isinstance(obj, discord.Message):\n user = obj.author\n elif isinstance(obj, discord.Member) or isinstance(obj, discord.User):\n user = obj\n elif isinstance(obj, discord.Role):\n pass\n else:\n raise TypeError('Only messages, members or roles may be passed')\n\n server = obj.guild\n admin_role_id = await bot.db.guild(server).admin_role()\n\n if isinstance(obj, discord.Role):\n return obj.id == admin_role_id\n admin_roles = [r for r in server.roles if r.id == admin_role_id]\n admin_role = admin_roles[0] if len(admin_roles) > 0 else None\n\n if user and await bot.is_owner(user):\n return True\n elif admin_roles and discord.utils.get(user.roles, name=admin_role):\n return True\n else:\n return False\n", "path": "redbot/core/utils/mod.py"}]}
num_tokens: 2,518
num_tokens_diff: 184

problem_id: gh_patches_debug_7472
source: rasdani/github-patches
task_type: git_diff
in_source_id: getnikola__nikola-3211
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cannot use "ignored_assets" properly on Windows with custom themes <!-- Before creating an issue: * make sure you are using an up-to-date version of Nikola * search for existing issues that might be related Make sure to: * provide information about your environment (below) * include all the output you get, and any other information related to your problem Nikola v7.6.4, as provided by Ubuntu, is NOT SUPPORTED. If you are using this version, you should upgrade: https://getnikola.com/getting-started.html --> ### Environment **Python Version:** Python 3.7.1 **Nikola Version:** Nikola v8.0.1 **Operating System:** Windows 10 ### Description: The option "ignored_assets" used in theme meta files does not work properly on Windows: the relative path is only recognized when using a backslash, i.e. css\theme.css instead of css/theme.css ([see doc](https://getnikola.com/theming.html#theme-meta-files)). ### Solution: Normalize the path to the convention of the operating system when parsing the theme meta file. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nikola/plugins/task/copy_assets.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # Copyright © 2012-2019 Roberto Alsina and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 27 """Copy theme assets into output.""" 28 29 30 import io 31 import os 32 33 from nikola.plugin_categories import Task 34 from nikola import utils 35 36 37 class CopyAssets(Task): 38 """Copy theme assets into output.""" 39 40 name = "copy_assets" 41 42 def gen_tasks(self): 43 """Create tasks to copy the assets of the whole theme chain. 44 45 If a file is present on two themes, use the version 46 from the "youngest" theme. 
47 """ 48 kw = { 49 "themes": self.site.THEMES, 50 "translations": self.site.translations, 51 "files_folders": self.site.config['FILES_FOLDERS'], 52 "output_folder": self.site.config['OUTPUT_FOLDER'], 53 "filters": self.site.config['FILTERS'], 54 "code_color_scheme": self.site.config['CODE_COLOR_SCHEME'], 55 "code.css_selectors": ['pre.code', '.highlight pre'], 56 "code.css_head": '/* code.css file generated by Nikola */\n', 57 "code.css_close": "\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\n", 58 } 59 tasks = {} 60 code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css') 61 code_css_input = utils.get_asset_path('assets/css/code.css', 62 themes=kw['themes'], 63 files_folders=kw['files_folders'], output_dir=None) 64 yield self.group_task() 65 66 main_theme = utils.get_theme_path(kw['themes'][0]) 67 theme_ini = utils.parse_theme_meta(main_theme) 68 if theme_ini: 69 ignored_assets = theme_ini.get("Nikola", "ignored_assets", fallback='').split(',') 70 ignored_assets = [asset_name.strip() for asset_name in ignored_assets] 71 else: 72 ignored_assets = [] 73 74 for theme_name in kw['themes']: 75 src = os.path.join(utils.get_theme_path(theme_name), 'assets') 76 dst = os.path.join(kw['output_folder'], 'assets') 77 for task in utils.copy_tree(src, dst): 78 asset_name = os.path.relpath(task['name'], dst) 79 if task['name'] in tasks or asset_name in ignored_assets: 80 continue 81 tasks[task['name']] = task 82 task['uptodate'] = [utils.config_changed(kw, 'nikola.plugins.task.copy_assets')] 83 task['basename'] = self.name 84 if code_css_input: 85 if 'file_dep' not in task: 86 task['file_dep'] = [] 87 task['file_dep'].append(code_css_input) 88 yield utils.apply_filters(task, kw['filters']) 89 90 # Check whether or not there is a code.css file around. 91 if not code_css_input and kw['code_color_scheme']: 92 def create_code_css(): 93 from pygments.formatters import get_formatter_by_name 94 formatter = get_formatter_by_name('html', style=kw["code_color_scheme"]) 95 utils.makedirs(os.path.dirname(code_css_path)) 96 with io.open(code_css_path, 'w+', encoding='utf8') as outf: 97 outf.write(kw["code.css_head"]) 98 outf.write(formatter.get_style_defs(kw["code.css_selectors"])) 99 outf.write(kw["code.css_close"]) 100 101 if os.path.exists(code_css_path): 102 with io.open(code_css_path, 'r', encoding='utf-8') as fh: 103 testcontents = fh.read(len(kw["code.css_head"])) == kw["code.css_head"] 104 else: 105 testcontents = False 106 107 task = { 108 'basename': self.name, 109 'name': code_css_path, 110 'targets': [code_css_path], 111 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.copy_assets'), testcontents], 112 'actions': [(create_code_css, [])], 113 'clean': True, 114 } 115 yield utils.apply_filters(task, kw['filters']) 116 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py --- a/nikola/plugins/task/copy_assets.py +++ b/nikola/plugins/task/copy_assets.py @@ -67,7 +67,7 @@ theme_ini = utils.parse_theme_meta(main_theme) if theme_ini: ignored_assets = theme_ini.get("Nikola", "ignored_assets", fallback='').split(',') - ignored_assets = [asset_name.strip() for asset_name in ignored_assets] + ignored_assets = [os.path.normpath(asset_name.strip()) for asset_name in ignored_assets] else: ignored_assets = []
{"golden_diff": "diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py\n--- a/nikola/plugins/task/copy_assets.py\n+++ b/nikola/plugins/task/copy_assets.py\n@@ -67,7 +67,7 @@\n theme_ini = utils.parse_theme_meta(main_theme)\n if theme_ini:\n ignored_assets = theme_ini.get(\"Nikola\", \"ignored_assets\", fallback='').split(',')\n- ignored_assets = [asset_name.strip() for asset_name in ignored_assets]\n+ ignored_assets = [os.path.normpath(asset_name.strip()) for asset_name in ignored_assets]\n else:\n ignored_assets = []\n", "issue": "Cannot use \"ignored_assets\" properly on Windows with custom themes\n<!--\r\nBefore creating an issue:\r\n* make sure you are using an up-to-date version of Nikola\r\n* search for existing issues that might be related\r\n\r\nMake sure to:\r\n* provide information about your environment (below)\r\n* include all the output you get, and any other information related to your problem\r\n\r\nNikola v7.6.4, as provided by Ubuntu, is NOT SUPPORTED.\r\nIf you are using this version, you should upgrade: https://getnikola.com/getting-started.html\r\n-->\r\n\r\n### Environment\r\n\r\n**Python Version:** Python 3.7.1\r\n\r\n**Nikola Version:** Nikola v8.0.1\r\n\r\n**Operating System:** Windows 10\r\n\r\n### Description:\r\n\r\nThe option \"ignored_assets\" used in theme meta files does not work properly on Windows: the relative path is only recognized when using a backslash, i.e. css\\theme.css instead of css/theme.css ([see doc](https://getnikola.com/theming.html#theme-meta-files)).\r\n\r\n### Solution:\r\n\r\nNormalize the path to the convention of the operating system when parsing the theme meta file.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2019 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Copy theme assets into output.\"\"\"\n\n\nimport io\nimport os\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass CopyAssets(Task):\n \"\"\"Copy theme assets into output.\"\"\"\n\n name = \"copy_assets\"\n\n def gen_tasks(self):\n \"\"\"Create tasks to copy the assets of the whole theme chain.\n\n If a file is present on two themes, use the version\n from the \"youngest\" theme.\n \"\"\"\n kw = {\n \"themes\": self.site.THEMES,\n \"translations\": self.site.translations,\n \"files_folders\": self.site.config['FILES_FOLDERS'],\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": ['pre.code', '.highlight pre'],\n \"code.css_head\": '/* code.css file generated by Nikola */\\n',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n code_css_input = utils.get_asset_path('assets/css/code.css',\n themes=kw['themes'],\n files_folders=kw['files_folders'], output_dir=None)\n yield self.group_task()\n\n main_theme = utils.get_theme_path(kw['themes'][0])\n theme_ini = utils.parse_theme_meta(main_theme)\n if theme_ini:\n ignored_assets = theme_ini.get(\"Nikola\", \"ignored_assets\", fallback='').split(',')\n ignored_assets = [asset_name.strip() for asset_name in ignored_assets]\n else:\n ignored_assets = []\n\n for theme_name in kw['themes']:\n src = os.path.join(utils.get_theme_path(theme_name), 'assets')\n dst = os.path.join(kw['output_folder'], 'assets')\n for task in utils.copy_tree(src, dst):\n asset_name = os.path.relpath(task['name'], dst)\n if task['name'] in tasks or asset_name in ignored_assets:\n continue\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw, 'nikola.plugins.task.copy_assets')]\n task['basename'] = self.name\n if code_css_input:\n if 'file_dep' not in task:\n task['file_dep'] = []\n task['file_dep'].append(code_css_input)\n yield utils.apply_filters(task, kw['filters'])\n\n # Check whether or not there is a code.css file around.\n if not code_css_input and kw['code_color_scheme']:\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n utils.makedirs(os.path.dirname(code_css_path))\n with io.open(code_css_path, 'w+', encoding='utf8') as outf:\n outf.write(kw[\"code.css_head\"])\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n\n if os.path.exists(code_css_path):\n with io.open(code_css_path, 'r', encoding='utf-8') as fh:\n testcontents = fh.read(len(kw[\"code.css_head\"])) == kw[\"code.css_head\"]\n else:\n testcontents = False\n\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': [code_css_path],\n 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.copy_assets'), testcontents],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n yield utils.apply_filters(task, kw['filters'])\n", "path": "nikola/plugins/task/copy_assets.py"}], "after_files": [{"content": "# -*- 
coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2019 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Copy theme assets into output.\"\"\"\n\n\nimport io\nimport os\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass CopyAssets(Task):\n \"\"\"Copy theme assets into output.\"\"\"\n\n name = \"copy_assets\"\n\n def gen_tasks(self):\n \"\"\"Create tasks to copy the assets of the whole theme chain.\n\n If a file is present on two themes, use the version\n from the \"youngest\" theme.\n \"\"\"\n kw = {\n \"themes\": self.site.THEMES,\n \"translations\": self.site.translations,\n \"files_folders\": self.site.config['FILES_FOLDERS'],\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": ['pre.code', '.highlight pre'],\n \"code.css_head\": '/* code.css file generated by Nikola */\\n',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n code_css_input = utils.get_asset_path('assets/css/code.css',\n themes=kw['themes'],\n files_folders=kw['files_folders'], output_dir=None)\n yield self.group_task()\n\n main_theme = utils.get_theme_path(kw['themes'][0])\n theme_ini = utils.parse_theme_meta(main_theme)\n if theme_ini:\n ignored_assets = theme_ini.get(\"Nikola\", \"ignored_assets\", fallback='').split(',')\n ignored_assets = [os.path.normpath(asset_name.strip()) for asset_name in ignored_assets]\n else:\n ignored_assets = []\n\n for theme_name in kw['themes']:\n src = os.path.join(utils.get_theme_path(theme_name), 'assets')\n dst = os.path.join(kw['output_folder'], 'assets')\n for task in utils.copy_tree(src, dst):\n asset_name = os.path.relpath(task['name'], dst)\n if task['name'] in tasks or asset_name in ignored_assets:\n continue\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw, 'nikola.plugins.task.copy_assets')]\n task['basename'] = self.name\n if code_css_input:\n if 'file_dep' not in task:\n task['file_dep'] = []\n task['file_dep'].append(code_css_input)\n yield utils.apply_filters(task, kw['filters'])\n\n # Check whether or not there is a code.css file around.\n if not code_css_input and kw['code_color_scheme']:\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n 
formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n utils.makedirs(os.path.dirname(code_css_path))\n with io.open(code_css_path, 'w+', encoding='utf8') as outf:\n outf.write(kw[\"code.css_head\"])\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n\n if os.path.exists(code_css_path):\n with io.open(code_css_path, 'r', encoding='utf-8') as fh:\n testcontents = fh.read(len(kw[\"code.css_head\"])) == kw[\"code.css_head\"]\n else:\n testcontents = False\n\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': [code_css_path],\n 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.copy_assets'), testcontents],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n yield utils.apply_filters(task, kw['filters'])\n", "path": "nikola/plugins/task/copy_assets.py"}]}
num_tokens: 1,837
num_tokens_diff: 143

problem_id: gh_patches_debug_353
source: rasdani/github-patches
task_type: git_diff
in_source_id: sopel-irc__sopel-1044
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [announce] Send confirmation to caller after all channels announced When Sopel is in many channels, announces are likely to be rate-limited. This makes it hard to know, for example, when it's safe to shut down the bot if announce is being used to broadcast an upgrade notice. It's an easy fix, and I'll open a PR for it tomorrow if there are no objections. I am as-yet undecided whether it's best to use `bot.reply()` or `bot.notice()` for this (or even `bot.msg()` via PM to the caller), but I'll think about it between now and when I open the PR, and it can always be changed before merging. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sopel/modules/announce.py` Content: ``` 1 # coding=utf-8 2 """ 3 announce.py - Send a message to all channels 4 Copyright © 2013, Elad Alfassa, <[email protected]> 5 Licensed under the Eiffel Forum License 2. 6 7 """ 8 from __future__ import unicode_literals, absolute_import, print_function, division 9 10 from sopel.module import commands, example 11 12 13 @commands('announce') 14 @example('.announce Some important message here') 15 def announce(bot, trigger): 16 """ 17 Send an announcement to all channels the bot is in 18 """ 19 if not trigger.admin: 20 bot.reply('Sorry, I can\'t let you do that') 21 return 22 for channel in bot.channels: 23 bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2)) 24 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py --- a/sopel/modules/announce.py +++ b/sopel/modules/announce.py @@ -21,3 +21,4 @@ return for channel in bot.channels: bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2)) + bot.reply('Announce complete.')
{"golden_diff": "diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py\n--- a/sopel/modules/announce.py\n+++ b/sopel/modules/announce.py\n@@ -21,3 +21,4 @@\n return\n for channel in bot.channels:\n bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))\n+ bot.reply('Announce complete.')\n", "issue": "[announce] Send confirmation to caller after all channels announced\nWhen Sopel is in many channels, announces are likely to be rate-limited. This makes it hard to know, for example, when it's safe to shut down the bot if announce is being used to broadcast an upgrade notice.\n\nIt's an easy fix, and I'll open a PR for it tomorrow if there are no objections.\n\nI am as-yet undecided whether it's best to use `bot.reply()` or `bot.notice()` for this (or even `bot.msg()` via PM to the caller), but I'll think about it between now and when I open the PR, and it can always be changed before merging.\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nannounce.py - Send a message to all channels\nCopyright \u00a9 2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.module import commands, example\n\n\n@commands('announce')\n@example('.announce Some important message here')\ndef announce(bot, trigger):\n \"\"\"\n Send an announcement to all channels the bot is in\n \"\"\"\n if not trigger.admin:\n bot.reply('Sorry, I can\\'t let you do that')\n return\n for channel in bot.channels:\n bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))\n", "path": "sopel/modules/announce.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\nannounce.py - Send a message to all channels\nCopyright \u00a9 2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.module import commands, example\n\n\n@commands('announce')\n@example('.announce Some important message here')\ndef announce(bot, trigger):\n \"\"\"\n Send an announcement to all channels the bot is in\n \"\"\"\n if not trigger.admin:\n bot.reply('Sorry, I can\\'t let you do that')\n return\n for channel in bot.channels:\n bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))\n bot.reply('Announce complete.')\n", "path": "sopel/modules/announce.py"}]}
num_tokens: 608
num_tokens_diff: 90

problem_id: gh_patches_debug_14515
source: rasdani/github-patches
task_type: git_diff
in_source_id: scipy__scipy-11229
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- OverflowError in resample_poly (upfirdn) scipy.signal.resample_poly fails if the output vector length would be greater than 2^31-1. ``` Traceback (most recent call last): File "<ipython-input-1-ac5d2b0a1632>", line 11, in <module> yy = resample_poly(y, 128, 1) File "F:\Programs\Miniconda3\lib\site-packages\scipy\signal\signaltools.py", line 2424, in resample_poly y = upfirdn(h, x, up, down, axis=axis) File "F:\Programs\Miniconda3\lib\site-packages\scipy\signal\_upfirdn.py", line 183, in upfirdn return ufd.apply_filter(x, axis) File "F:\Programs\Miniconda3\lib\site-packages\scipy\signal\_upfirdn.py", line 82, in apply_filter output_shape[axis] = output_len OverflowError: Python int too large to convert to C long ``` output_shape is created on the previous line (81): `output_shape = np.asarray(x.shape)` With an unspecified dtype it appears to get np.int32 by default, which is inadequate for specifying large array shapes. This could be fixed by explicitly specifying the dtype: `output_shape = np.asarray(x.shape, dtype=np.int64)` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scipy/signal/_upfirdn.py` Content: ``` 1 # Code adapted from "upfirdn" python library with permission: 2 # 3 # Copyright (c) 2009, Motorola, Inc 4 # 5 # All Rights Reserved. 6 # 7 # Redistribution and use in source and binary forms, with or without 8 # modification, are permitted provided that the following conditions are 9 # met: 10 # 11 # * Redistributions of source code must retain the above copyright notice, 12 # this list of conditions and the following disclaimer. 13 # 14 # * Redistributions in binary form must reproduce the above copyright 15 # notice, this list of conditions and the following disclaimer in the 16 # documentation and/or other materials provided with the distribution. 17 # 18 # * Neither the name of Motorola nor the names of its contributors may be 19 # used to endorse or promote products derived from this software without 20 # specific prior written permission. 21 # 22 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 23 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 26 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 27 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 28 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 29 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 30 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 31 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 34 import numpy as np 35 36 from ._upfirdn_apply import _output_len, _apply, mode_enum 37 38 __all__ = ['upfirdn', '_output_len'] 39 40 _upfirdn_modes = [ 41 'constant', 'wrap', 'edge', 'smooth', 'symmetric', 'reflect', 42 'antisymmetric', 'antireflect', 'line', 43 ] 44 45 46 def _pad_h(h, up): 47 """Store coefficients in a transposed, flipped arrangement. 48 49 For example, suppose upRate is 3, and the 50 input number of coefficients is 10, represented as h[0], ..., h[9]. 
51 52 Then the internal buffer will look like this:: 53 54 h[9], h[6], h[3], h[0], // flipped phase 0 coefs 55 0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded) 56 0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded) 57 58 """ 59 h_padlen = len(h) + (-len(h) % up) 60 h_full = np.zeros(h_padlen, h.dtype) 61 h_full[:len(h)] = h 62 h_full = h_full.reshape(-1, up).T[:, ::-1].ravel() 63 return h_full 64 65 66 def _check_mode(mode): 67 mode = mode.lower() 68 enum = mode_enum(mode) 69 return enum 70 71 72 class _UpFIRDn(object): 73 def __init__(self, h, x_dtype, up, down): 74 """Helper for resampling""" 75 h = np.asarray(h) 76 if h.ndim != 1 or h.size == 0: 77 raise ValueError('h must be 1-D with non-zero length') 78 self._output_type = np.result_type(h.dtype, x_dtype, np.float32) 79 h = np.asarray(h, self._output_type) 80 self._up = int(up) 81 self._down = int(down) 82 if self._up < 1 or self._down < 1: 83 raise ValueError('Both up and down must be >= 1') 84 # This both transposes, and "flips" each phase for filtering 85 self._h_trans_flip = _pad_h(h, self._up) 86 self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip) 87 88 def apply_filter(self, x, axis=-1, mode='constant', cval=0): 89 """Apply the prepared filter to the specified axis of a N-D signal x""" 90 output_len = _output_len(len(self._h_trans_flip), x.shape[axis], 91 self._up, self._down) 92 output_shape = np.asarray(x.shape) 93 output_shape[axis] = output_len 94 out = np.zeros(output_shape, dtype=self._output_type, order='C') 95 axis = axis % x.ndim 96 mode = _check_mode(mode) 97 _apply(np.asarray(x, self._output_type), 98 self._h_trans_flip, out, 99 self._up, self._down, axis, mode, cval) 100 return out 101 102 103 def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0): 104 """Upsample, FIR filter, and downsample 105 106 Parameters 107 ---------- 108 h : array_like 109 1-D FIR (finite-impulse response) filter coefficients. 110 x : array_like 111 Input signal array. 112 up : int, optional 113 Upsampling rate. Default is 1. 114 down : int, optional 115 Downsampling rate. Default is 1. 116 axis : int, optional 117 The axis of the input data array along which to apply the 118 linear filter. The filter is applied to each subarray along 119 this axis. Default is -1. 120 mode : str, optional 121 The signal extension mode to use. The set 122 ``{"constant", "symmetric", "reflect", "edge", "wrap"}`` correspond to 123 modes provided by `numpy.pad`. ``"smooth"`` implements a smooth 124 extension by extending based on the slope of the last 2 points at each 125 end of the array. ``"antireflect"`` and ``"antisymmetric"`` are 126 anti-symmetric versions of ``"reflect"`` and ``"symmetric"``. The mode 127 `"line"` extends the signal based on a linear trend defined by the 128 first and last points along the ``axis``. 129 130 .. versionadded:: 1.4.0 131 cval : float, optional 132 The constant value to use when ``mode == "constant"``. 133 134 .. versionadded:: 1.4.0 135 136 Returns 137 ------- 138 y : ndarray 139 The output signal array. Dimensions will be the same as `x` except 140 for along `axis`, which will change size according to the `h`, 141 `up`, and `down` parameters. 142 143 Notes 144 ----- 145 The algorithm is an implementation of the block diagram shown on page 129 146 of the Vaidyanathan text [1]_ (Figure 4.3-8d). 147 148 .. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks, 149 Prentice Hall, 1993. 
150 151 The direct approach of upsampling by factor of P with zero insertion, 152 FIR filtering of length ``N``, and downsampling by factor of Q is 153 O(N*Q) per output sample. The polyphase implementation used here is 154 O(N/P). 155 156 .. versionadded:: 0.18 157 158 Examples 159 -------- 160 Simple operations: 161 162 >>> from scipy.signal import upfirdn 163 >>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter 164 array([ 1., 2., 3., 2., 1.]) 165 >>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion 166 array([ 1., 0., 0., 2., 0., 0., 3., 0., 0.]) 167 >>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold 168 array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.]) 169 >>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation 170 array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5, 0. ]) 171 >>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3 172 array([ 0., 3., 6., 9.]) 173 >>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3 174 array([ 0. , 1. , 2.5, 4. , 5.5, 7. , 8.5, 0. ]) 175 176 Apply a single filter to multiple signals: 177 178 >>> x = np.reshape(np.arange(8), (4, 2)) 179 >>> x 180 array([[0, 1], 181 [2, 3], 182 [4, 5], 183 [6, 7]]) 184 185 Apply along the last dimension of ``x``: 186 187 >>> h = [1, 1] 188 >>> upfirdn(h, x, 2) 189 array([[ 0., 0., 1., 1.], 190 [ 2., 2., 3., 3.], 191 [ 4., 4., 5., 5.], 192 [ 6., 6., 7., 7.]]) 193 194 Apply along the 0th dimension of ``x``: 195 196 >>> upfirdn(h, x, 2, axis=0) 197 array([[ 0., 1.], 198 [ 0., 1.], 199 [ 2., 3.], 200 [ 2., 3.], 201 [ 4., 5.], 202 [ 4., 5.], 203 [ 6., 7.], 204 [ 6., 7.]]) 205 206 """ 207 x = np.asarray(x) 208 ufd = _UpFIRDn(h, x.dtype, up, down) 209 # This is equivalent to (but faster than) using np.apply_along_axis 210 return ufd.apply_filter(x, axis, mode, cval) 211 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scipy/signal/_upfirdn.py b/scipy/signal/_upfirdn.py --- a/scipy/signal/_upfirdn.py +++ b/scipy/signal/_upfirdn.py @@ -89,7 +89,9 @@ """Apply the prepared filter to the specified axis of a N-D signal x""" output_len = _output_len(len(self._h_trans_flip), x.shape[axis], self._up, self._down) - output_shape = np.asarray(x.shape) + # Explicit use of np.int64 for output_shape dtype avoids OverflowError + # when allocating large array on platforms where np.int_ is 32 bits + output_shape = np.asarray(x.shape, dtype=np.int64) output_shape[axis] = output_len out = np.zeros(output_shape, dtype=self._output_type, order='C') axis = axis % x.ndim
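As an aside, the overflow described in the issue is easy to reproduce outside of SciPy. The sketch below forces an int32 shape array so that the behaviour of platforms whose default NumPy integer is 32 bits (such as 64-bit Windows) shows up everywhere; the variable names are illustrative only.

```python
import numpy as np

# Forcing int32 mimics np.asarray(x.shape) on platforms whose default
# integer type is 32-bit; lengths past 2**31 - 1 cannot be stored.
shape32 = np.asarray((4, 100), dtype=np.int32)
try:
    shape32[1] = 2**31
except OverflowError as exc:
    print("int32 shape array:", exc)

# An explicit 64-bit dtype, as in the patch above, accepts the same length.
shape64 = np.asarray((4, 100), dtype=np.int64)
shape64[1] = 2**31
print("int64 shape array:", shape64)
```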
{"golden_diff": "diff --git a/scipy/signal/_upfirdn.py b/scipy/signal/_upfirdn.py\n--- a/scipy/signal/_upfirdn.py\n+++ b/scipy/signal/_upfirdn.py\n@@ -89,7 +89,9 @@\n \"\"\"Apply the prepared filter to the specified axis of a N-D signal x\"\"\"\n output_len = _output_len(len(self._h_trans_flip), x.shape[axis],\n self._up, self._down)\n- output_shape = np.asarray(x.shape)\n+ # Explicit use of np.int64 for output_shape dtype avoids OverflowError\n+ # when allocating large array on platforms where np.int_ is 32 bits\n+ output_shape = np.asarray(x.shape, dtype=np.int64)\n output_shape[axis] = output_len\n out = np.zeros(output_shape, dtype=self._output_type, order='C')\n axis = axis % x.ndim\n", "issue": "OverflowError in resample_poly (upfirdn)\nscipy.signal.resample_poly fails if the output vector length would be greater than 2^31-1.\r\n\r\n```\r\nTraceback (most recent call last):\r\n\r\n File \"<ipython-input-1-ac5d2b0a1632>\", line 11, in <module>\r\n yy = resample_poly(y, 128, 1)\r\n\r\n File \"F:\\Programs\\Miniconda3\\lib\\site-packages\\scipy\\signal\\signaltools.py\", line 2424, in resample_poly\r\n y = upfirdn(h, x, up, down, axis=axis)\r\n\r\n File \"F:\\Programs\\Miniconda3\\lib\\site-packages\\scipy\\signal\\_upfirdn.py\", line 183, in upfirdn\r\n return ufd.apply_filter(x, axis)\r\n\r\n File \"F:\\Programs\\Miniconda3\\lib\\site-packages\\scipy\\signal\\_upfirdn.py\", line 82, in apply_filter\r\n output_shape[axis] = output_len\r\n\r\nOverflowError: Python int too large to convert to C long\r\n```\r\n\r\noutput_shape is created on the previous line (81):\r\n`output_shape = np.asarray(x.shape)`\r\n\r\nWith an unspecified dtype it appears to get np.int32 by default, which is inadequate for specifying large array shapes.\r\n\r\nThis could be fixed by explicitly specifying the dtype:\r\n`output_shape = np.asarray(x.shape, dtype=np.int64)`\n", "before_files": [{"content": "# Code adapted from \"upfirdn\" python library with permission:\n#\n# Copyright (c) 2009, Motorola, Inc\n#\n# All Rights Reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# * Neither the name of Motorola nor the names of its contributors may be\n# used to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport numpy as np\n\nfrom ._upfirdn_apply import _output_len, _apply, mode_enum\n\n__all__ = ['upfirdn', '_output_len']\n\n_upfirdn_modes = [\n 'constant', 'wrap', 'edge', 'smooth', 'symmetric', 'reflect',\n 'antisymmetric', 'antireflect', 'line',\n]\n\n\ndef _pad_h(h, up):\n \"\"\"Store coefficients in a transposed, flipped arrangement.\n\n For example, suppose upRate is 3, and the\n input number of coefficients is 10, represented as h[0], ..., h[9].\n\n Then the internal buffer will look like this::\n\n h[9], h[6], h[3], h[0], // flipped phase 0 coefs\n 0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded)\n 0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded)\n\n \"\"\"\n h_padlen = len(h) + (-len(h) % up)\n h_full = np.zeros(h_padlen, h.dtype)\n h_full[:len(h)] = h\n h_full = h_full.reshape(-1, up).T[:, ::-1].ravel()\n return h_full\n\n\ndef _check_mode(mode):\n mode = mode.lower()\n enum = mode_enum(mode)\n return enum\n\n\nclass _UpFIRDn(object):\n def __init__(self, h, x_dtype, up, down):\n \"\"\"Helper for resampling\"\"\"\n h = np.asarray(h)\n if h.ndim != 1 or h.size == 0:\n raise ValueError('h must be 1-D with non-zero length')\n self._output_type = np.result_type(h.dtype, x_dtype, np.float32)\n h = np.asarray(h, self._output_type)\n self._up = int(up)\n self._down = int(down)\n if self._up < 1 or self._down < 1:\n raise ValueError('Both up and down must be >= 1')\n # This both transposes, and \"flips\" each phase for filtering\n self._h_trans_flip = _pad_h(h, self._up)\n self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip)\n\n def apply_filter(self, x, axis=-1, mode='constant', cval=0):\n \"\"\"Apply the prepared filter to the specified axis of a N-D signal x\"\"\"\n output_len = _output_len(len(self._h_trans_flip), x.shape[axis],\n self._up, self._down)\n output_shape = np.asarray(x.shape)\n output_shape[axis] = output_len\n out = np.zeros(output_shape, dtype=self._output_type, order='C')\n axis = axis % x.ndim\n mode = _check_mode(mode)\n _apply(np.asarray(x, self._output_type),\n self._h_trans_flip, out,\n self._up, self._down, axis, mode, cval)\n return out\n\n\ndef upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0):\n \"\"\"Upsample, FIR filter, and downsample\n\n Parameters\n ----------\n h : array_like\n 1-D FIR (finite-impulse response) filter coefficients.\n x : array_like\n Input signal array.\n up : int, optional\n Upsampling rate. Default is 1.\n down : int, optional\n Downsampling rate. Default is 1.\n axis : int, optional\n The axis of the input data array along which to apply the\n linear filter. The filter is applied to each subarray along\n this axis. Default is -1.\n mode : str, optional\n The signal extension mode to use. The set\n ``{\"constant\", \"symmetric\", \"reflect\", \"edge\", \"wrap\"}`` correspond to\n modes provided by `numpy.pad`. ``\"smooth\"`` implements a smooth\n extension by extending based on the slope of the last 2 points at each\n end of the array. 
``\"antireflect\"`` and ``\"antisymmetric\"`` are\n anti-symmetric versions of ``\"reflect\"`` and ``\"symmetric\"``. The mode\n `\"line\"` extends the signal based on a linear trend defined by the\n first and last points along the ``axis``.\n\n .. versionadded:: 1.4.0\n cval : float, optional\n The constant value to use when ``mode == \"constant\"``.\n\n .. versionadded:: 1.4.0\n\n Returns\n -------\n y : ndarray\n The output signal array. Dimensions will be the same as `x` except\n for along `axis`, which will change size according to the `h`,\n `up`, and `down` parameters.\n\n Notes\n -----\n The algorithm is an implementation of the block diagram shown on page 129\n of the Vaidyanathan text [1]_ (Figure 4.3-8d).\n\n .. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks,\n Prentice Hall, 1993.\n\n The direct approach of upsampling by factor of P with zero insertion,\n FIR filtering of length ``N``, and downsampling by factor of Q is\n O(N*Q) per output sample. The polyphase implementation used here is\n O(N/P).\n\n .. versionadded:: 0.18\n\n Examples\n --------\n Simple operations:\n\n >>> from scipy.signal import upfirdn\n >>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter\n array([ 1., 2., 3., 2., 1.])\n >>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion\n array([ 1., 0., 0., 2., 0., 0., 3., 0., 0.])\n >>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold\n array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.])\n >>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation\n array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5, 0. ])\n >>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3\n array([ 0., 3., 6., 9.])\n >>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3\n array([ 0. , 1. , 2.5, 4. , 5.5, 7. , 8.5, 0. 
])\n\n Apply a single filter to multiple signals:\n\n >>> x = np.reshape(np.arange(8), (4, 2))\n >>> x\n array([[0, 1],\n [2, 3],\n [4, 5],\n [6, 7]])\n\n Apply along the last dimension of ``x``:\n\n >>> h = [1, 1]\n >>> upfirdn(h, x, 2)\n array([[ 0., 0., 1., 1.],\n [ 2., 2., 3., 3.],\n [ 4., 4., 5., 5.],\n [ 6., 6., 7., 7.]])\n\n Apply along the 0th dimension of ``x``:\n\n >>> upfirdn(h, x, 2, axis=0)\n array([[ 0., 1.],\n [ 0., 1.],\n [ 2., 3.],\n [ 2., 3.],\n [ 4., 5.],\n [ 4., 5.],\n [ 6., 7.],\n [ 6., 7.]])\n\n \"\"\"\n x = np.asarray(x)\n ufd = _UpFIRDn(h, x.dtype, up, down)\n # This is equivalent to (but faster than) using np.apply_along_axis\n return ufd.apply_filter(x, axis, mode, cval)\n", "path": "scipy/signal/_upfirdn.py"}], "after_files": [{"content": "# Code adapted from \"upfirdn\" python library with permission:\n#\n# Copyright (c) 2009, Motorola, Inc\n#\n# All Rights Reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# * Neither the name of Motorola nor the names of its contributors may be\n# used to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport numpy as np\n\nfrom ._upfirdn_apply import _output_len, _apply, mode_enum\n\n__all__ = ['upfirdn', '_output_len']\n\n_upfirdn_modes = [\n 'constant', 'wrap', 'edge', 'smooth', 'symmetric', 'reflect',\n 'antisymmetric', 'antireflect', 'line',\n]\n\n\ndef _pad_h(h, up):\n \"\"\"Store coefficients in a transposed, flipped arrangement.\n\n For example, suppose upRate is 3, and the\n input number of coefficients is 10, represented as h[0], ..., h[9].\n\n Then the internal buffer will look like this::\n\n h[9], h[6], h[3], h[0], // flipped phase 0 coefs\n 0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded)\n 0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded)\n\n \"\"\"\n h_padlen = len(h) + (-len(h) % up)\n h_full = np.zeros(h_padlen, h.dtype)\n h_full[:len(h)] = h\n h_full = h_full.reshape(-1, up).T[:, ::-1].ravel()\n return h_full\n\n\ndef _check_mode(mode):\n mode = mode.lower()\n enum = mode_enum(mode)\n return enum\n\n\nclass _UpFIRDn(object):\n def __init__(self, h, x_dtype, up, down):\n \"\"\"Helper for resampling\"\"\"\n h = np.asarray(h)\n if h.ndim != 1 or h.size == 0:\n raise ValueError('h must be 1-D with non-zero length')\n self._output_type = np.result_type(h.dtype, x_dtype, np.float32)\n h = np.asarray(h, self._output_type)\n self._up = int(up)\n self._down = int(down)\n if self._up < 1 or self._down < 1:\n raise ValueError('Both up and down must be >= 1')\n # This both transposes, and \"flips\" each phase for filtering\n self._h_trans_flip = _pad_h(h, self._up)\n self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip)\n\n def apply_filter(self, x, axis=-1, mode='constant', cval=0):\n \"\"\"Apply the prepared filter to the specified axis of a N-D signal x\"\"\"\n output_len = _output_len(len(self._h_trans_flip), x.shape[axis],\n self._up, self._down)\n # Explicit use of np.int64 for output_shape dtype avoids OverflowError\n # when allocating large array on platforms where np.int_ is 32 bits\n output_shape = np.asarray(x.shape, dtype=np.int64)\n output_shape[axis] = output_len\n out = np.zeros(output_shape, dtype=self._output_type, order='C')\n axis = axis % x.ndim\n mode = _check_mode(mode)\n _apply(np.asarray(x, self._output_type),\n self._h_trans_flip, out,\n self._up, self._down, axis, mode, cval)\n return out\n\n\ndef upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0):\n \"\"\"Upsample, FIR filter, and downsample\n\n Parameters\n ----------\n h : array_like\n 1-D FIR (finite-impulse response) filter coefficients.\n x : array_like\n Input signal array.\n up : int, optional\n Upsampling rate. Default is 1.\n down : int, optional\n Downsampling rate. Default is 1.\n axis : int, optional\n The axis of the input data array along which to apply the\n linear filter. The filter is applied to each subarray along\n this axis. Default is -1.\n mode : str, optional\n The signal extension mode to use. 
The set\n ``{\"constant\", \"symmetric\", \"reflect\", \"edge\", \"wrap\"}`` correspond to\n modes provided by `numpy.pad`. ``\"smooth\"`` implements a smooth\n extension by extending based on the slope of the last 2 points at each\n end of the array. ``\"antireflect\"`` and ``\"antisymmetric\"`` are\n anti-symmetric versions of ``\"reflect\"`` and ``\"symmetric\"``. The mode\n `\"line\"` extends the signal based on a linear trend defined by the\n first and last points along the ``axis``.\n\n .. versionadded:: 1.4.0\n cval : float, optional\n The constant value to use when ``mode == \"constant\"``.\n\n .. versionadded:: 1.4.0\n\n Returns\n -------\n y : ndarray\n The output signal array. Dimensions will be the same as `x` except\n for along `axis`, which will change size according to the `h`,\n `up`, and `down` parameters.\n\n Notes\n -----\n The algorithm is an implementation of the block diagram shown on page 129\n of the Vaidyanathan text [1]_ (Figure 4.3-8d).\n\n .. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks,\n Prentice Hall, 1993.\n\n The direct approach of upsampling by factor of P with zero insertion,\n FIR filtering of length ``N``, and downsampling by factor of Q is\n O(N*Q) per output sample. The polyphase implementation used here is\n O(N/P).\n\n .. versionadded:: 0.18\n\n Examples\n --------\n Simple operations:\n\n >>> from scipy.signal import upfirdn\n >>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter\n array([ 1., 2., 3., 2., 1.])\n >>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion\n array([ 1., 0., 0., 2., 0., 0., 3., 0., 0.])\n >>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold\n array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.])\n >>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation\n array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5, 0. ])\n >>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3\n array([ 0., 3., 6., 9.])\n >>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3\n array([ 0. , 1. , 2.5, 4. , 5.5, 7. , 8.5, 0. ])\n\n Apply a single filter to multiple signals:\n\n >>> x = np.reshape(np.arange(8), (4, 2))\n >>> x\n array([[0, 1],\n [2, 3],\n [4, 5],\n [6, 7]])\n\n Apply along the last dimension of ``x``:\n\n >>> h = [1, 1]\n >>> upfirdn(h, x, 2)\n array([[ 0., 0., 1., 1.],\n [ 2., 2., 3., 3.],\n [ 4., 4., 5., 5.],\n [ 6., 6., 7., 7.]])\n\n Apply along the 0th dimension of ``x``:\n\n >>> upfirdn(h, x, 2, axis=0)\n array([[ 0., 1.],\n [ 0., 1.],\n [ 2., 3.],\n [ 2., 3.],\n [ 4., 5.],\n [ 4., 5.],\n [ 6., 7.],\n [ 6., 7.]])\n\n \"\"\"\n x = np.asarray(x)\n ufd = _UpFIRDn(h, x.dtype, up, down)\n # This is equivalent to (but faster than) using np.apply_along_axis\n return ufd.apply_filter(x, axis, mode, cval)\n", "path": "scipy/signal/_upfirdn.py"}]}
3,469
206
gh_patches_debug_60583
rasdani/github-patches
git_diff
fonttools__fonttools-1715
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ascender and ascent The [opentype spec ](https://docs.microsoft.com/en-gb/typography/opentype/spec/hhea) calls the first two substantive entries in the `hhea` table "`ascender`" and "`descender`". fonttools calls them "`ascent`" and "`descent`". This was surprising! Maybe it's too late to change then but can we at least have an alias? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `Lib/fontTools/ttLib/tables/_h_h_e_a.py` Content: ``` 1 from fontTools.misc.py23 import * 2 from fontTools.misc import sstruct 3 from fontTools.misc.textTools import safeEval 4 from fontTools.misc.fixedTools import ( 5 ensureVersionIsLong as fi2ve, versionToFixed as ve2fi) 6 from . import DefaultTable 7 import math 8 9 10 hheaFormat = """ 11 > # big endian 12 tableVersion: L 13 ascent: h 14 descent: h 15 lineGap: h 16 advanceWidthMax: H 17 minLeftSideBearing: h 18 minRightSideBearing: h 19 xMaxExtent: h 20 caretSlopeRise: h 21 caretSlopeRun: h 22 caretOffset: h 23 reserved0: h 24 reserved1: h 25 reserved2: h 26 reserved3: h 27 metricDataFormat: h 28 numberOfHMetrics: H 29 """ 30 31 32 class table__h_h_e_a(DefaultTable.DefaultTable): 33 34 # Note: Keep in sync with table__v_h_e_a 35 36 dependencies = ['hmtx', 'glyf', 'CFF '] 37 38 def decompile(self, data, ttFont): 39 sstruct.unpack(hheaFormat, data, self) 40 41 def compile(self, ttFont): 42 if ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ')): 43 self.recalc(ttFont) 44 self.tableVersion = fi2ve(self.tableVersion) 45 return sstruct.pack(hheaFormat, self) 46 47 def recalc(self, ttFont): 48 if 'hmtx' in ttFont: 49 hmtxTable = ttFont['hmtx'] 50 self.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values()) 51 52 boundsWidthDict = {} 53 if 'glyf' in ttFont: 54 glyfTable = ttFont['glyf'] 55 for name in ttFont.getGlyphOrder(): 56 g = glyfTable[name] 57 if g.numberOfContours == 0: 58 continue 59 if g.numberOfContours < 0 and not hasattr(g, "xMax"): 60 # Composite glyph without extents set. 61 # Calculate those. 62 g.recalcBounds(glyfTable) 63 boundsWidthDict[name] = g.xMax - g.xMin 64 elif 'CFF ' in ttFont: 65 topDict = ttFont['CFF '].cff.topDictIndex[0] 66 charStrings = topDict.CharStrings 67 for name in ttFont.getGlyphOrder(): 68 cs = charStrings[name] 69 bounds = cs.calcBounds(charStrings) 70 if bounds is not None: 71 boundsWidthDict[name] = int( 72 math.ceil(bounds[2]) - math.floor(bounds[0])) 73 74 if boundsWidthDict: 75 minLeftSideBearing = float('inf') 76 minRightSideBearing = float('inf') 77 xMaxExtent = -float('inf') 78 for name, boundsWidth in boundsWidthDict.items(): 79 advanceWidth, lsb = hmtxTable[name] 80 rsb = advanceWidth - lsb - boundsWidth 81 extent = lsb + boundsWidth 82 minLeftSideBearing = min(minLeftSideBearing, lsb) 83 minRightSideBearing = min(minRightSideBearing, rsb) 84 xMaxExtent = max(xMaxExtent, extent) 85 self.minLeftSideBearing = minLeftSideBearing 86 self.minRightSideBearing = minRightSideBearing 87 self.xMaxExtent = xMaxExtent 88 89 else: # No glyph has outlines. 
90 self.minLeftSideBearing = 0 91 self.minRightSideBearing = 0 92 self.xMaxExtent = 0 93 94 def toXML(self, writer, ttFont): 95 formatstring, names, fixes = sstruct.getformat(hheaFormat) 96 for name in names: 97 value = getattr(self, name) 98 if name == "tableVersion": 99 value = fi2ve(value) 100 value = "0x%08x" % value 101 writer.simpletag(name, value=value) 102 writer.newline() 103 104 def fromXML(self, name, attrs, content, ttFont): 105 if name == "tableVersion": 106 setattr(self, name, ve2fi(attrs["value"])) 107 return 108 setattr(self, name, safeEval(attrs["value"])) 109 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Lib/fontTools/ttLib/tables/_h_h_e_a.py b/Lib/fontTools/ttLib/tables/_h_h_e_a.py --- a/Lib/fontTools/ttLib/tables/_h_h_e_a.py +++ b/Lib/fontTools/ttLib/tables/_h_h_e_a.py @@ -35,6 +35,19 @@ dependencies = ['hmtx', 'glyf', 'CFF '] + # OpenType spec renamed these, add aliases for compatibility + @property + def ascender(self): return self.ascent + + @ascender.setter + def ascender(self,value): self.ascent = value + + @property + def descender(self): return self.descent + + @descender.setter + def descender(self,value): self.descent = value + def decompile(self, data, ttFont): sstruct.unpack(hheaFormat, data, self)
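The aliasing in this patch is ordinary Python property machinery. A standalone sketch of the same read/write pass-through pattern, using a toy class rather than fontTools itself:

```python
# Toy stand-in for the hhea table object; only the alias pattern is shown.
class HheaLike:
    def __init__(self):
        self.ascent = 800
        self.descent = -200

    @property
    def ascender(self):
        return self.ascent

    @ascender.setter
    def ascender(self, value):
        self.ascent = value

    @property
    def descender(self):
        return self.descent

    @descender.setter
    def descender(self, value):
        self.descent = value

hhea = HheaLike()
hhea.ascender = 750                  # spec-style name writes through to ascent
print(hhea.ascent, hhea.descender)   # 750 -200
```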
{"golden_diff": "diff --git a/Lib/fontTools/ttLib/tables/_h_h_e_a.py b/Lib/fontTools/ttLib/tables/_h_h_e_a.py\n--- a/Lib/fontTools/ttLib/tables/_h_h_e_a.py\n+++ b/Lib/fontTools/ttLib/tables/_h_h_e_a.py\n@@ -35,6 +35,19 @@\n \n \tdependencies = ['hmtx', 'glyf', 'CFF ']\n \n+\t# OpenType spec renamed these, add aliases for compatibility\n+\t@property\n+\tdef ascender(self): return self.ascent\n+\n+\[email protected]\n+\tdef ascender(self,value): self.ascent = value\n+\n+\t@property\n+\tdef descender(self): return self.descent\n+\n+\[email protected]\n+\tdef descender(self,value): self.descent = value\n+\n \tdef decompile(self, data, ttFont):\n \t\tsstruct.unpack(hheaFormat, data, self)\n", "issue": "ascender and ascent\nThe [opentype spec ](https://docs.microsoft.com/en-gb/typography/opentype/spec/hhea) calls the first two substantive entries in the `hhea` table \"`ascender`\" and \"`descender`\". fonttools calls them \"`ascent`\" and \"`descent`\".\r\n\r\nThis was surprising! Maybe it's too late to change then but can we at least have an alias?\n", "before_files": [{"content": "from fontTools.misc.py23 import *\nfrom fontTools.misc import sstruct\nfrom fontTools.misc.textTools import safeEval\nfrom fontTools.misc.fixedTools import (\n\tensureVersionIsLong as fi2ve, versionToFixed as ve2fi)\nfrom . import DefaultTable\nimport math\n\n\nhheaFormat = \"\"\"\n\t\t> # big endian\n\t\ttableVersion: L\n\t\tascent: h\n\t\tdescent: h\n\t\tlineGap: h\n\t\tadvanceWidthMax: H\n\t\tminLeftSideBearing: h\n\t\tminRightSideBearing: h\n\t\txMaxExtent: h\n\t\tcaretSlopeRise: h\n\t\tcaretSlopeRun: h\n\t\tcaretOffset: h\n\t\treserved0: h\n\t\treserved1: h\n\t\treserved2: h\n\t\treserved3: h\n\t\tmetricDataFormat: h\n\t\tnumberOfHMetrics: H\n\"\"\"\n\n\nclass table__h_h_e_a(DefaultTable.DefaultTable):\n\n\t# Note: Keep in sync with table__v_h_e_a\n\n\tdependencies = ['hmtx', 'glyf', 'CFF ']\n\n\tdef decompile(self, data, ttFont):\n\t\tsstruct.unpack(hheaFormat, data, self)\n\n\tdef compile(self, ttFont):\n\t\tif ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ')):\n\t\t\tself.recalc(ttFont)\n\t\tself.tableVersion = fi2ve(self.tableVersion)\n\t\treturn sstruct.pack(hheaFormat, self)\n\n\tdef recalc(self, ttFont):\n\t\tif 'hmtx' in ttFont:\n\t\t\thmtxTable = ttFont['hmtx']\n\t\t\tself.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())\n\n\t\tboundsWidthDict = {}\n\t\tif 'glyf' in ttFont:\n\t\t\tglyfTable = ttFont['glyf']\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\tg = glyfTable[name]\n\t\t\t\tif g.numberOfContours == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tif g.numberOfContours < 0 and not hasattr(g, \"xMax\"):\n\t\t\t\t\t# Composite glyph without extents set.\n\t\t\t\t\t# Calculate those.\n\t\t\t\t\tg.recalcBounds(glyfTable)\n\t\t\t\tboundsWidthDict[name] = g.xMax - g.xMin\n\t\telif 'CFF ' in ttFont:\n\t\t\ttopDict = ttFont['CFF '].cff.topDictIndex[0]\n\t\t\tcharStrings = topDict.CharStrings\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\tcs = charStrings[name]\n\t\t\t\tbounds = cs.calcBounds(charStrings)\n\t\t\t\tif bounds is not None:\n\t\t\t\t\tboundsWidthDict[name] = int(\n\t\t\t\t\t\tmath.ceil(bounds[2]) - math.floor(bounds[0]))\n\n\t\tif boundsWidthDict:\n\t\t\tminLeftSideBearing = float('inf')\n\t\t\tminRightSideBearing = float('inf')\n\t\t\txMaxExtent = -float('inf')\n\t\t\tfor name, boundsWidth in boundsWidthDict.items():\n\t\t\t\tadvanceWidth, lsb = hmtxTable[name]\n\t\t\t\trsb = advanceWidth - lsb - boundsWidth\n\t\t\t\textent = lsb + 
boundsWidth\n\t\t\t\tminLeftSideBearing = min(minLeftSideBearing, lsb)\n\t\t\t\tminRightSideBearing = min(minRightSideBearing, rsb)\n\t\t\t\txMaxExtent = max(xMaxExtent, extent)\n\t\t\tself.minLeftSideBearing = minLeftSideBearing\n\t\t\tself.minRightSideBearing = minRightSideBearing\n\t\t\tself.xMaxExtent = xMaxExtent\n\n\t\telse: # No glyph has outlines.\n\t\t\tself.minLeftSideBearing = 0\n\t\t\tself.minRightSideBearing = 0\n\t\t\tself.xMaxExtent = 0\n\n\tdef toXML(self, writer, ttFont):\n\t\tformatstring, names, fixes = sstruct.getformat(hheaFormat)\n\t\tfor name in names:\n\t\t\tvalue = getattr(self, name)\n\t\t\tif name == \"tableVersion\":\n\t\t\t\tvalue = fi2ve(value)\n\t\t\t\tvalue = \"0x%08x\" % value\n\t\t\twriter.simpletag(name, value=value)\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tif name == \"tableVersion\":\n\t\t\tsetattr(self, name, ve2fi(attrs[\"value\"]))\n\t\t\treturn\n\t\tsetattr(self, name, safeEval(attrs[\"value\"]))\n", "path": "Lib/fontTools/ttLib/tables/_h_h_e_a.py"}], "after_files": [{"content": "from fontTools.misc.py23 import *\nfrom fontTools.misc import sstruct\nfrom fontTools.misc.textTools import safeEval\nfrom fontTools.misc.fixedTools import (\n\tensureVersionIsLong as fi2ve, versionToFixed as ve2fi)\nfrom . import DefaultTable\nimport math\n\n\nhheaFormat = \"\"\"\n\t\t> # big endian\n\t\ttableVersion: L\n\t\tascent: h\n\t\tdescent: h\n\t\tlineGap: h\n\t\tadvanceWidthMax: H\n\t\tminLeftSideBearing: h\n\t\tminRightSideBearing: h\n\t\txMaxExtent: h\n\t\tcaretSlopeRise: h\n\t\tcaretSlopeRun: h\n\t\tcaretOffset: h\n\t\treserved0: h\n\t\treserved1: h\n\t\treserved2: h\n\t\treserved3: h\n\t\tmetricDataFormat: h\n\t\tnumberOfHMetrics: H\n\"\"\"\n\n\nclass table__h_h_e_a(DefaultTable.DefaultTable):\n\n\t# Note: Keep in sync with table__v_h_e_a\n\n\tdependencies = ['hmtx', 'glyf', 'CFF ']\n\n\t# OpenType spec renamed these, add aliases for compatibility\n\t@property\n\tdef ascender(self): return self.ascent\n\n\[email protected]\n\tdef ascender(self,value): self.ascent = value\n\n\t@property\n\tdef descender(self): return self.descent\n\n\[email protected]\n\tdef descender(self,value): self.descent = value\n\n\tdef decompile(self, data, ttFont):\n\t\tsstruct.unpack(hheaFormat, data, self)\n\n\tdef compile(self, ttFont):\n\t\tif ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ')):\n\t\t\tself.recalc(ttFont)\n\t\tself.tableVersion = fi2ve(self.tableVersion)\n\t\treturn sstruct.pack(hheaFormat, self)\n\n\tdef recalc(self, ttFont):\n\t\tif 'hmtx' in ttFont:\n\t\t\thmtxTable = ttFont['hmtx']\n\t\t\tself.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())\n\n\t\tboundsWidthDict = {}\n\t\tif 'glyf' in ttFont:\n\t\t\tglyfTable = ttFont['glyf']\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\tg = glyfTable[name]\n\t\t\t\tif g.numberOfContours == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tif g.numberOfContours < 0 and not hasattr(g, \"xMax\"):\n\t\t\t\t\t# Composite glyph without extents set.\n\t\t\t\t\t# Calculate those.\n\t\t\t\t\tg.recalcBounds(glyfTable)\n\t\t\t\tboundsWidthDict[name] = g.xMax - g.xMin\n\t\telif 'CFF ' in ttFont:\n\t\t\ttopDict = ttFont['CFF '].cff.topDictIndex[0]\n\t\t\tcharStrings = topDict.CharStrings\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\tcs = charStrings[name]\n\t\t\t\tbounds = cs.calcBounds(charStrings)\n\t\t\t\tif bounds is not None:\n\t\t\t\t\tboundsWidthDict[name] = int(\n\t\t\t\t\t\tmath.ceil(bounds[2]) - math.floor(bounds[0]))\n\n\t\tif 
boundsWidthDict:\n\t\t\tminLeftSideBearing = float('inf')\n\t\t\tminRightSideBearing = float('inf')\n\t\t\txMaxExtent = -float('inf')\n\t\t\tfor name, boundsWidth in boundsWidthDict.items():\n\t\t\t\tadvanceWidth, lsb = hmtxTable[name]\n\t\t\t\trsb = advanceWidth - lsb - boundsWidth\n\t\t\t\textent = lsb + boundsWidth\n\t\t\t\tminLeftSideBearing = min(minLeftSideBearing, lsb)\n\t\t\t\tminRightSideBearing = min(minRightSideBearing, rsb)\n\t\t\t\txMaxExtent = max(xMaxExtent, extent)\n\t\t\tself.minLeftSideBearing = minLeftSideBearing\n\t\t\tself.minRightSideBearing = minRightSideBearing\n\t\t\tself.xMaxExtent = xMaxExtent\n\n\t\telse: # No glyph has outlines.\n\t\t\tself.minLeftSideBearing = 0\n\t\t\tself.minRightSideBearing = 0\n\t\t\tself.xMaxExtent = 0\n\n\tdef toXML(self, writer, ttFont):\n\t\tformatstring, names, fixes = sstruct.getformat(hheaFormat)\n\t\tfor name in names:\n\t\t\tvalue = getattr(self, name)\n\t\t\tif name == \"tableVersion\":\n\t\t\t\tvalue = fi2ve(value)\n\t\t\t\tvalue = \"0x%08x\" % value\n\t\t\twriter.simpletag(name, value=value)\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tif name == \"tableVersion\":\n\t\t\tsetattr(self, name, ve2fi(attrs[\"value\"]))\n\t\t\treturn\n\t\tsetattr(self, name, safeEval(attrs[\"value\"]))\n", "path": "Lib/fontTools/ttLib/tables/_h_h_e_a.py"}]}
1,560
215
gh_patches_debug_16206
rasdani/github-patches
git_diff
frappe__frappe-15552
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- S3 automatic backup fails when backup folder is empty <!-- Welcome to the Frappe Framework issue tracker! Before creating an issue, please heed the following: 1. This tracker should only be used to report bugs and request features / enhancements to Frappe - For questions and general support, use https://stackoverflow.com/questions/tagged/frappe - For documentation issues, refer to https://frappeframework.com/docs/user/en or the developer cheetsheet https://github.com/frappe/frappe/wiki/Developer-Cheatsheet 2. Use the search function before creating a new issue. Duplicates will be closed and directed to the original discussion. 3. When making a bug report, make sure you provide all required information. The easier it is for maintainers to reproduce, the faster it'll be fixed. 4. If you think you know what the reason for the bug is, share it with us. Maybe put in a PR 😉 --> ## Description of the issue `frappe.integrations.doctype.s3_backup_settings.s3_backup_settings.take_backups_s3` fails when the backup folder (site_name/private/backups/) is empty when the function is called. This occurs because `frappe.integrations.offsite_backup_utils.get_file_size` does not anticipate a situation where its `file_path` argument is invalid i.e `None`. This bug affects v13 and v12. ## Context information (for bug reports) **Output of `bench version`** ``` frappe 12.24.0 ``` ## Steps to reproduce the issue 1. Delete _all_ files in the backup folder 2. Call `take_backups_s3` 3. ### Observed result When the scheduled backup task runs, you will receive an email with this kind of stack trace: ``` *Backup Upload Failed!* Oops, your automated backup to Amazon S3 failed. Error message: Traceback (most recent call last): File "/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/doctype/s3_backup_settings/s3_backup_settings.py", line 83, in take_backups_s3 validate_file_size() File "/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py", line 105, in validate_file_size file_size = get_file_size(latest_file, unit="GB") File "/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py", line 75, in get_file_size file_size = os.path.getsize(file_path) File "/usr/lib/python3.7/genericpath.py", line 50, in getsize return os.stat(filename).st_size TypeError: stat: path should be string, bytes, os.PathLike or integer, not NoneType Please contact your system manager for more information. ``` If you call it in your console, it will fail silently but you will observe that no backup file is created nor uploaded to S3. ### Expected result ### Stacktrace / full error message ``` Traceback (most recent call last): File "/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/doctype/s3_backup_settings/s3_backup_settings.py", line 83, in take_backups_s3 validate_file_size() File "/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py", line 105, in validate_file_size file_size = get_file_size(latest_file, unit="GB") File "/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py", line 75, in get_file_size file_size = os.path.getsize(file_path) File "/usr/lib/python3.7/genericpath.py", line 50, in getsize return os.stat(filename).st_size TypeError: stat: path should be string, bytes, os.PathLike or integer, not NoneType ``` ## Additional information OS version / distribution, `Frappe` install method, etc. 
--- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `frappe/integrations/offsite_backup_utils.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Copyright (c) 2019, Frappe Technologies and contributors 3 # License: MIT. See LICENSE 4 5 import frappe 6 import glob 7 import os 8 from frappe.utils import split_emails, cint 9 10 def send_email(success, service_name, doctype, email_field, error_status=None): 11 recipients = get_recipients(doctype, email_field) 12 if not recipients: 13 frappe.log_error( 14 "No Email Recipient found for {0}".format(service_name), 15 "{0}: Failed to send backup status email".format(service_name), 16 ) 17 return 18 19 if success: 20 if not frappe.db.get_single_value(doctype, "send_email_for_successful_backup"): 21 return 22 23 subject = "Backup Upload Successful" 24 message = """ 25 <h3>Backup Uploaded Successfully!</h3> 26 <p>Hi there, this is just to inform you that your backup was successfully uploaded to your {0} bucket. So relax!</p>""".format( 27 service_name 28 ) 29 else: 30 subject = "[Warning] Backup Upload Failed" 31 message = """ 32 <h3>Backup Upload Failed!</h3> 33 <p>Oops, your automated backup to {0} failed.</p> 34 <p>Error message: {1}</p> 35 <p>Please contact your system manager for more information.</p>""".format( 36 service_name, error_status 37 ) 38 39 frappe.sendmail(recipients=recipients, subject=subject, message=message) 40 41 42 def get_recipients(doctype, email_field): 43 if not frappe.db: 44 frappe.connect() 45 46 return split_emails(frappe.db.get_value(doctype, None, email_field)) 47 48 49 def get_latest_backup_file(with_files=False): 50 from frappe.utils.backups import BackupGenerator 51 52 odb = BackupGenerator( 53 frappe.conf.db_name, 54 frappe.conf.db_name, 55 frappe.conf.db_password, 56 db_host=frappe.db.host, 57 db_type=frappe.conf.db_type, 58 db_port=frappe.conf.db_port, 59 ) 60 database, public, private, config = odb.get_recent_backup(older_than=24 * 30) 61 62 if with_files: 63 return database, config, public, private 64 65 return database, config 66 67 68 def get_file_size(file_path, unit): 69 if not unit: 70 unit = "MB" 71 72 file_size = os.path.getsize(file_path) 73 74 memory_size_unit_mapper = {"KB": 1, "MB": 2, "GB": 3, "TB": 4} 75 i = 0 76 while i < memory_size_unit_mapper[unit]: 77 file_size = file_size / 1000.0 78 i += 1 79 80 return file_size 81 82 def get_chunk_site(file_size): 83 ''' this function will return chunk size in megabytes based on file size ''' 84 85 file_size_in_gb = cint(file_size/1024/1024) 86 87 MB = 1024 * 1024 88 if file_size_in_gb > 5000: 89 return 200 * MB 90 elif file_size_in_gb >= 3000: 91 return 150 * MB 92 elif file_size_in_gb >= 1000: 93 return 100 * MB 94 elif file_size_in_gb >= 500: 95 return 50 * MB 96 else: 97 return 15 * MB 98 99 def validate_file_size(): 100 frappe.flags.create_new_backup = True 101 latest_file, site_config = get_latest_backup_file() 102 file_size = get_file_size(latest_file, unit="GB") 103 104 if file_size > 1: 105 frappe.flags.create_new_backup = False 106 107 def generate_files_backup(): 108 from frappe.utils.backups import BackupGenerator 109 110 backup = BackupGenerator(frappe.conf.db_name, frappe.conf.db_name, 111 frappe.conf.db_password, db_host = frappe.db.host, 112 db_type=frappe.conf.db_type, db_port=frappe.conf.db_port) 113 114 backup.set_backup_file_name() 115 backup.zip_files() 116 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then 
generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/frappe/integrations/offsite_backup_utils.py b/frappe/integrations/offsite_backup_utils.py --- a/frappe/integrations/offsite_backup_utils.py +++ b/frappe/integrations/offsite_backup_utils.py @@ -65,10 +65,7 @@ return database, config -def get_file_size(file_path, unit): - if not unit: - unit = "MB" - +def get_file_size(file_path, unit='MB'): file_size = os.path.getsize(file_path) memory_size_unit_mapper = {"KB": 1, "MB": 2, "GB": 3, "TB": 4} @@ -99,7 +96,7 @@ def validate_file_size(): frappe.flags.create_new_backup = True latest_file, site_config = get_latest_backup_file() - file_size = get_file_size(latest_file, unit="GB") + file_size = get_file_size(latest_file, unit="GB") if latest_file else 0 if file_size > 1: frappe.flags.create_new_backup = False
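The conditional expression is the whole fix. A minimal sketch with simplified stand-ins for the real helpers (the function body and the GB conversion below are illustrative, not Frappe's actual code):

```python
import os

def get_file_size(file_path, unit="GB"):
    # Simplified: bytes -> GB via the same 1000-based steps as the original helper.
    return os.path.getsize(file_path) / 1000.0 / 1000.0 / 1000.0

latest_file = None  # what an empty backups folder yields for the latest backup
file_size = get_file_size(latest_file, unit="GB") if latest_file else 0
print(file_size)  # 0, and os.path.getsize(None) is never reached
```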
{"golden_diff": "diff --git a/frappe/integrations/offsite_backup_utils.py b/frappe/integrations/offsite_backup_utils.py\n--- a/frappe/integrations/offsite_backup_utils.py\n+++ b/frappe/integrations/offsite_backup_utils.py\n@@ -65,10 +65,7 @@\n \treturn database, config\n \n \n-def get_file_size(file_path, unit):\n-\tif not unit:\n-\t\tunit = \"MB\"\n-\n+def get_file_size(file_path, unit='MB'):\n \tfile_size = os.path.getsize(file_path)\n \n \tmemory_size_unit_mapper = {\"KB\": 1, \"MB\": 2, \"GB\": 3, \"TB\": 4}\n@@ -99,7 +96,7 @@\n def validate_file_size():\n \tfrappe.flags.create_new_backup = True\n \tlatest_file, site_config = get_latest_backup_file()\n-\tfile_size = get_file_size(latest_file, unit=\"GB\")\n+\tfile_size = get_file_size(latest_file, unit=\"GB\") if latest_file else 0\n \n \tif file_size > 1:\n \t\tfrappe.flags.create_new_backup = False\n", "issue": "S3 automatic backup fails when backup folder is empty\n<!--\r\nWelcome to the Frappe Framework issue tracker! Before creating an issue, please heed the following:\r\n\r\n1. This tracker should only be used to report bugs and request features / enhancements to Frappe\r\n - For questions and general support, use https://stackoverflow.com/questions/tagged/frappe\r\n - For documentation issues, refer to https://frappeframework.com/docs/user/en or the developer cheetsheet https://github.com/frappe/frappe/wiki/Developer-Cheatsheet\r\n2. Use the search function before creating a new issue. Duplicates will be closed and directed to\r\n the original discussion.\r\n3. When making a bug report, make sure you provide all required information. The easier it is for\r\n maintainers to reproduce, the faster it'll be fixed.\r\n4. If you think you know what the reason for the bug is, share it with us. Maybe put in a PR \ud83d\ude09\r\n-->\r\n\r\n## Description of the issue\r\n`frappe.integrations.doctype.s3_backup_settings.s3_backup_settings.take_backups_s3` fails when the backup folder (site_name/private/backups/) is empty when the function is called. \r\n\r\nThis occurs because `frappe.integrations.offsite_backup_utils.get_file_size` does not anticipate a situation where its `file_path` argument is invalid i.e `None`.\r\n\r\nThis bug affects v13 and v12.\r\n\r\n## Context information (for bug reports)\r\n\r\n**Output of `bench version`**\r\n```\r\nfrappe 12.24.0\r\n```\r\n\r\n## Steps to reproduce the issue\r\n\r\n1. Delete _all_ files in the backup folder\r\n2. 
Call `take_backups_s3`\r\n3.\r\n\r\n### Observed result\r\nWhen the scheduled backup task runs, you will receive an email with this kind of stack trace:\r\n```\r\n*Backup Upload Failed!*\r\n\r\nOops, your automated backup to Amazon S3 failed.\r\n\r\nError message: Traceback (most recent call last): File \"/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/doctype/s3_backup_settings/s3_backup_settings.py\", line 83, in take_backups_s3 validate_file_size() File \"/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py\", line 105, in validate_file_size file_size = get_file_size(latest_file, unit=\"GB\") File \"/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py\", line 75, in get_file_size file_size = os.path.getsize(file_path) File \"/usr/lib/python3.7/genericpath.py\", line 50, in getsize return os.stat(filename).st_size TypeError: stat: path should be string, bytes, os.PathLike or integer, not NoneType\r\n\r\nPlease contact your system manager for more information.\r\n```\r\nIf you call it in your console, it will fail silently but you will observe that no backup file is created nor uploaded to S3.\r\n\r\n### Expected result\r\n\r\n\r\n### Stacktrace / full error message\r\n\r\n```\r\nTraceback (most recent call last): File \"/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/doctype/s3_backup_settings/s3_backup_settings.py\", line 83, in take_backups_s3 validate_file_size() File \"/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py\", line 105, in validate_file_size file_size = get_file_size(latest_file, unit=\"GB\") File \"/home/ubuntu/frappe-bench/apps/frappe/frappe/integrations/offsite_backup_utils.py\", line 75, in get_file_size file_size = os.path.getsize(file_path) File \"/usr/lib/python3.7/genericpath.py\", line 50, in getsize return os.stat(filename).st_size TypeError: stat: path should be string, bytes, os.PathLike or integer, not NoneType\r\n```\r\n\r\n## Additional information\r\n\r\nOS version / distribution, `Frappe` install method, etc.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2019, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nimport frappe\nimport glob\nimport os\nfrom frappe.utils import split_emails, cint\n\ndef send_email(success, service_name, doctype, email_field, error_status=None):\n\trecipients = get_recipients(doctype, email_field)\n\tif not recipients:\n\t\tfrappe.log_error(\n\t\t\t\"No Email Recipient found for {0}\".format(service_name),\n\t\t\t\"{0}: Failed to send backup status email\".format(service_name),\n\t\t)\n\t\treturn\n\n\tif success:\n\t\tif not frappe.db.get_single_value(doctype, \"send_email_for_successful_backup\"):\n\t\t\treturn\n\n\t\tsubject = \"Backup Upload Successful\"\n\t\tmessage = \"\"\"\n<h3>Backup Uploaded Successfully!</h3>\n<p>Hi there, this is just to inform you that your backup was successfully uploaded to your {0} bucket. 
So relax!</p>\"\"\".format(\n\t\t\tservice_name\n\t\t)\n\telse:\n\t\tsubject = \"[Warning] Backup Upload Failed\"\n\t\tmessage = \"\"\"\n<h3>Backup Upload Failed!</h3>\n<p>Oops, your automated backup to {0} failed.</p>\n<p>Error message: {1}</p>\n<p>Please contact your system manager for more information.</p>\"\"\".format(\n\t\t\tservice_name, error_status\n\t\t)\n\n\tfrappe.sendmail(recipients=recipients, subject=subject, message=message)\n\n\ndef get_recipients(doctype, email_field):\n\tif not frappe.db:\n\t\tfrappe.connect()\n\n\treturn split_emails(frappe.db.get_value(doctype, None, email_field))\n\n\ndef get_latest_backup_file(with_files=False):\n\tfrom frappe.utils.backups import BackupGenerator\n\n\todb = BackupGenerator(\n\t\tfrappe.conf.db_name,\n\t\tfrappe.conf.db_name,\n\t\tfrappe.conf.db_password,\n\t\tdb_host=frappe.db.host,\n\t\tdb_type=frappe.conf.db_type,\n\t\tdb_port=frappe.conf.db_port,\n\t)\n\tdatabase, public, private, config = odb.get_recent_backup(older_than=24 * 30)\n\n\tif with_files:\n\t\treturn database, config, public, private\n\n\treturn database, config\n\n\ndef get_file_size(file_path, unit):\n\tif not unit:\n\t\tunit = \"MB\"\n\n\tfile_size = os.path.getsize(file_path)\n\n\tmemory_size_unit_mapper = {\"KB\": 1, \"MB\": 2, \"GB\": 3, \"TB\": 4}\n\ti = 0\n\twhile i < memory_size_unit_mapper[unit]:\n\t\tfile_size = file_size / 1000.0\n\t\ti += 1\n\n\treturn file_size\n\ndef get_chunk_site(file_size):\n\t''' this function will return chunk size in megabytes based on file size '''\n\n\tfile_size_in_gb = cint(file_size/1024/1024)\n\n\tMB = 1024 * 1024\n\tif file_size_in_gb > 5000:\n\t\treturn 200 * MB\n\telif file_size_in_gb >= 3000:\n\t\treturn 150 * MB\n\telif file_size_in_gb >= 1000:\n\t\treturn 100 * MB\n\telif file_size_in_gb >= 500:\n\t\treturn 50 * MB\n\telse:\n\t\treturn 15 * MB\n\ndef validate_file_size():\n\tfrappe.flags.create_new_backup = True\n\tlatest_file, site_config = get_latest_backup_file()\n\tfile_size = get_file_size(latest_file, unit=\"GB\")\n\n\tif file_size > 1:\n\t\tfrappe.flags.create_new_backup = False\n\ndef generate_files_backup():\n\tfrom frappe.utils.backups import BackupGenerator\n\n\tbackup = BackupGenerator(frappe.conf.db_name, frappe.conf.db_name,\n\t\tfrappe.conf.db_password, db_host = frappe.db.host,\n\t\tdb_type=frappe.conf.db_type, db_port=frappe.conf.db_port)\n\n\tbackup.set_backup_file_name()\n\tbackup.zip_files()\n", "path": "frappe/integrations/offsite_backup_utils.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2019, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nimport frappe\nimport glob\nimport os\nfrom frappe.utils import split_emails, cint\n\ndef send_email(success, service_name, doctype, email_field, error_status=None):\n\trecipients = get_recipients(doctype, email_field)\n\tif not recipients:\n\t\tfrappe.log_error(\n\t\t\t\"No Email Recipient found for {0}\".format(service_name),\n\t\t\t\"{0}: Failed to send backup status email\".format(service_name),\n\t\t)\n\t\treturn\n\n\tif success:\n\t\tif not frappe.db.get_single_value(doctype, \"send_email_for_successful_backup\"):\n\t\t\treturn\n\n\t\tsubject = \"Backup Upload Successful\"\n\t\tmessage = \"\"\"\n<h3>Backup Uploaded Successfully!</h3>\n<p>Hi there, this is just to inform you that your backup was successfully uploaded to your {0} bucket. 
So relax!</p>\"\"\".format(\n\t\t\tservice_name\n\t\t)\n\telse:\n\t\tsubject = \"[Warning] Backup Upload Failed\"\n\t\tmessage = \"\"\"\n<h3>Backup Upload Failed!</h3>\n<p>Oops, your automated backup to {0} failed.</p>\n<p>Error message: {1}</p>\n<p>Please contact your system manager for more information.</p>\"\"\".format(\n\t\t\tservice_name, error_status\n\t\t)\n\n\tfrappe.sendmail(recipients=recipients, subject=subject, message=message)\n\n\ndef get_recipients(doctype, email_field):\n\tif not frappe.db:\n\t\tfrappe.connect()\n\n\treturn split_emails(frappe.db.get_value(doctype, None, email_field))\n\n\ndef get_latest_backup_file(with_files=False):\n\tfrom frappe.utils.backups import BackupGenerator\n\n\todb = BackupGenerator(\n\t\tfrappe.conf.db_name,\n\t\tfrappe.conf.db_name,\n\t\tfrappe.conf.db_password,\n\t\tdb_host=frappe.db.host,\n\t\tdb_type=frappe.conf.db_type,\n\t\tdb_port=frappe.conf.db_port,\n\t)\n\tdatabase, public, private, config = odb.get_recent_backup(older_than=24 * 30)\n\n\tif with_files:\n\t\treturn database, config, public, private\n\n\treturn database, config\n\n\ndef get_file_size(file_path, unit='MB'):\n\tfile_size = os.path.getsize(file_path)\n\n\tmemory_size_unit_mapper = {\"KB\": 1, \"MB\": 2, \"GB\": 3, \"TB\": 4}\n\ti = 0\n\twhile i < memory_size_unit_mapper[unit]:\n\t\tfile_size = file_size / 1000.0\n\t\ti += 1\n\n\treturn file_size\n\ndef get_chunk_site(file_size):\n\t''' this function will return chunk size in megabytes based on file size '''\n\n\tfile_size_in_gb = cint(file_size/1024/1024)\n\n\tMB = 1024 * 1024\n\tif file_size_in_gb > 5000:\n\t\treturn 200 * MB\n\telif file_size_in_gb >= 3000:\n\t\treturn 150 * MB\n\telif file_size_in_gb >= 1000:\n\t\treturn 100 * MB\n\telif file_size_in_gb >= 500:\n\t\treturn 50 * MB\n\telse:\n\t\treturn 15 * MB\n\ndef validate_file_size():\n\tfrappe.flags.create_new_backup = True\n\tlatest_file, site_config = get_latest_backup_file()\n\tfile_size = get_file_size(latest_file, unit=\"GB\") if latest_file else 0\n\n\tif file_size > 1:\n\t\tfrappe.flags.create_new_backup = False\n\ndef generate_files_backup():\n\tfrom frappe.utils.backups import BackupGenerator\n\n\tbackup = BackupGenerator(frappe.conf.db_name, frappe.conf.db_name,\n\t\tfrappe.conf.db_password, db_host = frappe.db.host,\n\t\tdb_type=frappe.conf.db_type, db_port=frappe.conf.db_port)\n\n\tbackup.set_backup_file_name()\n\tbackup.zip_files()\n", "path": "frappe/integrations/offsite_backup_utils.py"}]}
2,289
246
gh_patches_debug_8925
rasdani/github-patches
git_diff
freqtrade__freqtrade-3200
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Docker image making logfiles in user_data docker image places error logfiles in user_data by default. (apparently it should not be doing that) Maybe cud have it put them in a log dir? docker-compose.yml command: > trade --logfile /freqtrade/user_data/freqtrade.log can maybe be changed to --logfile /freqtrade/user_data/log/freqtrade.log --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `freqtrade/configuration/directory_operations.py` Content: ``` 1 import logging 2 import shutil 3 from pathlib import Path 4 from typing import Any, Dict, Optional 5 6 from freqtrade.exceptions import OperationalException 7 from freqtrade.constants import USER_DATA_FILES 8 9 logger = logging.getLogger(__name__) 10 11 12 def create_datadir(config: Dict[str, Any], datadir: Optional[str] = None) -> Path: 13 14 folder = Path(datadir) if datadir else Path(f"{config['user_data_dir']}/data") 15 if not datadir: 16 # set datadir 17 exchange_name = config.get('exchange', {}).get('name').lower() 18 folder = folder.joinpath(exchange_name) 19 20 if not folder.is_dir(): 21 folder.mkdir(parents=True) 22 logger.info(f'Created data directory: {datadir}') 23 return folder 24 25 26 def create_userdata_dir(directory: str, create_dir: bool = False) -> Path: 27 """ 28 Create userdata directory structure. 29 if create_dir is True, then the parent-directory will be created if it does not exist. 30 Sub-directories will always be created if the parent directory exists. 31 Raises OperationalException if given a non-existing directory. 32 :param directory: Directory to check 33 :param create_dir: Create directory if it does not exist. 34 :return: Path object containing the directory 35 """ 36 sub_dirs = ["backtest_results", "data", "hyperopts", "hyperopt_results", "notebooks", 37 "plot", "strategies", ] 38 folder = Path(directory) 39 if not folder.is_dir(): 40 if create_dir: 41 folder.mkdir(parents=True) 42 logger.info(f'Created user-data directory: {folder}') 43 else: 44 raise OperationalException( 45 f"Directory `{folder}` does not exist. " 46 "Please use `freqtrade create-userdir` to create a user directory") 47 48 # Create required subdirectories 49 for f in sub_dirs: 50 subfolder = folder / f 51 if not subfolder.is_dir(): 52 subfolder.mkdir(parents=False) 53 return folder 54 55 56 def copy_sample_files(directory: Path, overwrite: bool = False) -> None: 57 """ 58 Copy files from templates to User data directory. 
59 :param directory: Directory to copy data to 60 :param overwrite: Overwrite existing sample files 61 """ 62 if not directory.is_dir(): 63 raise OperationalException(f"Directory `{directory}` does not exist.") 64 sourcedir = Path(__file__).parents[1] / "templates" 65 for source, target in USER_DATA_FILES.items(): 66 targetdir = directory / target 67 if not targetdir.is_dir(): 68 raise OperationalException(f"Directory `{targetdir}` does not exist.") 69 targetfile = targetdir / source 70 if targetfile.exists(): 71 if not overwrite: 72 logger.warning(f"File `{targetfile}` exists already, not deploying sample file.") 73 continue 74 else: 75 logger.warning(f"File `{targetfile}` exists already, overwriting.") 76 shutil.copy(str(sourcedir / source), str(targetfile)) 77 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/freqtrade/configuration/directory_operations.py b/freqtrade/configuration/directory_operations.py --- a/freqtrade/configuration/directory_operations.py +++ b/freqtrade/configuration/directory_operations.py @@ -33,8 +33,8 @@ :param create_dir: Create directory if it does not exist. :return: Path object containing the directory """ - sub_dirs = ["backtest_results", "data", "hyperopts", "hyperopt_results", "notebooks", - "plot", "strategies", ] + sub_dirs = ["backtest_results", "data", "hyperopts", "hyperopt_results", "logs", + "notebooks", "plot", "strategies", ] folder = Path(directory) if not folder.is_dir(): if create_dir:
{"golden_diff": "diff --git a/freqtrade/configuration/directory_operations.py b/freqtrade/configuration/directory_operations.py\n--- a/freqtrade/configuration/directory_operations.py\n+++ b/freqtrade/configuration/directory_operations.py\n@@ -33,8 +33,8 @@\n :param create_dir: Create directory if it does not exist.\n :return: Path object containing the directory\n \"\"\"\n- sub_dirs = [\"backtest_results\", \"data\", \"hyperopts\", \"hyperopt_results\", \"notebooks\",\n- \"plot\", \"strategies\", ]\n+ sub_dirs = [\"backtest_results\", \"data\", \"hyperopts\", \"hyperopt_results\", \"logs\",\n+ \"notebooks\", \"plot\", \"strategies\", ]\n folder = Path(directory)\n if not folder.is_dir():\n if create_dir:\n", "issue": " Docker image making logfiles in user_data\ndocker image places error logfiles in user_data by default. (apparently it should not be doing that)\r\nMaybe cud have it put them in a log dir?\r\n\r\n\r\ndocker-compose.yml\r\n\r\ncommand: >\r\n trade\r\n --logfile /freqtrade/user_data/freqtrade.log\r\n\r\ncan maybe be changed to \r\n --logfile /freqtrade/user_data/log/freqtrade.log\r\n\n", "before_files": [{"content": "import logging\nimport shutil\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nfrom freqtrade.exceptions import OperationalException\nfrom freqtrade.constants import USER_DATA_FILES\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_datadir(config: Dict[str, Any], datadir: Optional[str] = None) -> Path:\n\n folder = Path(datadir) if datadir else Path(f\"{config['user_data_dir']}/data\")\n if not datadir:\n # set datadir\n exchange_name = config.get('exchange', {}).get('name').lower()\n folder = folder.joinpath(exchange_name)\n\n if not folder.is_dir():\n folder.mkdir(parents=True)\n logger.info(f'Created data directory: {datadir}')\n return folder\n\n\ndef create_userdata_dir(directory: str, create_dir: bool = False) -> Path:\n \"\"\"\n Create userdata directory structure.\n if create_dir is True, then the parent-directory will be created if it does not exist.\n Sub-directories will always be created if the parent directory exists.\n Raises OperationalException if given a non-existing directory.\n :param directory: Directory to check\n :param create_dir: Create directory if it does not exist.\n :return: Path object containing the directory\n \"\"\"\n sub_dirs = [\"backtest_results\", \"data\", \"hyperopts\", \"hyperopt_results\", \"notebooks\",\n \"plot\", \"strategies\", ]\n folder = Path(directory)\n if not folder.is_dir():\n if create_dir:\n folder.mkdir(parents=True)\n logger.info(f'Created user-data directory: {folder}')\n else:\n raise OperationalException(\n f\"Directory `{folder}` does not exist. 
\"\n \"Please use `freqtrade create-userdir` to create a user directory\")\n\n # Create required subdirectories\n for f in sub_dirs:\n subfolder = folder / f\n if not subfolder.is_dir():\n subfolder.mkdir(parents=False)\n return folder\n\n\ndef copy_sample_files(directory: Path, overwrite: bool = False) -> None:\n \"\"\"\n Copy files from templates to User data directory.\n :param directory: Directory to copy data to\n :param overwrite: Overwrite existing sample files\n \"\"\"\n if not directory.is_dir():\n raise OperationalException(f\"Directory `{directory}` does not exist.\")\n sourcedir = Path(__file__).parents[1] / \"templates\"\n for source, target in USER_DATA_FILES.items():\n targetdir = directory / target\n if not targetdir.is_dir():\n raise OperationalException(f\"Directory `{targetdir}` does not exist.\")\n targetfile = targetdir / source\n if targetfile.exists():\n if not overwrite:\n logger.warning(f\"File `{targetfile}` exists already, not deploying sample file.\")\n continue\n else:\n logger.warning(f\"File `{targetfile}` exists already, overwriting.\")\n shutil.copy(str(sourcedir / source), str(targetfile))\n", "path": "freqtrade/configuration/directory_operations.py"}], "after_files": [{"content": "import logging\nimport shutil\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nfrom freqtrade.exceptions import OperationalException\nfrom freqtrade.constants import USER_DATA_FILES\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_datadir(config: Dict[str, Any], datadir: Optional[str] = None) -> Path:\n\n folder = Path(datadir) if datadir else Path(f\"{config['user_data_dir']}/data\")\n if not datadir:\n # set datadir\n exchange_name = config.get('exchange', {}).get('name').lower()\n folder = folder.joinpath(exchange_name)\n\n if not folder.is_dir():\n folder.mkdir(parents=True)\n logger.info(f'Created data directory: {datadir}')\n return folder\n\n\ndef create_userdata_dir(directory: str, create_dir: bool = False) -> Path:\n \"\"\"\n Create userdata directory structure.\n if create_dir is True, then the parent-directory will be created if it does not exist.\n Sub-directories will always be created if the parent directory exists.\n Raises OperationalException if given a non-existing directory.\n :param directory: Directory to check\n :param create_dir: Create directory if it does not exist.\n :return: Path object containing the directory\n \"\"\"\n sub_dirs = [\"backtest_results\", \"data\", \"hyperopts\", \"hyperopt_results\", \"logs\",\n \"notebooks\", \"plot\", \"strategies\", ]\n folder = Path(directory)\n if not folder.is_dir():\n if create_dir:\n folder.mkdir(parents=True)\n logger.info(f'Created user-data directory: {folder}')\n else:\n raise OperationalException(\n f\"Directory `{folder}` does not exist. 
\"\n \"Please use `freqtrade create-userdir` to create a user directory\")\n\n # Create required subdirectories\n for f in sub_dirs:\n subfolder = folder / f\n if not subfolder.is_dir():\n subfolder.mkdir(parents=False)\n return folder\n\n\ndef copy_sample_files(directory: Path, overwrite: bool = False) -> None:\n \"\"\"\n Copy files from templates to User data directory.\n :param directory: Directory to copy data to\n :param overwrite: Overwrite existing sample files\n \"\"\"\n if not directory.is_dir():\n raise OperationalException(f\"Directory `{directory}` does not exist.\")\n sourcedir = Path(__file__).parents[1] / \"templates\"\n for source, target in USER_DATA_FILES.items():\n targetdir = directory / target\n if not targetdir.is_dir():\n raise OperationalException(f\"Directory `{targetdir}` does not exist.\")\n targetfile = targetdir / source\n if targetfile.exists():\n if not overwrite:\n logger.warning(f\"File `{targetfile}` exists already, not deploying sample file.\")\n continue\n else:\n logger.warning(f\"File `{targetfile}` exists already, overwriting.\")\n shutil.copy(str(sourcedir / source), str(targetfile))\n", "path": "freqtrade/configuration/directory_operations.py"}]}
1,126
175
gh_patches_debug_20487
rasdani/github-patches
git_diff
AUTOMATIC1111__stable-diffusion-webui-8118
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Bug]: GitPython breaking API change in 3.1.30, breaks extension updates ### Is there an existing issue for this? - [X] I have searched the existing issues and checked the recent builds/commits ### What happened? [Per this PR](https://github.com/gitpython-developers/GitPython/pull/1518) and [the changelog](https://github.com/gitpython-developers/GitPython/pull/1518) you can no longer feed arbitrary arguments to prevent remote code execution. Easy fix, just use the built kwarg that's already there for it. https://github.com/AUTOMATIC1111/stable-diffusion-webui/blame/0cc0ee1bcb4c24a8c9715f66cede06601bfc00c8/modules/extensions.py#LL69C28-L69C28 there may be other places in the code as well, I'll take a peek ### Steps to reproduce the problem 1. `pip install 'gitpython>=3.1.30' 2. run the web-ui and try to install/check for updates while watching terminal ### What should have happened? Should have successfully run the git commands and updated the git repos ### Commit where the problem happens 0cc0ee1b ### What platforms do you use to access the UI ? Windows ### What browsers do you use to access the UI ? Google Chrome ### Command Line Arguments ```Shell No ``` ### List of extensions No ### Console logs ```Shell Traceback (most recent call last): File "/mnt/d/stable-diffusion/stable-diffusion-webui/modules/ui_extensions.py", line 66, in check_updates ext.check_updates() File "/mnt/d/stable-diffusion/stable-diffusion-webui/modules/extensions.py", line 69, in check_updates for fetch in repo.remote().fetch("--dry-run"): File "/home/adam/.cache/pypoetry/virtualenvs/sd-deps-z4SYejYZ-py3.10/lib/python3.10/site-packages/git/remote.py", line 1007, in fetch res = self._get_fetch_info_from_stderr(proc, progress, kill_after_timeout=kill_after_timeout) File "/home/adam/.cache/pypoetry/virtualenvs/sd-deps-z4SYejYZ-py3.10/lib/python3.10/site-packages/git/remote.py", line 848, in _get_fetch_info_from_stderr proc.wait(stderr=stderr_text) File "/home/adam/.cache/pypoetry/virtualenvs/sd-deps-z4SYejYZ-py3.10/lib/python3.10/site-packages/git/cmd.py", line 604, in wait raise GitCommandError(remove_password_if_present(self.args), status, errstr) git.exc.GitCommandError: Cmd('git') failed due to: exit code(128) cmdline: git fetch -v -- origin --dry-run stderr: 'fatal: couldn't find remote ref --dry-run' ``` ### Additional information _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `modules/extensions.py` Content: ``` 1 import os 2 import sys 3 import traceback 4 5 import time 6 import git 7 8 from modules import paths, shared 9 10 extensions = [] 11 extensions_dir = os.path.join(paths.data_path, "extensions") 12 extensions_builtin_dir = os.path.join(paths.script_path, "extensions-builtin") 13 14 if not os.path.exists(extensions_dir): 15 os.makedirs(extensions_dir) 16 17 def active(): 18 return [x for x in extensions if x.enabled] 19 20 21 class Extension: 22 def __init__(self, name, path, enabled=True, is_builtin=False): 23 self.name = name 24 self.path = path 25 self.enabled = enabled 26 self.status = '' 27 self.can_update = False 28 self.is_builtin = is_builtin 29 self.version = '' 30 31 repo = None 32 try: 33 if os.path.exists(os.path.join(path, ".git")): 34 repo = git.Repo(path) 35 except Exception: 36 print(f"Error reading github repository info from {path}:", file=sys.stderr) 37 print(traceback.format_exc(), file=sys.stderr) 38 39 if repo is None or repo.bare: 40 self.remote = None 41 else: 42 try: 43 self.remote = next(repo.remote().urls, None) 44 self.status = 'unknown' 45 head = repo.head.commit 46 ts = time.asctime(time.gmtime(repo.head.commit.committed_date)) 47 self.version = f'{head.hexsha[:8]} ({ts})' 48 49 except Exception: 50 self.remote = None 51 52 def list_files(self, subdir, extension): 53 from modules import scripts 54 55 dirpath = os.path.join(self.path, subdir) 56 if not os.path.isdir(dirpath): 57 return [] 58 59 res = [] 60 for filename in sorted(os.listdir(dirpath)): 61 res.append(scripts.ScriptFile(self.path, filename, os.path.join(dirpath, filename))) 62 63 res = [x for x in res if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)] 64 65 return res 66 67 def check_updates(self): 68 repo = git.Repo(self.path) 69 for fetch in repo.remote().fetch("--dry-run"): 70 if fetch.flags != fetch.HEAD_UPTODATE: 71 self.can_update = True 72 self.status = "behind" 73 return 74 75 self.can_update = False 76 self.status = "latest" 77 78 def fetch_and_reset_hard(self): 79 repo = git.Repo(self.path) 80 # Fix: `error: Your local changes to the following files would be overwritten by merge`, 81 # because WSL2 Docker set 755 file permissions instead of 644, this results to the error. 82 repo.git.fetch('--all') 83 repo.git.reset('--hard', 'origin') 84 85 86 def list_extensions(): 87 extensions.clear() 88 89 if not os.path.isdir(extensions_dir): 90 return 91 92 paths = [] 93 for dirname in [extensions_dir, extensions_builtin_dir]: 94 if not os.path.isdir(dirname): 95 return 96 97 for extension_dirname in sorted(os.listdir(dirname)): 98 path = os.path.join(dirname, extension_dirname) 99 if not os.path.isdir(path): 100 continue 101 102 paths.append((extension_dirname, path, dirname == extensions_builtin_dir)) 103 104 for dirname, path, is_builtin in paths: 105 extension = Extension(name=dirname, path=path, enabled=dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin) 106 extensions.append(extension) 107 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/modules/extensions.py b/modules/extensions.py --- a/modules/extensions.py +++ b/modules/extensions.py @@ -66,7 +66,7 @@ def check_updates(self): repo = git.Repo(self.path) - for fetch in repo.remote().fetch("--dry-run"): + for fetch in repo.remote().fetch(dry_run=True): if fetch.flags != fetch.HEAD_UPTODATE: self.can_update = True self.status = "behind" @@ -79,8 +79,8 @@ repo = git.Repo(self.path) # Fix: `error: Your local changes to the following files would be overwritten by merge`, # because WSL2 Docker set 755 file permissions instead of 644, this results to the error. - repo.git.fetch('--all') - repo.git.reset('--hard', 'origin') + repo.git.fetch(all=True) + repo.git.reset('origin', hard=True) def list_extensions():
{"golden_diff": "diff --git a/modules/extensions.py b/modules/extensions.py\n--- a/modules/extensions.py\n+++ b/modules/extensions.py\n@@ -66,7 +66,7 @@\n \r\n def check_updates(self):\r\n repo = git.Repo(self.path)\r\n- for fetch in repo.remote().fetch(\"--dry-run\"):\r\n+ for fetch in repo.remote().fetch(dry_run=True):\r\n if fetch.flags != fetch.HEAD_UPTODATE:\r\n self.can_update = True\r\n self.status = \"behind\"\r\n@@ -79,8 +79,8 @@\n repo = git.Repo(self.path)\r\n # Fix: `error: Your local changes to the following files would be overwritten by merge`,\r\n # because WSL2 Docker set 755 file permissions instead of 644, this results to the error.\r\n- repo.git.fetch('--all')\r\n- repo.git.reset('--hard', 'origin')\r\n+ repo.git.fetch(all=True)\r\n+ repo.git.reset('origin', hard=True)\r\n \r\n \r\n def list_extensions():\n", "issue": "[Bug]: GitPython breaking API change in 3.1.30, breaks extension updates\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues and checked the recent builds/commits\n\n### What happened?\n\n[Per this PR](https://github.com/gitpython-developers/GitPython/pull/1518) and [the changelog](https://github.com/gitpython-developers/GitPython/pull/1518) you can no longer feed arbitrary arguments to prevent remote code execution.\r\n\r\nEasy fix, just use the built kwarg that's already there for it.\r\nhttps://github.com/AUTOMATIC1111/stable-diffusion-webui/blame/0cc0ee1bcb4c24a8c9715f66cede06601bfc00c8/modules/extensions.py#LL69C28-L69C28\r\n\r\nthere may be other places in the code as well, I'll take a peek\n\n### Steps to reproduce the problem\n\n1. `pip install 'gitpython>=3.1.30'\r\n2. run the web-ui and try to install/check for updates while watching terminal\n\n### What should have happened?\n\nShould have successfully run the git commands and updated the git repos\n\n### Commit where the problem happens\n\n0cc0ee1b\n\n### What platforms do you use to access the UI ?\n\nWindows\n\n### What browsers do you use to access the UI ?\n\nGoogle Chrome\n\n### Command Line Arguments\n\n```Shell\nNo\n```\n\n\n### List of extensions\n\nNo\n\n### Console logs\n\n```Shell\nTraceback (most recent call last):\r\n File \"/mnt/d/stable-diffusion/stable-diffusion-webui/modules/ui_extensions.py\", line 66, in check_updates\r\n ext.check_updates()\r\n File \"/mnt/d/stable-diffusion/stable-diffusion-webui/modules/extensions.py\", line 69, in check_updates\r\n for fetch in repo.remote().fetch(\"--dry-run\"):\r\n File \"/home/adam/.cache/pypoetry/virtualenvs/sd-deps-z4SYejYZ-py3.10/lib/python3.10/site-packages/git/remote.py\", line 1007, in fetch\r\n res = self._get_fetch_info_from_stderr(proc, progress, kill_after_timeout=kill_after_timeout)\r\n File \"/home/adam/.cache/pypoetry/virtualenvs/sd-deps-z4SYejYZ-py3.10/lib/python3.10/site-packages/git/remote.py\", line 848, in _get_fetch_info_from_stderr\r\n proc.wait(stderr=stderr_text)\r\n File \"/home/adam/.cache/pypoetry/virtualenvs/sd-deps-z4SYejYZ-py3.10/lib/python3.10/site-packages/git/cmd.py\", line 604, in wait\r\n raise GitCommandError(remove_password_if_present(self.args), status, errstr)\r\ngit.exc.GitCommandError: Cmd('git') failed due to: exit code(128)\r\n cmdline: git fetch -v -- origin --dry-run\r\n stderr: 'fatal: couldn't find remote ref --dry-run'\n```\n\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "import os\r\nimport sys\r\nimport traceback\r\n\r\nimport time\r\nimport git\r\n\r\nfrom modules import paths, shared\r\n\r\nextensions = 
[]\r\nextensions_dir = os.path.join(paths.data_path, \"extensions\")\r\nextensions_builtin_dir = os.path.join(paths.script_path, \"extensions-builtin\")\r\n\r\nif not os.path.exists(extensions_dir):\r\n os.makedirs(extensions_dir)\r\n\r\ndef active():\r\n return [x for x in extensions if x.enabled]\r\n\r\n\r\nclass Extension:\r\n def __init__(self, name, path, enabled=True, is_builtin=False):\r\n self.name = name\r\n self.path = path\r\n self.enabled = enabled\r\n self.status = ''\r\n self.can_update = False\r\n self.is_builtin = is_builtin\r\n self.version = ''\r\n\r\n repo = None\r\n try:\r\n if os.path.exists(os.path.join(path, \".git\")):\r\n repo = git.Repo(path)\r\n except Exception:\r\n print(f\"Error reading github repository info from {path}:\", file=sys.stderr)\r\n print(traceback.format_exc(), file=sys.stderr)\r\n\r\n if repo is None or repo.bare:\r\n self.remote = None\r\n else:\r\n try:\r\n self.remote = next(repo.remote().urls, None)\r\n self.status = 'unknown'\r\n head = repo.head.commit\r\n ts = time.asctime(time.gmtime(repo.head.commit.committed_date))\r\n self.version = f'{head.hexsha[:8]} ({ts})'\r\n\r\n except Exception:\r\n self.remote = None\r\n\r\n def list_files(self, subdir, extension):\r\n from modules import scripts\r\n\r\n dirpath = os.path.join(self.path, subdir)\r\n if not os.path.isdir(dirpath):\r\n return []\r\n\r\n res = []\r\n for filename in sorted(os.listdir(dirpath)):\r\n res.append(scripts.ScriptFile(self.path, filename, os.path.join(dirpath, filename)))\r\n\r\n res = [x for x in res if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)]\r\n\r\n return res\r\n\r\n def check_updates(self):\r\n repo = git.Repo(self.path)\r\n for fetch in repo.remote().fetch(\"--dry-run\"):\r\n if fetch.flags != fetch.HEAD_UPTODATE:\r\n self.can_update = True\r\n self.status = \"behind\"\r\n return\r\n\r\n self.can_update = False\r\n self.status = \"latest\"\r\n\r\n def fetch_and_reset_hard(self):\r\n repo = git.Repo(self.path)\r\n # Fix: `error: Your local changes to the following files would be overwritten by merge`,\r\n # because WSL2 Docker set 755 file permissions instead of 644, this results to the error.\r\n repo.git.fetch('--all')\r\n repo.git.reset('--hard', 'origin')\r\n\r\n\r\ndef list_extensions():\r\n extensions.clear()\r\n\r\n if not os.path.isdir(extensions_dir):\r\n return\r\n\r\n paths = []\r\n for dirname in [extensions_dir, extensions_builtin_dir]:\r\n if not os.path.isdir(dirname):\r\n return\r\n\r\n for extension_dirname in sorted(os.listdir(dirname)):\r\n path = os.path.join(dirname, extension_dirname)\r\n if not os.path.isdir(path):\r\n continue\r\n\r\n paths.append((extension_dirname, path, dirname == extensions_builtin_dir))\r\n\r\n for dirname, path, is_builtin in paths:\r\n extension = Extension(name=dirname, path=path, enabled=dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin)\r\n extensions.append(extension)\r\n\r\n", "path": "modules/extensions.py"}], "after_files": [{"content": "import os\r\nimport sys\r\nimport traceback\r\n\r\nimport time\r\nimport git\r\n\r\nfrom modules import paths, shared\r\n\r\nextensions = []\r\nextensions_dir = os.path.join(paths.data_path, \"extensions\")\r\nextensions_builtin_dir = os.path.join(paths.script_path, \"extensions-builtin\")\r\n\r\nif not os.path.exists(extensions_dir):\r\n os.makedirs(extensions_dir)\r\n\r\ndef active():\r\n return [x for x in extensions if x.enabled]\r\n\r\n\r\nclass Extension:\r\n def __init__(self, name, path, enabled=True, 
is_builtin=False):\r\n self.name = name\r\n self.path = path\r\n self.enabled = enabled\r\n self.status = ''\r\n self.can_update = False\r\n self.is_builtin = is_builtin\r\n self.version = ''\r\n\r\n repo = None\r\n try:\r\n if os.path.exists(os.path.join(path, \".git\")):\r\n repo = git.Repo(path)\r\n except Exception:\r\n print(f\"Error reading github repository info from {path}:\", file=sys.stderr)\r\n print(traceback.format_exc(), file=sys.stderr)\r\n\r\n if repo is None or repo.bare:\r\n self.remote = None\r\n else:\r\n try:\r\n self.remote = next(repo.remote().urls, None)\r\n self.status = 'unknown'\r\n head = repo.head.commit\r\n ts = time.asctime(time.gmtime(repo.head.commit.committed_date))\r\n self.version = f'{head.hexsha[:8]} ({ts})'\r\n\r\n except Exception:\r\n self.remote = None\r\n\r\n def list_files(self, subdir, extension):\r\n from modules import scripts\r\n\r\n dirpath = os.path.join(self.path, subdir)\r\n if not os.path.isdir(dirpath):\r\n return []\r\n\r\n res = []\r\n for filename in sorted(os.listdir(dirpath)):\r\n res.append(scripts.ScriptFile(self.path, filename, os.path.join(dirpath, filename)))\r\n\r\n res = [x for x in res if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)]\r\n\r\n return res\r\n\r\n def check_updates(self):\r\n repo = git.Repo(self.path)\r\n for fetch in repo.remote().fetch(dry_run=True):\r\n if fetch.flags != fetch.HEAD_UPTODATE:\r\n self.can_update = True\r\n self.status = \"behind\"\r\n return\r\n\r\n self.can_update = False\r\n self.status = \"latest\"\r\n\r\n def fetch_and_reset_hard(self):\r\n repo = git.Repo(self.path)\r\n # Fix: `error: Your local changes to the following files would be overwritten by merge`,\r\n # because WSL2 Docker set 755 file permissions instead of 644, this results to the error.\r\n repo.git.fetch(all=True)\r\n repo.git.reset('origin', hard=True)\r\n\r\n\r\ndef list_extensions():\r\n extensions.clear()\r\n\r\n if not os.path.isdir(extensions_dir):\r\n return\r\n\r\n paths = []\r\n for dirname in [extensions_dir, extensions_builtin_dir]:\r\n if not os.path.isdir(dirname):\r\n return\r\n\r\n for extension_dirname in sorted(os.listdir(dirname)):\r\n path = os.path.join(dirname, extension_dirname)\r\n if not os.path.isdir(path):\r\n continue\r\n\r\n paths.append((extension_dirname, path, dirname == extensions_builtin_dir))\r\n\r\n for dirname, path, is_builtin in paths:\r\n extension = Extension(name=dirname, path=path, enabled=dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin)\r\n extensions.append(extension)\r\n\r\n", "path": "modules/extensions.py"}]}
1,901
219
gh_patches_debug_34955
rasdani/github-patches
git_diff
elastic__apm-agent-python-881
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 'FragmentDefinition' object has no attribute 'operation'" **Describe the bug**: I'm using elastic APM with Django 3.1.2 and graphql. On every GraphQL Query, I'm seeing now this error: `AttributeError: 'FragmentDefinition' object has no attribute 'operation'` The relevant file is: `elasticapm/instrumentation/packages/graphql.py in get_graphql_tx_name at line 99` **To Reproduce** I'm not sure yet, why the error is occurring and I'm just getting started with the service. If you can guide me to the right direction, I can create a reproducible example. **Environment (please complete the following information)** - OS: Linux docker Container - Python version: - Framework and version : Django 3.1.2 - APM Server version: - Agent version: 5.9.0 **Additional context** Add any other context about the problem here. - Agent config options <!-- be careful not to post sensitive information --> <details> <summary>Click to expand</summary> ``` replace this line with your agent config options remember to mask any sensitive fields like tokens ``` </details> - `requirements.txt`: <details> <summary>Click to expand</summary> ``` replace this line with your `requirements.txt` ``` </details> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `elasticapm/instrumentation/packages/graphql.py` Content: ``` 1 # BSD 3-Clause License 2 # 3 # Copyright (c) 2019, Elasticsearch BV 4 # All rights reserved. 5 # 6 # Redistribution and use in source and binary forms, with or without 7 # modification, are permitted provided that the following conditions are met: 8 # 9 # * Redistributions of source code must retain the above copyright notice, this 10 # list of conditions and the following disclaimer. 11 # 12 # * Redistributions in binary form must reproduce the above copyright notice, 13 # this list of conditions and the following disclaimer in the documentation 14 # and/or other materials provided with the distribution. 15 # 16 # * Neither the name of the copyright holder nor the names of its 17 # contributors may be used to endorse or promote products derived from 18 # this software without specific prior written permission. 19 # 20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 31 from elasticapm import set_transaction_name 32 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule 33 from elasticapm.traces import capture_span 34 35 36 class GraphQLExecutorInstrumentation(AbstractInstrumentedModule): 37 name = "graphql" 38 39 instrument_list = [ 40 ("graphql.execution.executors.sync", "SyncExecutor.execute"), 41 ("graphql.execution.executors.gevent", "GeventExecutor.execute"), 42 ("graphql.execution.executors.asyncio", "AsyncioExecutor.execute"), 43 ("graphql.execution.executors.process", "ProcessExecutor.execute"), 44 ("graphql.execution.executors.thread", "ThreadExecutor.execute_in_thread"), 45 ("graphql.execution.executors.thread", "ThreadExecutor.execute_in_pool"), 46 ] 47 48 def call(self, module, method, wrapped, instance, args, kwargs): 49 name = "GraphQL" 50 51 info = "" 52 query = args[2] 53 54 if "ResolveInfo" == type(query).__name__: 55 if str(query.return_type) in [ 56 'Boolean', 57 'Context', 58 'Date', 59 'DateTime', 60 'Decimal', 61 'Dynamic', 62 'Float', 63 'ID', 64 'Int', 65 'String', 66 'Time', 67 'UUID', 68 'Boolean', 69 'String' 70 ]: 71 return wrapped(*args, **kwargs) 72 73 op = query.operation.operation 74 field = query.field_name 75 info = "%s %s" % (op, field) 76 elif "RequestParams" == type(query).__name__: 77 info = "%s %s" % ("request", query.query) 78 else: 79 info = str(query) 80 81 with capture_span( 82 "%s.%s" % (name, info), 83 span_type="external", 84 span_subtype="graphql", 85 span_action="query" 86 ): 87 return wrapped(*args, **kwargs) 88 89 90 class GraphQLBackendInstrumentation(AbstractInstrumentedModule): 91 name = "graphql" 92 93 instrument_list = [ 94 ("graphql.backend.core", "GraphQLCoreBackend.document_from_string"), 95 ("graphql.backend.cache", "GraphQLCachedBackend.document_from_string"), 96 ] 97 98 def get_graphql_tx_name(self, graphql_doc): 99 op = graphql_doc.definitions[0].operation 100 fields = graphql_doc.definitions[0].selection_set.selections 101 return "GraphQL %s %s" % (op.upper(), "+".join([f.name.value for f in fields])) 102 103 def call(self, module, method, wrapped, instance, args, kwargs): 104 graphql_document = wrapped(*args, **kwargs) 105 transaction_name = self.get_graphql_tx_name(graphql_document.document_ast) 106 set_transaction_name(transaction_name) 107 return graphql_document 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/elasticapm/instrumentation/packages/graphql.py b/elasticapm/instrumentation/packages/graphql.py --- a/elasticapm/instrumentation/packages/graphql.py +++ b/elasticapm/instrumentation/packages/graphql.py @@ -53,20 +53,20 @@ if "ResolveInfo" == type(query).__name__: if str(query.return_type) in [ - 'Boolean', - 'Context', - 'Date', - 'DateTime', - 'Decimal', - 'Dynamic', - 'Float', - 'ID', - 'Int', - 'String', - 'Time', - 'UUID', - 'Boolean', - 'String' + "Boolean", + "Context", + "Date", + "DateTime", + "Decimal", + "Dynamic", + "Float", + "ID", + "Int", + "String", + "Time", + "UUID", + "Boolean", + "String", ]: return wrapped(*args, **kwargs) @@ -78,12 +78,7 @@ else: info = str(query) - with capture_span( - "%s.%s" % (name, info), - span_type="external", - span_subtype="graphql", - span_action="query" - ): + with capture_span("%s.%s" % (name, info), span_type="external", span_subtype="graphql", span_action="query"): return wrapped(*args, **kwargs) @@ -96,9 +91,15 @@ ] def get_graphql_tx_name(self, graphql_doc): - op = graphql_doc.definitions[0].operation - fields = graphql_doc.definitions[0].selection_set.selections - return "GraphQL %s %s" % (op.upper(), "+".join([f.name.value for f in fields])) + try: + op_def = [i for i in graphql_doc.definitions if type(i).__name__ == "OperationDefinition"][0] + except KeyError: + return "GraphQL unknown operation" + + op = op_def.operation + name = op_def.name + fields = op_def.selection_set.selections + return "GraphQL %s %s" % (op.upper(), name if name else "+".join([f.name.value for f in fields])) def call(self, module, method, wrapped, instance, args, kwargs): graphql_document = wrapped(*args, **kwargs)
{"golden_diff": "diff --git a/elasticapm/instrumentation/packages/graphql.py b/elasticapm/instrumentation/packages/graphql.py\n--- a/elasticapm/instrumentation/packages/graphql.py\n+++ b/elasticapm/instrumentation/packages/graphql.py\n@@ -53,20 +53,20 @@\n \n if \"ResolveInfo\" == type(query).__name__:\n if str(query.return_type) in [\n- 'Boolean',\n- 'Context',\n- 'Date',\n- 'DateTime',\n- 'Decimal',\n- 'Dynamic',\n- 'Float',\n- 'ID',\n- 'Int',\n- 'String',\n- 'Time',\n- 'UUID',\n- 'Boolean',\n- 'String'\n+ \"Boolean\",\n+ \"Context\",\n+ \"Date\",\n+ \"DateTime\",\n+ \"Decimal\",\n+ \"Dynamic\",\n+ \"Float\",\n+ \"ID\",\n+ \"Int\",\n+ \"String\",\n+ \"Time\",\n+ \"UUID\",\n+ \"Boolean\",\n+ \"String\",\n ]:\n return wrapped(*args, **kwargs)\n \n@@ -78,12 +78,7 @@\n else:\n info = str(query)\n \n- with capture_span(\n- \"%s.%s\" % (name, info),\n- span_type=\"external\",\n- span_subtype=\"graphql\",\n- span_action=\"query\"\n- ):\n+ with capture_span(\"%s.%s\" % (name, info), span_type=\"external\", span_subtype=\"graphql\", span_action=\"query\"):\n return wrapped(*args, **kwargs)\n \n \n@@ -96,9 +91,15 @@\n ]\n \n def get_graphql_tx_name(self, graphql_doc):\n- op = graphql_doc.definitions[0].operation\n- fields = graphql_doc.definitions[0].selection_set.selections\n- return \"GraphQL %s %s\" % (op.upper(), \"+\".join([f.name.value for f in fields]))\n+ try:\n+ op_def = [i for i in graphql_doc.definitions if type(i).__name__ == \"OperationDefinition\"][0]\n+ except KeyError:\n+ return \"GraphQL unknown operation\"\n+\n+ op = op_def.operation\n+ name = op_def.name\n+ fields = op_def.selection_set.selections\n+ return \"GraphQL %s %s\" % (op.upper(), name if name else \"+\".join([f.name.value for f in fields]))\n \n def call(self, module, method, wrapped, instance, args, kwargs):\n graphql_document = wrapped(*args, **kwargs)\n", "issue": "'FragmentDefinition' object has no attribute 'operation'\"\n**Describe the bug**: \r\nI'm using elastic APM with Django 3.1.2 and graphql.\r\nOn every GraphQL Query, I'm seeing now this error: `AttributeError: 'FragmentDefinition' object has no attribute 'operation'`\r\n\r\nThe relevant file is: `elasticapm/instrumentation/packages/graphql.py in get_graphql_tx_name at line 99`\r\n\r\n**To Reproduce**\r\nI'm not sure yet, why the error is occurring and I'm just getting started with the service. 
If you can guide me to the right direction, I can create a reproducible example.\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux docker Container\r\n- Python version:\r\n- Framework and version : Django 3.1.2\r\n- APM Server version: \r\n- Agent version: 5.9.0\r\n\r\n\r\n**Additional context**\r\n\r\nAdd any other context about the problem here.\r\n\r\n- Agent config options <!-- be careful not to post sensitive information -->\r\n <details>\r\n <summary>Click to expand</summary>\r\n\r\n ```\r\n replace this line with your agent config options\r\n remember to mask any sensitive fields like tokens\r\n ```\r\n </details>\r\n- `requirements.txt`:\r\n <details>\r\n <summary>Click to expand</summary>\r\n\r\n ```\r\n replace this line with your `requirements.txt`\r\n ```\r\n </details>\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm import set_transaction_name\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import capture_span\n\n\nclass GraphQLExecutorInstrumentation(AbstractInstrumentedModule):\n name = \"graphql\"\n\n instrument_list = [\n (\"graphql.execution.executors.sync\", \"SyncExecutor.execute\"),\n (\"graphql.execution.executors.gevent\", \"GeventExecutor.execute\"),\n (\"graphql.execution.executors.asyncio\", \"AsyncioExecutor.execute\"),\n (\"graphql.execution.executors.process\", \"ProcessExecutor.execute\"),\n (\"graphql.execution.executors.thread\", \"ThreadExecutor.execute_in_thread\"),\n (\"graphql.execution.executors.thread\", \"ThreadExecutor.execute_in_pool\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n name = \"GraphQL\"\n\n info = \"\"\n query = args[2]\n\n if \"ResolveInfo\" == type(query).__name__:\n if str(query.return_type) in [\n 'Boolean',\n 'Context',\n 'Date',\n 'DateTime',\n 'Decimal',\n 'Dynamic',\n 'Float',\n 'ID',\n 'Int',\n 'String',\n 'Time',\n 'UUID',\n 'Boolean',\n 'String'\n ]:\n return wrapped(*args, **kwargs)\n\n op = query.operation.operation\n field = query.field_name\n info = \"%s %s\" % (op, field)\n elif \"RequestParams\" == type(query).__name__:\n info = \"%s %s\" % (\"request\", query.query)\n else:\n info = str(query)\n\n with capture_span(\n \"%s.%s\" % (name, info),\n span_type=\"external\",\n span_subtype=\"graphql\",\n span_action=\"query\"\n ):\n return wrapped(*args, **kwargs)\n\n\nclass GraphQLBackendInstrumentation(AbstractInstrumentedModule):\n name = \"graphql\"\n\n instrument_list = [\n (\"graphql.backend.core\", \"GraphQLCoreBackend.document_from_string\"),\n (\"graphql.backend.cache\", \"GraphQLCachedBackend.document_from_string\"),\n ]\n\n def get_graphql_tx_name(self, graphql_doc):\n op = graphql_doc.definitions[0].operation\n fields = graphql_doc.definitions[0].selection_set.selections\n return \"GraphQL %s %s\" % (op.upper(), \"+\".join([f.name.value for f in fields]))\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n graphql_document = wrapped(*args, **kwargs)\n transaction_name = self.get_graphql_tx_name(graphql_document.document_ast)\n set_transaction_name(transaction_name)\n return graphql_document\n", "path": "elasticapm/instrumentation/packages/graphql.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name 
of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm import set_transaction_name\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import capture_span\n\n\nclass GraphQLExecutorInstrumentation(AbstractInstrumentedModule):\n name = \"graphql\"\n\n instrument_list = [\n (\"graphql.execution.executors.sync\", \"SyncExecutor.execute\"),\n (\"graphql.execution.executors.gevent\", \"GeventExecutor.execute\"),\n (\"graphql.execution.executors.asyncio\", \"AsyncioExecutor.execute\"),\n (\"graphql.execution.executors.process\", \"ProcessExecutor.execute\"),\n (\"graphql.execution.executors.thread\", \"ThreadExecutor.execute_in_thread\"),\n (\"graphql.execution.executors.thread\", \"ThreadExecutor.execute_in_pool\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n name = \"GraphQL\"\n\n info = \"\"\n query = args[2]\n\n if \"ResolveInfo\" == type(query).__name__:\n if str(query.return_type) in [\n \"Boolean\",\n \"Context\",\n \"Date\",\n \"DateTime\",\n \"Decimal\",\n \"Dynamic\",\n \"Float\",\n \"ID\",\n \"Int\",\n \"String\",\n \"Time\",\n \"UUID\",\n \"Boolean\",\n \"String\",\n ]:\n return wrapped(*args, **kwargs)\n\n op = query.operation.operation\n field = query.field_name\n info = \"%s %s\" % (op, field)\n elif \"RequestParams\" == type(query).__name__:\n info = \"%s %s\" % (\"request\", query.query)\n else:\n info = str(query)\n\n with capture_span(\"%s.%s\" % (name, info), span_type=\"external\", span_subtype=\"graphql\", span_action=\"query\"):\n return wrapped(*args, **kwargs)\n\n\nclass GraphQLBackendInstrumentation(AbstractInstrumentedModule):\n name = \"graphql\"\n\n instrument_list = [\n (\"graphql.backend.core\", \"GraphQLCoreBackend.document_from_string\"),\n (\"graphql.backend.cache\", \"GraphQLCachedBackend.document_from_string\"),\n ]\n\n def get_graphql_tx_name(self, graphql_doc):\n try:\n op_def = [i for i in graphql_doc.definitions if type(i).__name__ == \"OperationDefinition\"][0]\n except KeyError:\n return \"GraphQL unknown operation\"\n\n op = op_def.operation\n name = op_def.name\n fields = op_def.selection_set.selections\n return \"GraphQL %s %s\" % (op.upper(), name if name else \"+\".join([f.name.value for f in fields]))\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n graphql_document = wrapped(*args, **kwargs)\n transaction_name = self.get_graphql_tx_name(graphql_document.document_ast)\n set_transaction_name(transaction_name)\n return graphql_document\n", "path": "elasticapm/instrumentation/packages/graphql.py"}]}
1,681
565
gh_patches_debug_37682
rasdani/github-patches
git_diff
apluslms__a-plus-1005
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- A+ Security logs, CEF format After security audit in spring 2021, some new security-related log events were added, using SecurityLog class. The log output format should be converted to CEF format that can be exported to Aalto ITS logging systems. Also, the current log events should be reviewed: do they contain sufficient information, and should some additional events be added. Note that security log should contain only relevant events, that can be justified from security point of view. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/logging.py` Content: ``` 1 from django.http import UnreadablePostError 2 import logging 3 from django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed 4 from django.dispatch import receiver 5 from django.http.request import HttpRequest 6 7 def skip_unreadable_post(record): 8 """Skips log records of unfinished post requests.""" 9 return not record.exc_info or not issubclass(record.exc_info[0], UnreadablePostError) 10 11 class SecurityLog: 12 """ 13 Static singleton class used for A+ - wide security logging. 14 Django signals are used to track login/logout events. 15 """ 16 17 seclogger = logging.getLogger('aplus.security') 18 19 @staticmethod 20 def logger() -> logging.Logger: 21 return SecurityLog.seclogger 22 23 @staticmethod 24 def logevent(request: HttpRequest, type: str, message: str) -> None: 25 # Unit tests do not have user defined in request object. 26 if request and hasattr(request, 'user'): 27 user=request.user 28 ip=request.META.get('REMOTE_ADDR') 29 else: 30 user='?' 31 ip='?' 32 SecurityLog.logger().info("({}/{}): {}: {}".format( 33 user, 34 ip, 35 type, 36 message 37 )) 38 39 40 # This example was used as an inspiration for the following functions: 41 # https://stackoverflow.com/questions/37618473/how-can-i-log-both-successful-and-failed-login-and-logout-attempts-in-django 42 @receiver(user_logged_in) 43 def user_logged_in_callback(sender, request, user, **kwargs): 44 SecurityLog.logevent(request, "login", "") 45 46 @receiver(user_logged_out) 47 def user_logged_out_callback(sender, request, user, **kwargs): 48 SecurityLog.logevent(request, "logout", "") 49 50 # Unfortunately the request object is not passed with this signal, 51 # so we cannot get the IP. 52 @receiver(user_login_failed) 53 def user_login_failed_callback(sender, credentials, **kwargs): 54 SecurityLog.logevent(None, "login-failed","{credentials}".format( 55 credentials=credentials) 56 ) 57 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/logging.py b/lib/logging.py --- a/lib/logging.py +++ b/lib/logging.py @@ -3,6 +3,12 @@ from django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed from django.dispatch import receiver from django.http.request import HttpRequest +from format_cef import format_cef +from aplus import VERSION + + +CEF_VENDOR = 'Aalto' +CEF_PRODUCT = 'aplus' def skip_unreadable_post(record): """Skips log records of unfinished post requests.""" @@ -10,7 +16,8 @@ class SecurityLog: """ - Static singleton class used for A+ - wide security logging. + Static singleton class used for A+ - wide security logging, + to produce ArcSight Common Event Format (CEF) log. Django signals are used to track login/logout events. """ @@ -21,20 +28,32 @@ return SecurityLog.seclogger @staticmethod - def logevent(request: HttpRequest, type: str, message: str) -> None: - # Unit tests do not have user defined in request object. - if request and hasattr(request, 'user'): - user=request.user - ip=request.META.get('REMOTE_ADDR') - else: - user='?' - ip='?' - SecurityLog.logger().info("({}/{}): {}: {}".format( - user, - ip, - type, - message - )) + def logevent( + request: HttpRequest, + type: str, + message: str, + severity: int = 5, + ) -> None: + extensions = {} + # Unit tests may not have user or source address defined. + if request: + if hasattr(request, 'user'): + extensions['sourceUserName'] = str(request.user) + extensions['sourceUserId'] = str(request.user.id) + if (addr := request.META.get('REMOTE_ADDR')): + extensions['sourceAddress'] = addr + + SecurityLog.logger().info( + format_cef( + CEF_VENDOR, + CEF_PRODUCT, + VERSION, + type, + message, + severity, + extensions, + ).decode("utf-8") + ) # This example was used as an inspiration for the following functions: @@ -51,6 +70,8 @@ # so we cannot get the IP. @receiver(user_login_failed) def user_login_failed_callback(sender, credentials, **kwargs): - SecurityLog.logevent(None, "login-failed","{credentials}".format( - credentials=credentials) - ) + try: + SecurityLog.logevent(None, "login-failed", f"username: {credentials['username']}") + except KeyError: + # Unit tests do not have 'username' in credentials, let's not fail them for that + pass
{"golden_diff": "diff --git a/lib/logging.py b/lib/logging.py\n--- a/lib/logging.py\n+++ b/lib/logging.py\n@@ -3,6 +3,12 @@\n from django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed\n from django.dispatch import receiver\n from django.http.request import HttpRequest\n+from format_cef import format_cef\n+from aplus import VERSION\n+\n+\n+CEF_VENDOR = 'Aalto'\n+CEF_PRODUCT = 'aplus'\n \n def skip_unreadable_post(record):\n \"\"\"Skips log records of unfinished post requests.\"\"\"\n@@ -10,7 +16,8 @@\n \n class SecurityLog:\n \"\"\"\n- Static singleton class used for A+ - wide security logging.\n+ Static singleton class used for A+ - wide security logging,\n+ to produce ArcSight Common Event Format (CEF) log.\n Django signals are used to track login/logout events.\n \"\"\"\n \n@@ -21,20 +28,32 @@\n return SecurityLog.seclogger\n \n @staticmethod\n- def logevent(request: HttpRequest, type: str, message: str) -> None:\n- # Unit tests do not have user defined in request object.\n- if request and hasattr(request, 'user'):\n- user=request.user\n- ip=request.META.get('REMOTE_ADDR')\n- else:\n- user='?'\n- ip='?'\n- SecurityLog.logger().info(\"({}/{}): {}: {}\".format(\n- user,\n- ip,\n- type,\n- message\n- ))\n+ def logevent(\n+ request: HttpRequest,\n+ type: str,\n+ message: str,\n+ severity: int = 5,\n+ ) -> None:\n+ extensions = {}\n+ # Unit tests may not have user or source address defined.\n+ if request:\n+ if hasattr(request, 'user'):\n+ extensions['sourceUserName'] = str(request.user)\n+ extensions['sourceUserId'] = str(request.user.id)\n+ if (addr := request.META.get('REMOTE_ADDR')):\n+ extensions['sourceAddress'] = addr\n+\n+ SecurityLog.logger().info(\n+ format_cef(\n+ CEF_VENDOR,\n+ CEF_PRODUCT,\n+ VERSION,\n+ type,\n+ message,\n+ severity,\n+ extensions,\n+ ).decode(\"utf-8\")\n+ )\n \n \n # This example was used as an inspiration for the following functions:\n@@ -51,6 +70,8 @@\n # so we cannot get the IP.\n @receiver(user_login_failed)\n def user_login_failed_callback(sender, credentials, **kwargs):\n- SecurityLog.logevent(None, \"login-failed\",\"{credentials}\".format(\n- credentials=credentials)\n- )\n+ try:\n+ SecurityLog.logevent(None, \"login-failed\", f\"username: {credentials['username']}\")\n+ except KeyError:\n+ # Unit tests do not have 'username' in credentials, let's not fail them for that\n+ pass\n", "issue": "A+ Security logs, CEF format\nAfter security audit in spring 2021, some new security-related log events were added, using SecurityLog class. The log output format should be converted to CEF format that can be exported to Aalto ITS logging systems. Also, the current log events should be reviewed: do they contain sufficient information, and should some additional events be added. 
Note that security log should contain only relevant events, that can be justified from security point of view.\n", "before_files": [{"content": "from django.http import UnreadablePostError\nimport logging\nfrom django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed\nfrom django.dispatch import receiver\nfrom django.http.request import HttpRequest\n\ndef skip_unreadable_post(record):\n \"\"\"Skips log records of unfinished post requests.\"\"\"\n return not record.exc_info or not issubclass(record.exc_info[0], UnreadablePostError)\n\nclass SecurityLog:\n \"\"\"\n Static singleton class used for A+ - wide security logging.\n Django signals are used to track login/logout events.\n \"\"\"\n\n seclogger = logging.getLogger('aplus.security')\n\n @staticmethod\n def logger() -> logging.Logger:\n return SecurityLog.seclogger\n\n @staticmethod\n def logevent(request: HttpRequest, type: str, message: str) -> None:\n # Unit tests do not have user defined in request object.\n if request and hasattr(request, 'user'):\n user=request.user\n ip=request.META.get('REMOTE_ADDR')\n else:\n user='?'\n ip='?'\n SecurityLog.logger().info(\"({}/{}): {}: {}\".format(\n user,\n ip,\n type,\n message\n ))\n\n\n# This example was used as an inspiration for the following functions:\n# https://stackoverflow.com/questions/37618473/how-can-i-log-both-successful-and-failed-login-and-logout-attempts-in-django\n@receiver(user_logged_in)\ndef user_logged_in_callback(sender, request, user, **kwargs):\n SecurityLog.logevent(request, \"login\", \"\")\n\n@receiver(user_logged_out)\ndef user_logged_out_callback(sender, request, user, **kwargs):\n SecurityLog.logevent(request, \"logout\", \"\")\n\n# Unfortunately the request object is not passed with this signal,\n# so we cannot get the IP.\n@receiver(user_login_failed)\ndef user_login_failed_callback(sender, credentials, **kwargs):\n SecurityLog.logevent(None, \"login-failed\",\"{credentials}\".format(\n credentials=credentials)\n )\n", "path": "lib/logging.py"}], "after_files": [{"content": "from django.http import UnreadablePostError\nimport logging\nfrom django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed\nfrom django.dispatch import receiver\nfrom django.http.request import HttpRequest\nfrom format_cef import format_cef\nfrom aplus import VERSION\n\n\nCEF_VENDOR = 'Aalto'\nCEF_PRODUCT = 'aplus'\n\ndef skip_unreadable_post(record):\n \"\"\"Skips log records of unfinished post requests.\"\"\"\n return not record.exc_info or not issubclass(record.exc_info[0], UnreadablePostError)\n\nclass SecurityLog:\n \"\"\"\n Static singleton class used for A+ - wide security logging,\n to produce ArcSight Common Event Format (CEF) log.\n Django signals are used to track login/logout events.\n \"\"\"\n\n seclogger = logging.getLogger('aplus.security')\n\n @staticmethod\n def logger() -> logging.Logger:\n return SecurityLog.seclogger\n\n @staticmethod\n def logevent(\n request: HttpRequest,\n type: str,\n message: str,\n severity: int = 5,\n ) -> None:\n extensions = {}\n # Unit tests may not have user or source address defined.\n if request:\n if hasattr(request, 'user'):\n extensions['sourceUserName'] = str(request.user)\n extensions['sourceUserId'] = str(request.user.id)\n if (addr := request.META.get('REMOTE_ADDR')):\n extensions['sourceAddress'] = addr\n\n SecurityLog.logger().info(\n format_cef(\n CEF_VENDOR,\n CEF_PRODUCT,\n VERSION,\n type,\n message,\n severity,\n extensions,\n ).decode(\"utf-8\")\n )\n\n\n# This example 
was used as an inspiration for the following functions:\n# https://stackoverflow.com/questions/37618473/how-can-i-log-both-successful-and-failed-login-and-logout-attempts-in-django\n@receiver(user_logged_in)\ndef user_logged_in_callback(sender, request, user, **kwargs):\n SecurityLog.logevent(request, \"login\", \"\")\n\n@receiver(user_logged_out)\ndef user_logged_out_callback(sender, request, user, **kwargs):\n SecurityLog.logevent(request, \"logout\", \"\")\n\n# Unfortunately the request object is not passed with this signal,\n# so we cannot get the IP.\n@receiver(user_login_failed)\ndef user_login_failed_callback(sender, credentials, **kwargs):\n try:\n SecurityLog.logevent(None, \"login-failed\", f\"username: {credentials['username']}\")\n except KeyError:\n # Unit tests do not have 'username' in credentials, let's not fail them for that\n pass\n", "path": "lib/logging.py"}]}
897
654
gh_patches_debug_30069
rasdani/github-patches
git_diff
pre-commit__pre-commit-2746
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- deprecate `python_venv` language this has been an alias to `python` for a very long time but it cannot be removed without a deprecation period this is going to need a long deprecation period since it's sorta subtle and usually not the user's fault and will need hook authors to (potentially) make updates the plan is to do the following: 1. introduce the following in a minor release - migrate-config will autofix `.pre-commit-config.yaml` usages of `language: python_venv` (there isn't an equivalent `migrate-manifest` -- though users outnumber hook authors by several orders of magnitude) 1. introduce the following in a minor release - a warning is shown for configuration using the `language: python_venv` - a warning is shown for repos using `language: python_venv` (do this at install time so it only shows once as to not be super annoying for users who have no control) - a recommendation for hook authors to also set `minimum_pre_commit_version` to this version 1. a long time passes (typically my deprecation period has been 12-18+ months) 1. introduce the following in a major release - removal of the `python_venv` alias --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/repository.py` Content: ``` 1 from __future__ import annotations 2 3 import json 4 import logging 5 import os 6 from typing import Any 7 from typing import Sequence 8 9 import pre_commit.constants as C 10 from pre_commit.clientlib import load_manifest 11 from pre_commit.clientlib import LOCAL 12 from pre_commit.clientlib import META 13 from pre_commit.clientlib import parse_version 14 from pre_commit.hook import Hook 15 from pre_commit.languages.all import languages 16 from pre_commit.languages.helpers import environment_dir 17 from pre_commit.prefix import Prefix 18 from pre_commit.store import Store 19 from pre_commit.util import clean_path_on_failure 20 from pre_commit.util import rmtree 21 22 23 logger = logging.getLogger('pre_commit') 24 25 26 def _state_filename_v1(venv: str) -> str: 27 return os.path.join(venv, '.install_state_v1') 28 29 30 def _state_filename_v2(venv: str) -> str: 31 return os.path.join(venv, '.install_state_v2') 32 33 34 def _state(additional_deps: Sequence[str]) -> object: 35 return {'additional_dependencies': sorted(additional_deps)} 36 37 38 def _read_state(venv: str) -> object | None: 39 filename = _state_filename_v1(venv) 40 if not os.path.exists(filename): 41 return None 42 else: 43 with open(filename) as f: 44 return json.load(f) 45 46 47 def _hook_installed(hook: Hook) -> bool: 48 lang = languages[hook.language] 49 if lang.ENVIRONMENT_DIR is None: 50 return True 51 52 venv = environment_dir( 53 hook.prefix, 54 lang.ENVIRONMENT_DIR, 55 hook.language_version, 56 ) 57 return ( 58 ( 59 os.path.exists(_state_filename_v2(venv)) or 60 _read_state(venv) == _state(hook.additional_dependencies) 61 ) and 62 not lang.health_check(hook.prefix, hook.language_version) 63 ) 64 65 66 def _hook_install(hook: Hook) -> None: 67 logger.info(f'Installing environment for {hook.src}.') 68 logger.info('Once installed this environment will be reused.') 69 logger.info('This may take a few minutes...') 70 71 lang = languages[hook.language] 72 assert lang.ENVIRONMENT_DIR is not None 73 74 venv = environment_dir( 75 hook.prefix, 76 lang.ENVIRONMENT_DIR, 77 hook.language_version, 78 ) 79 80 # There's potentially 
incomplete cleanup from previous runs 81 # Clean it up! 82 if os.path.exists(venv): 83 rmtree(venv) 84 85 with clean_path_on_failure(venv): 86 lang.install_environment( 87 hook.prefix, hook.language_version, hook.additional_dependencies, 88 ) 89 health_error = lang.health_check(hook.prefix, hook.language_version) 90 if health_error: 91 raise AssertionError( 92 f'BUG: expected environment for {hook.language} to be healthy ' 93 f'immediately after install, please open an issue describing ' 94 f'your environment\n\n' 95 f'more info:\n\n{health_error}', 96 ) 97 98 # TODO: remove v1 state writing, no longer needed after pre-commit 3.0 99 # Write our state to indicate we're installed 100 state_filename = _state_filename_v1(venv) 101 staging = f'{state_filename}staging' 102 with open(staging, 'w') as state_file: 103 state_file.write(json.dumps(_state(hook.additional_dependencies))) 104 # Move the file into place atomically to indicate we've installed 105 os.replace(staging, state_filename) 106 107 open(_state_filename_v2(venv), 'a+').close() 108 109 110 def _hook( 111 *hook_dicts: dict[str, Any], 112 root_config: dict[str, Any], 113 ) -> dict[str, Any]: 114 ret, rest = dict(hook_dicts[0]), hook_dicts[1:] 115 for dct in rest: 116 ret.update(dct) 117 118 version = ret['minimum_pre_commit_version'] 119 if parse_version(version) > parse_version(C.VERSION): 120 logger.error( 121 f'The hook `{ret["id"]}` requires pre-commit version {version} ' 122 f'but version {C.VERSION} is installed. ' 123 f'Perhaps run `pip install --upgrade pre-commit`.', 124 ) 125 exit(1) 126 127 lang = ret['language'] 128 if ret['language_version'] == C.DEFAULT: 129 ret['language_version'] = root_config['default_language_version'][lang] 130 if ret['language_version'] == C.DEFAULT: 131 ret['language_version'] = languages[lang].get_default_version() 132 133 if not ret['stages']: 134 ret['stages'] = root_config['default_stages'] 135 136 if languages[lang].ENVIRONMENT_DIR is None: 137 if ret['language_version'] != C.DEFAULT: 138 logger.error( 139 f'The hook `{ret["id"]}` specifies `language_version` but is ' 140 f'using language `{lang}` which does not install an ' 141 f'environment. ' 142 f'Perhaps you meant to use a specific language?', 143 ) 144 exit(1) 145 if ret['additional_dependencies']: 146 logger.error( 147 f'The hook `{ret["id"]}` specifies `additional_dependencies` ' 148 f'but is using language `{lang}` which does not install an ' 149 f'environment. 
' 150 f'Perhaps you meant to use a specific language?', 151 ) 152 exit(1) 153 154 return ret 155 156 157 def _non_cloned_repository_hooks( 158 repo_config: dict[str, Any], 159 store: Store, 160 root_config: dict[str, Any], 161 ) -> tuple[Hook, ...]: 162 def _prefix(language_name: str, deps: Sequence[str]) -> Prefix: 163 language = languages[language_name] 164 # pygrep / script / system / docker_image do not have 165 # environments so they work out of the current directory 166 if language.ENVIRONMENT_DIR is None: 167 return Prefix(os.getcwd()) 168 else: 169 return Prefix(store.make_local(deps)) 170 171 return tuple( 172 Hook.create( 173 repo_config['repo'], 174 _prefix(hook['language'], hook['additional_dependencies']), 175 _hook(hook, root_config=root_config), 176 ) 177 for hook in repo_config['hooks'] 178 ) 179 180 181 def _cloned_repository_hooks( 182 repo_config: dict[str, Any], 183 store: Store, 184 root_config: dict[str, Any], 185 ) -> tuple[Hook, ...]: 186 repo, rev = repo_config['repo'], repo_config['rev'] 187 manifest_path = os.path.join(store.clone(repo, rev), C.MANIFEST_FILE) 188 by_id = {hook['id']: hook for hook in load_manifest(manifest_path)} 189 190 for hook in repo_config['hooks']: 191 if hook['id'] not in by_id: 192 logger.error( 193 f'`{hook["id"]}` is not present in repository {repo}. ' 194 f'Typo? Perhaps it is introduced in a newer version? ' 195 f'Often `pre-commit autoupdate` fixes this.', 196 ) 197 exit(1) 198 199 hook_dcts = [ 200 _hook(by_id[hook['id']], hook, root_config=root_config) 201 for hook in repo_config['hooks'] 202 ] 203 return tuple( 204 Hook.create( 205 repo_config['repo'], 206 Prefix(store.clone(repo, rev, hook['additional_dependencies'])), 207 hook, 208 ) 209 for hook in hook_dcts 210 ) 211 212 213 def _repository_hooks( 214 repo_config: dict[str, Any], 215 store: Store, 216 root_config: dict[str, Any], 217 ) -> tuple[Hook, ...]: 218 if repo_config['repo'] in {LOCAL, META}: 219 return _non_cloned_repository_hooks(repo_config, store, root_config) 220 else: 221 return _cloned_repository_hooks(repo_config, store, root_config) 222 223 224 def install_hook_envs(hooks: Sequence[Hook], store: Store) -> None: 225 def _need_installed() -> list[Hook]: 226 seen: set[tuple[Prefix, str, str, tuple[str, ...]]] = set() 227 ret = [] 228 for hook in hooks: 229 if hook.install_key not in seen and not _hook_installed(hook): 230 ret.append(hook) 231 seen.add(hook.install_key) 232 return ret 233 234 if not _need_installed(): 235 return 236 with store.exclusive_lock(): 237 # Another process may have already completed this work 238 for hook in _need_installed(): 239 _hook_install(hook) 240 241 242 def all_hooks(root_config: dict[str, Any], store: Store) -> tuple[Hook, ...]: 243 return tuple( 244 hook 245 for repo in root_config['repos'] 246 for hook in _repository_hooks(repo, store, root_config) 247 ) 248 ``` Path: `pre_commit/commands/migrate_config.py` Content: ``` 1 from __future__ import annotations 2 3 import re 4 import textwrap 5 6 import cfgv 7 import yaml 8 9 from pre_commit.clientlib import InvalidConfigError 10 from pre_commit.yaml import yaml_load 11 12 13 def _is_header_line(line: str) -> bool: 14 return line.startswith(('#', '---')) or not line.strip() 15 16 17 def _migrate_map(contents: str) -> str: 18 if isinstance(yaml_load(contents), list): 19 # Find the first non-header line 20 lines = contents.splitlines(True) 21 i = 0 22 # Only loop on non empty configuration file 23 while i < len(lines) and _is_header_line(lines[i]): 24 i += 1 25 26 header = 
''.join(lines[:i]) 27 rest = ''.join(lines[i:]) 28 29 # If they are using the "default" flow style of yaml, this operation 30 # will yield a valid configuration 31 try: 32 trial_contents = f'{header}repos:\n{rest}' 33 yaml_load(trial_contents) 34 contents = trial_contents 35 except yaml.YAMLError: 36 contents = f'{header}repos:\n{textwrap.indent(rest, " " * 4)}' 37 38 return contents 39 40 41 def _migrate_sha_to_rev(contents: str) -> str: 42 return re.sub(r'(\n\s+)sha:', r'\1rev:', contents) 43 44 45 def migrate_config(config_file: str, quiet: bool = False) -> int: 46 with open(config_file) as f: 47 orig_contents = contents = f.read() 48 49 with cfgv.reraise_as(InvalidConfigError): 50 with cfgv.validate_context(f'File {config_file}'): 51 try: 52 yaml_load(orig_contents) 53 except Exception as e: 54 raise cfgv.ValidationError(str(e)) 55 56 contents = _migrate_map(contents) 57 contents = _migrate_sha_to_rev(contents) 58 59 if contents != orig_contents: 60 with open(config_file, 'w') as f: 61 f.write(contents) 62 63 print('Configuration has been migrated.') 64 elif not quiet: 65 print('Configuration is already migrated.') 66 return 0 67 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py --- a/pre_commit/commands/migrate_config.py +++ b/pre_commit/commands/migrate_config.py @@ -42,6 +42,14 @@ return re.sub(r'(\n\s+)sha:', r'\1rev:', contents) +def _migrate_python_venv(contents: str) -> str: + return re.sub( + r'(\n\s+)language: python_venv\b', + r'\1language: python', + contents, + ) + + def migrate_config(config_file: str, quiet: bool = False) -> int: with open(config_file) as f: orig_contents = contents = f.read() @@ -55,6 +63,7 @@ contents = _migrate_map(contents) contents = _migrate_sha_to_rev(contents) + contents = _migrate_python_venv(contents) if contents != orig_contents: with open(config_file, 'w') as f: diff --git a/pre_commit/repository.py b/pre_commit/repository.py --- a/pre_commit/repository.py +++ b/pre_commit/repository.py @@ -3,6 +3,7 @@ import json import logging import os +import shlex from typing import Any from typing import Sequence @@ -68,6 +69,14 @@ logger.info('Once installed this environment will be reused.') logger.info('This may take a few minutes...') + if hook.language == 'python_venv': + logger.warning( + f'`repo: {hook.src}` uses deprecated `language: python_venv`. ' + f'This is an alias for `language: python`. ' + f'Often `pre-commit autoupdate --repo {shlex.quote(hook.src)}` ' + f'will fix this.', + ) + lang = languages[hook.language] assert lang.ENVIRONMENT_DIR is not None
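The autofix in the diff above is a plain regex substitution. As a standalone illustration, the short Python sketch below applies that same substitution to a made-up `.pre-commit-config.yaml` snippet; the sample config text is an assumption for demonstration and is not taken from the record.

```python
# Standalone sketch of the _migrate_python_venv substitution shown in the diff above.
# The sample config below is invented purely to show the before/after effect.
import re


def _migrate_python_venv(contents: str) -> str:
    return re.sub(
        r'(\n\s+)language: python_venv\b',
        r'\1language: python',
        contents,
    )


sample = (
    'repos:\n'
    '-   repo: local\n'
    '    hooks:\n'
    '    -   id: example-hook\n'
    '        name: example hook\n'
    '        entry: true\n'
    '        language: python_venv\n'
)
print(_migrate_python_venv(sample))
# prints the same config with "language: python_venv" rewritten to "language: python"
```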
{"golden_diff": "diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py\n--- a/pre_commit/commands/migrate_config.py\n+++ b/pre_commit/commands/migrate_config.py\n@@ -42,6 +42,14 @@\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n \n \n+def _migrate_python_venv(contents: str) -> str:\n+ return re.sub(\n+ r'(\\n\\s+)language: python_venv\\b',\n+ r'\\1language: python',\n+ contents,\n+ )\n+\n+\n def migrate_config(config_file: str, quiet: bool = False) -> int:\n with open(config_file) as f:\n orig_contents = contents = f.read()\n@@ -55,6 +63,7 @@\n \n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n+ contents = _migrate_python_venv(contents)\n \n if contents != orig_contents:\n with open(config_file, 'w') as f:\ndiff --git a/pre_commit/repository.py b/pre_commit/repository.py\n--- a/pre_commit/repository.py\n+++ b/pre_commit/repository.py\n@@ -3,6 +3,7 @@\n import json\n import logging\n import os\n+import shlex\n from typing import Any\n from typing import Sequence\n \n@@ -68,6 +69,14 @@\n logger.info('Once installed this environment will be reused.')\n logger.info('This may take a few minutes...')\n \n+ if hook.language == 'python_venv':\n+ logger.warning(\n+ f'`repo: {hook.src}` uses deprecated `language: python_venv`. '\n+ f'This is an alias for `language: python`. '\n+ f'Often `pre-commit autoupdate --repo {shlex.quote(hook.src)}` '\n+ f'will fix this.',\n+ )\n+\n lang = languages[hook.language]\n assert lang.ENVIRONMENT_DIR is not None\n", "issue": "deprecate `python_venv` language\nthis has been an alias to `python` for a very long time but it cannot be removed without a deprecation period\r\n\r\nthis is going to need a long deprecation period since it's sorta subtle and usually not the user's fault and will need hook authors to (potentially) make updates\r\n\r\nthe plan is to do the following:\r\n\r\n1. introduce the following in a minor release\r\n - migrate-config will autofix `.pre-commit-config.yaml` usages of `language: python_venv` (there isn't an equivalent `migrate-manifest` -- though users outnumber hook authors by several orders of magnitude)\r\n1. introduce the following in a minor release\r\n - a warning is shown for configuration using the `language: python_venv`\r\n - a warning is shown for repos using `language: python_venv` (do this at install time so it only shows once as to not be super annoying for users who have no control)\r\n - a recommendation for hook authors to also set `minimum_pre_commit_version` to this version\r\n1. a long time passes (typically my deprecation period has been 12-18+ months)\r\n1. 
introduce the following in a major release\r\n - removal of the `python_venv` alias\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport json\nimport logging\nimport os\nfrom typing import Any\nfrom typing import Sequence\n\nimport pre_commit.constants as C\nfrom pre_commit.clientlib import load_manifest\nfrom pre_commit.clientlib import LOCAL\nfrom pre_commit.clientlib import META\nfrom pre_commit.clientlib import parse_version\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages.all import languages\nfrom pre_commit.languages.helpers import environment_dir\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.store import Store\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import rmtree\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _state_filename_v1(venv: str) -> str:\n return os.path.join(venv, '.install_state_v1')\n\n\ndef _state_filename_v2(venv: str) -> str:\n return os.path.join(venv, '.install_state_v2')\n\n\ndef _state(additional_deps: Sequence[str]) -> object:\n return {'additional_dependencies': sorted(additional_deps)}\n\n\ndef _read_state(venv: str) -> object | None:\n filename = _state_filename_v1(venv)\n if not os.path.exists(filename):\n return None\n else:\n with open(filename) as f:\n return json.load(f)\n\n\ndef _hook_installed(hook: Hook) -> bool:\n lang = languages[hook.language]\n if lang.ENVIRONMENT_DIR is None:\n return True\n\n venv = environment_dir(\n hook.prefix,\n lang.ENVIRONMENT_DIR,\n hook.language_version,\n )\n return (\n (\n os.path.exists(_state_filename_v2(venv)) or\n _read_state(venv) == _state(hook.additional_dependencies)\n ) and\n not lang.health_check(hook.prefix, hook.language_version)\n )\n\n\ndef _hook_install(hook: Hook) -> None:\n logger.info(f'Installing environment for {hook.src}.')\n logger.info('Once installed this environment will be reused.')\n logger.info('This may take a few minutes...')\n\n lang = languages[hook.language]\n assert lang.ENVIRONMENT_DIR is not None\n\n venv = environment_dir(\n hook.prefix,\n lang.ENVIRONMENT_DIR,\n hook.language_version,\n )\n\n # There's potentially incomplete cleanup from previous runs\n # Clean it up!\n if os.path.exists(venv):\n rmtree(venv)\n\n with clean_path_on_failure(venv):\n lang.install_environment(\n hook.prefix, hook.language_version, hook.additional_dependencies,\n )\n health_error = lang.health_check(hook.prefix, hook.language_version)\n if health_error:\n raise AssertionError(\n f'BUG: expected environment for {hook.language} to be healthy '\n f'immediately after install, please open an issue describing '\n f'your environment\\n\\n'\n f'more info:\\n\\n{health_error}',\n )\n\n # TODO: remove v1 state writing, no longer needed after pre-commit 3.0\n # Write our state to indicate we're installed\n state_filename = _state_filename_v1(venv)\n staging = f'{state_filename}staging'\n with open(staging, 'w') as state_file:\n state_file.write(json.dumps(_state(hook.additional_dependencies)))\n # Move the file into place atomically to indicate we've installed\n os.replace(staging, state_filename)\n\n open(_state_filename_v2(venv), 'a+').close()\n\n\ndef _hook(\n *hook_dicts: dict[str, Any],\n root_config: dict[str, Any],\n) -> dict[str, Any]:\n ret, rest = dict(hook_dicts[0]), hook_dicts[1:]\n for dct in rest:\n ret.update(dct)\n\n version = ret['minimum_pre_commit_version']\n if parse_version(version) > parse_version(C.VERSION):\n logger.error(\n f'The hook `{ret[\"id\"]}` requires pre-commit version {version} '\n f'but 
version {C.VERSION} is installed. '\n f'Perhaps run `pip install --upgrade pre-commit`.',\n )\n exit(1)\n\n lang = ret['language']\n if ret['language_version'] == C.DEFAULT:\n ret['language_version'] = root_config['default_language_version'][lang]\n if ret['language_version'] == C.DEFAULT:\n ret['language_version'] = languages[lang].get_default_version()\n\n if not ret['stages']:\n ret['stages'] = root_config['default_stages']\n\n if languages[lang].ENVIRONMENT_DIR is None:\n if ret['language_version'] != C.DEFAULT:\n logger.error(\n f'The hook `{ret[\"id\"]}` specifies `language_version` but is '\n f'using language `{lang}` which does not install an '\n f'environment. '\n f'Perhaps you meant to use a specific language?',\n )\n exit(1)\n if ret['additional_dependencies']:\n logger.error(\n f'The hook `{ret[\"id\"]}` specifies `additional_dependencies` '\n f'but is using language `{lang}` which does not install an '\n f'environment. '\n f'Perhaps you meant to use a specific language?',\n )\n exit(1)\n\n return ret\n\n\ndef _non_cloned_repository_hooks(\n repo_config: dict[str, Any],\n store: Store,\n root_config: dict[str, Any],\n) -> tuple[Hook, ...]:\n def _prefix(language_name: str, deps: Sequence[str]) -> Prefix:\n language = languages[language_name]\n # pygrep / script / system / docker_image do not have\n # environments so they work out of the current directory\n if language.ENVIRONMENT_DIR is None:\n return Prefix(os.getcwd())\n else:\n return Prefix(store.make_local(deps))\n\n return tuple(\n Hook.create(\n repo_config['repo'],\n _prefix(hook['language'], hook['additional_dependencies']),\n _hook(hook, root_config=root_config),\n )\n for hook in repo_config['hooks']\n )\n\n\ndef _cloned_repository_hooks(\n repo_config: dict[str, Any],\n store: Store,\n root_config: dict[str, Any],\n) -> tuple[Hook, ...]:\n repo, rev = repo_config['repo'], repo_config['rev']\n manifest_path = os.path.join(store.clone(repo, rev), C.MANIFEST_FILE)\n by_id = {hook['id']: hook for hook in load_manifest(manifest_path)}\n\n for hook in repo_config['hooks']:\n if hook['id'] not in by_id:\n logger.error(\n f'`{hook[\"id\"]}` is not present in repository {repo}. '\n f'Typo? Perhaps it is introduced in a newer version? 
'\n f'Often `pre-commit autoupdate` fixes this.',\n )\n exit(1)\n\n hook_dcts = [\n _hook(by_id[hook['id']], hook, root_config=root_config)\n for hook in repo_config['hooks']\n ]\n return tuple(\n Hook.create(\n repo_config['repo'],\n Prefix(store.clone(repo, rev, hook['additional_dependencies'])),\n hook,\n )\n for hook in hook_dcts\n )\n\n\ndef _repository_hooks(\n repo_config: dict[str, Any],\n store: Store,\n root_config: dict[str, Any],\n) -> tuple[Hook, ...]:\n if repo_config['repo'] in {LOCAL, META}:\n return _non_cloned_repository_hooks(repo_config, store, root_config)\n else:\n return _cloned_repository_hooks(repo_config, store, root_config)\n\n\ndef install_hook_envs(hooks: Sequence[Hook], store: Store) -> None:\n def _need_installed() -> list[Hook]:\n seen: set[tuple[Prefix, str, str, tuple[str, ...]]] = set()\n ret = []\n for hook in hooks:\n if hook.install_key not in seen and not _hook_installed(hook):\n ret.append(hook)\n seen.add(hook.install_key)\n return ret\n\n if not _need_installed():\n return\n with store.exclusive_lock():\n # Another process may have already completed this work\n for hook in _need_installed():\n _hook_install(hook)\n\n\ndef all_hooks(root_config: dict[str, Any], store: Store) -> tuple[Hook, ...]:\n return tuple(\n hook\n for repo in root_config['repos']\n for hook in _repository_hooks(repo, store, root_config)\n )\n", "path": "pre_commit/repository.py"}, {"content": "from __future__ import annotations\n\nimport re\nimport textwrap\n\nimport cfgv\nimport yaml\n\nfrom pre_commit.clientlib import InvalidConfigError\nfrom pre_commit.yaml import yaml_load\n\n\ndef _is_header_line(line: str) -> bool:\n return line.startswith(('#', '---')) or not line.strip()\n\n\ndef _migrate_map(contents: str) -> str:\n if isinstance(yaml_load(contents), list):\n # Find the first non-header line\n lines = contents.splitlines(True)\n i = 0\n # Only loop on non empty configuration file\n while i < len(lines) and _is_header_line(lines[i]):\n i += 1\n\n header = ''.join(lines[:i])\n rest = ''.join(lines[i:])\n\n # If they are using the \"default\" flow style of yaml, this operation\n # will yield a valid configuration\n try:\n trial_contents = f'{header}repos:\\n{rest}'\n yaml_load(trial_contents)\n contents = trial_contents\n except yaml.YAMLError:\n contents = f'{header}repos:\\n{textwrap.indent(rest, \" \" * 4)}'\n\n return contents\n\n\ndef _migrate_sha_to_rev(contents: str) -> str:\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n\n\ndef migrate_config(config_file: str, quiet: bool = False) -> int:\n with open(config_file) as f:\n orig_contents = contents = f.read()\n\n with cfgv.reraise_as(InvalidConfigError):\n with cfgv.validate_context(f'File {config_file}'):\n try:\n yaml_load(orig_contents)\n except Exception as e:\n raise cfgv.ValidationError(str(e))\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n\n if contents != orig_contents:\n with open(config_file, 'w') as f:\n f.write(contents)\n\n print('Configuration has been migrated.')\n elif not quiet:\n print('Configuration is already migrated.')\n return 0\n", "path": "pre_commit/commands/migrate_config.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport json\nimport logging\nimport os\nimport shlex\nfrom typing import Any\nfrom typing import Sequence\n\nimport pre_commit.constants as C\nfrom pre_commit.clientlib import load_manifest\nfrom pre_commit.clientlib import LOCAL\nfrom pre_commit.clientlib import META\nfrom pre_commit.clientlib import 
parse_version\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages.all import languages\nfrom pre_commit.languages.helpers import environment_dir\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.store import Store\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import rmtree\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _state_filename_v1(venv: str) -> str:\n return os.path.join(venv, '.install_state_v1')\n\n\ndef _state_filename_v2(venv: str) -> str:\n return os.path.join(venv, '.install_state_v2')\n\n\ndef _state(additional_deps: Sequence[str]) -> object:\n return {'additional_dependencies': sorted(additional_deps)}\n\n\ndef _read_state(venv: str) -> object | None:\n filename = _state_filename_v1(venv)\n if not os.path.exists(filename):\n return None\n else:\n with open(filename) as f:\n return json.load(f)\n\n\ndef _hook_installed(hook: Hook) -> bool:\n lang = languages[hook.language]\n if lang.ENVIRONMENT_DIR is None:\n return True\n\n venv = environment_dir(\n hook.prefix,\n lang.ENVIRONMENT_DIR,\n hook.language_version,\n )\n return (\n (\n os.path.exists(_state_filename_v2(venv)) or\n _read_state(venv) == _state(hook.additional_dependencies)\n ) and\n not lang.health_check(hook.prefix, hook.language_version)\n )\n\n\ndef _hook_install(hook: Hook) -> None:\n logger.info(f'Installing environment for {hook.src}.')\n logger.info('Once installed this environment will be reused.')\n logger.info('This may take a few minutes...')\n\n if hook.language == 'python_venv':\n logger.warning(\n f'`repo: {hook.src}` uses deprecated `language: python_venv`. '\n f'This is an alias for `language: python`. '\n f'Often `pre-commit autoupdate --repo {shlex.quote(hook.src)}` '\n f'will fix this.',\n )\n\n lang = languages[hook.language]\n assert lang.ENVIRONMENT_DIR is not None\n\n venv = environment_dir(\n hook.prefix,\n lang.ENVIRONMENT_DIR,\n hook.language_version,\n )\n\n # There's potentially incomplete cleanup from previous runs\n # Clean it up!\n if os.path.exists(venv):\n rmtree(venv)\n\n with clean_path_on_failure(venv):\n lang.install_environment(\n hook.prefix, hook.language_version, hook.additional_dependencies,\n )\n health_error = lang.health_check(hook.prefix, hook.language_version)\n if health_error:\n raise AssertionError(\n f'BUG: expected environment for {hook.language} to be healthy '\n f'immediately after install, please open an issue describing '\n f'your environment\\n\\n'\n f'more info:\\n\\n{health_error}',\n )\n\n # TODO: remove v1 state writing, no longer needed after pre-commit 3.0\n # Write our state to indicate we're installed\n state_filename = _state_filename_v1(venv)\n staging = f'{state_filename}staging'\n with open(staging, 'w') as state_file:\n state_file.write(json.dumps(_state(hook.additional_dependencies)))\n # Move the file into place atomically to indicate we've installed\n os.replace(staging, state_filename)\n\n open(_state_filename_v2(venv), 'a+').close()\n\n\ndef _hook(\n *hook_dicts: dict[str, Any],\n root_config: dict[str, Any],\n) -> dict[str, Any]:\n ret, rest = dict(hook_dicts[0]), hook_dicts[1:]\n for dct in rest:\n ret.update(dct)\n\n version = ret['minimum_pre_commit_version']\n if parse_version(version) > parse_version(C.VERSION):\n logger.error(\n f'The hook `{ret[\"id\"]}` requires pre-commit version {version} '\n f'but version {C.VERSION} is installed. 
'\n f'Perhaps run `pip install --upgrade pre-commit`.',\n )\n exit(1)\n\n lang = ret['language']\n if ret['language_version'] == C.DEFAULT:\n ret['language_version'] = root_config['default_language_version'][lang]\n if ret['language_version'] == C.DEFAULT:\n ret['language_version'] = languages[lang].get_default_version()\n\n if not ret['stages']:\n ret['stages'] = root_config['default_stages']\n\n if languages[lang].ENVIRONMENT_DIR is None:\n if ret['language_version'] != C.DEFAULT:\n logger.error(\n f'The hook `{ret[\"id\"]}` specifies `language_version` but is '\n f'using language `{lang}` which does not install an '\n f'environment. '\n f'Perhaps you meant to use a specific language?',\n )\n exit(1)\n if ret['additional_dependencies']:\n logger.error(\n f'The hook `{ret[\"id\"]}` specifies `additional_dependencies` '\n f'but is using language `{lang}` which does not install an '\n f'environment. '\n f'Perhaps you meant to use a specific language?',\n )\n exit(1)\n\n return ret\n\n\ndef _non_cloned_repository_hooks(\n repo_config: dict[str, Any],\n store: Store,\n root_config: dict[str, Any],\n) -> tuple[Hook, ...]:\n def _prefix(language_name: str, deps: Sequence[str]) -> Prefix:\n language = languages[language_name]\n # pygrep / script / system / docker_image do not have\n # environments so they work out of the current directory\n if language.ENVIRONMENT_DIR is None:\n return Prefix(os.getcwd())\n else:\n return Prefix(store.make_local(deps))\n\n return tuple(\n Hook.create(\n repo_config['repo'],\n _prefix(hook['language'], hook['additional_dependencies']),\n _hook(hook, root_config=root_config),\n )\n for hook in repo_config['hooks']\n )\n\n\ndef _cloned_repository_hooks(\n repo_config: dict[str, Any],\n store: Store,\n root_config: dict[str, Any],\n) -> tuple[Hook, ...]:\n repo, rev = repo_config['repo'], repo_config['rev']\n manifest_path = os.path.join(store.clone(repo, rev), C.MANIFEST_FILE)\n by_id = {hook['id']: hook for hook in load_manifest(manifest_path)}\n\n for hook in repo_config['hooks']:\n if hook['id'] not in by_id:\n logger.error(\n f'`{hook[\"id\"]}` is not present in repository {repo}. '\n f'Typo? Perhaps it is introduced in a newer version? 
'\n f'Often `pre-commit autoupdate` fixes this.',\n )\n exit(1)\n\n hook_dcts = [\n _hook(by_id[hook['id']], hook, root_config=root_config)\n for hook in repo_config['hooks']\n ]\n return tuple(\n Hook.create(\n repo_config['repo'],\n Prefix(store.clone(repo, rev, hook['additional_dependencies'])),\n hook,\n )\n for hook in hook_dcts\n )\n\n\ndef _repository_hooks(\n repo_config: dict[str, Any],\n store: Store,\n root_config: dict[str, Any],\n) -> tuple[Hook, ...]:\n if repo_config['repo'] in {LOCAL, META}:\n return _non_cloned_repository_hooks(repo_config, store, root_config)\n else:\n return _cloned_repository_hooks(repo_config, store, root_config)\n\n\ndef install_hook_envs(hooks: Sequence[Hook], store: Store) -> None:\n def _need_installed() -> list[Hook]:\n seen: set[tuple[Prefix, str, str, tuple[str, ...]]] = set()\n ret = []\n for hook in hooks:\n if hook.install_key not in seen and not _hook_installed(hook):\n ret.append(hook)\n seen.add(hook.install_key)\n return ret\n\n if not _need_installed():\n return\n with store.exclusive_lock():\n # Another process may have already completed this work\n for hook in _need_installed():\n _hook_install(hook)\n\n\ndef all_hooks(root_config: dict[str, Any], store: Store) -> tuple[Hook, ...]:\n return tuple(\n hook\n for repo in root_config['repos']\n for hook in _repository_hooks(repo, store, root_config)\n )\n", "path": "pre_commit/repository.py"}, {"content": "from __future__ import annotations\n\nimport re\nimport textwrap\n\nimport cfgv\nimport yaml\n\nfrom pre_commit.clientlib import InvalidConfigError\nfrom pre_commit.yaml import yaml_load\n\n\ndef _is_header_line(line: str) -> bool:\n return line.startswith(('#', '---')) or not line.strip()\n\n\ndef _migrate_map(contents: str) -> str:\n if isinstance(yaml_load(contents), list):\n # Find the first non-header line\n lines = contents.splitlines(True)\n i = 0\n # Only loop on non empty configuration file\n while i < len(lines) and _is_header_line(lines[i]):\n i += 1\n\n header = ''.join(lines[:i])\n rest = ''.join(lines[i:])\n\n # If they are using the \"default\" flow style of yaml, this operation\n # will yield a valid configuration\n try:\n trial_contents = f'{header}repos:\\n{rest}'\n yaml_load(trial_contents)\n contents = trial_contents\n except yaml.YAMLError:\n contents = f'{header}repos:\\n{textwrap.indent(rest, \" \" * 4)}'\n\n return contents\n\n\ndef _migrate_sha_to_rev(contents: str) -> str:\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n\n\ndef _migrate_python_venv(contents: str) -> str:\n return re.sub(\n r'(\\n\\s+)language: python_venv\\b',\n r'\\1language: python',\n contents,\n )\n\n\ndef migrate_config(config_file: str, quiet: bool = False) -> int:\n with open(config_file) as f:\n orig_contents = contents = f.read()\n\n with cfgv.reraise_as(InvalidConfigError):\n with cfgv.validate_context(f'File {config_file}'):\n try:\n yaml_load(orig_contents)\n except Exception as e:\n raise cfgv.ValidationError(str(e))\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n contents = _migrate_python_venv(contents)\n\n if contents != orig_contents:\n with open(config_file, 'w') as f:\n f.write(contents)\n\n print('Configuration has been migrated.')\n elif not quiet:\n print('Configuration is already migrated.')\n return 0\n", "path": "pre_commit/commands/migrate_config.py"}]}
num_tokens: 3,625
num_tokens_diff: 443
gh_patches_debug_1256
rasdani/github-patches
git_diff
conan-io__conan-127
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- mark headers as "SYSTEM" headers to silence warnings Many libraries generate tons of warnings in public headers. WebSocket++ uses auto_ptr for example and many Boost libraries truncate integers implicitly (-Wconversion). To consume these libraries you have to treat them as system headers because GCC won't emit warnings in these. This is how Conan currently sets the include directories: ``` CMake include_directories(${CONAN_INCLUDE_DIRS}) ``` This is how you would add them as "system" headers to silence warnings: ``` CMake include_directories(SYSTEM ${CONAN_INCLUDE_DIRS}) ``` Is there a reason it is not already done this way? This issue may apply to configurations other than CMake/GCC, too, but this is the most important one for me. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conans/client/generators/cmake.py` Content: ``` 1 from conans.model import Generator 2 from conans.paths import BUILD_INFO_CMAKE 3 4 5 class DepsCppCmake(object): 6 def __init__(self, deps_cpp_info): 7 self.include_paths = "\n\t\t\t".join('"%s"' % p.replace("\\", "/") 8 for p in deps_cpp_info.include_paths) 9 self.lib_paths = "\n\t\t\t".join('"%s"' % p.replace("\\", "/") 10 for p in deps_cpp_info.lib_paths) 11 self.libs = " ".join(deps_cpp_info.libs) 12 self.defines = "\n\t\t\t".join("-D%s" % d for d in deps_cpp_info.defines) 13 self.cppflags = " ".join(deps_cpp_info.cppflags) 14 self.cflags = " ".join(deps_cpp_info.cflags) 15 self.sharedlinkflags = " ".join(deps_cpp_info.sharedlinkflags) 16 self.exelinkflags = " ".join(deps_cpp_info.exelinkflags) 17 self.bin_paths = "\n\t\t\t".join('"%s"' % p.replace("\\", "/") 18 for p in deps_cpp_info.bin_paths) 19 20 self.rootpath = '"%s"' % deps_cpp_info.rootpath.replace("\\", "/") 21 22 23 class CMakeGenerator(Generator): 24 @property 25 def filename(self): 26 return BUILD_INFO_CMAKE 27 28 @property 29 def content(self): 30 sections = [] 31 32 # DEPS VARIABLES 33 template_dep = ('set(CONAN_{dep}_ROOT {deps.rootpath})\n' 34 'set(CONAN_INCLUDE_DIRS_{dep} {deps.include_paths})\n' 35 'set(CONAN_LIB_DIRS_{dep} {deps.lib_paths})\n' 36 'set(CONAN_BIN_DIRS_{dep} {deps.bin_paths})\n' 37 'set(CONAN_LIBS_{dep} {deps.libs})\n' 38 'set(CONAN_DEFINES_{dep} {deps.defines})\n' 39 'set(CONAN_CXX_FLAGS_{dep} "{deps.cppflags}")\n' 40 'set(CONAN_SHARED_LINKER_FLAGS_{dep} "{deps.sharedlinkflags}")\n' 41 'set(CONAN_EXE_LINKER_FLAGS_{dep} "{deps.exelinkflags}")\n' 42 'set(CONAN_C_FLAGS_{dep} "{deps.cflags}")\n') 43 44 for dep_name, dep_cpp_info in self.deps_build_info.dependencies: 45 deps = DepsCppCmake(dep_cpp_info) 46 dep_flags = template_dep.format(dep=dep_name.upper(), 47 deps=deps) 48 sections.append(dep_flags) 49 50 # GENERAL VARIABLES 51 deps = DepsCppCmake(self.deps_build_info) 52 53 template = ('set(CONAN_INCLUDE_DIRS {deps.include_paths} ${{CONAN_INCLUDE_DIRS}})\n' 54 'set(CONAN_LIB_DIRS {deps.lib_paths} ${{CONAN_LIB_DIRS}})\n' 55 'set(CONAN_BIN_DIRS {deps.bin_paths} ${{CONAN_BIN_DIRS}})\n' 56 'set(CONAN_LIBS {deps.libs} ${{CONAN_LIBS}})\n' 57 'set(CONAN_DEFINES {deps.defines} ${{CONAN_DEFINES}})\n' 58 'set(CONAN_CXX_FLAGS "{deps.cppflags} ${{CONAN_CXX_FLAGS}}")\n' 59 'set(CONAN_SHARED_LINKER_FLAGS "{deps.sharedlinkflags} ${{CONAN_SHARED_LINKER_FLAGS}}")\n' 60 'set(CONAN_EXE_LINKER_FLAGS "{deps.exelinkflags} ${{CONAN_EXE_LINKER_FLAGS}}")\n' 61 'set(CONAN_C_FLAGS "{deps.cflags} 
${{CONAN_C_FLAGS}}")\n' 62 'set(CONAN_CMAKE_MODULE_PATH {module_paths} ${{CONAN_CMAKE_MODULE_PATH}})') 63 64 rootpaths = [DepsCppCmake(dep_cpp_info).rootpath for _, dep_cpp_info 65 in self.deps_build_info.dependencies] 66 module_paths = " ".join(rootpaths) 67 all_flags = template.format(deps=deps, module_paths=module_paths) 68 sections.append(all_flags) 69 70 # MACROS 71 sections.append(self._aux_cmake_test_setup()) 72 73 return "\n".join(sections) 74 75 def _aux_cmake_test_setup(self): 76 return """macro(CONAN_BASIC_SETUP) 77 conan_check_compiler() 78 conan_output_dirs_setup() 79 conan_flags_setup() 80 # CMake can find findXXX.cmake files in the root of packages 81 set(CMAKE_MODULE_PATH ${CONAN_CMAKE_MODULE_PATH} ${CMAKE_MODULE_PATH}) 82 endmacro() 83 84 macro(CONAN_FLAGS_SETUP) 85 include_directories(${CONAN_INCLUDE_DIRS}) 86 link_directories(${CONAN_LIB_DIRS}) 87 add_definitions(${CONAN_DEFINES}) 88 89 # For find_library 90 set(CMAKE_INCLUDE_PATH ${CONAN_INCLUDE_DIRS} ${CMAKE_INCLUDE_PATH}) 91 set(CMAKE_LIBRARY_PATH ${CONAN_LIB_DIRS} ${CMAKE_LIBRARY_PATH}) 92 93 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CONAN_CXX_FLAGS}") 94 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CONAN_C_FLAGS}") 95 set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${CONAN_SHARED_LINKER_FLAGS}") 96 set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${CONAN_EXE_LINKER_FLAGS}") 97 98 if(APPLE) 99 # https://cmake.org/Wiki/CMake_RPATH_handling 100 # CONAN GUIDE: All generated libraries should have the id and dependencies to other 101 # dylibs without path, just the name, EX: 102 # libMyLib1.dylib: 103 # libMyLib1.dylib (compatibility version 0.0.0, current version 0.0.0) 104 # libMyLib0.dylib (compatibility version 0.0.0, current version 0.0.0) 105 # /usr/lib/libc++.1.dylib (compatibility version 1.0.0, current version 120.0.0) 106 # /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1197.1.1) 107 set(CMAKE_SKIP_RPATH 1) # AVOID RPATH FOR *.dylib, ALL LIBS BETWEEN THEM AND THE EXE 108 # SHOULD BE ON THE LINKER RESOLVER PATH (./ IS ONE OF THEM) 109 endif() 110 if(CONAN_LINK_RUNTIME) 111 string(REPLACE "/MD" ${CONAN_LINK_RUNTIME} CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE}) 112 string(REPLACE "/MDd" ${CONAN_LINK_RUNTIME} CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG}) 113 string(REPLACE "/MD" ${CONAN_LINK_RUNTIME} CMAKE_C_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE}) 114 string(REPLACE "/MDd" ${CONAN_LINK_RUNTIME} CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG}) 115 endif() 116 endmacro() 117 118 macro(CONAN_OUTPUT_DIRS_SETUP) 119 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/bin) 120 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) 121 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) 122 123 set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib) 124 set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}) 125 set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}) 126 endmacro() 127 128 macro(CONAN_SPLIT_VERSION VERSION_STRING MAJOR MINOR) 129 #make a list from the version string 130 string(REPLACE "." 
";" VERSION_LIST ${${VERSION_STRING}}) 131 132 #write output values 133 list(GET VERSION_LIST 0 ${MAJOR}) 134 list(GET VERSION_LIST 1 ${MINOR}) 135 endmacro() 136 137 macro(ERROR_COMPILER_VERSION) 138 message(FATAL_ERROR "Incorrect '${CONAN_COMPILER}' version 'compiler.version=${CONAN_COMPILER_VERSION}'" 139 " is not the one detected by CMake: '${CMAKE_CXX_COMPILER_ID}="${VERSION_MAJOR}.${VERSION_MINOR}') 140 endmacro() 141 142 macro(CHECK_COMPILER_VERSION) 143 144 CONAN_SPLIT_VERSION(CMAKE_CXX_COMPILER_VERSION VERSION_MAJOR VERSION_MINOR) 145 146 if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") 147 # https://cmake.org/cmake/help/v3.2/variable/MSVC_VERSION.html 148 if( (${CONAN_COMPILER_VERSION} STREQUAL "14" AND NOT ${VERSION_MAJOR} STREQUAL "19") OR 149 (${CONAN_COMPILER_VERSION} STREQUAL "12" AND NOT ${VERSION_MAJOR} STREQUAL "18") OR 150 (${CONAN_COMPILER_VERSION} STREQUAL "11" AND NOT ${VERSION_MAJOR} STREQUAL "17") OR 151 (${CONAN_COMPILER_VERSION} STREQUAL "10" AND NOT ${VERSION_MAJOR} STREQUAL "16") OR 152 (${CONAN_COMPILER_VERSION} STREQUAL "9" AND NOT ${VERSION_MAJOR} STREQUAL "15") OR 153 (${CONAN_COMPILER_VERSION} STREQUAL "8" AND NOT ${VERSION_MAJOR} STREQUAL "14") OR 154 (${CONAN_COMPILER_VERSION} STREQUAL "7" AND NOT ${VERSION_MAJOR} STREQUAL "13") OR 155 (${CONAN_COMPILER_VERSION} STREQUAL "6" AND NOT ${VERSION_MAJOR} STREQUAL "12") ) 156 ERROR_COMPILER_VERSION() 157 endif() 158 elseif("${CONAN_COMPILER}" STREQUAL "gcc" OR "${CONAN_COMPILER}" MATCHES "Clang") 159 if(NOT ${VERSION_MAJOR}.${VERSION_MINOR} VERSION_EQUAL "${CONAN_COMPILER_VERSION}") 160 ERROR_COMPILER_VERSION() 161 endif() 162 else() 163 message("Skipping version checking of not detected compiler...") 164 endif() 165 endmacro() 166 167 macro(CONAN_CHECK_COMPILER) 168 if( ("${CONAN_COMPILER}" STREQUAL "Visual Studio" AND NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") OR 169 ("${CONAN_COMPILER}" STREQUAL "gcc" AND NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") OR 170 ("${CONAN_COMPILER}" STREQUAL "apple-clang" AND (NOT APPLE OR NOT ${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")) OR 171 ("${CONAN_COMPILER}" STREQUAL "clang" AND NOT ${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") ) 172 message(FATAL_ERROR "Incorrect '${CONAN_COMPILER}', is not the one detected by CMake: '${CMAKE_CXX_COMPILER_ID}'") 173 endif() 174 CHECK_COMPILER_VERSION() 175 endmacro() 176 """ 177 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conans/client/generators/cmake.py b/conans/client/generators/cmake.py --- a/conans/client/generators/cmake.py +++ b/conans/client/generators/cmake.py @@ -82,7 +82,7 @@ endmacro() macro(CONAN_FLAGS_SETUP) - include_directories(${CONAN_INCLUDE_DIRS}) + include_directories(SYSTEM ${CONAN_INCLUDE_DIRS}) link_directories(${CONAN_LIB_DIRS}) add_definitions(${CONAN_DEFINES})
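A quick way to observe the effect of the one-line change above is to inspect the generated build-info file after running the generator. The sketch below is a hypothetical check, assuming the output has already been written to `conanbuildinfo.cmake` in the current directory (file name and location are assumptions for illustration).

```python
# Hypothetical sanity check: the generated CMake file should now mark the Conan
# include directories as SYSTEM, so GCC/Clang suppress warnings from those headers.
from pathlib import Path

content = Path("conanbuildinfo.cmake").read_text()
assert "include_directories(SYSTEM ${CONAN_INCLUDE_DIRS})" in content, (
    "generator output still uses plain include_directories for Conan include dirs"
)
print("Conan include dirs are marked as SYSTEM")
```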
{"golden_diff": "diff --git a/conans/client/generators/cmake.py b/conans/client/generators/cmake.py\n--- a/conans/client/generators/cmake.py\n+++ b/conans/client/generators/cmake.py\n@@ -82,7 +82,7 @@\n endmacro()\n \n macro(CONAN_FLAGS_SETUP)\n- include_directories(${CONAN_INCLUDE_DIRS})\n+ include_directories(SYSTEM ${CONAN_INCLUDE_DIRS})\n link_directories(${CONAN_LIB_DIRS})\n add_definitions(${CONAN_DEFINES})\n", "issue": "mark headers as \"SYSTEM\" headers to silence warnings\nMany libraries generate tons of warnings in public headers. WebSocket++ uses auto_ptr for example and many Boost libraries truncate integers implicitly (-Wconversion). To consume these libraries you have to treat them as system headers because GCC won't emit warnings in these.\n\nThis is how Conan currently sets the include directories:\n\n``` CMake\ninclude_directories(${CONAN_INCLUDE_DIRS})\n```\n\nThis is how you would add them as \"system\" headers to silence warnings:\n\n``` CMake\ninclude_directories(SYSTEM ${CONAN_INCLUDE_DIRS})\n```\n\nIs there a reason it is not already done this way?\nThis issue may apply to configurations other than CMake/GCC, too, but this is the most important one for me.\n\n", "before_files": [{"content": "from conans.model import Generator\nfrom conans.paths import BUILD_INFO_CMAKE\n\n\nclass DepsCppCmake(object):\n def __init__(self, deps_cpp_info):\n self.include_paths = \"\\n\\t\\t\\t\".join('\"%s\"' % p.replace(\"\\\\\", \"/\")\n for p in deps_cpp_info.include_paths)\n self.lib_paths = \"\\n\\t\\t\\t\".join('\"%s\"' % p.replace(\"\\\\\", \"/\")\n for p in deps_cpp_info.lib_paths)\n self.libs = \" \".join(deps_cpp_info.libs)\n self.defines = \"\\n\\t\\t\\t\".join(\"-D%s\" % d for d in deps_cpp_info.defines)\n self.cppflags = \" \".join(deps_cpp_info.cppflags)\n self.cflags = \" \".join(deps_cpp_info.cflags)\n self.sharedlinkflags = \" \".join(deps_cpp_info.sharedlinkflags)\n self.exelinkflags = \" \".join(deps_cpp_info.exelinkflags)\n self.bin_paths = \"\\n\\t\\t\\t\".join('\"%s\"' % p.replace(\"\\\\\", \"/\")\n for p in deps_cpp_info.bin_paths)\n\n self.rootpath = '\"%s\"' % deps_cpp_info.rootpath.replace(\"\\\\\", \"/\")\n\n\nclass CMakeGenerator(Generator):\n @property\n def filename(self):\n return BUILD_INFO_CMAKE\n\n @property\n def content(self):\n sections = []\n\n # DEPS VARIABLES\n template_dep = ('set(CONAN_{dep}_ROOT {deps.rootpath})\\n'\n 'set(CONAN_INCLUDE_DIRS_{dep} {deps.include_paths})\\n'\n 'set(CONAN_LIB_DIRS_{dep} {deps.lib_paths})\\n'\n 'set(CONAN_BIN_DIRS_{dep} {deps.bin_paths})\\n'\n 'set(CONAN_LIBS_{dep} {deps.libs})\\n'\n 'set(CONAN_DEFINES_{dep} {deps.defines})\\n'\n 'set(CONAN_CXX_FLAGS_{dep} \"{deps.cppflags}\")\\n'\n 'set(CONAN_SHARED_LINKER_FLAGS_{dep} \"{deps.sharedlinkflags}\")\\n'\n 'set(CONAN_EXE_LINKER_FLAGS_{dep} \"{deps.exelinkflags}\")\\n'\n 'set(CONAN_C_FLAGS_{dep} \"{deps.cflags}\")\\n')\n\n for dep_name, dep_cpp_info in self.deps_build_info.dependencies:\n deps = DepsCppCmake(dep_cpp_info)\n dep_flags = template_dep.format(dep=dep_name.upper(),\n deps=deps)\n sections.append(dep_flags)\n\n # GENERAL VARIABLES\n deps = DepsCppCmake(self.deps_build_info)\n\n template = ('set(CONAN_INCLUDE_DIRS {deps.include_paths} ${{CONAN_INCLUDE_DIRS}})\\n'\n 'set(CONAN_LIB_DIRS {deps.lib_paths} ${{CONAN_LIB_DIRS}})\\n'\n 'set(CONAN_BIN_DIRS {deps.bin_paths} ${{CONAN_BIN_DIRS}})\\n'\n 'set(CONAN_LIBS {deps.libs} ${{CONAN_LIBS}})\\n'\n 'set(CONAN_DEFINES {deps.defines} ${{CONAN_DEFINES}})\\n'\n 'set(CONAN_CXX_FLAGS \"{deps.cppflags} 
${{CONAN_CXX_FLAGS}}\")\\n'\n 'set(CONAN_SHARED_LINKER_FLAGS \"{deps.sharedlinkflags} ${{CONAN_SHARED_LINKER_FLAGS}}\")\\n'\n 'set(CONAN_EXE_LINKER_FLAGS \"{deps.exelinkflags} ${{CONAN_EXE_LINKER_FLAGS}}\")\\n'\n 'set(CONAN_C_FLAGS \"{deps.cflags} ${{CONAN_C_FLAGS}}\")\\n'\n 'set(CONAN_CMAKE_MODULE_PATH {module_paths} ${{CONAN_CMAKE_MODULE_PATH}})')\n\n rootpaths = [DepsCppCmake(dep_cpp_info).rootpath for _, dep_cpp_info\n in self.deps_build_info.dependencies]\n module_paths = \" \".join(rootpaths)\n all_flags = template.format(deps=deps, module_paths=module_paths)\n sections.append(all_flags)\n\n # MACROS\n sections.append(self._aux_cmake_test_setup())\n\n return \"\\n\".join(sections)\n\n def _aux_cmake_test_setup(self):\n return \"\"\"macro(CONAN_BASIC_SETUP)\n conan_check_compiler()\n conan_output_dirs_setup()\n conan_flags_setup()\n # CMake can find findXXX.cmake files in the root of packages\n set(CMAKE_MODULE_PATH ${CONAN_CMAKE_MODULE_PATH} ${CMAKE_MODULE_PATH})\nendmacro()\n\nmacro(CONAN_FLAGS_SETUP)\n include_directories(${CONAN_INCLUDE_DIRS})\n link_directories(${CONAN_LIB_DIRS})\n add_definitions(${CONAN_DEFINES})\n\n # For find_library\n set(CMAKE_INCLUDE_PATH ${CONAN_INCLUDE_DIRS} ${CMAKE_INCLUDE_PATH})\n set(CMAKE_LIBRARY_PATH ${CONAN_LIB_DIRS} ${CMAKE_LIBRARY_PATH})\n\n set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${CONAN_CXX_FLAGS}\")\n set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} ${CONAN_C_FLAGS}\")\n set(CMAKE_SHARED_LINKER_FLAGS \"${CMAKE_SHARED_LINKER_FLAGS} ${CONAN_SHARED_LINKER_FLAGS}\")\n set(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} ${CONAN_EXE_LINKER_FLAGS}\")\n\n if(APPLE)\n # https://cmake.org/Wiki/CMake_RPATH_handling\n # CONAN GUIDE: All generated libraries should have the id and dependencies to other\n # dylibs without path, just the name, EX:\n # libMyLib1.dylib:\n # libMyLib1.dylib (compatibility version 0.0.0, current version 0.0.0)\n # libMyLib0.dylib (compatibility version 0.0.0, current version 0.0.0)\n # /usr/lib/libc++.1.dylib (compatibility version 1.0.0, current version 120.0.0)\n # /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1197.1.1)\n set(CMAKE_SKIP_RPATH 1) # AVOID RPATH FOR *.dylib, ALL LIBS BETWEEN THEM AND THE EXE\n # SHOULD BE ON THE LINKER RESOLVER PATH (./ IS ONE OF THEM)\n endif()\n if(CONAN_LINK_RUNTIME)\n string(REPLACE \"/MD\" ${CONAN_LINK_RUNTIME} CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE})\n string(REPLACE \"/MDd\" ${CONAN_LINK_RUNTIME} CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG})\n string(REPLACE \"/MD\" ${CONAN_LINK_RUNTIME} CMAKE_C_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE})\n string(REPLACE \"/MDd\" ${CONAN_LINK_RUNTIME} CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG})\n endif()\nendmacro()\n\nmacro(CONAN_OUTPUT_DIRS_SETUP)\n set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/bin)\n set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})\n set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})\n\n set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)\n set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY})\n set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY})\nendmacro()\n\nmacro(CONAN_SPLIT_VERSION VERSION_STRING MAJOR MINOR)\n #make a list from the version string\n string(REPLACE \".\" \";\" VERSION_LIST ${${VERSION_STRING}})\n\n #write output values\n list(GET VERSION_LIST 0 ${MAJOR})\n list(GET VERSION_LIST 1 ${MINOR})\nendmacro()\n\nmacro(ERROR_COMPILER_VERSION)\n message(FATAL_ERROR 
\"Incorrect '${CONAN_COMPILER}' version 'compiler.version=${CONAN_COMPILER_VERSION}'\"\n \" is not the one detected by CMake: '${CMAKE_CXX_COMPILER_ID}=\"${VERSION_MAJOR}.${VERSION_MINOR}')\nendmacro()\n\nmacro(CHECK_COMPILER_VERSION)\n\n CONAN_SPLIT_VERSION(CMAKE_CXX_COMPILER_VERSION VERSION_MAJOR VERSION_MINOR)\n\n if(\"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"MSVC\")\n # https://cmake.org/cmake/help/v3.2/variable/MSVC_VERSION.html\n if( (${CONAN_COMPILER_VERSION} STREQUAL \"14\" AND NOT ${VERSION_MAJOR} STREQUAL \"19\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"12\" AND NOT ${VERSION_MAJOR} STREQUAL \"18\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"11\" AND NOT ${VERSION_MAJOR} STREQUAL \"17\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"10\" AND NOT ${VERSION_MAJOR} STREQUAL \"16\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"9\" AND NOT ${VERSION_MAJOR} STREQUAL \"15\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"8\" AND NOT ${VERSION_MAJOR} STREQUAL \"14\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"7\" AND NOT ${VERSION_MAJOR} STREQUAL \"13\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"6\" AND NOT ${VERSION_MAJOR} STREQUAL \"12\") )\n ERROR_COMPILER_VERSION()\n endif()\n elseif(\"${CONAN_COMPILER}\" STREQUAL \"gcc\" OR \"${CONAN_COMPILER}\" MATCHES \"Clang\")\n if(NOT ${VERSION_MAJOR}.${VERSION_MINOR} VERSION_EQUAL \"${CONAN_COMPILER_VERSION}\")\n ERROR_COMPILER_VERSION()\n endif()\n else()\n message(\"Skipping version checking of not detected compiler...\")\n endif()\nendmacro()\n\nmacro(CONAN_CHECK_COMPILER)\n if( (\"${CONAN_COMPILER}\" STREQUAL \"Visual Studio\" AND NOT \"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"MSVC\") OR\n (\"${CONAN_COMPILER}\" STREQUAL \"gcc\" AND NOT \"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"GNU\") OR\n (\"${CONAN_COMPILER}\" STREQUAL \"apple-clang\" AND (NOT APPLE OR NOT ${CMAKE_CXX_COMPILER_ID} MATCHES \"Clang\")) OR\n (\"${CONAN_COMPILER}\" STREQUAL \"clang\" AND NOT ${CMAKE_CXX_COMPILER_ID} MATCHES \"Clang\") )\n message(FATAL_ERROR \"Incorrect '${CONAN_COMPILER}', is not the one detected by CMake: '${CMAKE_CXX_COMPILER_ID}'\")\n endif()\n CHECK_COMPILER_VERSION()\nendmacro()\n\"\"\"\n", "path": "conans/client/generators/cmake.py"}], "after_files": [{"content": "from conans.model import Generator\nfrom conans.paths import BUILD_INFO_CMAKE\n\n\nclass DepsCppCmake(object):\n def __init__(self, deps_cpp_info):\n self.include_paths = \"\\n\\t\\t\\t\".join('\"%s\"' % p.replace(\"\\\\\", \"/\")\n for p in deps_cpp_info.include_paths)\n self.lib_paths = \"\\n\\t\\t\\t\".join('\"%s\"' % p.replace(\"\\\\\", \"/\")\n for p in deps_cpp_info.lib_paths)\n self.libs = \" \".join(deps_cpp_info.libs)\n self.defines = \"\\n\\t\\t\\t\".join(\"-D%s\" % d for d in deps_cpp_info.defines)\n self.cppflags = \" \".join(deps_cpp_info.cppflags)\n self.cflags = \" \".join(deps_cpp_info.cflags)\n self.sharedlinkflags = \" \".join(deps_cpp_info.sharedlinkflags)\n self.exelinkflags = \" \".join(deps_cpp_info.exelinkflags)\n self.bin_paths = \"\\n\\t\\t\\t\".join('\"%s\"' % p.replace(\"\\\\\", \"/\")\n for p in deps_cpp_info.bin_paths)\n\n self.rootpath = '\"%s\"' % deps_cpp_info.rootpath.replace(\"\\\\\", \"/\")\n\n\nclass CMakeGenerator(Generator):\n @property\n def filename(self):\n return BUILD_INFO_CMAKE\n\n @property\n def content(self):\n sections = []\n\n # DEPS VARIABLES\n template_dep = ('set(CONAN_{dep}_ROOT {deps.rootpath})\\n'\n 'set(CONAN_INCLUDE_DIRS_{dep} {deps.include_paths})\\n'\n 'set(CONAN_LIB_DIRS_{dep} {deps.lib_paths})\\n'\n 'set(CONAN_BIN_DIRS_{dep} 
{deps.bin_paths})\\n'\n 'set(CONAN_LIBS_{dep} {deps.libs})\\n'\n 'set(CONAN_DEFINES_{dep} {deps.defines})\\n'\n 'set(CONAN_CXX_FLAGS_{dep} \"{deps.cppflags}\")\\n'\n 'set(CONAN_SHARED_LINKER_FLAGS_{dep} \"{deps.sharedlinkflags}\")\\n'\n 'set(CONAN_EXE_LINKER_FLAGS_{dep} \"{deps.exelinkflags}\")\\n'\n 'set(CONAN_C_FLAGS_{dep} \"{deps.cflags}\")\\n')\n\n for dep_name, dep_cpp_info in self.deps_build_info.dependencies:\n deps = DepsCppCmake(dep_cpp_info)\n dep_flags = template_dep.format(dep=dep_name.upper(),\n deps=deps)\n sections.append(dep_flags)\n\n # GENERAL VARIABLES\n deps = DepsCppCmake(self.deps_build_info)\n\n template = ('set(CONAN_INCLUDE_DIRS {deps.include_paths} ${{CONAN_INCLUDE_DIRS}})\\n'\n 'set(CONAN_LIB_DIRS {deps.lib_paths} ${{CONAN_LIB_DIRS}})\\n'\n 'set(CONAN_BIN_DIRS {deps.bin_paths} ${{CONAN_BIN_DIRS}})\\n'\n 'set(CONAN_LIBS {deps.libs} ${{CONAN_LIBS}})\\n'\n 'set(CONAN_DEFINES {deps.defines} ${{CONAN_DEFINES}})\\n'\n 'set(CONAN_CXX_FLAGS \"{deps.cppflags} ${{CONAN_CXX_FLAGS}}\")\\n'\n 'set(CONAN_SHARED_LINKER_FLAGS \"{deps.sharedlinkflags} ${{CONAN_SHARED_LINKER_FLAGS}}\")\\n'\n 'set(CONAN_EXE_LINKER_FLAGS \"{deps.exelinkflags} ${{CONAN_EXE_LINKER_FLAGS}}\")\\n'\n 'set(CONAN_C_FLAGS \"{deps.cflags} ${{CONAN_C_FLAGS}}\")\\n'\n 'set(CONAN_CMAKE_MODULE_PATH {module_paths} ${{CONAN_CMAKE_MODULE_PATH}})')\n\n rootpaths = [DepsCppCmake(dep_cpp_info).rootpath for _, dep_cpp_info\n in self.deps_build_info.dependencies]\n module_paths = \" \".join(rootpaths)\n all_flags = template.format(deps=deps, module_paths=module_paths)\n sections.append(all_flags)\n\n # MACROS\n sections.append(self._aux_cmake_test_setup())\n\n return \"\\n\".join(sections)\n\n def _aux_cmake_test_setup(self):\n return \"\"\"macro(CONAN_BASIC_SETUP)\n conan_check_compiler()\n conan_output_dirs_setup()\n conan_flags_setup()\n # CMake can find findXXX.cmake files in the root of packages\n set(CMAKE_MODULE_PATH ${CONAN_CMAKE_MODULE_PATH} ${CMAKE_MODULE_PATH})\nendmacro()\n\nmacro(CONAN_FLAGS_SETUP)\n include_directories(SYSTEM ${CONAN_INCLUDE_DIRS})\n link_directories(${CONAN_LIB_DIRS})\n add_definitions(${CONAN_DEFINES})\n\n # For find_library\n set(CMAKE_INCLUDE_PATH ${CONAN_INCLUDE_DIRS} ${CMAKE_INCLUDE_PATH})\n set(CMAKE_LIBRARY_PATH ${CONAN_LIB_DIRS} ${CMAKE_LIBRARY_PATH})\n\n set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${CONAN_CXX_FLAGS}\")\n set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} ${CONAN_C_FLAGS}\")\n set(CMAKE_SHARED_LINKER_FLAGS \"${CMAKE_SHARED_LINKER_FLAGS} ${CONAN_SHARED_LINKER_FLAGS}\")\n set(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} ${CONAN_EXE_LINKER_FLAGS}\")\n\n if(APPLE)\n # https://cmake.org/Wiki/CMake_RPATH_handling\n # CONAN GUIDE: All generated libraries should have the id and dependencies to other\n # dylibs without path, just the name, EX:\n # libMyLib1.dylib:\n # libMyLib1.dylib (compatibility version 0.0.0, current version 0.0.0)\n # libMyLib0.dylib (compatibility version 0.0.0, current version 0.0.0)\n # /usr/lib/libc++.1.dylib (compatibility version 1.0.0, current version 120.0.0)\n # /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1197.1.1)\n set(CMAKE_SKIP_RPATH 1) # AVOID RPATH FOR *.dylib, ALL LIBS BETWEEN THEM AND THE EXE\n # SHOULD BE ON THE LINKER RESOLVER PATH (./ IS ONE OF THEM)\n endif()\n if(CONAN_LINK_RUNTIME)\n string(REPLACE \"/MD\" ${CONAN_LINK_RUNTIME} CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE})\n string(REPLACE \"/MDd\" ${CONAN_LINK_RUNTIME} CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG})\n string(REPLACE \"/MD\" 
${CONAN_LINK_RUNTIME} CMAKE_C_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE})\n string(REPLACE \"/MDd\" ${CONAN_LINK_RUNTIME} CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG})\n endif()\nendmacro()\n\nmacro(CONAN_OUTPUT_DIRS_SETUP)\n set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/bin)\n set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})\n set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})\n\n set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)\n set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY})\n set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY})\nendmacro()\n\nmacro(CONAN_SPLIT_VERSION VERSION_STRING MAJOR MINOR)\n #make a list from the version string\n string(REPLACE \".\" \";\" VERSION_LIST ${${VERSION_STRING}})\n\n #write output values\n list(GET VERSION_LIST 0 ${MAJOR})\n list(GET VERSION_LIST 1 ${MINOR})\nendmacro()\n\nmacro(ERROR_COMPILER_VERSION)\n message(FATAL_ERROR \"Incorrect '${CONAN_COMPILER}' version 'compiler.version=${CONAN_COMPILER_VERSION}'\"\n \" is not the one detected by CMake: '${CMAKE_CXX_COMPILER_ID}=\"${VERSION_MAJOR}.${VERSION_MINOR}')\nendmacro()\n\nmacro(CHECK_COMPILER_VERSION)\n\n CONAN_SPLIT_VERSION(CMAKE_CXX_COMPILER_VERSION VERSION_MAJOR VERSION_MINOR)\n\n if(\"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"MSVC\")\n # https://cmake.org/cmake/help/v3.2/variable/MSVC_VERSION.html\n if( (${CONAN_COMPILER_VERSION} STREQUAL \"14\" AND NOT ${VERSION_MAJOR} STREQUAL \"19\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"12\" AND NOT ${VERSION_MAJOR} STREQUAL \"18\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"11\" AND NOT ${VERSION_MAJOR} STREQUAL \"17\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"10\" AND NOT ${VERSION_MAJOR} STREQUAL \"16\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"9\" AND NOT ${VERSION_MAJOR} STREQUAL \"15\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"8\" AND NOT ${VERSION_MAJOR} STREQUAL \"14\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"7\" AND NOT ${VERSION_MAJOR} STREQUAL \"13\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"6\" AND NOT ${VERSION_MAJOR} STREQUAL \"12\") )\n ERROR_COMPILER_VERSION()\n endif()\n elseif(\"${CONAN_COMPILER}\" STREQUAL \"gcc\" OR \"${CONAN_COMPILER}\" MATCHES \"Clang\")\n if(NOT ${VERSION_MAJOR}.${VERSION_MINOR} VERSION_EQUAL \"${CONAN_COMPILER_VERSION}\")\n ERROR_COMPILER_VERSION()\n endif()\n else()\n message(\"Skipping version checking of not detected compiler...\")\n endif()\nendmacro()\n\nmacro(CONAN_CHECK_COMPILER)\n if( (\"${CONAN_COMPILER}\" STREQUAL \"Visual Studio\" AND NOT \"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"MSVC\") OR\n (\"${CONAN_COMPILER}\" STREQUAL \"gcc\" AND NOT \"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"GNU\") OR\n (\"${CONAN_COMPILER}\" STREQUAL \"apple-clang\" AND (NOT APPLE OR NOT ${CMAKE_CXX_COMPILER_ID} MATCHES \"Clang\")) OR\n (\"${CONAN_COMPILER}\" STREQUAL \"clang\" AND NOT ${CMAKE_CXX_COMPILER_ID} MATCHES \"Clang\") )\n message(FATAL_ERROR \"Incorrect '${CONAN_COMPILER}', is not the one detected by CMake: '${CMAKE_CXX_COMPILER_ID}'\")\n endif()\n CHECK_COMPILER_VERSION()\nendmacro()\n\"\"\"\n", "path": "conans/client/generators/cmake.py"}]}
3,136
113
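For readers skimming the generator code quoted in the record above: the per-dependency `set(CONAN_*_{dep} ...)` lines are produced by ordinary Python string formatting over a small adapter object. The sketch below illustrates that mechanism only; `FakeDeps` and the `zlib` values are invented for the example, and the real generator uses its `DepsCppCmake` adapter over each dependency's `cpp_info`.

```python
# Illustrative only: how the per-dependency template in the generator expands.
class FakeDeps:
    """Stand-in for DepsCppCmake, with made-up values for one dependency."""
    include_paths = '"/opt/zlib/include"'
    lib_paths = '"/opt/zlib/lib"'
    libs = 'z'
    defines = ''

template_dep = ('set(CONAN_INCLUDE_DIRS_{dep} {deps.include_paths})\n'
                'set(CONAN_LIB_DIRS_{dep} {deps.lib_paths})\n'
                'set(CONAN_LIBS_{dep} {deps.libs})\n'
                'set(CONAN_DEFINES_{dep} {deps.defines})\n')

# Each dependency name is upper-cased and substituted into {dep}, exactly as
# in the loop over self.deps_build_info.dependencies shown in the record.
print(template_dep.format(dep="ZLIB", deps=FakeDeps()))
```

Running this prints the `set(CONAN_INCLUDE_DIRS_ZLIB ...)` block that the generated `conan_basic_setup()` macro later consumes from CMake.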
gh_patches_debug_18289
rasdani/github-patches
git_diff
ivy-llc__ivy-15430
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- fold_in --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/jax/random.py` Content: ``` 1 # local 2 import ivy 3 from ivy.func_wrapper import with_unsupported_dtypes 4 from ivy.functional.frontends.jax.func_wrapper import ( 5 to_ivy_arrays_and_back, 6 handle_jax_dtype, 7 ) 8 9 10 @to_ivy_arrays_and_back 11 def PRNGKey(seed): 12 return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64) 13 14 15 @handle_jax_dtype 16 @to_ivy_arrays_and_back 17 def uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0): 18 return ivy.random_uniform( 19 low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]) 20 ) 21 22 23 @handle_jax_dtype 24 @to_ivy_arrays_and_back 25 def normal(key, shape=(), dtype=None): 26 return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])) 27 28 29 def _get_seed(key): 30 key1, key2 = int(key[0]), int(key[1]) 31 return ivy.to_scalar(int("".join(map(str, [key1, key2])))) 32 33 34 @handle_jax_dtype 35 @to_ivy_arrays_and_back 36 @with_unsupported_dtypes( 37 { 38 "0.3.14 and below": ( 39 "float16", 40 "bfloat16", 41 ) 42 }, 43 "jax", 44 ) 45 def beta(key, a, b, shape=None, dtype=None): 46 seed = _get_seed(key) 47 return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed) 48 49 50 @handle_jax_dtype 51 @to_ivy_arrays_and_back 52 @with_unsupported_dtypes( 53 { 54 "0.3.14 and below": ( 55 "float16", 56 "bfloat16", 57 ) 58 }, 59 "jax", 60 ) 61 def dirichlet(key, alpha, shape=None, dtype="float32"): 62 seed = _get_seed(key) 63 alpha = ivy.astype(alpha, dtype) 64 return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed) 65 66 67 @handle_jax_dtype 68 @to_ivy_arrays_and_back 69 def cauchy(key, shape=(), dtype="float64"): 70 seed = _get_seed(key) 71 u = ivy.random_uniform(low=0.0, high=1.0, shape=shape, dtype=dtype, seed=seed) 72 return ivy.tan(ivy.pi * (u - 0.5)) 73 74 75 @handle_jax_dtype 76 @to_ivy_arrays_and_back 77 @with_unsupported_dtypes( 78 {"0.3.14 and below": ("unsigned", "int8", "int16")}, 79 "jax", 80 ) 81 def poisson(key, lam, shape=None, dtype=None): 82 seed = _get_seed(key) 83 return ivy.poisson(lam, shape=shape, dtype=dtype, seed=seed) 84 85 86 @handle_jax_dtype 87 @to_ivy_arrays_and_back 88 @with_unsupported_dtypes( 89 { 90 "0.3.14 and below": ( 91 "float16", 92 "bfloat16", 93 ) 94 }, 95 "jax", 96 ) 97 def gamma(key, a, shape=None, dtype="float64"): 98 seed = _get_seed(key) 99 return ivy.gamma(a, 1.0, shape=shape, dtype=dtype, seed=seed) 100 101 102 @handle_jax_dtype 103 @to_ivy_arrays_and_back 104 @with_unsupported_dtypes( 105 { 106 "0.3.14 and below": ( 107 "float16", 108 "bfloat16", 109 ) 110 }, 111 "jax", 112 ) 113 def gumbel(key, shape=(), dtype="float64"): 114 seed = _get_seed(key) 115 uniform_x = ivy.random_uniform( 116 low=0.0, 117 high=1.0, 118 shape=shape, 119 dtype=dtype, 120 seed=seed, 121 ) 122 return -ivy.log(-ivy.log(uniform_x)) 123 124 125 @handle_jax_dtype 126 @to_ivy_arrays_and_back 127 @with_unsupported_dtypes( 128 {"0.3.14 and below": ("unsigned", "int8", "int16")}, 129 "jax", 130 ) 131 def rademacher(key, shape, dtype="int64"): 132 seed = _get_seed(key) 133 b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype="float32", seed=seed) 134 b = ivy.astype(b, dtype) 135 return 2 * b - 1 136 137 138 @handle_jax_dtype 139 @to_ivy_arrays_and_back 140 
@with_unsupported_dtypes( 141 { 142 "0.3.14 and below": ( 143 "float16", 144 "bfloat16", 145 ) 146 }, 147 "jax", 148 ) 149 def generalized_normal(key, p, shape=(), dtype="float64"): 150 seed = _get_seed(key) 151 g = ivy.gamma(1 / p, 1.0, shape=shape, dtype=dtype, seed=seed) 152 b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype=dtype, seed=seed) 153 r = 2 * b - 1 154 return r * g ** (1 / p) 155 156 157 def t(key, df, shape=(), dtype="float64"): 158 seed = _get_seed(key) 159 n = ivy.random_normal(shape=shape, dtype=dtype, seed=seed) 160 half_df = df / 2.0 161 g = ivy.gamma(half_df, 1.0, shape=shape, dtype=dtype, seed=seed) 162 return n * ivy.sqrt(ivy.divide(half_df, g)) 163 164 165 @handle_jax_dtype 166 @to_ivy_arrays_and_back 167 @with_unsupported_dtypes( 168 {"0.3.14 and below": ("unsigned", "int8", "int16")}, 169 "jax", 170 ) 171 def randint(key, shape, minval, maxval, dtype="int64"): 172 seed = _get_seed(key) 173 return ivy.randint(minval, maxval, shape=shape, dtype=dtype, seed=seed) 174 175 176 @to_ivy_arrays_and_back 177 def permutation(key, x, axis=0, independent=False): 178 x = ivy.array(x) 179 seed = _get_seed(key) 180 if not ivy.get_num_dims(x): 181 r = int(x) 182 return ivy.shuffle(ivy.arange(r), axis, seed=seed) 183 if independent: 184 return ivy.shuffle(x, axis, seed=seed) 185 rand = ivy.arange(x.shape[axis]) 186 ind = ivy.shuffle(rand, 0, seed=seed) 187 188 return ivy.gather(x, ind, axis=axis) 189 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py --- a/ivy/functional/frontends/jax/random.py +++ b/ivy/functional/frontends/jax/random.py @@ -172,6 +172,17 @@ seed = _get_seed(key) return ivy.randint(minval, maxval, shape=shape, dtype=dtype, seed=seed) +@to_ivy_arrays_and_back +def bernoulli(key, p=0.5, shape=None): + seed = _get_seed(key) + return ivy.bernoulli(p, shape=shape, seed=seed) + +@to_ivy_arrays_and_back +def fold_in(key, data): + s = ivy.bitwise_left_shift( + ivy.asarray(data, dtype=ivy.uint32), ivy.array(32, dtype=ivy.uint32) + ) + return ivy.bitwise_xor(key, s) @to_ivy_arrays_and_back def permutation(key, x, axis=0, independent=False): @@ -184,5 +195,4 @@ return ivy.shuffle(x, axis, seed=seed) rand = ivy.arange(x.shape[axis]) ind = ivy.shuffle(rand, 0, seed=seed) - return ivy.gather(x, ind, axis=axis)
{"golden_diff": "diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py\n--- a/ivy/functional/frontends/jax/random.py\n+++ b/ivy/functional/frontends/jax/random.py\n@@ -172,6 +172,17 @@\n seed = _get_seed(key)\n return ivy.randint(minval, maxval, shape=shape, dtype=dtype, seed=seed)\n \n+@to_ivy_arrays_and_back\n+def bernoulli(key, p=0.5, shape=None):\n+ seed = _get_seed(key)\n+ return ivy.bernoulli(p, shape=shape, seed=seed)\n+\n+@to_ivy_arrays_and_back\n+def fold_in(key, data):\n+ s = ivy.bitwise_left_shift(\n+ ivy.asarray(data, dtype=ivy.uint32), ivy.array(32, dtype=ivy.uint32)\n+ )\n+ return ivy.bitwise_xor(key, s)\n \n @to_ivy_arrays_and_back\n def permutation(key, x, axis=0, independent=False):\n@@ -184,5 +195,4 @@\n return ivy.shuffle(x, axis, seed=seed)\n rand = ivy.arange(x.shape[axis])\n ind = ivy.shuffle(rand, 0, seed=seed)\n-\n return ivy.gather(x, ind, axis=axis)\n", "issue": "fold_in\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_jax_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef PRNGKey(seed):\n return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):\n return ivy.random_uniform(\n low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])\n )\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef normal(key, shape=(), dtype=None):\n return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))\n\n\ndef _get_seed(key):\n key1, key2 = int(key[0]), int(key[1])\n return ivy.to_scalar(int(\"\".join(map(str, [key1, key2]))))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef beta(key, a, b, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef dirichlet(key, alpha, shape=None, dtype=\"float32\"):\n seed = _get_seed(key)\n alpha = ivy.astype(alpha, dtype)\n return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef cauchy(key, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n u = ivy.random_uniform(low=0.0, high=1.0, shape=shape, dtype=dtype, seed=seed)\n return ivy.tan(ivy.pi * (u - 0.5))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\"0.3.14 and below\": (\"unsigned\", \"int8\", \"int16\")},\n \"jax\",\n)\ndef poisson(key, lam, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.poisson(lam, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef gamma(key, a, shape=None, dtype=\"float64\"):\n seed = _get_seed(key)\n return ivy.gamma(a, 1.0, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef gumbel(key, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n uniform_x = ivy.random_uniform(\n low=0.0,\n 
high=1.0,\n shape=shape,\n dtype=dtype,\n seed=seed,\n )\n return -ivy.log(-ivy.log(uniform_x))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\"0.3.14 and below\": (\"unsigned\", \"int8\", \"int16\")},\n \"jax\",\n)\ndef rademacher(key, shape, dtype=\"int64\"):\n seed = _get_seed(key)\n b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype=\"float32\", seed=seed)\n b = ivy.astype(b, dtype)\n return 2 * b - 1\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef generalized_normal(key, p, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n g = ivy.gamma(1 / p, 1.0, shape=shape, dtype=dtype, seed=seed)\n b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype=dtype, seed=seed)\n r = 2 * b - 1\n return r * g ** (1 / p)\n\n\ndef t(key, df, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n n = ivy.random_normal(shape=shape, dtype=dtype, seed=seed)\n half_df = df / 2.0\n g = ivy.gamma(half_df, 1.0, shape=shape, dtype=dtype, seed=seed)\n return n * ivy.sqrt(ivy.divide(half_df, g))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\"0.3.14 and below\": (\"unsigned\", \"int8\", \"int16\")},\n \"jax\",\n)\ndef randint(key, shape, minval, maxval, dtype=\"int64\"):\n seed = _get_seed(key)\n return ivy.randint(minval, maxval, shape=shape, dtype=dtype, seed=seed)\n\n\n@to_ivy_arrays_and_back\ndef permutation(key, x, axis=0, independent=False):\n x = ivy.array(x)\n seed = _get_seed(key)\n if not ivy.get_num_dims(x):\n r = int(x)\n return ivy.shuffle(ivy.arange(r), axis, seed=seed)\n if independent:\n return ivy.shuffle(x, axis, seed=seed)\n rand = ivy.arange(x.shape[axis])\n ind = ivy.shuffle(rand, 0, seed=seed)\n\n return ivy.gather(x, ind, axis=axis)\n", "path": "ivy/functional/frontends/jax/random.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_jax_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef PRNGKey(seed):\n return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):\n return ivy.random_uniform(\n low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])\n )\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef normal(key, shape=(), dtype=None):\n return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))\n\n\ndef _get_seed(key):\n key1, key2 = int(key[0]), int(key[1])\n return ivy.to_scalar(int(\"\".join(map(str, [key1, key2]))))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef beta(key, a, b, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef dirichlet(key, alpha, shape=None, dtype=\"float32\"):\n seed = _get_seed(key)\n alpha = ivy.astype(alpha, dtype)\n return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef cauchy(key, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n u = ivy.random_uniform(low=0.0, 
high=1.0, shape=shape, dtype=dtype, seed=seed)\n return ivy.tan(ivy.pi * (u - 0.5))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\"0.3.14 and below\": (\"unsigned\", \"int8\", \"int16\")},\n \"jax\",\n)\ndef poisson(key, lam, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.poisson(lam, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef gamma(key, a, shape=None, dtype=\"float64\"):\n seed = _get_seed(key)\n return ivy.gamma(a, 1.0, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef gumbel(key, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n uniform_x = ivy.random_uniform(\n low=0.0,\n high=1.0,\n shape=shape,\n dtype=dtype,\n seed=seed,\n )\n return -ivy.log(-ivy.log(uniform_x))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\"0.3.14 and below\": (\"unsigned\", \"int8\", \"int16\")},\n \"jax\",\n)\ndef rademacher(key, shape, dtype=\"int64\"):\n seed = _get_seed(key)\n b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype=\"float32\", seed=seed)\n b = ivy.astype(b, dtype)\n return 2 * b - 1\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef generalized_normal(key, p, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n g = ivy.gamma(1 / p, 1.0, shape=shape, dtype=dtype, seed=seed)\n b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype=dtype, seed=seed)\n r = 2 * b - 1\n return r * g ** (1 / p)\n\n\ndef t(key, df, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n n = ivy.random_normal(shape=shape, dtype=dtype, seed=seed)\n half_df = df / 2.0\n g = ivy.gamma(half_df, 1.0, shape=shape, dtype=dtype, seed=seed)\n return n * ivy.sqrt(ivy.divide(half_df, g))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\"0.3.14 and below\": (\"unsigned\", \"int8\", \"int16\")},\n \"jax\",\n)\ndef randint(key, shape, minval, maxval, dtype=\"int64\"):\n seed = _get_seed(key)\n return ivy.randint(minval, maxval, shape=shape, dtype=dtype, seed=seed)\n\n@to_ivy_arrays_and_back\ndef bernoulli(key, p=0.5, shape=None):\n seed = _get_seed(key)\n return ivy.bernoulli(p, shape=shape, seed=seed)\n\n@to_ivy_arrays_and_back\ndef fold_in(key, data):\n s = ivy.bitwise_left_shift(\n ivy.asarray(data, dtype=ivy.uint32), ivy.array(32, dtype=ivy.uint32)\n )\n return ivy.bitwise_xor(key, s)\n\n@to_ivy_arrays_and_back\ndef permutation(key, x, axis=0, independent=False):\n x = ivy.array(x)\n seed = _get_seed(key)\n if not ivy.get_num_dims(x):\n r = int(x)\n return ivy.shuffle(ivy.arange(r), axis, seed=seed)\n if independent:\n return ivy.shuffle(x, axis, seed=seed)\n rand = ivy.arange(x.shape[axis])\n ind = ivy.shuffle(rand, 0, seed=seed)\n return ivy.gather(x, ind, axis=axis)\n", "path": "ivy/functional/frontends/jax/random.py"}]}
2,252
313
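The golden diff for this record adds a `fold_in` function to the JAX frontend that derives a new key by left-shifting `data` and XOR-ing it into `key`. The snippet below is a dependency-free sketch of that same shift-and-XOR folding on the frontend's `[0, seed]` key layout; it is meant only to show the intent of the patch, not to reproduce JAX's threefry-based `fold_in`, and the helper name is invented for the example.

```python
# Plain-Python sketch of the shift-and-XOR key folding added by the patch.
# Not JAX's real fold_in (which hashes with threefry); illustration only.

def fold_in_sketch(key, data):
    # `key` is a pair of integers, matching the frontend's PRNGKey layout.
    shifted = (data << 32) & 0xFFFFFFFFFFFFFFFF  # move `data` into the high bits
    return [k ^ shifted for k in key]            # mix it into both key words

key = [0, 42]                     # roughly what PRNGKey(42) yields in the frontend
print(fold_in_sketch(key, 7))     # deterministic new key derived from (key, 7)
print(fold_in_sketch(key, 8))     # a different data value gives a different key
```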
gh_patches_debug_23501
rasdani/github-patches
git_diff
hylang__hy-736
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Hy Repl's behaviour on multiline strings Hy Repl throws a `LexException` when attempting to write a multiline docstring. ``` => (defn a [] ... "This is a dummy fn that really does LexException: Could not identify the next token at line -1, column -1 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `hy/lex/parser.py` Content: ``` 1 # Copyright (c) 2013 Nicolas Dandrimont <[email protected]> 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a 4 # copy of this software and associated documentation files (the "Software"), 5 # to deal in the Software without restriction, including without limitation 6 # the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 # and/or sell copies of the Software, and to permit persons to whom the 8 # Software is furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 16 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 # DEALINGS IN THE SOFTWARE. 20 21 import sys 22 from functools import wraps 23 24 from rply import ParserGenerator 25 26 from hy.models.complex import HyComplex 27 from hy.models.cons import HyCons 28 from hy.models.dict import HyDict 29 from hy.models.expression import HyExpression 30 from hy.models.float import HyFloat 31 from hy.models.integer import HyInteger 32 from hy.models.keyword import HyKeyword 33 from hy.models.list import HyList 34 from hy.models.string import HyString 35 from hy.models.symbol import HySymbol 36 37 from .lexer import lexer 38 from .exceptions import LexException, PrematureEndOfInput 39 40 41 pg = ParserGenerator( 42 [rule.name for rule in lexer.rules] + ['$end'], 43 cache_id="hy_parser" 44 ) 45 46 47 def set_boundaries(fun): 48 @wraps(fun) 49 def wrapped(p): 50 start = p[0].source_pos 51 end = p[-1].source_pos 52 ret = fun(p) 53 ret.start_line = start.lineno 54 ret.start_column = start.colno 55 if start is not end: 56 ret.end_line = end.lineno 57 ret.end_column = end.colno 58 else: 59 ret.end_line = start.lineno 60 ret.end_column = start.colno + len(p[0].value) 61 return ret 62 return wrapped 63 64 65 def set_quote_boundaries(fun): 66 @wraps(fun) 67 def wrapped(p): 68 start = p[0].source_pos 69 ret = fun(p) 70 ret.start_line = start.lineno 71 ret.start_column = start.colno 72 ret.end_line = p[-1].end_line 73 ret.end_column = p[-1].end_column 74 return ret 75 return wrapped 76 77 78 @pg.production("main : HASHBANG real_main") 79 def main_hashbang(p): 80 return p[1] 81 82 83 @pg.production("main : real_main") 84 def main(p): 85 return p[0] 86 87 88 @pg.production("real_main : list_contents") 89 def real_main(p): 90 return p[0] 91 92 93 @pg.production("real_main : $end") 94 def real_main_empty(p): 95 return [] 96 97 98 def reject_spurious_dots(*items): 99 "Reject the spurious dots from items" 100 for list in items: 
101 for tok in list: 102 if tok == "." and type(tok) == HySymbol: 103 raise LexException("Malformed dotted list", 104 tok.start_line, tok.start_column) 105 106 107 @pg.production("paren : LPAREN list_contents RPAREN") 108 @set_boundaries 109 def paren(p): 110 cont = p[1] 111 112 # Dotted lists are expressions of the form 113 # (a b c . d) 114 # that evaluate to nested cons cells of the form 115 # (a . (b . (c . d))) 116 if len(cont) >= 3 and isinstance(cont[-2], HySymbol) and cont[-2] == ".": 117 118 reject_spurious_dots(cont[:-2], cont[-1:]) 119 120 if len(cont) == 3: 121 # Two-item dotted list: return the cons cell directly 122 return HyCons(cont[0], cont[2]) 123 else: 124 # Return a nested cons cell 125 return HyCons(cont[0], paren([p[0], cont[1:], p[2]])) 126 127 # Warn preemptively on a malformed dotted list. 128 # Only check for dots after the first item to allow for a potential 129 # attribute accessor shorthand 130 reject_spurious_dots(cont[1:]) 131 132 return HyExpression(p[1]) 133 134 135 @pg.production("paren : LPAREN RPAREN") 136 @set_boundaries 137 def empty_paren(p): 138 return HyExpression([]) 139 140 141 @pg.production("list_contents : term list_contents") 142 def list_contents(p): 143 return [p[0]] + p[1] 144 145 146 @pg.production("list_contents : term") 147 def list_contents_single(p): 148 return [p[0]] 149 150 151 @pg.production("term : identifier") 152 @pg.production("term : paren") 153 @pg.production("term : dict") 154 @pg.production("term : list") 155 @pg.production("term : string") 156 def term(p): 157 return p[0] 158 159 160 @pg.production("term : QUOTE term") 161 @set_quote_boundaries 162 def term_quote(p): 163 return HyExpression([HySymbol("quote"), p[1]]) 164 165 166 @pg.production("term : QUASIQUOTE term") 167 @set_quote_boundaries 168 def term_quasiquote(p): 169 return HyExpression([HySymbol("quasiquote"), p[1]]) 170 171 172 @pg.production("term : UNQUOTE term") 173 @set_quote_boundaries 174 def term_unquote(p): 175 return HyExpression([HySymbol("unquote"), p[1]]) 176 177 178 @pg.production("term : UNQUOTESPLICE term") 179 @set_quote_boundaries 180 def term_unquote_splice(p): 181 return HyExpression([HySymbol("unquote_splice"), p[1]]) 182 183 184 @pg.production("term : HASHREADER term") 185 @set_quote_boundaries 186 def hash_reader(p): 187 st = p[0].getstr()[1] 188 str_object = HyString(st) 189 expr = p[1] 190 return HyExpression([HySymbol("dispatch_reader_macro"), str_object, expr]) 191 192 193 @pg.production("dict : LCURLY list_contents RCURLY") 194 @set_boundaries 195 def t_dict(p): 196 return HyDict(p[1]) 197 198 199 @pg.production("dict : LCURLY RCURLY") 200 @set_boundaries 201 def empty_dict(p): 202 return HyDict([]) 203 204 205 @pg.production("list : LBRACKET list_contents RBRACKET") 206 @set_boundaries 207 def t_list(p): 208 return HyList(p[1]) 209 210 211 @pg.production("list : LBRACKET RBRACKET") 212 @set_boundaries 213 def t_empty_list(p): 214 return HyList([]) 215 216 217 if sys.version_info[0] >= 3: 218 def uni_hystring(s): 219 return HyString(eval(s)) 220 else: 221 def uni_hystring(s): 222 return HyString(eval('u'+s)) 223 224 225 @pg.production("string : STRING") 226 @set_boundaries 227 def t_string(p): 228 # remove trailing quote 229 s = p[0].value[:-1] 230 # get the header 231 header, s = s.split('"', 1) 232 # remove unicode marker 233 header = header.replace("u", "") 234 # build python string 235 s = header + '"""' + s + '"""' 236 return uni_hystring(s) 237 238 239 @pg.production("identifier : IDENTIFIER") 240 @set_boundaries 241 def 
t_identifier(p): 242 obj = p[0].value 243 244 try: 245 return HyInteger(obj) 246 except ValueError: 247 pass 248 249 try: 250 return HyFloat(obj) 251 except ValueError: 252 pass 253 254 if obj != 'j': 255 try: 256 return HyComplex(obj) 257 except ValueError: 258 pass 259 260 table = { 261 "true": "True", 262 "false": "False", 263 "nil": "None", 264 "null": "None", 265 } 266 267 if obj in table: 268 return HySymbol(table[obj]) 269 270 if obj.startswith(":"): 271 return HyKeyword(obj) 272 273 def mangle(p): 274 if p.startswith("*") and p.endswith("*") and p not in ("*", "**"): 275 p = p[1:-1].upper() 276 277 if "-" in p and p != "-": 278 p = p.replace("-", "_") 279 280 if p.endswith("?") and p != "?": 281 p = "is_%s" % (p[:-1]) 282 283 return p 284 285 obj = ".".join([mangle(part) for part in obj.split(".")]) 286 287 return HySymbol(obj) 288 289 290 @pg.error 291 def error_handler(token): 292 tokentype = token.gettokentype() 293 if tokentype == '$end': 294 raise PrematureEndOfInput("Premature end of input") 295 else: 296 raise LexException( 297 "Ran into a %s where it wasn't expected." % tokentype, 298 token.source_pos.lineno, token.source_pos.colno) 299 300 301 parser = pg.build() 302 ``` Path: `hy/lex/lexer.py` Content: ``` 1 # Copyright (c) 2013 Nicolas Dandrimont <[email protected]> 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a 4 # copy of this software and associated documentation files (the "Software"), 5 # to deal in the Software without restriction, including without limitation 6 # the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 # and/or sell copies of the Software, and to permit persons to whom the 8 # Software is furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 16 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 # DEALINGS IN THE SOFTWARE. 20 21 from rply import LexerGenerator 22 23 24 lg = LexerGenerator() 25 26 27 # A regexp for something that should end a quoting/unquoting operator 28 # i.e. a space or a closing brace/paren/curly 29 end_quote = r'(?![\s\)\]\}])' 30 31 32 lg.add('LPAREN', r'\(') 33 lg.add('RPAREN', r'\)') 34 lg.add('LBRACKET', r'\[') 35 lg.add('RBRACKET', r'\]') 36 lg.add('LCURLY', r'\{') 37 lg.add('RCURLY', r'\}') 38 lg.add('QUOTE', r'\'%s' % end_quote) 39 lg.add('QUASIQUOTE', r'`%s' % end_quote) 40 lg.add('UNQUOTESPLICE', r'~@%s' % end_quote) 41 lg.add('UNQUOTE', r'~%s' % end_quote) 42 lg.add('HASHBANG', r'#!.*[^\r\n]') 43 lg.add('HASHREADER', r'#.') 44 45 46 lg.add('STRING', r'''(?x) 47 (?:u|r|ur|ru)? # prefix 48 " # start string 49 (?: 50 | [^"\\] # non-quote or backslash 51 | \\. 
# or escaped single character 52 | \\x[0-9a-fA-F]{2} # or escaped raw character 53 | \\u[0-9a-fA-F]{4} # or unicode escape 54 | \\U[0-9a-fA-F]{8} # or long unicode escape 55 )* # one or more times 56 " # end string 57 ''') 58 59 60 lg.add('IDENTIFIER', r'[^()\[\]{}\'"\s;]+') 61 62 63 lg.ignore(r';.*(?=\r|\n|$)') 64 lg.ignore(r'\s+') 65 66 67 lexer = lg.build() 68 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/hy/lex/lexer.py b/hy/lex/lexer.py --- a/hy/lex/lexer.py +++ b/hy/lex/lexer.py @@ -42,8 +42,9 @@ lg.add('HASHBANG', r'#!.*[^\r\n]') lg.add('HASHREADER', r'#.') - -lg.add('STRING', r'''(?x) +# A regexp which matches incomplete strings, used to support +# multi-line strings in the interpreter +partial_string = r'''(?x) (?:u|r|ur|ru)? # prefix " # start string (?: @@ -53,9 +54,10 @@ | \\u[0-9a-fA-F]{4} # or unicode escape | \\U[0-9a-fA-F]{8} # or long unicode escape )* # one or more times - " # end string -''') +''' +lg.add('STRING', r'%s"' % partial_string) +lg.add('PARTIAL_STRING', partial_string) lg.add('IDENTIFIER', r'[^()\[\]{}\'"\s;]+') diff --git a/hy/lex/parser.py b/hy/lex/parser.py --- a/hy/lex/parser.py +++ b/hy/lex/parser.py @@ -236,6 +236,12 @@ return uni_hystring(s) [email protected]("string : PARTIAL_STRING") +def t_partial_string(p): + # Any unterminated string requires more input + raise PrematureEndOfInput("Premature end of input") + + @pg.production("identifier : IDENTIFIER") @set_boundaries def t_identifier(p):
{"golden_diff": "diff --git a/hy/lex/lexer.py b/hy/lex/lexer.py\n--- a/hy/lex/lexer.py\n+++ b/hy/lex/lexer.py\n@@ -42,8 +42,9 @@\n lg.add('HASHBANG', r'#!.*[^\\r\\n]')\n lg.add('HASHREADER', r'#.')\n \n-\n-lg.add('STRING', r'''(?x)\n+# A regexp which matches incomplete strings, used to support\n+# multi-line strings in the interpreter\n+partial_string = r'''(?x)\n (?:u|r|ur|ru)? # prefix\n \" # start string\n (?:\n@@ -53,9 +54,10 @@\n | \\\\u[0-9a-fA-F]{4} # or unicode escape\n | \\\\U[0-9a-fA-F]{8} # or long unicode escape\n )* # one or more times\n- \" # end string\n-''')\n+'''\n \n+lg.add('STRING', r'%s\"' % partial_string)\n+lg.add('PARTIAL_STRING', partial_string)\n \n lg.add('IDENTIFIER', r'[^()\\[\\]{}\\'\"\\s;]+')\n \ndiff --git a/hy/lex/parser.py b/hy/lex/parser.py\n--- a/hy/lex/parser.py\n+++ b/hy/lex/parser.py\n@@ -236,6 +236,12 @@\n return uni_hystring(s)\n \n \[email protected](\"string : PARTIAL_STRING\")\n+def t_partial_string(p):\n+ # Any unterminated string requires more input\n+ raise PrematureEndOfInput(\"Premature end of input\")\n+\n+\n @pg.production(\"identifier : IDENTIFIER\")\n @set_boundaries\n def t_identifier(p):\n", "issue": "Hy Repl's behaviour on multiline strings\nHy Repl throws a `LexException` when attempting to write a multiline docstring. \n\n```\n => (defn a []\n... \"This is a dummy fn that really does\nLexException: Could not identify the next token at line -1, column -1\n```\n\n", "before_files": [{"content": "# Copyright (c) 2013 Nicolas Dandrimont <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport sys\nfrom functools import wraps\n\nfrom rply import ParserGenerator\n\nfrom hy.models.complex import HyComplex\nfrom hy.models.cons import HyCons\nfrom hy.models.dict import HyDict\nfrom hy.models.expression import HyExpression\nfrom hy.models.float import HyFloat\nfrom hy.models.integer import HyInteger\nfrom hy.models.keyword import HyKeyword\nfrom hy.models.list import HyList\nfrom hy.models.string import HyString\nfrom hy.models.symbol import HySymbol\n\nfrom .lexer import lexer\nfrom .exceptions import LexException, PrematureEndOfInput\n\n\npg = ParserGenerator(\n [rule.name for rule in lexer.rules] + ['$end'],\n cache_id=\"hy_parser\"\n)\n\n\ndef set_boundaries(fun):\n @wraps(fun)\n def wrapped(p):\n start = p[0].source_pos\n end = p[-1].source_pos\n ret = fun(p)\n ret.start_line = start.lineno\n ret.start_column = start.colno\n if start is not end:\n ret.end_line = end.lineno\n ret.end_column = end.colno\n else:\n ret.end_line = start.lineno\n ret.end_column = start.colno + len(p[0].value)\n return ret\n return wrapped\n\n\ndef set_quote_boundaries(fun):\n @wraps(fun)\n def wrapped(p):\n start = p[0].source_pos\n ret = fun(p)\n ret.start_line = start.lineno\n ret.start_column = start.colno\n ret.end_line = p[-1].end_line\n ret.end_column = p[-1].end_column\n return ret\n return wrapped\n\n\[email protected](\"main : HASHBANG real_main\")\ndef main_hashbang(p):\n return p[1]\n\n\[email protected](\"main : real_main\")\ndef main(p):\n return p[0]\n\n\[email protected](\"real_main : list_contents\")\ndef real_main(p):\n return p[0]\n\n\[email protected](\"real_main : $end\")\ndef real_main_empty(p):\n return []\n\n\ndef reject_spurious_dots(*items):\n \"Reject the spurious dots from items\"\n for list in items:\n for tok in list:\n if tok == \".\" and type(tok) == HySymbol:\n raise LexException(\"Malformed dotted list\",\n tok.start_line, tok.start_column)\n\n\[email protected](\"paren : LPAREN list_contents RPAREN\")\n@set_boundaries\ndef paren(p):\n cont = p[1]\n\n # Dotted lists are expressions of the form\n # (a b c . d)\n # that evaluate to nested cons cells of the form\n # (a . (b . (c . 
d)))\n if len(cont) >= 3 and isinstance(cont[-2], HySymbol) and cont[-2] == \".\":\n\n reject_spurious_dots(cont[:-2], cont[-1:])\n\n if len(cont) == 3:\n # Two-item dotted list: return the cons cell directly\n return HyCons(cont[0], cont[2])\n else:\n # Return a nested cons cell\n return HyCons(cont[0], paren([p[0], cont[1:], p[2]]))\n\n # Warn preemptively on a malformed dotted list.\n # Only check for dots after the first item to allow for a potential\n # attribute accessor shorthand\n reject_spurious_dots(cont[1:])\n\n return HyExpression(p[1])\n\n\[email protected](\"paren : LPAREN RPAREN\")\n@set_boundaries\ndef empty_paren(p):\n return HyExpression([])\n\n\[email protected](\"list_contents : term list_contents\")\ndef list_contents(p):\n return [p[0]] + p[1]\n\n\[email protected](\"list_contents : term\")\ndef list_contents_single(p):\n return [p[0]]\n\n\[email protected](\"term : identifier\")\[email protected](\"term : paren\")\[email protected](\"term : dict\")\[email protected](\"term : list\")\[email protected](\"term : string\")\ndef term(p):\n return p[0]\n\n\[email protected](\"term : QUOTE term\")\n@set_quote_boundaries\ndef term_quote(p):\n return HyExpression([HySymbol(\"quote\"), p[1]])\n\n\[email protected](\"term : QUASIQUOTE term\")\n@set_quote_boundaries\ndef term_quasiquote(p):\n return HyExpression([HySymbol(\"quasiquote\"), p[1]])\n\n\[email protected](\"term : UNQUOTE term\")\n@set_quote_boundaries\ndef term_unquote(p):\n return HyExpression([HySymbol(\"unquote\"), p[1]])\n\n\[email protected](\"term : UNQUOTESPLICE term\")\n@set_quote_boundaries\ndef term_unquote_splice(p):\n return HyExpression([HySymbol(\"unquote_splice\"), p[1]])\n\n\[email protected](\"term : HASHREADER term\")\n@set_quote_boundaries\ndef hash_reader(p):\n st = p[0].getstr()[1]\n str_object = HyString(st)\n expr = p[1]\n return HyExpression([HySymbol(\"dispatch_reader_macro\"), str_object, expr])\n\n\[email protected](\"dict : LCURLY list_contents RCURLY\")\n@set_boundaries\ndef t_dict(p):\n return HyDict(p[1])\n\n\[email protected](\"dict : LCURLY RCURLY\")\n@set_boundaries\ndef empty_dict(p):\n return HyDict([])\n\n\[email protected](\"list : LBRACKET list_contents RBRACKET\")\n@set_boundaries\ndef t_list(p):\n return HyList(p[1])\n\n\[email protected](\"list : LBRACKET RBRACKET\")\n@set_boundaries\ndef t_empty_list(p):\n return HyList([])\n\n\nif sys.version_info[0] >= 3:\n def uni_hystring(s):\n return HyString(eval(s))\nelse:\n def uni_hystring(s):\n return HyString(eval('u'+s))\n\n\[email protected](\"string : STRING\")\n@set_boundaries\ndef t_string(p):\n # remove trailing quote\n s = p[0].value[:-1]\n # get the header\n header, s = s.split('\"', 1)\n # remove unicode marker\n header = header.replace(\"u\", \"\")\n # build python string\n s = header + '\"\"\"' + s + '\"\"\"'\n return uni_hystring(s)\n\n\[email protected](\"identifier : IDENTIFIER\")\n@set_boundaries\ndef t_identifier(p):\n obj = p[0].value\n\n try:\n return HyInteger(obj)\n except ValueError:\n pass\n\n try:\n return HyFloat(obj)\n except ValueError:\n pass\n\n if obj != 'j':\n try:\n return HyComplex(obj)\n except ValueError:\n pass\n\n table = {\n \"true\": \"True\",\n \"false\": \"False\",\n \"nil\": \"None\",\n \"null\": \"None\",\n }\n\n if obj in table:\n return HySymbol(table[obj])\n\n if obj.startswith(\":\"):\n return HyKeyword(obj)\n\n def mangle(p):\n if p.startswith(\"*\") and p.endswith(\"*\") and p not in (\"*\", \"**\"):\n p = p[1:-1].upper()\n\n if \"-\" in p and p != \"-\":\n p = p.replace(\"-\", 
\"_\")\n\n if p.endswith(\"?\") and p != \"?\":\n p = \"is_%s\" % (p[:-1])\n\n return p\n\n obj = \".\".join([mangle(part) for part in obj.split(\".\")])\n\n return HySymbol(obj)\n\n\[email protected]\ndef error_handler(token):\n tokentype = token.gettokentype()\n if tokentype == '$end':\n raise PrematureEndOfInput(\"Premature end of input\")\n else:\n raise LexException(\n \"Ran into a %s where it wasn't expected.\" % tokentype,\n token.source_pos.lineno, token.source_pos.colno)\n\n\nparser = pg.build()\n", "path": "hy/lex/parser.py"}, {"content": "# Copyright (c) 2013 Nicolas Dandrimont <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom rply import LexerGenerator\n\n\nlg = LexerGenerator()\n\n\n# A regexp for something that should end a quoting/unquoting operator\n# i.e. a space or a closing brace/paren/curly\nend_quote = r'(?![\\s\\)\\]\\}])'\n\n\nlg.add('LPAREN', r'\\(')\nlg.add('RPAREN', r'\\)')\nlg.add('LBRACKET', r'\\[')\nlg.add('RBRACKET', r'\\]')\nlg.add('LCURLY', r'\\{')\nlg.add('RCURLY', r'\\}')\nlg.add('QUOTE', r'\\'%s' % end_quote)\nlg.add('QUASIQUOTE', r'`%s' % end_quote)\nlg.add('UNQUOTESPLICE', r'~@%s' % end_quote)\nlg.add('UNQUOTE', r'~%s' % end_quote)\nlg.add('HASHBANG', r'#!.*[^\\r\\n]')\nlg.add('HASHREADER', r'#.')\n\n\nlg.add('STRING', r'''(?x)\n (?:u|r|ur|ru)? # prefix\n \" # start string\n (?:\n | [^\"\\\\] # non-quote or backslash\n | \\\\. 
# or escaped single character\n | \\\\x[0-9a-fA-F]{2} # or escaped raw character\n | \\\\u[0-9a-fA-F]{4} # or unicode escape\n | \\\\U[0-9a-fA-F]{8} # or long unicode escape\n )* # one or more times\n \" # end string\n''')\n\n\nlg.add('IDENTIFIER', r'[^()\\[\\]{}\\'\"\\s;]+')\n\n\nlg.ignore(r';.*(?=\\r|\\n|$)')\nlg.ignore(r'\\s+')\n\n\nlexer = lg.build()\n", "path": "hy/lex/lexer.py"}], "after_files": [{"content": "# Copyright (c) 2013 Nicolas Dandrimont <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport sys\nfrom functools import wraps\n\nfrom rply import ParserGenerator\n\nfrom hy.models.complex import HyComplex\nfrom hy.models.cons import HyCons\nfrom hy.models.dict import HyDict\nfrom hy.models.expression import HyExpression\nfrom hy.models.float import HyFloat\nfrom hy.models.integer import HyInteger\nfrom hy.models.keyword import HyKeyword\nfrom hy.models.list import HyList\nfrom hy.models.string import HyString\nfrom hy.models.symbol import HySymbol\n\nfrom .lexer import lexer\nfrom .exceptions import LexException, PrematureEndOfInput\n\n\npg = ParserGenerator(\n [rule.name for rule in lexer.rules] + ['$end'],\n cache_id=\"hy_parser\"\n)\n\n\ndef set_boundaries(fun):\n @wraps(fun)\n def wrapped(p):\n start = p[0].source_pos\n end = p[-1].source_pos\n ret = fun(p)\n ret.start_line = start.lineno\n ret.start_column = start.colno\n if start is not end:\n ret.end_line = end.lineno\n ret.end_column = end.colno\n else:\n ret.end_line = start.lineno\n ret.end_column = start.colno + len(p[0].value)\n return ret\n return wrapped\n\n\ndef set_quote_boundaries(fun):\n @wraps(fun)\n def wrapped(p):\n start = p[0].source_pos\n ret = fun(p)\n ret.start_line = start.lineno\n ret.start_column = start.colno\n ret.end_line = p[-1].end_line\n ret.end_column = p[-1].end_column\n return ret\n return wrapped\n\n\[email protected](\"main : HASHBANG real_main\")\ndef main_hashbang(p):\n return p[1]\n\n\[email protected](\"main : real_main\")\ndef main(p):\n return p[0]\n\n\[email protected](\"real_main : list_contents\")\ndef real_main(p):\n return p[0]\n\n\[email protected](\"real_main : $end\")\ndef real_main_empty(p):\n return []\n\n\ndef reject_spurious_dots(*items):\n \"Reject the spurious dots from items\"\n for list in items:\n for tok in list:\n if tok == \".\" and type(tok) == HySymbol:\n raise LexException(\"Malformed dotted list\",\n tok.start_line, tok.start_column)\n\n\[email protected](\"paren : LPAREN list_contents RPAREN\")\n@set_boundaries\ndef 
paren(p):\n cont = p[1]\n\n # Dotted lists are expressions of the form\n # (a b c . d)\n # that evaluate to nested cons cells of the form\n # (a . (b . (c . d)))\n if len(cont) >= 3 and isinstance(cont[-2], HySymbol) and cont[-2] == \".\":\n\n reject_spurious_dots(cont[:-2], cont[-1:])\n\n if len(cont) == 3:\n # Two-item dotted list: return the cons cell directly\n return HyCons(cont[0], cont[2])\n else:\n # Return a nested cons cell\n return HyCons(cont[0], paren([p[0], cont[1:], p[2]]))\n\n # Warn preemptively on a malformed dotted list.\n # Only check for dots after the first item to allow for a potential\n # attribute accessor shorthand\n reject_spurious_dots(cont[1:])\n\n return HyExpression(p[1])\n\n\[email protected](\"paren : LPAREN RPAREN\")\n@set_boundaries\ndef empty_paren(p):\n return HyExpression([])\n\n\[email protected](\"list_contents : term list_contents\")\ndef list_contents(p):\n return [p[0]] + p[1]\n\n\[email protected](\"list_contents : term\")\ndef list_contents_single(p):\n return [p[0]]\n\n\[email protected](\"term : identifier\")\[email protected](\"term : paren\")\[email protected](\"term : dict\")\[email protected](\"term : list\")\[email protected](\"term : string\")\ndef term(p):\n return p[0]\n\n\[email protected](\"term : QUOTE term\")\n@set_quote_boundaries\ndef term_quote(p):\n return HyExpression([HySymbol(\"quote\"), p[1]])\n\n\[email protected](\"term : QUASIQUOTE term\")\n@set_quote_boundaries\ndef term_quasiquote(p):\n return HyExpression([HySymbol(\"quasiquote\"), p[1]])\n\n\[email protected](\"term : UNQUOTE term\")\n@set_quote_boundaries\ndef term_unquote(p):\n return HyExpression([HySymbol(\"unquote\"), p[1]])\n\n\[email protected](\"term : UNQUOTESPLICE term\")\n@set_quote_boundaries\ndef term_unquote_splice(p):\n return HyExpression([HySymbol(\"unquote_splice\"), p[1]])\n\n\[email protected](\"term : HASHREADER term\")\n@set_quote_boundaries\ndef hash_reader(p):\n st = p[0].getstr()[1]\n str_object = HyString(st)\n expr = p[1]\n return HyExpression([HySymbol(\"dispatch_reader_macro\"), str_object, expr])\n\n\[email protected](\"dict : LCURLY list_contents RCURLY\")\n@set_boundaries\ndef t_dict(p):\n return HyDict(p[1])\n\n\[email protected](\"dict : LCURLY RCURLY\")\n@set_boundaries\ndef empty_dict(p):\n return HyDict([])\n\n\[email protected](\"list : LBRACKET list_contents RBRACKET\")\n@set_boundaries\ndef t_list(p):\n return HyList(p[1])\n\n\[email protected](\"list : LBRACKET RBRACKET\")\n@set_boundaries\ndef t_empty_list(p):\n return HyList([])\n\n\nif sys.version_info[0] >= 3:\n def uni_hystring(s):\n return HyString(eval(s))\nelse:\n def uni_hystring(s):\n return HyString(eval('u'+s))\n\n\[email protected](\"string : STRING\")\n@set_boundaries\ndef t_string(p):\n # remove trailing quote\n s = p[0].value[:-1]\n # get the header\n header, s = s.split('\"', 1)\n # remove unicode marker\n header = header.replace(\"u\", \"\")\n # build python string\n s = header + '\"\"\"' + s + '\"\"\"'\n return uni_hystring(s)\n\n\[email protected](\"string : PARTIAL_STRING\")\ndef t_partial_string(p):\n # Any unterminated string requires more input\n raise PrematureEndOfInput(\"Premature end of input\")\n\n\[email protected](\"identifier : IDENTIFIER\")\n@set_boundaries\ndef t_identifier(p):\n obj = p[0].value\n\n try:\n return HyInteger(obj)\n except ValueError:\n pass\n\n try:\n return HyFloat(obj)\n except ValueError:\n pass\n\n if obj != 'j':\n try:\n return HyComplex(obj)\n except ValueError:\n pass\n\n table = {\n \"true\": \"True\",\n \"false\": 
\"False\",\n \"nil\": \"None\",\n \"null\": \"None\",\n }\n\n if obj in table:\n return HySymbol(table[obj])\n\n if obj.startswith(\":\"):\n return HyKeyword(obj)\n\n def mangle(p):\n if p.startswith(\"*\") and p.endswith(\"*\") and p not in (\"*\", \"**\"):\n p = p[1:-1].upper()\n\n if \"-\" in p and p != \"-\":\n p = p.replace(\"-\", \"_\")\n\n if p.endswith(\"?\") and p != \"?\":\n p = \"is_%s\" % (p[:-1])\n\n return p\n\n obj = \".\".join([mangle(part) for part in obj.split(\".\")])\n\n return HySymbol(obj)\n\n\[email protected]\ndef error_handler(token):\n tokentype = token.gettokentype()\n if tokentype == '$end':\n raise PrematureEndOfInput(\"Premature end of input\")\n else:\n raise LexException(\n \"Ran into a %s where it wasn't expected.\" % tokentype,\n token.source_pos.lineno, token.source_pos.colno)\n\n\nparser = pg.build()\n", "path": "hy/lex/parser.py"}, {"content": "# Copyright (c) 2013 Nicolas Dandrimont <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom rply import LexerGenerator\n\n\nlg = LexerGenerator()\n\n\n# A regexp for something that should end a quoting/unquoting operator\n# i.e. a space or a closing brace/paren/curly\nend_quote = r'(?![\\s\\)\\]\\}])'\n\n\nlg.add('LPAREN', r'\\(')\nlg.add('RPAREN', r'\\)')\nlg.add('LBRACKET', r'\\[')\nlg.add('RBRACKET', r'\\]')\nlg.add('LCURLY', r'\\{')\nlg.add('RCURLY', r'\\}')\nlg.add('QUOTE', r'\\'%s' % end_quote)\nlg.add('QUASIQUOTE', r'`%s' % end_quote)\nlg.add('UNQUOTESPLICE', r'~@%s' % end_quote)\nlg.add('UNQUOTE', r'~%s' % end_quote)\nlg.add('HASHBANG', r'#!.*[^\\r\\n]')\nlg.add('HASHREADER', r'#.')\n\n# A regexp which matches incomplete strings, used to support\n# multi-line strings in the interpreter\npartial_string = r'''(?x)\n (?:u|r|ur|ru)? # prefix\n \" # start string\n (?:\n | [^\"\\\\] # non-quote or backslash\n | \\\\. # or escaped single character\n | \\\\x[0-9a-fA-F]{2} # or escaped raw character\n | \\\\u[0-9a-fA-F]{4} # or unicode escape\n | \\\\U[0-9a-fA-F]{8} # or long unicode escape\n )* # one or more times\n'''\n\nlg.add('STRING', r'%s\"' % partial_string)\nlg.add('PARTIAL_STRING', partial_string)\n\nlg.add('IDENTIFIER', r'[^()\\[\\]{}\\'\"\\s;]+')\n\n\nlg.ignore(r';.*(?=\\r|\\n|$)')\nlg.ignore(r'\\s+')\n\n\nlexer = lg.build()\n", "path": "hy/lex/lexer.py"}]}
3,927
391
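The fix in this record splits the string regex into a complete `STRING` token and a `PARTIAL_STRING` token whose parser production raises `PrematureEndOfInput`, the same signal an unclosed paren already produces, so a REPL can treat the exception as "keep reading". The loop below is an illustrative sketch of that pattern, not Hy's actual REPL; it assumes `hy.lex.tokenize` and `hy.lex.exceptions.PrematureEndOfInput` are importable as suggested by the code shown above.

```python
# Illustrative read loop: keep accepting lines until the buffer parses.
# Assumes hy.lex exposes tokenize() and the PrematureEndOfInput exception
# used by the parser above; the loop itself is not Hy's real REPL code.
from hy.lex import tokenize
from hy.lex.exceptions import PrematureEndOfInput

def read_form(prompt="=> ", cont_prompt="... "):
    buffer = input(prompt)
    while True:
        try:
            return tokenize(buffer)                 # form (and string) closed
        except PrematureEndOfInput:
            buffer += "\n" + input(cont_prompt)     # still open: read another line
```

With the patched lexer, an input such as `(defn a [] "This is a dummy fn that really does` raises `PrematureEndOfInput` instead of `LexException`, so a loop like this keeps prompting until the closing quote arrives.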
gh_patches_debug_22756
rasdani/github-patches
git_diff
streamlit__streamlit-929
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make it easy to screencast a Streamlit app See spec here: https://docs.google.com/presentation/d/18bNul9a6rjScGhxRmGbZbCcU3uYp_b3ckxA9DVFJlKM/edit And see a crappy demo I wrote with some code you can steal: https://gist.github.com/tvst/c114620cf36b77732d5d67f411c55f12 Questions: * What browsers support this? * Can we record as mp4 / h264? * Can we record in a format that works in both Windows and Mac without extra installs? (Linux is not a problem -- users know how to open video in different formats) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `e2e/scripts/st_latex.py` Content: ``` 1 import streamlit as st 2 3 st.latex(r"\LaTeX") 4 5 try: 6 import sympy 7 8 a, b = sympy.symbols("a b") 9 out = a + b 10 except: 11 out = "a + b" 12 13 st.latex(out) 14 ``` Path: `e2e/scripts/st_chart_utc_time.py` Content: ``` 1 from datetime import date 2 3 import pandas as pd 4 import streamlit as st 5 6 df = pd.DataFrame( 7 { 8 "index": [ 9 date(2019, 8, 9), 10 date(2019, 8, 10), 11 date(2019, 8, 11), 12 date(2019, 8, 12), 13 ], 14 "numbers": [10, 50, 30, 40], 15 } 16 ) 17 18 df.set_index("index", inplace=True) 19 20 # st.area/bar/line_chart all use Altair/Vega-Lite under the hood. 21 # By default, Vega-Lite displays time values in the browser's local 22 # time zone. In `altair.generate_chart`, we explicitly set the time 23 # display to UTC, so that our results are consistent. This test verifies 24 # that change! 25 st.area_chart(df) 26 st.bar_chart(df) 27 st.line_chart(df) 28 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/e2e/scripts/st_chart_utc_time.py b/e2e/scripts/st_chart_utc_time.py --- a/e2e/scripts/st_chart_utc_time.py +++ b/e2e/scripts/st_chart_utc_time.py @@ -1,3 +1,18 @@ +# -*- coding: utf-8 -*- +# Copyright 2018-2020 Streamlit Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from datetime import date import pandas as pd diff --git a/e2e/scripts/st_latex.py b/e2e/scripts/st_latex.py --- a/e2e/scripts/st_latex.py +++ b/e2e/scripts/st_latex.py @@ -1,3 +1,18 @@ +# -*- coding: utf-8 -*- +# Copyright 2018-2020 Streamlit Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import streamlit as st st.latex(r"\LaTeX")
{"golden_diff": "diff --git a/e2e/scripts/st_chart_utc_time.py b/e2e/scripts/st_chart_utc_time.py\n--- a/e2e/scripts/st_chart_utc_time.py\n+++ b/e2e/scripts/st_chart_utc_time.py\n@@ -1,3 +1,18 @@\n+# -*- coding: utf-8 -*-\n+# Copyright 2018-2020 Streamlit Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n from datetime import date\n \n import pandas as pd\ndiff --git a/e2e/scripts/st_latex.py b/e2e/scripts/st_latex.py\n--- a/e2e/scripts/st_latex.py\n+++ b/e2e/scripts/st_latex.py\n@@ -1,3 +1,18 @@\n+# -*- coding: utf-8 -*-\n+# Copyright 2018-2020 Streamlit Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n import streamlit as st\n \n st.latex(r\"\\LaTeX\")\n", "issue": "Make it easy to screencast a Streamlit app\nSee spec here: https://docs.google.com/presentation/d/18bNul9a6rjScGhxRmGbZbCcU3uYp_b3ckxA9DVFJlKM/edit\r\n\r\nAnd see a crappy demo I wrote with some code you can steal:\r\nhttps://gist.github.com/tvst/c114620cf36b77732d5d67f411c55f12\r\n\r\nQuestions:\r\n* What browsers support this?\r\n* Can we record as mp4 / h264?\r\n* Can we record in a format that works in both Windows and Mac without extra installs? (Linux is not a problem -- users know how to open video in different formats)\n", "before_files": [{"content": "import streamlit as st\n\nst.latex(r\"\\LaTeX\")\n\ntry:\n import sympy\n\n a, b = sympy.symbols(\"a b\")\n out = a + b\nexcept:\n out = \"a + b\"\n\nst.latex(out)\n", "path": "e2e/scripts/st_latex.py"}, {"content": "from datetime import date\n\nimport pandas as pd\nimport streamlit as st\n\ndf = pd.DataFrame(\n {\n \"index\": [\n date(2019, 8, 9),\n date(2019, 8, 10),\n date(2019, 8, 11),\n date(2019, 8, 12),\n ],\n \"numbers\": [10, 50, 30, 40],\n }\n)\n\ndf.set_index(\"index\", inplace=True)\n\n# st.area/bar/line_chart all use Altair/Vega-Lite under the hood.\n# By default, Vega-Lite displays time values in the browser's local\n# time zone. In `altair.generate_chart`, we explicitly set the time\n# display to UTC, so that our results are consistent. 
This test verifies\n# that change!\nst.area_chart(df)\nst.bar_chart(df)\nst.line_chart(df)\n", "path": "e2e/scripts/st_chart_utc_time.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nst.latex(r\"\\LaTeX\")\n\ntry:\n import sympy\n\n a, b = sympy.symbols(\"a b\")\n out = a + b\nexcept:\n out = \"a + b\"\n\nst.latex(out)\n", "path": "e2e/scripts/st_latex.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom datetime import date\n\nimport pandas as pd\nimport streamlit as st\n\ndf = pd.DataFrame(\n {\n \"index\": [\n date(2019, 8, 9),\n date(2019, 8, 10),\n date(2019, 8, 11),\n date(2019, 8, 12),\n ],\n \"numbers\": [10, 50, 30, 40],\n }\n)\n\ndf.set_index(\"index\", inplace=True)\n\n# st.area/bar/line_chart all use Altair/Vega-Lite under the hood.\n# By default, Vega-Lite displays time values in the browser's local\n# time zone. In `altair.generate_chart`, we explicitly set the time\n# display to UTC, so that our results are consistent. This test verifies\n# that change!\nst.area_chart(df)\nst.bar_chart(df)\nst.line_chart(df)\n", "path": "e2e/scripts/st_chart_utc_time.py"}]}
791
434
gh_patches_debug_8419
rasdani/github-patches
git_diff
searxng__searxng-2830
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug: internetarchivescholar engine **Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG** Repository: https://github.com/searxng/searxng Branch: master Version: 2023.9.19+3ac7c40b6 <!-- Check if these values are correct --> **How did you install SearXNG?** <!-- Did you install SearXNG using the official wiki or using searxng-docker or manually by executing the searx/webapp.py file? --> **What happened?** <!-- A clear and concise description of what the bug is. --> **How To Reproduce** <!-- How can we reproduce this issue? (as minimally and as precisely as possible) --> **Expected behavior** <!-- A clear and concise description of what you expected to happen. --> **Screenshots & Logs** <!-- If applicable, add screenshots, logs to help explain your problem. --> **Additional context** <!-- Add any other context about the problem here. --> **Technical report** Error * Error: KeyError * Percentage: 25 * Parameters: `()` * File name: `searx/engines/internet_archive_scholar.py:59` * Function: `response` * Code: `'title': result['biblio']['title'],` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `searx/engines/internet_archive_scholar.py` Content: ``` 1 # SPDX-License-Identifier: AGPL-3.0-or-later 2 # lint: pylint 3 """Internet Archive scholar(science) 4 """ 5 6 from datetime import datetime 7 from urllib.parse import urlencode 8 from searx.utils import html_to_text 9 10 about = { 11 "website": "https://scholar.archive.org/", 12 "wikidata_id": "Q115667709", 13 "official_api_documentation": "https://scholar.archive.org/api/redoc", 14 "use_official_api": True, 15 "require_api_key": False, 16 "results": "JSON", 17 } 18 categories = ['science', 'scientific publications'] 19 paging = True 20 21 base_url = "https://scholar.archive.org" 22 results_per_page = 15 23 24 25 def request(query, params): 26 args = { 27 "q": query, 28 "limit": results_per_page, 29 "offset": (params["pageno"] - 1) * results_per_page, 30 } 31 params["url"] = f"{base_url}/search?{urlencode(args)}" 32 params["headers"]["Accept"] = "application/json" 33 return params 34 35 36 def response(resp): 37 results = [] 38 39 json = resp.json() 40 41 for result in json["results"]: 42 publishedDate, content, doi = None, '', None 43 44 if result['biblio'].get('release_date'): 45 publishedDate = datetime.strptime(result['biblio']['release_date'], "%Y-%m-%d") 46 47 if len(result['abstracts']) > 0: 48 content = result['abstracts'][0].get('body') 49 elif len(result['_highlights']) > 0: 50 content = result['_highlights'][0] 51 52 if len(result['releases']) > 0: 53 doi = result['releases'][0].get('doi') 54 55 results.append( 56 { 57 'template': 'paper.html', 58 'url': result['fulltext']['access_url'], 59 'title': result['biblio']['title'], 60 'content': html_to_text(content), 61 'publisher': result['biblio'].get('publisher'), 62 'doi': doi, 63 'journal': result['biblio'].get('container_name'), 64 'authors': result['biblio'].get('contrib_names'), 65 'tags': result['tags'], 66 'publishedDate': publishedDate, 67 'issns': result['biblio'].get('issns'), 68 'pdf_url': result['fulltext'].get('access_url'), 69 } 70 ) 71 72 return results 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three 
backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/searx/engines/internet_archive_scholar.py b/searx/engines/internet_archive_scholar.py --- a/searx/engines/internet_archive_scholar.py +++ b/searx/engines/internet_archive_scholar.py @@ -56,7 +56,7 @@ { 'template': 'paper.html', 'url': result['fulltext']['access_url'], - 'title': result['biblio']['title'], + 'title': result['biblio'].get('title') or result['biblio'].get('container_name'), 'content': html_to_text(content), 'publisher': result['biblio'].get('publisher'), 'doi': doi,
{"golden_diff": "diff --git a/searx/engines/internet_archive_scholar.py b/searx/engines/internet_archive_scholar.py\n--- a/searx/engines/internet_archive_scholar.py\n+++ b/searx/engines/internet_archive_scholar.py\n@@ -56,7 +56,7 @@\n {\n 'template': 'paper.html',\n 'url': result['fulltext']['access_url'],\n- 'title': result['biblio']['title'],\n+ 'title': result['biblio'].get('title') or result['biblio'].get('container_name'),\n 'content': html_to_text(content),\n 'publisher': result['biblio'].get('publisher'),\n 'doi': doi,\n", "issue": "Bug: internetarchivescholar engine\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\nRepository: https://github.com/searxng/searxng\r\nBranch: master\r\nVersion: 2023.9.19+3ac7c40b6\r\n<!-- Check if these values are correct -->\r\n\r\n**How did you install SearXNG?**\r\n<!-- Did you install SearXNG using the official wiki or using searxng-docker\r\nor manually by executing the searx/webapp.py file? -->\r\n**What happened?**\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n**How To Reproduce**\r\n<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n**Screenshots & Logs**\r\n<!-- If applicable, add screenshots, logs to help explain your problem. -->\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. -->\r\n\r\n**Technical report**\r\n\r\nError\r\n * Error: KeyError\r\n * Percentage: 25\r\n * Parameters: `()`\r\n * File name: `searx/engines/internet_archive_scholar.py:59`\r\n * Function: `response`\r\n * Code: `'title': result['biblio']['title'],`\r\n\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Internet Archive scholar(science)\n\"\"\"\n\nfrom datetime import datetime\nfrom urllib.parse import urlencode\nfrom searx.utils import html_to_text\n\nabout = {\n \"website\": \"https://scholar.archive.org/\",\n \"wikidata_id\": \"Q115667709\",\n \"official_api_documentation\": \"https://scholar.archive.org/api/redoc\",\n \"use_official_api\": True,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\ncategories = ['science', 'scientific publications']\npaging = True\n\nbase_url = \"https://scholar.archive.org\"\nresults_per_page = 15\n\n\ndef request(query, params):\n args = {\n \"q\": query,\n \"limit\": results_per_page,\n \"offset\": (params[\"pageno\"] - 1) * results_per_page,\n }\n params[\"url\"] = f\"{base_url}/search?{urlencode(args)}\"\n params[\"headers\"][\"Accept\"] = \"application/json\"\n return params\n\n\ndef response(resp):\n results = []\n\n json = resp.json()\n\n for result in json[\"results\"]:\n publishedDate, content, doi = None, '', None\n\n if result['biblio'].get('release_date'):\n publishedDate = datetime.strptime(result['biblio']['release_date'], \"%Y-%m-%d\")\n\n if len(result['abstracts']) > 0:\n content = result['abstracts'][0].get('body')\n elif len(result['_highlights']) > 0:\n content = result['_highlights'][0]\n\n if len(result['releases']) > 0:\n doi = result['releases'][0].get('doi')\n\n results.append(\n {\n 'template': 'paper.html',\n 'url': result['fulltext']['access_url'],\n 'title': result['biblio']['title'],\n 'content': html_to_text(content),\n 'publisher': result['biblio'].get('publisher'),\n 'doi': doi,\n 'journal': result['biblio'].get('container_name'),\n 'authors': 
result['biblio'].get('contrib_names'),\n 'tags': result['tags'],\n 'publishedDate': publishedDate,\n 'issns': result['biblio'].get('issns'),\n 'pdf_url': result['fulltext'].get('access_url'),\n }\n )\n\n return results\n", "path": "searx/engines/internet_archive_scholar.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Internet Archive scholar(science)\n\"\"\"\n\nfrom datetime import datetime\nfrom urllib.parse import urlencode\nfrom searx.utils import html_to_text\n\nabout = {\n \"website\": \"https://scholar.archive.org/\",\n \"wikidata_id\": \"Q115667709\",\n \"official_api_documentation\": \"https://scholar.archive.org/api/redoc\",\n \"use_official_api\": True,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\ncategories = ['science', 'scientific publications']\npaging = True\n\nbase_url = \"https://scholar.archive.org\"\nresults_per_page = 15\n\n\ndef request(query, params):\n args = {\n \"q\": query,\n \"limit\": results_per_page,\n \"offset\": (params[\"pageno\"] - 1) * results_per_page,\n }\n params[\"url\"] = f\"{base_url}/search?{urlencode(args)}\"\n params[\"headers\"][\"Accept\"] = \"application/json\"\n return params\n\n\ndef response(resp):\n results = []\n\n json = resp.json()\n\n for result in json[\"results\"]:\n publishedDate, content, doi = None, '', None\n\n if result['biblio'].get('release_date'):\n publishedDate = datetime.strptime(result['biblio']['release_date'], \"%Y-%m-%d\")\n\n if len(result['abstracts']) > 0:\n content = result['abstracts'][0].get('body')\n elif len(result['_highlights']) > 0:\n content = result['_highlights'][0]\n\n if len(result['releases']) > 0:\n doi = result['releases'][0].get('doi')\n\n results.append(\n {\n 'template': 'paper.html',\n 'url': result['fulltext']['access_url'],\n 'title': result['biblio'].get('title') or result['biblio'].get('container_name'),\n 'content': html_to_text(content),\n 'publisher': result['biblio'].get('publisher'),\n 'doi': doi,\n 'journal': result['biblio'].get('container_name'),\n 'authors': result['biblio'].get('contrib_names'),\n 'tags': result['tags'],\n 'publishedDate': publishedDate,\n 'issns': result['biblio'].get('issns'),\n 'pdf_url': result['fulltext'].get('access_url'),\n }\n )\n\n return results\n", "path": "searx/engines/internet_archive_scholar.py"}]}
1246
163
gh_patches_debug_21006
rasdani/github-patches
git_diff
paperless-ngx__paperless-ngx-2983
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Docker 1.13-dev: Multi-User Setup, can't change anything with to document details ### Description Hi, I am pretty new to paperless. I wanted to start directly with the multi-user feature for my whole family. So I decided to install the dev-docker image. It runs quote nice, there is just this one thing... When a document has permission set to owner: Person A and read permission to Person B. I cannot change anything within the document e.g. Name, Permission, Tags, Document type etc. When I try to do so, there is just this error: `Error saving document: Http failure response for https://example.com/api/documents/23/: 500 OK` The change thru the webui won't be saved. When I remove the permission (read-permission to Person B) everything is working thru the webui. When I put the permission back, the error comes back as well. BTW I can change things without a problem within the Django admin panel. ### Steps to reproduce 1. Have a document 2. Have different users 3. Share this document as an owner to a second person 4. Error pops up when trying to change information on the document 5. remove permission, leave owner 6. Document can be changed 7. give read permission to the second person again 8. same error comes back ### Webserver logs ```bash [2023-03-31 22:13:36,819] [ERROR] [django.request] Internal Server Error: /api/documents/23/ Traceback (most recent call last): File "/usr/local/lib/python3.9/site-packages/asgiref/sync.py", line 486, in thread_handler raise exc_info[1] File "/usr/local/lib/python3.9/site-packages/django/core/handlers/exception.py", line 43, in inner response = await get_response(request) File "/usr/local/lib/python3.9/site-packages/django/core/handlers/base.py", line 253, in _get_response_async response = await wrapped_callback( File "/usr/local/lib/python3.9/site-packages/asgiref/sync.py", line 448, in __call__ ret = await asyncio.wait_for(future, timeout=None) File "/usr/local/lib/python3.9/asyncio/tasks.py", line 442, in wait_for return await fut File "/usr/local/lib/python3.9/site-packages/asgiref/current_thread_executor.py", line 22, in run result = self.fn(*self.args, **self.kwargs) File "/usr/local/lib/python3.9/site-packages/asgiref/sync.py", line 490, in thread_handler return func(*args, **kwargs) File "/usr/local/lib/python3.9/site-packages/django/views/decorators/csrf.py", line 55, in wrapped_view return view_func(*args, **kwargs) File "/usr/local/lib/python3.9/site-packages/rest_framework/viewsets.py", line 125, in view return self.dispatch(request, *args, **kwargs) File "/usr/local/lib/python3.9/site-packages/rest_framework/views.py", line 509, in dispatch response = self.handle_exception(exc) File "/usr/local/lib/python3.9/site-packages/rest_framework/views.py", line 469, in handle_exception self.raise_uncaught_exception(exc) File "/usr/local/lib/python3.9/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception raise exc File "/usr/local/lib/python3.9/site-packages/rest_framework/views.py", line 506, in dispatch response = handler(request, *args, **kwargs) File "/usr/src/paperless/src/documents/views.py", line 278, in update response = super().update(request, *args, **kwargs) File "/usr/local/lib/python3.9/site-packages/rest_framework/mixins.py", line 68, in update self.perform_update(serializer) File "/usr/local/lib/python3.9/site-packages/rest_framework/mixins.py", line 78, in perform_update serializer.save() File 
"/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py", line 207, in save self.instance = self.update(self.instance, validated_data) File "/usr/src/paperless/src/documents/serialisers.py", line 419, in update super().update(instance, validated_data) File "/usr/src/paperless/src/documents/serialisers.py", line 210, in update self._set_permissions(validated_data["set_permissions"], instance) File "/usr/src/paperless/src/documents/serialisers.py", line 149, in _set_permissions set_permissions_for_object(permissions, object) File "/usr/src/paperless/src/documents/permissions.py", line 70, in set_permissions_for_object if len(users_to_remove) > 0: File "/usr/local/lib/python3.9/site-packages/django/db/models/query.py", line 376, in __len__ self._fetch_all() File "/usr/local/lib/python3.9/site-packages/django/db/models/query.py", line 1867, in _fetch_all self._result_cache = list(self._iterable_class(self)) File "/usr/local/lib/python3.9/site-packages/django/db/models/query.py", line 87, in __iter__ results = compiler.execute_sql( File "/usr/local/lib/python3.9/site-packages/django/db/models/sql/compiler.py", line 1385, in execute_sql sql, params = self.as_sql() File "/usr/local/lib/python3.9/site-packages/django/db/models/sql/compiler.py", line 603, in as_sql raise NotSupportedError( django.db.utils.NotSupportedError: difference is not supported on this database backend. ``` ### Browser logs _No response_ ### Paperless-ngx version 1.13.0-dev ### Host OS Docker@Ubuntu 22.04 ### Installation method Docker - official image ### Browser Safari ### Configuration changes _No response_ ### Other _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/documents/permissions.py` Content: ``` 1 from django.contrib.auth.models import Group 2 from django.contrib.auth.models import Permission 3 from django.contrib.auth.models import User 4 from django.contrib.contenttypes.models import ContentType 5 from guardian.models import GroupObjectPermission 6 from guardian.shortcuts import assign_perm 7 from guardian.shortcuts import get_users_with_perms 8 from guardian.shortcuts import remove_perm 9 from rest_framework.permissions import BasePermission 10 from rest_framework.permissions import DjangoObjectPermissions 11 12 13 class PaperlessObjectPermissions(DjangoObjectPermissions): 14 """ 15 A permissions backend that checks for object-level permissions 16 or for ownership. 
17 """ 18 19 perms_map = { 20 "GET": ["%(app_label)s.view_%(model_name)s"], 21 "OPTIONS": ["%(app_label)s.view_%(model_name)s"], 22 "HEAD": ["%(app_label)s.view_%(model_name)s"], 23 "POST": ["%(app_label)s.add_%(model_name)s"], 24 "PUT": ["%(app_label)s.change_%(model_name)s"], 25 "PATCH": ["%(app_label)s.change_%(model_name)s"], 26 "DELETE": ["%(app_label)s.delete_%(model_name)s"], 27 } 28 29 def has_object_permission(self, request, view, obj): 30 if hasattr(obj, "owner") and obj.owner is not None: 31 if request.user == obj.owner: 32 return True 33 else: 34 return super().has_object_permission(request, view, obj) 35 else: 36 return True # no owner 37 38 39 class PaperlessAdminPermissions(BasePermission): 40 def has_permission(self, request, view): 41 return request.user.has_perm("admin.view_logentry") 42 43 44 def get_groups_with_only_permission(obj, codename): 45 ctype = ContentType.objects.get_for_model(obj) 46 permission = Permission.objects.get(content_type=ctype, codename=codename) 47 group_object_perm_group_ids = ( 48 GroupObjectPermission.objects.filter( 49 object_pk=obj.pk, 50 content_type=ctype, 51 ) 52 .filter(permission=permission) 53 .values_list("group_id") 54 ) 55 return Group.objects.filter(id__in=group_object_perm_group_ids).distinct() 56 57 58 def set_permissions_for_object(permissions, object): 59 for action in permissions: 60 permission = f"{action}_{object.__class__.__name__.lower()}" 61 # users 62 users_to_add = User.objects.filter(id__in=permissions[action]["users"]) 63 users_to_remove = get_users_with_perms( 64 object, 65 only_with_perms_in=[permission], 66 with_group_users=False, 67 ) 68 if len(users_to_add) > 0 and len(users_to_remove) > 0: 69 users_to_remove = users_to_remove.difference(users_to_add) 70 if len(users_to_remove) > 0: 71 for user in users_to_remove: 72 remove_perm(permission, user, object) 73 if len(users_to_add) > 0: 74 for user in users_to_add: 75 assign_perm(permission, user, object) 76 if action == "change": 77 # change gives view too 78 assign_perm( 79 f"view_{object.__class__.__name__.lower()}", 80 user, 81 object, 82 ) 83 # groups 84 groups_to_add = Group.objects.filter(id__in=permissions[action]["groups"]) 85 groups_to_remove = get_groups_with_only_permission( 86 object, 87 permission, 88 ) 89 if len(groups_to_add) > 0 and len(groups_to_remove) > 0: 90 groups_to_remove = groups_to_remove.difference(groups_to_add) 91 if len(groups_to_remove) > 0: 92 for group in groups_to_remove: 93 remove_perm(permission, group, object) 94 if len(groups_to_add) > 0: 95 for group in groups_to_add: 96 assign_perm(permission, group, object) 97 if action == "change": 98 # change gives view too 99 assign_perm( 100 f"view_{object.__class__.__name__.lower()}", 101 group, 102 object, 103 ) 104 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/documents/permissions.py b/src/documents/permissions.py --- a/src/documents/permissions.py +++ b/src/documents/permissions.py @@ -66,7 +66,7 @@ with_group_users=False, ) if len(users_to_add) > 0 and len(users_to_remove) > 0: - users_to_remove = users_to_remove.difference(users_to_add) + users_to_remove = users_to_remove.exclude(id__in=users_to_add) if len(users_to_remove) > 0: for user in users_to_remove: remove_perm(permission, user, object) @@ -87,7 +87,7 @@ permission, ) if len(groups_to_add) > 0 and len(groups_to_remove) > 0: - groups_to_remove = groups_to_remove.difference(groups_to_add) + groups_to_remove = groups_to_remove.exclude(id__in=groups_to_add) if len(groups_to_remove) > 0: for group in groups_to_remove: remove_perm(permission, group, object)
{"golden_diff": "diff --git a/src/documents/permissions.py b/src/documents/permissions.py\n--- a/src/documents/permissions.py\n+++ b/src/documents/permissions.py\n@@ -66,7 +66,7 @@\n with_group_users=False,\n )\n if len(users_to_add) > 0 and len(users_to_remove) > 0:\n- users_to_remove = users_to_remove.difference(users_to_add)\n+ users_to_remove = users_to_remove.exclude(id__in=users_to_add)\n if len(users_to_remove) > 0:\n for user in users_to_remove:\n remove_perm(permission, user, object)\n@@ -87,7 +87,7 @@\n permission,\n )\n if len(groups_to_add) > 0 and len(groups_to_remove) > 0:\n- groups_to_remove = groups_to_remove.difference(groups_to_add)\n+ groups_to_remove = groups_to_remove.exclude(id__in=groups_to_add)\n if len(groups_to_remove) > 0:\n for group in groups_to_remove:\n remove_perm(permission, group, object)\n", "issue": "[BUG] Docker 1.13-dev: Multi-User Setup, can't change anything with to document details\n### Description\n\nHi, I am pretty new to paperless. I wanted to start directly with the multi-user feature for my whole family. So I decided to install the dev-docker image. It runs quote nice, there is just this one thing...\r\nWhen a document has permission set to owner: Person A and read permission to Person B. I cannot change anything within the document e.g. Name, Permission, Tags, Document type etc.\r\n\r\nWhen I try to do so, there is just this error:\r\n`Error saving document: Http failure response for https://example.com/api/documents/23/: 500 OK`\r\n\r\nThe change thru the webui won't be saved. When I remove the permission (read-permission to Person B) everything is working thru the webui. When I put the permission back, the error comes back as well.\r\n\r\nBTW I can change things without a problem within the Django admin panel.\n\n### Steps to reproduce\n\n1. Have a document\r\n2. Have different users\r\n3. Share this document as an owner to a second person\r\n4. Error pops up when trying to change information on the document\r\n5. remove permission, leave owner\r\n6. Document can be changed\r\n7. give read permission to the second person again\r\n8. 
same error comes back\n\n### Webserver logs\n\n```bash\n[2023-03-31 22:13:36,819] [ERROR] [django.request] Internal Server Error: /api/documents/23/\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.9/site-packages/asgiref/sync.py\", line 486, in thread_handler\r\n raise exc_info[1]\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/handlers/exception.py\", line 43, in inner\r\n response = await get_response(request)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/handlers/base.py\", line 253, in _get_response_async\r\n response = await wrapped_callback(\r\n File \"/usr/local/lib/python3.9/site-packages/asgiref/sync.py\", line 448, in __call__\r\n ret = await asyncio.wait_for(future, timeout=None)\r\n File \"/usr/local/lib/python3.9/asyncio/tasks.py\", line 442, in wait_for\r\n return await fut\r\n File \"/usr/local/lib/python3.9/site-packages/asgiref/current_thread_executor.py\", line 22, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/asgiref/sync.py\", line 490, in thread_handler\r\n return func(*args, **kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/django/views/decorators/csrf.py\", line 55, in wrapped_view\r\n return view_func(*args, **kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/viewsets.py\", line 125, in view\r\n return self.dispatch(request, *args, **kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/views.py\", line 509, in dispatch\r\n response = self.handle_exception(exc)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/views.py\", line 469, in handle_exception\r\n self.raise_uncaught_exception(exc)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/views.py\", line 480, in raise_uncaught_exception\r\n raise exc\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/views.py\", line 506, in dispatch\r\n response = handler(request, *args, **kwargs)\r\n File \"/usr/src/paperless/src/documents/views.py\", line 278, in update\r\n response = super().update(request, *args, **kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/mixins.py\", line 68, in update\r\n self.perform_update(serializer)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/mixins.py\", line 78, in perform_update\r\n serializer.save()\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py\", line 207, in save\r\n self.instance = self.update(self.instance, validated_data)\r\n File \"/usr/src/paperless/src/documents/serialisers.py\", line 419, in update\r\n super().update(instance, validated_data)\r\n File \"/usr/src/paperless/src/documents/serialisers.py\", line 210, in update\r\n self._set_permissions(validated_data[\"set_permissions\"], instance)\r\n File \"/usr/src/paperless/src/documents/serialisers.py\", line 149, in _set_permissions\r\n set_permissions_for_object(permissions, object)\r\n File \"/usr/src/paperless/src/documents/permissions.py\", line 70, in set_permissions_for_object\r\n if len(users_to_remove) > 0:\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/models/query.py\", line 376, in __len__\r\n self._fetch_all()\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/models/query.py\", line 1867, in _fetch_all\r\n self._result_cache = list(self._iterable_class(self))\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/models/query.py\", line 87, in __iter__\r\n results = compiler.execute_sql(\r\n File 
\"/usr/local/lib/python3.9/site-packages/django/db/models/sql/compiler.py\", line 1385, in execute_sql\r\n sql, params = self.as_sql()\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/models/sql/compiler.py\", line 603, in as_sql\r\n raise NotSupportedError(\r\ndjango.db.utils.NotSupportedError: difference is not supported on this database backend.\n```\n\n\n### Browser logs\n\n_No response_\n\n### Paperless-ngx version\n\n1.13.0-dev\n\n### Host OS\n\nDocker@Ubuntu 22.04\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nSafari\n\n### Configuration changes\n\n_No response_\n\n### Other\n\n_No response_\n", "before_files": [{"content": "from django.contrib.auth.models import Group\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom guardian.models import GroupObjectPermission\nfrom guardian.shortcuts import assign_perm\nfrom guardian.shortcuts import get_users_with_perms\nfrom guardian.shortcuts import remove_perm\nfrom rest_framework.permissions import BasePermission\nfrom rest_framework.permissions import DjangoObjectPermissions\n\n\nclass PaperlessObjectPermissions(DjangoObjectPermissions):\n \"\"\"\n A permissions backend that checks for object-level permissions\n or for ownership.\n \"\"\"\n\n perms_map = {\n \"GET\": [\"%(app_label)s.view_%(model_name)s\"],\n \"OPTIONS\": [\"%(app_label)s.view_%(model_name)s\"],\n \"HEAD\": [\"%(app_label)s.view_%(model_name)s\"],\n \"POST\": [\"%(app_label)s.add_%(model_name)s\"],\n \"PUT\": [\"%(app_label)s.change_%(model_name)s\"],\n \"PATCH\": [\"%(app_label)s.change_%(model_name)s\"],\n \"DELETE\": [\"%(app_label)s.delete_%(model_name)s\"],\n }\n\n def has_object_permission(self, request, view, obj):\n if hasattr(obj, \"owner\") and obj.owner is not None:\n if request.user == obj.owner:\n return True\n else:\n return super().has_object_permission(request, view, obj)\n else:\n return True # no owner\n\n\nclass PaperlessAdminPermissions(BasePermission):\n def has_permission(self, request, view):\n return request.user.has_perm(\"admin.view_logentry\")\n\n\ndef get_groups_with_only_permission(obj, codename):\n ctype = ContentType.objects.get_for_model(obj)\n permission = Permission.objects.get(content_type=ctype, codename=codename)\n group_object_perm_group_ids = (\n GroupObjectPermission.objects.filter(\n object_pk=obj.pk,\n content_type=ctype,\n )\n .filter(permission=permission)\n .values_list(\"group_id\")\n )\n return Group.objects.filter(id__in=group_object_perm_group_ids).distinct()\n\n\ndef set_permissions_for_object(permissions, object):\n for action in permissions:\n permission = f\"{action}_{object.__class__.__name__.lower()}\"\n # users\n users_to_add = User.objects.filter(id__in=permissions[action][\"users\"])\n users_to_remove = get_users_with_perms(\n object,\n only_with_perms_in=[permission],\n with_group_users=False,\n )\n if len(users_to_add) > 0 and len(users_to_remove) > 0:\n users_to_remove = users_to_remove.difference(users_to_add)\n if len(users_to_remove) > 0:\n for user in users_to_remove:\n remove_perm(permission, user, object)\n if len(users_to_add) > 0:\n for user in users_to_add:\n assign_perm(permission, user, object)\n if action == \"change\":\n # change gives view too\n assign_perm(\n f\"view_{object.__class__.__name__.lower()}\",\n user,\n object,\n )\n # groups\n groups_to_add = Group.objects.filter(id__in=permissions[action][\"groups\"])\n groups_to_remove = 
get_groups_with_only_permission(\n object,\n permission,\n )\n if len(groups_to_add) > 0 and len(groups_to_remove) > 0:\n groups_to_remove = groups_to_remove.difference(groups_to_add)\n if len(groups_to_remove) > 0:\n for group in groups_to_remove:\n remove_perm(permission, group, object)\n if len(groups_to_add) > 0:\n for group in groups_to_add:\n assign_perm(permission, group, object)\n if action == \"change\":\n # change gives view too\n assign_perm(\n f\"view_{object.__class__.__name__.lower()}\",\n group,\n object,\n )\n", "path": "src/documents/permissions.py"}], "after_files": [{"content": "from django.contrib.auth.models import Group\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom guardian.models import GroupObjectPermission\nfrom guardian.shortcuts import assign_perm\nfrom guardian.shortcuts import get_users_with_perms\nfrom guardian.shortcuts import remove_perm\nfrom rest_framework.permissions import BasePermission\nfrom rest_framework.permissions import DjangoObjectPermissions\n\n\nclass PaperlessObjectPermissions(DjangoObjectPermissions):\n \"\"\"\n A permissions backend that checks for object-level permissions\n or for ownership.\n \"\"\"\n\n perms_map = {\n \"GET\": [\"%(app_label)s.view_%(model_name)s\"],\n \"OPTIONS\": [\"%(app_label)s.view_%(model_name)s\"],\n \"HEAD\": [\"%(app_label)s.view_%(model_name)s\"],\n \"POST\": [\"%(app_label)s.add_%(model_name)s\"],\n \"PUT\": [\"%(app_label)s.change_%(model_name)s\"],\n \"PATCH\": [\"%(app_label)s.change_%(model_name)s\"],\n \"DELETE\": [\"%(app_label)s.delete_%(model_name)s\"],\n }\n\n def has_object_permission(self, request, view, obj):\n if hasattr(obj, \"owner\") and obj.owner is not None:\n if request.user == obj.owner:\n return True\n else:\n return super().has_object_permission(request, view, obj)\n else:\n return True # no owner\n\n\nclass PaperlessAdminPermissions(BasePermission):\n def has_permission(self, request, view):\n return request.user.has_perm(\"admin.view_logentry\")\n\n\ndef get_groups_with_only_permission(obj, codename):\n ctype = ContentType.objects.get_for_model(obj)\n permission = Permission.objects.get(content_type=ctype, codename=codename)\n group_object_perm_group_ids = (\n GroupObjectPermission.objects.filter(\n object_pk=obj.pk,\n content_type=ctype,\n )\n .filter(permission=permission)\n .values_list(\"group_id\")\n )\n return Group.objects.filter(id__in=group_object_perm_group_ids).distinct()\n\n\ndef set_permissions_for_object(permissions, object):\n for action in permissions:\n permission = f\"{action}_{object.__class__.__name__.lower()}\"\n # users\n users_to_add = User.objects.filter(id__in=permissions[action][\"users\"])\n users_to_remove = get_users_with_perms(\n object,\n only_with_perms_in=[permission],\n with_group_users=False,\n )\n if len(users_to_add) > 0 and len(users_to_remove) > 0:\n users_to_remove = users_to_remove.exclude(id__in=users_to_add)\n if len(users_to_remove) > 0:\n for user in users_to_remove:\n remove_perm(permission, user, object)\n if len(users_to_add) > 0:\n for user in users_to_add:\n assign_perm(permission, user, object)\n if action == \"change\":\n # change gives view too\n assign_perm(\n f\"view_{object.__class__.__name__.lower()}\",\n user,\n object,\n )\n # groups\n groups_to_add = Group.objects.filter(id__in=permissions[action][\"groups\"])\n groups_to_remove = get_groups_with_only_permission(\n object,\n permission,\n )\n if len(groups_to_add) 
> 0 and len(groups_to_remove) > 0:\n groups_to_remove = groups_to_remove.exclude(id__in=groups_to_add)\n if len(groups_to_remove) > 0:\n for group in groups_to_remove:\n remove_perm(permission, group, object)\n if len(groups_to_add) > 0:\n for group in groups_to_add:\n assign_perm(permission, group, object)\n if action == \"change\":\n # change gives view too\n assign_perm(\n f\"view_{object.__class__.__name__.lower()}\",\n group,\n object,\n )\n", "path": "src/documents/permissions.py"}]}
2682
231
gh_patches_debug_17987
rasdani/github-patches
git_diff
svthalia__concrexit-3480
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Thumbnails 'Source' objects are not removed when deleting an album ### Describe the bug Currently when an album is deleted, the Source objects (from django-thumbnails) of its photos aren't deleted. Then, if an album with the same name is uploaded again, we get IntegrityErrors when trying to create photos with the same filenames that already existed. ### How to reproduce <!-- Steps to reproduce the behaviour --> 1. Upload album 2. Delete it 3. Upload the same album again ### Expected behaviour The Source's are deleted, so there is no integrityerror. ### Additional context https://thalia.sentry.io/issues/4543169553/events/239a90e83abd437e9d7116b45ec30b9a/ This issue would be solved by making photos filenames be uuid's (#3442), and just fixing this single issue won't fix all problems we have with reuploading an album, as there is another problem: Reuploading different photos with the same name does not invalidate the cloudfront cache. So If we do that within 24 hours, facedetection would likely analyze the wrong (stale) cached files, and they would also show up to users. So we also need to either make the filenames unique, or implement cloudfront cache invalidation. Clearly, unique filenames are the easier way to go. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/photos/models.py` Content: ``` 1 import hashlib 2 import logging 3 import os 4 import random 5 from secrets import token_hex 6 7 from django.conf import settings 8 from django.core.exceptions import ValidationError 9 from django.db import models 10 from django.db.models import Count, IntegerField, Value 11 from django.db.models.functions import Coalesce 12 from django.urls import reverse 13 from django.utils.functional import cached_property 14 from django.utils.translation import gettext_lazy as _ 15 16 from queryable_properties.managers import QueryablePropertiesManager 17 from queryable_properties.properties import AnnotationProperty 18 from thumbnails.fields import ImageField 19 20 from members.models import Member 21 22 COVER_FILENAME = "cover.jpg" 23 24 25 logger = logging.getLogger(__name__) 26 27 28 def photo_uploadto(instance, filename): 29 ext = os.path.splitext(filename)[1] 30 return f"photos/{instance.album.dirname}/{token_hex(8)}{ext}" 31 32 33 class DuplicatePhotoException(Exception): 34 """Raised when a photo with the same digest already exists in a given album.""" 35 36 37 class Photo(models.Model): 38 """Model for a Photo object.""" 39 40 objects = QueryablePropertiesManager() 41 42 album = models.ForeignKey( 43 "Album", on_delete=models.CASCADE, verbose_name=_("album") 44 ) 45 46 file = ImageField( 47 _("file"), 48 upload_to=photo_uploadto, 49 resize_source_to="source", 50 ) 51 52 rotation = models.IntegerField( 53 verbose_name=_("rotation"), 54 default=0, 55 choices=((x, x) for x in (0, 90, 180, 270)), 56 help_text=_("This does not modify the original image file."), 57 ) 58 59 _digest = models.CharField( 60 "digest", 61 max_length=40, 62 blank=True, 63 editable=False, 64 ) 65 66 num_likes = AnnotationProperty( 67 Coalesce(Count("likes"), Value(0), output_field=IntegerField()) 68 ) 69 70 def __init__(self, *args, **kwargs): 71 """Initialize Photo object and set the file if it exists.""" 72 super().__init__(*args, **kwargs) 73 if self.file: 74 self.original_file = self.file.name 75 else: 76 
self.original_file = "" 77 78 def __str__(self): 79 """Return the filename of a Photo object.""" 80 return os.path.basename(self.file.name) 81 82 def clean(self): 83 if not self.file._committed: 84 hash_sha1 = hashlib.sha1() 85 for chunk in iter(lambda: self.file.read(4096), b""): 86 hash_sha1.update(chunk) 87 digest = hash_sha1.hexdigest() 88 self._digest = digest 89 90 if ( 91 Photo.objects.filter(album=self.album, _digest=digest) 92 .exclude(pk=self.pk) 93 .exists() 94 ): 95 raise ValidationError( 96 {"file": "This photo already exists in this album."} 97 ) 98 99 return super().clean() 100 101 def delete(self, using=None, keep_parents=False): 102 removed = super().delete(using, keep_parents) 103 if self.file.name: 104 self.file.delete() 105 return removed 106 107 class Meta: 108 """Meta class for Photo.""" 109 110 # Photos are created in order of their filename. 111 ordering = ("pk",) 112 113 114 class Like(models.Model): 115 photo = models.ForeignKey( 116 Photo, null=False, blank=False, related_name="likes", on_delete=models.CASCADE 117 ) 118 member = models.ForeignKey( 119 Member, null=True, blank=False, on_delete=models.SET_NULL 120 ) 121 122 def __str__(self): 123 return str(self.member) + " " + _("likes") + " " + str(self.photo) 124 125 class Meta: 126 unique_together = ["photo", "member"] 127 128 129 class Album(models.Model): 130 """Model for Album objects.""" 131 132 title = models.CharField( 133 _("title"), 134 blank=True, 135 max_length=200, 136 help_text=_("Leave empty to take over the title of the event"), 137 ) 138 139 dirname = models.CharField( 140 verbose_name=_("directory name"), 141 max_length=200, 142 ) 143 144 date = models.DateField( 145 verbose_name=_("date"), 146 blank=True, 147 help_text=_("Leave empty to take over the date of the event"), 148 ) 149 150 slug = models.SlugField( 151 verbose_name=_("slug"), 152 unique=True, 153 ) 154 155 hidden = models.BooleanField(verbose_name=_("hidden"), default=False) 156 157 event = models.ForeignKey( 158 "events.Event", 159 on_delete=models.SET_NULL, 160 blank=True, 161 null=True, 162 ) 163 164 _cover = models.OneToOneField( 165 Photo, 166 on_delete=models.SET_NULL, 167 blank=True, 168 null=True, 169 related_name="covered_album", 170 verbose_name=_("cover image"), 171 ) 172 173 shareable = models.BooleanField(verbose_name=_("shareable"), default=False) 174 175 photosdir = "photos" 176 photospath = os.path.join(settings.MEDIA_ROOT, photosdir) 177 178 @cached_property 179 def cover(self): 180 """Return cover of Album. 181 182 If a cover is not set, return a random photo or None if there are no photos. 
183 """ 184 cover = None 185 if self._cover is not None: 186 return self._cover 187 188 # Not prefetched because this should be rare and is a lot of data 189 # `exists` is faster in theory, but requires everything to be fetched later anyways 190 if self.photo_set.exists(): 191 r = random.Random(self.dirname) 192 cover = r.choice(self.photo_set.all()) 193 return cover 194 195 def __str__(self): 196 """Get string representation of Album.""" 197 return f"{self.date:%Y-%m-%d} {self.title}" 198 199 def get_absolute_url(self): 200 """Get url of Album.""" 201 return reverse("photos:album", args=[str(self.slug)]) 202 203 def clean(self): 204 super().clean() 205 errors = {} 206 207 if not self.title and not self.event: 208 errors.update( 209 {"title": _("This field is required if there is no event selected.")} 210 ) 211 212 if not self.date and not self.event: 213 errors.update( 214 {"date": _("This field is required if there is no event selected.")} 215 ) 216 217 if errors: 218 raise ValidationError(errors) 219 220 def save(self, **kwargs): 221 """Save album and send appropriate notifications.""" 222 # dirname is only set for new objects, to avoid ever changing it 223 if self.pk is None: 224 self.dirname = self.slug 225 226 if not self.title and self.event: 227 self.title = self.event.title 228 229 if not self.date: 230 self.date = self.event.start.date() 231 232 super().save(**kwargs) 233 234 @property 235 def access_token(self): 236 """Return access token for album.""" 237 return hashlib.sha256( 238 f"{settings.SECRET_KEY}album{self.pk}".encode() 239 ).hexdigest() 240 241 class Meta: 242 """Meta class for Album.""" 243 244 ordering = ("-date", "title") 245 ``` Path: `website/photos/signals.py` Content: ``` 1 from django.db.models.signals import pre_delete 2 3 from utils.models.signals import suspendingreceiver 4 5 6 @suspendingreceiver( 7 pre_delete, sender="photos.Photo", dispatch_uid="photos_photo_delete" 8 ) 9 def pre_photo_delete(sender, instance, **kwargs): 10 """Remove main photo file on deletion.""" 11 instance.file.delete() 12 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/photos/models.py b/website/photos/models.py --- a/website/photos/models.py +++ b/website/photos/models.py @@ -98,12 +98,6 @@ return super().clean() - def delete(self, using=None, keep_parents=False): - removed = super().delete(using, keep_parents) - if self.file.name: - self.file.delete() - return removed - class Meta: """Meta class for Photo.""" diff --git a/website/photos/signals.py b/website/photos/signals.py --- a/website/photos/signals.py +++ b/website/photos/signals.py @@ -7,5 +7,9 @@ pre_delete, sender="photos.Photo", dispatch_uid="photos_photo_delete" ) def pre_photo_delete(sender, instance, **kwargs): - """Remove main photo file on deletion.""" - instance.file.delete() + """Remove main photo file and thumbnails on deletion.""" + name = instance.file.name # First get the name, it is removed by the next line. + instance.file.delete() # Delete the file and its thumbnails. + + # Clean up the source metadata, django-thumbnails does not do this. + instance.file.metadata_backend.delete_source(name)
{"golden_diff": "diff --git a/website/photos/models.py b/website/photos/models.py\n--- a/website/photos/models.py\n+++ b/website/photos/models.py\n@@ -98,12 +98,6 @@\n \n return super().clean()\n \n- def delete(self, using=None, keep_parents=False):\n- removed = super().delete(using, keep_parents)\n- if self.file.name:\n- self.file.delete()\n- return removed\n-\n class Meta:\n \"\"\"Meta class for Photo.\"\"\"\n \ndiff --git a/website/photos/signals.py b/website/photos/signals.py\n--- a/website/photos/signals.py\n+++ b/website/photos/signals.py\n@@ -7,5 +7,9 @@\n pre_delete, sender=\"photos.Photo\", dispatch_uid=\"photos_photo_delete\"\n )\n def pre_photo_delete(sender, instance, **kwargs):\n- \"\"\"Remove main photo file on deletion.\"\"\"\n- instance.file.delete()\n+ \"\"\"Remove main photo file and thumbnails on deletion.\"\"\"\n+ name = instance.file.name # First get the name, it is removed by the next line.\n+ instance.file.delete() # Delete the file and its thumbnails.\n+\n+ # Clean up the source metadata, django-thumbnails does not do this.\n+ instance.file.metadata_backend.delete_source(name)\n", "issue": "Thumbnails 'Source' objects are not removed when deleting an album\n### Describe the bug\r\n\r\nCurrently when an album is deleted, the Source objects (from django-thumbnails) of its photos aren't deleted. Then, if an album with the same name is uploaded again, we get IntegrityErrors when trying to create photos with the same filenames that already existed. \r\n\r\n### How to reproduce\r\n<!-- Steps to reproduce the behaviour -->\r\n1. Upload album\r\n2. Delete it\r\n3. Upload the same album again\r\n\r\n### Expected behaviour\r\nThe Source's are deleted, so there is no integrityerror.\r\n\r\n### Additional context\r\nhttps://thalia.sentry.io/issues/4543169553/events/239a90e83abd437e9d7116b45ec30b9a/\r\n\r\nThis issue would be solved by making photos filenames be uuid's (#3442), and just fixing this single issue won't fix all problems we have with reuploading an album, as there is another problem:\r\nReuploading different photos with the same name does not invalidate the cloudfront cache. So If we do that within 24 hours, facedetection would likely analyze the wrong (stale) cached files, and they would also show up to users. So we also need to either make the filenames unique, or implement cloudfront cache invalidation. 
Clearly, unique filenames are the easier way to go.\r\n\n", "before_files": [{"content": "import hashlib\nimport logging\nimport os\nimport random\nfrom secrets import token_hex\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models import Count, IntegerField, Value\nfrom django.db.models.functions import Coalesce\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext_lazy as _\n\nfrom queryable_properties.managers import QueryablePropertiesManager\nfrom queryable_properties.properties import AnnotationProperty\nfrom thumbnails.fields import ImageField\n\nfrom members.models import Member\n\nCOVER_FILENAME = \"cover.jpg\"\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef photo_uploadto(instance, filename):\n ext = os.path.splitext(filename)[1]\n return f\"photos/{instance.album.dirname}/{token_hex(8)}{ext}\"\n\n\nclass DuplicatePhotoException(Exception):\n \"\"\"Raised when a photo with the same digest already exists in a given album.\"\"\"\n\n\nclass Photo(models.Model):\n \"\"\"Model for a Photo object.\"\"\"\n\n objects = QueryablePropertiesManager()\n\n album = models.ForeignKey(\n \"Album\", on_delete=models.CASCADE, verbose_name=_(\"album\")\n )\n\n file = ImageField(\n _(\"file\"),\n upload_to=photo_uploadto,\n resize_source_to=\"source\",\n )\n\n rotation = models.IntegerField(\n verbose_name=_(\"rotation\"),\n default=0,\n choices=((x, x) for x in (0, 90, 180, 270)),\n help_text=_(\"This does not modify the original image file.\"),\n )\n\n _digest = models.CharField(\n \"digest\",\n max_length=40,\n blank=True,\n editable=False,\n )\n\n num_likes = AnnotationProperty(\n Coalesce(Count(\"likes\"), Value(0), output_field=IntegerField())\n )\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize Photo object and set the file if it exists.\"\"\"\n super().__init__(*args, **kwargs)\n if self.file:\n self.original_file = self.file.name\n else:\n self.original_file = \"\"\n\n def __str__(self):\n \"\"\"Return the filename of a Photo object.\"\"\"\n return os.path.basename(self.file.name)\n\n def clean(self):\n if not self.file._committed:\n hash_sha1 = hashlib.sha1()\n for chunk in iter(lambda: self.file.read(4096), b\"\"):\n hash_sha1.update(chunk)\n digest = hash_sha1.hexdigest()\n self._digest = digest\n\n if (\n Photo.objects.filter(album=self.album, _digest=digest)\n .exclude(pk=self.pk)\n .exists()\n ):\n raise ValidationError(\n {\"file\": \"This photo already exists in this album.\"}\n )\n\n return super().clean()\n\n def delete(self, using=None, keep_parents=False):\n removed = super().delete(using, keep_parents)\n if self.file.name:\n self.file.delete()\n return removed\n\n class Meta:\n \"\"\"Meta class for Photo.\"\"\"\n\n # Photos are created in order of their filename.\n ordering = (\"pk\",)\n\n\nclass Like(models.Model):\n photo = models.ForeignKey(\n Photo, null=False, blank=False, related_name=\"likes\", on_delete=models.CASCADE\n )\n member = models.ForeignKey(\n Member, null=True, blank=False, on_delete=models.SET_NULL\n )\n\n def __str__(self):\n return str(self.member) + \" \" + _(\"likes\") + \" \" + str(self.photo)\n\n class Meta:\n unique_together = [\"photo\", \"member\"]\n\n\nclass Album(models.Model):\n \"\"\"Model for Album objects.\"\"\"\n\n title = models.CharField(\n _(\"title\"),\n blank=True,\n max_length=200,\n help_text=_(\"Leave empty to take over the title of the event\"),\n )\n\n 
dirname = models.CharField(\n verbose_name=_(\"directory name\"),\n max_length=200,\n )\n\n date = models.DateField(\n verbose_name=_(\"date\"),\n blank=True,\n help_text=_(\"Leave empty to take over the date of the event\"),\n )\n\n slug = models.SlugField(\n verbose_name=_(\"slug\"),\n unique=True,\n )\n\n hidden = models.BooleanField(verbose_name=_(\"hidden\"), default=False)\n\n event = models.ForeignKey(\n \"events.Event\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n )\n\n _cover = models.OneToOneField(\n Photo,\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name=\"covered_album\",\n verbose_name=_(\"cover image\"),\n )\n\n shareable = models.BooleanField(verbose_name=_(\"shareable\"), default=False)\n\n photosdir = \"photos\"\n photospath = os.path.join(settings.MEDIA_ROOT, photosdir)\n\n @cached_property\n def cover(self):\n \"\"\"Return cover of Album.\n\n If a cover is not set, return a random photo or None if there are no photos.\n \"\"\"\n cover = None\n if self._cover is not None:\n return self._cover\n\n # Not prefetched because this should be rare and is a lot of data\n # `exists` is faster in theory, but requires everything to be fetched later anyways\n if self.photo_set.exists():\n r = random.Random(self.dirname)\n cover = r.choice(self.photo_set.all())\n return cover\n\n def __str__(self):\n \"\"\"Get string representation of Album.\"\"\"\n return f\"{self.date:%Y-%m-%d} {self.title}\"\n\n def get_absolute_url(self):\n \"\"\"Get url of Album.\"\"\"\n return reverse(\"photos:album\", args=[str(self.slug)])\n\n def clean(self):\n super().clean()\n errors = {}\n\n if not self.title and not self.event:\n errors.update(\n {\"title\": _(\"This field is required if there is no event selected.\")}\n )\n\n if not self.date and not self.event:\n errors.update(\n {\"date\": _(\"This field is required if there is no event selected.\")}\n )\n\n if errors:\n raise ValidationError(errors)\n\n def save(self, **kwargs):\n \"\"\"Save album and send appropriate notifications.\"\"\"\n # dirname is only set for new objects, to avoid ever changing it\n if self.pk is None:\n self.dirname = self.slug\n\n if not self.title and self.event:\n self.title = self.event.title\n\n if not self.date:\n self.date = self.event.start.date()\n\n super().save(**kwargs)\n\n @property\n def access_token(self):\n \"\"\"Return access token for album.\"\"\"\n return hashlib.sha256(\n f\"{settings.SECRET_KEY}album{self.pk}\".encode()\n ).hexdigest()\n\n class Meta:\n \"\"\"Meta class for Album.\"\"\"\n\n ordering = (\"-date\", \"title\")\n", "path": "website/photos/models.py"}, {"content": "from django.db.models.signals import pre_delete\n\nfrom utils.models.signals import suspendingreceiver\n\n\n@suspendingreceiver(\n pre_delete, sender=\"photos.Photo\", dispatch_uid=\"photos_photo_delete\"\n)\ndef pre_photo_delete(sender, instance, **kwargs):\n \"\"\"Remove main photo file on deletion.\"\"\"\n instance.file.delete()\n", "path": "website/photos/signals.py"}], "after_files": [{"content": "import hashlib\nimport logging\nimport os\nimport random\nfrom secrets import token_hex\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models import Count, IntegerField, Value\nfrom django.db.models.functions import Coalesce\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext_lazy as _\n\nfrom queryable_properties.managers import 
QueryablePropertiesManager\nfrom queryable_properties.properties import AnnotationProperty\nfrom thumbnails.fields import ImageField\n\nfrom members.models import Member\n\nCOVER_FILENAME = \"cover.jpg\"\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef photo_uploadto(instance, filename):\n ext = os.path.splitext(filename)[1]\n return f\"photos/{instance.album.dirname}/{token_hex(8)}{ext}\"\n\n\nclass DuplicatePhotoException(Exception):\n \"\"\"Raised when a photo with the same digest already exists in a given album.\"\"\"\n\n\nclass Photo(models.Model):\n \"\"\"Model for a Photo object.\"\"\"\n\n objects = QueryablePropertiesManager()\n\n album = models.ForeignKey(\n \"Album\", on_delete=models.CASCADE, verbose_name=_(\"album\")\n )\n\n file = ImageField(\n _(\"file\"),\n upload_to=photo_uploadto,\n resize_source_to=\"source\",\n )\n\n rotation = models.IntegerField(\n verbose_name=_(\"rotation\"),\n default=0,\n choices=((x, x) for x in (0, 90, 180, 270)),\n help_text=_(\"This does not modify the original image file.\"),\n )\n\n _digest = models.CharField(\n \"digest\",\n max_length=40,\n blank=True,\n editable=False,\n )\n\n num_likes = AnnotationProperty(\n Coalesce(Count(\"likes\"), Value(0), output_field=IntegerField())\n )\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize Photo object and set the file if it exists.\"\"\"\n super().__init__(*args, **kwargs)\n if self.file:\n self.original_file = self.file.name\n else:\n self.original_file = \"\"\n\n def __str__(self):\n \"\"\"Return the filename of a Photo object.\"\"\"\n return os.path.basename(self.file.name)\n\n def clean(self):\n if not self.file._committed:\n hash_sha1 = hashlib.sha1()\n for chunk in iter(lambda: self.file.read(4096), b\"\"):\n hash_sha1.update(chunk)\n digest = hash_sha1.hexdigest()\n self._digest = digest\n\n if (\n Photo.objects.filter(album=self.album, _digest=digest)\n .exclude(pk=self.pk)\n .exists()\n ):\n raise ValidationError(\n {\"file\": \"This photo already exists in this album.\"}\n )\n\n return super().clean()\n\n class Meta:\n \"\"\"Meta class for Photo.\"\"\"\n\n # Photos are created in order of their filename.\n ordering = (\"pk\",)\n\n\nclass Like(models.Model):\n photo = models.ForeignKey(\n Photo, null=False, blank=False, related_name=\"likes\", on_delete=models.CASCADE\n )\n member = models.ForeignKey(\n Member, null=True, blank=False, on_delete=models.SET_NULL\n )\n\n def __str__(self):\n return str(self.member) + \" \" + _(\"likes\") + \" \" + str(self.photo)\n\n class Meta:\n unique_together = [\"photo\", \"member\"]\n\n\nclass Album(models.Model):\n \"\"\"Model for Album objects.\"\"\"\n\n title = models.CharField(\n _(\"title\"),\n blank=True,\n max_length=200,\n help_text=_(\"Leave empty to take over the title of the event\"),\n )\n\n dirname = models.CharField(\n verbose_name=_(\"directory name\"),\n max_length=200,\n )\n\n date = models.DateField(\n verbose_name=_(\"date\"),\n blank=True,\n help_text=_(\"Leave empty to take over the date of the event\"),\n )\n\n slug = models.SlugField(\n verbose_name=_(\"slug\"),\n unique=True,\n )\n\n hidden = models.BooleanField(verbose_name=_(\"hidden\"), default=False)\n\n event = models.ForeignKey(\n \"events.Event\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n )\n\n _cover = models.OneToOneField(\n Photo,\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name=\"covered_album\",\n verbose_name=_(\"cover image\"),\n )\n\n shareable = models.BooleanField(verbose_name=_(\"shareable\"), default=False)\n\n 
photosdir = \"photos\"\n photospath = os.path.join(settings.MEDIA_ROOT, photosdir)\n\n @cached_property\n def cover(self):\n \"\"\"Return cover of Album.\n\n If a cover is not set, return a random photo or None if there are no photos.\n \"\"\"\n cover = None\n if self._cover is not None:\n return self._cover\n\n # Not prefetched because this should be rare and is a lot of data\n # `exists` is faster in theory, but requires everything to be fetched later anyways\n if self.photo_set.exists():\n r = random.Random(self.dirname)\n cover = r.choice(self.photo_set.all())\n return cover\n\n def __str__(self):\n \"\"\"Get string representation of Album.\"\"\"\n return f\"{self.date:%Y-%m-%d} {self.title}\"\n\n def get_absolute_url(self):\n \"\"\"Get url of Album.\"\"\"\n return reverse(\"photos:album\", args=[str(self.slug)])\n\n def clean(self):\n super().clean()\n errors = {}\n\n if not self.title and not self.event:\n errors.update(\n {\"title\": _(\"This field is required if there is no event selected.\")}\n )\n\n if not self.date and not self.event:\n errors.update(\n {\"date\": _(\"This field is required if there is no event selected.\")}\n )\n\n if errors:\n raise ValidationError(errors)\n\n def save(self, **kwargs):\n \"\"\"Save album and send appropriate notifications.\"\"\"\n # dirname is only set for new objects, to avoid ever changing it\n if self.pk is None:\n self.dirname = self.slug\n\n if not self.title and self.event:\n self.title = self.event.title\n\n if not self.date:\n self.date = self.event.start.date()\n\n super().save(**kwargs)\n\n @property\n def access_token(self):\n \"\"\"Return access token for album.\"\"\"\n return hashlib.sha256(\n f\"{settings.SECRET_KEY}album{self.pk}\".encode()\n ).hexdigest()\n\n class Meta:\n \"\"\"Meta class for Album.\"\"\"\n\n ordering = (\"-date\", \"title\")\n", "path": "website/photos/models.py"}, {"content": "from django.db.models.signals import pre_delete\n\nfrom utils.models.signals import suspendingreceiver\n\n\n@suspendingreceiver(\n pre_delete, sender=\"photos.Photo\", dispatch_uid=\"photos_photo_delete\"\n)\ndef pre_photo_delete(sender, instance, **kwargs):\n \"\"\"Remove main photo file and thumbnails on deletion.\"\"\"\n name = instance.file.name # First get the name, it is removed by the next line.\n instance.file.delete() # Delete the file and its thumbnails.\n\n # Clean up the source metadata, django-thumbnails does not do this.\n instance.file.metadata_backend.delete_source(name)\n", "path": "website/photos/signals.py"}]}
2,753
275
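Editor's note: the record above stores its patched code as an escaped JSON string, which is hard to scan. Below is the same patched signal handler from the record's after_files (website/photos/signals.py), rendered as plain Python purely for readability; the `metadata_backend.delete_source` call is copied from the record as-is and is not independently verified against the django-thumbnails API.

```python
# Rendered from the record's after_files for readability only.
from django.db.models.signals import pre_delete

from utils.models.signals import suspendingreceiver


@suspendingreceiver(
    pre_delete, sender="photos.Photo", dispatch_uid="photos_photo_delete"
)
def pre_photo_delete(sender, instance, **kwargs):
    """Remove main photo file and thumbnails on deletion."""
    name = instance.file.name  # Grab the name first; delete() below clears it.
    instance.file.delete()  # Delete the file and its thumbnails.

    # django-thumbnails does not clean up the source metadata on its own.
    instance.file.metadata_backend.delete_source(name)
```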
gh_patches_debug_21665
rasdani/github-patches
git_diff
qtile__qtile-1241
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fake screens [This part of the documentation](https://github.com/qtile/qtile/blob/ed7198a5fb5438110f81a8c6ecc0e6289676c057/libqtile/config.py#L231-L232) mentions "fake screens", and the term is also found [in the code](https://github.com/qtile/qtile/blob/7c2a88fba68bdcf6f25dfb5494a74afc475d674e/libqtile/manager.py#L357-L373). What are they? How to use them? We need to document answers to those questions, and then make sure they work correctly. See #1192 for this last point. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libqtile/confreader.py` Content: ``` 1 # coding: utf-8 2 # 3 # Copyright (c) 2008, Aldo Cortesi <[email protected]> 4 # Copyright (c) 2011, Andrew Grigorev <[email protected]> 5 # 6 # All rights reserved. 7 # 8 # Permission is hereby granted, free of charge, to any person obtaining a copy 9 # of this software and associated documentation files (the "Software"), to deal 10 # in the Software without restriction, including without limitation the rights 11 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 # copies of the Software, and to permit persons to whom the Software is 13 # furnished to do so, subject to the following conditions: 14 # 15 # The above copyright notice and this permission notice shall be included in 16 # all copies or substantial portions of the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 # SOFTWARE. 25 import os 26 import sys 27 28 29 class ConfigError(Exception): 30 pass 31 32 33 class Config(object): 34 settings_keys = [ 35 "keys", 36 "mouse", 37 "groups", 38 "dgroups_key_binder", 39 "dgroups_app_rules", 40 "follow_mouse_focus", 41 "focus_on_window_activation", 42 "cursor_warp", 43 "layouts", 44 "floating_layout", 45 "screens", 46 "main", 47 "auto_fullscreen", 48 "widget_defaults", 49 "extension_defaults", 50 "bring_front_click", 51 "wmname", 52 ] 53 54 def __init__(self, **settings): 55 """Create a Config() object from settings 56 57 Only attributes found in Config.settings_keys will be added to object. 58 config attribute precedence is 1.) **settings 2.) self 3.) default_config 59 """ 60 from .resources import default_config 61 default = vars(default_config) 62 for key in self.settings_keys: 63 try: 64 value = settings[key] 65 except KeyError: 66 value = getattr(self, key, default[key]) 67 setattr(self, key, value) 68 self._init_deprecated(**settings) 69 70 def _init_deprecated(self, extensions=None, **settings): 71 "Initialize deprecated settings." 72 if extensions: # Deprecated in v0.10.7 73 import warnings 74 warnings.warn("'extentions' is deprecated, use " 75 "'extension_defaults'", DeprecationWarning) 76 self.extension_defaults.update(extensions.get('dmenu', {})) 77 78 @classmethod 79 def from_file(cls, path): 80 "Create a Config() object from the python file located at path." 
81 try: 82 sys.path.insert(0, os.path.dirname(path)) 83 config = __import__(os.path.basename(path)[:-3]) 84 except Exception: 85 import traceback 86 from .log_utils import logger 87 logger.exception('Could not import config file %r', path) 88 tb = traceback.format_exc() 89 raise ConfigError(tb) 90 return cls(**vars(config)) 91 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libqtile/confreader.py b/libqtile/confreader.py --- a/libqtile/confreader.py +++ b/libqtile/confreader.py @@ -66,6 +66,7 @@ value = getattr(self, key, default[key]) setattr(self, key, value) self._init_deprecated(**settings) + self._init_fake_screens(**settings) def _init_deprecated(self, extensions=None, **settings): "Initialize deprecated settings." @@ -75,6 +76,14 @@ "'extension_defaults'", DeprecationWarning) self.extension_defaults.update(extensions.get('dmenu', {})) + def _init_fake_screens(self, **settings): + " Initiaize fake_screens if they are set." + try: + value = settings['fake_screens'] + setattr(self, 'fake_screens', value) + except KeyError: + pass + @classmethod def from_file(cls, path): "Create a Config() object from the python file located at path."
{"golden_diff": "diff --git a/libqtile/confreader.py b/libqtile/confreader.py\n--- a/libqtile/confreader.py\n+++ b/libqtile/confreader.py\n@@ -66,6 +66,7 @@\n value = getattr(self, key, default[key])\n setattr(self, key, value)\n self._init_deprecated(**settings)\n+ self._init_fake_screens(**settings)\n \n def _init_deprecated(self, extensions=None, **settings):\n \"Initialize deprecated settings.\"\n@@ -75,6 +76,14 @@\n \"'extension_defaults'\", DeprecationWarning)\n self.extension_defaults.update(extensions.get('dmenu', {}))\n \n+ def _init_fake_screens(self, **settings):\n+ \" Initiaize fake_screens if they are set.\"\n+ try:\n+ value = settings['fake_screens']\n+ setattr(self, 'fake_screens', value)\n+ except KeyError:\n+ pass\n+\n @classmethod\n def from_file(cls, path):\n \"Create a Config() object from the python file located at path.\"\n", "issue": "Fake screens\n[This part of the documentation](https://github.com/qtile/qtile/blob/ed7198a5fb5438110f81a8c6ecc0e6289676c057/libqtile/config.py#L231-L232) mentions \"fake screens\", and the term is also found [in the code](https://github.com/qtile/qtile/blob/7c2a88fba68bdcf6f25dfb5494a74afc475d674e/libqtile/manager.py#L357-L373).\r\n\r\nWhat are they? How to use them?\r\n\r\nWe need to document answers to those questions, and then make sure they work correctly.\r\n\r\nSee #1192 for this last point.\n", "before_files": [{"content": "# coding: utf-8\n#\n# Copyright (c) 2008, Aldo Cortesi <[email protected]>\n# Copyright (c) 2011, Andrew Grigorev <[email protected]>\n#\n# All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport os\nimport sys\n\n\nclass ConfigError(Exception):\n pass\n\n\nclass Config(object):\n settings_keys = [\n \"keys\",\n \"mouse\",\n \"groups\",\n \"dgroups_key_binder\",\n \"dgroups_app_rules\",\n \"follow_mouse_focus\",\n \"focus_on_window_activation\",\n \"cursor_warp\",\n \"layouts\",\n \"floating_layout\",\n \"screens\",\n \"main\",\n \"auto_fullscreen\",\n \"widget_defaults\",\n \"extension_defaults\",\n \"bring_front_click\",\n \"wmname\",\n ]\n\n def __init__(self, **settings):\n \"\"\"Create a Config() object from settings\n\n Only attributes found in Config.settings_keys will be added to object.\n config attribute precedence is 1.) **settings 2.) self 3.) 
default_config\n \"\"\"\n from .resources import default_config\n default = vars(default_config)\n for key in self.settings_keys:\n try:\n value = settings[key]\n except KeyError:\n value = getattr(self, key, default[key])\n setattr(self, key, value)\n self._init_deprecated(**settings)\n\n def _init_deprecated(self, extensions=None, **settings):\n \"Initialize deprecated settings.\"\n if extensions: # Deprecated in v0.10.7\n import warnings\n warnings.warn(\"'extentions' is deprecated, use \"\n \"'extension_defaults'\", DeprecationWarning)\n self.extension_defaults.update(extensions.get('dmenu', {}))\n\n @classmethod\n def from_file(cls, path):\n \"Create a Config() object from the python file located at path.\"\n try:\n sys.path.insert(0, os.path.dirname(path))\n config = __import__(os.path.basename(path)[:-3])\n except Exception:\n import traceback\n from .log_utils import logger\n logger.exception('Could not import config file %r', path)\n tb = traceback.format_exc()\n raise ConfigError(tb)\n return cls(**vars(config))\n", "path": "libqtile/confreader.py"}], "after_files": [{"content": "# coding: utf-8\n#\n# Copyright (c) 2008, Aldo Cortesi <[email protected]>\n# Copyright (c) 2011, Andrew Grigorev <[email protected]>\n#\n# All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport os\nimport sys\n\n\nclass ConfigError(Exception):\n pass\n\n\nclass Config(object):\n settings_keys = [\n \"keys\",\n \"mouse\",\n \"groups\",\n \"dgroups_key_binder\",\n \"dgroups_app_rules\",\n \"follow_mouse_focus\",\n \"focus_on_window_activation\",\n \"cursor_warp\",\n \"layouts\",\n \"floating_layout\",\n \"screens\",\n \"main\",\n \"auto_fullscreen\",\n \"widget_defaults\",\n \"extension_defaults\",\n \"bring_front_click\",\n \"wmname\",\n ]\n\n def __init__(self, **settings):\n \"\"\"Create a Config() object from settings\n\n Only attributes found in Config.settings_keys will be added to object.\n config attribute precedence is 1.) **settings 2.) self 3.) 
default_config\n \"\"\"\n from .resources import default_config\n default = vars(default_config)\n for key in self.settings_keys:\n try:\n value = settings[key]\n except KeyError:\n value = getattr(self, key, default[key])\n setattr(self, key, value)\n self._init_deprecated(**settings)\n self._init_fake_screens(**settings)\n\n def _init_deprecated(self, extensions=None, **settings):\n \"Initialize deprecated settings.\"\n if extensions: # Deprecated in v0.10.7\n import warnings\n warnings.warn(\"'extentions' is deprecated, use \"\n \"'extension_defaults'\", DeprecationWarning)\n self.extension_defaults.update(extensions.get('dmenu', {}))\n\n def _init_fake_screens(self, **settings):\n \" Initiaize fake_screens if they are set.\"\n try:\n value = settings['fake_screens']\n setattr(self, 'fake_screens', value)\n except KeyError:\n pass\n\n @classmethod\n def from_file(cls, path):\n \"Create a Config() object from the python file located at path.\"\n try:\n sys.path.insert(0, os.path.dirname(path))\n config = __import__(os.path.basename(path)[:-3])\n except Exception:\n import traceback\n from .log_utils import logger\n logger.exception('Could not import config file %r', path)\n tb = traceback.format_exc()\n raise ConfigError(tb)\n return cls(**vars(config))\n", "path": "libqtile/confreader.py"}]}
1,325
236
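Editor's note: the issue in the record above asks what "fake screens" are; the golden diff answers it operationally by having the config loader pick up an optional `fake_screens` list from the user's config module. A minimal sketch of what such a config entry might look like follows. The geometry keyword arguments passed to `Screen` are an assumption based on qtile splitting one physical monitor into several virtual screens, not something stated in the record.

```python
# Hypothetical ~/.config/qtile/config.py fragment (a sketch, not taken from the record).
# With the patch above, Config._init_fake_screens() copies this attribute onto the
# Config object, and qtile can then use it in place of the detected screen list.
from libqtile.config import Screen

fake_screens = [
    Screen(x=0, y=0, width=960, height=1080),    # left half of a 1920x1080 panel
    Screen(x=960, y=0, width=960, height=1080),  # right half
]
```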
gh_patches_debug_13966
rasdani/github-patches
git_diff
streamlit__streamlit-7255
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [v1.21.0] "TypeError: Unicode-objects must be encoded before hashing" in `util.calc_md5` due to inadequate type check ### Checklist - [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues. - [X] I added a very descriptive title to this issue. - [X] I have provided sufficient information below to help reproduce this issue. ### Summary After upgrading to version 1.21.0, [`util.calc_md5`](https://github.com/streamlit/streamlit/blob/316cef3426c4f3c81e99414446b0e9a131dbdb57/lib/streamlit/util.py#L180) throws an error when loading a page: `TypeError: Unicode-objects must be encoded before hashing`. This is due to the check `if type(s) is str` in `b = cast(bytes, s.encode("utf-8") if type(s) is str else s)` not being broad enough to catch all `str`-like objects. ### Reproducible Code Example ```Python from streamlit import util class MyString(str): pass s = MyString("foobar") util.calc_md5(s) ``` ### Steps To Reproduce Run the provided code example. ### Expected Behavior No error. ### Current Behavior TypeError: Unicode-objects must be encoded before hashing ### Is this a regression? - [X] Yes, this used to work in a previous version. ### Debug info - Streamlit version: 1.21.0 - Python version: 3.8.14 - Operating System: Ubuntu 18.04.6 - Browser: Chrome Version 107.0.5304.87 (Official Build) (64-bit) - Virtual environment: ### Additional Information In my case, the [`main_script_path_str` passed to `get_pages` here](https://github.com/streamlit/streamlit/blob/316cef3426c4f3c81e99414446b0e9a131dbdb57/lib/streamlit/source_util.py#L106) ends up being a subclass of `str`. A good enough fix for my specific case is using `isinstance` instead of `type`: ``` b = cast(bytes, s.encode("utf-8") if isinstance(s, str) else s) ``` Or something like this could maybe catch more potential issues: ``` b = cast(bytes, str(s).encode("utf-8") if type(s) is not bytes else s) ``` ### Are you willing to submit a PR? - [X] Yes, I am willing to submit a PR! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/streamlit/util.py` Content: ``` 1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022) 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """A bunch of useful utilities.""" 16 17 from __future__ import annotations 18 19 import dataclasses 20 import functools 21 import hashlib 22 import os 23 import subprocess 24 from typing import Any, Dict, Iterable, List, Mapping, Set, TypeVar, Union, cast 25 26 from typing_extensions import Final 27 28 from streamlit import env_util 29 30 # URL of Streamlit's help page. 
31 HELP_DOC: Final = "https://docs.streamlit.io/" 32 FLOAT_EQUALITY_EPSILON: Final[float] = 0.000000000005 33 34 35 def memoize(func): 36 """Decorator to memoize the result of a no-args func.""" 37 result: List[Any] = [] 38 39 @functools.wraps(func) 40 def wrapped_func(): 41 if not result: 42 result.append(func()) 43 return result[0] 44 45 return wrapped_func 46 47 48 def open_browser(url): 49 """Open a web browser pointing to a given URL. 50 51 We use this function instead of Python's `webbrowser` module because this 52 way we can capture stdout/stderr to avoid polluting the terminal with the 53 browser's messages. For example, Chrome always prints things like "Created 54 new window in existing browser session", and those get on the user's way. 55 56 url : str 57 The URL. Must include the protocol. 58 59 """ 60 # Treat Windows separately because: 61 # 1. /dev/null doesn't exist. 62 # 2. subprocess.Popen(['start', url]) doesn't actually pop up the 63 # browser even though 'start url' works from the command prompt. 64 # Fun! 65 # Also, use webbrowser if we are on Linux and xdg-open is not installed. 66 # 67 # We don't use the webbrowser module on Linux and Mac because some browsers 68 # (ahem... Chrome) always print "Opening in existing browser session" to 69 # the terminal, which is spammy and annoying. So instead we start the 70 # browser ourselves and send all its output to /dev/null. 71 72 if env_util.IS_WINDOWS: 73 _open_browser_with_webbrowser(url) 74 return 75 if env_util.IS_LINUX_OR_BSD: 76 if env_util.is_executable_in_path("xdg-open"): 77 _open_browser_with_command("xdg-open", url) 78 return 79 _open_browser_with_webbrowser(url) 80 return 81 if env_util.IS_DARWIN: 82 _open_browser_with_command("open", url) 83 return 84 85 import platform 86 87 raise Error('Cannot open browser in platform "%s"' % platform.system()) 88 89 90 def _open_browser_with_webbrowser(url): 91 import webbrowser 92 93 webbrowser.open(url) 94 95 96 def _open_browser_with_command(command, url): 97 cmd_line = [command, url] 98 with open(os.devnull, "w") as devnull: 99 subprocess.Popen(cmd_line, stdout=devnull, stderr=subprocess.STDOUT) 100 101 102 def _maybe_tuple_to_list(item: Any) -> Any: 103 """Convert a tuple to a list. Leave as is if it's not a tuple.""" 104 if isinstance(item, tuple): 105 return list(item) 106 return item 107 108 109 def repr_(self: Any) -> str: 110 """A clean repr for a class, excluding both values that are likely defaults, 111 and those explicitly default for dataclasses. 112 """ 113 classname = self.__class__.__name__ 114 # Most of the falsey value, but excluding 0 and 0.0, since those often have 115 # semantic meaning within streamlit. 116 defaults: list[Any] = [None, "", False, [], set(), dict()] 117 if dataclasses.is_dataclass(self): 118 fields_vals = ( 119 (f.name, getattr(self, f.name)) 120 for f in dataclasses.fields(self) 121 if f.repr 122 and getattr(self, f.name) != f.default 123 and getattr(self, f.name) not in defaults 124 ) 125 else: 126 fields_vals = ((f, v) for (f, v) in self.__dict__.items() if v not in defaults) 127 128 field_reprs = ", ".join(f"{field}={value!r}" for field, value in fields_vals) 129 return f"{classname}({field_reprs})" 130 131 132 _Value = TypeVar("_Value") 133 134 135 def index_(iterable: Iterable[_Value], x: _Value) -> int: 136 """Return zero-based index of the first item whose value is equal to x. 137 Raises a ValueError if there is no such item. 
138 139 We need a custom implementation instead of the built-in list .index() to 140 be compatible with NumPy array and Pandas Series. 141 142 Parameters 143 ---------- 144 iterable : list, tuple, numpy.ndarray, pandas.Series 145 x : Any 146 147 Returns 148 ------- 149 int 150 """ 151 for i, value in enumerate(iterable): 152 if x == value: 153 return i 154 elif isinstance(value, float) and isinstance(x, float): 155 if abs(x - value) < FLOAT_EQUALITY_EPSILON: 156 return i 157 raise ValueError("{} is not in iterable".format(str(x))) 158 159 160 _Key = TypeVar("_Key", bound=str) 161 162 163 def lower_clean_dict_keys(dict: Mapping[_Key, _Value]) -> Dict[str, _Value]: 164 return {k.lower().strip(): v for k, v in dict.items()} 165 166 167 # TODO: Move this into errors.py? Replace with StreamlitAPIException? 168 class Error(Exception): 169 pass 170 171 172 def calc_md5(s: Union[bytes, str]) -> str: 173 """Return the md5 hash of the given string.""" 174 h = hashlib.new("md5") 175 176 # mypy seems to have trouble inferring that the type of the if/else expression is 177 # always bytes. 178 b = cast(bytes, s.encode("utf-8") if type(s) is str else s) 179 180 h.update(b) 181 return h.hexdigest() 182 183 184 def exclude_key_query_params( 185 query_params: Dict[str, List[str]], keys_to_exclude: List[str] 186 ) -> Dict[str, List[str]]: 187 """Returns new object query_params : Dict[str, List[str]], but without keys defined with keys_to_drop : List[str].""" 188 return { 189 key: value 190 for key, value in query_params.items() 191 if key.lower() not in keys_to_exclude 192 } 193 194 195 def extract_key_query_params( 196 query_params: Dict[str, List[str]], param_key: str 197 ) -> Set[str]: 198 """Extracts key (case-insensitive) query params from Dict, and returns them as Set of str.""" 199 return set( 200 [ 201 item.lower() 202 for sublist in [ 203 [value.lower() for value in query_params[key]] 204 for key in query_params.keys() 205 if key.lower() == param_key and query_params.get(key) 206 ] 207 for item in sublist 208 ] 209 ) 210 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/streamlit/util.py b/lib/streamlit/util.py --- a/lib/streamlit/util.py +++ b/lib/streamlit/util.py @@ -21,7 +21,7 @@ import hashlib import os import subprocess -from typing import Any, Dict, Iterable, List, Mapping, Set, TypeVar, Union, cast +from typing import Any, Dict, Iterable, List, Mapping, Set, TypeVar, Union from typing_extensions import Final @@ -173,9 +173,7 @@ """Return the md5 hash of the given string.""" h = hashlib.new("md5") - # mypy seems to have trouble inferring that the type of the if/else expression is - # always bytes. - b = cast(bytes, s.encode("utf-8") if type(s) is str else s) + b = s.encode("utf-8") if isinstance(s, str) else s h.update(b) return h.hexdigest()
{"golden_diff": "diff --git a/lib/streamlit/util.py b/lib/streamlit/util.py\n--- a/lib/streamlit/util.py\n+++ b/lib/streamlit/util.py\n@@ -21,7 +21,7 @@\n import hashlib\n import os\n import subprocess\n-from typing import Any, Dict, Iterable, List, Mapping, Set, TypeVar, Union, cast\n+from typing import Any, Dict, Iterable, List, Mapping, Set, TypeVar, Union\n \n from typing_extensions import Final\n \n@@ -173,9 +173,7 @@\n \"\"\"Return the md5 hash of the given string.\"\"\"\n h = hashlib.new(\"md5\")\n \n- # mypy seems to have trouble inferring that the type of the if/else expression is\n- # always bytes.\n- b = cast(bytes, s.encode(\"utf-8\") if type(s) is str else s)\n+ b = s.encode(\"utf-8\") if isinstance(s, str) else s\n \n h.update(b)\n return h.hexdigest()\n", "issue": "[v1.21.0] \"TypeError: Unicode-objects must be encoded before hashing\" in `util.calc_md5` due to inadequate type check\n### Checklist\r\n\r\n- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\r\n- [X] I added a very descriptive title to this issue.\r\n- [X] I have provided sufficient information below to help reproduce this issue.\r\n\r\n### Summary\r\n\r\nAfter upgrading to version 1.21.0, [`util.calc_md5`](https://github.com/streamlit/streamlit/blob/316cef3426c4f3c81e99414446b0e9a131dbdb57/lib/streamlit/util.py#L180) throws an error when loading a page: `TypeError: Unicode-objects must be encoded before hashing`.\r\n\r\nThis is due to the check `if type(s) is str` in `b = cast(bytes, s.encode(\"utf-8\") if type(s) is str else s)` not being broad enough to catch all `str`-like objects.\r\n\r\n### Reproducible Code Example\r\n\r\n```Python\r\nfrom streamlit import util\r\n\r\nclass MyString(str):\r\n pass\r\n\r\ns = MyString(\"foobar\")\r\n\r\nutil.calc_md5(s)\r\n```\r\n\r\n\r\n### Steps To Reproduce\r\n\r\nRun the provided code example.\r\n\r\n### Expected Behavior\r\n\r\nNo error.\r\n\r\n### Current Behavior\r\n\r\nTypeError: Unicode-objects must be encoded before hashing\r\n\r\n### Is this a regression?\r\n\r\n- [X] Yes, this used to work in a previous version.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 1.21.0\r\n- Python version: 3.8.14\r\n- Operating System: Ubuntu 18.04.6\r\n- Browser: Chrome Version 107.0.5304.87 (Official Build) (64-bit)\r\n- Virtual environment:\r\n\r\n\r\n### Additional Information\r\n\r\nIn my case, the [`main_script_path_str` passed to `get_pages` here](https://github.com/streamlit/streamlit/blob/316cef3426c4f3c81e99414446b0e9a131dbdb57/lib/streamlit/source_util.py#L106) ends up being a subclass of `str`.\r\n\r\nA good enough fix for my specific case is using `isinstance` instead of `type`:\r\n\r\n```\r\nb = cast(bytes, s.encode(\"utf-8\") if isinstance(s, str) else s)\r\n```\r\n\r\nOr something like this could maybe catch more potential issues:\r\n\r\n```\r\nb = cast(bytes, str(s).encode(\"utf-8\") if type(s) is not bytes else s)\r\n```\r\n\r\n### Are you willing to submit a PR?\r\n\r\n- [X] Yes, I am willing to submit a PR!\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A bunch of useful utilities.\"\"\"\n\nfrom __future__ import annotations\n\nimport dataclasses\nimport functools\nimport hashlib\nimport os\nimport subprocess\nfrom typing import Any, Dict, Iterable, List, Mapping, Set, TypeVar, Union, cast\n\nfrom typing_extensions import Final\n\nfrom streamlit import env_util\n\n# URL of Streamlit's help page.\nHELP_DOC: Final = \"https://docs.streamlit.io/\"\nFLOAT_EQUALITY_EPSILON: Final[float] = 0.000000000005\n\n\ndef memoize(func):\n \"\"\"Decorator to memoize the result of a no-args func.\"\"\"\n result: List[Any] = []\n\n @functools.wraps(func)\n def wrapped_func():\n if not result:\n result.append(func())\n return result[0]\n\n return wrapped_func\n\n\ndef open_browser(url):\n \"\"\"Open a web browser pointing to a given URL.\n\n We use this function instead of Python's `webbrowser` module because this\n way we can capture stdout/stderr to avoid polluting the terminal with the\n browser's messages. For example, Chrome always prints things like \"Created\n new window in existing browser session\", and those get on the user's way.\n\n url : str\n The URL. Must include the protocol.\n\n \"\"\"\n # Treat Windows separately because:\n # 1. /dev/null doesn't exist.\n # 2. subprocess.Popen(['start', url]) doesn't actually pop up the\n # browser even though 'start url' works from the command prompt.\n # Fun!\n # Also, use webbrowser if we are on Linux and xdg-open is not installed.\n #\n # We don't use the webbrowser module on Linux and Mac because some browsers\n # (ahem... Chrome) always print \"Opening in existing browser session\" to\n # the terminal, which is spammy and annoying. So instead we start the\n # browser ourselves and send all its output to /dev/null.\n\n if env_util.IS_WINDOWS:\n _open_browser_with_webbrowser(url)\n return\n if env_util.IS_LINUX_OR_BSD:\n if env_util.is_executable_in_path(\"xdg-open\"):\n _open_browser_with_command(\"xdg-open\", url)\n return\n _open_browser_with_webbrowser(url)\n return\n if env_util.IS_DARWIN:\n _open_browser_with_command(\"open\", url)\n return\n\n import platform\n\n raise Error('Cannot open browser in platform \"%s\"' % platform.system())\n\n\ndef _open_browser_with_webbrowser(url):\n import webbrowser\n\n webbrowser.open(url)\n\n\ndef _open_browser_with_command(command, url):\n cmd_line = [command, url]\n with open(os.devnull, \"w\") as devnull:\n subprocess.Popen(cmd_line, stdout=devnull, stderr=subprocess.STDOUT)\n\n\ndef _maybe_tuple_to_list(item: Any) -> Any:\n \"\"\"Convert a tuple to a list. 
Leave as is if it's not a tuple.\"\"\"\n if isinstance(item, tuple):\n return list(item)\n return item\n\n\ndef repr_(self: Any) -> str:\n \"\"\"A clean repr for a class, excluding both values that are likely defaults,\n and those explicitly default for dataclasses.\n \"\"\"\n classname = self.__class__.__name__\n # Most of the falsey value, but excluding 0 and 0.0, since those often have\n # semantic meaning within streamlit.\n defaults: list[Any] = [None, \"\", False, [], set(), dict()]\n if dataclasses.is_dataclass(self):\n fields_vals = (\n (f.name, getattr(self, f.name))\n for f in dataclasses.fields(self)\n if f.repr\n and getattr(self, f.name) != f.default\n and getattr(self, f.name) not in defaults\n )\n else:\n fields_vals = ((f, v) for (f, v) in self.__dict__.items() if v not in defaults)\n\n field_reprs = \", \".join(f\"{field}={value!r}\" for field, value in fields_vals)\n return f\"{classname}({field_reprs})\"\n\n\n_Value = TypeVar(\"_Value\")\n\n\ndef index_(iterable: Iterable[_Value], x: _Value) -> int:\n \"\"\"Return zero-based index of the first item whose value is equal to x.\n Raises a ValueError if there is no such item.\n\n We need a custom implementation instead of the built-in list .index() to\n be compatible with NumPy array and Pandas Series.\n\n Parameters\n ----------\n iterable : list, tuple, numpy.ndarray, pandas.Series\n x : Any\n\n Returns\n -------\n int\n \"\"\"\n for i, value in enumerate(iterable):\n if x == value:\n return i\n elif isinstance(value, float) and isinstance(x, float):\n if abs(x - value) < FLOAT_EQUALITY_EPSILON:\n return i\n raise ValueError(\"{} is not in iterable\".format(str(x)))\n\n\n_Key = TypeVar(\"_Key\", bound=str)\n\n\ndef lower_clean_dict_keys(dict: Mapping[_Key, _Value]) -> Dict[str, _Value]:\n return {k.lower().strip(): v for k, v in dict.items()}\n\n\n# TODO: Move this into errors.py? Replace with StreamlitAPIException?\nclass Error(Exception):\n pass\n\n\ndef calc_md5(s: Union[bytes, str]) -> str:\n \"\"\"Return the md5 hash of the given string.\"\"\"\n h = hashlib.new(\"md5\")\n\n # mypy seems to have trouble inferring that the type of the if/else expression is\n # always bytes.\n b = cast(bytes, s.encode(\"utf-8\") if type(s) is str else s)\n\n h.update(b)\n return h.hexdigest()\n\n\ndef exclude_key_query_params(\n query_params: Dict[str, List[str]], keys_to_exclude: List[str]\n) -> Dict[str, List[str]]:\n \"\"\"Returns new object query_params : Dict[str, List[str]], but without keys defined with keys_to_drop : List[str].\"\"\"\n return {\n key: value\n for key, value in query_params.items()\n if key.lower() not in keys_to_exclude\n }\n\n\ndef extract_key_query_params(\n query_params: Dict[str, List[str]], param_key: str\n) -> Set[str]:\n \"\"\"Extracts key (case-insensitive) query params from Dict, and returns them as Set of str.\"\"\"\n return set(\n [\n item.lower()\n for sublist in [\n [value.lower() for value in query_params[key]]\n for key in query_params.keys()\n if key.lower() == param_key and query_params.get(key)\n ]\n for item in sublist\n ]\n )\n", "path": "lib/streamlit/util.py"}], "after_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A bunch of useful utilities.\"\"\"\n\nfrom __future__ import annotations\n\nimport dataclasses\nimport functools\nimport hashlib\nimport os\nimport subprocess\nfrom typing import Any, Dict, Iterable, List, Mapping, Set, TypeVar, Union\n\nfrom typing_extensions import Final\n\nfrom streamlit import env_util\n\n# URL of Streamlit's help page.\nHELP_DOC: Final = \"https://docs.streamlit.io/\"\nFLOAT_EQUALITY_EPSILON: Final[float] = 0.000000000005\n\n\ndef memoize(func):\n \"\"\"Decorator to memoize the result of a no-args func.\"\"\"\n result: List[Any] = []\n\n @functools.wraps(func)\n def wrapped_func():\n if not result:\n result.append(func())\n return result[0]\n\n return wrapped_func\n\n\ndef open_browser(url):\n \"\"\"Open a web browser pointing to a given URL.\n\n We use this function instead of Python's `webbrowser` module because this\n way we can capture stdout/stderr to avoid polluting the terminal with the\n browser's messages. For example, Chrome always prints things like \"Created\n new window in existing browser session\", and those get on the user's way.\n\n url : str\n The URL. Must include the protocol.\n\n \"\"\"\n # Treat Windows separately because:\n # 1. /dev/null doesn't exist.\n # 2. subprocess.Popen(['start', url]) doesn't actually pop up the\n # browser even though 'start url' works from the command prompt.\n # Fun!\n # Also, use webbrowser if we are on Linux and xdg-open is not installed.\n #\n # We don't use the webbrowser module on Linux and Mac because some browsers\n # (ahem... Chrome) always print \"Opening in existing browser session\" to\n # the terminal, which is spammy and annoying. So instead we start the\n # browser ourselves and send all its output to /dev/null.\n\n if env_util.IS_WINDOWS:\n _open_browser_with_webbrowser(url)\n return\n if env_util.IS_LINUX_OR_BSD:\n if env_util.is_executable_in_path(\"xdg-open\"):\n _open_browser_with_command(\"xdg-open\", url)\n return\n _open_browser_with_webbrowser(url)\n return\n if env_util.IS_DARWIN:\n _open_browser_with_command(\"open\", url)\n return\n\n import platform\n\n raise Error('Cannot open browser in platform \"%s\"' % platform.system())\n\n\ndef _open_browser_with_webbrowser(url):\n import webbrowser\n\n webbrowser.open(url)\n\n\ndef _open_browser_with_command(command, url):\n cmd_line = [command, url]\n with open(os.devnull, \"w\") as devnull:\n subprocess.Popen(cmd_line, stdout=devnull, stderr=subprocess.STDOUT)\n\n\ndef _maybe_tuple_to_list(item: Any) -> Any:\n \"\"\"Convert a tuple to a list. 
Leave as is if it's not a tuple.\"\"\"\n if isinstance(item, tuple):\n return list(item)\n return item\n\n\ndef repr_(self: Any) -> str:\n \"\"\"A clean repr for a class, excluding both values that are likely defaults,\n and those explicitly default for dataclasses.\n \"\"\"\n classname = self.__class__.__name__\n # Most of the falsey value, but excluding 0 and 0.0, since those often have\n # semantic meaning within streamlit.\n defaults: list[Any] = [None, \"\", False, [], set(), dict()]\n if dataclasses.is_dataclass(self):\n fields_vals = (\n (f.name, getattr(self, f.name))\n for f in dataclasses.fields(self)\n if f.repr\n and getattr(self, f.name) != f.default\n and getattr(self, f.name) not in defaults\n )\n else:\n fields_vals = ((f, v) for (f, v) in self.__dict__.items() if v not in defaults)\n\n field_reprs = \", \".join(f\"{field}={value!r}\" for field, value in fields_vals)\n return f\"{classname}({field_reprs})\"\n\n\n_Value = TypeVar(\"_Value\")\n\n\ndef index_(iterable: Iterable[_Value], x: _Value) -> int:\n \"\"\"Return zero-based index of the first item whose value is equal to x.\n Raises a ValueError if there is no such item.\n\n We need a custom implementation instead of the built-in list .index() to\n be compatible with NumPy array and Pandas Series.\n\n Parameters\n ----------\n iterable : list, tuple, numpy.ndarray, pandas.Series\n x : Any\n\n Returns\n -------\n int\n \"\"\"\n for i, value in enumerate(iterable):\n if x == value:\n return i\n elif isinstance(value, float) and isinstance(x, float):\n if abs(x - value) < FLOAT_EQUALITY_EPSILON:\n return i\n raise ValueError(\"{} is not in iterable\".format(str(x)))\n\n\n_Key = TypeVar(\"_Key\", bound=str)\n\n\ndef lower_clean_dict_keys(dict: Mapping[_Key, _Value]) -> Dict[str, _Value]:\n return {k.lower().strip(): v for k, v in dict.items()}\n\n\n# TODO: Move this into errors.py? Replace with StreamlitAPIException?\nclass Error(Exception):\n pass\n\n\ndef calc_md5(s: Union[bytes, str]) -> str:\n \"\"\"Return the md5 hash of the given string.\"\"\"\n h = hashlib.new(\"md5\")\n\n b = s.encode(\"utf-8\") if isinstance(s, str) else s\n\n h.update(b)\n return h.hexdigest()\n\n\ndef exclude_key_query_params(\n query_params: Dict[str, List[str]], keys_to_exclude: List[str]\n) -> Dict[str, List[str]]:\n \"\"\"Returns new object query_params : Dict[str, List[str]], but without keys defined with keys_to_drop : List[str].\"\"\"\n return {\n key: value\n for key, value in query_params.items()\n if key.lower() not in keys_to_exclude\n }\n\n\ndef extract_key_query_params(\n query_params: Dict[str, List[str]], param_key: str\n) -> Set[str]:\n \"\"\"Extracts key (case-insensitive) query params from Dict, and returns them as Set of str.\"\"\"\n return set(\n [\n item.lower()\n for sublist in [\n [value.lower() for value in query_params[key]]\n for key in query_params.keys()\n if key.lower() == param_key and query_params.get(key)\n ]\n for item in sublist\n ]\n )\n", "path": "lib/streamlit/util.py"}]}
3,026
220
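Editor's note: the heart of the record above is that an exact-type check (`type(s) is str`) is False for subclasses of `str`, so the non-encoding branch is taken and `hashlib` rejects the unencoded string. A small self-contained sketch of the failure mode and of the patched helper follows; `PathLikeString` is a made-up stand-in for the path subclass mentioned in the issue.

```python
import hashlib


class PathLikeString(str):
    """Hypothetical str subclass, standing in for the path type from the issue."""


s = PathLikeString("foobar")

print(type(s) is str)      # False: the exact-type check misses the subclass...
print(isinstance(s, str))  # True:  ...while isinstance covers it.


def calc_md5(s):
    """Patched helper, mirroring the golden diff above."""
    h = hashlib.new("md5")
    b = s.encode("utf-8") if isinstance(s, str) else s
    h.update(b)
    return h.hexdigest()


print(calc_md5(s))          # works for str subclasses
print(calc_md5(b"foobar"))  # still works for bytes
assert calc_md5(s) == calc_md5("foobar")
```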
gh_patches_debug_2189
rasdani/github-patches
git_diff
googleapis__python-bigquery-648
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- chore: use paths for --cov arguments in noxfile https://github.com/googleapis/python-bigquery/blob/6a48e80bc7d347f381b181f4cf81fef105d0ad0d/noxfile.py#L80-L81 To pull https://github.com/googleapis/synthtool/pull/859 from templates. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `noxfile.py` Content: ``` 1 # Copyright 2016 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from __future__ import absolute_import 16 17 import pathlib 18 import os 19 import shutil 20 21 import nox 22 23 24 PYTYPE_VERSION = "pytype==2021.4.9" 25 BLACK_VERSION = "black==19.10b0" 26 BLACK_PATHS = ("docs", "google", "samples", "tests", "noxfile.py", "setup.py") 27 28 DEFAULT_PYTHON_VERSION = "3.8" 29 SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] 30 UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] 31 CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() 32 33 # 'docfx' is excluded since it only needs to run in 'docs-presubmit' 34 nox.options.sessions = [ 35 "unit_noextras", 36 "unit", 37 "system", 38 "snippets", 39 "cover", 40 "lint", 41 "lint_setup_py", 42 "blacken", 43 "pytype", 44 "docs", 45 ] 46 47 48 def default(session, install_extras=True): 49 """Default unit test session. 50 51 This is intended to be run **without** an interpreter set, so 52 that the current ``python`` (on the ``PATH``) or the version of 53 Python corresponding to the ``nox`` binary the ``PATH`` can 54 run the tests. 55 """ 56 constraints_path = str( 57 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" 58 ) 59 60 # Install all test dependencies, then install local packages in-place. 61 session.install( 62 "mock", 63 "pytest", 64 "google-cloud-testutils", 65 "pytest-cov", 66 "freezegun", 67 "-c", 68 constraints_path, 69 ) 70 71 install_target = ".[all]" if install_extras else "." 72 session.install("-e", install_target, "-c", constraints_path) 73 74 session.install("ipython", "-c", constraints_path) 75 76 # Run py.test against the unit tests. 
77 session.run( 78 "py.test", 79 "--quiet", 80 "--cov=google.cloud.bigquery", 81 "--cov=tests.unit", 82 "--cov-append", 83 "--cov-config=.coveragerc", 84 "--cov-report=", 85 "--cov-fail-under=0", 86 os.path.join("tests", "unit"), 87 *session.posargs, 88 ) 89 90 91 @nox.session(python=UNIT_TEST_PYTHON_VERSIONS) 92 def unit(session): 93 """Run the unit test suite.""" 94 default(session) 95 96 97 @nox.session(python=UNIT_TEST_PYTHON_VERSIONS[-1]) 98 def unit_noextras(session): 99 """Run the unit test suite.""" 100 default(session, install_extras=False) 101 102 103 @nox.session(python=DEFAULT_PYTHON_VERSION) 104 def pytype(session): 105 """Run type checks.""" 106 session.install("-e", ".[all]") 107 session.install("ipython") 108 session.install(PYTYPE_VERSION) 109 session.run("pytype") 110 111 112 @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) 113 def system(session): 114 """Run the system test suite.""" 115 116 constraints_path = str( 117 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" 118 ) 119 120 # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true. 121 if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false": 122 session.skip("RUN_SYSTEM_TESTS is set to false, skipping") 123 124 # Sanity check: Only run system tests if the environment variable is set. 125 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): 126 session.skip("Credentials must be set via environment variable.") 127 128 # Use pre-release gRPC for system tests. 129 session.install("--pre", "grpcio", "-c", constraints_path) 130 131 # Install all test dependencies, then install local packages in place. 132 session.install( 133 "mock", "pytest", "psutil", "google-cloud-testutils", "-c", constraints_path 134 ) 135 if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "") == "true": 136 # mTLS test requires pyopenssl and latest google-cloud-storage 137 session.install("google-cloud-storage", "pyopenssl") 138 else: 139 session.install("google-cloud-storage", "-c", constraints_path) 140 141 session.install("-e", ".[all]", "-c", constraints_path) 142 session.install("ipython", "-c", constraints_path) 143 144 # Run py.test against the system tests. 145 session.run("py.test", "--quiet", os.path.join("tests", "system"), *session.posargs) 146 147 148 @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) 149 def snippets(session): 150 """Run the snippets test suite.""" 151 152 # Check the value of `RUN_SNIPPETS_TESTS` env var. It defaults to true. 153 if os.environ.get("RUN_SNIPPETS_TESTS", "true") == "false": 154 session.skip("RUN_SNIPPETS_TESTS is set to false, skipping") 155 156 # Sanity check: Only run snippets tests if the environment variable is set. 157 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): 158 session.skip("Credentials must be set via environment variable.") 159 160 constraints_path = str( 161 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" 162 ) 163 164 # Install all test dependencies, then install local packages in place. 165 session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path) 166 session.install("google-cloud-storage", "-c", constraints_path) 167 session.install("grpcio", "-c", constraints_path) 168 169 session.install("-e", ".[all]", "-c", constraints_path) 170 171 # Run py.test against the snippets tests. 172 # Skip tests in samples/snippets, as those are run in a different session 173 # using the nox config from that directory. 
174 session.run("py.test", os.path.join("docs", "snippets.py"), *session.posargs) 175 session.run( 176 "py.test", 177 "samples", 178 "--ignore=samples/snippets", 179 "--ignore=samples/geography", 180 *session.posargs, 181 ) 182 183 184 @nox.session(python=DEFAULT_PYTHON_VERSION) 185 def cover(session): 186 """Run the final coverage report. 187 188 This outputs the coverage report aggregating coverage from the unit 189 test runs (not system test runs), and then erases coverage data. 190 """ 191 session.install("coverage", "pytest-cov") 192 session.run("coverage", "report", "--show-missing", "--fail-under=100") 193 session.run("coverage", "erase") 194 195 196 @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) 197 def prerelease_deps(session): 198 """Run all tests with prerelease versions of dependencies installed. 199 200 https://github.com/googleapis/python-bigquery/issues/95 201 """ 202 # PyArrow prerelease packages are published to an alternative PyPI host. 203 # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages 204 session.install( 205 "--extra-index-url", "https://pypi.fury.io/arrow-nightlies/", "--pre", "pyarrow" 206 ) 207 session.install("--pre", "grpcio", "pandas") 208 session.install( 209 "freezegun", 210 "google-cloud-storage", 211 "google-cloud-testutils", 212 "IPython", 213 "mock", 214 "psutil", 215 "pytest", 216 "pytest-cov", 217 ) 218 session.install("-e", ".[all]") 219 220 # Print out prerelease package versions. 221 session.run("python", "-c", "import grpc; print(grpc.__version__)") 222 session.run("python", "-c", "import pandas; print(pandas.__version__)") 223 session.run("python", "-c", "import pyarrow; print(pyarrow.__version__)") 224 225 # Run all tests, except a few samples tests which require extra dependencies. 226 session.run("py.test", "tests/unit") 227 session.run("py.test", "tests/system") 228 session.run("py.test", "samples/tests") 229 230 231 @nox.session(python=DEFAULT_PYTHON_VERSION) 232 def lint(session): 233 """Run linters. 234 235 Returns a failure if the linters find linting errors or sufficiently 236 serious code quality issues. 237 """ 238 239 session.install("flake8", BLACK_VERSION) 240 session.install("-e", ".") 241 session.run("flake8", os.path.join("google", "cloud", "bigquery")) 242 session.run("flake8", "tests") 243 session.run("flake8", os.path.join("docs", "samples")) 244 session.run("flake8", os.path.join("docs", "snippets.py")) 245 session.run("black", "--check", *BLACK_PATHS) 246 247 248 @nox.session(python=DEFAULT_PYTHON_VERSION) 249 def lint_setup_py(session): 250 """Verify that setup.py is valid (including RST check).""" 251 252 session.install("docutils", "Pygments") 253 session.run("python", "setup.py", "check", "--restructuredtext", "--strict") 254 255 256 @nox.session(python="3.6") 257 def blacken(session): 258 """Run black. 259 Format code to uniform standard. 260 261 This currently uses Python 3.6 due to the automated Kokoro run of synthtool. 262 That run uses an image that doesn't have 3.6 installed. Before updating this 263 check the state of the `gcp_ubuntu_config` we use for that Kokoro run. 
264 """ 265 session.install(BLACK_VERSION) 266 session.run("black", *BLACK_PATHS) 267 268 269 @nox.session(python=DEFAULT_PYTHON_VERSION) 270 def docs(session): 271 """Build the docs.""" 272 273 session.install("ipython", "recommonmark", "sphinx", "sphinx_rtd_theme") 274 session.install("google-cloud-storage") 275 session.install("-e", ".[all]") 276 277 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) 278 session.run( 279 "sphinx-build", 280 "-W", # warnings as errors 281 "-T", # show full traceback on exception 282 "-N", # no colors 283 "-b", 284 "html", 285 "-d", 286 os.path.join("docs", "_build", "doctrees", ""), 287 os.path.join("docs", ""), 288 os.path.join("docs", "_build", "html", ""), 289 ) 290 291 292 @nox.session(python=DEFAULT_PYTHON_VERSION) 293 def docfx(session): 294 """Build the docfx yaml files for this library.""" 295 296 session.install("-e", ".") 297 session.install("sphinx", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml") 298 299 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) 300 session.run( 301 "sphinx-build", 302 "-T", # show full traceback on exception 303 "-N", # no colors 304 "-D", 305 ( 306 "extensions=sphinx.ext.autodoc," 307 "sphinx.ext.autosummary," 308 "docfx_yaml.extension," 309 "sphinx.ext.intersphinx," 310 "sphinx.ext.coverage," 311 "sphinx.ext.napoleon," 312 "sphinx.ext.todo," 313 "sphinx.ext.viewcode," 314 "recommonmark" 315 ), 316 "-b", 317 "html", 318 "-d", 319 os.path.join("docs", "_build", "doctrees", ""), 320 os.path.join("docs", ""), 321 os.path.join("docs", "_build", "html", ""), 322 ) 323 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/noxfile.py b/noxfile.py --- a/noxfile.py +++ b/noxfile.py @@ -77,8 +77,8 @@ session.run( "py.test", "--quiet", - "--cov=google.cloud.bigquery", - "--cov=tests.unit", + "--cov=google/cloud/bigquery", + "--cov=tests/unit", "--cov-append", "--cov-config=.coveragerc", "--cov-report=",
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -77,8 +77,8 @@\n session.run(\n \"py.test\",\n \"--quiet\",\n- \"--cov=google.cloud.bigquery\",\n- \"--cov=tests.unit\",\n+ \"--cov=google/cloud/bigquery\",\n+ \"--cov=tests/unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n", "issue": "chore: use paths for --cov arguments in noxfile\nhttps://github.com/googleapis/python-bigquery/blob/6a48e80bc7d347f381b181f4cf81fef105d0ad0d/noxfile.py#L80-L81\r\n\r\nTo pull https://github.com/googleapis/synthtool/pull/859 from templates.\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport pathlib\nimport os\nimport shutil\n\nimport nox\n\n\nPYTYPE_VERSION = \"pytype==2021.4.9\"\nBLACK_VERSION = \"black==19.10b0\"\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\nDEFAULT_PYTHON_VERSION = \"3.8\"\nSYSTEM_TEST_PYTHON_VERSIONS = [\"3.8\"]\nUNIT_TEST_PYTHON_VERSIONS = [\"3.6\", \"3.7\", \"3.8\", \"3.9\"]\nCURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()\n\n# 'docfx' is excluded since it only needs to run in 'docs-presubmit'\nnox.options.sessions = [\n \"unit_noextras\",\n \"unit\",\n \"system\",\n \"snippets\",\n \"cover\",\n \"lint\",\n \"lint_setup_py\",\n \"blacken\",\n \"pytype\",\n \"docs\",\n]\n\n\ndef default(session, install_extras=True):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in-place.\n session.install(\n \"mock\",\n \"pytest\",\n \"google-cloud-testutils\",\n \"pytest-cov\",\n \"freezegun\",\n \"-c\",\n constraints_path,\n )\n\n install_target = \".[all]\" if install_extras else \".\"\n session.install(\"-e\", install_target, \"-c\", constraints_path)\n\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google.cloud.bigquery\",\n \"--cov=tests.unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=UNIT_TEST_PYTHON_VERSIONS)\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=UNIT_TEST_PYTHON_VERSIONS[-1])\ndef unit_noextras(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session, install_extras=False)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef pytype(session):\n \"\"\"Run type checks.\"\"\"\n session.install(\"-e\", \".[all]\")\n session.install(\"ipython\")\n 
session.install(PYTYPE_VERSION)\n session.run(\"pytype\")\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.\n if os.environ.get(\"RUN_SYSTEM_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SYSTEM_TESTS is set to false, skipping\")\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\", \"-c\", constraints_path)\n\n # Install all test dependencies, then install local packages in place.\n session.install(\n \"mock\", \"pytest\", \"psutil\", \"google-cloud-testutils\", \"-c\", constraints_path\n )\n if os.environ.get(\"GOOGLE_API_USE_CLIENT_CERTIFICATE\", \"\") == \"true\":\n # mTLS test requires pyopenssl and latest google-cloud-storage\n session.install(\"google-cloud-storage\", \"pyopenssl\")\n else:\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the system tests.\n session.run(\"py.test\", \"--quiet\", os.path.join(\"tests\", \"system\"), *session.posargs)\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Check the value of `RUN_SNIPPETS_TESTS` env var. It defaults to true.\n if os.environ.get(\"RUN_SNIPPETS_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SNIPPETS_TESTS is set to false, skipping\")\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"google-cloud-testutils\", \"-c\", constraints_path)\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n session.install(\"grpcio\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n # Run py.test against the snippets tests.\n # Skip tests in samples/snippets, as those are run in a different session\n # using the nox config from that directory.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\n \"py.test\",\n \"samples\",\n \"--ignore=samples/snippets\",\n \"--ignore=samples/geography\",\n *session.posargs,\n )\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef prerelease_deps(session):\n \"\"\"Run all tests with prerelease versions of dependencies installed.\n\n 
https://github.com/googleapis/python-bigquery/issues/95\n \"\"\"\n # PyArrow prerelease packages are published to an alternative PyPI host.\n # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages\n session.install(\n \"--extra-index-url\", \"https://pypi.fury.io/arrow-nightlies/\", \"--pre\", \"pyarrow\"\n )\n session.install(\"--pre\", \"grpcio\", \"pandas\")\n session.install(\n \"freezegun\",\n \"google-cloud-storage\",\n \"google-cloud-testutils\",\n \"IPython\",\n \"mock\",\n \"psutil\",\n \"pytest\",\n \"pytest-cov\",\n )\n session.install(\"-e\", \".[all]\")\n\n # Print out prerelease package versions.\n session.run(\"python\", \"-c\", \"import grpc; print(grpc.__version__)\")\n session.run(\"python\", \"-c\", \"import pandas; print(pandas.__version__)\")\n session.run(\"python\", \"-c\", \"import pyarrow; print(pyarrow.__version__)\")\n\n # Run all tests, except a few samples tests which require extra dependencies.\n session.run(\"py.test\", \"tests/unit\")\n session.run(\"py.test\", \"tests/system\")\n session.run(\"py.test\", \"samples/tests\")\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"flake8\", BLACK_VERSION)\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. 
Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef docfx(session):\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n\n session.install(\"-e\", \".\")\n session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"gcp-sphinx-docfx-yaml\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-D\",\n (\n \"extensions=sphinx.ext.autodoc,\"\n \"sphinx.ext.autosummary,\"\n \"docfx_yaml.extension,\"\n \"sphinx.ext.intersphinx,\"\n \"sphinx.ext.coverage,\"\n \"sphinx.ext.napoleon,\"\n \"sphinx.ext.todo,\"\n \"sphinx.ext.viewcode,\"\n \"recommonmark\"\n ),\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}], "after_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport pathlib\nimport os\nimport shutil\n\nimport nox\n\n\nPYTYPE_VERSION = \"pytype==2021.4.9\"\nBLACK_VERSION = \"black==19.10b0\"\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\nDEFAULT_PYTHON_VERSION = \"3.8\"\nSYSTEM_TEST_PYTHON_VERSIONS = [\"3.8\"]\nUNIT_TEST_PYTHON_VERSIONS = [\"3.6\", \"3.7\", \"3.8\", \"3.9\"]\nCURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()\n\n# 'docfx' is excluded since it only needs to run in 'docs-presubmit'\nnox.options.sessions = [\n \"unit_noextras\",\n \"unit\",\n \"system\",\n \"snippets\",\n \"cover\",\n \"lint\",\n \"lint_setup_py\",\n \"blacken\",\n \"pytype\",\n \"docs\",\n]\n\n\ndef default(session, install_extras=True):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages 
in-place.\n session.install(\n \"mock\",\n \"pytest\",\n \"google-cloud-testutils\",\n \"pytest-cov\",\n \"freezegun\",\n \"-c\",\n constraints_path,\n )\n\n install_target = \".[all]\" if install_extras else \".\"\n session.install(\"-e\", install_target, \"-c\", constraints_path)\n\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google/cloud/bigquery\",\n \"--cov=tests/unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=UNIT_TEST_PYTHON_VERSIONS)\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=UNIT_TEST_PYTHON_VERSIONS[-1])\ndef unit_noextras(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session, install_extras=False)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef pytype(session):\n \"\"\"Run type checks.\"\"\"\n session.install(\"-e\", \".[all]\")\n session.install(\"ipython\")\n session.install(PYTYPE_VERSION)\n session.run(\"pytype\")\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.\n if os.environ.get(\"RUN_SYSTEM_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SYSTEM_TESTS is set to false, skipping\")\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\", \"-c\", constraints_path)\n\n # Install all test dependencies, then install local packages in place.\n session.install(\n \"mock\", \"pytest\", \"psutil\", \"google-cloud-testutils\", \"-c\", constraints_path\n )\n if os.environ.get(\"GOOGLE_API_USE_CLIENT_CERTIFICATE\", \"\") == \"true\":\n # mTLS test requires pyopenssl and latest google-cloud-storage\n session.install(\"google-cloud-storage\", \"pyopenssl\")\n else:\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the system tests.\n session.run(\"py.test\", \"--quiet\", os.path.join(\"tests\", \"system\"), *session.posargs)\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Check the value of `RUN_SNIPPETS_TESTS` env var. 
It defaults to true.\n if os.environ.get(\"RUN_SNIPPETS_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SNIPPETS_TESTS is set to false, skipping\")\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"google-cloud-testutils\", \"-c\", constraints_path)\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n session.install(\"grpcio\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n # Run py.test against the snippets tests.\n # Skip tests in samples/snippets, as those are run in a different session\n # using the nox config from that directory.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\n \"py.test\",\n \"samples\",\n \"--ignore=samples/snippets\",\n \"--ignore=samples/geography\",\n *session.posargs,\n )\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef prerelease_deps(session):\n \"\"\"Run all tests with prerelease versions of dependencies installed.\n\n https://github.com/googleapis/python-bigquery/issues/95\n \"\"\"\n # PyArrow prerelease packages are published to an alternative PyPI host.\n # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages\n session.install(\n \"--extra-index-url\", \"https://pypi.fury.io/arrow-nightlies/\", \"--pre\", \"pyarrow\"\n )\n session.install(\"--pre\", \"grpcio\", \"pandas\")\n session.install(\n \"freezegun\",\n \"google-cloud-storage\",\n \"google-cloud-testutils\",\n \"IPython\",\n \"mock\",\n \"psutil\",\n \"pytest\",\n \"pytest-cov\",\n )\n session.install(\"-e\", \".[all]\")\n\n # Print out prerelease package versions.\n session.run(\"python\", \"-c\", \"import grpc; print(grpc.__version__)\")\n session.run(\"python\", \"-c\", \"import pandas; print(pandas.__version__)\")\n session.run(\"python\", \"-c\", \"import pyarrow; print(pyarrow.__version__)\")\n\n # Run all tests, except a few samples tests which require extra dependencies.\n session.run(\"py.test\", \"tests/unit\")\n session.run(\"py.test\", \"tests/system\")\n session.run(\"py.test\", \"samples/tests\")\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"flake8\", BLACK_VERSION)\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email 
protected](python=DEFAULT_PYTHON_VERSION)\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef docfx(session):\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n\n session.install(\"-e\", \".\")\n session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"gcp-sphinx-docfx-yaml\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-D\",\n (\n \"extensions=sphinx.ext.autodoc,\"\n \"sphinx.ext.autosummary,\"\n \"docfx_yaml.extension,\"\n \"sphinx.ext.intersphinx,\"\n \"sphinx.ext.coverage,\"\n \"sphinx.ext.napoleon,\"\n \"sphinx.ext.todo,\"\n \"sphinx.ext.viewcode,\"\n \"recommonmark\"\n ),\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}]}
num_tokens: 3,816
num_tokens_diff: 109
problem_id: gh_patches_debug_34454
source: rasdani/github-patches
task_type: git_diff
in_source_id: mindsdb__mindsdb-1336
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Can`t upload big files with waitress * Mindsdb version you tried to install: latest * Additional info if applicable: waitress==2.0.0 Waitress consumes too much memory when uploading a file. 16Gb of memory is not enough to upload 1.1Gb file --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mindsdb/api/http/namespaces/datasource.py` Content: ``` 1 import os 2 import threading 3 import tempfile 4 import re 5 import multipart 6 7 from dateutil.parser import parse 8 from flask import request, send_file 9 from flask_restx import Resource, abort # 'abort' using to return errors as json: {'message': 'error text'} 10 from flask import current_app as ca 11 12 from mindsdb.utilities.log import log 13 from mindsdb.api.http.namespaces.configs.datasources import ns_conf 14 from mindsdb.api.http.namespaces.entitites.datasources.datasource import ( 15 datasource_metadata, 16 put_datasource_params 17 ) 18 from mindsdb.api.http.namespaces.entitites.datasources.datasource_data import ( 19 get_datasource_rows_params, 20 datasource_rows_metadata 21 ) 22 from mindsdb.api.http.namespaces.entitites.datasources.datasource_files import ( 23 put_datasource_file_params 24 ) 25 from mindsdb.api.http.namespaces.entitites.datasources.datasource_missed_files import ( 26 datasource_missed_files_metadata, 27 get_datasource_missed_files_params 28 ) 29 from mindsdb.interfaces.database.integrations import get_db_integration 30 31 32 def parse_filter(key, value): 33 result = re.search(r'filter(_*.*)\[(.*)\]', key) 34 operator = result.groups()[0].strip('_') or 'like' 35 field = result.groups()[1] 36 operators_map = { 37 'like': 'like', 38 'in': 'in', 39 'nin': 'not in', 40 'gt': '>', 41 'lt': '<', 42 'gte': '>=', 43 'lte': '<=', 44 'eq': '=', 45 'neq': '!=' 46 } 47 if operator not in operators_map: 48 return None 49 operator = operators_map[operator] 50 return [field, operator, value] 51 52 53 @ns_conf.route('/') 54 class DatasourcesList(Resource): 55 @ns_conf.doc('get_datasources_list') 56 @ns_conf.marshal_list_with(datasource_metadata) 57 def get(self): 58 '''List all datasources''' 59 return request.default_store.get_datasources() 60 61 62 @ns_conf.route('/<name>') 63 @ns_conf.param('name', 'Datasource name') 64 class Datasource(Resource): 65 @ns_conf.doc('get_datasource') 66 @ns_conf.marshal_with(datasource_metadata) 67 def get(self, name): 68 '''return datasource metadata''' 69 ds = request.default_store.get_datasource(name) 70 if ds is not None: 71 return ds 72 return '', 404 73 74 @ns_conf.doc('delete_datasource') 75 def delete(self, name): 76 '''delete datasource''' 77 78 try: 79 request.default_store.delete_datasource(name) 80 except Exception as e: 81 log.error(e) 82 abort(400, str(e)) 83 return '', 200 84 85 @ns_conf.doc('put_datasource', params=put_datasource_params) 86 @ns_conf.marshal_with(datasource_metadata) 87 def put(self, name): 88 '''add new datasource''' 89 data = {} 90 91 def on_field(field): 92 name = field.field_name.decode() 93 value = field.value.decode() 94 data[name] = value 95 96 file_object = None 97 98 def on_file(file): 99 nonlocal file_object 100 data['file'] = file.file_name.decode() 101 file_object = file.file_object 102 103 temp_dir_path = tempfile.mkdtemp(prefix='datasource_file_') 104 105 if request.headers['Content-Type'].startswith('multipart/form-data'): 106 parser = 
multipart.create_form_parser( 107 headers=request.headers, 108 on_field=on_field, 109 on_file=on_file, 110 config={ 111 'UPLOAD_DIR': temp_dir_path.encode(), # bytes required 112 'UPLOAD_KEEP_FILENAME': True, 113 'UPLOAD_KEEP_EXTENSIONS': True, 114 'MAX_MEMORY_FILE_SIZE': 0 115 } 116 ) 117 118 while True: 119 chunk = request.stream.read(8192) 120 if not chunk: 121 break 122 parser.write(chunk) 123 parser.finalize() 124 parser.close() 125 126 if file_object is not None and not file_object.closed: 127 file_object.close() 128 else: 129 data = request.json 130 131 if 'query' in data: 132 integration_id = request.json['integration_id'] 133 integration = get_db_integration(integration_id, request.company_id) 134 if integration is None: 135 abort(400, f"{integration_id} integration doesn't exist") 136 137 if integration['type'] == 'mongodb': 138 data['find'] = data['query'] 139 140 request.default_store.save_datasource(name, integration_id, data) 141 os.rmdir(temp_dir_path) 142 return request.default_store.get_datasource(name) 143 144 ds_name = data['name'] if 'name' in data else name 145 source = data['source'] if 'source' in data else name 146 source_type = data['source_type'] 147 148 if source_type == 'file': 149 file_path = os.path.join(temp_dir_path, data['file']) 150 else: 151 file_path = None 152 153 request.default_store.save_datasource(ds_name, source_type, source, file_path) 154 os.rmdir(temp_dir_path) 155 156 return request.default_store.get_datasource(ds_name) 157 158 159 def analyzing_thread(name, default_store): 160 try: 161 from mindsdb.interfaces.storage.db import session 162 analysis = default_store.start_analysis(name) 163 session.close() 164 except Exception as e: 165 log.error(e) 166 167 168 @ns_conf.route('/<name>/analyze') 169 @ns_conf.param('name', 'Datasource name') 170 class Analyze(Resource): 171 @ns_conf.doc('analyse_dataset') 172 def get(self, name): 173 analysis = request.default_store.get_analysis(name) 174 if analysis is not None: 175 return analysis, 200 176 177 178 ds = request.default_store.get_datasource(name) 179 if ds is None: 180 log.error('No valid datasource given') 181 abort(400, 'No valid datasource given') 182 183 x = threading.Thread(target=analyzing_thread, args=(name, request.default_store)) 184 x.start() 185 return {'status': 'analyzing'}, 200 186 187 188 @ns_conf.route('/<name>/analyze_refresh') 189 @ns_conf.param('name', 'Datasource name') 190 class Analyze2(Resource): 191 @ns_conf.doc('analyze_refresh_dataset') 192 def get(self, name): 193 analysis = request.default_store.get_analysis(name) 194 if analysis is not None: 195 return analysis, 200 196 197 ds = request.default_store.get_datasource(name) 198 if ds is None: 199 log.error('No valid datasource given') 200 abort(400, 'No valid datasource given') 201 202 x = threading.Thread(target=analyzing_thread, args=(name, request.default_store)) 203 x.start() 204 return {'status': 'analyzing'}, 200 205 206 207 @ns_conf.route('/<name>/data/') 208 @ns_conf.param('name', 'Datasource name') 209 class DatasourceData(Resource): 210 @ns_conf.doc('get_datasource_data', params=get_datasource_rows_params) 211 @ns_conf.marshal_with(datasource_rows_metadata) 212 def get(self, name): 213 '''return data rows''' 214 ds = request.default_store.get_datasource(name) 215 if ds is None: 216 abort(400, 'No valid datasource given') 217 218 params = { 219 'page[size]': None, 220 'page[offset]': None 221 } 222 where = [] 223 for key, value in request.args.items(): 224 if key == 'page[size]': 225 params['page[size]'] = 
int(value) 226 if key == 'page[offset]': 227 params['page[offset]'] = int(value) 228 elif key.startswith('filter'): 229 param = parse_filter(key, value) 230 if param is None: 231 abort(400, f'Not valid filter "{key}"') 232 where.append(param) 233 234 data_dict = request.default_store.get_data(name, where, params['page[size]'], params['page[offset]']) 235 return data_dict, 200 236 237 238 @ns_conf.route('/<name>/download') 239 @ns_conf.param('name', 'Datasource name') 240 class DatasourceMissedFilesDownload(Resource): 241 @ns_conf.doc('get_datasource_download') 242 def get(self, name): 243 '''download uploaded file''' 244 ds = request.default_store.get_datasource(name) 245 if not ds: 246 abort(404, "{} not found".format(name)) 247 # force download from s3 248 request.default_store.get_datasource_obj(name) 249 if not os.path.exists(ds['source']): 250 abort(404, "{} not found".format(name)) 251 252 return send_file(os.path.abspath(ds['source']), as_attachment=True) 253 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mindsdb/api/http/namespaces/datasource.py b/mindsdb/api/http/namespaces/datasource.py --- a/mindsdb/api/http/namespaces/datasource.py +++ b/mindsdb/api/http/namespaces/datasource.py @@ -3,6 +3,8 @@ import tempfile import re import multipart +import zipfile +import tarfile from dateutil.parser import parse from flask import request, send_file @@ -10,6 +12,7 @@ from flask import current_app as ca from mindsdb.utilities.log import log +from mindsdb.api.http.utils import http_error from mindsdb.api.http.namespaces.configs.datasources import ns_conf from mindsdb.api.http.namespaces.entitites.datasources.datasource import ( datasource_metadata, @@ -147,6 +150,24 @@ if source_type == 'file': file_path = os.path.join(temp_dir_path, data['file']) + lp = file_path.lower() + if lp.endswith(('.zip', '.tar.gz')): + if lp.endswith('.zip'): + with zipfile.ZipFile(file_path) as f: + f.extractall(temp_dir_path) + elif lp.endswith('.tar.gz'): + with tarfile.open(file_path) as f: + f.extractall(temp_dir_path) + os.remove(file_path) + files = os.listdir(temp_dir_path) + if len(files) != 1: + os.rmdir(temp_dir_path) + return http_error(400, 'Wrong content.', 'Archive must contain only one data file.') + file_path = os.path.join(temp_dir_path, files[0]) + source = files[0] + if not os.path.isfile(file_path): + os.rmdir(temp_dir_path) + return http_error(400, 'Wrong content.', 'Archive must contain data file in root.') else: file_path = None @@ -174,7 +195,6 @@ if analysis is not None: return analysis, 200 - ds = request.default_store.get_datasource(name) if ds is None: log.error('No valid datasource given')
{"golden_diff": "diff --git a/mindsdb/api/http/namespaces/datasource.py b/mindsdb/api/http/namespaces/datasource.py\n--- a/mindsdb/api/http/namespaces/datasource.py\n+++ b/mindsdb/api/http/namespaces/datasource.py\n@@ -3,6 +3,8 @@\n import tempfile\n import re\n import multipart\n+import zipfile\n+import tarfile\n \n from dateutil.parser import parse\n from flask import request, send_file\n@@ -10,6 +12,7 @@\n from flask import current_app as ca\n \n from mindsdb.utilities.log import log\n+from mindsdb.api.http.utils import http_error\n from mindsdb.api.http.namespaces.configs.datasources import ns_conf\n from mindsdb.api.http.namespaces.entitites.datasources.datasource import (\n datasource_metadata,\n@@ -147,6 +150,24 @@\n \n if source_type == 'file':\n file_path = os.path.join(temp_dir_path, data['file'])\n+ lp = file_path.lower()\n+ if lp.endswith(('.zip', '.tar.gz')):\n+ if lp.endswith('.zip'):\n+ with zipfile.ZipFile(file_path) as f:\n+ f.extractall(temp_dir_path)\n+ elif lp.endswith('.tar.gz'):\n+ with tarfile.open(file_path) as f:\n+ f.extractall(temp_dir_path)\n+ os.remove(file_path)\n+ files = os.listdir(temp_dir_path)\n+ if len(files) != 1:\n+ os.rmdir(temp_dir_path)\n+ return http_error(400, 'Wrong content.', 'Archive must contain only one data file.')\n+ file_path = os.path.join(temp_dir_path, files[0])\n+ source = files[0]\n+ if not os.path.isfile(file_path):\n+ os.rmdir(temp_dir_path)\n+ return http_error(400, 'Wrong content.', 'Archive must contain data file in root.')\n else:\n file_path = None\n \n@@ -174,7 +195,6 @@\n if analysis is not None:\n return analysis, 200\n \n-\n ds = request.default_store.get_datasource(name)\n if ds is None:\n log.error('No valid datasource given')\n", "issue": "Can`t upload big files with waitress\n* Mindsdb version you tried to install: latest\r\n* Additional info if applicable: waitress==2.0.0\r\n\r\nWaitress consumes too much memory when uploading a file. 
16Gb of memory is not enough to upload 1.1Gb file \n", "before_files": [{"content": "import os\nimport threading\nimport tempfile\nimport re\nimport multipart\n\nfrom dateutil.parser import parse\nfrom flask import request, send_file\nfrom flask_restx import Resource, abort # 'abort' using to return errors as json: {'message': 'error text'}\nfrom flask import current_app as ca\n\nfrom mindsdb.utilities.log import log\nfrom mindsdb.api.http.namespaces.configs.datasources import ns_conf\nfrom mindsdb.api.http.namespaces.entitites.datasources.datasource import (\n datasource_metadata,\n put_datasource_params\n)\nfrom mindsdb.api.http.namespaces.entitites.datasources.datasource_data import (\n get_datasource_rows_params,\n datasource_rows_metadata\n)\nfrom mindsdb.api.http.namespaces.entitites.datasources.datasource_files import (\n put_datasource_file_params\n)\nfrom mindsdb.api.http.namespaces.entitites.datasources.datasource_missed_files import (\n datasource_missed_files_metadata,\n get_datasource_missed_files_params\n)\nfrom mindsdb.interfaces.database.integrations import get_db_integration\n\n\ndef parse_filter(key, value):\n result = re.search(r'filter(_*.*)\\[(.*)\\]', key)\n operator = result.groups()[0].strip('_') or 'like'\n field = result.groups()[1]\n operators_map = {\n 'like': 'like',\n 'in': 'in',\n 'nin': 'not in',\n 'gt': '>',\n 'lt': '<',\n 'gte': '>=',\n 'lte': '<=',\n 'eq': '=',\n 'neq': '!='\n }\n if operator not in operators_map:\n return None\n operator = operators_map[operator]\n return [field, operator, value]\n\n\n@ns_conf.route('/')\nclass DatasourcesList(Resource):\n @ns_conf.doc('get_datasources_list')\n @ns_conf.marshal_list_with(datasource_metadata)\n def get(self):\n '''List all datasources'''\n return request.default_store.get_datasources()\n\n\n@ns_conf.route('/<name>')\n@ns_conf.param('name', 'Datasource name')\nclass Datasource(Resource):\n @ns_conf.doc('get_datasource')\n @ns_conf.marshal_with(datasource_metadata)\n def get(self, name):\n '''return datasource metadata'''\n ds = request.default_store.get_datasource(name)\n if ds is not None:\n return ds\n return '', 404\n\n @ns_conf.doc('delete_datasource')\n def delete(self, name):\n '''delete datasource'''\n\n try:\n request.default_store.delete_datasource(name)\n except Exception as e:\n log.error(e)\n abort(400, str(e))\n return '', 200\n\n @ns_conf.doc('put_datasource', params=put_datasource_params)\n @ns_conf.marshal_with(datasource_metadata)\n def put(self, name):\n '''add new datasource'''\n data = {}\n\n def on_field(field):\n name = field.field_name.decode()\n value = field.value.decode()\n data[name] = value\n\n file_object = None\n\n def on_file(file):\n nonlocal file_object\n data['file'] = file.file_name.decode()\n file_object = file.file_object\n\n temp_dir_path = tempfile.mkdtemp(prefix='datasource_file_')\n\n if request.headers['Content-Type'].startswith('multipart/form-data'):\n parser = multipart.create_form_parser(\n headers=request.headers,\n on_field=on_field,\n on_file=on_file,\n config={\n 'UPLOAD_DIR': temp_dir_path.encode(), # bytes required\n 'UPLOAD_KEEP_FILENAME': True,\n 'UPLOAD_KEEP_EXTENSIONS': True,\n 'MAX_MEMORY_FILE_SIZE': 0\n }\n )\n\n while True:\n chunk = request.stream.read(8192)\n if not chunk:\n break\n parser.write(chunk)\n parser.finalize()\n parser.close()\n\n if file_object is not None and not file_object.closed:\n file_object.close()\n else:\n data = request.json\n\n if 'query' in data:\n integration_id = request.json['integration_id']\n integration = 
get_db_integration(integration_id, request.company_id)\n if integration is None:\n abort(400, f\"{integration_id} integration doesn't exist\")\n\n if integration['type'] == 'mongodb':\n data['find'] = data['query']\n\n request.default_store.save_datasource(name, integration_id, data)\n os.rmdir(temp_dir_path)\n return request.default_store.get_datasource(name)\n\n ds_name = data['name'] if 'name' in data else name\n source = data['source'] if 'source' in data else name\n source_type = data['source_type']\n\n if source_type == 'file':\n file_path = os.path.join(temp_dir_path, data['file'])\n else:\n file_path = None\n\n request.default_store.save_datasource(ds_name, source_type, source, file_path)\n os.rmdir(temp_dir_path)\n\n return request.default_store.get_datasource(ds_name)\n\n\ndef analyzing_thread(name, default_store):\n try:\n from mindsdb.interfaces.storage.db import session\n analysis = default_store.start_analysis(name)\n session.close()\n except Exception as e:\n log.error(e)\n\n\n@ns_conf.route('/<name>/analyze')\n@ns_conf.param('name', 'Datasource name')\nclass Analyze(Resource):\n @ns_conf.doc('analyse_dataset')\n def get(self, name):\n analysis = request.default_store.get_analysis(name)\n if analysis is not None:\n return analysis, 200\n\n\n ds = request.default_store.get_datasource(name)\n if ds is None:\n log.error('No valid datasource given')\n abort(400, 'No valid datasource given')\n\n x = threading.Thread(target=analyzing_thread, args=(name, request.default_store))\n x.start()\n return {'status': 'analyzing'}, 200\n\n\n@ns_conf.route('/<name>/analyze_refresh')\n@ns_conf.param('name', 'Datasource name')\nclass Analyze2(Resource):\n @ns_conf.doc('analyze_refresh_dataset')\n def get(self, name):\n analysis = request.default_store.get_analysis(name)\n if analysis is not None:\n return analysis, 200\n\n ds = request.default_store.get_datasource(name)\n if ds is None:\n log.error('No valid datasource given')\n abort(400, 'No valid datasource given')\n\n x = threading.Thread(target=analyzing_thread, args=(name, request.default_store))\n x.start()\n return {'status': 'analyzing'}, 200\n\n\n@ns_conf.route('/<name>/data/')\n@ns_conf.param('name', 'Datasource name')\nclass DatasourceData(Resource):\n @ns_conf.doc('get_datasource_data', params=get_datasource_rows_params)\n @ns_conf.marshal_with(datasource_rows_metadata)\n def get(self, name):\n '''return data rows'''\n ds = request.default_store.get_datasource(name)\n if ds is None:\n abort(400, 'No valid datasource given')\n\n params = {\n 'page[size]': None,\n 'page[offset]': None\n }\n where = []\n for key, value in request.args.items():\n if key == 'page[size]':\n params['page[size]'] = int(value)\n if key == 'page[offset]':\n params['page[offset]'] = int(value)\n elif key.startswith('filter'):\n param = parse_filter(key, value)\n if param is None:\n abort(400, f'Not valid filter \"{key}\"')\n where.append(param)\n\n data_dict = request.default_store.get_data(name, where, params['page[size]'], params['page[offset]'])\n return data_dict, 200\n\n\n@ns_conf.route('/<name>/download')\n@ns_conf.param('name', 'Datasource name')\nclass DatasourceMissedFilesDownload(Resource):\n @ns_conf.doc('get_datasource_download')\n def get(self, name):\n '''download uploaded file'''\n ds = request.default_store.get_datasource(name)\n if not ds:\n abort(404, \"{} not found\".format(name))\n # force download from s3\n request.default_store.get_datasource_obj(name)\n if not os.path.exists(ds['source']):\n abort(404, \"{} not 
found\".format(name))\n\n return send_file(os.path.abspath(ds['source']), as_attachment=True)\n", "path": "mindsdb/api/http/namespaces/datasource.py"}], "after_files": [{"content": "import os\nimport threading\nimport tempfile\nimport re\nimport multipart\nimport zipfile\nimport tarfile\n\nfrom dateutil.parser import parse\nfrom flask import request, send_file\nfrom flask_restx import Resource, abort # 'abort' using to return errors as json: {'message': 'error text'}\nfrom flask import current_app as ca\n\nfrom mindsdb.utilities.log import log\nfrom mindsdb.api.http.utils import http_error\nfrom mindsdb.api.http.namespaces.configs.datasources import ns_conf\nfrom mindsdb.api.http.namespaces.entitites.datasources.datasource import (\n datasource_metadata,\n put_datasource_params\n)\nfrom mindsdb.api.http.namespaces.entitites.datasources.datasource_data import (\n get_datasource_rows_params,\n datasource_rows_metadata\n)\nfrom mindsdb.api.http.namespaces.entitites.datasources.datasource_files import (\n put_datasource_file_params\n)\nfrom mindsdb.api.http.namespaces.entitites.datasources.datasource_missed_files import (\n datasource_missed_files_metadata,\n get_datasource_missed_files_params\n)\nfrom mindsdb.interfaces.database.integrations import get_db_integration\n\n\ndef parse_filter(key, value):\n result = re.search(r'filter(_*.*)\\[(.*)\\]', key)\n operator = result.groups()[0].strip('_') or 'like'\n field = result.groups()[1]\n operators_map = {\n 'like': 'like',\n 'in': 'in',\n 'nin': 'not in',\n 'gt': '>',\n 'lt': '<',\n 'gte': '>=',\n 'lte': '<=',\n 'eq': '=',\n 'neq': '!='\n }\n if operator not in operators_map:\n return None\n operator = operators_map[operator]\n return [field, operator, value]\n\n\n@ns_conf.route('/')\nclass DatasourcesList(Resource):\n @ns_conf.doc('get_datasources_list')\n @ns_conf.marshal_list_with(datasource_metadata)\n def get(self):\n '''List all datasources'''\n return request.default_store.get_datasources()\n\n\n@ns_conf.route('/<name>')\n@ns_conf.param('name', 'Datasource name')\nclass Datasource(Resource):\n @ns_conf.doc('get_datasource')\n @ns_conf.marshal_with(datasource_metadata)\n def get(self, name):\n '''return datasource metadata'''\n ds = request.default_store.get_datasource(name)\n if ds is not None:\n return ds\n return '', 404\n\n @ns_conf.doc('delete_datasource')\n def delete(self, name):\n '''delete datasource'''\n\n try:\n request.default_store.delete_datasource(name)\n except Exception as e:\n log.error(e)\n abort(400, str(e))\n return '', 200\n\n @ns_conf.doc('put_datasource', params=put_datasource_params)\n @ns_conf.marshal_with(datasource_metadata)\n def put(self, name):\n '''add new datasource'''\n data = {}\n\n def on_field(field):\n name = field.field_name.decode()\n value = field.value.decode()\n data[name] = value\n\n file_object = None\n\n def on_file(file):\n nonlocal file_object\n data['file'] = file.file_name.decode()\n file_object = file.file_object\n\n temp_dir_path = tempfile.mkdtemp(prefix='datasource_file_')\n\n if request.headers['Content-Type'].startswith('multipart/form-data'):\n parser = multipart.create_form_parser(\n headers=request.headers,\n on_field=on_field,\n on_file=on_file,\n config={\n 'UPLOAD_DIR': temp_dir_path.encode(), # bytes required\n 'UPLOAD_KEEP_FILENAME': True,\n 'UPLOAD_KEEP_EXTENSIONS': True,\n 'MAX_MEMORY_FILE_SIZE': 0\n }\n )\n\n while True:\n chunk = request.stream.read(8192)\n if not chunk:\n break\n parser.write(chunk)\n parser.finalize()\n parser.close()\n\n if file_object is not None and 
not file_object.closed:\n file_object.close()\n else:\n data = request.json\n\n if 'query' in data:\n integration_id = request.json['integration_id']\n integration = get_db_integration(integration_id, request.company_id)\n if integration is None:\n abort(400, f\"{integration_id} integration doesn't exist\")\n\n if integration['type'] == 'mongodb':\n data['find'] = data['query']\n\n request.default_store.save_datasource(name, integration_id, data)\n os.rmdir(temp_dir_path)\n return request.default_store.get_datasource(name)\n\n ds_name = data['name'] if 'name' in data else name\n source = data['source'] if 'source' in data else name\n source_type = data['source_type']\n\n if source_type == 'file':\n file_path = os.path.join(temp_dir_path, data['file'])\n lp = file_path.lower()\n if lp.endswith(('.zip', '.tar.gz')):\n if lp.endswith('.zip'):\n with zipfile.ZipFile(file_path) as f:\n f.extractall(temp_dir_path)\n elif lp.endswith('.tar.gz'):\n with tarfile.open(file_path) as f:\n f.extractall(temp_dir_path)\n os.remove(file_path)\n files = os.listdir(temp_dir_path)\n if len(files) != 1:\n os.rmdir(temp_dir_path)\n return http_error(400, 'Wrong content.', 'Archive must contain only one data file.')\n file_path = os.path.join(temp_dir_path, files[0])\n source = files[0]\n if not os.path.isfile(file_path):\n os.rmdir(temp_dir_path)\n return http_error(400, 'Wrong content.', 'Archive must contain data file in root.')\n else:\n file_path = None\n\n request.default_store.save_datasource(ds_name, source_type, source, file_path)\n os.rmdir(temp_dir_path)\n\n return request.default_store.get_datasource(ds_name)\n\n\ndef analyzing_thread(name, default_store):\n try:\n from mindsdb.interfaces.storage.db import session\n analysis = default_store.start_analysis(name)\n session.close()\n except Exception as e:\n log.error(e)\n\n\n@ns_conf.route('/<name>/analyze')\n@ns_conf.param('name', 'Datasource name')\nclass Analyze(Resource):\n @ns_conf.doc('analyse_dataset')\n def get(self, name):\n analysis = request.default_store.get_analysis(name)\n if analysis is not None:\n return analysis, 200\n\n ds = request.default_store.get_datasource(name)\n if ds is None:\n log.error('No valid datasource given')\n abort(400, 'No valid datasource given')\n\n x = threading.Thread(target=analyzing_thread, args=(name, request.default_store))\n x.start()\n return {'status': 'analyzing'}, 200\n\n\n@ns_conf.route('/<name>/analyze_refresh')\n@ns_conf.param('name', 'Datasource name')\nclass Analyze2(Resource):\n @ns_conf.doc('analyze_refresh_dataset')\n def get(self, name):\n analysis = request.default_store.get_analysis(name)\n if analysis is not None:\n return analysis, 200\n\n ds = request.default_store.get_datasource(name)\n if ds is None:\n log.error('No valid datasource given')\n abort(400, 'No valid datasource given')\n\n x = threading.Thread(target=analyzing_thread, args=(name, request.default_store))\n x.start()\n return {'status': 'analyzing'}, 200\n\n\n@ns_conf.route('/<name>/data/')\n@ns_conf.param('name', 'Datasource name')\nclass DatasourceData(Resource):\n @ns_conf.doc('get_datasource_data', params=get_datasource_rows_params)\n @ns_conf.marshal_with(datasource_rows_metadata)\n def get(self, name):\n '''return data rows'''\n ds = request.default_store.get_datasource(name)\n if ds is None:\n abort(400, 'No valid datasource given')\n\n params = {\n 'page[size]': None,\n 'page[offset]': None\n }\n where = []\n for key, value in request.args.items():\n if key == 'page[size]':\n params['page[size]'] = int(value)\n if key 
== 'page[offset]':\n params['page[offset]'] = int(value)\n elif key.startswith('filter'):\n param = parse_filter(key, value)\n if param is None:\n abort(400, f'Not valid filter \"{key}\"')\n where.append(param)\n\n data_dict = request.default_store.get_data(name, where, params['page[size]'], params['page[offset]'])\n return data_dict, 200\n\n\n@ns_conf.route('/<name>/download')\n@ns_conf.param('name', 'Datasource name')\nclass DatasourceMissedFilesDownload(Resource):\n @ns_conf.doc('get_datasource_download')\n def get(self, name):\n '''download uploaded file'''\n ds = request.default_store.get_datasource(name)\n if not ds:\n abort(404, \"{} not found\".format(name))\n # force download from s3\n request.default_store.get_datasource_obj(name)\n if not os.path.exists(ds['source']):\n abort(404, \"{} not found\".format(name))\n\n return send_file(os.path.abspath(ds['source']), as_attachment=True)\n", "path": "mindsdb/api/http/namespaces/datasource.py"}]}
num_tokens: 2,828
num_tokens_diff: 486
problem_id: gh_patches_debug_38557
source: rasdani/github-patches
task_type: git_diff
in_source_id: cornellius-gp__gpytorch-196
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fully connected layer before mean and covariance modules I am trying to do a non-linear transformation of input before forwarding it through mean and covariance modules. Before master branch was merged to priors (commit 1f5491e3edcac6497d3370c8aaef9a9362048a3e), I can add a fully connected layer before passing the input through mean and covariance modules to learn a non-linear representation. For example, the following script worked fine. ``` import math import torch import gpytorch from matplotlib import pyplot as plt from torch import optim from gpytorch.kernels import RBFKernel from gpytorch.means import ConstantMean from gpytorch.likelihoods import GaussianLikelihood from gpytorch.random_variables import GaussianRandomVariable train_x = torch.linspace(0, 1, 11) train_y = torch.sin(train_x.data * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2 class ExactGPModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(ExactGPModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(constant_bounds=(-10, 10)) self.covar_module = RBFKernel(log_lengthscale_bounds=(-5, 5)) self.fc = torch.nn.Linear(1, 2) def forward(self, x): x_ = self.fc(x) mean_x = self.mean_module(x_) covar_x = self.covar_module(x_) return GaussianRandomVariable(mean_x, covar_x) likelihood = GaussianLikelihood(log_noise_bounds=(-5, 5)) model = ExactGPModel(train_x.data, train_y.data, likelihood) model.train() likelihood.train() optimizer = torch.optim.Adam([ {'params': model.parameters()}, ], lr=0.1) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model) training_iter = 1000 for i in range(training_iter): optimizer.zero_grad() output = model(train_x) loss = -mll(output, train_y) loss.backward() print('Iter %d/%d - Loss: %.3f log_lengthscale: %.3f log_noise: %.3f' % ( i + 1, training_iter, loss.data[0], model.covar_module.log_lengthscale.data[0, 0], model.likelihood.log_noise.data[0] )) optimizer.step() ``` However, now I get this runtime error because of the linear fc layer: `AttributeError: 'Linear' object has no attribute '_get_prior_for'` How can I get this to work with the latest version of gpytorch? DKL does something similar to what I am trying to do but implements AdditiveGridInducingVariationalGP and softmax likelihood, however, in my application I'd like to use ExactGP and gaussian likelihood. Is it possible to do so? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `gpytorch/module.py` Content: ``` 1 from __future__ import absolute_import 2 from __future__ import division 3 from __future__ import print_function 4 from __future__ import unicode_literals 5 6 import torch 7 from collections import OrderedDict 8 from torch import nn 9 from .random_variables import RandomVariable 10 from .lazy import LazyVariable 11 from .variational import VariationalStrategy 12 13 14 class Module(nn.Module): 15 def __init__(self): 16 super(Module, self).__init__() 17 self._priors = OrderedDict() 18 self._derived_priors = OrderedDict() 19 self._variational_strategies = OrderedDict() 20 21 def _get_module_and_name(self, parameter_name): 22 """Get module and name from full parameter name.""" 23 module, name = parameter_name.split(".", 1) 24 if module in self._modules: 25 return self.__getattr__(module), name 26 else: 27 raise AttributeError( 28 "Invalid parameter name {}. {} has no module {}".format(parameter_name, type(self).__name__, module) 29 ) 30 31 def _get_prior_for(self, parameter_name): 32 """ 33 Get prior for parameter 34 35 parameter_name (str): parameter name 36 """ 37 if "." in parameter_name: 38 module, parameter_name = self._get_module_and_name(parameter_name) 39 return module._get_prior_for(parameter_name) 40 else: 41 if parameter_name in self._parameters: 42 return self._priors.get(parameter_name) 43 else: 44 raise AttributeError( 45 "Module {module} has no parameter {name}".format(module=type(self).__name__, name=parameter_name) 46 ) 47 48 def _get_derived_prior(self, prior_name): 49 """ 50 Get derived prior from prior name 51 52 prior_name (str): the name of the derived prior 53 """ 54 if "." in prior_name: 55 module, prior_name = self._get_module_and_name(prior_name) 56 return module._get_derived_prior(prior_name) 57 else: 58 if prior_name in self._parameters: 59 return self._derived_priors.get(prior_name) 60 else: 61 raise AttributeError( 62 "Module {module} has no derived prior {name}".format(module=type(self).__name__, name=prior_name) 63 ) 64 65 def forward(self, *inputs, **kwargs): 66 raise NotImplementedError 67 68 def initialize(self, **kwargs): 69 """ 70 Set a value for a parameter 71 72 kwargs: (param_name, value) - parameter to initialize 73 Value can take the form of a tensor, a float, or an int 74 """ 75 for name, val in kwargs.items(): 76 if name not in self._parameters: 77 raise AttributeError("Unknown parameter {p} for {c}".format(p=name, c=self.__class__.__name__)) 78 if torch.is_tensor(val): 79 self.__getattr__(name).data.copy_(val) 80 elif isinstance(val, float) or isinstance(val, int): 81 self.__getattr__(name).data.fill_(val) 82 else: 83 raise AttributeError("Type {t} not valid to initialize parameter {p}".format(t=type(val), p=name)) 84 85 # Ensure value is contained in support of prior (if present) 86 prior = self._priors.get(name) 87 if prior is not None: 88 param = self._parameters[name] 89 if not prior.is_in_support(param): 90 raise ValueError( 91 "Value of parameter {param} not contained in support of specified prior".format(param=param) 92 ) 93 return self 94 95 def named_parameter_priors(self): 96 """ 97 Returns an iterator over module parameter priors, yielding the name of 98 the parameter, the parameter itself, as well as the associated prior 99 (excludes parameters for which no prior has been registered) 100 """ 101 for name, param in self.named_parameters(): 102 prior = self._get_prior_for(name) 103 if prior is not None: 104 yield name, param, prior 105 106 def named_derived_priors(self, 
memo=None, prefix=""): 107 """Returns an iterator over module derived priors, yielding both the 108 name of the prior as well as the prior, the associated parameters, and 109 the transformation callable. 110 111 Yields: 112 (string, Prior, tuple(string), callable): Tuple containing the name 113 of the prior, the prior itself, its parameters, and the transform 114 to be called on the parameters. 115 116 """ 117 if memo is None: 118 memo = set() 119 for name, (prior, pnames, tf) in self._derived_priors.items(): 120 if prior is not None and prior not in memo: 121 memo.add(prior) 122 parameters = tuple(getattr(self, pname) for pname in pnames) 123 yield prefix + ("." if prefix else "") + name, prior, parameters, tf 124 for mname, module in self.named_children(): 125 submodule_prefix = prefix + ("." if prefix else "") + mname 126 if hasattr(module, "_derived_priors"): 127 for name, prior, parameters, tf in module.named_derived_priors(memo, submodule_prefix): 128 yield name, prior, parameters, tf 129 130 def named_variational_strategies(self, memo=None, prefix=""): 131 """Returns an iterator over module variational strategies, yielding both 132 the name of the variational strategy as well as the strategy itself. 133 134 Yields: 135 (string, VariationalStrategy): Tuple containing the name of the 136 strategy and the strategy 137 138 """ 139 if memo is None: 140 memo = set() 141 for name, strategy in self._variational_strategies.items(): 142 if strategy is not None and strategy not in memo: 143 memo.add(strategy) 144 yield prefix + ("." if prefix else "") + name, strategy 145 for mname, module in self.named_children(): 146 submodule_prefix = prefix + ("." if prefix else "") + mname 147 if hasattr(module, "named_variational_strategies"): 148 for name, strategy in module.named_variational_strategies(memo, submodule_prefix): 149 yield name, strategy 150 151 def register_parameter(self, name, parameter, prior=None): 152 """ 153 Adds a parameter to the module. 154 The parameter can be accessed as an attribute using given name. 155 156 name (str): name of parameter 157 param (torch.nn.Parameter): parameter 158 prior (Prior): prior for parameter (default: None) 159 """ 160 if "_parameters" not in self.__dict__: 161 raise AttributeError("Cannot assign parameter before Module.__init__() call") 162 super(Module, self).register_parameter(name, parameter) 163 if prior is not None: 164 self.set_parameter_priors(**{name: prior}) 165 166 def register_derived_prior(self, name, prior, parameter_names, transform): 167 """ 168 Adds a derived prior to the module. 169 The prior can be accessed as an attribute using the given name. 170 171 name (str): name of the derived prior 172 prior (Prior): the prior object 173 parameter_names (tuple(str)): The parameters the transform operaters on, 174 in the same order as expected by the transform callable. 175 transform (Callable): The function called on the specified parameters. The 176 log-pdf of the prior will be evaluating on the output of this transform. 177 178 A derived prior operates on a transform of one or multiple parameters. 179 This can be used, for instance, to put a prior over the ICM Kernel 180 covariance matrix generated from covar_factor and log_var parameters. 
181 182 """ 183 self.add_module(name, prior) 184 self._derived_priors[name] = (prior, tuple(parameter_names), transform) 185 186 def register_variational_strategy(self, name): 187 self._variational_strategies[name] = None 188 189 def set_parameter_priors(self, **kwargs): 190 """ 191 Set prior for a parameter. 192 The prior can be accessed as an attribute using <PARAMETER_NAME>_prior. 193 194 kwargs: (param_name, prior) - parameter to initialize 195 prior must be a gpytorch Prior 196 """ 197 for name, prior in kwargs.items(): 198 if name not in self._parameters: 199 raise AttributeError( 200 "Unknown parameter {name} for {module}".format(name=name, module=self.__class__.__name__) 201 ) 202 self.add_module("_".join([name, "prior"]), prior) 203 self._priors[name] = prior 204 return self 205 206 def variational_strategies(self): 207 for _, strategy in self.named_variational_strategies(): 208 yield strategy 209 210 def update_variational_strategy(self, name, variational_strategy): 211 if not isinstance(variational_strategy, VariationalStrategy): 212 raise RuntimeError("variational_strategy must be a VariationalStrategy") 213 if name not in self._variational_strategies.keys(): 214 raise RuntimeError("variational strategy {} not registered".format(name)) 215 self._variational_strategies[name] = variational_strategy 216 217 def __call__(self, *inputs, **kwargs): 218 outputs = self.forward(*inputs, **kwargs) 219 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable): 220 return outputs 221 for output in outputs: 222 if not (isinstance(output, RandomVariable) or torch.is_tensor(output) or isinstance(output, LazyVariable)): 223 raise RuntimeError( 224 "Output must be a RandomVariable, torch.Tensor, or LazyVariable. " 225 "Was a {}".format(input.__class__.__name__) 226 ) 227 if len(outputs) == 1: 228 outputs = outputs[0] 229 return outputs 230 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gpytorch/module.py b/gpytorch/module.py --- a/gpytorch/module.py +++ b/gpytorch/module.py @@ -28,40 +28,6 @@ "Invalid parameter name {}. {} has no module {}".format(parameter_name, type(self).__name__, module) ) - def _get_prior_for(self, parameter_name): - """ - Get prior for parameter - - parameter_name (str): parameter name - """ - if "." in parameter_name: - module, parameter_name = self._get_module_and_name(parameter_name) - return module._get_prior_for(parameter_name) - else: - if parameter_name in self._parameters: - return self._priors.get(parameter_name) - else: - raise AttributeError( - "Module {module} has no parameter {name}".format(module=type(self).__name__, name=parameter_name) - ) - - def _get_derived_prior(self, prior_name): - """ - Get derived prior from prior name - - prior_name (str): the name of the derived prior - """ - if "." in prior_name: - module, prior_name = self._get_module_and_name(prior_name) - return module._get_derived_prior(prior_name) - else: - if prior_name in self._parameters: - return self._derived_priors.get(prior_name) - else: - raise AttributeError( - "Module {module} has no derived prior {name}".format(module=type(self).__name__, name=prior_name) - ) - def forward(self, *inputs, **kwargs): raise NotImplementedError @@ -92,16 +58,24 @@ ) return self - def named_parameter_priors(self): + def named_parameter_priors(self, memo=None, prefix=""): """ Returns an iterator over module parameter priors, yielding the name of the parameter, the parameter itself, as well as the associated prior (excludes parameters for which no prior has been registered) """ - for name, param in self.named_parameters(): - prior = self._get_prior_for(name) - if prior is not None: - yield name, param, prior + if memo is None: + memo = set() + for name, parameter in self._parameters.items(): + if name in self._priors and self._priors[name] not in memo: + prior = self._priors[name] + memo.add(prior) + yield prefix + ("." if prefix else "") + name, parameter, prior + for mname, module in self.named_children(): + submodule_prefix = prefix + ("." if prefix else "") + mname + if hasattr(module, "named_parameter_priors"): + for name, parameter, prior in module.named_parameter_priors(memo, submodule_prefix): + yield name, parameter, prior def named_derived_priors(self, memo=None, prefix=""): """Returns an iterator over module derived priors, yielding both the
{"golden_diff": "diff --git a/gpytorch/module.py b/gpytorch/module.py\n--- a/gpytorch/module.py\n+++ b/gpytorch/module.py\n@@ -28,40 +28,6 @@\n \"Invalid parameter name {}. {} has no module {}\".format(parameter_name, type(self).__name__, module)\n )\n \n- def _get_prior_for(self, parameter_name):\n- \"\"\"\n- Get prior for parameter\n-\n- parameter_name (str): parameter name\n- \"\"\"\n- if \".\" in parameter_name:\n- module, parameter_name = self._get_module_and_name(parameter_name)\n- return module._get_prior_for(parameter_name)\n- else:\n- if parameter_name in self._parameters:\n- return self._priors.get(parameter_name)\n- else:\n- raise AttributeError(\n- \"Module {module} has no parameter {name}\".format(module=type(self).__name__, name=parameter_name)\n- )\n-\n- def _get_derived_prior(self, prior_name):\n- \"\"\"\n- Get derived prior from prior name\n-\n- prior_name (str): the name of the derived prior\n- \"\"\"\n- if \".\" in prior_name:\n- module, prior_name = self._get_module_and_name(prior_name)\n- return module._get_derived_prior(prior_name)\n- else:\n- if prior_name in self._parameters:\n- return self._derived_priors.get(prior_name)\n- else:\n- raise AttributeError(\n- \"Module {module} has no derived prior {name}\".format(module=type(self).__name__, name=prior_name)\n- )\n-\n def forward(self, *inputs, **kwargs):\n raise NotImplementedError\n \n@@ -92,16 +58,24 @@\n )\n return self\n \n- def named_parameter_priors(self):\n+ def named_parameter_priors(self, memo=None, prefix=\"\"):\n \"\"\"\n Returns an iterator over module parameter priors, yielding the name of\n the parameter, the parameter itself, as well as the associated prior\n (excludes parameters for which no prior has been registered)\n \"\"\"\n- for name, param in self.named_parameters():\n- prior = self._get_prior_for(name)\n- if prior is not None:\n- yield name, param, prior\n+ if memo is None:\n+ memo = set()\n+ for name, parameter in self._parameters.items():\n+ if name in self._priors and self._priors[name] not in memo:\n+ prior = self._priors[name]\n+ memo.add(prior)\n+ yield prefix + (\".\" if prefix else \"\") + name, parameter, prior\n+ for mname, module in self.named_children():\n+ submodule_prefix = prefix + (\".\" if prefix else \"\") + mname\n+ if hasattr(module, \"named_parameter_priors\"):\n+ for name, parameter, prior in module.named_parameter_priors(memo, submodule_prefix):\n+ yield name, parameter, prior\n \n def named_derived_priors(self, memo=None, prefix=\"\"):\n \"\"\"Returns an iterator over module derived priors, yielding both the\n", "issue": "Fully connected layer before mean and covariance modules\nI am trying to do a non-linear transformation of input before forwarding it through mean and covariance modules. Before master branch was merged to priors (commit 1f5491e3edcac6497d3370c8aaef9a9362048a3e), I can add a fully connected layer before passing the input through mean and covariance modules to learn a non-linear representation. For example, the following script worked fine. 
\r\n\r\n```\r\nimport math\r\nimport torch\r\nimport gpytorch\r\nfrom matplotlib import pyplot as plt\r\n\r\nfrom torch import optim\r\nfrom gpytorch.kernels import RBFKernel\r\nfrom gpytorch.means import ConstantMean\r\nfrom gpytorch.likelihoods import GaussianLikelihood\r\nfrom gpytorch.random_variables import GaussianRandomVariable\r\n\r\ntrain_x = torch.linspace(0, 1, 11)\r\ntrain_y = torch.sin(train_x.data * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2\r\n\r\nclass ExactGPModel(gpytorch.models.ExactGP):\r\n def __init__(self, train_x, train_y, likelihood):\r\n super(ExactGPModel, self).__init__(train_x, train_y, likelihood)\r\n self.mean_module = ConstantMean(constant_bounds=(-10, 10))\r\n self.covar_module = RBFKernel(log_lengthscale_bounds=(-5, 5))\r\n self.fc = torch.nn.Linear(1, 2)\r\n\r\n def forward(self, x):\r\n x_ = self.fc(x)\r\n mean_x = self.mean_module(x_)\r\n covar_x = self.covar_module(x_)\r\n return GaussianRandomVariable(mean_x, covar_x)\r\n\r\nlikelihood = GaussianLikelihood(log_noise_bounds=(-5, 5))\r\nmodel = ExactGPModel(train_x.data, train_y.data, likelihood)\r\n\r\nmodel.train()\r\nlikelihood.train()\r\n\r\noptimizer = torch.optim.Adam([\r\n {'params': model.parameters()}, \r\n], lr=0.1)\r\n\r\nmll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)\r\n\r\ntraining_iter = 1000\r\nfor i in range(training_iter):\r\n optimizer.zero_grad()\r\n output = model(train_x)\r\n loss = -mll(output, train_y)\r\n loss.backward()\r\n print('Iter %d/%d - Loss: %.3f log_lengthscale: %.3f log_noise: %.3f' % (\r\n i + 1, training_iter, loss.data[0],\r\n model.covar_module.log_lengthscale.data[0, 0],\r\n model.likelihood.log_noise.data[0]\r\n ))\r\n optimizer.step()\r\n```\r\n\r\nHowever, now I get this runtime error because of the linear fc layer:\r\n`AttributeError: 'Linear' object has no attribute '_get_prior_for'`\r\n\r\nHow can I get this to work with the latest version of gpytorch? DKL does something similar to what I am trying to do but implements AdditiveGridInducingVariationalGP and softmax likelihood, however, in my application I'd like to use ExactGP and gaussian likelihood. Is it possible to do so? \r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport torch\nfrom collections import OrderedDict\nfrom torch import nn\nfrom .random_variables import RandomVariable\nfrom .lazy import LazyVariable\nfrom .variational import VariationalStrategy\n\n\nclass Module(nn.Module):\n def __init__(self):\n super(Module, self).__init__()\n self._priors = OrderedDict()\n self._derived_priors = OrderedDict()\n self._variational_strategies = OrderedDict()\n\n def _get_module_and_name(self, parameter_name):\n \"\"\"Get module and name from full parameter name.\"\"\"\n module, name = parameter_name.split(\".\", 1)\n if module in self._modules:\n return self.__getattr__(module), name\n else:\n raise AttributeError(\n \"Invalid parameter name {}. 
{} has no module {}\".format(parameter_name, type(self).__name__, module)\n )\n\n def _get_prior_for(self, parameter_name):\n \"\"\"\n Get prior for parameter\n\n parameter_name (str): parameter name\n \"\"\"\n if \".\" in parameter_name:\n module, parameter_name = self._get_module_and_name(parameter_name)\n return module._get_prior_for(parameter_name)\n else:\n if parameter_name in self._parameters:\n return self._priors.get(parameter_name)\n else:\n raise AttributeError(\n \"Module {module} has no parameter {name}\".format(module=type(self).__name__, name=parameter_name)\n )\n\n def _get_derived_prior(self, prior_name):\n \"\"\"\n Get derived prior from prior name\n\n prior_name (str): the name of the derived prior\n \"\"\"\n if \".\" in prior_name:\n module, prior_name = self._get_module_and_name(prior_name)\n return module._get_derived_prior(prior_name)\n else:\n if prior_name in self._parameters:\n return self._derived_priors.get(prior_name)\n else:\n raise AttributeError(\n \"Module {module} has no derived prior {name}\".format(module=type(self).__name__, name=prior_name)\n )\n\n def forward(self, *inputs, **kwargs):\n raise NotImplementedError\n\n def initialize(self, **kwargs):\n \"\"\"\n Set a value for a parameter\n\n kwargs: (param_name, value) - parameter to initialize\n Value can take the form of a tensor, a float, or an int\n \"\"\"\n for name, val in kwargs.items():\n if name not in self._parameters:\n raise AttributeError(\"Unknown parameter {p} for {c}\".format(p=name, c=self.__class__.__name__))\n if torch.is_tensor(val):\n self.__getattr__(name).data.copy_(val)\n elif isinstance(val, float) or isinstance(val, int):\n self.__getattr__(name).data.fill_(val)\n else:\n raise AttributeError(\"Type {t} not valid to initialize parameter {p}\".format(t=type(val), p=name))\n\n # Ensure value is contained in support of prior (if present)\n prior = self._priors.get(name)\n if prior is not None:\n param = self._parameters[name]\n if not prior.is_in_support(param):\n raise ValueError(\n \"Value of parameter {param} not contained in support of specified prior\".format(param=param)\n )\n return self\n\n def named_parameter_priors(self):\n \"\"\"\n Returns an iterator over module parameter priors, yielding the name of\n the parameter, the parameter itself, as well as the associated prior\n (excludes parameters for which no prior has been registered)\n \"\"\"\n for name, param in self.named_parameters():\n prior = self._get_prior_for(name)\n if prior is not None:\n yield name, param, prior\n\n def named_derived_priors(self, memo=None, prefix=\"\"):\n \"\"\"Returns an iterator over module derived priors, yielding both the\n name of the prior as well as the prior, the associated parameters, and\n the transformation callable.\n\n Yields:\n (string, Prior, tuple(string), callable): Tuple containing the name\n of the prior, the prior itself, its parameters, and the transform\n to be called on the parameters.\n\n \"\"\"\n if memo is None:\n memo = set()\n for name, (prior, pnames, tf) in self._derived_priors.items():\n if prior is not None and prior not in memo:\n memo.add(prior)\n parameters = tuple(getattr(self, pname) for pname in pnames)\n yield prefix + (\".\" if prefix else \"\") + name, prior, parameters, tf\n for mname, module in self.named_children():\n submodule_prefix = prefix + (\".\" if prefix else \"\") + mname\n if hasattr(module, \"_derived_priors\"):\n for name, prior, parameters, tf in module.named_derived_priors(memo, submodule_prefix):\n yield name, prior, parameters, 
tf\n\n def named_variational_strategies(self, memo=None, prefix=\"\"):\n \"\"\"Returns an iterator over module variational strategies, yielding both\n the name of the variational strategy as well as the strategy itself.\n\n Yields:\n (string, VariationalStrategy): Tuple containing the name of the\n strategy and the strategy\n\n \"\"\"\n if memo is None:\n memo = set()\n for name, strategy in self._variational_strategies.items():\n if strategy is not None and strategy not in memo:\n memo.add(strategy)\n yield prefix + (\".\" if prefix else \"\") + name, strategy\n for mname, module in self.named_children():\n submodule_prefix = prefix + (\".\" if prefix else \"\") + mname\n if hasattr(module, \"named_variational_strategies\"):\n for name, strategy in module.named_variational_strategies(memo, submodule_prefix):\n yield name, strategy\n\n def register_parameter(self, name, parameter, prior=None):\n \"\"\"\n Adds a parameter to the module.\n The parameter can be accessed as an attribute using given name.\n\n name (str): name of parameter\n param (torch.nn.Parameter): parameter\n prior (Prior): prior for parameter (default: None)\n \"\"\"\n if \"_parameters\" not in self.__dict__:\n raise AttributeError(\"Cannot assign parameter before Module.__init__() call\")\n super(Module, self).register_parameter(name, parameter)\n if prior is not None:\n self.set_parameter_priors(**{name: prior})\n\n def register_derived_prior(self, name, prior, parameter_names, transform):\n \"\"\"\n Adds a derived prior to the module.\n The prior can be accessed as an attribute using the given name.\n\n name (str): name of the derived prior\n prior (Prior): the prior object\n parameter_names (tuple(str)): The parameters the transform operaters on,\n in the same order as expected by the transform callable.\n transform (Callable): The function called on the specified parameters. 
The\n log-pdf of the prior will be evaluating on the output of this transform.\n\n A derived prior operates on a transform of one or multiple parameters.\n This can be used, for instance, to put a prior over the ICM Kernel\n covariance matrix generated from covar_factor and log_var parameters.\n\n \"\"\"\n self.add_module(name, prior)\n self._derived_priors[name] = (prior, tuple(parameter_names), transform)\n\n def register_variational_strategy(self, name):\n self._variational_strategies[name] = None\n\n def set_parameter_priors(self, **kwargs):\n \"\"\"\n Set prior for a parameter.\n The prior can be accessed as an attribute using <PARAMETER_NAME>_prior.\n\n kwargs: (param_name, prior) - parameter to initialize\n prior must be a gpytorch Prior\n \"\"\"\n for name, prior in kwargs.items():\n if name not in self._parameters:\n raise AttributeError(\n \"Unknown parameter {name} for {module}\".format(name=name, module=self.__class__.__name__)\n )\n self.add_module(\"_\".join([name, \"prior\"]), prior)\n self._priors[name] = prior\n return self\n\n def variational_strategies(self):\n for _, strategy in self.named_variational_strategies():\n yield strategy\n\n def update_variational_strategy(self, name, variational_strategy):\n if not isinstance(variational_strategy, VariationalStrategy):\n raise RuntimeError(\"variational_strategy must be a VariationalStrategy\")\n if name not in self._variational_strategies.keys():\n raise RuntimeError(\"variational strategy {} not registered\".format(name))\n self._variational_strategies[name] = variational_strategy\n\n def __call__(self, *inputs, **kwargs):\n outputs = self.forward(*inputs, **kwargs)\n if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):\n return outputs\n for output in outputs:\n if not (isinstance(output, RandomVariable) or torch.is_tensor(output) or isinstance(output, LazyVariable)):\n raise RuntimeError(\n \"Output must be a RandomVariable, torch.Tensor, or LazyVariable. \"\n \"Was a {}\".format(input.__class__.__name__)\n )\n if len(outputs) == 1:\n outputs = outputs[0]\n return outputs\n", "path": "gpytorch/module.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport torch\nfrom collections import OrderedDict\nfrom torch import nn\nfrom .random_variables import RandomVariable\nfrom .lazy import LazyVariable\nfrom .variational import VariationalStrategy\n\n\nclass Module(nn.Module):\n def __init__(self):\n super(Module, self).__init__()\n self._priors = OrderedDict()\n self._derived_priors = OrderedDict()\n self._variational_strategies = OrderedDict()\n\n def _get_module_and_name(self, parameter_name):\n \"\"\"Get module and name from full parameter name.\"\"\"\n module, name = parameter_name.split(\".\", 1)\n if module in self._modules:\n return self.__getattr__(module), name\n else:\n raise AttributeError(\n \"Invalid parameter name {}. 
{} has no module {}\".format(parameter_name, type(self).__name__, module)\n )\n\n def forward(self, *inputs, **kwargs):\n raise NotImplementedError\n\n def initialize(self, **kwargs):\n \"\"\"\n Set a value for a parameter\n\n kwargs: (param_name, value) - parameter to initialize\n Value can take the form of a tensor, a float, or an int\n \"\"\"\n for name, val in kwargs.items():\n if name not in self._parameters:\n raise AttributeError(\"Unknown parameter {p} for {c}\".format(p=name, c=self.__class__.__name__))\n if torch.is_tensor(val):\n self.__getattr__(name).data.copy_(val)\n elif isinstance(val, float) or isinstance(val, int):\n self.__getattr__(name).data.fill_(val)\n else:\n raise AttributeError(\"Type {t} not valid to initialize parameter {p}\".format(t=type(val), p=name))\n\n # Ensure value is contained in support of prior (if present)\n prior = self._priors.get(name)\n if prior is not None:\n param = self._parameters[name]\n if not prior.is_in_support(param):\n raise ValueError(\n \"Value of parameter {param} not contained in support of specified prior\".format(param=param)\n )\n return self\n\n def named_parameter_priors(self, memo=None, prefix=\"\"):\n \"\"\"\n Returns an iterator over module parameter priors, yielding the name of\n the parameter, the parameter itself, as well as the associated prior\n (excludes parameters for which no prior has been registered)\n \"\"\"\n if memo is None:\n memo = set()\n for name, parameter in self._parameters.items():\n if name in self._priors and self._priors[name] not in memo:\n prior = self._priors[name]\n memo.add(prior)\n yield prefix + (\".\" if prefix else \"\") + name, parameter, prior\n for mname, module in self.named_children():\n submodule_prefix = prefix + (\".\" if prefix else \"\") + mname\n if hasattr(module, \"named_parameter_priors\"):\n for name, parameter, prior in module.named_parameter_priors(memo, submodule_prefix):\n yield name, parameter, prior\n\n def named_derived_priors(self, memo=None, prefix=\"\"):\n \"\"\"Returns an iterator over module derived priors, yielding both the\n name of the prior as well as the prior, the associated parameters, and\n the transformation callable.\n\n Yields:\n (string, Prior, tuple(string), callable): Tuple containing the name\n of the prior, the prior itself, its parameters, and the transform\n to be called on the parameters.\n\n \"\"\"\n if memo is None:\n memo = set()\n for name, (prior, pnames, tf) in self._derived_priors.items():\n if prior is not None and prior not in memo:\n memo.add(prior)\n parameters = tuple(getattr(self, pname) for pname in pnames)\n yield prefix + (\".\" if prefix else \"\") + name, prior, parameters, tf\n for mname, module in self.named_children():\n submodule_prefix = prefix + (\".\" if prefix else \"\") + mname\n if hasattr(module, \"_derived_priors\"):\n for name, prior, parameters, tf in module.named_derived_priors(memo, submodule_prefix):\n yield name, prior, parameters, tf\n\n def named_variational_strategies(self, memo=None, prefix=\"\"):\n \"\"\"Returns an iterator over module variational strategies, yielding both\n the name of the variational strategy as well as the strategy itself.\n\n Yields:\n (string, VariationalStrategy): Tuple containing the name of the\n strategy and the strategy\n\n \"\"\"\n if memo is None:\n memo = set()\n for name, strategy in self._variational_strategies.items():\n if strategy is not None and strategy not in memo:\n memo.add(strategy)\n yield prefix + (\".\" if prefix else \"\") + name, strategy\n for mname, module in 
self.named_children():\n submodule_prefix = prefix + (\".\" if prefix else \"\") + mname\n if hasattr(module, \"named_variational_strategies\"):\n for name, strategy in module.named_variational_strategies(memo, submodule_prefix):\n yield name, strategy\n\n def register_parameter(self, name, parameter, prior=None):\n \"\"\"\n Adds a parameter to the module.\n The parameter can be accessed as an attribute using given name.\n\n name (str): name of parameter\n param (torch.nn.Parameter): parameter\n prior (Prior): prior for parameter (default: None)\n \"\"\"\n if \"_parameters\" not in self.__dict__:\n raise AttributeError(\"Cannot assign parameter before Module.__init__() call\")\n super(Module, self).register_parameter(name, parameter)\n if prior is not None:\n self.set_parameter_priors(**{name: prior})\n\n def register_derived_prior(self, name, prior, parameter_names, transform):\n \"\"\"\n Adds a derived prior to the module.\n The prior can be accessed as an attribute using the given name.\n\n name (str): name of the derived prior\n prior (Prior): the prior object\n parameter_names (tuple(str)): The parameters the transform operaters on,\n in the same order as expected by the transform callable.\n transform (Callable): The function called on the specified parameters. The\n log-pdf of the prior will be evaluating on the output of this transform.\n\n A derived prior operates on a transform of one or multiple parameters.\n This can be used, for instance, to put a prior over the ICM Kernel\n covariance matrix generated from covar_factor and log_var parameters.\n\n \"\"\"\n self.add_module(name, prior)\n self._derived_priors[name] = (prior, tuple(parameter_names), transform)\n\n def register_variational_strategy(self, name):\n self._variational_strategies[name] = None\n\n def set_parameter_priors(self, **kwargs):\n \"\"\"\n Set prior for a parameter.\n The prior can be accessed as an attribute using <PARAMETER_NAME>_prior.\n\n kwargs: (param_name, prior) - parameter to initialize\n prior must be a gpytorch Prior\n \"\"\"\n for name, prior in kwargs.items():\n if name not in self._parameters:\n raise AttributeError(\n \"Unknown parameter {name} for {module}\".format(name=name, module=self.__class__.__name__)\n )\n self.add_module(\"_\".join([name, \"prior\"]), prior)\n self._priors[name] = prior\n return self\n\n def variational_strategies(self):\n for _, strategy in self.named_variational_strategies():\n yield strategy\n\n def update_variational_strategy(self, name, variational_strategy):\n if not isinstance(variational_strategy, VariationalStrategy):\n raise RuntimeError(\"variational_strategy must be a VariationalStrategy\")\n if name not in self._variational_strategies.keys():\n raise RuntimeError(\"variational strategy {} not registered\".format(name))\n self._variational_strategies[name] = variational_strategy\n\n def __call__(self, *inputs, **kwargs):\n outputs = self.forward(*inputs, **kwargs)\n if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):\n return outputs\n for output in outputs:\n if not (isinstance(output, RandomVariable) or torch.is_tensor(output) or isinstance(output, LazyVariable)):\n raise RuntimeError(\n \"Output must be a RandomVariable, torch.Tensor, or LazyVariable. \"\n \"Was a {}\".format(input.__class__.__name__)\n )\n if len(outputs) == 1:\n outputs = outputs[0]\n return outputs\n", "path": "gpytorch/module.py"}]}
3505
678
gh_patches_debug_795
rasdani/github-patches
git_diff
Parsl__parsl-140
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Do not import `parsl` before requirements are setup ``` [annawoodard@midway001 parsl]$ python setup.py install Traceback (most recent call last): File "setup.py", line 2, in <module> from parsl.version import VERSION File "/home/annawoodard/parsl/parsl/__init__.py", line 35, in <module> from parsl.executors.ipp import IPyParallelExecutor File "/home/annawoodard/parsl/parsl/executors/ipp.py", line 4, in <module> from ipyparallel import Client ModuleNotFoundError: No module named 'ipyparallel' ``` Setuptools is supposed to take care of dependencies for us, but importing parsl in `setup.py` breaks that (because we require the dependencies by importing the parsl version from `version.py` before they can be installed). We should avoid this. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup, find_packages 2 from parsl.version import VERSION 3 4 with open('requirements.txt') as f: 5 install_requires = f.readlines() 6 7 # tests_require = parse_requirements('test-requirements.txt') 8 9 setup( 10 name='parsl', 11 version=VERSION, 12 description='Simple data dependent workflows in Python', 13 long_description='Simple and easy parallel workflows system for Python', 14 url='https://github.com/Parsl/parsl', 15 author='Yadu Nand Babuji', 16 author_email='[email protected]', 17 license='Apache 2.0', 18 download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION), 19 package_data={'': ['LICENSE']}, 20 packages=find_packages(), 21 install_requires=install_requires, 22 classifiers=[ 23 # Maturity 24 'Development Status :: 3 - Alpha', 25 # Intended audience 26 'Intended Audience :: Developers', 27 # Licence, must match with licence above 28 'License :: OSI Approved :: Apache Software License', 29 # Python versions supported 30 'Programming Language :: Python :: 3.5', 31 'Programming Language :: Python :: 3.6', 32 ], 33 keywords=['Workflows', 'Scientific computing'], 34 ) 35 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,5 +1,7 @@ from setuptools import setup, find_packages -from parsl.version import VERSION + +with open('parsl/version.py') as f: + exec(f.read()) with open('requirements.txt') as f: install_requires = f.readlines()
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,5 +1,7 @@\n from setuptools import setup, find_packages\n-from parsl.version import VERSION\n+\n+with open('parsl/version.py') as f:\n+ exec(f.read())\n \n with open('requirements.txt') as f:\n install_requires = f.readlines()\n", "issue": "Do not import `parsl` before requirements are setup\n```\r\n[annawoodard@midway001 parsl]$ python setup.py install\r\nTraceback (most recent call last):\r\n File \"setup.py\", line 2, in <module>\r\n from parsl.version import VERSION\r\n File \"/home/annawoodard/parsl/parsl/__init__.py\", line 35, in <module>\r\n from parsl.executors.ipp import IPyParallelExecutor\r\n File \"/home/annawoodard/parsl/parsl/executors/ipp.py\", line 4, in <module>\r\n from ipyparallel import Client\r\nModuleNotFoundError: No module named 'ipyparallel'\r\n```\r\n\r\nSetuptools is supposed to take care of dependencies for us, but importing parsl in `setup.py` breaks that (because we require the dependencies by importing the parsl version from `version.py` before they can be installed). We should avoid this.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom parsl.version import VERSION\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\n# tests_require = parse_requirements('test-requirements.txt')\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple and easy parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='Yadu Nand Babuji',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n package_data={'': ['LICENSE']},\n packages=find_packages(),\n install_requires=install_requires,\n classifiers=[\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\n# tests_require = parse_requirements('test-requirements.txt')\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple and easy parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='Yadu Nand Babuji',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n package_data={'': ['LICENSE']},\n packages=find_packages(),\n install_requires=install_requires,\n classifiers=[\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n)\n", "path": "setup.py"}]}
796
81
gh_patches_debug_29007
rasdani/github-patches
git_diff
vega__altair-2642
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Dots aren't showing up in ranged dot plot ![Screenshot from 2022-06-30 20-02-14](https://user-images.githubusercontent.com/9993/176815294-a1801b76-ed5a-4f3c-b38f-71850c131a5a.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `altair/examples/ranged_dot_plot.py` Content: ``` 1 """ 2 Ranged Dot Plot 3 ----------------- 4 This example shows a ranged dot plot that uses 'layer' to convey changing life expectancy for the five most populous countries (between 1955 and 2000). 5 """ 6 # category: other charts 7 import altair as alt 8 from vega_datasets import data 9 10 source = data.countries.url 11 12 chart = alt.layer( 13 data=source 14 ).transform_filter( 15 filter={"field": 'country', 16 "oneOf": ["China", "India", "United States", "Indonesia", "Brazil"]} 17 ).transform_filter( 18 filter={'field': 'year', 19 "oneOf": [1955, 2000]} 20 ) 21 22 chart += alt.Chart().mark_line(color='#db646f').encode( 23 x='life_expect:Q', 24 y='country:N', 25 detail='country:N' 26 ) 27 # Add points for life expectancy in 1955 & 2000 28 chart += alt.Chart().mark_point( 29 size=100, 30 opacity=1, 31 filled=True 32 ).encode( 33 x='life_expect:Q', 34 y='country:N', 35 color=alt.Color('year:O', 36 scale=alt.Scale( 37 domain=['1955', '2000'], 38 range=['#e6959c', '#911a24'] 39 ) 40 ) 41 ).interactive() 42 43 chart 44 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/altair/examples/ranged_dot_plot.py b/altair/examples/ranged_dot_plot.py --- a/altair/examples/ranged_dot_plot.py +++ b/altair/examples/ranged_dot_plot.py @@ -1,7 +1,7 @@ """ Ranged Dot Plot ------------------ -This example shows a ranged dot plot that uses 'layer' to convey changing life expectancy for the five most populous countries (between 1955 and 2000). +--------------- +This example shows a ranged dot plot to convey changing life expectancy for the five most populous countries (between 1955 and 2000). """ # category: other charts import altair as alt @@ -9,7 +9,7 @@ source = data.countries.url -chart = alt.layer( +chart = alt.Chart( data=source ).transform_filter( filter={"field": 'country', @@ -19,13 +19,13 @@ "oneOf": [1955, 2000]} ) -chart += alt.Chart().mark_line(color='#db646f').encode( +line = chart.mark_line(color='#db646f').encode( x='life_expect:Q', y='country:N', detail='country:N' ) # Add points for life expectancy in 1955 & 2000 -chart += alt.Chart().mark_point( +points = chart.mark_point( size=100, opacity=1, filled=True @@ -34,10 +34,10 @@ y='country:N', color=alt.Color('year:O', scale=alt.Scale( - domain=['1955', '2000'], + domain=[1955, 2000], range=['#e6959c', '#911a24'] ) ) ).interactive() -chart +(line + points)
{"golden_diff": "diff --git a/altair/examples/ranged_dot_plot.py b/altair/examples/ranged_dot_plot.py\n--- a/altair/examples/ranged_dot_plot.py\n+++ b/altair/examples/ranged_dot_plot.py\n@@ -1,7 +1,7 @@\n \"\"\"\n Ranged Dot Plot\n------------------\n-This example shows a ranged dot plot that uses 'layer' to convey changing life expectancy for the five most populous countries (between 1955 and 2000).\n+---------------\n+This example shows a ranged dot plot to convey changing life expectancy for the five most populous countries (between 1955 and 2000).\n \"\"\"\n # category: other charts\n import altair as alt\n@@ -9,7 +9,7 @@\n \n source = data.countries.url\n \n-chart = alt.layer(\n+chart = alt.Chart(\n data=source\n ).transform_filter(\n filter={\"field\": 'country',\n@@ -19,13 +19,13 @@\n \"oneOf\": [1955, 2000]}\n )\n \n-chart += alt.Chart().mark_line(color='#db646f').encode(\n+line = chart.mark_line(color='#db646f').encode(\n x='life_expect:Q',\n y='country:N',\n detail='country:N'\n )\n # Add points for life expectancy in 1955 & 2000\n-chart += alt.Chart().mark_point(\n+points = chart.mark_point(\n size=100,\n opacity=1,\n filled=True\n@@ -34,10 +34,10 @@\n y='country:N',\n color=alt.Color('year:O',\n scale=alt.Scale(\n- domain=['1955', '2000'],\n+ domain=[1955, 2000],\n range=['#e6959c', '#911a24']\n )\n )\n ).interactive()\n \n-chart\n+(line + points)\n", "issue": "Dots aren't showing up in ranged dot plot\n![Screenshot from 2022-06-30 20-02-14](https://user-images.githubusercontent.com/9993/176815294-a1801b76-ed5a-4f3c-b38f-71850c131a5a.png)\r\n\n", "before_files": [{"content": "\"\"\"\nRanged Dot Plot\n-----------------\nThis example shows a ranged dot plot that uses 'layer' to convey changing life expectancy for the five most populous countries (between 1955 and 2000).\n\"\"\"\n# category: other charts\nimport altair as alt\nfrom vega_datasets import data\n\nsource = data.countries.url\n\nchart = alt.layer(\n data=source\n).transform_filter(\n filter={\"field\": 'country',\n \"oneOf\": [\"China\", \"India\", \"United States\", \"Indonesia\", \"Brazil\"]}\n).transform_filter(\n filter={'field': 'year',\n \"oneOf\": [1955, 2000]}\n)\n\nchart += alt.Chart().mark_line(color='#db646f').encode(\n x='life_expect:Q',\n y='country:N',\n detail='country:N'\n)\n# Add points for life expectancy in 1955 & 2000\nchart += alt.Chart().mark_point(\n size=100,\n opacity=1,\n filled=True\n).encode(\n x='life_expect:Q',\n y='country:N',\n color=alt.Color('year:O',\n scale=alt.Scale(\n domain=['1955', '2000'],\n range=['#e6959c', '#911a24']\n )\n )\n).interactive()\n\nchart\n", "path": "altair/examples/ranged_dot_plot.py"}], "after_files": [{"content": "\"\"\"\nRanged Dot Plot\n---------------\nThis example shows a ranged dot plot to convey changing life expectancy for the five most populous countries (between 1955 and 2000).\n\"\"\"\n# category: other charts\nimport altair as alt\nfrom vega_datasets import data\n\nsource = data.countries.url\n\nchart = alt.Chart(\n data=source\n).transform_filter(\n filter={\"field\": 'country',\n \"oneOf\": [\"China\", \"India\", \"United States\", \"Indonesia\", \"Brazil\"]}\n).transform_filter(\n filter={'field': 'year',\n \"oneOf\": [1955, 2000]}\n)\n\nline = chart.mark_line(color='#db646f').encode(\n x='life_expect:Q',\n y='country:N',\n detail='country:N'\n)\n# Add points for life expectancy in 1955 & 2000\npoints = chart.mark_point(\n size=100,\n opacity=1,\n filled=True\n).encode(\n x='life_expect:Q',\n y='country:N',\n color=alt.Color('year:O',\n 
scale=alt.Scale(\n domain=[1955, 2000],\n range=['#e6959c', '#911a24']\n )\n )\n).interactive()\n\n(line + points)\n", "path": "altair/examples/ranged_dot_plot.py"}]}
740
435
gh_patches_debug_64587
rasdani/github-patches
git_diff
kubeflow__pipelines-4118
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- allow output artifact store configuration (vs hard coded) it seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc (`minio-service.kubeflow:9000`). see: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148 it would be great to make it flexible, e.g. allow using S3, or change namespace or bucket names. i suggest making it configurable, i can do such PR if we agree its needed. flexible pipeline service (host) path in client SDK when creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicate a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from: `config.host = host if host else Client.IN_CLUSTER_DNS_NAME` to: `config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)` also note that when a user provide the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), it seems like a potential bug if its acceptable i can submit a PR for the line change above --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `samples/core/iris/iris.py` Content: ``` 1 #!/usr/bin/env python3 2 # Copyright 2020 Google LLC 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 """Iris flowers example using TFX. 
Based on https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_pipeline_native_keras.py""" 16 17 from __future__ import absolute_import 18 from __future__ import division 19 from __future__ import print_function 20 21 import os 22 import kfp 23 from typing import Text 24 25 import absl 26 import tensorflow_model_analysis as tfma 27 28 from tfx.components import CsvExampleGen 29 from tfx.components import Evaluator 30 from tfx.components import ExampleValidator 31 from tfx.components import Pusher 32 from tfx.components import ResolverNode 33 from tfx.components import SchemaGen 34 from tfx.components import StatisticsGen 35 from tfx.components import Trainer 36 from tfx.components import Transform 37 from tfx.components.base import executor_spec 38 from tfx.components.trainer.executor import GenericExecutor 39 from tfx.dsl.experimental import latest_blessed_model_resolver 40 from tfx.orchestration import data_types 41 from tfx.orchestration import pipeline 42 from tfx.orchestration.kubeflow import kubeflow_dag_runner 43 from tfx.proto import trainer_pb2 44 from tfx.proto import pusher_pb2 45 from tfx.types import Channel 46 from tfx.types.standard_artifacts import Model 47 from tfx.types.standard_artifacts import ModelBlessing 48 from tfx.utils.dsl_utils import external_input 49 50 _pipeline_name = 'iris_native_keras' 51 52 # This example assumes that Iris flowers data is stored in GCS and the 53 # utility function is in iris_utils.py. Feel free to customize as needed. 54 _data_root_param = data_types.RuntimeParameter( 55 name='data-root', 56 default='gs://ml-pipeline/sample-data/iris/data', 57 ptype=Text, 58 ) 59 60 # Python module file to inject customized logic into the TFX components. The 61 # Transform and Trainer both require user-defined functions to run successfully. 62 # This file is fork from https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_utils_native_keras.py 63 # and baked into the TFX image used in the pipeline. 64 _module_file_param = data_types.RuntimeParameter( 65 name='module-file', 66 default= 67 '/tfx-src/tfx/examples/iris/iris_utils_native_keras.py', 68 ptype=Text, 69 ) 70 71 # Directory and data locations. This example assumes all of the flowers 72 # example code and metadata library is relative to a GCS path. 73 # Note: if one deployed KFP from GKE marketplace, it's possible to leverage 74 # the following magic placeholder to auto-populate the default GCS bucket 75 # associated with KFP deployment. Otherwise you'll need to replace it with your 76 # actual bucket name here or when creating a run. 77 _pipeline_root = os.path.join( 78 'gs://{{kfp-default-bucket}}', 'tfx_iris', kfp.dsl.RUN_ID_PLACEHOLDER 79 ) 80 81 82 def _create_pipeline( 83 pipeline_name: Text, pipeline_root: Text 84 ) -> pipeline.Pipeline: 85 """Implements the Iris flowers pipeline with TFX.""" 86 examples = external_input(_data_root_param) 87 88 # Brings data into the pipeline or otherwise joins/converts training data. 89 example_gen = CsvExampleGen(input=examples) 90 91 # Computes statistics over data for visualization and example validation. 92 statistics_gen = StatisticsGen(examples=example_gen.outputs['examples']) 93 94 # Generates schema based on statistics files. 95 infer_schema = SchemaGen( 96 statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True 97 ) 98 99 # Performs anomaly detection based on statistics and data schema. 
100 validate_stats = ExampleValidator( 101 statistics=statistics_gen.outputs['statistics'], 102 schema=infer_schema.outputs['schema'] 103 ) 104 105 # Performs transformations and feature engineering in training and serving. 106 transform = Transform( 107 examples=example_gen.outputs['examples'], 108 schema=infer_schema.outputs['schema'], 109 module_file=_module_file_param 110 ) 111 112 # Uses user-provided Python function that implements a model using Keras. 113 trainer = Trainer( 114 module_file=_module_file_param, 115 custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor), 116 examples=transform.outputs['transformed_examples'], 117 transform_graph=transform.outputs['transform_graph'], 118 schema=infer_schema.outputs['schema'], 119 train_args=trainer_pb2.TrainArgs(num_steps=100), 120 eval_args=trainer_pb2.EvalArgs(num_steps=50) 121 ) 122 123 # Get the latest blessed model for model validation. 124 model_resolver = ResolverNode( 125 instance_name='latest_blessed_model_resolver', 126 resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver, 127 model=Channel(type=Model), 128 model_blessing=Channel(type=ModelBlessing) 129 ) 130 131 # Uses TFMA to compute an evaluation statistics over features of a model and 132 # perform quality validation of a candidate model (compared to a baseline). 133 # Note: to compile this successfully you'll need TFMA at >= 0.21.5 134 eval_config = tfma.EvalConfig( 135 model_specs=[ 136 tfma.ModelSpec(name='candidate', label_key='variety'), 137 tfma.ModelSpec( 138 name='baseline', label_key='variety', is_baseline=True 139 ) 140 ], 141 slicing_specs=[ 142 tfma.SlicingSpec(), 143 # Data can be sliced along a feature column. Required by TFMA visualization. 144 tfma.SlicingSpec(feature_keys=['sepal_length'])], 145 metrics_specs=[ 146 tfma.MetricsSpec( 147 metrics=[ 148 tfma.MetricConfig( 149 class_name='SparseCategoricalAccuracy', 150 threshold=tfma.config.MetricThreshold( 151 value_threshold=tfma.GenericValueThreshold( 152 lower_bound={'value': 0.9} 153 ), 154 change_threshold=tfma.GenericChangeThreshold( 155 direction=tfma.MetricDirection.HIGHER_IS_BETTER, 156 absolute={'value': -1e-10} 157 ) 158 ) 159 ) 160 ] 161 ) 162 ] 163 ) 164 165 # Uses TFMA to compute a evaluation statistics over features of a model. 166 model_analyzer = Evaluator( 167 examples=example_gen.outputs['examples'], 168 model=trainer.outputs['model'], 169 baseline_model=model_resolver.outputs['model'], 170 # Change threshold will be ignored if there is no baseline (first run). 171 eval_config=eval_config 172 ) 173 174 # Checks whether the model passed the validation steps and pushes the model 175 # to a file destination if check passed. 176 pusher = Pusher( 177 model=trainer.outputs['model'], 178 model_blessing=model_analyzer.outputs['blessing'], 179 push_destination=pusher_pb2.PushDestination( 180 filesystem=pusher_pb2.PushDestination.Filesystem( 181 base_directory=os.path. 182 join(str(pipeline.ROOT_PARAMETER), 'model_serving') 183 ) 184 ) 185 ) 186 187 return pipeline.Pipeline( 188 pipeline_name=pipeline_name, 189 pipeline_root=pipeline_root, 190 components=[ 191 example_gen, statistics_gen, infer_schema, validate_stats, transform, 192 trainer, model_resolver, model_analyzer, pusher 193 ], 194 enable_cache=True, 195 ) 196 197 198 if __name__ == '__main__': 199 absl.logging.set_verbosity(absl.logging.INFO) 200 # Make sure the version of TFX image used is consistent with the version of 201 # TFX SDK. Here we use tfx:0.22.0 image. 
202 config = kubeflow_dag_runner.KubeflowDagRunnerConfig( 203 kubeflow_metadata_config=kubeflow_dag_runner. 204 get_default_kubeflow_metadata_config(), 205 tfx_image='gcr.io/tfx-oss-public/tfx:0.22.0', 206 ) 207 kfp_runner = kubeflow_dag_runner.KubeflowDagRunner( 208 output_filename=__file__ + '.yaml', config=config 209 ) 210 kfp_runner.run( 211 _create_pipeline( 212 pipeline_name=_pipeline_name, pipeline_root=_pipeline_root 213 ) 214 ) 215 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/samples/core/iris/iris.py b/samples/core/iris/iris.py --- a/samples/core/iris/iris.py +++ b/samples/core/iris/iris.py @@ -14,10 +14,6 @@ # limitations under the License. """Iris flowers example using TFX. Based on https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_pipeline_native_keras.py""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - import os import kfp from typing import Text
{"golden_diff": "diff --git a/samples/core/iris/iris.py b/samples/core/iris/iris.py\n--- a/samples/core/iris/iris.py\n+++ b/samples/core/iris/iris.py\n@@ -14,10 +14,6 @@\n # limitations under the License.\n \"\"\"Iris flowers example using TFX. Based on https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_pipeline_native_keras.py\"\"\"\n \n-from __future__ import absolute_import\n-from __future__ import division\n-from __future__ import print_function\n-\n import os\n import kfp\n from typing import Text\n", "issue": "allow output artifact store configuration (vs hard coded)\nit seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc (`minio-service.kubeflow:9000`). \r\n\r\nsee: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148\r\n\r\nit would be great to make it flexible, e.g. allow using S3, or change namespace or bucket names.\r\ni suggest making it configurable, i can do such PR if we agree its needed. \nflexible pipeline service (host) path in client SDK \nwhen creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicate a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:\r\n\r\n`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`\r\n\r\nto:\r\n\r\n`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`\r\n\r\nalso note that when a user provide the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), it seems like a potential bug\r\n\r\nif its acceptable i can submit a PR for the line change above\r\n \n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Iris flowers example using TFX. 
Based on https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_pipeline_native_keras.py\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport kfp\nfrom typing import Text\n\nimport absl\nimport tensorflow_model_analysis as tfma\n\nfrom tfx.components import CsvExampleGen\nfrom tfx.components import Evaluator\nfrom tfx.components import ExampleValidator\nfrom tfx.components import Pusher\nfrom tfx.components import ResolverNode\nfrom tfx.components import SchemaGen\nfrom tfx.components import StatisticsGen\nfrom tfx.components import Trainer\nfrom tfx.components import Transform\nfrom tfx.components.base import executor_spec\nfrom tfx.components.trainer.executor import GenericExecutor\nfrom tfx.dsl.experimental import latest_blessed_model_resolver\nfrom tfx.orchestration import data_types\nfrom tfx.orchestration import pipeline\nfrom tfx.orchestration.kubeflow import kubeflow_dag_runner\nfrom tfx.proto import trainer_pb2\nfrom tfx.proto import pusher_pb2\nfrom tfx.types import Channel\nfrom tfx.types.standard_artifacts import Model\nfrom tfx.types.standard_artifacts import ModelBlessing\nfrom tfx.utils.dsl_utils import external_input\n\n_pipeline_name = 'iris_native_keras'\n\n# This example assumes that Iris flowers data is stored in GCS and the\n# utility function is in iris_utils.py. Feel free to customize as needed.\n_data_root_param = data_types.RuntimeParameter(\n name='data-root',\n default='gs://ml-pipeline/sample-data/iris/data',\n ptype=Text,\n)\n\n# Python module file to inject customized logic into the TFX components. The\n# Transform and Trainer both require user-defined functions to run successfully.\n# This file is fork from https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_utils_native_keras.py\n# and baked into the TFX image used in the pipeline.\n_module_file_param = data_types.RuntimeParameter(\n name='module-file',\n default=\n '/tfx-src/tfx/examples/iris/iris_utils_native_keras.py',\n ptype=Text,\n)\n\n# Directory and data locations. This example assumes all of the flowers\n# example code and metadata library is relative to a GCS path.\n# Note: if one deployed KFP from GKE marketplace, it's possible to leverage\n# the following magic placeholder to auto-populate the default GCS bucket\n# associated with KFP deployment. 
Otherwise you'll need to replace it with your\n# actual bucket name here or when creating a run.\n_pipeline_root = os.path.join(\n 'gs://{{kfp-default-bucket}}', 'tfx_iris', kfp.dsl.RUN_ID_PLACEHOLDER\n)\n\n\ndef _create_pipeline(\n pipeline_name: Text, pipeline_root: Text\n) -> pipeline.Pipeline:\n \"\"\"Implements the Iris flowers pipeline with TFX.\"\"\"\n examples = external_input(_data_root_param)\n\n # Brings data into the pipeline or otherwise joins/converts training data.\n example_gen = CsvExampleGen(input=examples)\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])\n\n # Generates schema based on statistics files.\n infer_schema = SchemaGen(\n statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True\n )\n\n # Performs anomaly detection based on statistics and data schema.\n validate_stats = ExampleValidator(\n statistics=statistics_gen.outputs['statistics'],\n schema=infer_schema.outputs['schema']\n )\n\n # Performs transformations and feature engineering in training and serving.\n transform = Transform(\n examples=example_gen.outputs['examples'],\n schema=infer_schema.outputs['schema'],\n module_file=_module_file_param\n )\n\n # Uses user-provided Python function that implements a model using Keras.\n trainer = Trainer(\n module_file=_module_file_param,\n custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),\n examples=transform.outputs['transformed_examples'],\n transform_graph=transform.outputs['transform_graph'],\n schema=infer_schema.outputs['schema'],\n train_args=trainer_pb2.TrainArgs(num_steps=100),\n eval_args=trainer_pb2.EvalArgs(num_steps=50)\n )\n\n # Get the latest blessed model for model validation.\n model_resolver = ResolverNode(\n instance_name='latest_blessed_model_resolver',\n resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing)\n )\n\n # Uses TFMA to compute an evaluation statistics over features of a model and\n # perform quality validation of a candidate model (compared to a baseline).\n # Note: to compile this successfully you'll need TFMA at >= 0.21.5\n eval_config = tfma.EvalConfig(\n model_specs=[\n tfma.ModelSpec(name='candidate', label_key='variety'),\n tfma.ModelSpec(\n name='baseline', label_key='variety', is_baseline=True\n )\n ],\n slicing_specs=[\n tfma.SlicingSpec(),\n # Data can be sliced along a feature column. 
Required by TFMA visualization.\n tfma.SlicingSpec(feature_keys=['sepal_length'])],\n metrics_specs=[\n tfma.MetricsSpec(\n metrics=[\n tfma.MetricConfig(\n class_name='SparseCategoricalAccuracy',\n threshold=tfma.config.MetricThreshold(\n value_threshold=tfma.GenericValueThreshold(\n lower_bound={'value': 0.9}\n ),\n change_threshold=tfma.GenericChangeThreshold(\n direction=tfma.MetricDirection.HIGHER_IS_BETTER,\n absolute={'value': -1e-10}\n )\n )\n )\n ]\n )\n ]\n )\n\n # Uses TFMA to compute a evaluation statistics over features of a model.\n model_analyzer = Evaluator(\n examples=example_gen.outputs['examples'],\n model=trainer.outputs['model'],\n baseline_model=model_resolver.outputs['model'],\n # Change threshold will be ignored if there is no baseline (first run).\n eval_config=eval_config\n )\n\n # Checks whether the model passed the validation steps and pushes the model\n # to a file destination if check passed.\n pusher = Pusher(\n model=trainer.outputs['model'],\n model_blessing=model_analyzer.outputs['blessing'],\n push_destination=pusher_pb2.PushDestination(\n filesystem=pusher_pb2.PushDestination.Filesystem(\n base_directory=os.path.\n join(str(pipeline.ROOT_PARAMETER), 'model_serving')\n )\n )\n )\n\n return pipeline.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=pipeline_root,\n components=[\n example_gen, statistics_gen, infer_schema, validate_stats, transform,\n trainer, model_resolver, model_analyzer, pusher\n ],\n enable_cache=True,\n )\n\n\nif __name__ == '__main__':\n absl.logging.set_verbosity(absl.logging.INFO)\n # Make sure the version of TFX image used is consistent with the version of\n # TFX SDK. Here we use tfx:0.22.0 image.\n config = kubeflow_dag_runner.KubeflowDagRunnerConfig(\n kubeflow_metadata_config=kubeflow_dag_runner.\n get_default_kubeflow_metadata_config(),\n tfx_image='gcr.io/tfx-oss-public/tfx:0.22.0',\n )\n kfp_runner = kubeflow_dag_runner.KubeflowDagRunner(\n output_filename=__file__ + '.yaml', config=config\n )\n kfp_runner.run(\n _create_pipeline(\n pipeline_name=_pipeline_name, pipeline_root=_pipeline_root\n )\n )\n", "path": "samples/core/iris/iris.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Iris flowers example using TFX. 
Based on https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_pipeline_native_keras.py\"\"\"\n\nimport os\nimport kfp\nfrom typing import Text\n\nimport absl\nimport tensorflow_model_analysis as tfma\n\nfrom tfx.components import CsvExampleGen\nfrom tfx.components import Evaluator\nfrom tfx.components import ExampleValidator\nfrom tfx.components import Pusher\nfrom tfx.components import ResolverNode\nfrom tfx.components import SchemaGen\nfrom tfx.components import StatisticsGen\nfrom tfx.components import Trainer\nfrom tfx.components import Transform\nfrom tfx.components.base import executor_spec\nfrom tfx.components.trainer.executor import GenericExecutor\nfrom tfx.dsl.experimental import latest_blessed_model_resolver\nfrom tfx.orchestration import data_types\nfrom tfx.orchestration import pipeline\nfrom tfx.orchestration.kubeflow import kubeflow_dag_runner\nfrom tfx.proto import trainer_pb2\nfrom tfx.proto import pusher_pb2\nfrom tfx.types import Channel\nfrom tfx.types.standard_artifacts import Model\nfrom tfx.types.standard_artifacts import ModelBlessing\nfrom tfx.utils.dsl_utils import external_input\n\n_pipeline_name = 'iris_native_keras'\n\n# This example assumes that Iris flowers data is stored in GCS and the\n# utility function is in iris_utils.py. Feel free to customize as needed.\n_data_root_param = data_types.RuntimeParameter(\n name='data-root',\n default='gs://ml-pipeline/sample-data/iris/data',\n ptype=Text,\n)\n\n# Python module file to inject customized logic into the TFX components. The\n# Transform and Trainer both require user-defined functions to run successfully.\n# This file is fork from https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_utils_native_keras.py\n# and baked into the TFX image used in the pipeline.\n_module_file_param = data_types.RuntimeParameter(\n name='module-file',\n default=\n '/tfx-src/tfx/examples/iris/iris_utils_native_keras.py',\n ptype=Text,\n)\n\n# Directory and data locations. This example assumes all of the flowers\n# example code and metadata library is relative to a GCS path.\n# Note: if one deployed KFP from GKE marketplace, it's possible to leverage\n# the following magic placeholder to auto-populate the default GCS bucket\n# associated with KFP deployment. 
Otherwise you'll need to replace it with your\n# actual bucket name here or when creating a run.\n_pipeline_root = os.path.join(\n 'gs://{{kfp-default-bucket}}', 'tfx_iris', kfp.dsl.RUN_ID_PLACEHOLDER\n)\n\n\ndef _create_pipeline(\n pipeline_name: Text, pipeline_root: Text\n) -> pipeline.Pipeline:\n \"\"\"Implements the Iris flowers pipeline with TFX.\"\"\"\n examples = external_input(_data_root_param)\n\n # Brings data into the pipeline or otherwise joins/converts training data.\n example_gen = CsvExampleGen(input=examples)\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])\n\n # Generates schema based on statistics files.\n infer_schema = SchemaGen(\n statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True\n )\n\n # Performs anomaly detection based on statistics and data schema.\n validate_stats = ExampleValidator(\n statistics=statistics_gen.outputs['statistics'],\n schema=infer_schema.outputs['schema']\n )\n\n # Performs transformations and feature engineering in training and serving.\n transform = Transform(\n examples=example_gen.outputs['examples'],\n schema=infer_schema.outputs['schema'],\n module_file=_module_file_param\n )\n\n # Uses user-provided Python function that implements a model using Keras.\n trainer = Trainer(\n module_file=_module_file_param,\n custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),\n examples=transform.outputs['transformed_examples'],\n transform_graph=transform.outputs['transform_graph'],\n schema=infer_schema.outputs['schema'],\n train_args=trainer_pb2.TrainArgs(num_steps=100),\n eval_args=trainer_pb2.EvalArgs(num_steps=50)\n )\n\n # Get the latest blessed model for model validation.\n model_resolver = ResolverNode(\n instance_name='latest_blessed_model_resolver',\n resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing)\n )\n\n # Uses TFMA to compute an evaluation statistics over features of a model and\n # perform quality validation of a candidate model (compared to a baseline).\n # Note: to compile this successfully you'll need TFMA at >= 0.21.5\n eval_config = tfma.EvalConfig(\n model_specs=[\n tfma.ModelSpec(name='candidate', label_key='variety'),\n tfma.ModelSpec(\n name='baseline', label_key='variety', is_baseline=True\n )\n ],\n slicing_specs=[\n tfma.SlicingSpec(),\n # Data can be sliced along a feature column. 
Required by TFMA visualization.\n tfma.SlicingSpec(feature_keys=['sepal_length'])],\n metrics_specs=[\n tfma.MetricsSpec(\n metrics=[\n tfma.MetricConfig(\n class_name='SparseCategoricalAccuracy',\n threshold=tfma.config.MetricThreshold(\n value_threshold=tfma.GenericValueThreshold(\n lower_bound={'value': 0.9}\n ),\n change_threshold=tfma.GenericChangeThreshold(\n direction=tfma.MetricDirection.HIGHER_IS_BETTER,\n absolute={'value': -1e-10}\n )\n )\n )\n ]\n )\n ]\n )\n\n # Uses TFMA to compute a evaluation statistics over features of a model.\n model_analyzer = Evaluator(\n examples=example_gen.outputs['examples'],\n model=trainer.outputs['model'],\n baseline_model=model_resolver.outputs['model'],\n # Change threshold will be ignored if there is no baseline (first run).\n eval_config=eval_config\n )\n\n # Checks whether the model passed the validation steps and pushes the model\n # to a file destination if check passed.\n pusher = Pusher(\n model=trainer.outputs['model'],\n model_blessing=model_analyzer.outputs['blessing'],\n push_destination=pusher_pb2.PushDestination(\n filesystem=pusher_pb2.PushDestination.Filesystem(\n base_directory=os.path.\n join(str(pipeline.ROOT_PARAMETER), 'model_serving')\n )\n )\n )\n\n return pipeline.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=pipeline_root,\n components=[\n example_gen, statistics_gen, infer_schema, validate_stats, transform,\n trainer, model_resolver, model_analyzer, pusher\n ],\n enable_cache=True,\n )\n\n\nif __name__ == '__main__':\n absl.logging.set_verbosity(absl.logging.INFO)\n # Make sure the version of TFX image used is consistent with the version of\n # TFX SDK. Here we use tfx:0.21.2 image.\n config = kubeflow_dag_runner.KubeflowDagRunnerConfig(\n kubeflow_metadata_config=kubeflow_dag_runner.\n get_default_kubeflow_metadata_config(),\n tfx_image='gcr.io/tfx-oss-public/tfx:0.21.2',\n )\n kfp_runner = kubeflow_dag_runner.KubeflowDagRunner(\n output_filename=__file__ + '.yaml', config=config\n )\n kfp_runner.run(\n _create_pipeline(\n pipeline_name=_pipeline_name, pipeline_root=_pipeline_root\n )\n )\n", "path": "samples/core/iris/iris.py"}]}
3000
135
gh_patches_debug_7421
rasdani/github-patches
git_diff
safe-global__safe-config-service-65
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Format JSON keys in camel case Endpoints consumed by the clients should have the JSON keys in camel case. By having them camel case, it follows the formatting that we have in other services. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/config/settings.py` Content: ``` 1 """ 2 Django settings for safe_client_config_service project. 3 4 Generated by 'django-admin startproject' using Django 3.2. 5 6 For more information on this file, see 7 https://docs.djangoproject.com/en/3.2/topics/settings/ 8 9 For the full list of settings and their values, see 10 https://docs.djangoproject.com/en/3.2/ref/settings/ 11 """ 12 import os 13 from distutils.util import strtobool 14 from pathlib import Path 15 16 # Build paths inside the project like this: BASE_DIR / 'subdir'. 17 BASE_DIR = Path(__file__).resolve().parent.parent 18 19 # Quick-start development settings - unsuitable for production 20 # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ 21 22 # SECURITY WARNING: keep the secret key used in production secret! 23 SECRET_KEY = os.getenv("SECRET_KEY", None) 24 25 # SECURITY WARNING: don't run with debug turned on in production! 26 DEBUG = bool(strtobool(os.getenv("DEBUG", "false"))) 27 28 # https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-ALLOWED_HOSTS 29 allowed_hosts = os.getenv("DJANGO_ALLOWED_HOSTS", ".localhost,127.0.0.1,[::1]") 30 ALLOWED_HOSTS = [allowed_host.strip() for allowed_host in allowed_hosts.split(",")] 31 32 # Application definition 33 34 default_renderer_classes = os.getenv( 35 "REST_DEFAULT_RENDERER_CLASSES", "rest_framework.renderers.JSONRenderer" 36 ) 37 REST_FRAMEWORK = { 38 # https://www.django-rest-framework.org/api-guide/renderers/ 39 "DEFAULT_RENDERER_CLASSES": [ 40 default_renderer_class.strip() 41 for default_renderer_class in default_renderer_classes.split(",") 42 ] 43 } 44 45 INSTALLED_APPS = [ 46 "safe_apps.apps.AppsConfig", 47 "django.contrib.admin", 48 "django.contrib.auth", 49 "django.contrib.contenttypes", 50 "django.contrib.sessions", 51 "django.contrib.messages", 52 "django.contrib.staticfiles", 53 "rest_framework", 54 ] 55 56 MIDDLEWARE = [ 57 "config.middleware.LoggingMiddleware", 58 "django.middleware.security.SecurityMiddleware", 59 "django.contrib.sessions.middleware.SessionMiddleware", 60 "django.middleware.common.CommonMiddleware", 61 "django.middleware.csrf.CsrfViewMiddleware", 62 "django.contrib.auth.middleware.AuthenticationMiddleware", 63 "django.contrib.messages.middleware.MessageMiddleware", 64 "django.middleware.clickjacking.XFrameOptionsMiddleware", 65 ] 66 67 CACHES = { 68 "default": { 69 "BACKEND": "django.core.cache.backends.locmem.LocMemCache", 70 }, 71 "safe-apps": { 72 "BACKEND": "django.core.cache.backends.locmem.LocMemCache", 73 }, 74 } 75 76 LOGGING = { 77 "version": 1, 78 "disable_existing_loggers": False, 79 "formatters": { 80 "short": {"format": "%(asctime)s %(message)s"}, 81 "verbose": { 82 "format": "%(asctime)s [%(levelname)s] [%(processName)s] %(message)s" 83 }, 84 }, 85 "handlers": { 86 "console": { 87 "class": "logging.StreamHandler", 88 "formatter": "verbose", 89 }, 90 "console_short": { 91 "class": "logging.StreamHandler", 92 "formatter": "short", 93 }, 94 }, 95 "root": { 96 "handlers": ["console"], 97 "level": os.getenv("ROOT_LOG_LEVEL", "INFO"), 98 }, 99 "loggers": { 100 
"LoggingMiddleware": { 101 "handlers": ["console_short"], 102 "level": "INFO", 103 "propagate": False, 104 }, 105 }, 106 } 107 108 ROOT_URLCONF = "config.urls" 109 110 TEMPLATES = [ 111 { 112 "BACKEND": "django.template.backends.django.DjangoTemplates", 113 "DIRS": [], 114 "APP_DIRS": True, 115 "OPTIONS": { 116 "context_processors": [ 117 "django.template.context_processors.debug", 118 "django.template.context_processors.request", 119 "django.contrib.auth.context_processors.auth", 120 "django.contrib.messages.context_processors.messages", 121 ], 122 }, 123 }, 124 ] 125 126 WSGI_APPLICATION = "config.wsgi.application" 127 128 # Database 129 # https://docs.djangoproject.com/en/3.2/ref/settings/#databases 130 131 DATABASES = { 132 "default": { 133 "ENGINE": "django.db.backends.postgresql", 134 "NAME": os.getenv("POSTGRES_NAME", "postgres"), 135 "USER": os.getenv("POSTGRES_USER", "postgres"), 136 "PASSWORD": os.getenv("POSTGRES_PASSWORD", "postgres"), 137 "HOST": os.getenv("POSTGRES_HOST", "db"), 138 "PORT": os.getenv("POSTGRES_PORT", "5432"), 139 } 140 } 141 142 # Password validation 143 # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators 144 145 AUTH_PASSWORD_VALIDATORS = [ 146 { 147 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", 148 }, 149 { 150 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", 151 }, 152 { 153 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", 154 }, 155 { 156 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", 157 }, 158 ] 159 160 # Internationalization 161 # https://docs.djangoproject.com/en/3.2/topics/i18n/ 162 163 LANGUAGE_CODE = "en-us" 164 165 TIME_ZONE = "UTC" 166 167 USE_I18N = True 168 169 USE_L10N = True 170 171 USE_TZ = True 172 173 # Static files (CSS, JavaScript, Images) 174 # https://docs.djangoproject.com/en/3.2/howto/static-files/ 175 176 STATIC_URL = "/static/" 177 178 # Default primary key field type 179 # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field 180 181 DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField" 182 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/config/settings.py b/src/config/settings.py --- a/src/config/settings.py +++ b/src/config/settings.py @@ -31,14 +31,10 @@ # Application definition -default_renderer_classes = os.getenv( - "REST_DEFAULT_RENDERER_CLASSES", "rest_framework.renderers.JSONRenderer" -) REST_FRAMEWORK = { # https://www.django-rest-framework.org/api-guide/renderers/ "DEFAULT_RENDERER_CLASSES": [ - default_renderer_class.strip() - for default_renderer_class in default_renderer_classes.split(",") + "djangorestframework_camel_case.render.CamelCaseJSONRenderer", ] }
{"golden_diff": "diff --git a/src/config/settings.py b/src/config/settings.py\n--- a/src/config/settings.py\n+++ b/src/config/settings.py\n@@ -31,14 +31,10 @@\n \n # Application definition\n \n-default_renderer_classes = os.getenv(\n- \"REST_DEFAULT_RENDERER_CLASSES\", \"rest_framework.renderers.JSONRenderer\"\n-)\n REST_FRAMEWORK = {\n # https://www.django-rest-framework.org/api-guide/renderers/\n \"DEFAULT_RENDERER_CLASSES\": [\n- default_renderer_class.strip()\n- for default_renderer_class in default_renderer_classes.split(\",\")\n+ \"djangorestframework_camel_case.render.CamelCaseJSONRenderer\",\n ]\n }\n", "issue": "Format JSON keys in camel case\nEndpoints consumed by the clients should have the JSON keys in camel case. By having them camel case, it follows the formatting that we have in other services. \n", "before_files": [{"content": "\"\"\"\nDjango settings for safe_client_config_service project.\n\nGenerated by 'django-admin startproject' using Django 3.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.2/ref/settings/\n\"\"\"\nimport os\nfrom distutils.util import strtobool\nfrom pathlib import Path\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"SECRET_KEY\", None)\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(strtobool(os.getenv(\"DEBUG\", \"false\")))\n\n# https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-ALLOWED_HOSTS\nallowed_hosts = os.getenv(\"DJANGO_ALLOWED_HOSTS\", \".localhost,127.0.0.1,[::1]\")\nALLOWED_HOSTS = [allowed_host.strip() for allowed_host in allowed_hosts.split(\",\")]\n\n# Application definition\n\ndefault_renderer_classes = os.getenv(\n \"REST_DEFAULT_RENDERER_CLASSES\", \"rest_framework.renderers.JSONRenderer\"\n)\nREST_FRAMEWORK = {\n # https://www.django-rest-framework.org/api-guide/renderers/\n \"DEFAULT_RENDERER_CLASSES\": [\n default_renderer_class.strip()\n for default_renderer_class in default_renderer_classes.split(\",\")\n ]\n}\n\nINSTALLED_APPS = [\n \"safe_apps.apps.AppsConfig\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n]\n\nMIDDLEWARE = [\n \"config.middleware.LoggingMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n \"safe-apps\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"short\": {\"format\": \"%(asctime)s %(message)s\"},\n \"verbose\": {\n \"format\": \"%(asctime)s [%(levelname)s] [%(processName)s] 
%(message)s\"\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"console_short\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"short\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": os.getenv(\"ROOT_LOG_LEVEL\", \"INFO\"),\n },\n \"loggers\": {\n \"LoggingMiddleware\": {\n \"handlers\": [\"console_short\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n },\n}\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_NAME\", \"postgres\"),\n \"USER\": os.getenv(\"POSTGRES_USER\", \"postgres\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\", \"postgres\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\", \"db\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\", \"5432\"),\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\n# Default primary key field type\n# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field\n\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n", "path": "src/config/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for safe_client_config_service project.\n\nGenerated by 'django-admin startproject' using Django 3.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.2/ref/settings/\n\"\"\"\nimport os\nfrom distutils.util import strtobool\nfrom pathlib import Path\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"SECRET_KEY\", None)\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(strtobool(os.getenv(\"DEBUG\", \"false\")))\n\n# https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-ALLOWED_HOSTS\nallowed_hosts = os.getenv(\"DJANGO_ALLOWED_HOSTS\", 
\".localhost,127.0.0.1,[::1]\")\nALLOWED_HOSTS = [allowed_host.strip() for allowed_host in allowed_hosts.split(\",\")]\n\n# Application definition\n\nREST_FRAMEWORK = {\n # https://www.django-rest-framework.org/api-guide/renderers/\n \"DEFAULT_RENDERER_CLASSES\": [\n \"djangorestframework_camel_case.render.CamelCaseJSONRenderer\",\n ]\n}\n\nINSTALLED_APPS = [\n \"safe_apps.apps.AppsConfig\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n]\n\nMIDDLEWARE = [\n \"config.middleware.LoggingMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n \"safe-apps\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"short\": {\"format\": \"%(asctime)s %(message)s\"},\n \"verbose\": {\n \"format\": \"%(asctime)s [%(levelname)s] [%(processName)s] %(message)s\"\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"console_short\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"short\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": os.getenv(\"ROOT_LOG_LEVEL\", \"INFO\"),\n },\n \"loggers\": {\n \"LoggingMiddleware\": {\n \"handlers\": [\"console_short\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n },\n}\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_NAME\", \"postgres\"),\n \"USER\": os.getenv(\"POSTGRES_USER\", \"postgres\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\", \"postgres\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\", \"db\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\", \"5432\"),\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = 
True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\n# Default primary key field type\n# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field\n\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n", "path": "src/config/settings.py"}]}
1950
138
gh_patches_debug_25504
rasdani/github-patches
git_diff
canonical__microk8s-4235
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- update homebrew formula to newest microk8s version (1.28) - otherwise Mac Users can't use it. Summary The latest present formula on homebrew as of October 2023 point to ubuntu version 22.04 and microk8s version 1.27. This makes it near to impossible for mac users to use it. Why is this important? Lot has changed since that time. The instructions do not work in the present day, leading to newbies like myself wasting precious time, assuming the fault is theirs :) Are you interested in contributing to this feature? yep definitely. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `installer/common/definitions.py` Content: ``` 1 MAX_CHARACTERS_WRAP: int = 120 2 command_descriptions = { 3 "add-node": "Adds a node to a cluster", 4 "ambassador": "Ambassador API Gateway and Ingress", 5 "cilium": "The cilium client", 6 "config": "Print the kubeconfig", 7 "ctr": "The containerd client", 8 "dashboard-proxy": "Enable the Kubernetes dashboard and proxy to host", 9 "dbctl": "Backup and restore the Kubernetes datastore", 10 "disable": "Disables running add-ons", 11 "enable": "Enables useful add-ons", 12 "helm": "The helm client", 13 "helm3": "The helm3 client", 14 "inspect": "Checks the cluster and gathers logs", 15 "istioctl": "The istio client", 16 "join": "Joins this instance as a node to a cluster", 17 "kubectl": "The kubernetes client", 18 "leave": "Disconnects this node from any cluster it has joined", 19 "linkerd": "The linkerd client", 20 "refresh-certs": "Refresh the CA certificates in this deployment", 21 "remove-node": "Removes a node from the cluster", 22 "reset": "Cleans the cluster from all workloads", 23 "start": "Starts the kubernetes cluster", 24 "status": "Displays the status of the cluster", 25 "stop": "Stops the kubernetes cluster", 26 } 27 DEFAULT_CORES: int = 2 28 DEFAULT_MEMORY_GB: int = 4 29 DEFAULT_DISK_GB: int = 50 30 DEFAULT_ASSUME: bool = False 31 DEFAULT_CHANNEL: str = "1.27/stable" 32 DEFAULT_IMAGE: str = "22.04" 33 34 MIN_CORES: int = 2 35 MIN_MEMORY_GB: int = 2 36 MIN_DISK_GB: int = 10 37 ``` Path: `installer/vm_providers/_multipass/_windows.py` Content: ``` 1 # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- 2 # 3 # Copyright (C) 2018 Canonical Ltd 4 # 5 # This program is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License version 3 as 7 # published by the Free Software Foundation. 8 # 9 # This program is distributed in the hope that it will be useful, 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 # GNU General Public License for more details. 13 # 14 # You should have received a copy of the GNU General Public License 15 # along with this program. If not, see <http://www.gnu.org/licenses/>. 
16 17 import logging 18 import os.path 19 import requests 20 import shutil 21 import simplejson 22 import subprocess 23 import sys 24 import tempfile 25 26 from progressbar import AnimatedMarker, Bar, Percentage, ProgressBar, UnknownLength 27 28 from common.file_utils import calculate_sha3_384, is_dumb_terminal 29 from vm_providers.errors import ( 30 ProviderMultipassDownloadFailed, 31 ProviderMultipassInstallationFailed, 32 ) 33 34 if sys.platform == "win32": 35 import winreg 36 37 38 logger = logging.getLogger(__name__) 39 40 41 _MULTIPASS_RELEASES_API_URL = "https://api.github.com/repos/canonical/multipass/releases" 42 _MULTIPASS_DL_VERSION = "1.12.0" 43 _MULTIPASS_DL_NAME = "multipass-{version}+win-win64.exe".format(version=_MULTIPASS_DL_VERSION) 44 45 # Download multipass installer and calculate hash: 46 # python3 -c "from installer.common.file_utils import calculate_sha3_384; print(calculate_sha3_384('$HOME/Downloads/multipass-1.11.1+win-win64.exe'))" # noqa: E501 47 _MULTIPASS_DL_SHA3_384 = "ddba66059052a67fa6a363729b75aca374591bc5a2531c938dd70d63f683c22108d5c2ab77025b818b31f69103228eee" # noqa: E501 48 49 50 def windows_reload_multipass_path_env(): 51 """Update PATH to include installed Multipass, if not already set.""" 52 53 assert sys.platform == "win32" 54 55 key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Environment") 56 57 paths = os.environ["PATH"].split(";") 58 59 # Drop empty placeholder for trailing comma, if present. 60 if paths[-1] == "": 61 del paths[-1] 62 63 reg_user_path, _ = winreg.QueryValueEx(key, "Path") 64 for path in reg_user_path.split(";"): 65 if path not in paths and "Multipass" in path: 66 paths.append(path) 67 68 # Restore path with trailing comma. 69 os.environ["PATH"] = ";".join(paths) + ";" 70 71 72 def _run_installer(installer_path: str, echoer): 73 """Execute multipass installer.""" 74 75 echoer.info("Installing Multipass...") 76 77 # Multipass requires administrative privileges to install, which requires 78 # the use of `runas` functionality. Some of the options included: 79 # (1) https://stackoverflow.com/a/34216774 80 # (2) ShellExecuteW and wait on installer by attempting to delete it. 81 # Windows would prevent us from deleting installer with a PermissionError: 82 # PermissionError: [WinError 32] The process cannot access the file because 83 # it is being used by another process: <path> 84 # (3) Use PowerShell's "Start-Process" with RunAs verb as shown below. 85 # None of the options are quite ideal, but #3 will do. 86 cmd = """ 87 & {{ 88 try {{ 89 $Output = Start-Process -FilePath {path!r} -Args /S -Verb RunAs -Wait -PassThru 90 }} catch {{ 91 [Environment]::Exit(1) 92 }} 93 }} 94 """.format( 95 path=installer_path 96 ) 97 98 try: 99 subprocess.check_call(["powershell.exe", "-Command", cmd]) 100 except subprocess.CalledProcessError: 101 raise ProviderMultipassInstallationFailed("error launching installer") 102 103 # Reload path environment to see if we can find multipass now. 104 windows_reload_multipass_path_env() 105 106 if not shutil.which("multipass.exe"): 107 # Installation failed. 108 raise ProviderMultipassInstallationFailed("installation did not complete successfully") 109 110 echoer.info("Multipass installation completed successfully.") 111 112 113 def _requests_exception_hint(e: requests.RequestException) -> str: 114 # Use the __doc__ description to give the user a hint. It seems to be a 115 # a decent option over trying to enumerate all of possible types. 
116 if e.__doc__: 117 split_lines = e.__doc__.splitlines() 118 if split_lines: 119 return e.__doc__.splitlines()[0].decode().strip() 120 121 # Should never get here. 122 return "unknown download error" 123 124 125 def _fetch_installer_url() -> str: 126 """Verify version set is a valid 127 ref in GitHub and return the full 128 URL. 129 """ 130 131 try: 132 resp = requests.get(_MULTIPASS_RELEASES_API_URL) 133 except requests.RequestException as e: 134 raise ProviderMultipassDownloadFailed(_requests_exception_hint(e)) 135 136 try: 137 data = resp.json() 138 except simplejson.JSONDecodeError: 139 raise ProviderMultipassDownloadFailed( 140 "failed to fetch valid release data from {}".format(_MULTIPASS_RELEASES_API_URL) 141 ) 142 143 for assets in data: 144 for asset in assets.get("assets", list()): 145 # Find matching name. 146 if asset.get("name") != _MULTIPASS_DL_NAME: 147 continue 148 149 return asset.get("browser_download_url") 150 151 # Something changed we don't know about - we will simply categorize 152 # all possible events as an updated version we do not yet know about. 153 raise ProviderMultipassDownloadFailed("ref specified is not a valid ref in GitHub") 154 155 156 def _download_multipass(dl_dir: str, echoer) -> str: 157 """Creates temporary Downloads installer to temp directory.""" 158 159 dl_url = _fetch_installer_url() 160 dl_basename = os.path.basename(dl_url) 161 dl_path = os.path.join(dl_dir, dl_basename) 162 163 echoer.info("Downloading Multipass installer...\n{} -> {}".format(dl_url, dl_path)) 164 165 try: 166 request = requests.get(dl_url, stream=True, allow_redirects=True) 167 request.raise_for_status() 168 download_requests_stream(request, dl_path) 169 except requests.RequestException as e: 170 raise ProviderMultipassDownloadFailed(_requests_exception_hint(e)) 171 172 digest = calculate_sha3_384(dl_path) 173 if digest != _MULTIPASS_DL_SHA3_384: 174 raise ProviderMultipassDownloadFailed( 175 "download failed verification (expected={} but found={})".format( 176 _MULTIPASS_DL_SHA3_384, digest 177 ) 178 ) 179 180 echoer.info("Verified installer successfully...") 181 return dl_path 182 183 184 def windows_install_multipass(echoer) -> None: 185 """Download and install multipass.""" 186 187 assert sys.platform == "win32" 188 189 dl_dir = tempfile.mkdtemp() 190 dl_path = _download_multipass(dl_dir, echoer) 191 _run_installer(dl_path, echoer) 192 193 # Cleanup. 
194 shutil.rmtree(dl_dir) 195 196 197 def _init_progress_bar(total_length, destination, message=None): 198 if not message: 199 message = "Downloading {!r}".format(os.path.basename(destination)) 200 201 valid_length = total_length and total_length > 0 202 203 if valid_length and is_dumb_terminal(): 204 widgets = [message, " ", Percentage()] 205 maxval = total_length 206 elif valid_length and not is_dumb_terminal(): 207 widgets = [message, Bar(marker="=", left="[", right="]"), " ", Percentage()] 208 maxval = total_length 209 elif not valid_length and is_dumb_terminal(): 210 widgets = [message] 211 maxval = UnknownLength 212 else: 213 widgets = [message, AnimatedMarker()] 214 maxval = UnknownLength 215 216 return ProgressBar(widgets=widgets, maxval=maxval) 217 218 219 def download_requests_stream(request_stream, destination, message=None, total_read=0): 220 """This is a facility to download a request with nice progress bars.""" 221 222 # Doing len(request_stream.content) may defeat the purpose of a 223 # progress bar 224 total_length = 0 225 if not request_stream.headers.get("Content-Encoding", ""): 226 total_length = int(request_stream.headers.get("Content-Length", "0")) 227 # Content-Length in the case of resuming will be 228 # Content-Length - total_read so we add back up to have the feel of 229 # resuming 230 if os.path.exists(destination): 231 total_length += total_read 232 233 progress_bar = _init_progress_bar(total_length, destination, message) 234 progress_bar.start() 235 236 if os.path.exists(destination): 237 mode = "ab" 238 else: 239 mode = "wb" 240 with open(destination, mode) as destination_file: 241 for buf in request_stream.iter_content(1024): 242 destination_file.write(buf) 243 if not is_dumb_terminal(): 244 total_read += len(buf) 245 progress_bar.update(total_read) 246 progress_bar.finish() 247 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/installer/common/definitions.py b/installer/common/definitions.py --- a/installer/common/definitions.py +++ b/installer/common/definitions.py @@ -28,7 +28,7 @@ DEFAULT_MEMORY_GB: int = 4 DEFAULT_DISK_GB: int = 50 DEFAULT_ASSUME: bool = False -DEFAULT_CHANNEL: str = "1.27/stable" +DEFAULT_CHANNEL: str = "1.28/stable" DEFAULT_IMAGE: str = "22.04" MIN_CORES: int = 2 diff --git a/installer/vm_providers/_multipass/_windows.py b/installer/vm_providers/_multipass/_windows.py --- a/installer/vm_providers/_multipass/_windows.py +++ b/installer/vm_providers/_multipass/_windows.py @@ -39,12 +39,12 @@ _MULTIPASS_RELEASES_API_URL = "https://api.github.com/repos/canonical/multipass/releases" -_MULTIPASS_DL_VERSION = "1.12.0" +_MULTIPASS_DL_VERSION = "1.12.2" _MULTIPASS_DL_NAME = "multipass-{version}+win-win64.exe".format(version=_MULTIPASS_DL_VERSION) # Download multipass installer and calculate hash: # python3 -c "from installer.common.file_utils import calculate_sha3_384; print(calculate_sha3_384('$HOME/Downloads/multipass-1.11.1+win-win64.exe'))" # noqa: E501 -_MULTIPASS_DL_SHA3_384 = "ddba66059052a67fa6a363729b75aca374591bc5a2531c938dd70d63f683c22108d5c2ab77025b818b31f69103228eee" # noqa: E501 +_MULTIPASS_DL_SHA3_384 = "9031c8fc98b941df1094a832c356e12f281c70d0eb10bee15b5576c61af4c8a17ef32b833f0043c8df0e04897e69c8bc" # noqa: E501 def windows_reload_multipass_path_env():
{"golden_diff": "diff --git a/installer/common/definitions.py b/installer/common/definitions.py\n--- a/installer/common/definitions.py\n+++ b/installer/common/definitions.py\n@@ -28,7 +28,7 @@\n DEFAULT_MEMORY_GB: int = 4\n DEFAULT_DISK_GB: int = 50\n DEFAULT_ASSUME: bool = False\n-DEFAULT_CHANNEL: str = \"1.27/stable\"\n+DEFAULT_CHANNEL: str = \"1.28/stable\"\n DEFAULT_IMAGE: str = \"22.04\"\n \n MIN_CORES: int = 2\ndiff --git a/installer/vm_providers/_multipass/_windows.py b/installer/vm_providers/_multipass/_windows.py\n--- a/installer/vm_providers/_multipass/_windows.py\n+++ b/installer/vm_providers/_multipass/_windows.py\n@@ -39,12 +39,12 @@\n \n \n _MULTIPASS_RELEASES_API_URL = \"https://api.github.com/repos/canonical/multipass/releases\"\n-_MULTIPASS_DL_VERSION = \"1.12.0\"\n+_MULTIPASS_DL_VERSION = \"1.12.2\"\n _MULTIPASS_DL_NAME = \"multipass-{version}+win-win64.exe\".format(version=_MULTIPASS_DL_VERSION)\n \n # Download multipass installer and calculate hash:\n # python3 -c \"from installer.common.file_utils import calculate_sha3_384; print(calculate_sha3_384('$HOME/Downloads/multipass-1.11.1+win-win64.exe'))\" # noqa: E501\n-_MULTIPASS_DL_SHA3_384 = \"ddba66059052a67fa6a363729b75aca374591bc5a2531c938dd70d63f683c22108d5c2ab77025b818b31f69103228eee\" # noqa: E501\n+_MULTIPASS_DL_SHA3_384 = \"9031c8fc98b941df1094a832c356e12f281c70d0eb10bee15b5576c61af4c8a17ef32b833f0043c8df0e04897e69c8bc\" # noqa: E501\n \n \n def windows_reload_multipass_path_env():\n", "issue": "update homebrew formula to newest microk8s version (1.28) - otherwise Mac Users can't use it. \nSummary\r\nThe latest present formula on homebrew as of October 2023 point to ubuntu version 22.04 and microk8s version 1.27. This makes it near to impossible for mac users to use it.\r\n\r\nWhy is this important?\r\nLot has changed since that time. 
The instructions do not work in the present day, leading to newbies like myself wasting precious time, assuming the fault is theirs :)\r\n\r\nAre you interested in contributing to this feature?\r\nyep definitely.\n", "before_files": [{"content": "MAX_CHARACTERS_WRAP: int = 120\ncommand_descriptions = {\n \"add-node\": \"Adds a node to a cluster\",\n \"ambassador\": \"Ambassador API Gateway and Ingress\",\n \"cilium\": \"The cilium client\",\n \"config\": \"Print the kubeconfig\",\n \"ctr\": \"The containerd client\",\n \"dashboard-proxy\": \"Enable the Kubernetes dashboard and proxy to host\",\n \"dbctl\": \"Backup and restore the Kubernetes datastore\",\n \"disable\": \"Disables running add-ons\",\n \"enable\": \"Enables useful add-ons\",\n \"helm\": \"The helm client\",\n \"helm3\": \"The helm3 client\",\n \"inspect\": \"Checks the cluster and gathers logs\",\n \"istioctl\": \"The istio client\",\n \"join\": \"Joins this instance as a node to a cluster\",\n \"kubectl\": \"The kubernetes client\",\n \"leave\": \"Disconnects this node from any cluster it has joined\",\n \"linkerd\": \"The linkerd client\",\n \"refresh-certs\": \"Refresh the CA certificates in this deployment\",\n \"remove-node\": \"Removes a node from the cluster\",\n \"reset\": \"Cleans the cluster from all workloads\",\n \"start\": \"Starts the kubernetes cluster\",\n \"status\": \"Displays the status of the cluster\",\n \"stop\": \"Stops the kubernetes cluster\",\n}\nDEFAULT_CORES: int = 2\nDEFAULT_MEMORY_GB: int = 4\nDEFAULT_DISK_GB: int = 50\nDEFAULT_ASSUME: bool = False\nDEFAULT_CHANNEL: str = \"1.27/stable\"\nDEFAULT_IMAGE: str = \"22.04\"\n\nMIN_CORES: int = 2\nMIN_MEMORY_GB: int = 2\nMIN_DISK_GB: int = 10\n", "path": "installer/common/definitions.py"}, {"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright (C) 2018 Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport logging\nimport os.path\nimport requests\nimport shutil\nimport simplejson\nimport subprocess\nimport sys\nimport tempfile\n\nfrom progressbar import AnimatedMarker, Bar, Percentage, ProgressBar, UnknownLength\n\nfrom common.file_utils import calculate_sha3_384, is_dumb_terminal\nfrom vm_providers.errors import (\n ProviderMultipassDownloadFailed,\n ProviderMultipassInstallationFailed,\n)\n\nif sys.platform == \"win32\":\n import winreg\n\n\nlogger = logging.getLogger(__name__)\n\n\n_MULTIPASS_RELEASES_API_URL = \"https://api.github.com/repos/canonical/multipass/releases\"\n_MULTIPASS_DL_VERSION = \"1.12.0\"\n_MULTIPASS_DL_NAME = \"multipass-{version}+win-win64.exe\".format(version=_MULTIPASS_DL_VERSION)\n\n# Download multipass installer and calculate hash:\n# python3 -c \"from installer.common.file_utils import calculate_sha3_384; print(calculate_sha3_384('$HOME/Downloads/multipass-1.11.1+win-win64.exe'))\" # noqa: E501\n_MULTIPASS_DL_SHA3_384 = \"ddba66059052a67fa6a363729b75aca374591bc5a2531c938dd70d63f683c22108d5c2ab77025b818b31f69103228eee\" # noqa: E501\n\n\ndef windows_reload_multipass_path_env():\n \"\"\"Update PATH to include installed Multipass, if not already set.\"\"\"\n\n assert sys.platform == \"win32\"\n\n key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, \"Environment\")\n\n paths = os.environ[\"PATH\"].split(\";\")\n\n # Drop empty placeholder for trailing comma, if present.\n if paths[-1] == \"\":\n del paths[-1]\n\n reg_user_path, _ = winreg.QueryValueEx(key, \"Path\")\n for path in reg_user_path.split(\";\"):\n if path not in paths and \"Multipass\" in path:\n paths.append(path)\n\n # Restore path with trailing comma.\n os.environ[\"PATH\"] = \";\".join(paths) + \";\"\n\n\ndef _run_installer(installer_path: str, echoer):\n \"\"\"Execute multipass installer.\"\"\"\n\n echoer.info(\"Installing Multipass...\")\n\n # Multipass requires administrative privileges to install, which requires\n # the use of `runas` functionality. Some of the options included:\n # (1) https://stackoverflow.com/a/34216774\n # (2) ShellExecuteW and wait on installer by attempting to delete it.\n # Windows would prevent us from deleting installer with a PermissionError:\n # PermissionError: [WinError 32] The process cannot access the file because\n # it is being used by another process: <path>\n # (3) Use PowerShell's \"Start-Process\" with RunAs verb as shown below.\n # None of the options are quite ideal, but #3 will do.\n cmd = \"\"\"\n & {{\n try {{\n $Output = Start-Process -FilePath {path!r} -Args /S -Verb RunAs -Wait -PassThru\n }} catch {{\n [Environment]::Exit(1)\n }}\n }}\n \"\"\".format(\n path=installer_path\n )\n\n try:\n subprocess.check_call([\"powershell.exe\", \"-Command\", cmd])\n except subprocess.CalledProcessError:\n raise ProviderMultipassInstallationFailed(\"error launching installer\")\n\n # Reload path environment to see if we can find multipass now.\n windows_reload_multipass_path_env()\n\n if not shutil.which(\"multipass.exe\"):\n # Installation failed.\n raise ProviderMultipassInstallationFailed(\"installation did not complete successfully\")\n\n echoer.info(\"Multipass installation completed successfully.\")\n\n\ndef _requests_exception_hint(e: requests.RequestException) -> str:\n # Use the __doc__ description to give the user a hint. 
It seems to be a\n # a decent option over trying to enumerate all of possible types.\n if e.__doc__:\n split_lines = e.__doc__.splitlines()\n if split_lines:\n return e.__doc__.splitlines()[0].decode().strip()\n\n # Should never get here.\n return \"unknown download error\"\n\n\ndef _fetch_installer_url() -> str:\n \"\"\"Verify version set is a valid\n ref in GitHub and return the full\n URL.\n \"\"\"\n\n try:\n resp = requests.get(_MULTIPASS_RELEASES_API_URL)\n except requests.RequestException as e:\n raise ProviderMultipassDownloadFailed(_requests_exception_hint(e))\n\n try:\n data = resp.json()\n except simplejson.JSONDecodeError:\n raise ProviderMultipassDownloadFailed(\n \"failed to fetch valid release data from {}\".format(_MULTIPASS_RELEASES_API_URL)\n )\n\n for assets in data:\n for asset in assets.get(\"assets\", list()):\n # Find matching name.\n if asset.get(\"name\") != _MULTIPASS_DL_NAME:\n continue\n\n return asset.get(\"browser_download_url\")\n\n # Something changed we don't know about - we will simply categorize\n # all possible events as an updated version we do not yet know about.\n raise ProviderMultipassDownloadFailed(\"ref specified is not a valid ref in GitHub\")\n\n\ndef _download_multipass(dl_dir: str, echoer) -> str:\n \"\"\"Creates temporary Downloads installer to temp directory.\"\"\"\n\n dl_url = _fetch_installer_url()\n dl_basename = os.path.basename(dl_url)\n dl_path = os.path.join(dl_dir, dl_basename)\n\n echoer.info(\"Downloading Multipass installer...\\n{} -> {}\".format(dl_url, dl_path))\n\n try:\n request = requests.get(dl_url, stream=True, allow_redirects=True)\n request.raise_for_status()\n download_requests_stream(request, dl_path)\n except requests.RequestException as e:\n raise ProviderMultipassDownloadFailed(_requests_exception_hint(e))\n\n digest = calculate_sha3_384(dl_path)\n if digest != _MULTIPASS_DL_SHA3_384:\n raise ProviderMultipassDownloadFailed(\n \"download failed verification (expected={} but found={})\".format(\n _MULTIPASS_DL_SHA3_384, digest\n )\n )\n\n echoer.info(\"Verified installer successfully...\")\n return dl_path\n\n\ndef windows_install_multipass(echoer) -> None:\n \"\"\"Download and install multipass.\"\"\"\n\n assert sys.platform == \"win32\"\n\n dl_dir = tempfile.mkdtemp()\n dl_path = _download_multipass(dl_dir, echoer)\n _run_installer(dl_path, echoer)\n\n # Cleanup.\n shutil.rmtree(dl_dir)\n\n\ndef _init_progress_bar(total_length, destination, message=None):\n if not message:\n message = \"Downloading {!r}\".format(os.path.basename(destination))\n\n valid_length = total_length and total_length > 0\n\n if valid_length and is_dumb_terminal():\n widgets = [message, \" \", Percentage()]\n maxval = total_length\n elif valid_length and not is_dumb_terminal():\n widgets = [message, Bar(marker=\"=\", left=\"[\", right=\"]\"), \" \", Percentage()]\n maxval = total_length\n elif not valid_length and is_dumb_terminal():\n widgets = [message]\n maxval = UnknownLength\n else:\n widgets = [message, AnimatedMarker()]\n maxval = UnknownLength\n\n return ProgressBar(widgets=widgets, maxval=maxval)\n\n\ndef download_requests_stream(request_stream, destination, message=None, total_read=0):\n \"\"\"This is a facility to download a request with nice progress bars.\"\"\"\n\n # Doing len(request_stream.content) may defeat the purpose of a\n # progress bar\n total_length = 0\n if not request_stream.headers.get(\"Content-Encoding\", \"\"):\n total_length = int(request_stream.headers.get(\"Content-Length\", \"0\"))\n # Content-Length in the case 
of resuming will be\n # Content-Length - total_read so we add back up to have the feel of\n # resuming\n if os.path.exists(destination):\n total_length += total_read\n\n progress_bar = _init_progress_bar(total_length, destination, message)\n progress_bar.start()\n\n if os.path.exists(destination):\n mode = \"ab\"\n else:\n mode = \"wb\"\n with open(destination, mode) as destination_file:\n for buf in request_stream.iter_content(1024):\n destination_file.write(buf)\n if not is_dumb_terminal():\n total_read += len(buf)\n progress_bar.update(total_read)\n progress_bar.finish()\n", "path": "installer/vm_providers/_multipass/_windows.py"}], "after_files": [{"content": "MAX_CHARACTERS_WRAP: int = 120\ncommand_descriptions = {\n \"add-node\": \"Adds a node to a cluster\",\n \"ambassador\": \"Ambassador API Gateway and Ingress\",\n \"cilium\": \"The cilium client\",\n \"config\": \"Print the kubeconfig\",\n \"ctr\": \"The containerd client\",\n \"dashboard-proxy\": \"Enable the Kubernetes dashboard and proxy to host\",\n \"dbctl\": \"Backup and restore the Kubernetes datastore\",\n \"disable\": \"Disables running add-ons\",\n \"enable\": \"Enables useful add-ons\",\n \"helm\": \"The helm client\",\n \"helm3\": \"The helm3 client\",\n \"inspect\": \"Checks the cluster and gathers logs\",\n \"istioctl\": \"The istio client\",\n \"join\": \"Joins this instance as a node to a cluster\",\n \"kubectl\": \"The kubernetes client\",\n \"leave\": \"Disconnects this node from any cluster it has joined\",\n \"linkerd\": \"The linkerd client\",\n \"refresh-certs\": \"Refresh the CA certificates in this deployment\",\n \"remove-node\": \"Removes a node from the cluster\",\n \"reset\": \"Cleans the cluster from all workloads\",\n \"start\": \"Starts the kubernetes cluster\",\n \"status\": \"Displays the status of the cluster\",\n \"stop\": \"Stops the kubernetes cluster\",\n}\nDEFAULT_CORES: int = 2\nDEFAULT_MEMORY_GB: int = 4\nDEFAULT_DISK_GB: int = 50\nDEFAULT_ASSUME: bool = False\nDEFAULT_CHANNEL: str = \"1.28/stable\"\nDEFAULT_IMAGE: str = \"22.04\"\n\nMIN_CORES: int = 2\nMIN_MEMORY_GB: int = 2\nMIN_DISK_GB: int = 10\n", "path": "installer/common/definitions.py"}, {"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright (C) 2018 Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport logging\nimport os.path\nimport requests\nimport shutil\nimport simplejson\nimport subprocess\nimport sys\nimport tempfile\n\nfrom progressbar import AnimatedMarker, Bar, Percentage, ProgressBar, UnknownLength\n\nfrom common.file_utils import calculate_sha3_384, is_dumb_terminal\nfrom vm_providers.errors import (\n ProviderMultipassDownloadFailed,\n ProviderMultipassInstallationFailed,\n)\n\nif sys.platform == \"win32\":\n import winreg\n\n\nlogger = logging.getLogger(__name__)\n\n\n_MULTIPASS_RELEASES_API_URL = \"https://api.github.com/repos/canonical/multipass/releases\"\n_MULTIPASS_DL_VERSION = \"1.12.2\"\n_MULTIPASS_DL_NAME = \"multipass-{version}+win-win64.exe\".format(version=_MULTIPASS_DL_VERSION)\n\n# Download multipass installer and calculate hash:\n# python3 -c \"from installer.common.file_utils import calculate_sha3_384; print(calculate_sha3_384('$HOME/Downloads/multipass-1.11.1+win-win64.exe'))\" # noqa: E501\n_MULTIPASS_DL_SHA3_384 = \"9031c8fc98b941df1094a832c356e12f281c70d0eb10bee15b5576c61af4c8a17ef32b833f0043c8df0e04897e69c8bc\" # noqa: E501\n\n\ndef windows_reload_multipass_path_env():\n \"\"\"Update PATH to include installed Multipass, if not already set.\"\"\"\n\n assert sys.platform == \"win32\"\n\n key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, \"Environment\")\n\n paths = os.environ[\"PATH\"].split(\";\")\n\n # Drop empty placeholder for trailing comma, if present.\n if paths[-1] == \"\":\n del paths[-1]\n\n reg_user_path, _ = winreg.QueryValueEx(key, \"Path\")\n for path in reg_user_path.split(\";\"):\n if path not in paths and \"Multipass\" in path:\n paths.append(path)\n\n # Restore path with trailing comma.\n os.environ[\"PATH\"] = \";\".join(paths) + \";\"\n\n\ndef _run_installer(installer_path: str, echoer):\n \"\"\"Execute multipass installer.\"\"\"\n\n echoer.info(\"Installing Multipass...\")\n\n # Multipass requires administrative privileges to install, which requires\n # the use of `runas` functionality. Some of the options included:\n # (1) https://stackoverflow.com/a/34216774\n # (2) ShellExecuteW and wait on installer by attempting to delete it.\n # Windows would prevent us from deleting installer with a PermissionError:\n # PermissionError: [WinError 32] The process cannot access the file because\n # it is being used by another process: <path>\n # (3) Use PowerShell's \"Start-Process\" with RunAs verb as shown below.\n # None of the options are quite ideal, but #3 will do.\n cmd = \"\"\"\n & {{\n try {{\n $Output = Start-Process -FilePath {path!r} -Args /S -Verb RunAs -Wait -PassThru\n }} catch {{\n [Environment]::Exit(1)\n }}\n }}\n \"\"\".format(\n path=installer_path\n )\n\n try:\n subprocess.check_call([\"powershell.exe\", \"-Command\", cmd])\n except subprocess.CalledProcessError:\n raise ProviderMultipassInstallationFailed(\"error launching installer\")\n\n # Reload path environment to see if we can find multipass now.\n windows_reload_multipass_path_env()\n\n if not shutil.which(\"multipass.exe\"):\n # Installation failed.\n raise ProviderMultipassInstallationFailed(\"installation did not complete successfully\")\n\n echoer.info(\"Multipass installation completed successfully.\")\n\n\ndef _requests_exception_hint(e: requests.RequestException) -> str:\n # Use the __doc__ description to give the user a hint. 
It seems to be a\n # a decent option over trying to enumerate all of possible types.\n if e.__doc__:\n split_lines = e.__doc__.splitlines()\n if split_lines:\n return e.__doc__.splitlines()[0].decode().strip()\n\n # Should never get here.\n return \"unknown download error\"\n\n\ndef _fetch_installer_url() -> str:\n \"\"\"Verify version set is a valid\n ref in GitHub and return the full\n URL.\n \"\"\"\n\n try:\n resp = requests.get(_MULTIPASS_RELEASES_API_URL)\n except requests.RequestException as e:\n raise ProviderMultipassDownloadFailed(_requests_exception_hint(e))\n\n try:\n data = resp.json()\n except simplejson.JSONDecodeError:\n raise ProviderMultipassDownloadFailed(\n \"failed to fetch valid release data from {}\".format(_MULTIPASS_RELEASES_API_URL)\n )\n\n for assets in data:\n for asset in assets.get(\"assets\", list()):\n # Find matching name.\n if asset.get(\"name\") != _MULTIPASS_DL_NAME:\n continue\n\n return asset.get(\"browser_download_url\")\n\n # Something changed we don't know about - we will simply categorize\n # all possible events as an updated version we do not yet know about.\n raise ProviderMultipassDownloadFailed(\"ref specified is not a valid ref in GitHub\")\n\n\ndef _download_multipass(dl_dir: str, echoer) -> str:\n \"\"\"Creates temporary Downloads installer to temp directory.\"\"\"\n\n dl_url = _fetch_installer_url()\n dl_basename = os.path.basename(dl_url)\n dl_path = os.path.join(dl_dir, dl_basename)\n\n echoer.info(\"Downloading Multipass installer...\\n{} -> {}\".format(dl_url, dl_path))\n\n try:\n request = requests.get(dl_url, stream=True, allow_redirects=True)\n request.raise_for_status()\n download_requests_stream(request, dl_path)\n except requests.RequestException as e:\n raise ProviderMultipassDownloadFailed(_requests_exception_hint(e))\n\n digest = calculate_sha3_384(dl_path)\n if digest != _MULTIPASS_DL_SHA3_384:\n raise ProviderMultipassDownloadFailed(\n \"download failed verification (expected={} but found={})\".format(\n _MULTIPASS_DL_SHA3_384, digest\n )\n )\n\n echoer.info(\"Verified installer successfully...\")\n return dl_path\n\n\ndef windows_install_multipass(echoer) -> None:\n \"\"\"Download and install multipass.\"\"\"\n\n assert sys.platform == \"win32\"\n\n dl_dir = tempfile.mkdtemp()\n dl_path = _download_multipass(dl_dir, echoer)\n _run_installer(dl_path, echoer)\n\n # Cleanup.\n shutil.rmtree(dl_dir)\n\n\ndef _init_progress_bar(total_length, destination, message=None):\n if not message:\n message = \"Downloading {!r}\".format(os.path.basename(destination))\n\n valid_length = total_length and total_length > 0\n\n if valid_length and is_dumb_terminal():\n widgets = [message, \" \", Percentage()]\n maxval = total_length\n elif valid_length and not is_dumb_terminal():\n widgets = [message, Bar(marker=\"=\", left=\"[\", right=\"]\"), \" \", Percentage()]\n maxval = total_length\n elif not valid_length and is_dumb_terminal():\n widgets = [message]\n maxval = UnknownLength\n else:\n widgets = [message, AnimatedMarker()]\n maxval = UnknownLength\n\n return ProgressBar(widgets=widgets, maxval=maxval)\n\n\ndef download_requests_stream(request_stream, destination, message=None, total_read=0):\n \"\"\"This is a facility to download a request with nice progress bars.\"\"\"\n\n # Doing len(request_stream.content) may defeat the purpose of a\n # progress bar\n total_length = 0\n if not request_stream.headers.get(\"Content-Encoding\", \"\"):\n total_length = int(request_stream.headers.get(\"Content-Length\", \"0\"))\n # Content-Length in the case 
of resuming will be\n # Content-Length - total_read so we add back up to have the feel of\n # resuming\n if os.path.exists(destination):\n total_length += total_read\n\n progress_bar = _init_progress_bar(total_length, destination, message)\n progress_bar.start()\n\n if os.path.exists(destination):\n mode = \"ab\"\n else:\n mode = \"wb\"\n with open(destination, mode) as destination_file:\n for buf in request_stream.iter_content(1024):\n destination_file.write(buf)\n if not is_dumb_terminal():\n total_read += len(buf)\n progress_bar.update(total_read)\n progress_bar.finish()\n", "path": "installer/vm_providers/_multipass/_windows.py"}]}
3,586
581
gh_patches_debug_491
rasdani/github-patches
git_diff
zulip__zulip-22637
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Show info about organization and Zulip in gear menu At present, it requires some digging to find a few key pieces of information about the Zulip organization one is viewing: - Organization name - Organization URL (if using the Desktop app) - For Zulip Cloud, the plan that the organization on. - For self-hosted Zulip, does the server need to be upgraded? What's the version of the server, and what's the current release version? We should address this by adding this information at the top of the gear settings menu. - For all users: Show organization name and URL - For all Zulip Cloud users: Show plan name with a link to `/plans`, e.g. "Zulip Cloud Free" - For owners of Zulip Cloud Free orgs: Show "Upgrade to Zulip Cloud Standard" link to `/upgrade` - For all self-hosted users: - Show Zulip server version (same as in the "About Zulip" widget); we'll need to test to make sure it looks reasonable for non-standard versions (e.g. forks, installs running off `main`). - If the server version is old, we should show an "Upgrade to the latest release (x.y)" linking to https://zulip.readthedocs.io/en/stable/production/upgrade-or-modify.html. We should probably show this link to all users, as server admins might not be owners/admins of the organization. ## Mockups <img width="1552" alt="popover-menu" src="https://user-images.githubusercontent.com/2090066/172440944-5dc8ee48-908f-4642-beb7-9ec141128a29.png"> <img width="1552" alt="dark-inbox-01" src="https://user-images.githubusercontent.com/2090066/172440973-12639e2a-3f42-408d-b976-27b01498ecda.png"> <img width="1608" alt="selfhosted-upgrade" src="https://user-images.githubusercontent.com/2090066/172441028-c0ce417f-e3db-4542-845f-10ba3fab98df.png"> **CZO discussion threads:** - [Design proposal (Zulip Cloud)](https://chat.zulip.org/#narrow/stream/101-design/topic/UI.20redesign.3A.20popover.20menu/near/1388585) - [Server upgrade notice](https://chat.zulip.org/#narrow/stream/101-design/topic/server.20upgrade.20notice) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/lib/capitalization.py` Content: ``` 1 import re 2 from typing import List, Match, Tuple 3 4 from bs4 import BeautifulSoup 5 6 # The phrases in this list will be ignored. The longest phrase is 7 # tried first; this removes the chance of smaller phrases changing 8 # the text before longer phrases are tried. 9 # The errors shown by `tools/check-capitalization` can be added to 10 # this list without any modification. 11 IGNORED_PHRASES = [ 12 # Proper nouns and acronyms 13 r"API", 14 r"APNS", 15 r"Botserver", 16 r"Cookie Bot", 17 r"DevAuthBackend", 18 r"GCM", 19 r"GitHub", 20 r"Gravatar", 21 r"Help Center", 22 r"HTTP", 23 r"ID", 24 r"IDs", 25 r"IP", 26 r"JSON", 27 r"Kerberos", 28 r"LDAP", 29 r"Markdown", 30 r"OTP", 31 r"Pivotal", 32 r"PM", 33 r"PMs", 34 r"Slack", 35 r"Google", 36 r"Terms of Service", 37 r"Tuesday", 38 r"URL", 39 r"UUID", 40 r"Webathena", 41 r"WordPress", 42 r"Zephyr", 43 r"Zoom", 44 r"Zulip", 45 r"Zulip Account Security", 46 r"Zulip Security", 47 r"Zulip Cloud Standard", 48 r"BigBlueButton", 49 # Code things 50 r"\.zuliprc", 51 # BeautifulSoup will remove <z-user> which is horribly confusing, 52 # so we need more of the sentence. 
53 r"<z-user></z-user> will have the same role", 54 # Things using "I" 55 r"I understand", 56 r"I'm", 57 r"I've", 58 # Specific short words 59 r"beta", 60 r"and", 61 r"bot", 62 r"e\.g\.", 63 r"enabled", 64 r"signups", 65 # Placeholders 66 r"keyword", 67 r"streamname", 68 r"user@example\.com", 69 # Fragments of larger strings 70 (r"your subscriptions on your Streams page"), 71 r"Add global time<br />Everyone sees global times in their own time zone\.", 72 r"user", 73 r"an unknown operating system", 74 r"Go to Settings", 75 # SPECIAL CASES 76 # Because topics usually are lower-case, this would look weird if it were capitalized 77 r"more topics", 78 # Used alone in a parenthetical where capitalized looks worse. 79 r"^deprecated$", 80 # Capital 'i' looks weird in reminders popover 81 r"in 1 hour", 82 r"in 20 minutes", 83 r"in 3 hours", 84 # these are used as topics 85 r"^new streams$", 86 r"^stream events$", 87 # These are used as example short names (e.g. an uncapitalized context): 88 r"^marketing$", 89 r"^cookie$", 90 # Used to refer custom time limits 91 r"\bN\b", 92 # Capital c feels obtrusive in clear status option 93 r"clear", 94 r"group private messages with \{recipient\}", 95 r"private messages with \{recipient\}", 96 r"private messages with yourself", 97 r"GIF", 98 # Emoji name placeholder 99 r"leafy green vegetable", 100 # Subdomain placeholder 101 r"your-organization-url", 102 # Used in invite modal 103 r"or", 104 # Used in GIPHY popover. 105 r"GIFs", 106 r"GIPHY", 107 # Used in our case studies 108 r"Technical University of Munich", 109 r"University of California San Diego", 110 # Used in stream creation form 111 r"email hidden", 112 # Use in compose box. 113 r"to send", 114 r"to add a new line", 115 # Used in showing Notification Bot read receipts message 116 "Notification Bot", 117 # Used in presence_enabled setting label 118 r"invisible mode off", 119 # Typeahead suggestions for "Pronouns" custom field type. 120 r"he/him", 121 r"she/her", 122 r"they/them", 123 ] 124 125 # Sort regexes in descending order of their lengths. As a result, the 126 # longer phrases will be ignored first. 127 IGNORED_PHRASES.sort(key=lambda regex: len(regex), reverse=True) 128 129 # Compile regexes to improve performance. This also extracts the 130 # text using BeautifulSoup and then removes extra whitespaces from 131 # it. This step enables us to add HTML in our regexes directly. 132 COMPILED_IGNORED_PHRASES = [ 133 re.compile(" ".join(BeautifulSoup(regex, "lxml").text.split())) for regex in IGNORED_PHRASES 134 ] 135 136 SPLIT_BOUNDARY = "?.!" # Used to split string into sentences. 137 SPLIT_BOUNDARY_REGEX = re.compile(rf"[{SPLIT_BOUNDARY}]") 138 139 # Regexes which check capitalization in sentences. 140 DISALLOWED = [ 141 r"^[a-z](?!\})", # Checks if the sentence starts with a lower case character. 142 r"^[A-Z][a-z]+[\sa-z0-9]+[A-Z]", # Checks if an upper case character exists 143 # after a lower case character when the first character is in upper case. 144 ] 145 DISALLOWED_REGEX = re.compile(r"|".join(DISALLOWED)) 146 147 BANNED_WORDS = { 148 "realm": "The term realm should not appear in user-facing strings. Use organization instead.", 149 } 150 151 152 def get_safe_phrase(phrase: str) -> str: 153 """ 154 Safe phrase is in lower case and doesn't contain characters which can 155 conflict with split boundaries. All conflicting characters are replaced 156 with low dash (_). 
157 """ 158 phrase = SPLIT_BOUNDARY_REGEX.sub("_", phrase) 159 return phrase.lower() 160 161 162 def replace_with_safe_phrase(matchobj: Match[str]) -> str: 163 """ 164 The idea is to convert IGNORED_PHRASES into safe phrases, see 165 `get_safe_phrase()` function. The only exception is when the 166 IGNORED_PHRASE is at the start of the text or after a split 167 boundary; in this case, we change the first letter of the phrase 168 to upper case. 169 """ 170 ignored_phrase = matchobj.group(0) 171 safe_string = get_safe_phrase(ignored_phrase) 172 173 start_index = matchobj.start() 174 complete_string = matchobj.string 175 176 is_string_start = start_index == 0 177 # We expect that there will be one space between split boundary 178 # and the next word. 179 punctuation = complete_string[max(start_index - 2, 0)] 180 is_after_split_boundary = punctuation in SPLIT_BOUNDARY 181 if is_string_start or is_after_split_boundary: 182 return safe_string.capitalize() 183 184 return safe_string 185 186 187 def get_safe_text(text: str) -> str: 188 """ 189 This returns text which is rendered by BeautifulSoup and is in the 190 form that can be split easily and has all IGNORED_PHRASES processed. 191 """ 192 soup = BeautifulSoup(text, "lxml") 193 text = " ".join(soup.text.split()) # Remove extra whitespaces. 194 for phrase_regex in COMPILED_IGNORED_PHRASES: 195 text = phrase_regex.sub(replace_with_safe_phrase, text) 196 197 return text 198 199 200 def is_capitalized(safe_text: str) -> bool: 201 sentences = SPLIT_BOUNDARY_REGEX.split(safe_text) 202 return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences) 203 204 205 def check_banned_words(text: str) -> List[str]: 206 lower_cased_text = text.lower() 207 errors = [] 208 for word, reason in BANNED_WORDS.items(): 209 if word in lower_cased_text: 210 # Hack: Should move this into BANNED_WORDS framework; for 211 # now, just hand-code the skips: 212 if "realm_name" in lower_cased_text: 213 continue 214 kwargs = dict(word=word, text=text, reason=reason) 215 msg = "{word} found in '{text}'. {reason}".format(**kwargs) 216 errors.append(msg) 217 218 return errors 219 220 221 def check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]: 222 errors = [] 223 ignored = [] 224 banned_word_errors = [] 225 for text in strings: 226 text = " ".join(text.split()) # Remove extra whitespaces. 227 safe_text = get_safe_text(text) 228 has_ignored_phrase = text != safe_text 229 capitalized = is_capitalized(safe_text) 230 if not capitalized: 231 errors.append(text) 232 elif has_ignored_phrase: 233 ignored.append(text) 234 235 banned_word_errors.extend(check_banned_words(text)) 236 237 return sorted(errors), sorted(ignored), sorted(banned_word_errors) 238 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py
--- a/tools/lib/capitalization.py
+++ b/tools/lib/capitalization.py
@@ -42,6 +42,7 @@
     r"Zephyr",
     r"Zoom",
     r"Zulip",
+    r"Zulip Server",
     r"Zulip Account Security",
     r"Zulip Security",
     r"Zulip Cloud Standard",
{"golden_diff": "diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py\n--- a/tools/lib/capitalization.py\n+++ b/tools/lib/capitalization.py\n@@ -42,6 +42,7 @@\n r\"Zephyr\",\n r\"Zoom\",\n r\"Zulip\",\n+ r\"Zulip Server\",\n r\"Zulip Account Security\",\n r\"Zulip Security\",\n r\"Zulip Cloud Standard\",\n", "issue": "Show info about organization and Zulip in gear menu\nAt present, it requires some digging to find a few key pieces of information about the Zulip organization one is viewing:\r\n\r\n- Organization name\r\n- Organization URL (if using the Desktop app)\r\n- For Zulip Cloud, the plan that the organization on.\r\n- For self-hosted Zulip, does the server need to be upgraded? What's the version of the server, and what's the current release version?\r\n\r\nWe should address this by adding this information at the top of the gear settings menu.\r\n\r\n- For all users: Show organization name and URL\r\n- For all Zulip Cloud users: Show plan name with a link to `/plans`, e.g. \"Zulip Cloud Free\"\r\n- For owners of Zulip Cloud Free orgs: Show \"Upgrade to Zulip Cloud Standard\" link to `/upgrade`\r\n- For all self-hosted users:\r\n - Show Zulip server version (same as in the \"About Zulip\" widget); we'll need to test to make sure it looks reasonable for non-standard versions (e.g. forks, installs running off `main`).\r\n - If the server version is old, we should show an \"Upgrade to the latest release (x.y)\" linking to https://zulip.readthedocs.io/en/stable/production/upgrade-or-modify.html. We should probably show this link to all users, as server admins might not be owners/admins of the organization.\r\n\r\n## Mockups\r\n<img width=\"1552\" alt=\"popover-menu\" src=\"https://user-images.githubusercontent.com/2090066/172440944-5dc8ee48-908f-4642-beb7-9ec141128a29.png\">\r\n\r\n<img width=\"1552\" alt=\"dark-inbox-01\" src=\"https://user-images.githubusercontent.com/2090066/172440973-12639e2a-3f42-408d-b976-27b01498ecda.png\">\r\n\r\n<img width=\"1608\" alt=\"selfhosted-upgrade\" src=\"https://user-images.githubusercontent.com/2090066/172441028-c0ce417f-e3db-4542-845f-10ba3fab98df.png\">\r\n\r\n**CZO discussion threads:**\r\n- [Design proposal (Zulip Cloud)](https://chat.zulip.org/#narrow/stream/101-design/topic/UI.20redesign.3A.20popover.20menu/near/1388585)\r\n- [Server upgrade notice](https://chat.zulip.org/#narrow/stream/101-design/topic/server.20upgrade.20notice)\r\n\r\n\n", "before_files": [{"content": "import re\nfrom typing import List, Match, Tuple\n\nfrom bs4 import BeautifulSoup\n\n# The phrases in this list will be ignored. 
The longest phrase is\n# tried first; this removes the chance of smaller phrases changing\n# the text before longer phrases are tried.\n# The errors shown by `tools/check-capitalization` can be added to\n# this list without any modification.\nIGNORED_PHRASES = [\n # Proper nouns and acronyms\n r\"API\",\n r\"APNS\",\n r\"Botserver\",\n r\"Cookie Bot\",\n r\"DevAuthBackend\",\n r\"GCM\",\n r\"GitHub\",\n r\"Gravatar\",\n r\"Help Center\",\n r\"HTTP\",\n r\"ID\",\n r\"IDs\",\n r\"IP\",\n r\"JSON\",\n r\"Kerberos\",\n r\"LDAP\",\n r\"Markdown\",\n r\"OTP\",\n r\"Pivotal\",\n r\"PM\",\n r\"PMs\",\n r\"Slack\",\n r\"Google\",\n r\"Terms of Service\",\n r\"Tuesday\",\n r\"URL\",\n r\"UUID\",\n r\"Webathena\",\n r\"WordPress\",\n r\"Zephyr\",\n r\"Zoom\",\n r\"Zulip\",\n r\"Zulip Account Security\",\n r\"Zulip Security\",\n r\"Zulip Cloud Standard\",\n r\"BigBlueButton\",\n # Code things\n r\"\\.zuliprc\",\n # BeautifulSoup will remove <z-user> which is horribly confusing,\n # so we need more of the sentence.\n r\"<z-user></z-user> will have the same role\",\n # Things using \"I\"\n r\"I understand\",\n r\"I'm\",\n r\"I've\",\n # Specific short words\n r\"beta\",\n r\"and\",\n r\"bot\",\n r\"e\\.g\\.\",\n r\"enabled\",\n r\"signups\",\n # Placeholders\n r\"keyword\",\n r\"streamname\",\n r\"user@example\\.com\",\n # Fragments of larger strings\n (r\"your subscriptions on your Streams page\"),\n r\"Add global time<br />Everyone sees global times in their own time zone\\.\",\n r\"user\",\n r\"an unknown operating system\",\n r\"Go to Settings\",\n # SPECIAL CASES\n # Because topics usually are lower-case, this would look weird if it were capitalized\n r\"more topics\",\n # Used alone in a parenthetical where capitalized looks worse.\n r\"^deprecated$\",\n # Capital 'i' looks weird in reminders popover\n r\"in 1 hour\",\n r\"in 20 minutes\",\n r\"in 3 hours\",\n # these are used as topics\n r\"^new streams$\",\n r\"^stream events$\",\n # These are used as example short names (e.g. an uncapitalized context):\n r\"^marketing$\",\n r\"^cookie$\",\n # Used to refer custom time limits\n r\"\\bN\\b\",\n # Capital c feels obtrusive in clear status option\n r\"clear\",\n r\"group private messages with \\{recipient\\}\",\n r\"private messages with \\{recipient\\}\",\n r\"private messages with yourself\",\n r\"GIF\",\n # Emoji name placeholder\n r\"leafy green vegetable\",\n # Subdomain placeholder\n r\"your-organization-url\",\n # Used in invite modal\n r\"or\",\n # Used in GIPHY popover.\n r\"GIFs\",\n r\"GIPHY\",\n # Used in our case studies\n r\"Technical University of Munich\",\n r\"University of California San Diego\",\n # Used in stream creation form\n r\"email hidden\",\n # Use in compose box.\n r\"to send\",\n r\"to add a new line\",\n # Used in showing Notification Bot read receipts message\n \"Notification Bot\",\n # Used in presence_enabled setting label\n r\"invisible mode off\",\n # Typeahead suggestions for \"Pronouns\" custom field type.\n r\"he/him\",\n r\"she/her\",\n r\"they/them\",\n]\n\n# Sort regexes in descending order of their lengths. As a result, the\n# longer phrases will be ignored first.\nIGNORED_PHRASES.sort(key=lambda regex: len(regex), reverse=True)\n\n# Compile regexes to improve performance. This also extracts the\n# text using BeautifulSoup and then removes extra whitespaces from\n# it. 
This step enables us to add HTML in our regexes directly.\nCOMPILED_IGNORED_PHRASES = [\n re.compile(\" \".join(BeautifulSoup(regex, \"lxml\").text.split())) for regex in IGNORED_PHRASES\n]\n\nSPLIT_BOUNDARY = \"?.!\" # Used to split string into sentences.\nSPLIT_BOUNDARY_REGEX = re.compile(rf\"[{SPLIT_BOUNDARY}]\")\n\n# Regexes which check capitalization in sentences.\nDISALLOWED = [\n r\"^[a-z](?!\\})\", # Checks if the sentence starts with a lower case character.\n r\"^[A-Z][a-z]+[\\sa-z0-9]+[A-Z]\", # Checks if an upper case character exists\n # after a lower case character when the first character is in upper case.\n]\nDISALLOWED_REGEX = re.compile(r\"|\".join(DISALLOWED))\n\nBANNED_WORDS = {\n \"realm\": \"The term realm should not appear in user-facing strings. Use organization instead.\",\n}\n\n\ndef get_safe_phrase(phrase: str) -> str:\n \"\"\"\n Safe phrase is in lower case and doesn't contain characters which can\n conflict with split boundaries. All conflicting characters are replaced\n with low dash (_).\n \"\"\"\n phrase = SPLIT_BOUNDARY_REGEX.sub(\"_\", phrase)\n return phrase.lower()\n\n\ndef replace_with_safe_phrase(matchobj: Match[str]) -> str:\n \"\"\"\n The idea is to convert IGNORED_PHRASES into safe phrases, see\n `get_safe_phrase()` function. The only exception is when the\n IGNORED_PHRASE is at the start of the text or after a split\n boundary; in this case, we change the first letter of the phrase\n to upper case.\n \"\"\"\n ignored_phrase = matchobj.group(0)\n safe_string = get_safe_phrase(ignored_phrase)\n\n start_index = matchobj.start()\n complete_string = matchobj.string\n\n is_string_start = start_index == 0\n # We expect that there will be one space between split boundary\n # and the next word.\n punctuation = complete_string[max(start_index - 2, 0)]\n is_after_split_boundary = punctuation in SPLIT_BOUNDARY\n if is_string_start or is_after_split_boundary:\n return safe_string.capitalize()\n\n return safe_string\n\n\ndef get_safe_text(text: str) -> str:\n \"\"\"\n This returns text which is rendered by BeautifulSoup and is in the\n form that can be split easily and has all IGNORED_PHRASES processed.\n \"\"\"\n soup = BeautifulSoup(text, \"lxml\")\n text = \" \".join(soup.text.split()) # Remove extra whitespaces.\n for phrase_regex in COMPILED_IGNORED_PHRASES:\n text = phrase_regex.sub(replace_with_safe_phrase, text)\n\n return text\n\n\ndef is_capitalized(safe_text: str) -> bool:\n sentences = SPLIT_BOUNDARY_REGEX.split(safe_text)\n return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences)\n\n\ndef check_banned_words(text: str) -> List[str]:\n lower_cased_text = text.lower()\n errors = []\n for word, reason in BANNED_WORDS.items():\n if word in lower_cased_text:\n # Hack: Should move this into BANNED_WORDS framework; for\n # now, just hand-code the skips:\n if \"realm_name\" in lower_cased_text:\n continue\n kwargs = dict(word=word, text=text, reason=reason)\n msg = \"{word} found in '{text}'. 
{reason}\".format(**kwargs)\n errors.append(msg)\n\n return errors\n\n\ndef check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]:\n errors = []\n ignored = []\n banned_word_errors = []\n for text in strings:\n text = \" \".join(text.split()) # Remove extra whitespaces.\n safe_text = get_safe_text(text)\n has_ignored_phrase = text != safe_text\n capitalized = is_capitalized(safe_text)\n if not capitalized:\n errors.append(text)\n elif has_ignored_phrase:\n ignored.append(text)\n\n banned_word_errors.extend(check_banned_words(text))\n\n return sorted(errors), sorted(ignored), sorted(banned_word_errors)\n", "path": "tools/lib/capitalization.py"}], "after_files": [{"content": "import re\nfrom typing import List, Match, Tuple\n\nfrom bs4 import BeautifulSoup\n\n# The phrases in this list will be ignored. The longest phrase is\n# tried first; this removes the chance of smaller phrases changing\n# the text before longer phrases are tried.\n# The errors shown by `tools/check-capitalization` can be added to\n# this list without any modification.\nIGNORED_PHRASES = [\n # Proper nouns and acronyms\n r\"API\",\n r\"APNS\",\n r\"Botserver\",\n r\"Cookie Bot\",\n r\"DevAuthBackend\",\n r\"GCM\",\n r\"GitHub\",\n r\"Gravatar\",\n r\"Help Center\",\n r\"HTTP\",\n r\"ID\",\n r\"IDs\",\n r\"IP\",\n r\"JSON\",\n r\"Kerberos\",\n r\"LDAP\",\n r\"Markdown\",\n r\"OTP\",\n r\"Pivotal\",\n r\"PM\",\n r\"PMs\",\n r\"Slack\",\n r\"Google\",\n r\"Terms of Service\",\n r\"Tuesday\",\n r\"URL\",\n r\"UUID\",\n r\"Webathena\",\n r\"WordPress\",\n r\"Zephyr\",\n r\"Zoom\",\n r\"Zulip\",\n r\"Zulip Server\",\n r\"Zulip Account Security\",\n r\"Zulip Security\",\n r\"Zulip Cloud Standard\",\n r\"BigBlueButton\",\n # Code things\n r\"\\.zuliprc\",\n # BeautifulSoup will remove <z-user> which is horribly confusing,\n # so we need more of the sentence.\n r\"<z-user></z-user> will have the same role\",\n # Things using \"I\"\n r\"I understand\",\n r\"I'm\",\n r\"I've\",\n # Specific short words\n r\"beta\",\n r\"and\",\n r\"bot\",\n r\"e\\.g\\.\",\n r\"enabled\",\n r\"signups\",\n # Placeholders\n r\"keyword\",\n r\"streamname\",\n r\"user@example\\.com\",\n # Fragments of larger strings\n (r\"your subscriptions on your Streams page\"),\n r\"Add global time<br />Everyone sees global times in their own time zone\\.\",\n r\"user\",\n r\"an unknown operating system\",\n r\"Go to Settings\",\n # SPECIAL CASES\n # Because topics usually are lower-case, this would look weird if it were capitalized\n r\"more topics\",\n # Used alone in a parenthetical where capitalized looks worse.\n r\"^deprecated$\",\n # Capital 'i' looks weird in reminders popover\n r\"in 1 hour\",\n r\"in 20 minutes\",\n r\"in 3 hours\",\n # these are used as topics\n r\"^new streams$\",\n r\"^stream events$\",\n # These are used as example short names (e.g. 
an uncapitalized context):\n r\"^marketing$\",\n r\"^cookie$\",\n # Used to refer custom time limits\n r\"\\bN\\b\",\n # Capital c feels obtrusive in clear status option\n r\"clear\",\n r\"group private messages with \\{recipient\\}\",\n r\"private messages with \\{recipient\\}\",\n r\"private messages with yourself\",\n r\"GIF\",\n # Emoji name placeholder\n r\"leafy green vegetable\",\n # Subdomain placeholder\n r\"your-organization-url\",\n # Used in invite modal\n r\"or\",\n # Used in GIPHY popover.\n r\"GIFs\",\n r\"GIPHY\",\n # Used in our case studies\n r\"Technical University of Munich\",\n r\"University of California San Diego\",\n # Used in stream creation form\n r\"email hidden\",\n # Use in compose box.\n r\"to send\",\n r\"to add a new line\",\n # Used in showing Notification Bot read receipts message\n \"Notification Bot\",\n # Used in presence_enabled setting label\n r\"invisible mode off\",\n # Typeahead suggestions for \"Pronouns\" custom field type.\n r\"he/him\",\n r\"she/her\",\n r\"they/them\",\n]\n\n# Sort regexes in descending order of their lengths. As a result, the\n# longer phrases will be ignored first.\nIGNORED_PHRASES.sort(key=lambda regex: len(regex), reverse=True)\n\n# Compile regexes to improve performance. This also extracts the\n# text using BeautifulSoup and then removes extra whitespaces from\n# it. This step enables us to add HTML in our regexes directly.\nCOMPILED_IGNORED_PHRASES = [\n re.compile(\" \".join(BeautifulSoup(regex, \"lxml\").text.split())) for regex in IGNORED_PHRASES\n]\n\nSPLIT_BOUNDARY = \"?.!\" # Used to split string into sentences.\nSPLIT_BOUNDARY_REGEX = re.compile(rf\"[{SPLIT_BOUNDARY}]\")\n\n# Regexes which check capitalization in sentences.\nDISALLOWED = [\n r\"^[a-z](?!\\})\", # Checks if the sentence starts with a lower case character.\n r\"^[A-Z][a-z]+[\\sa-z0-9]+[A-Z]\", # Checks if an upper case character exists\n # after a lower case character when the first character is in upper case.\n]\nDISALLOWED_REGEX = re.compile(r\"|\".join(DISALLOWED))\n\nBANNED_WORDS = {\n \"realm\": \"The term realm should not appear in user-facing strings. Use organization instead.\",\n}\n\n\ndef get_safe_phrase(phrase: str) -> str:\n \"\"\"\n Safe phrase is in lower case and doesn't contain characters which can\n conflict with split boundaries. All conflicting characters are replaced\n with low dash (_).\n \"\"\"\n phrase = SPLIT_BOUNDARY_REGEX.sub(\"_\", phrase)\n return phrase.lower()\n\n\ndef replace_with_safe_phrase(matchobj: Match[str]) -> str:\n \"\"\"\n The idea is to convert IGNORED_PHRASES into safe phrases, see\n `get_safe_phrase()` function. 
The only exception is when the\n IGNORED_PHRASE is at the start of the text or after a split\n boundary; in this case, we change the first letter of the phrase\n to upper case.\n \"\"\"\n ignored_phrase = matchobj.group(0)\n safe_string = get_safe_phrase(ignored_phrase)\n\n start_index = matchobj.start()\n complete_string = matchobj.string\n\n is_string_start = start_index == 0\n # We expect that there will be one space between split boundary\n # and the next word.\n punctuation = complete_string[max(start_index - 2, 0)]\n is_after_split_boundary = punctuation in SPLIT_BOUNDARY\n if is_string_start or is_after_split_boundary:\n return safe_string.capitalize()\n\n return safe_string\n\n\ndef get_safe_text(text: str) -> str:\n \"\"\"\n This returns text which is rendered by BeautifulSoup and is in the\n form that can be split easily and has all IGNORED_PHRASES processed.\n \"\"\"\n soup = BeautifulSoup(text, \"lxml\")\n text = \" \".join(soup.text.split()) # Remove extra whitespaces.\n for phrase_regex in COMPILED_IGNORED_PHRASES:\n text = phrase_regex.sub(replace_with_safe_phrase, text)\n\n return text\n\n\ndef is_capitalized(safe_text: str) -> bool:\n sentences = SPLIT_BOUNDARY_REGEX.split(safe_text)\n return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences)\n\n\ndef check_banned_words(text: str) -> List[str]:\n lower_cased_text = text.lower()\n errors = []\n for word, reason in BANNED_WORDS.items():\n if word in lower_cased_text:\n # Hack: Should move this into BANNED_WORDS framework; for\n # now, just hand-code the skips:\n if \"realm_name\" in lower_cased_text:\n continue\n kwargs = dict(word=word, text=text, reason=reason)\n msg = \"{word} found in '{text}'. {reason}\".format(**kwargs)\n errors.append(msg)\n\n return errors\n\n\ndef check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]:\n errors = []\n ignored = []\n banned_word_errors = []\n for text in strings:\n text = \" \".join(text.split()) # Remove extra whitespaces.\n safe_text = get_safe_text(text)\n has_ignored_phrase = text != safe_text\n capitalized = is_capitalized(safe_text)\n if not capitalized:\n errors.append(text)\n elif has_ignored_phrase:\n ignored.append(text)\n\n banned_word_errors.extend(check_banned_words(text))\n\n return sorted(errors), sorted(ignored), sorted(banned_word_errors)\n", "path": "tools/lib/capitalization.py"}]}
3,409
106
gh_patches_debug_36161
rasdani/github-patches
git_diff
DistrictDataLabs__yellowbrick-530
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Incorrect parameter values in K-Elbow Visualizer docstring Initially in the K-Elbow Visualizer's docstring, the possible values for the parameter `metric` are named as `distortion_score`, `silhouette_score`, and `calinski_harabaz_score`. However, using these values returns the following error: `YellowbrickValueError: '{}' is not a defined metric use one of distortion, silhouette, or calinski_harabaz` However, the correct names&mdash;`distortion`, `silhouette`, `calinski_harabaz`&mdash;are listed corrected further down under `Parameters`. ![elbow-documentation](https://user-images.githubusercontent.com/19958803/43358572-0f72b404-9262-11e8-8201-7a9ef6cd3fec.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `yellowbrick/cluster/elbow.py` Content: ``` 1 # yellowbrick.cluster.elbow 2 # Implements the elbow method for determining the optimal number of clusters. 3 # 4 # Author: Benjamin Bengfort <[email protected]> 5 # Created: Thu Mar 23 22:36:31 2017 -0400 6 # 7 # Copyright (C) 2016 District Data Labs 8 # For license information, see LICENSE.txt 9 # 10 # ID: elbow.py [5a370c8] [email protected] $ 11 12 """ 13 Implements the elbow method for determining the optimal number of clusters. 14 https://bl.ocks.org/rpgove/0060ff3b656618e9136b 15 """ 16 17 ########################################################################## 18 ## Imports 19 ########################################################################## 20 21 import time 22 import numpy as np 23 import scipy.sparse as sp 24 25 from .base import ClusteringScoreVisualizer 26 from ..exceptions import YellowbrickValueError 27 28 from sklearn.metrics import silhouette_score 29 from sklearn.metrics import calinski_harabaz_score 30 from sklearn.metrics.pairwise import pairwise_distances 31 from sklearn.preprocessing import LabelEncoder 32 33 34 ## Packages for export 35 __all__ = [ 36 "KElbowVisualizer", "distortion_score" 37 ] 38 39 40 ########################################################################## 41 ## Metrics 42 ########################################################################## 43 44 def distortion_score(X, labels, metric='euclidean'): 45 """ 46 Compute the mean distortion of all samples. 47 48 The distortion is computed as the the sum of the squared distances between 49 each observation and its closest centroid. Logically, this is the metric 50 that K-Means attempts to minimize as it is fitting the model. 51 52 .. seealso:: http://kldavenport.com/the-cost-function-of-k-means/ 53 54 Parameters 55 ---------- 56 X : array, shape = [n_samples, n_features] or [n_samples_a, n_samples_a] 57 Array of pairwise distances between samples if metric == "precomputed" 58 or a feature array for computing distances against the labels. 59 60 labels : array, shape = [n_samples] 61 Predicted labels for each sample 62 63 metric : string 64 The metric to use when calculating distance between instances in a 65 feature array. If metric is a string, it must be one of the options 66 allowed by `sklearn.metrics.pairwise.pairwise_distances 67 <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html#sklearn.metrics.pairwise.pairwise_distances>`_ 68 69 .. 
todo:: add sample_size and random_state kwds similar to silhouette_score 70 """ 71 # Encode labels to get unique centers and groups 72 le = LabelEncoder() 73 labels = le.fit_transform(labels) 74 unique_labels = le.classes_ 75 76 # Sum of the distortions 77 distortion = 0 78 79 # Loop through each label (center) to compute the centroid 80 for current_label in unique_labels: 81 # Mask the instances that belong to the current label 82 mask = labels == current_label 83 instances = X[mask] 84 85 # Compute the center of these instances 86 center = instances.mean(axis=0) 87 88 # NOTE: csc_matrix and csr_matrix mean returns a 2D array, numpy.mean 89 # returns an array of 1 dimension less than the input. We expect 90 # instances to be a 2D array, therefore to do pairwise computation we 91 # require center to be a 2D array with a single row (the center). 92 # See #370 for more detail. 93 if not sp.issparse(instances): 94 center = np.array([center]) 95 96 # Compute the square distances from the instances to the center 97 distances = pairwise_distances(instances, center, metric=metric) 98 distances = distances ** 2 99 100 # Add the mean square distance to the distortion 101 distortion += distances.mean() 102 103 return distortion 104 105 106 ########################################################################## 107 ## Elbow Method 108 ########################################################################## 109 110 KELBOW_SCOREMAP = { 111 "distortion": distortion_score, 112 "silhouette": silhouette_score, 113 "calinski_harabaz": calinski_harabaz_score, 114 } 115 116 117 class KElbowVisualizer(ClusteringScoreVisualizer): 118 """ 119 The K-Elbow Visualizer implements the "elbow" method of selecting the 120 optimal number of clusters for K-means clustering. K-means is a simple 121 unsupervised machine learning algorithm that groups data into a specified 122 number (k) of clusters. Because the user must specify in advance what k to 123 choose, the algorithm is somewhat naive -- it assigns all members to k 124 clusters even if that is not the right k for the dataset. 125 126 The elbow method runs k-means clustering on the dataset for a range of 127 values for k (say from 1-10) and then for each value of k computes an 128 average score for all clusters. By default, the ``distortion_score`` is 129 computed, the sum of square distances from each point to its assigned 130 center. Other metrics can also be used such as the ``silhouette_score``, 131 the mean silhouette coefficient for all samples or the 132 ``calinski_harabaz_score``, which computes the ratio of dispersion between 133 and within clusters. 134 135 When these overall metrics for each model are plotted, it is possible to 136 visually determine the best value for K. If the line chart looks like an 137 arm, then the "elbow" (the point of inflection on the curve) is the best 138 value of k. The "arm" can be either up or down, but if there is a strong 139 inflection point, it is a good indication that the underlying model fits 140 best at that point. 141 142 Parameters 143 ---------- 144 145 model : a Scikit-Learn clusterer 146 Should be an instance of a clusterer, specifically ``KMeans`` or 147 ``MiniBatchKMeans``. If it is not a clusterer, an exception is raised. 148 149 ax : matplotlib Axes, default: None 150 The axes to plot the figure on. If None is passed in the current axes 151 will be used (or generated if required). 152 153 k : integer or tuple 154 The range of k to compute silhouette scores for. 
If a single integer 155 is specified, then will compute the range (2,k) otherwise the 156 specified range in the tuple is used. 157 158 metric : string, default: ``"distortion"`` 159 Select the scoring metric to evaluate the clusters. The default is the 160 mean distortion, defined by the sum of squared distances between each 161 observation and its closest centroid. Other metrics include: 162 163 - **distortion**: mean sum of squared distances to centers 164 - **silhouette**: mean ratio of intra-cluster and nearest-cluster distance 165 - **calinski_harabaz**: ratio of within to between cluster dispersion 166 167 timings : bool, default: True 168 Display the fitting time per k to evaluate the amount of time required 169 to train the clustering model. 170 171 kwargs : dict 172 Keyword arguments that are passed to the base class and may influence 173 the visualization as defined in other Visualizers. 174 175 Examples 176 -------- 177 178 >>> from yellowbrick.cluster import KElbowVisualizer 179 >>> from sklearn.cluster import KMeans 180 >>> model = KElbowVisualizer(KMeans(), k=10) 181 >>> model.fit(X) 182 >>> model.poof() 183 184 Notes 185 ----- 186 187 If you get a visualizer that doesn't have an elbow or inflection point, 188 then this method may not be working. The elbow method does not work well 189 if the data is not very clustered; in this case you might see a smooth 190 curve and the value of k is unclear. Other scoring methods such as BIC or 191 SSE also can be used to explore if clustering is a correct choice. 192 193 For a discussion on the Elbow method, read more at 194 `Robert Gove's Block <https://bl.ocks.org/rpgove/0060ff3b656618e9136b>`_. 195 196 .. todo:: add parallelization option for performance 197 .. todo:: add different metrics for scores and silhoutte 198 .. todo:: add timing information about how long its taking 199 """ 200 201 def __init__(self, model, ax=None, k=10, 202 metric="distortion", timings=True, **kwargs): 203 super(KElbowVisualizer, self).__init__(model, ax=ax, **kwargs) 204 205 # Get the scoring method 206 if metric not in KELBOW_SCOREMAP: 207 raise YellowbrickValueError( 208 "'{}' is not a defined metric " 209 "use one of distortion, silhouette, or calinski_harabaz" 210 ) 211 212 # Store the arguments 213 self.scoring_metric = KELBOW_SCOREMAP[metric] 214 self.timings = timings 215 216 # Convert K into a tuple argument if an integer 217 if isinstance(k, int): 218 k = (2, k+1) 219 220 # Expand k in to the values we will use, capturing exceptions 221 try: 222 k = tuple(k) 223 self.k_values_ = list(range(*k)) 224 except: 225 raise YellowbrickValueError(( 226 "Specify a range or maximal K value, the value '{}' " 227 "is not a valid argument for K.".format(k) 228 )) 229 230 231 # Holds the values of the silhoutte scores 232 self.k_scores_ = None 233 234 def fit(self, X, y=None, **kwargs): 235 """ 236 Fits n KMeans models where n is the length of ``self.k_values_``, 237 storing the silhoutte scores in the ``self.k_scores_`` attribute. 238 This method finishes up by calling draw to create the plot. 
239 """ 240 241 self.k_scores_ = [] 242 self.k_timers_ = [] 243 244 for k in self.k_values_: 245 # Compute the start time for each model 246 start = time.time() 247 248 # Set the k value and fit the model 249 self.estimator.set_params(n_clusters=k) 250 self.estimator.fit(X) 251 252 # Append the time and score to our plottable metrics 253 self.k_timers_.append(time.time() - start) 254 self.k_scores_.append( 255 self.scoring_metric(X, self.estimator.labels_) 256 ) 257 258 self.draw() 259 260 return self 261 262 def draw(self): 263 """ 264 Draw the elbow curve for the specified scores and values of K. 265 """ 266 # Plot the silhouette score against k 267 self.ax.plot(self.k_values_, self.k_scores_, marker="D", label="score") 268 269 # If we're going to plot the timings, create a twinx axis 270 if self.timings: 271 self.axes = [self.ax, self.ax.twinx()] 272 self.axes[1].plot( 273 self.k_values_, self.k_timers_, label="fit time", 274 c='g', marker="o", linestyle="--", alpha=0.75, 275 ) 276 277 return self.ax 278 279 def finalize(self): 280 """ 281 Prepare the figure for rendering by setting the title as well as the 282 X and Y axis labels and adding the legend. 283 """ 284 # Get the metric name 285 metric = self.scoring_metric.__name__.replace("_", " ").title() 286 287 # Set the title 288 self.set_title( 289 '{} Elbow for {} Clustering'.format(metric, self.name) 290 ) 291 292 # Set the x and y labels 293 self.ax.set_xlabel('k') 294 self.ax.set_ylabel(metric.lower()) 295 296 # Set the second y axis labels 297 if self.timings: 298 self.axes[1].grid(False) 299 self.axes[1].set_ylabel("fit time (seconds)", color='g') 300 self.axes[1].tick_params('y', colors='g') 301 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/yellowbrick/cluster/elbow.py b/yellowbrick/cluster/elbow.py
--- a/yellowbrick/cluster/elbow.py
+++ b/yellowbrick/cluster/elbow.py
@@ -125,11 +125,11 @@
 
     The elbow method runs k-means clustering on the dataset for a range of
     values for k (say from 1-10) and then for each value of k computes an
-    average score for all clusters. By default, the ``distortion_score`` is
+    average score for all clusters. By default, the ``distortion`` score is
     computed, the sum of square distances from each point to its assigned
-    center. Other metrics can also be used such as the ``silhouette_score``,
+    center. Other metrics can also be used such as the ``silhouette`` score,
     the mean silhouette coefficient for all samples or the
-    ``calinski_harabaz_score``, which computes the ratio of dispersion between
+    ``calinski_harabaz`` score, which computes the ratio of dispersion between
     and within clusters.
 
     When these overall metrics for each model are plotted, it is possible to
@@ -186,16 +186,21 @@
 
     If you get a visualizer that doesn't have an elbow or inflection point,
     then this method may not be working. The elbow method does not work well
-    if the data is not very clustered; in this case you might see a smooth
-    curve and the value of k is unclear. Other scoring methods such as BIC or
-    SSE also can be used to explore if clustering is a correct choice.
+    if the data is not very clustered; in this case, you might see a smooth
+    curve and the value of k is unclear. Other scoring methods, such as BIC or
+    SSE, also can be used to explore if clustering is a correct choice.
 
     For a discussion on the Elbow method, read more at
     `Robert Gove's Block <https://bl.ocks.org/rpgove/0060ff3b656618e9136b>`_.
+    
+    .. seealso:: The scikit-learn documentation for the `silhouette_score
+        <https://bit.ly/2LYWjYb>`_ and `calinski_harabaz_score
+        <https://bit.ly/2LW3Zu9>`_. The default, `distortion_score`, is
+        implemented in`yellowbrick.cluster.elbow`.
 
     .. todo:: add parallelization option for performance
-    .. todo:: add different metrics for scores and silhoutte
-    .. todo:: add timing information about how long its taking
+    .. todo:: add different metrics for scores and silhouette
+    .. todo:: add timing information about how long it's taking
     """
 
     def __init__(self, model, ax=None, k=10,
{"golden_diff": "diff --git a/yellowbrick/cluster/elbow.py b/yellowbrick/cluster/elbow.py\n--- a/yellowbrick/cluster/elbow.py\n+++ b/yellowbrick/cluster/elbow.py\n@@ -125,11 +125,11 @@\n \n The elbow method runs k-means clustering on the dataset for a range of\n values for k (say from 1-10) and then for each value of k computes an\n- average score for all clusters. By default, the ``distortion_score`` is\n+ average score for all clusters. By default, the ``distortion`` score is\n computed, the sum of square distances from each point to its assigned\n- center. Other metrics can also be used such as the ``silhouette_score``,\n+ center. Other metrics can also be used such as the ``silhouette`` score,\n the mean silhouette coefficient for all samples or the\n- ``calinski_harabaz_score``, which computes the ratio of dispersion between\n+ ``calinski_harabaz`` score, which computes the ratio of dispersion between\n and within clusters.\n \n When these overall metrics for each model are plotted, it is possible to\n@@ -186,16 +186,21 @@\n \n If you get a visualizer that doesn't have an elbow or inflection point,\n then this method may not be working. The elbow method does not work well\n- if the data is not very clustered; in this case you might see a smooth\n- curve and the value of k is unclear. Other scoring methods such as BIC or\n- SSE also can be used to explore if clustering is a correct choice.\n+ if the data is not very clustered; in this case, you might see a smooth\n+ curve and the value of k is unclear. Other scoring methods, such as BIC or\n+ SSE, also can be used to explore if clustering is a correct choice.\n \n For a discussion on the Elbow method, read more at\n `Robert Gove's Block <https://bl.ocks.org/rpgove/0060ff3b656618e9136b>`_.\n+ \n+ .. seealso:: The scikit-learn documentation for the `silhouette_score\n+ <https://bit.ly/2LYWjYb>`_ and `calinski_harabaz_score\n+ <https://bit.ly/2LW3Zu9>`_. The default, `distortion_score`, is\n+ implemented in`yellowbrick.cluster.elbow`.\n \n .. todo:: add parallelization option for performance\n- .. todo:: add different metrics for scores and silhoutte\n- .. todo:: add timing information about how long its taking\n+ .. todo:: add different metrics for scores and silhouette\n+ .. todo:: add timing information about how long it's taking\n \"\"\"\n \n def __init__(self, model, ax=None, k=10,\n", "issue": "Incorrect parameter values in K-Elbow Visualizer docstring\nInitially in the K-Elbow Visualizer's docstring, the possible values for the parameter `metric` are named as `distortion_score`, `silhouette_score`, and `calinski_harabaz_score`. 
However, using these values returns the following error:\r\n\r\n`YellowbrickValueError: '{}' is not a defined metric use one of distortion, silhouette, or calinski_harabaz`\r\n\r\nHowever, the correct names&mdash;`distortion`, `silhouette`, `calinski_harabaz`&mdash;are listed corrected further down under `Parameters`.\r\n\r\n![elbow-documentation](https://user-images.githubusercontent.com/19958803/43358572-0f72b404-9262-11e8-8201-7a9ef6cd3fec.png)\r\n\n", "before_files": [{"content": "# yellowbrick.cluster.elbow\n# Implements the elbow method for determining the optimal number of clusters.\n#\n# Author: Benjamin Bengfort <[email protected]>\n# Created: Thu Mar 23 22:36:31 2017 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: elbow.py [5a370c8] [email protected] $\n\n\"\"\"\nImplements the elbow method for determining the optimal number of clusters.\nhttps://bl.ocks.org/rpgove/0060ff3b656618e9136b\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport time\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom .base import ClusteringScoreVisualizer\nfrom ..exceptions import YellowbrickValueError\n\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.metrics import calinski_harabaz_score\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.preprocessing import LabelEncoder\n\n\n## Packages for export\n__all__ = [\n \"KElbowVisualizer\", \"distortion_score\"\n]\n\n\n##########################################################################\n## Metrics\n##########################################################################\n\ndef distortion_score(X, labels, metric='euclidean'):\n \"\"\"\n Compute the mean distortion of all samples.\n\n The distortion is computed as the the sum of the squared distances between\n each observation and its closest centroid. Logically, this is the metric\n that K-Means attempts to minimize as it is fitting the model.\n\n .. seealso:: http://kldavenport.com/the-cost-function-of-k-means/\n\n Parameters\n ----------\n X : array, shape = [n_samples, n_features] or [n_samples_a, n_samples_a]\n Array of pairwise distances between samples if metric == \"precomputed\"\n or a feature array for computing distances against the labels.\n\n labels : array, shape = [n_samples]\n Predicted labels for each sample\n\n metric : string\n The metric to use when calculating distance between instances in a\n feature array. If metric is a string, it must be one of the options\n allowed by `sklearn.metrics.pairwise.pairwise_distances\n <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html#sklearn.metrics.pairwise.pairwise_distances>`_\n\n .. todo:: add sample_size and random_state kwds similar to silhouette_score\n \"\"\"\n # Encode labels to get unique centers and groups\n le = LabelEncoder()\n labels = le.fit_transform(labels)\n unique_labels = le.classes_\n\n # Sum of the distortions\n distortion = 0\n\n # Loop through each label (center) to compute the centroid\n for current_label in unique_labels:\n # Mask the instances that belong to the current label\n mask = labels == current_label\n instances = X[mask]\n\n # Compute the center of these instances\n center = instances.mean(axis=0)\n\n # NOTE: csc_matrix and csr_matrix mean returns a 2D array, numpy.mean\n # returns an array of 1 dimension less than the input. 
We expect\n # instances to be a 2D array, therefore to do pairwise computation we\n # require center to be a 2D array with a single row (the center).\n # See #370 for more detail.\n if not sp.issparse(instances):\n center = np.array([center])\n\n # Compute the square distances from the instances to the center\n distances = pairwise_distances(instances, center, metric=metric)\n distances = distances ** 2\n\n # Add the mean square distance to the distortion\n distortion += distances.mean()\n\n return distortion\n\n\n##########################################################################\n## Elbow Method\n##########################################################################\n\nKELBOW_SCOREMAP = {\n \"distortion\": distortion_score,\n \"silhouette\": silhouette_score,\n \"calinski_harabaz\": calinski_harabaz_score,\n}\n\n\nclass KElbowVisualizer(ClusteringScoreVisualizer):\n \"\"\"\n The K-Elbow Visualizer implements the \"elbow\" method of selecting the\n optimal number of clusters for K-means clustering. K-means is a simple\n unsupervised machine learning algorithm that groups data into a specified\n number (k) of clusters. Because the user must specify in advance what k to\n choose, the algorithm is somewhat naive -- it assigns all members to k\n clusters even if that is not the right k for the dataset.\n\n The elbow method runs k-means clustering on the dataset for a range of\n values for k (say from 1-10) and then for each value of k computes an\n average score for all clusters. By default, the ``distortion_score`` is\n computed, the sum of square distances from each point to its assigned\n center. Other metrics can also be used such as the ``silhouette_score``,\n the mean silhouette coefficient for all samples or the\n ``calinski_harabaz_score``, which computes the ratio of dispersion between\n and within clusters.\n\n When these overall metrics for each model are plotted, it is possible to\n visually determine the best value for K. If the line chart looks like an\n arm, then the \"elbow\" (the point of inflection on the curve) is the best\n value of k. The \"arm\" can be either up or down, but if there is a strong\n inflection point, it is a good indication that the underlying model fits\n best at that point.\n\n Parameters\n ----------\n\n model : a Scikit-Learn clusterer\n Should be an instance of a clusterer, specifically ``KMeans`` or\n ``MiniBatchKMeans``. If it is not a clusterer, an exception is raised.\n\n ax : matplotlib Axes, default: None\n The axes to plot the figure on. If None is passed in the current axes\n will be used (or generated if required).\n\n k : integer or tuple\n The range of k to compute silhouette scores for. If a single integer\n is specified, then will compute the range (2,k) otherwise the\n specified range in the tuple is used.\n\n metric : string, default: ``\"distortion\"``\n Select the scoring metric to evaluate the clusters. The default is the\n mean distortion, defined by the sum of squared distances between each\n observation and its closest centroid. 
Other metrics include:\n\n - **distortion**: mean sum of squared distances to centers\n - **silhouette**: mean ratio of intra-cluster and nearest-cluster distance\n - **calinski_harabaz**: ratio of within to between cluster dispersion\n\n timings : bool, default: True\n Display the fitting time per k to evaluate the amount of time required\n to train the clustering model.\n\n kwargs : dict\n Keyword arguments that are passed to the base class and may influence\n the visualization as defined in other Visualizers.\n\n Examples\n --------\n\n >>> from yellowbrick.cluster import KElbowVisualizer\n >>> from sklearn.cluster import KMeans\n >>> model = KElbowVisualizer(KMeans(), k=10)\n >>> model.fit(X)\n >>> model.poof()\n\n Notes\n -----\n\n If you get a visualizer that doesn't have an elbow or inflection point,\n then this method may not be working. The elbow method does not work well\n if the data is not very clustered; in this case you might see a smooth\n curve and the value of k is unclear. Other scoring methods such as BIC or\n SSE also can be used to explore if clustering is a correct choice.\n\n For a discussion on the Elbow method, read more at\n `Robert Gove's Block <https://bl.ocks.org/rpgove/0060ff3b656618e9136b>`_.\n\n .. todo:: add parallelization option for performance\n .. todo:: add different metrics for scores and silhoutte\n .. todo:: add timing information about how long its taking\n \"\"\"\n\n def __init__(self, model, ax=None, k=10,\n metric=\"distortion\", timings=True, **kwargs):\n super(KElbowVisualizer, self).__init__(model, ax=ax, **kwargs)\n\n # Get the scoring method\n if metric not in KELBOW_SCOREMAP:\n raise YellowbrickValueError(\n \"'{}' is not a defined metric \"\n \"use one of distortion, silhouette, or calinski_harabaz\"\n )\n\n # Store the arguments\n self.scoring_metric = KELBOW_SCOREMAP[metric]\n self.timings = timings\n\n # Convert K into a tuple argument if an integer\n if isinstance(k, int):\n k = (2, k+1)\n\n # Expand k in to the values we will use, capturing exceptions\n try:\n k = tuple(k)\n self.k_values_ = list(range(*k))\n except:\n raise YellowbrickValueError((\n \"Specify a range or maximal K value, the value '{}' \"\n \"is not a valid argument for K.\".format(k)\n ))\n\n\n # Holds the values of the silhoutte scores\n self.k_scores_ = None\n\n def fit(self, X, y=None, **kwargs):\n \"\"\"\n Fits n KMeans models where n is the length of ``self.k_values_``,\n storing the silhoutte scores in the ``self.k_scores_`` attribute.\n This method finishes up by calling draw to create the plot.\n \"\"\"\n\n self.k_scores_ = []\n self.k_timers_ = []\n\n for k in self.k_values_:\n # Compute the start time for each model\n start = time.time()\n\n # Set the k value and fit the model\n self.estimator.set_params(n_clusters=k)\n self.estimator.fit(X)\n\n # Append the time and score to our plottable metrics\n self.k_timers_.append(time.time() - start)\n self.k_scores_.append(\n self.scoring_metric(X, self.estimator.labels_)\n )\n\n self.draw()\n\n return self\n\n def draw(self):\n \"\"\"\n Draw the elbow curve for the specified scores and values of K.\n \"\"\"\n # Plot the silhouette score against k\n self.ax.plot(self.k_values_, self.k_scores_, marker=\"D\", label=\"score\")\n\n # If we're going to plot the timings, create a twinx axis\n if self.timings:\n self.axes = [self.ax, self.ax.twinx()]\n self.axes[1].plot(\n self.k_values_, self.k_timers_, label=\"fit time\",\n c='g', marker=\"o\", linestyle=\"--\", alpha=0.75,\n )\n\n return self.ax\n\n def 
finalize(self):\n \"\"\"\n Prepare the figure for rendering by setting the title as well as the\n X and Y axis labels and adding the legend.\n \"\"\"\n # Get the metric name\n metric = self.scoring_metric.__name__.replace(\"_\", \" \").title()\n\n # Set the title\n self.set_title(\n '{} Elbow for {} Clustering'.format(metric, self.name)\n )\n\n # Set the x and y labels\n self.ax.set_xlabel('k')\n self.ax.set_ylabel(metric.lower())\n\n # Set the second y axis labels\n if self.timings:\n self.axes[1].grid(False)\n self.axes[1].set_ylabel(\"fit time (seconds)\", color='g')\n self.axes[1].tick_params('y', colors='g')\n", "path": "yellowbrick/cluster/elbow.py"}], "after_files": [{"content": "# yellowbrick.cluster.elbow\n# Implements the elbow method for determining the optimal number of clusters.\n#\n# Author: Benjamin Bengfort <[email protected]>\n# Created: Thu Mar 23 22:36:31 2017 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: elbow.py [5a370c8] [email protected] $\n\n\"\"\"\nImplements the elbow method for determining the optimal number of clusters.\nhttps://bl.ocks.org/rpgove/0060ff3b656618e9136b\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport time\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom .base import ClusteringScoreVisualizer\nfrom ..exceptions import YellowbrickValueError\n\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.metrics import calinski_harabaz_score\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.preprocessing import LabelEncoder\n\n\n## Packages for export\n__all__ = [\n \"KElbowVisualizer\", \"distortion_score\"\n]\n\n\n##########################################################################\n## Metrics\n##########################################################################\n\ndef distortion_score(X, labels, metric='euclidean'):\n \"\"\"\n Compute the mean distortion of all samples.\n\n The distortion is computed as the the sum of the squared distances between\n each observation and its closest centroid. Logically, this is the metric\n that K-Means attempts to minimize as it is fitting the model.\n\n .. seealso:: http://kldavenport.com/the-cost-function-of-k-means/\n\n Parameters\n ----------\n X : array, shape = [n_samples, n_features] or [n_samples_a, n_samples_a]\n Array of pairwise distances between samples if metric == \"precomputed\"\n or a feature array for computing distances against the labels.\n\n labels : array, shape = [n_samples]\n Predicted labels for each sample\n\n metric : string\n The metric to use when calculating distance between instances in a\n feature array. If metric is a string, it must be one of the options\n allowed by `sklearn.metrics.pairwise.pairwise_distances\n <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html#sklearn.metrics.pairwise.pairwise_distances>`_\n\n .. 
todo:: add sample_size and random_state kwds similar to silhouette_score\n \"\"\"\n # Encode labels to get unique centers and groups\n le = LabelEncoder()\n labels = le.fit_transform(labels)\n unique_labels = le.classes_\n\n # Sum of the distortions\n distortion = 0\n\n # Loop through each label (center) to compute the centroid\n for current_label in unique_labels:\n # Mask the instances that belong to the current label\n mask = labels == current_label\n instances = X[mask]\n\n # Compute the center of these instances\n center = instances.mean(axis=0)\n\n # NOTE: csc_matrix and csr_matrix mean returns a 2D array, numpy.mean\n # returns an array of 1 dimension less than the input. We expect\n # instances to be a 2D array, therefore to do pairwise computation we\n # require center to be a 2D array with a single row (the center).\n # See #370 for more detail.\n if not sp.issparse(instances):\n center = np.array([center])\n\n # Compute the square distances from the instances to the center\n distances = pairwise_distances(instances, center, metric=metric)\n distances = distances ** 2\n\n # Add the mean square distance to the distortion\n distortion += distances.mean()\n\n return distortion\n\n\n##########################################################################\n## Elbow Method\n##########################################################################\n\nKELBOW_SCOREMAP = {\n \"distortion\": distortion_score,\n \"silhouette\": silhouette_score,\n \"calinski_harabaz\": calinski_harabaz_score,\n}\n\n\nclass KElbowVisualizer(ClusteringScoreVisualizer):\n \"\"\"\n The K-Elbow Visualizer implements the \"elbow\" method of selecting the\n optimal number of clusters for K-means clustering. K-means is a simple\n unsupervised machine learning algorithm that groups data into a specified\n number (k) of clusters. Because the user must specify in advance what k to\n choose, the algorithm is somewhat naive -- it assigns all members to k\n clusters even if that is not the right k for the dataset.\n\n The elbow method runs k-means clustering on the dataset for a range of\n values for k (say from 1-10) and then for each value of k computes an\n average score for all clusters. By default, the ``distortion`` score is\n computed, the sum of square distances from each point to its assigned\n center. Other metrics can also be used such as the ``silhouette`` score,\n the mean silhouette coefficient for all samples or the\n ``calinski_harabaz`` score, which computes the ratio of dispersion between\n and within clusters.\n\n When these overall metrics for each model are plotted, it is possible to\n visually determine the best value for K. If the line chart looks like an\n arm, then the \"elbow\" (the point of inflection on the curve) is the best\n value of k. The \"arm\" can be either up or down, but if there is a strong\n inflection point, it is a good indication that the underlying model fits\n best at that point.\n\n Parameters\n ----------\n\n model : a Scikit-Learn clusterer\n Should be an instance of a clusterer, specifically ``KMeans`` or\n ``MiniBatchKMeans``. If it is not a clusterer, an exception is raised.\n\n ax : matplotlib Axes, default: None\n The axes to plot the figure on. If None is passed in the current axes\n will be used (or generated if required).\n\n k : integer or tuple\n The range of k to compute silhouette scores for. 
If a single integer\n is specified, then will compute the range (2,k) otherwise the\n specified range in the tuple is used.\n\n metric : string, default: ``\"distortion\"``\n Select the scoring metric to evaluate the clusters. The default is the\n mean distortion, defined by the sum of squared distances between each\n observation and its closest centroid. Other metrics include:\n\n - **distortion**: mean sum of squared distances to centers\n - **silhouette**: mean ratio of intra-cluster and nearest-cluster distance\n - **calinski_harabaz**: ratio of within to between cluster dispersion\n\n timings : bool, default: True\n Display the fitting time per k to evaluate the amount of time required\n to train the clustering model.\n\n kwargs : dict\n Keyword arguments that are passed to the base class and may influence\n the visualization as defined in other Visualizers.\n\n Examples\n --------\n\n >>> from yellowbrick.cluster import KElbowVisualizer\n >>> from sklearn.cluster import KMeans\n >>> model = KElbowVisualizer(KMeans(), k=10)\n >>> model.fit(X)\n >>> model.poof()\n\n Notes\n -----\n\n If you get a visualizer that doesn't have an elbow or inflection point,\n then this method may not be working. The elbow method does not work well\n if the data is not very clustered; in this case, you might see a smooth\n curve and the value of k is unclear. Other scoring methods, such as BIC or\n SSE, also can be used to explore if clustering is a correct choice.\n\n For a discussion on the Elbow method, read more at\n `Robert Gove's Block <https://bl.ocks.org/rpgove/0060ff3b656618e9136b>`_.\n \n .. seealso:: The scikit-learn documentation for the `silhouette_score\n <https://bit.ly/2LYWjYb>`_ and `calinski_harabaz_score\n <https://bit.ly/2LW3Zu9>`_. The default, `distortion_score`, is\n implemented in`yellowbrick.cluster.elbow`.\n\n .. todo:: add parallelization option for performance\n .. todo:: add different metrics for scores and silhouette\n .. 
todo:: add timing information about how long it's taking\n \"\"\"\n\n def __init__(self, model, ax=None, k=10,\n metric=\"distortion\", timings=True, **kwargs):\n super(KElbowVisualizer, self).__init__(model, ax=ax, **kwargs)\n\n # Get the scoring method\n if metric not in KELBOW_SCOREMAP:\n raise YellowbrickValueError(\n \"'{}' is not a defined metric \"\n \"use one of distortion, silhouette, or calinski_harabaz\"\n )\n\n # Store the arguments\n self.scoring_metric = KELBOW_SCOREMAP[metric]\n self.timings = timings\n\n # Convert K into a tuple argument if an integer\n if isinstance(k, int):\n k = (2, k+1)\n\n # Expand k in to the values we will use, capturing exceptions\n try:\n k = tuple(k)\n self.k_values_ = list(range(*k))\n except:\n raise YellowbrickValueError((\n \"Specify a range or maximal K value, the value '{}' \"\n \"is not a valid argument for K.\".format(k)\n ))\n\n\n # Holds the values of the silhoutte scores\n self.k_scores_ = None\n\n def fit(self, X, y=None, **kwargs):\n \"\"\"\n Fits n KMeans models where n is the length of ``self.k_values_``,\n storing the silhoutte scores in the ``self.k_scores_`` attribute.\n This method finishes up by calling draw to create the plot.\n \"\"\"\n\n self.k_scores_ = []\n self.k_timers_ = []\n\n for k in self.k_values_:\n # Compute the start time for each model\n start = time.time()\n\n # Set the k value and fit the model\n self.estimator.set_params(n_clusters=k)\n self.estimator.fit(X)\n\n # Append the time and score to our plottable metrics\n self.k_timers_.append(time.time() - start)\n self.k_scores_.append(\n self.scoring_metric(X, self.estimator.labels_)\n )\n\n self.draw()\n\n return self\n\n def draw(self):\n \"\"\"\n Draw the elbow curve for the specified scores and values of K.\n \"\"\"\n # Plot the silhouette score against k\n self.ax.plot(self.k_values_, self.k_scores_, marker=\"D\", label=\"score\")\n\n # If we're going to plot the timings, create a twinx axis\n if self.timings:\n self.axes = [self.ax, self.ax.twinx()]\n self.axes[1].plot(\n self.k_values_, self.k_timers_, label=\"fit time\",\n c='g', marker=\"o\", linestyle=\"--\", alpha=0.75,\n )\n\n return self.ax\n\n def finalize(self):\n \"\"\"\n Prepare the figure for rendering by setting the title as well as the\n X and Y axis labels and adding the legend.\n \"\"\"\n # Get the metric name\n metric = self.scoring_metric.__name__.replace(\"_\", \" \").title()\n\n # Set the title\n self.set_title(\n '{} Elbow for {} Clustering'.format(metric, self.name)\n )\n\n # Set the x and y labels\n self.ax.set_xlabel('k')\n self.ax.set_ylabel(metric.lower())\n\n # Set the second y axis labels\n if self.timings:\n self.axes[1].grid(False)\n self.axes[1].set_ylabel(\"fit time (seconds)\", color='g')\n self.axes[1].tick_params('y', colors='g')\n", "path": "yellowbrick/cluster/elbow.py"}]}
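For orientation, the visualizer documented in the blob above is driven in just a few lines; this sketch follows the record's own docstring example (the blob data, cluster count, and random seed are invented for illustration and are not part of the patch):

```python
# Minimal elbow-method run mirroring the KElbowVisualizer docstring in this record.
# The make_blobs call and its parameters are placeholders for real data.
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from yellowbrick.cluster import KElbowVisualizer

X, _ = make_blobs(n_samples=500, centers=4, random_state=42)

viz = KElbowVisualizer(KMeans(), k=10, metric="distortion", timings=True)
viz.fit(X)    # fits one KMeans per k in range(2, 11), recording score and fit time
viz.poof()    # renders the elbow curve; poof() is the draw call used in this era of yellowbrick
```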
3,816
660
gh_patches_debug_61516
rasdani/github-patches
git_diff
open-mmlab__mmpose-267
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Pylint: R1719 ```bash mmpose/models/backbones/shufflenet_v1.py:238:26: R1719: The if expression can be replaced with 'test' (simplifiable-if-expression) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mmpose/models/backbones/shufflenet_v1.py` Content: ``` 1 import logging 2 3 import torch 4 import torch.nn as nn 5 import torch.utils.checkpoint as cp 6 from mmcv.cnn import (ConvModule, build_activation_layer, constant_init, 7 normal_init) 8 from torch.nn.modules.batchnorm import _BatchNorm 9 10 from ..registry import BACKBONES 11 from .base_backbone import BaseBackbone 12 from .utils import channel_shuffle, load_checkpoint, make_divisible 13 14 15 class ShuffleUnit(nn.Module): 16 """ShuffleUnit block. 17 18 ShuffleNet unit with pointwise group convolution (GConv) and channel 19 shuffle. 20 21 Args: 22 in_channels (int): The input channels of the ShuffleUnit. 23 out_channels (int): The output channels of the ShuffleUnit. 24 groups (int, optional): The number of groups to be used in grouped 1x1 25 convolutions in each ShuffleUnit. Default: 3 26 first_block (bool, optional): Whether it is the first ShuffleUnit of a 27 sequential ShuffleUnits. Default: False, which means not using the 28 grouped 1x1 convolution. 29 combine (str, optional): The ways to combine the input and output 30 branches. Default: 'add'. 31 conv_cfg (dict): Config dict for convolution layer. Default: None, 32 which means using conv2d. 33 norm_cfg (dict): Config dict for normalization layer. 34 Default: dict(type='BN'). 35 act_cfg (dict): Config dict for activation layer. 36 Default: dict(type='ReLU'). 37 with_cp (bool, optional): Use checkpoint or not. Using checkpoint 38 will save some memory while slowing down the training speed. 39 Default: False. 40 41 Returns: 42 Tensor: The output tensor. 43 """ 44 45 def __init__(self, 46 in_channels, 47 out_channels, 48 groups=3, 49 first_block=True, 50 combine='add', 51 conv_cfg=None, 52 norm_cfg=dict(type='BN'), 53 act_cfg=dict(type='ReLU'), 54 with_cp=False): 55 super().__init__() 56 self.in_channels = in_channels 57 self.out_channels = out_channels 58 self.first_block = first_block 59 self.combine = combine 60 self.groups = groups 61 self.bottleneck_channels = self.out_channels // 4 62 self.with_cp = with_cp 63 64 if self.combine == 'add': 65 self.depthwise_stride = 1 66 self._combine_func = self._add 67 assert in_channels == out_channels, ( 68 'in_channels must be equal to out_channels when combine ' 69 'is add') 70 elif self.combine == 'concat': 71 self.depthwise_stride = 2 72 self._combine_func = self._concat 73 self.out_channels -= self.in_channels 74 self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1) 75 else: 76 raise ValueError(f'Cannot combine tensors with {self.combine}. 
' 77 'Only "add" and "concat" are supported') 78 79 self.first_1x1_groups = 1 if first_block else self.groups 80 self.g_conv_1x1_compress = ConvModule( 81 in_channels=self.in_channels, 82 out_channels=self.bottleneck_channels, 83 kernel_size=1, 84 groups=self.first_1x1_groups, 85 conv_cfg=conv_cfg, 86 norm_cfg=norm_cfg, 87 act_cfg=act_cfg) 88 89 self.depthwise_conv3x3_bn = ConvModule( 90 in_channels=self.bottleneck_channels, 91 out_channels=self.bottleneck_channels, 92 kernel_size=3, 93 stride=self.depthwise_stride, 94 padding=1, 95 groups=self.bottleneck_channels, 96 conv_cfg=conv_cfg, 97 norm_cfg=norm_cfg, 98 act_cfg=None) 99 100 self.g_conv_1x1_expand = ConvModule( 101 in_channels=self.bottleneck_channels, 102 out_channels=self.out_channels, 103 kernel_size=1, 104 groups=self.groups, 105 conv_cfg=conv_cfg, 106 norm_cfg=norm_cfg, 107 act_cfg=None) 108 109 self.act = build_activation_layer(act_cfg) 110 111 @staticmethod 112 def _add(x, out): 113 # residual connection 114 return x + out 115 116 @staticmethod 117 def _concat(x, out): 118 # concatenate along channel axis 119 return torch.cat((x, out), 1) 120 121 def forward(self, x): 122 123 def _inner_forward(x): 124 residual = x 125 126 out = self.g_conv_1x1_compress(x) 127 out = self.depthwise_conv3x3_bn(out) 128 129 if self.groups > 1: 130 out = channel_shuffle(out, self.groups) 131 132 out = self.g_conv_1x1_expand(out) 133 134 if self.combine == 'concat': 135 residual = self.avgpool(residual) 136 out = self.act(out) 137 out = self._combine_func(residual, out) 138 else: 139 out = self._combine_func(residual, out) 140 out = self.act(out) 141 return out 142 143 if self.with_cp and x.requires_grad: 144 out = cp.checkpoint(_inner_forward, x) 145 else: 146 out = _inner_forward(x) 147 148 return out 149 150 151 @BACKBONES.register_module() 152 class ShuffleNetV1(BaseBackbone): 153 """ShuffleNetV1 backbone. 154 155 Args: 156 groups (int, optional): The number of groups to be used in grouped 1x1 157 convolutions in each ShuffleUnit. Default: 3. 158 widen_factor (float, optional): Width multiplier - adjusts the number 159 of channels in each layer by this amount. Default: 1.0. 160 out_indices (Sequence[int]): Output from which stages. 161 Default: (2, ) 162 frozen_stages (int): Stages to be frozen (all param fixed). 163 Default: -1, which means not freezing any parameters. 164 conv_cfg (dict): Config dict for convolution layer. Default: None, 165 which means using conv2d. 166 norm_cfg (dict): Config dict for normalization layer. 167 Default: dict(type='BN'). 168 act_cfg (dict): Config dict for activation layer. 169 Default: dict(type='ReLU'). 170 norm_eval (bool): Whether to set norm layers to eval mode, namely, 171 freeze running stats (mean and var). Note: Effect on Batch Norm 172 and its variants only. Default: False. 173 with_cp (bool): Use checkpoint or not. Using checkpoint will save some 174 memory while slowing down the training speed. Default: False. 175 """ 176 177 def __init__(self, 178 groups=3, 179 widen_factor=1.0, 180 out_indices=(2, ), 181 frozen_stages=-1, 182 conv_cfg=None, 183 norm_cfg=dict(type='BN'), 184 act_cfg=dict(type='ReLU'), 185 norm_eval=False, 186 with_cp=False): 187 super().__init__() 188 self.stage_blocks = [4, 8, 4] 189 self.groups = groups 190 191 for index in out_indices: 192 if index not in range(0, 3): 193 raise ValueError('the item in out_indices must in ' 194 f'range(0, 3). But received {index}') 195 196 if frozen_stages not in range(-1, 3): 197 raise ValueError('frozen_stages must be in range(-1, 3). 
' 198 f'But received {frozen_stages}') 199 self.out_indices = out_indices 200 self.frozen_stages = frozen_stages 201 self.conv_cfg = conv_cfg 202 self.norm_cfg = norm_cfg 203 self.act_cfg = act_cfg 204 self.norm_eval = norm_eval 205 self.with_cp = with_cp 206 207 if groups == 1: 208 channels = (144, 288, 576) 209 elif groups == 2: 210 channels = (200, 400, 800) 211 elif groups == 3: 212 channels = (240, 480, 960) 213 elif groups == 4: 214 channels = (272, 544, 1088) 215 elif groups == 8: 216 channels = (384, 768, 1536) 217 else: 218 raise ValueError(f'{groups} groups is not supported for 1x1 ' 219 'Grouped Convolutions') 220 221 channels = [make_divisible(ch * widen_factor, 8) for ch in channels] 222 223 self.in_channels = int(24 * widen_factor) 224 225 self.conv1 = ConvModule( 226 in_channels=3, 227 out_channels=self.in_channels, 228 kernel_size=3, 229 stride=2, 230 padding=1, 231 conv_cfg=conv_cfg, 232 norm_cfg=norm_cfg, 233 act_cfg=act_cfg) 234 self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 235 236 self.layers = nn.ModuleList() 237 for i, num_blocks in enumerate(self.stage_blocks): 238 first_block = True if i == 0 else False 239 layer = self.make_layer(channels[i], num_blocks, first_block) 240 self.layers.append(layer) 241 242 def _freeze_stages(self): 243 if self.frozen_stages >= 0: 244 for param in self.conv1.parameters(): 245 param.requires_grad = False 246 for i in range(self.frozen_stages): 247 layer = self.layers[i] 248 layer.eval() 249 for param in layer.parameters(): 250 param.requires_grad = False 251 252 def init_weights(self, pretrained=None): 253 if isinstance(pretrained, str): 254 logger = logging.getLogger() 255 load_checkpoint(self, pretrained, strict=False, logger=logger) 256 elif pretrained is None: 257 for name, m in self.named_modules(): 258 if isinstance(m, nn.Conv2d): 259 if 'conv1' in name: 260 normal_init(m, mean=0, std=0.01) 261 else: 262 normal_init(m, mean=0, std=1.0 / m.weight.shape[1]) 263 elif isinstance(m, (_BatchNorm, nn.GroupNorm)): 264 constant_init(m.weight, val=1, bias=0.0001) 265 if isinstance(m, _BatchNorm): 266 if m.running_mean is not None: 267 nn.init.constant_(m.running_mean, 0) 268 else: 269 raise TypeError('pretrained must be a str or None. But received ' 270 f'{type(pretrained)}') 271 272 def make_layer(self, out_channels, num_blocks, first_block=False): 273 """Stack ShuffleUnit blocks to make a layer. 274 275 Args: 276 out_channels (int): out_channels of the block. 277 num_blocks (int): Number of blocks. 278 first_block (bool, optional): Whether is the first ShuffleUnit of a 279 sequential ShuffleUnits. Default: False, which means not using 280 the grouped 1x1 convolution. 
281 """ 282 layers = [] 283 for i in range(num_blocks): 284 first_block = first_block if i == 0 else False 285 combine_mode = 'concat' if i == 0 else 'add' 286 layers.append( 287 ShuffleUnit( 288 self.in_channels, 289 out_channels, 290 groups=self.groups, 291 first_block=first_block, 292 combine=combine_mode, 293 conv_cfg=self.conv_cfg, 294 norm_cfg=self.norm_cfg, 295 act_cfg=self.act_cfg, 296 with_cp=self.with_cp)) 297 self.in_channels = out_channels 298 299 return nn.Sequential(*layers) 300 301 def forward(self, x): 302 x = self.conv1(x) 303 x = self.maxpool(x) 304 305 outs = [] 306 for i, layer in enumerate(self.layers): 307 x = layer(x) 308 if i in self.out_indices: 309 outs.append(x) 310 311 if len(outs) == 1: 312 return outs[0] 313 else: 314 return tuple(outs) 315 316 def train(self, mode=True): 317 super().train(mode) 318 self._freeze_stages() 319 if mode and self.norm_eval: 320 for m in self.modules(): 321 if isinstance(m, _BatchNorm): 322 m.eval() 323 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mmpose/models/backbones/shufflenet_v1.py b/mmpose/models/backbones/shufflenet_v1.py --- a/mmpose/models/backbones/shufflenet_v1.py +++ b/mmpose/models/backbones/shufflenet_v1.py @@ -235,7 +235,7 @@ self.layers = nn.ModuleList() for i, num_blocks in enumerate(self.stage_blocks): - first_block = True if i == 0 else False + first_block = (i == 0) layer = self.make_layer(channels[i], num_blocks, first_block) self.layers.append(layer)
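The patch above resolves pylint's R1719 (simplifiable-if-expression): `True if <test> else False` is just a verbose spelling of the boolean test itself. A standalone illustration of the same rewrite (the variable names follow the patched file; the assert exists only for the demo):

```python
# Before: flagged by pylint R1719 (simplifiable-if-expression)
i = 0
first_block = True if i == 0 else False

# After: the comparison already yields a bool, so use it directly
first_block = (i == 0)

assert first_block is True
```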
{"golden_diff": "diff --git a/mmpose/models/backbones/shufflenet_v1.py b/mmpose/models/backbones/shufflenet_v1.py\n--- a/mmpose/models/backbones/shufflenet_v1.py\n+++ b/mmpose/models/backbones/shufflenet_v1.py\n@@ -235,7 +235,7 @@\n \n self.layers = nn.ModuleList()\n for i, num_blocks in enumerate(self.stage_blocks):\n- first_block = True if i == 0 else False\n+ first_block = (i == 0)\n layer = self.make_layer(channels[i], num_blocks, first_block)\n self.layers.append(layer)\n", "issue": "Pylint: R1719\n```bash\r\nmmpose/models/backbones/shufflenet_v1.py:238:26: R1719: The if expression can be replaced with 'test' (simplifiable-if-expression)\r\n```\n", "before_files": [{"content": "import logging\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import (ConvModule, build_activation_layer, constant_init,\n normal_init)\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom ..registry import BACKBONES\nfrom .base_backbone import BaseBackbone\nfrom .utils import channel_shuffle, load_checkpoint, make_divisible\n\n\nclass ShuffleUnit(nn.Module):\n \"\"\"ShuffleUnit block.\n\n ShuffleNet unit with pointwise group convolution (GConv) and channel\n shuffle.\n\n Args:\n in_channels (int): The input channels of the ShuffleUnit.\n out_channels (int): The output channels of the ShuffleUnit.\n groups (int, optional): The number of groups to be used in grouped 1x1\n convolutions in each ShuffleUnit. Default: 3\n first_block (bool, optional): Whether it is the first ShuffleUnit of a\n sequential ShuffleUnits. Default: False, which means not using the\n grouped 1x1 convolution.\n combine (str, optional): The ways to combine the input and output\n branches. Default: 'add'.\n conv_cfg (dict): Config dict for convolution layer. Default: None,\n which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU').\n with_cp (bool, optional): Use checkpoint or not. Using checkpoint\n will save some memory while slowing down the training speed.\n Default: False.\n\n Returns:\n Tensor: The output tensor.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n groups=3,\n first_block=True,\n combine='add',\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n act_cfg=dict(type='ReLU'),\n with_cp=False):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.first_block = first_block\n self.combine = combine\n self.groups = groups\n self.bottleneck_channels = self.out_channels // 4\n self.with_cp = with_cp\n\n if self.combine == 'add':\n self.depthwise_stride = 1\n self._combine_func = self._add\n assert in_channels == out_channels, (\n 'in_channels must be equal to out_channels when combine '\n 'is add')\n elif self.combine == 'concat':\n self.depthwise_stride = 2\n self._combine_func = self._concat\n self.out_channels -= self.in_channels\n self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)\n else:\n raise ValueError(f'Cannot combine tensors with {self.combine}. 
'\n 'Only \"add\" and \"concat\" are supported')\n\n self.first_1x1_groups = 1 if first_block else self.groups\n self.g_conv_1x1_compress = ConvModule(\n in_channels=self.in_channels,\n out_channels=self.bottleneck_channels,\n kernel_size=1,\n groups=self.first_1x1_groups,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg)\n\n self.depthwise_conv3x3_bn = ConvModule(\n in_channels=self.bottleneck_channels,\n out_channels=self.bottleneck_channels,\n kernel_size=3,\n stride=self.depthwise_stride,\n padding=1,\n groups=self.bottleneck_channels,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None)\n\n self.g_conv_1x1_expand = ConvModule(\n in_channels=self.bottleneck_channels,\n out_channels=self.out_channels,\n kernel_size=1,\n groups=self.groups,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None)\n\n self.act = build_activation_layer(act_cfg)\n\n @staticmethod\n def _add(x, out):\n # residual connection\n return x + out\n\n @staticmethod\n def _concat(x, out):\n # concatenate along channel axis\n return torch.cat((x, out), 1)\n\n def forward(self, x):\n\n def _inner_forward(x):\n residual = x\n\n out = self.g_conv_1x1_compress(x)\n out = self.depthwise_conv3x3_bn(out)\n\n if self.groups > 1:\n out = channel_shuffle(out, self.groups)\n\n out = self.g_conv_1x1_expand(out)\n\n if self.combine == 'concat':\n residual = self.avgpool(residual)\n out = self.act(out)\n out = self._combine_func(residual, out)\n else:\n out = self._combine_func(residual, out)\n out = self.act(out)\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n return out\n\n\[email protected]_module()\nclass ShuffleNetV1(BaseBackbone):\n \"\"\"ShuffleNetV1 backbone.\n\n Args:\n groups (int, optional): The number of groups to be used in grouped 1x1\n convolutions in each ShuffleUnit. Default: 3.\n widen_factor (float, optional): Width multiplier - adjusts the number\n of channels in each layer by this amount. Default: 1.0.\n out_indices (Sequence[int]): Output from which stages.\n Default: (2, )\n frozen_stages (int): Stages to be frozen (all param fixed).\n Default: -1, which means not freezing any parameters.\n conv_cfg (dict): Config dict for convolution layer. Default: None,\n which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU').\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. Default: False.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. Default: False.\n \"\"\"\n\n def __init__(self,\n groups=3,\n widen_factor=1.0,\n out_indices=(2, ),\n frozen_stages=-1,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n act_cfg=dict(type='ReLU'),\n norm_eval=False,\n with_cp=False):\n super().__init__()\n self.stage_blocks = [4, 8, 4]\n self.groups = groups\n\n for index in out_indices:\n if index not in range(0, 3):\n raise ValueError('the item in out_indices must in '\n f'range(0, 3). But received {index}')\n\n if frozen_stages not in range(-1, 3):\n raise ValueError('frozen_stages must be in range(-1, 3). 
'\n f'But received {frozen_stages}')\n self.out_indices = out_indices\n self.frozen_stages = frozen_stages\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n self.norm_eval = norm_eval\n self.with_cp = with_cp\n\n if groups == 1:\n channels = (144, 288, 576)\n elif groups == 2:\n channels = (200, 400, 800)\n elif groups == 3:\n channels = (240, 480, 960)\n elif groups == 4:\n channels = (272, 544, 1088)\n elif groups == 8:\n channels = (384, 768, 1536)\n else:\n raise ValueError(f'{groups} groups is not supported for 1x1 '\n 'Grouped Convolutions')\n\n channels = [make_divisible(ch * widen_factor, 8) for ch in channels]\n\n self.in_channels = int(24 * widen_factor)\n\n self.conv1 = ConvModule(\n in_channels=3,\n out_channels=self.in_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layers = nn.ModuleList()\n for i, num_blocks in enumerate(self.stage_blocks):\n first_block = True if i == 0 else False\n layer = self.make_layer(channels[i], num_blocks, first_block)\n self.layers.append(layer)\n\n def _freeze_stages(self):\n if self.frozen_stages >= 0:\n for param in self.conv1.parameters():\n param.requires_grad = False\n for i in range(self.frozen_stages):\n layer = self.layers[i]\n layer.eval()\n for param in layer.parameters():\n param.requires_grad = False\n\n def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = logging.getLogger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for name, m in self.named_modules():\n if isinstance(m, nn.Conv2d):\n if 'conv1' in name:\n normal_init(m, mean=0, std=0.01)\n else:\n normal_init(m, mean=0, std=1.0 / m.weight.shape[1])\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m.weight, val=1, bias=0.0001)\n if isinstance(m, _BatchNorm):\n if m.running_mean is not None:\n nn.init.constant_(m.running_mean, 0)\n else:\n raise TypeError('pretrained must be a str or None. But received '\n f'{type(pretrained)}')\n\n def make_layer(self, out_channels, num_blocks, first_block=False):\n \"\"\"Stack ShuffleUnit blocks to make a layer.\n\n Args:\n out_channels (int): out_channels of the block.\n num_blocks (int): Number of blocks.\n first_block (bool, optional): Whether is the first ShuffleUnit of a\n sequential ShuffleUnits. 
Default: False, which means not using\n the grouped 1x1 convolution.\n \"\"\"\n layers = []\n for i in range(num_blocks):\n first_block = first_block if i == 0 else False\n combine_mode = 'concat' if i == 0 else 'add'\n layers.append(\n ShuffleUnit(\n self.in_channels,\n out_channels,\n groups=self.groups,\n first_block=first_block,\n combine=combine_mode,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg,\n with_cp=self.with_cp))\n self.in_channels = out_channels\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.maxpool(x)\n\n outs = []\n for i, layer in enumerate(self.layers):\n x = layer(x)\n if i in self.out_indices:\n outs.append(x)\n\n if len(outs) == 1:\n return outs[0]\n else:\n return tuple(outs)\n\n def train(self, mode=True):\n super().train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, _BatchNorm):\n m.eval()\n", "path": "mmpose/models/backbones/shufflenet_v1.py"}], "after_files": [{"content": "import logging\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import (ConvModule, build_activation_layer, constant_init,\n normal_init)\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom ..registry import BACKBONES\nfrom .base_backbone import BaseBackbone\nfrom .utils import channel_shuffle, load_checkpoint, make_divisible\n\n\nclass ShuffleUnit(nn.Module):\n \"\"\"ShuffleUnit block.\n\n ShuffleNet unit with pointwise group convolution (GConv) and channel\n shuffle.\n\n Args:\n in_channels (int): The input channels of the ShuffleUnit.\n out_channels (int): The output channels of the ShuffleUnit.\n groups (int, optional): The number of groups to be used in grouped 1x1\n convolutions in each ShuffleUnit. Default: 3\n first_block (bool, optional): Whether it is the first ShuffleUnit of a\n sequential ShuffleUnits. Default: False, which means not using the\n grouped 1x1 convolution.\n combine (str, optional): The ways to combine the input and output\n branches. Default: 'add'.\n conv_cfg (dict): Config dict for convolution layer. Default: None,\n which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU').\n with_cp (bool, optional): Use checkpoint or not. Using checkpoint\n will save some memory while slowing down the training speed.\n Default: False.\n\n Returns:\n Tensor: The output tensor.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n groups=3,\n first_block=True,\n combine='add',\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n act_cfg=dict(type='ReLU'),\n with_cp=False):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.first_block = first_block\n self.combine = combine\n self.groups = groups\n self.bottleneck_channels = self.out_channels // 4\n self.with_cp = with_cp\n\n if self.combine == 'add':\n self.depthwise_stride = 1\n self._combine_func = self._add\n assert in_channels == out_channels, (\n 'in_channels must be equal to out_channels when combine '\n 'is add')\n elif self.combine == 'concat':\n self.depthwise_stride = 2\n self._combine_func = self._concat\n self.out_channels -= self.in_channels\n self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)\n else:\n raise ValueError(f'Cannot combine tensors with {self.combine}. 
'\n 'Only \"add\" and \"concat\" are supported')\n\n self.first_1x1_groups = 1 if first_block else self.groups\n self.g_conv_1x1_compress = ConvModule(\n in_channels=self.in_channels,\n out_channels=self.bottleneck_channels,\n kernel_size=1,\n groups=self.first_1x1_groups,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg)\n\n self.depthwise_conv3x3_bn = ConvModule(\n in_channels=self.bottleneck_channels,\n out_channels=self.bottleneck_channels,\n kernel_size=3,\n stride=self.depthwise_stride,\n padding=1,\n groups=self.bottleneck_channels,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None)\n\n self.g_conv_1x1_expand = ConvModule(\n in_channels=self.bottleneck_channels,\n out_channels=self.out_channels,\n kernel_size=1,\n groups=self.groups,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None)\n\n self.act = build_activation_layer(act_cfg)\n\n @staticmethod\n def _add(x, out):\n # residual connection\n return x + out\n\n @staticmethod\n def _concat(x, out):\n # concatenate along channel axis\n return torch.cat((x, out), 1)\n\n def forward(self, x):\n\n def _inner_forward(x):\n residual = x\n\n out = self.g_conv_1x1_compress(x)\n out = self.depthwise_conv3x3_bn(out)\n\n if self.groups > 1:\n out = channel_shuffle(out, self.groups)\n\n out = self.g_conv_1x1_expand(out)\n\n if self.combine == 'concat':\n residual = self.avgpool(residual)\n out = self.act(out)\n out = self._combine_func(residual, out)\n else:\n out = self._combine_func(residual, out)\n out = self.act(out)\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n return out\n\n\[email protected]_module()\nclass ShuffleNetV1(BaseBackbone):\n \"\"\"ShuffleNetV1 backbone.\n\n Args:\n groups (int, optional): The number of groups to be used in grouped 1x1\n convolutions in each ShuffleUnit. Default: 3.\n widen_factor (float, optional): Width multiplier - adjusts the number\n of channels in each layer by this amount. Default: 1.0.\n out_indices (Sequence[int]): Output from which stages.\n Default: (2, )\n frozen_stages (int): Stages to be frozen (all param fixed).\n Default: -1, which means not freezing any parameters.\n conv_cfg (dict): Config dict for convolution layer. Default: None,\n which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU').\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. Default: False.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. Default: False.\n \"\"\"\n\n def __init__(self,\n groups=3,\n widen_factor=1.0,\n out_indices=(2, ),\n frozen_stages=-1,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n act_cfg=dict(type='ReLU'),\n norm_eval=False,\n with_cp=False):\n super().__init__()\n self.stage_blocks = [4, 8, 4]\n self.groups = groups\n\n for index in out_indices:\n if index not in range(0, 3):\n raise ValueError('the item in out_indices must in '\n f'range(0, 3). But received {index}')\n\n if frozen_stages not in range(-1, 3):\n raise ValueError('frozen_stages must be in range(-1, 3). 
'\n f'But received {frozen_stages}')\n self.out_indices = out_indices\n self.frozen_stages = frozen_stages\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n self.norm_eval = norm_eval\n self.with_cp = with_cp\n\n if groups == 1:\n channels = (144, 288, 576)\n elif groups == 2:\n channels = (200, 400, 800)\n elif groups == 3:\n channels = (240, 480, 960)\n elif groups == 4:\n channels = (272, 544, 1088)\n elif groups == 8:\n channels = (384, 768, 1536)\n else:\n raise ValueError(f'{groups} groups is not supported for 1x1 '\n 'Grouped Convolutions')\n\n channels = [make_divisible(ch * widen_factor, 8) for ch in channels]\n\n self.in_channels = int(24 * widen_factor)\n\n self.conv1 = ConvModule(\n in_channels=3,\n out_channels=self.in_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layers = nn.ModuleList()\n for i, num_blocks in enumerate(self.stage_blocks):\n first_block = (i == 0)\n layer = self.make_layer(channels[i], num_blocks, first_block)\n self.layers.append(layer)\n\n def _freeze_stages(self):\n if self.frozen_stages >= 0:\n for param in self.conv1.parameters():\n param.requires_grad = False\n for i in range(self.frozen_stages):\n layer = self.layers[i]\n layer.eval()\n for param in layer.parameters():\n param.requires_grad = False\n\n def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = logging.getLogger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for name, m in self.named_modules():\n if isinstance(m, nn.Conv2d):\n if 'conv1' in name:\n normal_init(m, mean=0, std=0.01)\n else:\n normal_init(m, mean=0, std=1.0 / m.weight.shape[1])\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m.weight, val=1, bias=0.0001)\n if isinstance(m, _BatchNorm):\n if m.running_mean is not None:\n nn.init.constant_(m.running_mean, 0)\n else:\n raise TypeError('pretrained must be a str or None. But received '\n f'{type(pretrained)}')\n\n def make_layer(self, out_channels, num_blocks, first_block=False):\n \"\"\"Stack ShuffleUnit blocks to make a layer.\n\n Args:\n out_channels (int): out_channels of the block.\n num_blocks (int): Number of blocks.\n first_block (bool, optional): Whether is the first ShuffleUnit of a\n sequential ShuffleUnits. Default: False, which means not using\n the grouped 1x1 convolution.\n \"\"\"\n layers = []\n for i in range(num_blocks):\n first_block = first_block if i == 0 else False\n combine_mode = 'concat' if i == 0 else 'add'\n layers.append(\n ShuffleUnit(\n self.in_channels,\n out_channels,\n groups=self.groups,\n first_block=first_block,\n combine=combine_mode,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg,\n with_cp=self.with_cp))\n self.in_channels = out_channels\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.maxpool(x)\n\n outs = []\n for i, layer in enumerate(self.layers):\n x = layer(x)\n if i in self.out_indices:\n outs.append(x)\n\n if len(outs) == 1:\n return outs[0]\n else:\n return tuple(outs)\n\n def train(self, mode=True):\n super().train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, _BatchNorm):\n m.eval()\n", "path": "mmpose/models/backbones/shufflenet_v1.py"}]}
3,767
146
gh_patches_debug_19361
rasdani/github-patches
git_diff
docker__docker-py-1248
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- create a sans-io docker client class and impliment Client with blocking requests use https://github.com/mikeal/deferred to create a sans-io version of Client (SansIOClient) that requires something like: ``` python class SimpleStream(object): def next(self) -> Deferred: ... class IOAdapter(object): def request(self, **kwargs) -> Deferred: ... def stream(self, **kwargs) -> SimpleStream: ... def unwrap_deferred(self, deferred: Deferred) -> Any: ... ``` and then implement it with something like: ``` python class BlockingSimpleStream(SimpleStream): def __init__(self, stream): self.generator = _stream_helper(stream): def next(self) -> Deferred: return deferred.succeeded(next(self.generator)) class BlockingIOAdapter(IOAdapter): def __init__(session: requests.Session): self.session = session def request(self, **kwargs) -> Deferred: return deferred.execute(self.session.request, **kwargs) def stream(self, **kwargs) -> BlockingSimpleStream: return BlockingSimpleStream(self.session.request(**kwargs, stream=True)) def unwrap_deferred(self, d: Deferred): return deferred.waitForDeferred(d).getResult() ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 import os 3 import sys 4 5 from setuptools import setup 6 7 8 ROOT_DIR = os.path.dirname(__file__) 9 SOURCE_DIR = os.path.join(ROOT_DIR) 10 11 requirements = [ 12 'requests >= 2.5.2, < 2.11', 13 'six >= 1.4.0', 14 'websocket-client >= 0.32.0', 15 'docker-pycreds >= 0.2.1' 16 ] 17 18 if sys.platform == 'win32': 19 requirements.append('pypiwin32 >= 219') 20 21 extras_require = { 22 ':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5', 23 # While not imported explicitly, the ipaddress module is required for 24 # ssl_match_hostname to verify hosts match with certificates via 25 # ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname 26 ':python_version < "3.3"': 'ipaddress >= 1.0.16', 27 } 28 29 version = None 30 exec(open('docker/version.py').read()) 31 32 with open('./test-requirements.txt') as test_reqs_txt: 33 test_requirements = [line for line in test_reqs_txt] 34 35 36 setup( 37 name="docker-py", 38 version=version, 39 description="Python client for Docker.", 40 url='https://github.com/docker/docker-py/', 41 packages=[ 42 'docker', 'docker.api', 'docker.auth', 'docker.transport', 43 'docker.utils', 'docker.utils.ports', 'docker.ssladapter', 44 'docker.types', 45 ], 46 install_requires=requirements, 47 tests_require=test_requirements, 48 extras_require=extras_require, 49 zip_safe=False, 50 test_suite='tests', 51 classifiers=[ 52 'Development Status :: 4 - Beta', 53 'Environment :: Other Environment', 54 'Intended Audience :: Developers', 55 'Operating System :: OS Independent', 56 'Programming Language :: Python', 57 'Programming Language :: Python :: 2', 58 'Programming Language :: Python :: 2.6', 59 'Programming Language :: Python :: 2.7', 60 'Programming Language :: Python :: 3', 61 'Programming Language :: Python :: 3.3', 62 'Programming Language :: Python :: 3.4', 63 'Programming Language :: Python :: 3.5', 64 'Topic :: Utilities', 65 'License :: OSI Approved :: Apache Software License', 66 ], 67 ) 68 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three 
backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -33,10 +33,20 @@ test_requirements = [line for line in test_reqs_txt] +long_description = '' +try: + with open('./README.rst') as readme_rst: + long_description = readme_rst.read() +except IOError: + # README.rst is only generated on release. Its absence should not prevent + # setup.py from working properly. + pass + setup( name="docker-py", version=version, description="Python client for Docker.", + long_description=long_description, url='https://github.com/docker/docker-py/', packages=[ 'docker', 'docker.api', 'docker.auth', 'docker.transport', @@ -64,4 +74,6 @@ 'Topic :: Utilities', 'License :: OSI Approved :: Apache Software License', ], + maintainer='Joffrey F', + maintainer_email='[email protected]', )
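The accepted patch above adds an optional `long_description` read from `README.rst`, tolerating its absence, plus maintainer metadata. The read-with-fallback pattern can be exercised on its own; this sketch mirrors the patched logic (the trailing `print` is only there to make the fallback visible):

```python
# Read an optional README into long_description, as in the patch above.
# README.rst is only generated on release, so a missing file is non-fatal.
long_description = ''
try:
    with open('./README.rst') as readme_rst:
        long_description = readme_rst.read()
except IOError:
    # Keep the empty-string fallback so setup() still works from a plain checkout.
    pass

print(repr(long_description[:60]))
```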
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,10 +33,20 @@\n test_requirements = [line for line in test_reqs_txt]\n \n \n+long_description = ''\n+try:\n+ with open('./README.rst') as readme_rst:\n+ long_description = readme_rst.read()\n+except IOError:\n+ # README.rst is only generated on release. Its absence should not prevent\n+ # setup.py from working properly.\n+ pass\n+\n setup(\n name=\"docker-py\",\n version=version,\n description=\"Python client for Docker.\",\n+ long_description=long_description,\n url='https://github.com/docker/docker-py/',\n packages=[\n 'docker', 'docker.api', 'docker.auth', 'docker.transport',\n@@ -64,4 +74,6 @@\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n+ maintainer='Joffrey F',\n+ maintainer_email='[email protected]',\n )\n", "issue": "create a sans-io docker client class and impliment Client with blocking requests\nuse https://github.com/mikeal/deferred to create a sans-io version of Client (SansIOClient) that requires something like:\n\n``` python\nclass SimpleStream(object):\n def next(self) -> Deferred:\n ...\n\nclass IOAdapter(object):\n def request(self, **kwargs) -> Deferred:\n ...\n\n def stream(self, **kwargs) -> SimpleStream:\n ...\n\n def unwrap_deferred(self, deferred: Deferred) -> Any:\n ...\n```\n\nand then implement it with something like:\n\n``` python\nclass BlockingSimpleStream(SimpleStream):\n def __init__(self, stream):\n self.generator = _stream_helper(stream):\n def next(self) -> Deferred:\n return deferred.succeeded(next(self.generator))\n\nclass BlockingIOAdapter(IOAdapter):\n def __init__(session: requests.Session):\n self.session = session\n\n def request(self, **kwargs) -> Deferred:\n return deferred.execute(self.session.request, **kwargs)\n\n def stream(self, **kwargs) -> BlockingSimpleStream:\n return BlockingSimpleStream(self.session.request(**kwargs, stream=True))\n\n def unwrap_deferred(self, d: Deferred):\n return deferred.waitForDeferred(d).getResult()\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'requests >= 2.5.2, < 2.11',\n 'six >= 1.4.0',\n 'websocket-client >= 0.32.0',\n 'docker-pycreds >= 0.2.1'\n]\n\nif sys.platform == 'win32':\n requirements.append('pypiwin32 >= 219')\n\nextras_require = {\n ':python_version < \"3.5\"': 'backports.ssl_match_hostname >= 3.5',\n # While not imported explicitly, the ipaddress module is required for\n # ssl_match_hostname to verify hosts match with certificates via\n # ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname\n ':python_version < \"3.3\"': 'ipaddress >= 1.0.16',\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nsetup(\n name=\"docker-py\",\n version=version,\n description=\"Python client for Docker.\",\n url='https://github.com/docker/docker-py/',\n packages=[\n 'docker', 'docker.api', 'docker.auth', 'docker.transport',\n 'docker.utils', 'docker.utils.ports', 'docker.ssladapter',\n 'docker.types',\n ],\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 
'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'requests >= 2.5.2, < 2.11',\n 'six >= 1.4.0',\n 'websocket-client >= 0.32.0',\n 'docker-pycreds >= 0.2.1'\n]\n\nif sys.platform == 'win32':\n requirements.append('pypiwin32 >= 219')\n\nextras_require = {\n ':python_version < \"3.5\"': 'backports.ssl_match_hostname >= 3.5',\n # While not imported explicitly, the ipaddress module is required for\n # ssl_match_hostname to verify hosts match with certificates via\n # ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname\n ':python_version < \"3.3\"': 'ipaddress >= 1.0.16',\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nlong_description = ''\ntry:\n with open('./README.rst') as readme_rst:\n long_description = readme_rst.read()\nexcept IOError:\n # README.rst is only generated on release. Its absence should not prevent\n # setup.py from working properly.\n pass\n\nsetup(\n name=\"docker-py\",\n version=version,\n description=\"Python client for Docker.\",\n long_description=long_description,\n url='https://github.com/docker/docker-py/',\n packages=[\n 'docker', 'docker.api', 'docker.auth', 'docker.transport',\n 'docker.utils', 'docker.utils.ports', 'docker.ssladapter',\n 'docker.types',\n ],\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n maintainer='Joffrey F',\n maintainer_email='[email protected]',\n)\n", "path": "setup.py"}]}
1,164
233
gh_patches_debug_12074
rasdani/github-patches
git_diff
quantumlib__Cirq-1946
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pytest fails but travis says green https://travis-ci.com/quantumlib/Cirq/jobs/225040090 ``` 4 failed, 7087 passed, 26 skipped, 3 warnings in 96.07 seconds The command "check/pytest --ignore=cirq/contrib --benchmark-skip --actually-quiet" exited with 0. ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cirq/ops/phased_x_gate.py` Content: ``` 1 # Copyright 2018 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """An `XPowGate` conjugated by `ZPowGate`s.""" 16 from typing import Union, Sequence, Tuple, Optional, cast 17 18 import math 19 import numpy as np 20 import sympy 21 22 import cirq 23 from cirq import value, protocols 24 from cirq._compat import proper_repr 25 from cirq.ops import gate_features, raw_types, op_tree, common_gates 26 from cirq.type_workarounds import NotImplementedType 27 28 29 @value.value_equality(manual_cls=True) 30 class PhasedXPowGate(gate_features.SingleQubitGate): 31 """A gate equivalent to the circuit ───Z^-p───X^t───Z^p───.""" 32 33 def __init__(self, 34 *, 35 phase_exponent: Union[float, sympy.Symbol], 36 exponent: Union[float, sympy.Symbol] = 1.0, 37 global_shift: float = 0.0) -> None: 38 """ 39 Args: 40 phase_exponent: The exponent on the Z gates conjugating the X gate. 41 exponent: The exponent on the X gate conjugated by Zs. 42 global_shift: How much to shift the operation's eigenvalues at 43 exponent=1. 
44 """ 45 self._phase_exponent = value.canonicalize_half_turns(phase_exponent) 46 self._exponent = exponent 47 self._global_shift = global_shift 48 49 def _qasm_(self, 50 args: protocols.QasmArgs, 51 qubits: Tuple[raw_types.Qid, ...]) -> Optional[str]: 52 if cirq.is_parameterized(self): 53 return None 54 55 args.validate_version('2.0') 56 57 e = cast(float, value.canonicalize_half_turns(self._exponent)) 58 p = cast(float, self.phase_exponent) 59 epsilon = 10**-args.precision 60 61 if abs(e + 0.5) <= epsilon: 62 return args.format('u2({0:half_turns}, {1:half_turns}) {2};\n', 63 p + 0.5, -p - 0.5, qubits[0]) 64 65 if abs(e - 0.5) <= epsilon: 66 return args.format('u2({0:half_turns}, {1:half_turns}) {2};\n', 67 p - 0.5, -p + 0.5, qubits[0]) 68 69 return args.format( 70 'u3({0:half_turns}, {1:half_turns}, {2:half_turns}) {3};\n', 71 -e, p + 0.5, -p - 0.5, qubits[0]) 72 73 def _decompose_(self, qubits: Sequence[raw_types.Qid] 74 ) -> op_tree.OP_TREE: 75 assert len(qubits) == 1 76 q = qubits[0] 77 z = cirq.Z(q)**self._phase_exponent 78 x = cirq.X(q)**self._exponent 79 if protocols.is_parameterized(z): 80 return NotImplemented 81 return z**-1, x, z 82 83 @property 84 def exponent(self) -> Union[float, sympy.Symbol]: 85 """The exponent on the central X gate conjugated by the Z gates.""" 86 return self._exponent 87 88 @property 89 def phase_exponent(self) -> Union[float, sympy.Symbol]: 90 """The exponent on the Z gates conjugating the X gate.""" 91 return self._phase_exponent 92 93 def __pow__(self, exponent: Union[float, sympy.Symbol]) -> 'PhasedXPowGate': 94 new_exponent = protocols.mul(self._exponent, exponent, NotImplemented) 95 if new_exponent is NotImplemented: 96 return NotImplemented 97 return PhasedXPowGate(phase_exponent=self._phase_exponent, 98 exponent=new_exponent, 99 global_shift=self._global_shift) 100 101 def _trace_distance_bound_(self) -> Optional[float]: 102 if self._is_parameterized_(): 103 return None 104 return abs(np.sin(self._exponent * 0.5 * np.pi)) 105 106 def _unitary_(self) -> Union[np.ndarray, NotImplementedType]: 107 """See `cirq.SupportsUnitary`.""" 108 if self._is_parameterized_(): 109 return NotImplemented 110 z = protocols.unitary(cirq.Z**self._phase_exponent) 111 x = protocols.unitary(cirq.X**self._exponent) 112 p = np.exp(1j * np.pi * self._global_shift * self._exponent) 113 return np.dot(np.dot(z, x), np.conj(z)) * p 114 115 def _pauli_expansion_(self) -> value.LinearDict[str]: 116 if self._is_parameterized_(): 117 return NotImplemented 118 phase_angle = np.pi * self._phase_exponent / 2 119 angle = np.pi * self._exponent / 2 120 phase = 1j**(2 * self._exponent * (self._global_shift + 0.5)) 121 return value.LinearDict({ 122 'I': phase * np.cos(angle), 123 'X': -1j * phase * np.sin(angle) * np.cos(2 * phase_angle), 124 'Y': -1j * phase * np.sin(angle) * np.sin(2 * phase_angle), 125 }) 126 127 def _is_parameterized_(self) -> bool: 128 """See `cirq.SupportsParameterization`.""" 129 return (protocols.is_parameterized(self._exponent) or 130 protocols.is_parameterized(self._phase_exponent)) 131 132 def _resolve_parameters_(self, param_resolver) -> 'PhasedXPowGate': 133 """See `cirq.SupportsParameterization`.""" 134 return PhasedXPowGate( 135 phase_exponent=param_resolver.value_of(self._phase_exponent), 136 exponent=param_resolver.value_of(self._exponent), 137 global_shift=self._global_shift) 138 139 def _phase_by_(self, phase_turns, qubit_index): 140 """See `cirq.SupportsPhase`.""" 141 assert qubit_index == 0 142 return PhasedXPowGate( 143 exponent=self._exponent, 144 
phase_exponent=self._phase_exponent + phase_turns * 2, 145 global_shift=self._global_shift) 146 147 def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs 148 ) -> protocols.CircuitDiagramInfo: 149 """See `cirq.SupportsCircuitDiagramInfo`.""" 150 151 if (isinstance(self.phase_exponent, sympy.Symbol) or 152 args.precision is None): 153 s = 'PhasedX({})'.format(self.phase_exponent) 154 else: 155 s = 'PhasedX({{:.{}f}})'.format(args.precision).format( 156 self.phase_exponent) 157 return protocols.CircuitDiagramInfo( 158 wire_symbols=(s,), 159 exponent=value.canonicalize_half_turns(self._exponent)) 160 161 def __str__(self): 162 info = protocols.circuit_diagram_info(self) 163 if info.exponent == 1: 164 return info.wire_symbols[0] 165 return '{}^{}'.format(info.wire_symbols[0], info.exponent) 166 167 def __repr__(self): 168 args = ['phase_exponent={}'.format(proper_repr(self.phase_exponent))] 169 if self.exponent != 1: 170 args.append('exponent={}'.format(proper_repr(self.exponent))) 171 if self._global_shift != 0: 172 args.append('global_shift={!r}'.format(self._global_shift)) 173 return 'cirq.PhasedXPowGate({})'.format(', '.join(args)) 174 175 def _period(self): 176 exponents = [self._global_shift, 1 + self._global_shift] 177 real_periods = [abs(2/e) for e in exponents if e != 0] 178 int_periods = [int(np.round(e)) for e in real_periods] 179 if any(i != r for i, r in zip(real_periods, int_periods)): 180 return None 181 if len(int_periods) == 1: 182 return int_periods[0] 183 return int_periods[0] * int_periods[1] / math.gcd(*int_periods) 184 185 @property 186 def _canonical_exponent(self): 187 period = self._period() 188 if not period or isinstance(self._exponent, sympy.Symbol): 189 return self._exponent 190 191 return self._exponent % period 192 193 def _value_equality_values_cls_(self): 194 if self.phase_exponent == 0: 195 return common_gates.XPowGate 196 if self.phase_exponent == 0.5: 197 return common_gates.YPowGate 198 return PhasedXPowGate 199 200 def _value_equality_values_(self): 201 if self.phase_exponent == 0: 202 return common_gates.XPowGate( 203 exponent=self._exponent, 204 global_shift=self._global_shift)._value_equality_values_() 205 if self.phase_exponent == 0.5: 206 return common_gates.YPowGate( 207 exponent=self._exponent, 208 global_shift=self._global_shift)._value_equality_values_() 209 return self.phase_exponent, self._canonical_exponent, self._global_shift 210 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cirq/ops/phased_x_gate.py b/cirq/ops/phased_x_gate.py --- a/cirq/ops/phased_x_gate.py +++ b/cirq/ops/phased_x_gate.py @@ -151,8 +151,10 @@ if (isinstance(self.phase_exponent, sympy.Symbol) or args.precision is None): s = 'PhasedX({})'.format(self.phase_exponent) + elif isinstance(self.phase_exponent, int): + s = 'PhasedX({})'.format(self.phase_exponent) else: - s = 'PhasedX({{:.{}f}})'.format(args.precision).format( + s = 'PhasedX({{:.{}}})'.format(args.precision).format( self.phase_exponent) return protocols.CircuitDiagramInfo( wire_symbols=(s,),
{"golden_diff": "diff --git a/cirq/ops/phased_x_gate.py b/cirq/ops/phased_x_gate.py\n--- a/cirq/ops/phased_x_gate.py\n+++ b/cirq/ops/phased_x_gate.py\n@@ -151,8 +151,10 @@\n if (isinstance(self.phase_exponent, sympy.Symbol) or\n args.precision is None):\n s = 'PhasedX({})'.format(self.phase_exponent)\n+ elif isinstance(self.phase_exponent, int):\n+ s = 'PhasedX({})'.format(self.phase_exponent)\n else:\n- s = 'PhasedX({{:.{}f}})'.format(args.precision).format(\n+ s = 'PhasedX({{:.{}}})'.format(args.precision).format(\n self.phase_exponent)\n return protocols.CircuitDiagramInfo(\n wire_symbols=(s,),\n", "issue": "pytest fails but travis says green\nhttps://travis-ci.com/quantumlib/Cirq/jobs/225040090\r\n\r\n```\r\n4 failed, 7087 passed, 26 skipped, 3 warnings in 96.07 seconds\r\nThe command \"check/pytest --ignore=cirq/contrib --benchmark-skip --actually-quiet\" exited with 0.\r\n```\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An `XPowGate` conjugated by `ZPowGate`s.\"\"\"\nfrom typing import Union, Sequence, Tuple, Optional, cast\n\nimport math\nimport numpy as np\nimport sympy\n\nimport cirq\nfrom cirq import value, protocols\nfrom cirq._compat import proper_repr\nfrom cirq.ops import gate_features, raw_types, op_tree, common_gates\nfrom cirq.type_workarounds import NotImplementedType\n\n\[email protected]_equality(manual_cls=True)\nclass PhasedXPowGate(gate_features.SingleQubitGate):\n \"\"\"A gate equivalent to the circuit \u2500\u2500\u2500Z^-p\u2500\u2500\u2500X^t\u2500\u2500\u2500Z^p\u2500\u2500\u2500.\"\"\"\n\n def __init__(self,\n *,\n phase_exponent: Union[float, sympy.Symbol],\n exponent: Union[float, sympy.Symbol] = 1.0,\n global_shift: float = 0.0) -> None:\n \"\"\"\n Args:\n phase_exponent: The exponent on the Z gates conjugating the X gate.\n exponent: The exponent on the X gate conjugated by Zs.\n global_shift: How much to shift the operation's eigenvalues at\n exponent=1.\n \"\"\"\n self._phase_exponent = value.canonicalize_half_turns(phase_exponent)\n self._exponent = exponent\n self._global_shift = global_shift\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.Qid, ...]) -> Optional[str]:\n if cirq.is_parameterized(self):\n return None\n\n args.validate_version('2.0')\n\n e = cast(float, value.canonicalize_half_turns(self._exponent))\n p = cast(float, self.phase_exponent)\n epsilon = 10**-args.precision\n\n if abs(e + 0.5) <= epsilon:\n return args.format('u2({0:half_turns}, {1:half_turns}) {2};\\n',\n p + 0.5, -p - 0.5, qubits[0])\n\n if abs(e - 0.5) <= epsilon:\n return args.format('u2({0:half_turns}, {1:half_turns}) {2};\\n',\n p - 0.5, -p + 0.5, qubits[0])\n\n return args.format(\n 'u3({0:half_turns}, {1:half_turns}, {2:half_turns}) {3};\\n',\n -e, p + 0.5, -p - 0.5, qubits[0])\n\n def _decompose_(self, qubits: Sequence[raw_types.Qid]\n ) -> op_tree.OP_TREE:\n assert len(qubits) == 1\n q = qubits[0]\n z = cirq.Z(q)**self._phase_exponent\n x = 
cirq.X(q)**self._exponent\n if protocols.is_parameterized(z):\n return NotImplemented\n return z**-1, x, z\n\n @property\n def exponent(self) -> Union[float, sympy.Symbol]:\n \"\"\"The exponent on the central X gate conjugated by the Z gates.\"\"\"\n return self._exponent\n\n @property\n def phase_exponent(self) -> Union[float, sympy.Symbol]:\n \"\"\"The exponent on the Z gates conjugating the X gate.\"\"\"\n return self._phase_exponent\n\n def __pow__(self, exponent: Union[float, sympy.Symbol]) -> 'PhasedXPowGate':\n new_exponent = protocols.mul(self._exponent, exponent, NotImplemented)\n if new_exponent is NotImplemented:\n return NotImplemented\n return PhasedXPowGate(phase_exponent=self._phase_exponent,\n exponent=new_exponent,\n global_shift=self._global_shift)\n\n def _trace_distance_bound_(self) -> Optional[float]:\n if self._is_parameterized_():\n return None\n return abs(np.sin(self._exponent * 0.5 * np.pi))\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n \"\"\"See `cirq.SupportsUnitary`.\"\"\"\n if self._is_parameterized_():\n return NotImplemented\n z = protocols.unitary(cirq.Z**self._phase_exponent)\n x = protocols.unitary(cirq.X**self._exponent)\n p = np.exp(1j * np.pi * self._global_shift * self._exponent)\n return np.dot(np.dot(z, x), np.conj(z)) * p\n\n def _pauli_expansion_(self) -> value.LinearDict[str]:\n if self._is_parameterized_():\n return NotImplemented\n phase_angle = np.pi * self._phase_exponent / 2\n angle = np.pi * self._exponent / 2\n phase = 1j**(2 * self._exponent * (self._global_shift + 0.5))\n return value.LinearDict({\n 'I': phase * np.cos(angle),\n 'X': -1j * phase * np.sin(angle) * np.cos(2 * phase_angle),\n 'Y': -1j * phase * np.sin(angle) * np.sin(2 * phase_angle),\n })\n\n def _is_parameterized_(self) -> bool:\n \"\"\"See `cirq.SupportsParameterization`.\"\"\"\n return (protocols.is_parameterized(self._exponent) or\n protocols.is_parameterized(self._phase_exponent))\n\n def _resolve_parameters_(self, param_resolver) -> 'PhasedXPowGate':\n \"\"\"See `cirq.SupportsParameterization`.\"\"\"\n return PhasedXPowGate(\n phase_exponent=param_resolver.value_of(self._phase_exponent),\n exponent=param_resolver.value_of(self._exponent),\n global_shift=self._global_shift)\n\n def _phase_by_(self, phase_turns, qubit_index):\n \"\"\"See `cirq.SupportsPhase`.\"\"\"\n assert qubit_index == 0\n return PhasedXPowGate(\n exponent=self._exponent,\n phase_exponent=self._phase_exponent + phase_turns * 2,\n global_shift=self._global_shift)\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n \"\"\"See `cirq.SupportsCircuitDiagramInfo`.\"\"\"\n\n if (isinstance(self.phase_exponent, sympy.Symbol) or\n args.precision is None):\n s = 'PhasedX({})'.format(self.phase_exponent)\n else:\n s = 'PhasedX({{:.{}f}})'.format(args.precision).format(\n self.phase_exponent)\n return protocols.CircuitDiagramInfo(\n wire_symbols=(s,),\n exponent=value.canonicalize_half_turns(self._exponent))\n\n def __str__(self):\n info = protocols.circuit_diagram_info(self)\n if info.exponent == 1:\n return info.wire_symbols[0]\n return '{}^{}'.format(info.wire_symbols[0], info.exponent)\n\n def __repr__(self):\n args = ['phase_exponent={}'.format(proper_repr(self.phase_exponent))]\n if self.exponent != 1:\n args.append('exponent={}'.format(proper_repr(self.exponent)))\n if self._global_shift != 0:\n args.append('global_shift={!r}'.format(self._global_shift))\n return 'cirq.PhasedXPowGate({})'.format(', '.join(args))\n\n def 
_period(self):\n exponents = [self._global_shift, 1 + self._global_shift]\n real_periods = [abs(2/e) for e in exponents if e != 0]\n int_periods = [int(np.round(e)) for e in real_periods]\n if any(i != r for i, r in zip(real_periods, int_periods)):\n return None\n if len(int_periods) == 1:\n return int_periods[0]\n return int_periods[0] * int_periods[1] / math.gcd(*int_periods)\n\n @property\n def _canonical_exponent(self):\n period = self._period()\n if not period or isinstance(self._exponent, sympy.Symbol):\n return self._exponent\n\n return self._exponent % period\n\n def _value_equality_values_cls_(self):\n if self.phase_exponent == 0:\n return common_gates.XPowGate\n if self.phase_exponent == 0.5:\n return common_gates.YPowGate\n return PhasedXPowGate\n\n def _value_equality_values_(self):\n if self.phase_exponent == 0:\n return common_gates.XPowGate(\n exponent=self._exponent,\n global_shift=self._global_shift)._value_equality_values_()\n if self.phase_exponent == 0.5:\n return common_gates.YPowGate(\n exponent=self._exponent,\n global_shift=self._global_shift)._value_equality_values_()\n return self.phase_exponent, self._canonical_exponent, self._global_shift\n", "path": "cirq/ops/phased_x_gate.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An `XPowGate` conjugated by `ZPowGate`s.\"\"\"\nfrom typing import Union, Sequence, Tuple, Optional, cast\n\nimport math\nimport numpy as np\nimport sympy\n\nimport cirq\nfrom cirq import value, protocols\nfrom cirq._compat import proper_repr\nfrom cirq.ops import gate_features, raw_types, op_tree, common_gates\nfrom cirq.type_workarounds import NotImplementedType\n\n\[email protected]_equality(manual_cls=True)\nclass PhasedXPowGate(gate_features.SingleQubitGate):\n \"\"\"A gate equivalent to the circuit \u2500\u2500\u2500Z^-p\u2500\u2500\u2500X^t\u2500\u2500\u2500Z^p\u2500\u2500\u2500.\"\"\"\n\n def __init__(self,\n *,\n phase_exponent: Union[float, sympy.Symbol],\n exponent: Union[float, sympy.Symbol] = 1.0,\n global_shift: float = 0.0) -> None:\n \"\"\"\n Args:\n phase_exponent: The exponent on the Z gates conjugating the X gate.\n exponent: The exponent on the X gate conjugated by Zs.\n global_shift: How much to shift the operation's eigenvalues at\n exponent=1.\n \"\"\"\n self._phase_exponent = value.canonicalize_half_turns(phase_exponent)\n self._exponent = exponent\n self._global_shift = global_shift\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.Qid, ...]) -> Optional[str]:\n if cirq.is_parameterized(self):\n return None\n\n args.validate_version('2.0')\n\n e = cast(float, value.canonicalize_half_turns(self._exponent))\n p = cast(float, self.phase_exponent)\n epsilon = 10**-args.precision\n\n if abs(e + 0.5) <= epsilon:\n return args.format('u2({0:half_turns}, {1:half_turns}) {2};\\n',\n p + 0.5, -p - 0.5, qubits[0])\n\n if abs(e - 0.5) <= epsilon:\n return args.format('u2({0:half_turns}, {1:half_turns}) {2};\\n',\n p - 0.5, -p 
+ 0.5, qubits[0])\n\n return args.format(\n 'u3({0:half_turns}, {1:half_turns}, {2:half_turns}) {3};\\n',\n -e, p + 0.5, -p - 0.5, qubits[0])\n\n def _decompose_(self, qubits: Sequence[raw_types.Qid]\n ) -> op_tree.OP_TREE:\n assert len(qubits) == 1\n q = qubits[0]\n z = cirq.Z(q)**self._phase_exponent\n x = cirq.X(q)**self._exponent\n if protocols.is_parameterized(z):\n return NotImplemented\n return z**-1, x, z\n\n @property\n def exponent(self) -> Union[float, sympy.Symbol]:\n \"\"\"The exponent on the central X gate conjugated by the Z gates.\"\"\"\n return self._exponent\n\n @property\n def phase_exponent(self) -> Union[float, sympy.Symbol]:\n \"\"\"The exponent on the Z gates conjugating the X gate.\"\"\"\n return self._phase_exponent\n\n def __pow__(self, exponent: Union[float, sympy.Symbol]) -> 'PhasedXPowGate':\n new_exponent = protocols.mul(self._exponent, exponent, NotImplemented)\n if new_exponent is NotImplemented:\n return NotImplemented\n return PhasedXPowGate(phase_exponent=self._phase_exponent,\n exponent=new_exponent,\n global_shift=self._global_shift)\n\n def _trace_distance_bound_(self) -> Optional[float]:\n if self._is_parameterized_():\n return None\n return abs(np.sin(self._exponent * 0.5 * np.pi))\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n \"\"\"See `cirq.SupportsUnitary`.\"\"\"\n if self._is_parameterized_():\n return NotImplemented\n z = protocols.unitary(cirq.Z**self._phase_exponent)\n x = protocols.unitary(cirq.X**self._exponent)\n p = np.exp(1j * np.pi * self._global_shift * self._exponent)\n return np.dot(np.dot(z, x), np.conj(z)) * p\n\n def _pauli_expansion_(self) -> value.LinearDict[str]:\n if self._is_parameterized_():\n return NotImplemented\n phase_angle = np.pi * self._phase_exponent / 2\n angle = np.pi * self._exponent / 2\n phase = 1j**(2 * self._exponent * (self._global_shift + 0.5))\n return value.LinearDict({\n 'I': phase * np.cos(angle),\n 'X': -1j * phase * np.sin(angle) * np.cos(2 * phase_angle),\n 'Y': -1j * phase * np.sin(angle) * np.sin(2 * phase_angle),\n })\n\n def _is_parameterized_(self) -> bool:\n \"\"\"See `cirq.SupportsParameterization`.\"\"\"\n return (protocols.is_parameterized(self._exponent) or\n protocols.is_parameterized(self._phase_exponent))\n\n def _resolve_parameters_(self, param_resolver) -> 'PhasedXPowGate':\n \"\"\"See `cirq.SupportsParameterization`.\"\"\"\n return PhasedXPowGate(\n phase_exponent=param_resolver.value_of(self._phase_exponent),\n exponent=param_resolver.value_of(self._exponent),\n global_shift=self._global_shift)\n\n def _phase_by_(self, phase_turns, qubit_index):\n \"\"\"See `cirq.SupportsPhase`.\"\"\"\n assert qubit_index == 0\n return PhasedXPowGate(\n exponent=self._exponent,\n phase_exponent=self._phase_exponent + phase_turns * 2,\n global_shift=self._global_shift)\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n \"\"\"See `cirq.SupportsCircuitDiagramInfo`.\"\"\"\n\n if (isinstance(self.phase_exponent, sympy.Symbol) or\n args.precision is None):\n s = 'PhasedX({})'.format(self.phase_exponent)\n elif isinstance(self.phase_exponent, int):\n s = 'PhasedX({})'.format(self.phase_exponent)\n else:\n s = 'PhasedX({{:.{}}})'.format(args.precision).format(\n self.phase_exponent)\n return protocols.CircuitDiagramInfo(\n wire_symbols=(s,),\n exponent=value.canonicalize_half_turns(self._exponent))\n\n def __str__(self):\n info = protocols.circuit_diagram_info(self)\n if info.exponent == 1:\n return info.wire_symbols[0]\n 
return '{}^{}'.format(info.wire_symbols[0], info.exponent)\n\n def __repr__(self):\n args = ['phase_exponent={}'.format(proper_repr(self.phase_exponent))]\n if self.exponent != 1:\n args.append('exponent={}'.format(proper_repr(self.exponent)))\n if self._global_shift != 0:\n args.append('global_shift={!r}'.format(self._global_shift))\n return 'cirq.PhasedXPowGate({})'.format(', '.join(args))\n\n def _period(self):\n exponents = [self._global_shift, 1 + self._global_shift]\n real_periods = [abs(2/e) for e in exponents if e != 0]\n int_periods = [int(np.round(e)) for e in real_periods]\n if any(i != r for i, r in zip(real_periods, int_periods)):\n return None\n if len(int_periods) == 1:\n return int_periods[0]\n return int_periods[0] * int_periods[1] / math.gcd(*int_periods)\n\n @property\n def _canonical_exponent(self):\n period = self._period()\n if not period or isinstance(self._exponent, sympy.Symbol):\n return self._exponent\n\n return self._exponent % period\n\n def _value_equality_values_cls_(self):\n if self.phase_exponent == 0:\n return common_gates.XPowGate\n if self.phase_exponent == 0.5:\n return common_gates.YPowGate\n return PhasedXPowGate\n\n def _value_equality_values_(self):\n if self.phase_exponent == 0:\n return common_gates.XPowGate(\n exponent=self._exponent,\n global_shift=self._global_shift)._value_equality_values_()\n if self.phase_exponent == 0.5:\n return common_gates.YPowGate(\n exponent=self._exponent,\n global_shift=self._global_shift)._value_equality_values_()\n return self.phase_exponent, self._canonical_exponent, self._global_shift\n", "path": "cirq/ops/phased_x_gate.py"}]}
num_tokens: 2,987
num_tokens_diff: 195

problem_id: gh_patches_debug_39188
source: rasdani/github-patches
task_type: git_diff
in_source_id: Cog-Creators__Red-DiscordBot-5902
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add file output for code evaluation commands & Better long output handling ### What component of Red (cog, command, API) would you like to see improvements on? Dev ### Describe the enhancement you're suggesting. Currently when evaluating code with a large response, it would be shown in multiple messages. (when typing more) Dropping a file would be way more convenient. Furthermore instead of typing more, a button will be nice. When clicking the button, the button disappears and a new message with a new button appears. (if there is further output) ### Anything else? _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `redbot/core/commands/context.py` Content: ``` 1 from __future__ import annotations 2 3 import asyncio 4 import contextlib 5 import os 6 import re 7 from typing import Iterable, List, Union, Optional, TYPE_CHECKING 8 import discord 9 from discord.ext.commands import Context as DPYContext 10 11 from .requires import PermState 12 from ..utils.chat_formatting import box 13 from ..utils.predicates import MessagePredicate 14 from ..utils import can_user_react_in, common_filters 15 16 if TYPE_CHECKING: 17 from .commands import Command 18 from ..bot import Red 19 20 TICK = "\N{WHITE HEAVY CHECK MARK}" 21 22 __all__ = ["Context", "GuildContext", "DMContext"] 23 24 25 class Context(DPYContext): 26 """Command invocation context for Red. 27 28 All context passed into commands will be of this type. 29 30 This class inherits from `discord.ext.commands.Context`. 31 32 Attributes 33 ---------- 34 assume_yes: bool 35 Whether or not interactive checks should 36 be skipped and assumed to be confirmed. 37 38 This is intended for allowing automation of tasks. 39 40 An example of this would be scheduled commands 41 not requiring interaction if the cog developer 42 checks this value prior to confirming something interactively. 43 44 Depending on the potential impact of a command, 45 it may still be appropriate not to use this setting. 46 permission_state: PermState 47 The permission state the current context is in. 48 """ 49 50 command: "Command" 51 invoked_subcommand: "Optional[Command]" 52 bot: "Red" 53 54 def __init__(self, **attrs): 55 self.assume_yes = attrs.pop("assume_yes", False) 56 super().__init__(**attrs) 57 self.permission_state: PermState = PermState.NORMAL 58 59 async def send(self, content=None, **kwargs): 60 """Sends a message to the destination with the content given. 61 62 This acts the same as `discord.ext.commands.Context.send`, with 63 one added keyword argument as detailed below in *Other Parameters*. 64 65 Parameters 66 ---------- 67 content : str 68 The content of the message to send. 69 70 Other Parameters 71 ---------------- 72 filter : callable (`str`) -> `str`, optional 73 A function which is used to filter the ``content`` before 74 it is sent. 75 This must take a single `str` as an argument, and return 76 the processed `str`. When `None` is passed, ``content`` won't be touched. 77 Defaults to `None`. 78 **kwargs 79 See `discord.ext.commands.Context.send`. 80 81 Returns 82 ------- 83 discord.Message 84 The message that was sent. 
85 86 """ 87 88 _filter = kwargs.pop("filter", None) 89 90 if _filter and content: 91 content = _filter(str(content)) 92 93 return await super().send(content=content, **kwargs) 94 95 async def send_help(self, command=None): 96 """Send the command help message.""" 97 # This allows people to manually use this similarly 98 # to the upstream d.py version, while retaining our use. 99 command = command or self.command 100 await self.bot.send_help_for(self, command) 101 102 async def tick(self, *, message: Optional[str] = None) -> bool: 103 """Add a tick reaction to the command message. 104 105 Keyword Arguments 106 ----------------- 107 message : str, optional 108 The message to send if adding the reaction doesn't succeed. 109 110 Returns 111 ------- 112 bool 113 :code:`True` if adding the reaction succeeded. 114 115 """ 116 return await self.react_quietly(TICK, message=message) 117 118 async def react_quietly( 119 self, 120 reaction: Union[discord.Emoji, discord.Reaction, discord.PartialEmoji, str], 121 *, 122 message: Optional[str] = None, 123 ) -> bool: 124 """Adds a reaction to the command message. 125 126 Parameters 127 ---------- 128 reaction : Union[discord.Emoji, discord.Reaction, discord.PartialEmoji, str] 129 The emoji to react with. 130 131 Keyword Arguments 132 ----------------- 133 message : str, optional 134 The message to send if adding the reaction doesn't succeed. 135 136 Returns 137 ------- 138 bool 139 :code:`True` if adding the reaction succeeded. 140 """ 141 try: 142 if not can_user_react_in(self.me, self.channel): 143 raise RuntimeError 144 await self.message.add_reaction(reaction) 145 except (RuntimeError, discord.HTTPException): 146 if message is not None: 147 await self.send(message) 148 return False 149 else: 150 return True 151 152 async def send_interactive( 153 self, messages: Iterable[str], box_lang: str = None, timeout: int = 15 154 ) -> List[discord.Message]: 155 """Send multiple messages interactively. 156 157 The user will be prompted for whether or not they would like to view 158 the next message, one at a time. They will also be notified of how 159 many messages are remaining on each prompt. 160 161 Parameters 162 ---------- 163 messages : `iterable` of `str` 164 The messages to send. 165 box_lang : str 166 If specified, each message will be contained within a codeblock of 167 this language. 168 timeout : int 169 How long the user has to respond to the prompt before it times out. 170 After timing out, the bot deletes its prompt message. 171 172 """ 173 messages = tuple(messages) 174 ret = [] 175 176 for idx, page in enumerate(messages, 1): 177 if box_lang is None: 178 msg = await self.send(page) 179 else: 180 msg = await self.send(box(page, lang=box_lang)) 181 ret.append(msg) 182 n_remaining = len(messages) - idx 183 if n_remaining > 0: 184 if n_remaining == 1: 185 plural = "" 186 is_are = "is" 187 else: 188 plural = "s" 189 is_are = "are" 190 query = await self.send( 191 "There {} still {} message{} remaining. " 192 "Type `more` to continue." 
193 "".format(is_are, n_remaining, plural) 194 ) 195 try: 196 resp = await self.bot.wait_for( 197 "message", 198 check=MessagePredicate.lower_equal_to("more", self), 199 timeout=timeout, 200 ) 201 except asyncio.TimeoutError: 202 with contextlib.suppress(discord.HTTPException): 203 await query.delete() 204 break 205 else: 206 try: 207 await self.channel.delete_messages((query, resp)) 208 except (discord.HTTPException, AttributeError): 209 # In case the bot can't delete other users' messages, 210 # or is not a bot account 211 # or channel is a DM 212 with contextlib.suppress(discord.HTTPException): 213 await query.delete() 214 return ret 215 216 async def embed_colour(self): 217 """ 218 Helper function to get the colour for an embed. 219 220 Returns 221 ------- 222 discord.Colour: 223 The colour to be used 224 """ 225 return await self.bot.get_embed_color(self) 226 227 @property 228 def embed_color(self): 229 # Rather than double awaiting. 230 return self.embed_colour 231 232 async def embed_requested(self): 233 """ 234 Short-hand for calling bot.embed_requested with permission checks. 235 236 Equivalent to: 237 238 .. code:: python 239 240 await ctx.bot.embed_requested(ctx) 241 242 Returns 243 ------- 244 bool: 245 :code:`True` if an embed is requested 246 """ 247 return await self.bot.embed_requested(self) 248 249 async def maybe_send_embed(self, message: str) -> discord.Message: 250 """ 251 Simple helper to send a simple message to context 252 without manually checking ctx.embed_requested 253 This should only be used for simple messages. 254 255 Parameters 256 ---------- 257 message: `str` 258 The string to send 259 260 Returns 261 ------- 262 discord.Message: 263 the message which was sent 264 265 Raises 266 ------ 267 discord.Forbidden 268 see `discord.abc.Messageable.send` 269 discord.HTTPException 270 see `discord.abc.Messageable.send` 271 ValueError 272 when the message's length is not between 1 and 2000 characters. 273 """ 274 if not message or len(message) > 2000: 275 raise ValueError("Message length must be between 1 and 2000") 276 if await self.embed_requested(): 277 return await self.send( 278 embed=discord.Embed(description=message, color=(await self.embed_colour())) 279 ) 280 else: 281 return await self.send( 282 message, 283 allowed_mentions=discord.AllowedMentions(everyone=False, roles=False, users=False), 284 ) 285 286 @property 287 def me(self) -> Union[discord.ClientUser, discord.Member]: 288 """ 289 discord.abc.User: The bot member or user object. 290 291 If the context is DM, this will be a `discord.User` object. 292 """ 293 if self.guild is not None: 294 return self.guild.me 295 else: 296 return self.bot.user 297 298 299 if TYPE_CHECKING or os.getenv("BUILDING_DOCS", False): 300 301 class DMContext(Context): 302 """ 303 At runtime, this will still be a normal context object. 304 305 This lies about some type narrowing for type analysis in commands 306 using a dm_only decorator. 307 308 It is only correct to use when those types are already narrowed 309 """ 310 311 @property 312 def author(self) -> discord.User: 313 ... 314 315 @property 316 def channel(self) -> discord.DMChannel: 317 ... 318 319 @property 320 def guild(self) -> None: 321 ... 322 323 @property 324 def me(self) -> discord.ClientUser: 325 ... 326 327 class GuildContext(Context): 328 """ 329 At runtime, this will still be a normal context object. 330 331 This lies about some type narrowing for type analysis in commands 332 using a guild_only decorator. 
333 334 It is only correct to use when those types are already narrowed 335 """ 336 337 @property 338 def author(self) -> discord.Member: 339 ... 340 341 @property 342 def channel(self) -> Union[discord.TextChannel, discord.VoiceChannel, discord.Thread]: 343 ... 344 345 @property 346 def guild(self) -> discord.Guild: 347 ... 348 349 @property 350 def me(self) -> discord.Member: 351 ... 352 353 else: 354 GuildContext = Context 355 DMContext = Context 356 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/redbot/core/commands/context.py b/redbot/core/commands/context.py --- a/redbot/core/commands/context.py +++ b/redbot/core/commands/context.py @@ -9,7 +9,7 @@ from discord.ext.commands import Context as DPYContext from .requires import PermState -from ..utils.chat_formatting import box +from ..utils.chat_formatting import box, text_to_file from ..utils.predicates import MessagePredicate from ..utils import can_user_react_in, common_filters @@ -150,7 +150,11 @@ return True async def send_interactive( - self, messages: Iterable[str], box_lang: str = None, timeout: int = 15 + self, + messages: Iterable[str], + box_lang: str = None, + timeout: int = 15, + join_character: str = "", ) -> List[discord.Message]: """Send multiple messages interactively. @@ -168,6 +172,9 @@ timeout : int How long the user has to respond to the prompt before it times out. After timing out, the bot deletes its prompt message. + join_character : str + The character used to join all the messages when the file output + is selected. """ messages = tuple(messages) @@ -189,13 +196,14 @@ is_are = "are" query = await self.send( "There {} still {} message{} remaining. " - "Type `more` to continue." + "Type `more` to continue or `file` to upload all contents as a file." "".format(is_are, n_remaining, plural) ) + pred = MessagePredicate.lower_contained_in(("more", "file"), self) try: resp = await self.bot.wait_for( "message", - check=MessagePredicate.lower_equal_to("more", self), + check=pred, timeout=timeout, ) except asyncio.TimeoutError: @@ -211,6 +219,9 @@ # or channel is a DM with contextlib.suppress(discord.HTTPException): await query.delete() + if pred.result == 1: + await self.send(file=text_to_file(join_character.join(messages))) + break return ret async def embed_colour(self):
{"golden_diff": "diff --git a/redbot/core/commands/context.py b/redbot/core/commands/context.py\n--- a/redbot/core/commands/context.py\n+++ b/redbot/core/commands/context.py\n@@ -9,7 +9,7 @@\n from discord.ext.commands import Context as DPYContext\n \n from .requires import PermState\n-from ..utils.chat_formatting import box\n+from ..utils.chat_formatting import box, text_to_file\n from ..utils.predicates import MessagePredicate\n from ..utils import can_user_react_in, common_filters\n \n@@ -150,7 +150,11 @@\n return True\n \n async def send_interactive(\n- self, messages: Iterable[str], box_lang: str = None, timeout: int = 15\n+ self,\n+ messages: Iterable[str],\n+ box_lang: str = None,\n+ timeout: int = 15,\n+ join_character: str = \"\",\n ) -> List[discord.Message]:\n \"\"\"Send multiple messages interactively.\n \n@@ -168,6 +172,9 @@\n timeout : int\n How long the user has to respond to the prompt before it times out.\n After timing out, the bot deletes its prompt message.\n+ join_character : str\n+ The character used to join all the messages when the file output\n+ is selected.\n \n \"\"\"\n messages = tuple(messages)\n@@ -189,13 +196,14 @@\n is_are = \"are\"\n query = await self.send(\n \"There {} still {} message{} remaining. \"\n- \"Type `more` to continue.\"\n+ \"Type `more` to continue or `file` to upload all contents as a file.\"\n \"\".format(is_are, n_remaining, plural)\n )\n+ pred = MessagePredicate.lower_contained_in((\"more\", \"file\"), self)\n try:\n resp = await self.bot.wait_for(\n \"message\",\n- check=MessagePredicate.lower_equal_to(\"more\", self),\n+ check=pred,\n timeout=timeout,\n )\n except asyncio.TimeoutError:\n@@ -211,6 +219,9 @@\n # or channel is a DM\n with contextlib.suppress(discord.HTTPException):\n await query.delete()\n+ if pred.result == 1:\n+ await self.send(file=text_to_file(join_character.join(messages)))\n+ break\n return ret\n \n async def embed_colour(self):\n", "issue": "Add file output for code evaluation commands & Better long output handling\n### What component of Red (cog, command, API) would you like to see improvements on?\n\nDev\n\n### Describe the enhancement you're suggesting.\n\nCurrently when evaluating code with a large response, it would be shown in multiple messages. (when typing more)\r\n\r\nDropping a file would be way more convenient. \r\n\r\nFurthermore instead of typing more, a button will be nice. When clicking the button, the button disappears and a new message with a new button appears. 
(if there is further output)\n\n### Anything else?\n\n_No response_\n", "before_files": [{"content": "from __future__ import annotations\n\nimport asyncio\nimport contextlib\nimport os\nimport re\nfrom typing import Iterable, List, Union, Optional, TYPE_CHECKING\nimport discord\nfrom discord.ext.commands import Context as DPYContext\n\nfrom .requires import PermState\nfrom ..utils.chat_formatting import box\nfrom ..utils.predicates import MessagePredicate\nfrom ..utils import can_user_react_in, common_filters\n\nif TYPE_CHECKING:\n from .commands import Command\n from ..bot import Red\n\nTICK = \"\\N{WHITE HEAVY CHECK MARK}\"\n\n__all__ = [\"Context\", \"GuildContext\", \"DMContext\"]\n\n\nclass Context(DPYContext):\n \"\"\"Command invocation context for Red.\n\n All context passed into commands will be of this type.\n\n This class inherits from `discord.ext.commands.Context`.\n\n Attributes\n ----------\n assume_yes: bool\n Whether or not interactive checks should\n be skipped and assumed to be confirmed.\n\n This is intended for allowing automation of tasks.\n\n An example of this would be scheduled commands\n not requiring interaction if the cog developer\n checks this value prior to confirming something interactively.\n\n Depending on the potential impact of a command,\n it may still be appropriate not to use this setting.\n permission_state: PermState\n The permission state the current context is in.\n \"\"\"\n\n command: \"Command\"\n invoked_subcommand: \"Optional[Command]\"\n bot: \"Red\"\n\n def __init__(self, **attrs):\n self.assume_yes = attrs.pop(\"assume_yes\", False)\n super().__init__(**attrs)\n self.permission_state: PermState = PermState.NORMAL\n\n async def send(self, content=None, **kwargs):\n \"\"\"Sends a message to the destination with the content given.\n\n This acts the same as `discord.ext.commands.Context.send`, with\n one added keyword argument as detailed below in *Other Parameters*.\n\n Parameters\n ----------\n content : str\n The content of the message to send.\n\n Other Parameters\n ----------------\n filter : callable (`str`) -> `str`, optional\n A function which is used to filter the ``content`` before\n it is sent.\n This must take a single `str` as an argument, and return\n the processed `str`. 
When `None` is passed, ``content`` won't be touched.\n Defaults to `None`.\n **kwargs\n See `discord.ext.commands.Context.send`.\n\n Returns\n -------\n discord.Message\n The message that was sent.\n\n \"\"\"\n\n _filter = kwargs.pop(\"filter\", None)\n\n if _filter and content:\n content = _filter(str(content))\n\n return await super().send(content=content, **kwargs)\n\n async def send_help(self, command=None):\n \"\"\"Send the command help message.\"\"\"\n # This allows people to manually use this similarly\n # to the upstream d.py version, while retaining our use.\n command = command or self.command\n await self.bot.send_help_for(self, command)\n\n async def tick(self, *, message: Optional[str] = None) -> bool:\n \"\"\"Add a tick reaction to the command message.\n\n Keyword Arguments\n -----------------\n message : str, optional\n The message to send if adding the reaction doesn't succeed.\n\n Returns\n -------\n bool\n :code:`True` if adding the reaction succeeded.\n\n \"\"\"\n return await self.react_quietly(TICK, message=message)\n\n async def react_quietly(\n self,\n reaction: Union[discord.Emoji, discord.Reaction, discord.PartialEmoji, str],\n *,\n message: Optional[str] = None,\n ) -> bool:\n \"\"\"Adds a reaction to the command message.\n\n Parameters\n ----------\n reaction : Union[discord.Emoji, discord.Reaction, discord.PartialEmoji, str]\n The emoji to react with.\n\n Keyword Arguments\n -----------------\n message : str, optional\n The message to send if adding the reaction doesn't succeed.\n\n Returns\n -------\n bool\n :code:`True` if adding the reaction succeeded.\n \"\"\"\n try:\n if not can_user_react_in(self.me, self.channel):\n raise RuntimeError\n await self.message.add_reaction(reaction)\n except (RuntimeError, discord.HTTPException):\n if message is not None:\n await self.send(message)\n return False\n else:\n return True\n\n async def send_interactive(\n self, messages: Iterable[str], box_lang: str = None, timeout: int = 15\n ) -> List[discord.Message]:\n \"\"\"Send multiple messages interactively.\n\n The user will be prompted for whether or not they would like to view\n the next message, one at a time. They will also be notified of how\n many messages are remaining on each prompt.\n\n Parameters\n ----------\n messages : `iterable` of `str`\n The messages to send.\n box_lang : str\n If specified, each message will be contained within a codeblock of\n this language.\n timeout : int\n How long the user has to respond to the prompt before it times out.\n After timing out, the bot deletes its prompt message.\n\n \"\"\"\n messages = tuple(messages)\n ret = []\n\n for idx, page in enumerate(messages, 1):\n if box_lang is None:\n msg = await self.send(page)\n else:\n msg = await self.send(box(page, lang=box_lang))\n ret.append(msg)\n n_remaining = len(messages) - idx\n if n_remaining > 0:\n if n_remaining == 1:\n plural = \"\"\n is_are = \"is\"\n else:\n plural = \"s\"\n is_are = \"are\"\n query = await self.send(\n \"There {} still {} message{} remaining. 
\"\n \"Type `more` to continue.\"\n \"\".format(is_are, n_remaining, plural)\n )\n try:\n resp = await self.bot.wait_for(\n \"message\",\n check=MessagePredicate.lower_equal_to(\"more\", self),\n timeout=timeout,\n )\n except asyncio.TimeoutError:\n with contextlib.suppress(discord.HTTPException):\n await query.delete()\n break\n else:\n try:\n await self.channel.delete_messages((query, resp))\n except (discord.HTTPException, AttributeError):\n # In case the bot can't delete other users' messages,\n # or is not a bot account\n # or channel is a DM\n with contextlib.suppress(discord.HTTPException):\n await query.delete()\n return ret\n\n async def embed_colour(self):\n \"\"\"\n Helper function to get the colour for an embed.\n\n Returns\n -------\n discord.Colour:\n The colour to be used\n \"\"\"\n return await self.bot.get_embed_color(self)\n\n @property\n def embed_color(self):\n # Rather than double awaiting.\n return self.embed_colour\n\n async def embed_requested(self):\n \"\"\"\n Short-hand for calling bot.embed_requested with permission checks.\n\n Equivalent to:\n\n .. code:: python\n\n await ctx.bot.embed_requested(ctx)\n\n Returns\n -------\n bool:\n :code:`True` if an embed is requested\n \"\"\"\n return await self.bot.embed_requested(self)\n\n async def maybe_send_embed(self, message: str) -> discord.Message:\n \"\"\"\n Simple helper to send a simple message to context\n without manually checking ctx.embed_requested\n This should only be used for simple messages.\n\n Parameters\n ----------\n message: `str`\n The string to send\n\n Returns\n -------\n discord.Message:\n the message which was sent\n\n Raises\n ------\n discord.Forbidden\n see `discord.abc.Messageable.send`\n discord.HTTPException\n see `discord.abc.Messageable.send`\n ValueError\n when the message's length is not between 1 and 2000 characters.\n \"\"\"\n if not message or len(message) > 2000:\n raise ValueError(\"Message length must be between 1 and 2000\")\n if await self.embed_requested():\n return await self.send(\n embed=discord.Embed(description=message, color=(await self.embed_colour()))\n )\n else:\n return await self.send(\n message,\n allowed_mentions=discord.AllowedMentions(everyone=False, roles=False, users=False),\n )\n\n @property\n def me(self) -> Union[discord.ClientUser, discord.Member]:\n \"\"\"\n discord.abc.User: The bot member or user object.\n\n If the context is DM, this will be a `discord.User` object.\n \"\"\"\n if self.guild is not None:\n return self.guild.me\n else:\n return self.bot.user\n\n\nif TYPE_CHECKING or os.getenv(\"BUILDING_DOCS\", False):\n\n class DMContext(Context):\n \"\"\"\n At runtime, this will still be a normal context object.\n\n This lies about some type narrowing for type analysis in commands\n using a dm_only decorator.\n\n It is only correct to use when those types are already narrowed\n \"\"\"\n\n @property\n def author(self) -> discord.User:\n ...\n\n @property\n def channel(self) -> discord.DMChannel:\n ...\n\n @property\n def guild(self) -> None:\n ...\n\n @property\n def me(self) -> discord.ClientUser:\n ...\n\n class GuildContext(Context):\n \"\"\"\n At runtime, this will still be a normal context object.\n\n This lies about some type narrowing for type analysis in commands\n using a guild_only decorator.\n\n It is only correct to use when those types are already narrowed\n \"\"\"\n\n @property\n def author(self) -> discord.Member:\n ...\n\n @property\n def channel(self) -> Union[discord.TextChannel, discord.VoiceChannel, discord.Thread]:\n ...\n\n 
@property\n def guild(self) -> discord.Guild:\n ...\n\n @property\n def me(self) -> discord.Member:\n ...\n\nelse:\n GuildContext = Context\n DMContext = Context\n", "path": "redbot/core/commands/context.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport asyncio\nimport contextlib\nimport os\nimport re\nfrom typing import Iterable, List, Union, Optional, TYPE_CHECKING\nimport discord\nfrom discord.ext.commands import Context as DPYContext\n\nfrom .requires import PermState\nfrom ..utils.chat_formatting import box, text_to_file\nfrom ..utils.predicates import MessagePredicate\nfrom ..utils import can_user_react_in, common_filters\n\nif TYPE_CHECKING:\n from .commands import Command\n from ..bot import Red\n\nTICK = \"\\N{WHITE HEAVY CHECK MARK}\"\n\n__all__ = [\"Context\", \"GuildContext\", \"DMContext\"]\n\n\nclass Context(DPYContext):\n \"\"\"Command invocation context for Red.\n\n All context passed into commands will be of this type.\n\n This class inherits from `discord.ext.commands.Context`.\n\n Attributes\n ----------\n assume_yes: bool\n Whether or not interactive checks should\n be skipped and assumed to be confirmed.\n\n This is intended for allowing automation of tasks.\n\n An example of this would be scheduled commands\n not requiring interaction if the cog developer\n checks this value prior to confirming something interactively.\n\n Depending on the potential impact of a command,\n it may still be appropriate not to use this setting.\n permission_state: PermState\n The permission state the current context is in.\n \"\"\"\n\n command: \"Command\"\n invoked_subcommand: \"Optional[Command]\"\n bot: \"Red\"\n\n def __init__(self, **attrs):\n self.assume_yes = attrs.pop(\"assume_yes\", False)\n super().__init__(**attrs)\n self.permission_state: PermState = PermState.NORMAL\n\n async def send(self, content=None, **kwargs):\n \"\"\"Sends a message to the destination with the content given.\n\n This acts the same as `discord.ext.commands.Context.send`, with\n one added keyword argument as detailed below in *Other Parameters*.\n\n Parameters\n ----------\n content : str\n The content of the message to send.\n\n Other Parameters\n ----------------\n filter : callable (`str`) -> `str`, optional\n A function which is used to filter the ``content`` before\n it is sent.\n This must take a single `str` as an argument, and return\n the processed `str`. 
When `None` is passed, ``content`` won't be touched.\n Defaults to `None`.\n **kwargs\n See `discord.ext.commands.Context.send`.\n\n Returns\n -------\n discord.Message\n The message that was sent.\n\n \"\"\"\n\n _filter = kwargs.pop(\"filter\", None)\n\n if _filter and content:\n content = _filter(str(content))\n\n return await super().send(content=content, **kwargs)\n\n async def send_help(self, command=None):\n \"\"\"Send the command help message.\"\"\"\n # This allows people to manually use this similarly\n # to the upstream d.py version, while retaining our use.\n command = command or self.command\n await self.bot.send_help_for(self, command)\n\n async def tick(self, *, message: Optional[str] = None) -> bool:\n \"\"\"Add a tick reaction to the command message.\n\n Keyword Arguments\n -----------------\n message : str, optional\n The message to send if adding the reaction doesn't succeed.\n\n Returns\n -------\n bool\n :code:`True` if adding the reaction succeeded.\n\n \"\"\"\n return await self.react_quietly(TICK, message=message)\n\n async def react_quietly(\n self,\n reaction: Union[discord.Emoji, discord.Reaction, discord.PartialEmoji, str],\n *,\n message: Optional[str] = None,\n ) -> bool:\n \"\"\"Adds a reaction to the command message.\n\n Parameters\n ----------\n reaction : Union[discord.Emoji, discord.Reaction, discord.PartialEmoji, str]\n The emoji to react with.\n\n Keyword Arguments\n -----------------\n message : str, optional\n The message to send if adding the reaction doesn't succeed.\n\n Returns\n -------\n bool\n :code:`True` if adding the reaction succeeded.\n \"\"\"\n try:\n if not can_user_react_in(self.me, self.channel):\n raise RuntimeError\n await self.message.add_reaction(reaction)\n except (RuntimeError, discord.HTTPException):\n if message is not None:\n await self.send(message)\n return False\n else:\n return True\n\n async def send_interactive(\n self,\n messages: Iterable[str],\n box_lang: str = None,\n timeout: int = 15,\n join_character: str = \"\",\n ) -> List[discord.Message]:\n \"\"\"Send multiple messages interactively.\n\n The user will be prompted for whether or not they would like to view\n the next message, one at a time. They will also be notified of how\n many messages are remaining on each prompt.\n\n Parameters\n ----------\n messages : `iterable` of `str`\n The messages to send.\n box_lang : str\n If specified, each message will be contained within a codeblock of\n this language.\n timeout : int\n How long the user has to respond to the prompt before it times out.\n After timing out, the bot deletes its prompt message.\n join_character : str\n The character used to join all the messages when the file output\n is selected.\n\n \"\"\"\n messages = tuple(messages)\n ret = []\n\n for idx, page in enumerate(messages, 1):\n if box_lang is None:\n msg = await self.send(page)\n else:\n msg = await self.send(box(page, lang=box_lang))\n ret.append(msg)\n n_remaining = len(messages) - idx\n if n_remaining > 0:\n if n_remaining == 1:\n plural = \"\"\n is_are = \"is\"\n else:\n plural = \"s\"\n is_are = \"are\"\n query = await self.send(\n \"There {} still {} message{} remaining. 
\"\n \"Type `more` to continue or `file` to upload all contents as a file.\"\n \"\".format(is_are, n_remaining, plural)\n )\n pred = MessagePredicate.lower_contained_in((\"more\", \"file\"), self)\n try:\n resp = await self.bot.wait_for(\n \"message\",\n check=pred,\n timeout=timeout,\n )\n except asyncio.TimeoutError:\n with contextlib.suppress(discord.HTTPException):\n await query.delete()\n break\n else:\n try:\n await self.channel.delete_messages((query, resp))\n except (discord.HTTPException, AttributeError):\n # In case the bot can't delete other users' messages,\n # or is not a bot account\n # or channel is a DM\n with contextlib.suppress(discord.HTTPException):\n await query.delete()\n if pred.result == 1:\n await self.send(file=text_to_file(join_character.join(messages)))\n break\n return ret\n\n async def embed_colour(self):\n \"\"\"\n Helper function to get the colour for an embed.\n\n Returns\n -------\n discord.Colour:\n The colour to be used\n \"\"\"\n return await self.bot.get_embed_color(self)\n\n @property\n def embed_color(self):\n # Rather than double awaiting.\n return self.embed_colour\n\n async def embed_requested(self):\n \"\"\"\n Short-hand for calling bot.embed_requested with permission checks.\n\n Equivalent to:\n\n .. code:: python\n\n await ctx.bot.embed_requested(ctx)\n\n Returns\n -------\n bool:\n :code:`True` if an embed is requested\n \"\"\"\n return await self.bot.embed_requested(self)\n\n async def maybe_send_embed(self, message: str) -> discord.Message:\n \"\"\"\n Simple helper to send a simple message to context\n without manually checking ctx.embed_requested\n This should only be used for simple messages.\n\n Parameters\n ----------\n message: `str`\n The string to send\n\n Returns\n -------\n discord.Message:\n the message which was sent\n\n Raises\n ------\n discord.Forbidden\n see `discord.abc.Messageable.send`\n discord.HTTPException\n see `discord.abc.Messageable.send`\n ValueError\n when the message's length is not between 1 and 2000 characters.\n \"\"\"\n if not message or len(message) > 2000:\n raise ValueError(\"Message length must be between 1 and 2000\")\n if await self.embed_requested():\n return await self.send(\n embed=discord.Embed(description=message, color=(await self.embed_colour()))\n )\n else:\n return await self.send(\n message,\n allowed_mentions=discord.AllowedMentions(everyone=False, roles=False, users=False),\n )\n\n @property\n def me(self) -> Union[discord.ClientUser, discord.Member]:\n \"\"\"\n discord.abc.User: The bot member or user object.\n\n If the context is DM, this will be a `discord.User` object.\n \"\"\"\n if self.guild is not None:\n return self.guild.me\n else:\n return self.bot.user\n\n\nif TYPE_CHECKING or os.getenv(\"BUILDING_DOCS\", False):\n\n class DMContext(Context):\n \"\"\"\n At runtime, this will still be a normal context object.\n\n This lies about some type narrowing for type analysis in commands\n using a dm_only decorator.\n\n It is only correct to use when those types are already narrowed\n \"\"\"\n\n @property\n def author(self) -> discord.User:\n ...\n\n @property\n def channel(self) -> discord.DMChannel:\n ...\n\n @property\n def guild(self) -> None:\n ...\n\n @property\n def me(self) -> discord.ClientUser:\n ...\n\n class GuildContext(Context):\n \"\"\"\n At runtime, this will still be a normal context object.\n\n This lies about some type narrowing for type analysis in commands\n using a guild_only decorator.\n\n It is only correct to use when those types are already narrowed\n \"\"\"\n\n 
@property\n def author(self) -> discord.Member:\n ...\n\n @property\n def channel(self) -> Union[discord.TextChannel, discord.VoiceChannel, discord.Thread]:\n ...\n\n @property\n def guild(self) -> discord.Guild:\n ...\n\n @property\n def me(self) -> discord.Member:\n ...\n\nelse:\n GuildContext = Context\n DMContext = Context\n", "path": "redbot/core/commands/context.py"}]}
num_tokens: 3,533
num_tokens_diff: 530

problem_id: gh_patches_debug_12517
source: rasdani/github-patches
task_type: git_diff
in_source_id: avocado-framework__avocado-4253
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- distutils depreaction https://www.python.org/dev/peps/pep-0632/ We have one module using this: `avocado.utils.kernel`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `avocado/utils/kernel.py` Content: ``` 1 # This program is free software; you can redistribute it and/or modify 2 # it under the terms of the GNU General Public License as published by 3 # the Free Software Foundation; either version 2 of the License, or 4 # (at your option) any later version. 5 # 6 # This program is distributed in the hope that it will be useful, 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 9 # 10 # See LICENSE for more details. 11 # 12 # Copyright: Red Hat Inc. 2014 13 # Author: Ruda Moura <[email protected]> 14 # Author: Santhosh G <[email protected]> 15 16 """ 17 Provides utilities for the Linux kernel. 18 """ 19 20 import logging 21 import multiprocessing 22 import os 23 import shutil 24 import tempfile 25 from distutils.version import LooseVersion # pylint: disable=E0611 26 27 from . import archive, asset, build, distro, process 28 29 LOG = logging.getLogger('avocado.test') 30 31 32 class KernelBuild: 33 34 """ 35 Build the Linux Kernel from official tarballs. 36 """ 37 38 URL = 'https://www.kernel.org/pub/linux/kernel/v{major}.x/' 39 SOURCE = 'linux-{version}.tar.gz' 40 41 def __init__(self, version, config_path=None, work_dir=None, 42 data_dirs=None): 43 """ 44 Creates an instance of :class:`KernelBuild`. 45 46 :param version: kernel version ("3.19.8"). 47 :param config_path: path to config file. 48 :param work_dir: work directory. 49 :param data_dirs: list of directories to keep the downloaded kernel 50 :return: None. 51 """ 52 self.asset_path = None 53 self.version = version 54 self.config_path = config_path 55 self.distro = distro.detect() 56 if work_dir is None: 57 work_dir = tempfile.mkdtemp(prefix='avocado_' + __name__) 58 self.work_dir = work_dir 59 if data_dirs is not None: 60 self.data_dirs = data_dirs 61 else: 62 self.data_dirs = [self.work_dir] 63 self._build_dir = os.path.join(self.work_dir, 'linux-%s' % self.version) 64 65 def __repr__(self): 66 return "KernelBuild('%s, %s, %s')" % (self.version, 67 self.config_path, 68 self.work_dir) 69 70 @property 71 def vmlinux(self): 72 """ 73 Return the vmlinux path if the file exists 74 """ 75 if not self.build_dir: 76 return None 77 vmlinux_path = os.path.join(self.build_dir, 'vmlinux') 78 if os.path.isfile(vmlinux_path): 79 return vmlinux_path 80 return None 81 82 @property 83 def build_dir(self): 84 """ 85 Return the build path if the directory exists 86 """ 87 if os.path.isdir(self._build_dir): 88 return self._build_dir 89 return None 90 91 def _build_kernel_url(self, base_url=None): 92 kernel_file = self.SOURCE.format(version=self.version) 93 if base_url is None: 94 base_url = self.URL.format(major=self.version.split('.', 1)[0]) 95 return base_url + kernel_file 96 97 def download(self, url=None): 98 """ 99 Download kernel source. 
100 101 :param url: override the url from where to fetch the kernel 102 source tarball 103 :type url: str or None 104 """ 105 full_url = self._build_kernel_url(base_url=url) 106 self.asset_path = asset.Asset(full_url, asset_hash=None, 107 algorithm=None, locations=None, 108 cache_dirs=self.data_dirs).fetch() 109 110 def uncompress(self): 111 """ 112 Uncompress kernel source. 113 114 :raises: Exception in case the tarball is not downloaded 115 """ 116 if self.asset_path: 117 LOG.info("Uncompressing tarball") 118 archive.extract(self.asset_path, self.work_dir) 119 else: 120 raise Exception("Unable to find the tarball") 121 122 def configure(self, targets=('defconfig'), extra_configs=None): 123 """ 124 Configure/prepare kernel source to build. 125 126 :param targets: configuration targets. Default is 'defconfig'. 127 :type targets: list of str 128 :param extra_configs: additional configurations in the form of 129 CONFIG_NAME=VALUE. 130 :type extra_configs: list of str 131 """ 132 build.make(self._build_dir, extra_args='-C %s mrproper' % 133 self._build_dir) 134 if self.config_path is not None: 135 dotconfig = os.path.join(self._build_dir, '.config') 136 shutil.copy(self.config_path, dotconfig) 137 build.make(self._build_dir, extra_args='-C %s olddefconfig' % 138 self._build_dir) 139 else: 140 if isinstance(targets, list): 141 _targets = " ".join(targets) 142 else: 143 _targets = targets 144 build.make(self.build_dir, 145 extra_args='-C %s %s' % (self.build_dir, _targets)) 146 if extra_configs: 147 with tempfile.NamedTemporaryFile(mode='w+t', 148 prefix='avocado_') as config_file: 149 config_file.write('\n'.join(extra_configs)) 150 config_file.flush() 151 cmd = ['cd', self._build_dir, '&&', 152 './scripts/kconfig/merge_config.sh', '.config', 153 config_file.name] 154 process.run(" ".join(cmd), shell=True) 155 156 def build(self, binary_package=False, njobs=multiprocessing.cpu_count()): 157 """ 158 Build kernel from source. 159 160 :param binary_package: when True, the appropriate 161 platform package is built 162 for install() to use 163 :type binary_pacakge: bool 164 :param njobs: number of jobs. It is mapped to the -j option from make. 165 If njobs is None then do not limit the number of jobs 166 (e.g. uses -j without value). The -j is omitted if a 167 value equal or less than zero is passed. Default value 168 is set to `multiprocessing.cpu_count()`. 169 :type njobs: int or None 170 """ 171 make_args = [] 172 LOG.info("Starting build the kernel") 173 174 if njobs is None: 175 make_args.append('-j') 176 elif njobs > 0: 177 make_args.extend(['-j', str(njobs)]) 178 make_args.extend(['-C', self._build_dir]) 179 180 if binary_package is True: 181 if self.distro.name == "Ubuntu": 182 make_args.append("deb-pkg") 183 184 build.make(self._build_dir, extra_args=" ".join(make_args)) 185 186 def install(self): 187 """ 188 Install built kernel. 189 """ 190 LOG.info("Starting kernel install") 191 if self.distro.name == "Ubuntu": 192 process.run('dpkg -i %s/*.deb' % 193 self.work_dir, shell=True, sudo=True) 194 else: 195 LOG.info("Skipping kernel install") 196 197 def __del__(self): 198 shutil.rmtree(self.work_dir) 199 200 201 def check_version(version): 202 """ 203 This utility function compares the current kernel version with 204 the version parameter and gives assertion error if the version 205 parameter is greater. 
206 207 :type version: string 208 :param version: version to be compared with current kernel version 209 """ 210 assert LooseVersion(os.uname()[2]) > LooseVersion(version), "Old kernel" 211 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/avocado/utils/kernel.py b/avocado/utils/kernel.py --- a/avocado/utils/kernel.py +++ b/avocado/utils/kernel.py @@ -22,7 +22,8 @@ import os import shutil import tempfile -from distutils.version import LooseVersion # pylint: disable=E0611 + +from pkg_resources import packaging from . import archive, asset, build, distro, process @@ -207,4 +208,6 @@ :type version: string :param version: version to be compared with current kernel version """ - assert LooseVersion(os.uname()[2]) > LooseVersion(version), "Old kernel" + os_version = packaging.version.parse(os.uname()[2]) + version = packaging.version.parse(version) + assert os_version > version, "Old kernel" \ No newline at end of file
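Note on the patch above: PEP 632 deprecates `distutils`, so the `LooseVersion` comparison is replaced with the `packaging` parser that setuptools vendors under `pkg_resources`. A minimal standalone sketch of the same comparison pattern; the `is_newer` helper is illustrative only and not part of the repository, and it assumes setuptools (`pkg_resources` with its vendored `packaging`) is installed:

```python
# Illustrative only: the same version-comparison pattern as the patch,
# using the packaging parser vendored by setuptools under pkg_resources.
from pkg_resources import packaging


def is_newer(current, required):
    """Return True when `current` parses as a strictly newer version than `required`."""
    return packaging.version.parse(current) > packaging.version.parse(required)


print(is_newer("5.4.0", "4.4"))    # True
print(is_newer("3.10.0", "3.9"))   # True (a lexical string comparison would say False)
```

Parsing both sides avoids the string-comparison pitfalls ("3.10" sorts before "3.9" lexically) that `LooseVersion` was originally used to work around.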
{"golden_diff": "diff --git a/avocado/utils/kernel.py b/avocado/utils/kernel.py\n--- a/avocado/utils/kernel.py\n+++ b/avocado/utils/kernel.py\n@@ -22,7 +22,8 @@\n import os\n import shutil\n import tempfile\n-from distutils.version import LooseVersion # pylint: disable=E0611\n+\n+from pkg_resources import packaging\n \n from . import archive, asset, build, distro, process\n \n@@ -207,4 +208,6 @@\n :type version: string\n :param version: version to be compared with current kernel version\n \"\"\"\n- assert LooseVersion(os.uname()[2]) > LooseVersion(version), \"Old kernel\"\n+ os_version = packaging.version.parse(os.uname()[2])\n+ version = packaging.version.parse(version)\n+ assert os_version > version, \"Old kernel\"\n\\ No newline at end of file\n", "issue": "distutils depreaction\nhttps://www.python.org/dev/peps/pep-0632/\r\n\r\nWe have one module using this: `avocado.utils.kernel`.\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 2014\n# Author: Ruda Moura <[email protected]>\n# Author: Santhosh G <[email protected]>\n\n\"\"\"\nProvides utilities for the Linux kernel.\n\"\"\"\n\nimport logging\nimport multiprocessing\nimport os\nimport shutil\nimport tempfile\nfrom distutils.version import LooseVersion # pylint: disable=E0611\n\nfrom . import archive, asset, build, distro, process\n\nLOG = logging.getLogger('avocado.test')\n\n\nclass KernelBuild:\n\n \"\"\"\n Build the Linux Kernel from official tarballs.\n \"\"\"\n\n URL = 'https://www.kernel.org/pub/linux/kernel/v{major}.x/'\n SOURCE = 'linux-{version}.tar.gz'\n\n def __init__(self, version, config_path=None, work_dir=None,\n data_dirs=None):\n \"\"\"\n Creates an instance of :class:`KernelBuild`.\n\n :param version: kernel version (\"3.19.8\").\n :param config_path: path to config file.\n :param work_dir: work directory.\n :param data_dirs: list of directories to keep the downloaded kernel\n :return: None.\n \"\"\"\n self.asset_path = None\n self.version = version\n self.config_path = config_path\n self.distro = distro.detect()\n if work_dir is None:\n work_dir = tempfile.mkdtemp(prefix='avocado_' + __name__)\n self.work_dir = work_dir\n if data_dirs is not None:\n self.data_dirs = data_dirs\n else:\n self.data_dirs = [self.work_dir]\n self._build_dir = os.path.join(self.work_dir, 'linux-%s' % self.version)\n\n def __repr__(self):\n return \"KernelBuild('%s, %s, %s')\" % (self.version,\n self.config_path,\n self.work_dir)\n\n @property\n def vmlinux(self):\n \"\"\"\n Return the vmlinux path if the file exists\n \"\"\"\n if not self.build_dir:\n return None\n vmlinux_path = os.path.join(self.build_dir, 'vmlinux')\n if os.path.isfile(vmlinux_path):\n return vmlinux_path\n return None\n\n @property\n def build_dir(self):\n \"\"\"\n Return the build path if the directory exists\n \"\"\"\n if os.path.isdir(self._build_dir):\n return self._build_dir\n return None\n\n def _build_kernel_url(self, base_url=None):\n kernel_file = self.SOURCE.format(version=self.version)\n if base_url is None:\n base_url = self.URL.format(major=self.version.split('.', 1)[0])\n 
return base_url + kernel_file\n\n def download(self, url=None):\n \"\"\"\n Download kernel source.\n\n :param url: override the url from where to fetch the kernel\n source tarball\n :type url: str or None\n \"\"\"\n full_url = self._build_kernel_url(base_url=url)\n self.asset_path = asset.Asset(full_url, asset_hash=None,\n algorithm=None, locations=None,\n cache_dirs=self.data_dirs).fetch()\n\n def uncompress(self):\n \"\"\"\n Uncompress kernel source.\n\n :raises: Exception in case the tarball is not downloaded\n \"\"\"\n if self.asset_path:\n LOG.info(\"Uncompressing tarball\")\n archive.extract(self.asset_path, self.work_dir)\n else:\n raise Exception(\"Unable to find the tarball\")\n\n def configure(self, targets=('defconfig'), extra_configs=None):\n \"\"\"\n Configure/prepare kernel source to build.\n\n :param targets: configuration targets. Default is 'defconfig'.\n :type targets: list of str\n :param extra_configs: additional configurations in the form of\n CONFIG_NAME=VALUE.\n :type extra_configs: list of str\n \"\"\"\n build.make(self._build_dir, extra_args='-C %s mrproper' %\n self._build_dir)\n if self.config_path is not None:\n dotconfig = os.path.join(self._build_dir, '.config')\n shutil.copy(self.config_path, dotconfig)\n build.make(self._build_dir, extra_args='-C %s olddefconfig' %\n self._build_dir)\n else:\n if isinstance(targets, list):\n _targets = \" \".join(targets)\n else:\n _targets = targets\n build.make(self.build_dir,\n extra_args='-C %s %s' % (self.build_dir, _targets))\n if extra_configs:\n with tempfile.NamedTemporaryFile(mode='w+t',\n prefix='avocado_') as config_file:\n config_file.write('\\n'.join(extra_configs))\n config_file.flush()\n cmd = ['cd', self._build_dir, '&&',\n './scripts/kconfig/merge_config.sh', '.config',\n config_file.name]\n process.run(\" \".join(cmd), shell=True)\n\n def build(self, binary_package=False, njobs=multiprocessing.cpu_count()):\n \"\"\"\n Build kernel from source.\n\n :param binary_package: when True, the appropriate\n platform package is built\n for install() to use\n :type binary_pacakge: bool\n :param njobs: number of jobs. It is mapped to the -j option from make.\n If njobs is None then do not limit the number of jobs\n (e.g. uses -j without value). The -j is omitted if a\n value equal or less than zero is passed. 
Default value\n is set to `multiprocessing.cpu_count()`.\n :type njobs: int or None\n \"\"\"\n make_args = []\n LOG.info(\"Starting build the kernel\")\n\n if njobs is None:\n make_args.append('-j')\n elif njobs > 0:\n make_args.extend(['-j', str(njobs)])\n make_args.extend(['-C', self._build_dir])\n\n if binary_package is True:\n if self.distro.name == \"Ubuntu\":\n make_args.append(\"deb-pkg\")\n\n build.make(self._build_dir, extra_args=\" \".join(make_args))\n\n def install(self):\n \"\"\"\n Install built kernel.\n \"\"\"\n LOG.info(\"Starting kernel install\")\n if self.distro.name == \"Ubuntu\":\n process.run('dpkg -i %s/*.deb' %\n self.work_dir, shell=True, sudo=True)\n else:\n LOG.info(\"Skipping kernel install\")\n\n def __del__(self):\n shutil.rmtree(self.work_dir)\n\n\ndef check_version(version):\n \"\"\"\n This utility function compares the current kernel version with\n the version parameter and gives assertion error if the version\n parameter is greater.\n\n :type version: string\n :param version: version to be compared with current kernel version\n \"\"\"\n assert LooseVersion(os.uname()[2]) > LooseVersion(version), \"Old kernel\"\n", "path": "avocado/utils/kernel.py"}], "after_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 2014\n# Author: Ruda Moura <[email protected]>\n# Author: Santhosh G <[email protected]>\n\n\"\"\"\nProvides utilities for the Linux kernel.\n\"\"\"\n\nimport logging\nimport multiprocessing\nimport os\nimport shutil\nimport tempfile\n\nfrom pkg_resources import packaging\n\nfrom . 
import archive, asset, build, distro, process\n\nLOG = logging.getLogger('avocado.test')\n\n\nclass KernelBuild:\n\n \"\"\"\n Build the Linux Kernel from official tarballs.\n \"\"\"\n\n URL = 'https://www.kernel.org/pub/linux/kernel/v{major}.x/'\n SOURCE = 'linux-{version}.tar.gz'\n\n def __init__(self, version, config_path=None, work_dir=None,\n data_dirs=None):\n \"\"\"\n Creates an instance of :class:`KernelBuild`.\n\n :param version: kernel version (\"3.19.8\").\n :param config_path: path to config file.\n :param work_dir: work directory.\n :param data_dirs: list of directories to keep the downloaded kernel\n :return: None.\n \"\"\"\n self.asset_path = None\n self.version = version\n self.config_path = config_path\n self.distro = distro.detect()\n if work_dir is None:\n work_dir = tempfile.mkdtemp(prefix='avocado_' + __name__)\n self.work_dir = work_dir\n if data_dirs is not None:\n self.data_dirs = data_dirs\n else:\n self.data_dirs = [self.work_dir]\n self._build_dir = os.path.join(self.work_dir, 'linux-%s' % self.version)\n\n def __repr__(self):\n return \"KernelBuild('%s, %s, %s')\" % (self.version,\n self.config_path,\n self.work_dir)\n\n @property\n def vmlinux(self):\n \"\"\"\n Return the vmlinux path if the file exists\n \"\"\"\n if not self.build_dir:\n return None\n vmlinux_path = os.path.join(self.build_dir, 'vmlinux')\n if os.path.isfile(vmlinux_path):\n return vmlinux_path\n return None\n\n @property\n def build_dir(self):\n \"\"\"\n Return the build path if the directory exists\n \"\"\"\n if os.path.isdir(self._build_dir):\n return self._build_dir\n return None\n\n def _build_kernel_url(self, base_url=None):\n kernel_file = self.SOURCE.format(version=self.version)\n if base_url is None:\n base_url = self.URL.format(major=self.version.split('.', 1)[0])\n return base_url + kernel_file\n\n def download(self, url=None):\n \"\"\"\n Download kernel source.\n\n :param url: override the url from where to fetch the kernel\n source tarball\n :type url: str or None\n \"\"\"\n full_url = self._build_kernel_url(base_url=url)\n self.asset_path = asset.Asset(full_url, asset_hash=None,\n algorithm=None, locations=None,\n cache_dirs=self.data_dirs).fetch()\n\n def uncompress(self):\n \"\"\"\n Uncompress kernel source.\n\n :raises: Exception in case the tarball is not downloaded\n \"\"\"\n if self.asset_path:\n LOG.info(\"Uncompressing tarball\")\n archive.extract(self.asset_path, self.work_dir)\n else:\n raise Exception(\"Unable to find the tarball\")\n\n def configure(self, targets=('defconfig'), extra_configs=None):\n \"\"\"\n Configure/prepare kernel source to build.\n\n :param targets: configuration targets. 
Default is 'defconfig'.\n :type targets: list of str\n :param extra_configs: additional configurations in the form of\n CONFIG_NAME=VALUE.\n :type extra_configs: list of str\n \"\"\"\n build.make(self._build_dir, extra_args='-C %s mrproper' %\n self._build_dir)\n if self.config_path is not None:\n dotconfig = os.path.join(self._build_dir, '.config')\n shutil.copy(self.config_path, dotconfig)\n build.make(self._build_dir, extra_args='-C %s olddefconfig' %\n self._build_dir)\n else:\n if isinstance(targets, list):\n _targets = \" \".join(targets)\n else:\n _targets = targets\n build.make(self.build_dir,\n extra_args='-C %s %s' % (self.build_dir, _targets))\n if extra_configs:\n with tempfile.NamedTemporaryFile(mode='w+t',\n prefix='avocado_') as config_file:\n config_file.write('\\n'.join(extra_configs))\n config_file.flush()\n cmd = ['cd', self._build_dir, '&&',\n './scripts/kconfig/merge_config.sh', '.config',\n config_file.name]\n process.run(\" \".join(cmd), shell=True)\n\n def build(self, binary_package=False, njobs=multiprocessing.cpu_count()):\n \"\"\"\n Build kernel from source.\n\n :param binary_package: when True, the appropriate\n platform package is built\n for install() to use\n :type binary_pacakge: bool\n :param njobs: number of jobs. It is mapped to the -j option from make.\n If njobs is None then do not limit the number of jobs\n (e.g. uses -j without value). The -j is omitted if a\n value equal or less than zero is passed. Default value\n is set to `multiprocessing.cpu_count()`.\n :type njobs: int or None\n \"\"\"\n make_args = []\n LOG.info(\"Starting build the kernel\")\n\n if njobs is None:\n make_args.append('-j')\n elif njobs > 0:\n make_args.extend(['-j', str(njobs)])\n make_args.extend(['-C', self._build_dir])\n\n if binary_package is True:\n if self.distro.name == \"Ubuntu\":\n make_args.append(\"deb-pkg\")\n\n build.make(self._build_dir, extra_args=\" \".join(make_args))\n\n def install(self):\n \"\"\"\n Install built kernel.\n \"\"\"\n LOG.info(\"Starting kernel install\")\n if self.distro.name == \"Ubuntu\":\n process.run('dpkg -i %s/*.deb' %\n self.work_dir, shell=True, sudo=True)\n else:\n LOG.info(\"Skipping kernel install\")\n\n def __del__(self):\n shutil.rmtree(self.work_dir)\n\n\ndef check_version(version):\n \"\"\"\n This utility function compares the current kernel version with\n the version parameter and gives assertion error if the version\n parameter is greater.\n\n :type version: string\n :param version: version to be compared with current kernel version\n \"\"\"\n os_version = packaging.version.parse(os.uname()[2])\n version = packaging.version.parse(version)\n assert os_version > version, \"Old kernel\"", "path": "avocado/utils/kernel.py"}]}
2,409
197
gh_patches_debug_49088
rasdani/github-patches
git_diff
fossasia__open-event-server-5232
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- is_stripe_linked property does not set on connecting stripe account **Describe the bug** is_stripe_linked property does not set on connecting stripe account. It throws error. **Expected behavior** Property should set appropriately. **Error** ``` Class 'sqlalchemy.orm.query.Query' is not mapped Traceback (most recent call last): File "/home/rs/Pradeep/github/open-event-server/env/lib/python3.5/site-packages/sqlalchemy/orm/session.py", line 1722, in add state = attributes.instance_state(instance) AttributeError: 'Query' object has no attribute '_sa_instance_state' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/rs/Pradeep/github/open-event-server/app/api/helpers/db.py", line 22, in save_to_db db.session.add(item) File "/home/rs/Pradeep/github/open-event-server/env/lib/python3.5/site-packages/sqlalchemy/orm/scoping.py", line 157, in do return getattr(self.registry(), name)(*args, **kwargs) File "/home/rs/Pradeep/github/open-event-server/env/lib/python3.5/site-packages/sqlalchemy/orm/session.py", line 1724, in add raise exc.UnmappedInstanceError(instance) sqlalchemy.orm.exc.UnmappedInstanceError: Class 'sqlalchemy.orm.query.Query' is not mapped ERROR:root:DB Exception! Class 'sqlalchemy.orm.query.Query' is not mapped ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/api/stripe_authorization.py` Content: ``` 1 from flask_rest_jsonapi import ResourceDetail, ResourceList 2 from sqlalchemy.orm.exc import NoResultFound 3 4 from app.api.bootstrap import api 5 from app.api.helpers.db import safe_query, get_count, save_to_db 6 from app.api.helpers.exceptions import ForbiddenException, ConflictException, UnprocessableEntity 7 from app.api.helpers.payment import StripePaymentsManager 8 from app.api.helpers.permission_manager import has_access 9 from app.api.helpers.permissions import jwt_required 10 from app.api.helpers.utilities import require_relationship 11 from app.api.schema.stripe_authorization import StripeAuthorizationSchema 12 from app.models import db 13 from app.models.event import Event 14 from app.models.stripe_authorization import StripeAuthorization 15 16 17 class StripeAuthorizationListPost(ResourceList): 18 """ 19 List and Create Stripe Authorization 20 """ 21 def before_post(self, args, kwargs, data): 22 """ 23 before post method to check for required relationship and proper permission 24 :param args: 25 :param kwargs: 26 :param data: 27 :return: 28 """ 29 require_relationship(['event'], data) 30 if not has_access('is_organizer', event_id=data['event']): 31 raise ForbiddenException({'source': ''}, "Minimum Organizer access required") 32 if get_count(db.session.query(Event).filter_by(id=int(data['event']), can_pay_by_stripe=False)) > 0: 33 raise ForbiddenException({'pointer': ''}, "Stripe payment is disabled for this Event") 34 35 def before_create_object(self, data, view_kwargs): 36 """ 37 method to check if stripe authorization object already exists for an event. 38 Raises ConflictException if it already exists. 39 If it doesn't, then uses the StripePaymentManager to get the other credentials from Stripe. 
40 :param data: 41 :param view_kwargs: 42 :return: 43 """ 44 try: 45 self.session.query(StripeAuthorization).filter_by(event_id=data['event'], deleted_at=None).one() 46 except NoResultFound: 47 credentials = StripePaymentsManager\ 48 .get_event_organizer_credentials_from_stripe(data['stripe_auth_code']) 49 if 'error' in credentials: 50 raise UnprocessableEntity({'pointer': '/data/stripe_auth_code'}, credentials['error_description']) 51 data['stripe_secret_key'] = credentials['access_token'] 52 data['stripe_refresh_token'] = credentials['refresh_token'] 53 data['stripe_publishable_key'] = credentials['stripe_publishable_key'] 54 data['stripe_user_id'] = credentials['stripe_user_id'] 55 else: 56 raise ConflictException({'pointer': '/data/relationships/event'}, 57 "Stripe Authorization already exists for this event") 58 59 def after_create_object(self, stripe_authorization, data, view_kwargs): 60 """ 61 after create object method for StripeAuthorizationListPost Class 62 :param stripe_authorization: Stripe authorization created from mashmallow_jsonapi 63 :param data: 64 :param view_kwargs: 65 :return: 66 """ 67 event = db.session.query(Event).filter_by(id=int(data['event'])) 68 event.is_stripe_linked = True 69 save_to_db(event) 70 71 schema = StripeAuthorizationSchema 72 decorators = (jwt_required, ) 73 methods = ['POST'] 74 data_layer = {'session': db.session, 75 'model': StripeAuthorization, 76 'methods': { 77 'before_create_object': before_create_object, 78 'after_create_object': after_create_object 79 }} 80 81 82 class StripeAuthorizationDetail(ResourceDetail): 83 """ 84 Stripe Authorization Detail Resource by ID 85 """ 86 def before_get_object(self, view_kwargs): 87 """ 88 method to get id of stripe authorization related to an event 89 :param view_kwargs: 90 :return: 91 """ 92 if view_kwargs.get('event_identifier'): 93 event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier') 94 view_kwargs['event_id'] = event.id 95 96 if view_kwargs.get('event_id'): 97 stripe_authorization = \ 98 safe_query(self, StripeAuthorization, 'event_id', view_kwargs['event_id'], 'event_id') 99 view_kwargs['id'] = stripe_authorization.id 100 101 def after_delete_object(self, stripe_authorization, view_kwargs): 102 """Make work after delete object 103 :param stripe_authorization: stripe authorization. 104 :param dict view_kwargs: kwargs from the resource view 105 """ 106 event = stripe_authorization.event 107 event.is_stripe_linked = False 108 save_to_db(event) 109 110 decorators = (api.has_permission('is_coorganizer', fetch="event_id", 111 fetch_as="event_id", model=StripeAuthorization),) 112 schema = StripeAuthorizationSchema 113 data_layer = {'session': db.session, 114 'model': StripeAuthorization, 115 'methods': { 116 'before_get_object': before_get_object, 117 'after_delete_object': after_delete_object 118 }} 119 120 121 class StripeAuthorizationRelationship(ResourceDetail): 122 """ 123 Stripe Authorization Relationship 124 """ 125 126 decorators = (api.has_permission('is_coorganizer', fetch="event_id", 127 fetch_as="event_id", model=StripeAuthorization),) 128 schema = StripeAuthorizationSchema 129 data_layer = {'session': db.session, 130 'model': StripeAuthorization} 131 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/api/stripe_authorization.py b/app/api/stripe_authorization.py --- a/app/api/stripe_authorization.py +++ b/app/api/stripe_authorization.py @@ -64,7 +64,7 @@ :param view_kwargs: :return: """ - event = db.session.query(Event).filter_by(id=int(data['event'])) + event = db.session.query(Event).filter_by(id=int(data['event'])).one() event.is_stripe_linked = True save_to_db(event)
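Note on the patch above: `Query.filter_by()` returns a lazy `Query` object rather than a model instance, so setting `is_stripe_linked` on it and passing it to `session.add()` raises the `UnmappedInstanceError` shown in the traceback; appending `.one()` materialises the single matching row. A minimal self-contained sketch of that distinction, using a hypothetical `Event` model and an in-memory SQLite session and assuming SQLAlchemy 1.4 or later (this is not the project's actual model or session setup):

```python
# Hypothetical minimal model and in-memory SQLite session, assuming SQLAlchemy 1.4+;
# not the project's real Event model, just the Query-vs-instance distinction.
from sqlalchemy import Boolean, Column, Integer, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Event(Base):
    __tablename__ = "events"
    id = Column(Integer, primary_key=True)
    is_stripe_linked = Column(Boolean, default=False)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Event(id=1))
session.commit()

query = session.query(Event).filter_by(id=1)        # a Query object, nothing fetched yet
event = session.query(Event).filter_by(id=1).one()  # the mapped Event instance

event.is_stripe_linked = True
session.add(event)   # fine: `event` is a mapped instance
session.commit()

# session.add(query) would raise sqlalchemy.orm.exc.UnmappedInstanceError,
# which is exactly the failure shown in the issue's traceback.
```

Using `.one()` also surfaces `NoResultFound` or `MultipleResultsFound` immediately instead of silently operating on a query object, which fits the intent of flagging exactly one event.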
{"golden_diff": "diff --git a/app/api/stripe_authorization.py b/app/api/stripe_authorization.py\n--- a/app/api/stripe_authorization.py\n+++ b/app/api/stripe_authorization.py\n@@ -64,7 +64,7 @@\n :param view_kwargs:\n :return:\n \"\"\"\n- event = db.session.query(Event).filter_by(id=int(data['event']))\n+ event = db.session.query(Event).filter_by(id=int(data['event'])).one()\n event.is_stripe_linked = True\n save_to_db(event)\n", "issue": "is_stripe_linked property does not set on connecting stripe account\n**Describe the bug**\r\nis_stripe_linked property does not set on connecting stripe account. It throws error.\r\n\r\n**Expected behavior**\r\nProperty should set appropriately.\r\n\r\n**Error**\r\n```\r\nClass 'sqlalchemy.orm.query.Query' is not mapped Traceback (most recent call last):\r\n File \"/home/rs/Pradeep/github/open-event-server/env/lib/python3.5/site-packages/sqlalchemy/orm/session.py\", line 1722, in add\r\n state = attributes.instance_state(instance)\r\nAttributeError: 'Query' object has no attribute '_sa_instance_state'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/rs/Pradeep/github/open-event-server/app/api/helpers/db.py\", line 22, in save_to_db\r\n db.session.add(item)\r\n File \"/home/rs/Pradeep/github/open-event-server/env/lib/python3.5/site-packages/sqlalchemy/orm/scoping.py\", line 157, in do\r\n return getattr(self.registry(), name)(*args, **kwargs)\r\n File \"/home/rs/Pradeep/github/open-event-server/env/lib/python3.5/site-packages/sqlalchemy/orm/session.py\", line 1724, in add\r\n raise exc.UnmappedInstanceError(instance)\r\nsqlalchemy.orm.exc.UnmappedInstanceError: Class 'sqlalchemy.orm.query.Query' is not mapped\r\nERROR:root:DB Exception! 
Class 'sqlalchemy.orm.query.Query' is not mapped\r\n```\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query, get_count, save_to_db\nfrom app.api.helpers.exceptions import ForbiddenException, ConflictException, UnprocessableEntity\nfrom app.api.helpers.payment import StripePaymentsManager\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.stripe_authorization import StripeAuthorizationSchema\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.stripe_authorization import StripeAuthorization\n\n\nclass StripeAuthorizationListPost(ResourceList):\n \"\"\"\n List and Create Stripe Authorization\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationship and proper permission\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_organizer', event_id=data['event']):\n raise ForbiddenException({'source': ''}, \"Minimum Organizer access required\")\n if get_count(db.session.query(Event).filter_by(id=int(data['event']), can_pay_by_stripe=False)) > 0:\n raise ForbiddenException({'pointer': ''}, \"Stripe payment is disabled for this Event\")\n\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n method to check if stripe authorization object already exists for an event.\n Raises ConflictException if it already exists.\n If it doesn't, then uses the StripePaymentManager to get the other credentials from Stripe.\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n try:\n self.session.query(StripeAuthorization).filter_by(event_id=data['event'], deleted_at=None).one()\n except NoResultFound:\n credentials = StripePaymentsManager\\\n .get_event_organizer_credentials_from_stripe(data['stripe_auth_code'])\n if 'error' in credentials:\n raise UnprocessableEntity({'pointer': '/data/stripe_auth_code'}, credentials['error_description'])\n data['stripe_secret_key'] = credentials['access_token']\n data['stripe_refresh_token'] = credentials['refresh_token']\n data['stripe_publishable_key'] = credentials['stripe_publishable_key']\n data['stripe_user_id'] = credentials['stripe_user_id']\n else:\n raise ConflictException({'pointer': '/data/relationships/event'},\n \"Stripe Authorization already exists for this event\")\n\n def after_create_object(self, stripe_authorization, data, view_kwargs):\n \"\"\"\n after create object method for StripeAuthorizationListPost Class\n :param stripe_authorization: Stripe authorization created from mashmallow_jsonapi\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n event = db.session.query(Event).filter_by(id=int(data['event']))\n event.is_stripe_linked = True\n save_to_db(event)\n\n schema = StripeAuthorizationSchema\n decorators = (jwt_required, )\n methods = ['POST']\n data_layer = {'session': db.session,\n 'model': StripeAuthorization,\n 'methods': {\n 'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass StripeAuthorizationDetail(ResourceDetail):\n \"\"\"\n Stripe Authorization Detail Resource by ID\n \"\"\"\n def before_get_object(self, view_kwargs):\n \"\"\"\n method to get id of stripe authorization related to an event\n :param 
view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n view_kwargs['event_id'] = event.id\n\n if view_kwargs.get('event_id'):\n stripe_authorization = \\\n safe_query(self, StripeAuthorization, 'event_id', view_kwargs['event_id'], 'event_id')\n view_kwargs['id'] = stripe_authorization.id\n\n def after_delete_object(self, stripe_authorization, view_kwargs):\n \"\"\"Make work after delete object\n :param stripe_authorization: stripe authorization.\n :param dict view_kwargs: kwargs from the resource view\n \"\"\"\n event = stripe_authorization.event\n event.is_stripe_linked = False\n save_to_db(event)\n\n decorators = (api.has_permission('is_coorganizer', fetch=\"event_id\",\n fetch_as=\"event_id\", model=StripeAuthorization),)\n schema = StripeAuthorizationSchema\n data_layer = {'session': db.session,\n 'model': StripeAuthorization,\n 'methods': {\n 'before_get_object': before_get_object,\n 'after_delete_object': after_delete_object\n }}\n\n\nclass StripeAuthorizationRelationship(ResourceDetail):\n \"\"\"\n Stripe Authorization Relationship\n \"\"\"\n\n decorators = (api.has_permission('is_coorganizer', fetch=\"event_id\",\n fetch_as=\"event_id\", model=StripeAuthorization),)\n schema = StripeAuthorizationSchema\n data_layer = {'session': db.session,\n 'model': StripeAuthorization}\n", "path": "app/api/stripe_authorization.py"}], "after_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query, get_count, save_to_db\nfrom app.api.helpers.exceptions import ForbiddenException, ConflictException, UnprocessableEntity\nfrom app.api.helpers.payment import StripePaymentsManager\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.stripe_authorization import StripeAuthorizationSchema\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.stripe_authorization import StripeAuthorization\n\n\nclass StripeAuthorizationListPost(ResourceList):\n \"\"\"\n List and Create Stripe Authorization\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationship and proper permission\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_organizer', event_id=data['event']):\n raise ForbiddenException({'source': ''}, \"Minimum Organizer access required\")\n if get_count(db.session.query(Event).filter_by(id=int(data['event']), can_pay_by_stripe=False)) > 0:\n raise ForbiddenException({'pointer': ''}, \"Stripe payment is disabled for this Event\")\n\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n method to check if stripe authorization object already exists for an event.\n Raises ConflictException if it already exists.\n If it doesn't, then uses the StripePaymentManager to get the other credentials from Stripe.\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n try:\n self.session.query(StripeAuthorization).filter_by(event_id=data['event'], deleted_at=None).one()\n except NoResultFound:\n credentials = StripePaymentsManager\\\n .get_event_organizer_credentials_from_stripe(data['stripe_auth_code'])\n if 'error' in credentials:\n raise 
UnprocessableEntity({'pointer': '/data/stripe_auth_code'}, credentials['error_description'])\n data['stripe_secret_key'] = credentials['access_token']\n data['stripe_refresh_token'] = credentials['refresh_token']\n data['stripe_publishable_key'] = credentials['stripe_publishable_key']\n data['stripe_user_id'] = credentials['stripe_user_id']\n else:\n raise ConflictException({'pointer': '/data/relationships/event'},\n \"Stripe Authorization already exists for this event\")\n\n def after_create_object(self, stripe_authorization, data, view_kwargs):\n \"\"\"\n after create object method for StripeAuthorizationListPost Class\n :param stripe_authorization: Stripe authorization created from mashmallow_jsonapi\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n event = db.session.query(Event).filter_by(id=int(data['event'])).one()\n event.is_stripe_linked = True\n save_to_db(event)\n\n schema = StripeAuthorizationSchema\n decorators = (jwt_required, )\n methods = ['POST']\n data_layer = {'session': db.session,\n 'model': StripeAuthorization,\n 'methods': {\n 'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass StripeAuthorizationDetail(ResourceDetail):\n \"\"\"\n Stripe Authorization Detail Resource by ID\n \"\"\"\n def before_get_object(self, view_kwargs):\n \"\"\"\n method to get id of stripe authorization related to an event\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n view_kwargs['event_id'] = event.id\n\n if view_kwargs.get('event_id'):\n stripe_authorization = \\\n safe_query(self, StripeAuthorization, 'event_id', view_kwargs['event_id'], 'event_id')\n view_kwargs['id'] = stripe_authorization.id\n\n def after_delete_object(self, stripe_authorization, view_kwargs):\n \"\"\"Make work after delete object\n :param stripe_authorization: stripe authorization.\n :param dict view_kwargs: kwargs from the resource view\n \"\"\"\n event = stripe_authorization.event\n event.is_stripe_linked = False\n save_to_db(event)\n\n decorators = (api.has_permission('is_coorganizer', fetch=\"event_id\",\n fetch_as=\"event_id\", model=StripeAuthorization),)\n schema = StripeAuthorizationSchema\n data_layer = {'session': db.session,\n 'model': StripeAuthorization,\n 'methods': {\n 'before_get_object': before_get_object,\n 'after_delete_object': after_delete_object\n }}\n\n\nclass StripeAuthorizationRelationship(ResourceDetail):\n \"\"\"\n Stripe Authorization Relationship\n \"\"\"\n\n decorators = (api.has_permission('is_coorganizer', fetch=\"event_id\",\n fetch_as=\"event_id\", model=StripeAuthorization),)\n schema = StripeAuthorizationSchema\n data_layer = {'session': db.session,\n 'model': StripeAuthorization}\n", "path": "app/api/stripe_authorization.py"}]}
1,939
116
gh_patches_debug_1408
rasdani/github-patches
git_diff
cupy__cupy-3570
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- cupy.percentile only calculates integer percentiles when the input data is an integer. This seems to be caused by a cast of the percentiles array `q` to the same type as the input array `a` in the cupy.percentile source : https://github.com/cupy/cupy/blob/adfcc44bc9a17886a340cd85b7c9ebadd94b38a1/cupy/statistics/order.py#L189 Example code to reproduce the issue: `cupy.percentile(cupy.arange(1001).astype(cupy.int16),[98, 99, 99.9, 100]).get()` `array([ 980., 990., 990., 1000.])` `cupy.percentile(cupy.arange(1001).astype(cupy.float16),[98, 99, 99.9, 100]).get()` `array([ 980., 990., 999., 1000.])` For comparison the numpy version always calculates correctly: `numpy.percentile(numpy.arange(1001).astype(numpy.int16),[98, 99, 99.9, 100])` `array([ 980., 990., 999., 1000.])` Cupy configuration: CuPy Version : 7.6.0 CUDA Root : C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2 CUDA Build Version : 10020 CUDA Driver Version : 10020 CUDA Runtime Version : 10020 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cupy/statistics/order.py` Content: ``` 1 import warnings 2 3 import cupy 4 from cupy import core 5 from cupy.core import _routines_statistics as _statistics 6 from cupy.core import _fusion_thread_local 7 from cupy.logic import content 8 9 10 def amin(a, axis=None, out=None, keepdims=False): 11 """Returns the minimum of an array or the minimum along an axis. 12 13 .. note:: 14 15 When at least one element is NaN, the corresponding min value will be 16 NaN. 17 18 Args: 19 a (cupy.ndarray): Array to take the minimum. 20 axis (int): Along which axis to take the minimum. The flattened array 21 is used by default. 22 out (cupy.ndarray): Output array. 23 keepdims (bool): If ``True``, the axis is remained as an axis of 24 size one. 25 26 Returns: 27 cupy.ndarray: The minimum of ``a``, along the axis if specified. 28 29 .. seealso:: :func:`numpy.amin` 30 31 """ 32 if _fusion_thread_local.is_fusing(): 33 if keepdims: 34 raise NotImplementedError( 35 'cupy.amin does not support `keepdims` in fusion yet.') 36 return _fusion_thread_local.call_reduction( 37 _statistics.amin, a, axis=axis, out=out) 38 39 # TODO(okuta): check type 40 return a.min(axis=axis, out=out, keepdims=keepdims) 41 42 43 def amax(a, axis=None, out=None, keepdims=False): 44 """Returns the maximum of an array or the maximum along an axis. 45 46 .. note:: 47 48 When at least one element is NaN, the corresponding min value will be 49 NaN. 50 51 Args: 52 a (cupy.ndarray): Array to take the maximum. 53 axis (int): Along which axis to take the maximum. The flattened array 54 is used by default. 55 out (cupy.ndarray): Output array. 56 keepdims (bool): If ``True``, the axis is remained as an axis of 57 size one. 58 59 Returns: 60 cupy.ndarray: The maximum of ``a``, along the axis if specified. 61 62 .. seealso:: :func:`numpy.amax` 63 64 """ 65 if _fusion_thread_local.is_fusing(): 66 if keepdims: 67 raise NotImplementedError( 68 'cupy.amax does not support `keepdims` in fusion yet.') 69 return _fusion_thread_local.call_reduction( 70 _statistics.amax, a, axis=axis, out=out) 71 72 # TODO(okuta): check type 73 return a.max(axis=axis, out=out, keepdims=keepdims) 74 75 76 def nanmin(a, axis=None, out=None, keepdims=False): 77 """Returns the minimum of an array along an axis ignoring NaN. 
78 79 When there is a slice whose elements are all NaN, a :class:`RuntimeWarning` 80 is raised and NaN is returned. 81 82 Args: 83 a (cupy.ndarray): Array to take the minimum. 84 axis (int): Along which axis to take the minimum. The flattened array 85 is used by default. 86 out (cupy.ndarray): Output array. 87 keepdims (bool): If ``True``, the axis is remained as an axis of 88 size one. 89 90 Returns: 91 cupy.ndarray: The minimum of ``a``, along the axis if specified. 92 93 .. warning:: 94 95 This function may synchronize the device. 96 97 .. seealso:: :func:`numpy.nanmin` 98 99 """ 100 # TODO(niboshi): Avoid synchronization. 101 res = core.nanmin(a, axis=axis, out=out, keepdims=keepdims) 102 if content.isnan(res).any(): # synchronize! 103 warnings.warn('All-NaN slice encountered', RuntimeWarning) 104 return res 105 106 107 def nanmax(a, axis=None, out=None, keepdims=False): 108 """Returns the maximum of an array along an axis ignoring NaN. 109 110 When there is a slice whose elements are all NaN, a :class:`RuntimeWarning` 111 is raised and NaN is returned. 112 113 Args: 114 a (cupy.ndarray): Array to take the maximum. 115 axis (int): Along which axis to take the maximum. The flattened array 116 is used by default. 117 out (cupy.ndarray): Output array. 118 keepdims (bool): If ``True``, the axis is remained as an axis of 119 size one. 120 121 Returns: 122 cupy.ndarray: The maximum of ``a``, along the axis if specified. 123 124 .. warning:: 125 126 This function may synchronize the device. 127 128 .. seealso:: :func:`numpy.nanmax` 129 130 """ 131 # TODO(niboshi): Avoid synchronization. 132 res = core.nanmax(a, axis=axis, out=out, keepdims=keepdims) 133 if content.isnan(res).any(): # synchronize! 134 warnings.warn('All-NaN slice encountered', RuntimeWarning) 135 return res 136 137 138 def ptp(a, axis=None, out=None, keepdims=False): 139 """Returns the range of values (maximum - minimum) along an axis. 140 141 .. note:: 142 143 The name of the function comes from the acronym for 'peak to peak'. 144 145 When at least one element is NaN, the corresponding ptp value will be 146 NaN. 147 148 Args: 149 a (cupy.ndarray): Array over which to take the range. 150 axis (int): Axis along which to take the minimum. The flattened 151 array is used by default. 152 out (cupy.ndarray): Output array. 153 keepdims (bool): If ``True``, the axis is retained as an axis of 154 size one. 155 156 Returns: 157 cupy.ndarray: The minimum of ``a``, along the axis if specified. 158 159 .. seealso:: :func:`numpy.amin` 160 161 """ 162 return a.ptp(axis=axis, out=out, keepdims=keepdims) 163 164 165 def percentile(a, q, axis=None, out=None, interpolation='linear', 166 keepdims=False): 167 """Computes the q-th percentile of the data along the specified axis. 168 169 Args: 170 a (cupy.ndarray): Array for which to compute percentiles. 171 q (float, tuple of floats or cupy.ndarray): Percentiles to compute 172 in the range between 0 and 100 inclusive. 173 axis (int or tuple of ints): Along which axis or axes to compute the 174 percentiles. The flattened array is used by default. 175 out (cupy.ndarray): Output array. 176 interpolation (str): Interpolation method when a quantile lies between 177 two data points. ``linear`` interpolation is used by default. 178 Supported interpolations are``lower``, ``higher``, ``midpoint``, 179 ``nearest`` and ``linear``. 180 keepdims (bool): If ``True``, the axis is remained as an axis of 181 size one. 182 183 Returns: 184 cupy.ndarray: The percentiles of ``a``, along the axis if specified. 
185 186 .. seealso:: :func:`numpy.percentile` 187 188 """ 189 q = cupy.asarray(q, dtype=a.dtype) 190 if q.ndim == 0: 191 q = q[None] 192 zerod = True 193 else: 194 zerod = False 195 if q.ndim > 1: 196 raise ValueError('Expected q to have a dimension of 1.\n' 197 'Actual: {0} != 1'.format(q.ndim)) 198 199 if keepdims: 200 if axis is None: 201 keepdim = (1,) * a.ndim 202 else: 203 keepdim = list(a.shape) 204 for ax in axis: 205 keepdim[ax % a.ndim] = 1 206 keepdim = tuple(keepdim) 207 208 # Copy a since we need it sorted but without modifying the original array 209 if isinstance(axis, int): 210 axis = axis, 211 if axis is None: 212 ap = a.flatten() 213 nkeep = 0 214 else: 215 # Reduce axes from a and put them last 216 axis = tuple(ax % a.ndim for ax in axis) 217 keep = set(range(a.ndim)) - set(axis) 218 nkeep = len(keep) 219 for i, s in enumerate(sorted(keep)): 220 a = a.swapaxes(i, s) 221 ap = a.reshape(a.shape[:nkeep] + (-1,)).copy() 222 223 axis = -1 224 ap.sort(axis=axis) 225 Nx = ap.shape[axis] 226 indices = q * 0.01 * (Nx - 1.) # percents to decimals 227 228 if interpolation == 'lower': 229 indices = cupy.floor(indices).astype(cupy.int32) 230 elif interpolation == 'higher': 231 indices = cupy.ceil(indices).astype(cupy.int32) 232 elif interpolation == 'midpoint': 233 indices = 0.5 * (cupy.floor(indices) + cupy.ceil(indices)) 234 elif interpolation == 'nearest': 235 # TODO(hvy): Implement nearest using around 236 raise ValueError('\'nearest\' interpolation is not yet supported. ' 237 'Please use any other interpolation method.') 238 elif interpolation == 'linear': 239 pass 240 else: 241 raise ValueError('Unexpected interpolation method.\n' 242 'Actual: \'{0}\' not in (\'linear\', \'lower\', ' 243 '\'higher\', \'midpoint\')'.format(interpolation)) 244 245 if indices.dtype == cupy.int32: 246 ret = cupy.rollaxis(ap, axis) 247 ret = ret.take(indices, axis=0, out=out) 248 else: 249 if out is None: 250 ret = cupy.empty(ap.shape[:-1] + q.shape, dtype=cupy.float64) 251 else: 252 ret = cupy.rollaxis(out, 0, out.ndim) 253 254 cupy.ElementwiseKernel( 255 'S idx, raw T a, raw int32 offset', 'U ret', 256 ''' 257 ptrdiff_t idx_below = floor(idx); 258 U weight_above = idx - idx_below; 259 260 ptrdiff_t offset_i = _ind.get()[0] * offset; 261 ret = a[offset_i + idx_below] * (1.0 - weight_above) 262 + a[offset_i + idx_below + 1] * weight_above; 263 ''', 264 'percentile_weightnening' 265 )(indices, ap, ap.shape[-1] if ap.ndim > 1 else 0, ret) 266 ret = cupy.rollaxis(ret, -1) # Roll q dimension back to first axis 267 268 if zerod: 269 ret = ret.squeeze(0) 270 if keepdims: 271 if q.size > 1: 272 keepdim = (-1,) + keepdim 273 ret = ret.reshape(keepdim) 274 275 return core._internal_ascontiguousarray(ret) 276 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cupy/statistics/order.py b/cupy/statistics/order.py --- a/cupy/statistics/order.py +++ b/cupy/statistics/order.py @@ -186,7 +186,8 @@ .. seealso:: :func:`numpy.percentile` """ - q = cupy.asarray(q, dtype=a.dtype) + if not isinstance(q, cupy.ndarray): + q = cupy.asarray(q, dtype='d') if q.ndim == 0: q = q[None] zerod = True
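Note on the patch above: the old code cast the requested percentiles `q` to the dtype of the input array `a`, so fractional percentiles such as 99.9 were truncated whenever `a` held integers; parsing `q` as float64 (`'d'`) preserves them. A NumPy-only illustration of the truncation effect, with values assuming `a = arange(1001)` as in the issue (no GPU needed):

```python
# NumPy-only reproduction of the truncation (no GPU required).
import numpy as np

a = np.arange(1001, dtype=np.int16)
q = np.asarray([98, 99, 99.9, 100])

q_cast = q.astype(a.dtype)        # what the old cast effectively did
print(q_cast)                     # [ 98  99  99 100] -> 99.9 truncated to 99

print(np.percentile(a, q))        # [ 980.  990.  999. 1000.]
print(np.percentile(a, q_cast))   # [ 980.  990.  990. 1000.]  (reproduces the bug)
```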
{"golden_diff": "diff --git a/cupy/statistics/order.py b/cupy/statistics/order.py\n--- a/cupy/statistics/order.py\n+++ b/cupy/statistics/order.py\n@@ -186,7 +186,8 @@\n .. seealso:: :func:`numpy.percentile`\n \n \"\"\"\n- q = cupy.asarray(q, dtype=a.dtype)\n+ if not isinstance(q, cupy.ndarray):\n+ q = cupy.asarray(q, dtype='d')\n if q.ndim == 0:\n q = q[None]\n zerod = True\n", "issue": "cupy.percentile only calculates integer percentiles when the input data is an integer.\nThis seems to be caused by a cast of the percentiles array `q` to the same type as the input array `a` in the cupy.percentile source :\r\n\r\nhttps://github.com/cupy/cupy/blob/adfcc44bc9a17886a340cd85b7c9ebadd94b38a1/cupy/statistics/order.py#L189\r\n\r\nExample code to reproduce the issue:\r\n\r\n`cupy.percentile(cupy.arange(1001).astype(cupy.int16),[98, 99, 99.9, 100]).get()`\r\n`array([ 980., 990., 990., 1000.])`\r\n\r\n`cupy.percentile(cupy.arange(1001).astype(cupy.float16),[98, 99, 99.9, 100]).get()`\r\n`array([ 980., 990., 999., 1000.])`\r\n\r\nFor comparison the numpy version always calculates correctly:\r\n\r\n`numpy.percentile(numpy.arange(1001).astype(numpy.int16),[98, 99, 99.9, 100])`\r\n`array([ 980., 990., 999., 1000.])`\r\n\r\nCupy configuration:\r\nCuPy Version : 7.6.0\r\nCUDA Root : C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\r\nCUDA Build Version : 10020\r\nCUDA Driver Version : 10020\r\nCUDA Runtime Version : 10020\r\n\n", "before_files": [{"content": "import warnings\n\nimport cupy\nfrom cupy import core\nfrom cupy.core import _routines_statistics as _statistics\nfrom cupy.core import _fusion_thread_local\nfrom cupy.logic import content\n\n\ndef amin(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the minimum of an array or the minimum along an axis.\n\n .. note::\n\n When at least one element is NaN, the corresponding min value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array to take the minimum.\n axis (int): Along which axis to take the minimum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amin`\n\n \"\"\"\n if _fusion_thread_local.is_fusing():\n if keepdims:\n raise NotImplementedError(\n 'cupy.amin does not support `keepdims` in fusion yet.')\n return _fusion_thread_local.call_reduction(\n _statistics.amin, a, axis=axis, out=out)\n\n # TODO(okuta): check type\n return a.min(axis=axis, out=out, keepdims=keepdims)\n\n\ndef amax(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the maximum of an array or the maximum along an axis.\n\n .. note::\n\n When at least one element is NaN, the corresponding min value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array to take the maximum.\n axis (int): Along which axis to take the maximum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The maximum of ``a``, along the axis if specified.\n\n .. 
seealso:: :func:`numpy.amax`\n\n \"\"\"\n if _fusion_thread_local.is_fusing():\n if keepdims:\n raise NotImplementedError(\n 'cupy.amax does not support `keepdims` in fusion yet.')\n return _fusion_thread_local.call_reduction(\n _statistics.amax, a, axis=axis, out=out)\n\n # TODO(okuta): check type\n return a.max(axis=axis, out=out, keepdims=keepdims)\n\n\ndef nanmin(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the minimum of an array along an axis ignoring NaN.\n\n When there is a slice whose elements are all NaN, a :class:`RuntimeWarning`\n is raised and NaN is returned.\n\n Args:\n a (cupy.ndarray): Array to take the minimum.\n axis (int): Along which axis to take the minimum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. warning::\n\n This function may synchronize the device.\n\n .. seealso:: :func:`numpy.nanmin`\n\n \"\"\"\n # TODO(niboshi): Avoid synchronization.\n res = core.nanmin(a, axis=axis, out=out, keepdims=keepdims)\n if content.isnan(res).any(): # synchronize!\n warnings.warn('All-NaN slice encountered', RuntimeWarning)\n return res\n\n\ndef nanmax(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the maximum of an array along an axis ignoring NaN.\n\n When there is a slice whose elements are all NaN, a :class:`RuntimeWarning`\n is raised and NaN is returned.\n\n Args:\n a (cupy.ndarray): Array to take the maximum.\n axis (int): Along which axis to take the maximum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The maximum of ``a``, along the axis if specified.\n\n .. warning::\n\n This function may synchronize the device.\n\n .. seealso:: :func:`numpy.nanmax`\n\n \"\"\"\n # TODO(niboshi): Avoid synchronization.\n res = core.nanmax(a, axis=axis, out=out, keepdims=keepdims)\n if content.isnan(res).any(): # synchronize!\n warnings.warn('All-NaN slice encountered', RuntimeWarning)\n return res\n\n\ndef ptp(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the range of values (maximum - minimum) along an axis.\n\n .. note::\n\n The name of the function comes from the acronym for 'peak to peak'.\n\n When at least one element is NaN, the corresponding ptp value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array over which to take the range.\n axis (int): Axis along which to take the minimum. The flattened\n array is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is retained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amin`\n\n \"\"\"\n return a.ptp(axis=axis, out=out, keepdims=keepdims)\n\n\ndef percentile(a, q, axis=None, out=None, interpolation='linear',\n keepdims=False):\n \"\"\"Computes the q-th percentile of the data along the specified axis.\n\n Args:\n a (cupy.ndarray): Array for which to compute percentiles.\n q (float, tuple of floats or cupy.ndarray): Percentiles to compute\n in the range between 0 and 100 inclusive.\n axis (int or tuple of ints): Along which axis or axes to compute the\n percentiles. The flattened array is used by default.\n out (cupy.ndarray): Output array.\n interpolation (str): Interpolation method when a quantile lies between\n two data points. 
``linear`` interpolation is used by default.\n Supported interpolations are``lower``, ``higher``, ``midpoint``,\n ``nearest`` and ``linear``.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The percentiles of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.percentile`\n\n \"\"\"\n q = cupy.asarray(q, dtype=a.dtype)\n if q.ndim == 0:\n q = q[None]\n zerod = True\n else:\n zerod = False\n if q.ndim > 1:\n raise ValueError('Expected q to have a dimension of 1.\\n'\n 'Actual: {0} != 1'.format(q.ndim))\n\n if keepdims:\n if axis is None:\n keepdim = (1,) * a.ndim\n else:\n keepdim = list(a.shape)\n for ax in axis:\n keepdim[ax % a.ndim] = 1\n keepdim = tuple(keepdim)\n\n # Copy a since we need it sorted but without modifying the original array\n if isinstance(axis, int):\n axis = axis,\n if axis is None:\n ap = a.flatten()\n nkeep = 0\n else:\n # Reduce axes from a and put them last\n axis = tuple(ax % a.ndim for ax in axis)\n keep = set(range(a.ndim)) - set(axis)\n nkeep = len(keep)\n for i, s in enumerate(sorted(keep)):\n a = a.swapaxes(i, s)\n ap = a.reshape(a.shape[:nkeep] + (-1,)).copy()\n\n axis = -1\n ap.sort(axis=axis)\n Nx = ap.shape[axis]\n indices = q * 0.01 * (Nx - 1.) # percents to decimals\n\n if interpolation == 'lower':\n indices = cupy.floor(indices).astype(cupy.int32)\n elif interpolation == 'higher':\n indices = cupy.ceil(indices).astype(cupy.int32)\n elif interpolation == 'midpoint':\n indices = 0.5 * (cupy.floor(indices) + cupy.ceil(indices))\n elif interpolation == 'nearest':\n # TODO(hvy): Implement nearest using around\n raise ValueError('\\'nearest\\' interpolation is not yet supported. '\n 'Please use any other interpolation method.')\n elif interpolation == 'linear':\n pass\n else:\n raise ValueError('Unexpected interpolation method.\\n'\n 'Actual: \\'{0}\\' not in (\\'linear\\', \\'lower\\', '\n '\\'higher\\', \\'midpoint\\')'.format(interpolation))\n\n if indices.dtype == cupy.int32:\n ret = cupy.rollaxis(ap, axis)\n ret = ret.take(indices, axis=0, out=out)\n else:\n if out is None:\n ret = cupy.empty(ap.shape[:-1] + q.shape, dtype=cupy.float64)\n else:\n ret = cupy.rollaxis(out, 0, out.ndim)\n\n cupy.ElementwiseKernel(\n 'S idx, raw T a, raw int32 offset', 'U ret',\n '''\n ptrdiff_t idx_below = floor(idx);\n U weight_above = idx - idx_below;\n\n ptrdiff_t offset_i = _ind.get()[0] * offset;\n ret = a[offset_i + idx_below] * (1.0 - weight_above)\n + a[offset_i + idx_below + 1] * weight_above;\n ''',\n 'percentile_weightnening'\n )(indices, ap, ap.shape[-1] if ap.ndim > 1 else 0, ret)\n ret = cupy.rollaxis(ret, -1) # Roll q dimension back to first axis\n\n if zerod:\n ret = ret.squeeze(0)\n if keepdims:\n if q.size > 1:\n keepdim = (-1,) + keepdim\n ret = ret.reshape(keepdim)\n\n return core._internal_ascontiguousarray(ret)\n", "path": "cupy/statistics/order.py"}], "after_files": [{"content": "import warnings\n\nimport cupy\nfrom cupy import core\nfrom cupy.core import _routines_statistics as _statistics\nfrom cupy.core import _fusion_thread_local\nfrom cupy.logic import content\n\n\ndef amin(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the minimum of an array or the minimum along an axis.\n\n .. note::\n\n When at least one element is NaN, the corresponding min value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array to take the minimum.\n axis (int): Along which axis to take the minimum. 
The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amin`\n\n \"\"\"\n if _fusion_thread_local.is_fusing():\n if keepdims:\n raise NotImplementedError(\n 'cupy.amin does not support `keepdims` in fusion yet.')\n return _fusion_thread_local.call_reduction(\n _statistics.amin, a, axis=axis, out=out)\n\n # TODO(okuta): check type\n return a.min(axis=axis, out=out, keepdims=keepdims)\n\n\ndef amax(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the maximum of an array or the maximum along an axis.\n\n .. note::\n\n When at least one element is NaN, the corresponding min value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array to take the maximum.\n axis (int): Along which axis to take the maximum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The maximum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amax`\n\n \"\"\"\n if _fusion_thread_local.is_fusing():\n if keepdims:\n raise NotImplementedError(\n 'cupy.amax does not support `keepdims` in fusion yet.')\n return _fusion_thread_local.call_reduction(\n _statistics.amax, a, axis=axis, out=out)\n\n # TODO(okuta): check type\n return a.max(axis=axis, out=out, keepdims=keepdims)\n\n\ndef nanmin(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the minimum of an array along an axis ignoring NaN.\n\n When there is a slice whose elements are all NaN, a :class:`RuntimeWarning`\n is raised and NaN is returned.\n\n Args:\n a (cupy.ndarray): Array to take the minimum.\n axis (int): Along which axis to take the minimum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. warning::\n\n This function may synchronize the device.\n\n .. seealso:: :func:`numpy.nanmin`\n\n \"\"\"\n # TODO(niboshi): Avoid synchronization.\n res = core.nanmin(a, axis=axis, out=out, keepdims=keepdims)\n if content.isnan(res).any(): # synchronize!\n warnings.warn('All-NaN slice encountered', RuntimeWarning)\n return res\n\n\ndef nanmax(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the maximum of an array along an axis ignoring NaN.\n\n When there is a slice whose elements are all NaN, a :class:`RuntimeWarning`\n is raised and NaN is returned.\n\n Args:\n a (cupy.ndarray): Array to take the maximum.\n axis (int): Along which axis to take the maximum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The maximum of ``a``, along the axis if specified.\n\n .. warning::\n\n This function may synchronize the device.\n\n .. seealso:: :func:`numpy.nanmax`\n\n \"\"\"\n # TODO(niboshi): Avoid synchronization.\n res = core.nanmax(a, axis=axis, out=out, keepdims=keepdims)\n if content.isnan(res).any(): # synchronize!\n warnings.warn('All-NaN slice encountered', RuntimeWarning)\n return res\n\n\ndef ptp(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the range of values (maximum - minimum) along an axis.\n\n .. 
note::\n\n The name of the function comes from the acronym for 'peak to peak'.\n\n When at least one element is NaN, the corresponding ptp value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array over which to take the range.\n axis (int): Axis along which to take the minimum. The flattened\n array is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is retained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amin`\n\n \"\"\"\n return a.ptp(axis=axis, out=out, keepdims=keepdims)\n\n\ndef percentile(a, q, axis=None, out=None, interpolation='linear',\n keepdims=False):\n \"\"\"Computes the q-th percentile of the data along the specified axis.\n\n Args:\n a (cupy.ndarray): Array for which to compute percentiles.\n q (float, tuple of floats or cupy.ndarray): Percentiles to compute\n in the range between 0 and 100 inclusive.\n axis (int or tuple of ints): Along which axis or axes to compute the\n percentiles. The flattened array is used by default.\n out (cupy.ndarray): Output array.\n interpolation (str): Interpolation method when a quantile lies between\n two data points. ``linear`` interpolation is used by default.\n Supported interpolations are``lower``, ``higher``, ``midpoint``,\n ``nearest`` and ``linear``.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The percentiles of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.percentile`\n\n \"\"\"\n if not isinstance(q, cupy.ndarray):\n q = cupy.asarray(q, dtype='d')\n if q.ndim == 0:\n q = q[None]\n zerod = True\n else:\n zerod = False\n if q.ndim > 1:\n raise ValueError('Expected q to have a dimension of 1.\\n'\n 'Actual: {0} != 1'.format(q.ndim))\n\n if keepdims:\n if axis is None:\n keepdim = (1,) * a.ndim\n else:\n keepdim = list(a.shape)\n for ax in axis:\n keepdim[ax % a.ndim] = 1\n keepdim = tuple(keepdim)\n\n # Copy a since we need it sorted but without modifying the original array\n if isinstance(axis, int):\n axis = axis,\n if axis is None:\n ap = a.flatten()\n nkeep = 0\n else:\n # Reduce axes from a and put them last\n axis = tuple(ax % a.ndim for ax in axis)\n keep = set(range(a.ndim)) - set(axis)\n nkeep = len(keep)\n for i, s in enumerate(sorted(keep)):\n a = a.swapaxes(i, s)\n ap = a.reshape(a.shape[:nkeep] + (-1,)).copy()\n\n axis = -1\n ap.sort(axis=axis)\n Nx = ap.shape[axis]\n indices = q * 0.01 * (Nx - 1.) # percents to decimals\n\n if interpolation == 'lower':\n indices = cupy.floor(indices).astype(cupy.int32)\n elif interpolation == 'higher':\n indices = cupy.ceil(indices).astype(cupy.int32)\n elif interpolation == 'midpoint':\n indices = 0.5 * (cupy.floor(indices) + cupy.ceil(indices))\n elif interpolation == 'nearest':\n # TODO(hvy): Implement nearest using around\n raise ValueError('\\'nearest\\' interpolation is not yet supported. 
'\n 'Please use any other interpolation method.')\n elif interpolation == 'linear':\n pass\n else:\n raise ValueError('Unexpected interpolation method.\\n'\n 'Actual: \\'{0}\\' not in (\\'linear\\', \\'lower\\', '\n '\\'higher\\', \\'midpoint\\')'.format(interpolation))\n\n if indices.dtype == cupy.int32:\n ret = cupy.rollaxis(ap, axis)\n ret = ret.take(indices, axis=0, out=out)\n else:\n if out is None:\n ret = cupy.empty(ap.shape[:-1] + q.shape, dtype=cupy.float64)\n else:\n ret = cupy.rollaxis(out, 0, out.ndim)\n\n cupy.ElementwiseKernel(\n 'S idx, raw T a, raw int32 offset', 'U ret',\n '''\n ptrdiff_t idx_below = floor(idx);\n U weight_above = idx - idx_below;\n\n ptrdiff_t offset_i = _ind.get()[0] * offset;\n ret = a[offset_i + idx_below] * (1.0 - weight_above)\n + a[offset_i + idx_below + 1] * weight_above;\n ''',\n 'percentile_weightnening'\n )(indices, ap, ap.shape[-1] if ap.ndim > 1 else 0, ret)\n ret = cupy.rollaxis(ret, -1) # Roll q dimension back to first axis\n\n if zerod:\n ret = ret.squeeze(0)\n if keepdims:\n if q.size > 1:\n keepdim = (-1,) + keepdim\n ret = ret.reshape(keepdim)\n\n return core._internal_ascontiguousarray(ret)\n", "path": "cupy/statistics/order.py"}]}
num_tokens: 3,671
num_tokens_diff: 123
problem_id: gh_patches_debug_13801
source: rasdani/github-patches
task_type: git_diff
in_source_id: conan-io__conan-center-index-19732
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [package] sail/0.9.0-rc2: PR broke the package ### Description I've just noticed that the V2 version of sail suddenly disappeared from the center. This was caused by https://github.com/conan-io/conan-center-index/pull/18454 Please please please don't merge pull requests that break packages! Could you please also revert the PR? I have no access to the computer right now. ### Package and Environment Details All envs ### Conan profile All profiles ### Steps to reproduce No steps ### Logs No logs --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `recipes/sail/all/conanfile.py` Content: ``` 1 from conan import ConanFile 2 from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout 3 from conan.tools.files import apply_conandata_patches, export_conandata_patches, copy, get, rename, rmdir 4 from conan.tools.microsoft import is_msvc 5 import os 6 7 required_conan_version = ">=1.53.0" 8 9 class SAILConan(ConanFile): 10 name = "sail" 11 description = "The missing small and fast image decoding library for humans (not for machines)" 12 url = "https://github.com/conan-io/conan-center-index" 13 homepage = "https://sail.software" 14 topics = ( "image", "encoding", "decoding", "graphics" ) 15 license = "MIT" 16 settings = "os", "arch", "compiler", "build_type" 17 options = { 18 "shared": [True, False], 19 "fPIC": [True, False], 20 "thread_safe": [True, False], 21 "with_avif": [True, False], 22 "with_gif": [True, False], 23 "with_jpeg2000": [True, False], 24 "with_jpeg": ["libjpeg", "libjpeg-turbo", False], 25 "with_png": [True, False], 26 "with_tiff": [True, False], 27 "with_webp": [True, False], 28 } 29 default_options = { 30 "shared": False, 31 "fPIC": True, 32 "thread_safe": True, 33 "with_avif": True, 34 "with_gif": True, 35 "with_jpeg2000": True, 36 "with_jpeg": "libjpeg", 37 "with_png": True, 38 "with_tiff": True, 39 "with_webp": True, 40 } 41 42 def export_sources(self): 43 export_conandata_patches(self) 44 45 def config_options(self): 46 if self.settings.os == "Windows": 47 self.options.rm_safe("fPIC") 48 49 def configure(self): 50 if self.options.shared: 51 self.options.rm_safe("fPIC") 52 53 def requirements(self): 54 if self.options.with_avif: 55 self.requires("libavif/0.11.1") 56 if self.options.with_gif: 57 self.requires("giflib/5.2.1") 58 if self.options.with_jpeg2000: 59 self.requires("jasper/4.0.0") 60 if self.options.with_jpeg == "libjpeg-turbo": 61 self.requires("libjpeg-turbo/2.1.5") 62 elif self.options.with_jpeg == "libjpeg": 63 self.requires("libjpeg/9e") 64 if self.options.with_png: 65 self.requires("libpng/1.6.40") 66 if self.options.with_tiff: 67 self.requires("libtiff/4.5.1") 68 if self.options.with_webp: 69 self.requires("libwebp/1.3.1") 70 71 def layout(self): 72 cmake_layout(self, src_folder="src") 73 74 def source(self): 75 get(self, **self.conan_data["sources"][self.version], 76 strip_root=True, destination=self.source_folder) 77 78 def generate(self): 79 enable_codecs = [] 80 81 if self.options.with_avif: 82 enable_codecs.append("avif") 83 if self.options.with_gif: 84 enable_codecs.append("gif") 85 if self.options.with_jpeg2000: 86 enable_codecs.append("jpeg2000") 87 if self.options.with_jpeg: 88 enable_codecs.append("jpeg") 89 if self.options.with_png: 90 enable_codecs.append("png") 91 if self.options.with_tiff: 92 enable_codecs.append("tiff") 93 if 
self.options.with_webp: 94 enable_codecs.append("webp") 95 96 tc = CMakeToolchain(self) 97 tc.variables["SAIL_BUILD_APPS"] = False 98 tc.variables["SAIL_BUILD_EXAMPLES"] = False 99 tc.variables["SAIL_BUILD_TESTS"] = False 100 tc.variables["SAIL_COMBINE_CODECS"] = True 101 tc.variables["SAIL_ENABLE_CODECS"] = ";".join(enable_codecs) 102 tc.variables["SAIL_INSTALL_PDB"] = False 103 tc.variables["SAIL_THREAD_SAFE"] = self.options.thread_safe 104 # TODO: Remove after fixing https://github.com/conan-io/conan/issues/12012 105 if is_msvc(self): 106 tc.cache_variables["CMAKE_TRY_COMPILE_CONFIGURATION"] = str(self.settings.build_type) 107 # TODO: Remove after fixing https://github.com/conan-io/conan-center-index/issues/13159 108 # C3I workaround to force CMake to choose the highest version of 109 # the windows SDK available in the system 110 if is_msvc(self) and not self.conf.get("tools.cmake.cmaketoolchain:system_version"): 111 tc.variables["CMAKE_SYSTEM_VERSION"] = "10.0" 112 tc.generate() 113 114 deps = CMakeDeps(self) 115 deps.generate() 116 117 def build(self): 118 apply_conandata_patches(self) 119 120 cmake = CMake(self) 121 cmake.configure() 122 cmake.build() 123 124 def package(self): 125 copy(self, "LICENSE.txt", self.source_folder, os.path.join(self.package_folder, "licenses")) 126 copy(self, "LICENSE.INIH.txt", self.source_folder, os.path.join(self.package_folder, "licenses")) 127 copy(self, "LICENSE.MUNIT.txt", self.source_folder, os.path.join(self.package_folder, "licenses")) 128 129 cmake = CMake(self) 130 cmake.install() 131 132 # Remove CMake and pkg-config rules 133 rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) 134 rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig")) 135 # Move icons 136 rename(self, os.path.join(self.package_folder, "share"), 137 os.path.join(self.package_folder, "res")) 138 139 def package_info(self): 140 self.cpp_info.set_property("cmake_file_name", "Sail") 141 142 self.cpp_info.filenames["cmake_find_package"] = "Sail" 143 self.cpp_info.filenames["cmake_find_package_multi"] = "Sail" 144 self.cpp_info.names["cmake_find_package"] = "SAIL" 145 self.cpp_info.names["cmake_find_package_multi"] = "SAIL" 146 147 self.cpp_info.components["sail-common"].set_property("cmake_target_name", "SAIL::SailCommon") 148 self.cpp_info.components["sail-common"].set_property("pkg_config_name", "libsail-common") 149 self.cpp_info.components["sail-common"].names["cmake_find_package"] = "SailCommon" 150 self.cpp_info.components["sail-common"].names["cmake_find_package_multi"] = "SailCommon" 151 self.cpp_info.components["sail-common"].includedirs = ["include/sail"] 152 self.cpp_info.components["sail-common"].libs = ["sail-common"] 153 154 self.cpp_info.components["sail-codecs"].set_property("cmake_target_name", "SAIL::SailCodecs") 155 self.cpp_info.components["sail-codecs"].names["cmake_find_package"] = "SailCodecs" 156 self.cpp_info.components["sail-codecs"].names["cmake_find_package_multi"] = "SailCodecs" 157 self.cpp_info.components["sail-codecs"].libs = ["sail-codecs"] 158 self.cpp_info.components["sail-codecs"].requires = ["sail-common"] 159 if self.options.with_avif: 160 self.cpp_info.components["sail-codecs"].requires.append("libavif::libavif") 161 if self.options.with_gif: 162 self.cpp_info.components["sail-codecs"].requires.append("giflib::giflib") 163 if self.options.with_jpeg2000: 164 self.cpp_info.components["sail-codecs"].requires.append("jasper::jasper") 165 if self.options.with_jpeg: 166 
self.cpp_info.components["sail-codecs"].requires.append("{0}::{0}".format(self.options.with_jpeg)) 167 if self.options.with_png: 168 self.cpp_info.components["sail-codecs"].requires.append("libpng::libpng") 169 if self.options.with_tiff: 170 self.cpp_info.components["sail-codecs"].requires.append("libtiff::libtiff") 171 if self.options.with_webp: 172 self.cpp_info.components["sail-codecs"].requires.append("libwebp::libwebp") 173 174 self.cpp_info.components["libsail"].set_property("cmake_target_name", "SAIL::Sail") 175 self.cpp_info.components["libsail"].set_property("pkg_config_name", "libsail") 176 self.cpp_info.components["libsail"].names["cmake_find_package"] = "Sail" 177 self.cpp_info.components["libsail"].names["cmake_find_package_multi"] = "Sail" 178 self.cpp_info.components["libsail"].libs = ["sail"] 179 if self.settings.os in ["Linux", "FreeBSD"]: 180 self.cpp_info.components["libsail"].system_libs.append("dl") 181 if self.options.thread_safe: 182 self.cpp_info.components["libsail"].system_libs.append("pthread") 183 self.cpp_info.components["libsail"].requires = ["sail-common", "sail-codecs"] 184 185 self.cpp_info.components["sail-manip"].set_property("cmake_target_name", "SAIL::SailManip") 186 self.cpp_info.components["sail-manip"].set_property("pkg_config_name", "libsail-manip") 187 self.cpp_info.components["sail-manip"].names["cmake_find_package"] = "SailManip" 188 self.cpp_info.components["sail-manip"].names["cmake_find_package_multi"] = "SailManip" 189 self.cpp_info.components["sail-manip"].libs = ["sail-manip"] 190 self.cpp_info.components["sail-manip"].requires = ["sail-common"] 191 192 self.cpp_info.components["sail-c++"].set_property("cmake_target_name", "SAIL::SailC++") 193 self.cpp_info.components["sail-c++"].set_property("pkg_config_name", "libsail-c++") 194 self.cpp_info.components["sail-c++"].names["cmake_find_package"] = "SailC++" 195 self.cpp_info.components["sail-c++"].names["cmake_find_package_multi"] = "SailC++" 196 self.cpp_info.components["sail-c++"].libs = ["sail-c++"] 197 self.cpp_info.components["sail-c++"].requires = ["libsail", "sail-manip"] 198 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/recipes/sail/all/conanfile.py b/recipes/sail/all/conanfile.py --- a/recipes/sail/all/conanfile.py +++ b/recipes/sail/all/conanfile.py @@ -104,11 +104,6 @@ # TODO: Remove after fixing https://github.com/conan-io/conan/issues/12012 if is_msvc(self): tc.cache_variables["CMAKE_TRY_COMPILE_CONFIGURATION"] = str(self.settings.build_type) - # TODO: Remove after fixing https://github.com/conan-io/conan-center-index/issues/13159 - # C3I workaround to force CMake to choose the highest version of - # the windows SDK available in the system - if is_msvc(self) and not self.conf.get("tools.cmake.cmaketoolchain:system_version"): - tc.variables["CMAKE_SYSTEM_VERSION"] = "10.0" tc.generate() deps = CMakeDeps(self)
{"golden_diff": "diff --git a/recipes/sail/all/conanfile.py b/recipes/sail/all/conanfile.py\n--- a/recipes/sail/all/conanfile.py\n+++ b/recipes/sail/all/conanfile.py\n@@ -104,11 +104,6 @@\n # TODO: Remove after fixing https://github.com/conan-io/conan/issues/12012\n if is_msvc(self):\n tc.cache_variables[\"CMAKE_TRY_COMPILE_CONFIGURATION\"] = str(self.settings.build_type)\n- # TODO: Remove after fixing https://github.com/conan-io/conan-center-index/issues/13159\n- # C3I workaround to force CMake to choose the highest version of\n- # the windows SDK available in the system\n- if is_msvc(self) and not self.conf.get(\"tools.cmake.cmaketoolchain:system_version\"):\n- tc.variables[\"CMAKE_SYSTEM_VERSION\"] = \"10.0\"\n tc.generate()\n \n deps = CMakeDeps(self)\n", "issue": "[package] sail/0.9.0-rc2: PR broke the package\n### Description\n\nI've just noticed that the V2 version of sail suddenly disappeared from the center. This was caused by https://github.com/conan-io/conan-center-index/pull/18454\r\n\r\nPlease please please don't merge pull requests that break packages! Could you please also revert the PR? I have no access to the computer right now.\n\n### Package and Environment Details\n\nAll envs\n\n### Conan profile\n\nAll profiles\n\n### Steps to reproduce\n\nNo steps\n\n### Logs\n\nNo logs\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout\nfrom conan.tools.files import apply_conandata_patches, export_conandata_patches, copy, get, rename, rmdir\nfrom conan.tools.microsoft import is_msvc\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\nclass SAILConan(ConanFile):\n name = \"sail\"\n description = \"The missing small and fast image decoding library for humans (not for machines)\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://sail.software\"\n topics = ( \"image\", \"encoding\", \"decoding\", \"graphics\" )\n license = \"MIT\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"thread_safe\": [True, False],\n \"with_avif\": [True, False],\n \"with_gif\": [True, False],\n \"with_jpeg2000\": [True, False],\n \"with_jpeg\": [\"libjpeg\", \"libjpeg-turbo\", False],\n \"with_png\": [True, False],\n \"with_tiff\": [True, False],\n \"with_webp\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"thread_safe\": True,\n \"with_avif\": True,\n \"with_gif\": True,\n \"with_jpeg2000\": True,\n \"with_jpeg\": \"libjpeg\",\n \"with_png\": True,\n \"with_tiff\": True,\n \"with_webp\": True,\n }\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n self.options.rm_safe(\"fPIC\")\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def requirements(self):\n if self.options.with_avif:\n self.requires(\"libavif/0.11.1\")\n if self.options.with_gif:\n self.requires(\"giflib/5.2.1\")\n if self.options.with_jpeg2000:\n self.requires(\"jasper/4.0.0\")\n if self.options.with_jpeg == \"libjpeg-turbo\":\n self.requires(\"libjpeg-turbo/2.1.5\")\n elif self.options.with_jpeg == \"libjpeg\":\n self.requires(\"libjpeg/9e\")\n if self.options.with_png:\n self.requires(\"libpng/1.6.40\")\n if self.options.with_tiff:\n self.requires(\"libtiff/4.5.1\")\n if self.options.with_webp:\n self.requires(\"libwebp/1.3.1\")\n\n def layout(self):\n cmake_layout(self, 
src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n strip_root=True, destination=self.source_folder)\n\n def generate(self):\n enable_codecs = []\n\n if self.options.with_avif:\n enable_codecs.append(\"avif\")\n if self.options.with_gif:\n enable_codecs.append(\"gif\")\n if self.options.with_jpeg2000:\n enable_codecs.append(\"jpeg2000\")\n if self.options.with_jpeg:\n enable_codecs.append(\"jpeg\")\n if self.options.with_png:\n enable_codecs.append(\"png\")\n if self.options.with_tiff:\n enable_codecs.append(\"tiff\")\n if self.options.with_webp:\n enable_codecs.append(\"webp\")\n\n tc = CMakeToolchain(self)\n tc.variables[\"SAIL_BUILD_APPS\"] = False\n tc.variables[\"SAIL_BUILD_EXAMPLES\"] = False\n tc.variables[\"SAIL_BUILD_TESTS\"] = False\n tc.variables[\"SAIL_COMBINE_CODECS\"] = True\n tc.variables[\"SAIL_ENABLE_CODECS\"] = \";\".join(enable_codecs)\n tc.variables[\"SAIL_INSTALL_PDB\"] = False\n tc.variables[\"SAIL_THREAD_SAFE\"] = self.options.thread_safe\n # TODO: Remove after fixing https://github.com/conan-io/conan/issues/12012\n if is_msvc(self):\n tc.cache_variables[\"CMAKE_TRY_COMPILE_CONFIGURATION\"] = str(self.settings.build_type)\n # TODO: Remove after fixing https://github.com/conan-io/conan-center-index/issues/13159\n # C3I workaround to force CMake to choose the highest version of\n # the windows SDK available in the system\n if is_msvc(self) and not self.conf.get(\"tools.cmake.cmaketoolchain:system_version\"):\n tc.variables[\"CMAKE_SYSTEM_VERSION\"] = \"10.0\"\n tc.generate()\n\n deps = CMakeDeps(self)\n deps.generate()\n\n def build(self):\n apply_conandata_patches(self)\n\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE.txt\", self.source_folder, os.path.join(self.package_folder, \"licenses\"))\n copy(self, \"LICENSE.INIH.txt\", self.source_folder, os.path.join(self.package_folder, \"licenses\"))\n copy(self, \"LICENSE.MUNIT.txt\", self.source_folder, os.path.join(self.package_folder, \"licenses\"))\n\n cmake = CMake(self)\n cmake.install()\n\n # Remove CMake and pkg-config rules\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n # Move icons\n rename(self, os.path.join(self.package_folder, \"share\"),\n os.path.join(self.package_folder, \"res\"))\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"Sail\")\n\n self.cpp_info.filenames[\"cmake_find_package\"] = \"Sail\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"Sail\"\n self.cpp_info.names[\"cmake_find_package\"] = \"SAIL\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"SAIL\"\n\n self.cpp_info.components[\"sail-common\"].set_property(\"cmake_target_name\", \"SAIL::SailCommon\")\n self.cpp_info.components[\"sail-common\"].set_property(\"pkg_config_name\", \"libsail-common\")\n self.cpp_info.components[\"sail-common\"].names[\"cmake_find_package\"] = \"SailCommon\"\n self.cpp_info.components[\"sail-common\"].names[\"cmake_find_package_multi\"] = \"SailCommon\"\n self.cpp_info.components[\"sail-common\"].includedirs = [\"include/sail\"]\n self.cpp_info.components[\"sail-common\"].libs = [\"sail-common\"]\n\n self.cpp_info.components[\"sail-codecs\"].set_property(\"cmake_target_name\", \"SAIL::SailCodecs\")\n self.cpp_info.components[\"sail-codecs\"].names[\"cmake_find_package\"] = \"SailCodecs\"\n self.cpp_info.components[\"sail-codecs\"].names[\"cmake_find_package_multi\"] 
= \"SailCodecs\"\n self.cpp_info.components[\"sail-codecs\"].libs = [\"sail-codecs\"]\n self.cpp_info.components[\"sail-codecs\"].requires = [\"sail-common\"]\n if self.options.with_avif:\n self.cpp_info.components[\"sail-codecs\"].requires.append(\"libavif::libavif\")\n if self.options.with_gif:\n self.cpp_info.components[\"sail-codecs\"].requires.append(\"giflib::giflib\")\n if self.options.with_jpeg2000:\n self.cpp_info.components[\"sail-codecs\"].requires.append(\"jasper::jasper\")\n if self.options.with_jpeg:\n self.cpp_info.components[\"sail-codecs\"].requires.append(\"{0}::{0}\".format(self.options.with_jpeg))\n if self.options.with_png:\n self.cpp_info.components[\"sail-codecs\"].requires.append(\"libpng::libpng\")\n if self.options.with_tiff:\n self.cpp_info.components[\"sail-codecs\"].requires.append(\"libtiff::libtiff\")\n if self.options.with_webp:\n self.cpp_info.components[\"sail-codecs\"].requires.append(\"libwebp::libwebp\")\n\n self.cpp_info.components[\"libsail\"].set_property(\"cmake_target_name\", \"SAIL::Sail\")\n self.cpp_info.components[\"libsail\"].set_property(\"pkg_config_name\", \"libsail\")\n self.cpp_info.components[\"libsail\"].names[\"cmake_find_package\"] = \"Sail\"\n self.cpp_info.components[\"libsail\"].names[\"cmake_find_package_multi\"] = \"Sail\"\n self.cpp_info.components[\"libsail\"].libs = [\"sail\"]\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self.cpp_info.components[\"libsail\"].system_libs.append(\"dl\")\n if self.options.thread_safe:\n self.cpp_info.components[\"libsail\"].system_libs.append(\"pthread\")\n self.cpp_info.components[\"libsail\"].requires = [\"sail-common\", \"sail-codecs\"]\n\n self.cpp_info.components[\"sail-manip\"].set_property(\"cmake_target_name\", \"SAIL::SailManip\")\n self.cpp_info.components[\"sail-manip\"].set_property(\"pkg_config_name\", \"libsail-manip\")\n self.cpp_info.components[\"sail-manip\"].names[\"cmake_find_package\"] = \"SailManip\"\n self.cpp_info.components[\"sail-manip\"].names[\"cmake_find_package_multi\"] = \"SailManip\"\n self.cpp_info.components[\"sail-manip\"].libs = [\"sail-manip\"]\n self.cpp_info.components[\"sail-manip\"].requires = [\"sail-common\"]\n\n self.cpp_info.components[\"sail-c++\"].set_property(\"cmake_target_name\", \"SAIL::SailC++\")\n self.cpp_info.components[\"sail-c++\"].set_property(\"pkg_config_name\", \"libsail-c++\")\n self.cpp_info.components[\"sail-c++\"].names[\"cmake_find_package\"] = \"SailC++\"\n self.cpp_info.components[\"sail-c++\"].names[\"cmake_find_package_multi\"] = \"SailC++\"\n self.cpp_info.components[\"sail-c++\"].libs = [\"sail-c++\"]\n self.cpp_info.components[\"sail-c++\"].requires = [\"libsail\", \"sail-manip\"]\n", "path": "recipes/sail/all/conanfile.py"}], "after_files": [{"content": "from conan import ConanFile\nfrom conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout\nfrom conan.tools.files import apply_conandata_patches, export_conandata_patches, copy, get, rename, rmdir\nfrom conan.tools.microsoft import is_msvc\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\nclass SAILConan(ConanFile):\n name = \"sail\"\n description = \"The missing small and fast image decoding library for humans (not for machines)\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://sail.software\"\n topics = ( \"image\", \"encoding\", \"decoding\", \"graphics\" )\n license = \"MIT\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n 
\"thread_safe\": [True, False],\n \"with_avif\": [True, False],\n \"with_gif\": [True, False],\n \"with_jpeg2000\": [True, False],\n \"with_jpeg\": [\"libjpeg\", \"libjpeg-turbo\", False],\n \"with_png\": [True, False],\n \"with_tiff\": [True, False],\n \"with_webp\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"thread_safe\": True,\n \"with_avif\": True,\n \"with_gif\": True,\n \"with_jpeg2000\": True,\n \"with_jpeg\": \"libjpeg\",\n \"with_png\": True,\n \"with_tiff\": True,\n \"with_webp\": True,\n }\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n self.options.rm_safe(\"fPIC\")\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def requirements(self):\n if self.options.with_avif:\n self.requires(\"libavif/0.11.1\")\n if self.options.with_gif:\n self.requires(\"giflib/5.2.1\")\n if self.options.with_jpeg2000:\n self.requires(\"jasper/4.0.0\")\n if self.options.with_jpeg == \"libjpeg-turbo\":\n self.requires(\"libjpeg-turbo/2.1.5\")\n elif self.options.with_jpeg == \"libjpeg\":\n self.requires(\"libjpeg/9e\")\n if self.options.with_png:\n self.requires(\"libpng/1.6.40\")\n if self.options.with_tiff:\n self.requires(\"libtiff/4.5.1\")\n if self.options.with_webp:\n self.requires(\"libwebp/1.3.1\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n strip_root=True, destination=self.source_folder)\n\n def generate(self):\n enable_codecs = []\n\n if self.options.with_avif:\n enable_codecs.append(\"avif\")\n if self.options.with_gif:\n enable_codecs.append(\"gif\")\n if self.options.with_jpeg2000:\n enable_codecs.append(\"jpeg2000\")\n if self.options.with_jpeg:\n enable_codecs.append(\"jpeg\")\n if self.options.with_png:\n enable_codecs.append(\"png\")\n if self.options.with_tiff:\n enable_codecs.append(\"tiff\")\n if self.options.with_webp:\n enable_codecs.append(\"webp\")\n\n tc = CMakeToolchain(self)\n tc.variables[\"SAIL_BUILD_APPS\"] = False\n tc.variables[\"SAIL_BUILD_EXAMPLES\"] = False\n tc.variables[\"SAIL_BUILD_TESTS\"] = False\n tc.variables[\"SAIL_COMBINE_CODECS\"] = True\n tc.variables[\"SAIL_ENABLE_CODECS\"] = \";\".join(enable_codecs)\n tc.variables[\"SAIL_INSTALL_PDB\"] = False\n tc.variables[\"SAIL_THREAD_SAFE\"] = self.options.thread_safe\n # TODO: Remove after fixing https://github.com/conan-io/conan/issues/12012\n if is_msvc(self):\n tc.cache_variables[\"CMAKE_TRY_COMPILE_CONFIGURATION\"] = str(self.settings.build_type)\n tc.generate()\n\n deps = CMakeDeps(self)\n deps.generate()\n\n def build(self):\n apply_conandata_patches(self)\n\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE.txt\", self.source_folder, os.path.join(self.package_folder, \"licenses\"))\n copy(self, \"LICENSE.INIH.txt\", self.source_folder, os.path.join(self.package_folder, \"licenses\"))\n copy(self, \"LICENSE.MUNIT.txt\", self.source_folder, os.path.join(self.package_folder, \"licenses\"))\n\n cmake = CMake(self)\n cmake.install()\n\n # Remove CMake and pkg-config rules\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n # Move icons\n rename(self, os.path.join(self.package_folder, \"share\"),\n os.path.join(self.package_folder, \"res\"))\n\n def package_info(self):\n 
self.cpp_info.set_property(\"cmake_file_name\", \"Sail\")\n\n self.cpp_info.filenames[\"cmake_find_package\"] = \"Sail\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"Sail\"\n self.cpp_info.names[\"cmake_find_package\"] = \"SAIL\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"SAIL\"\n\n self.cpp_info.components[\"sail-common\"].set_property(\"cmake_target_name\", \"SAIL::SailCommon\")\n self.cpp_info.components[\"sail-common\"].set_property(\"pkg_config_name\", \"libsail-common\")\n self.cpp_info.components[\"sail-common\"].names[\"cmake_find_package\"] = \"SailCommon\"\n self.cpp_info.components[\"sail-common\"].names[\"cmake_find_package_multi\"] = \"SailCommon\"\n self.cpp_info.components[\"sail-common\"].includedirs = [\"include/sail\"]\n self.cpp_info.components[\"sail-common\"].libs = [\"sail-common\"]\n\n self.cpp_info.components[\"sail-codecs\"].set_property(\"cmake_target_name\", \"SAIL::SailCodecs\")\n self.cpp_info.components[\"sail-codecs\"].names[\"cmake_find_package\"] = \"SailCodecs\"\n self.cpp_info.components[\"sail-codecs\"].names[\"cmake_find_package_multi\"] = \"SailCodecs\"\n self.cpp_info.components[\"sail-codecs\"].libs = [\"sail-codecs\"]\n self.cpp_info.components[\"sail-codecs\"].requires = [\"sail-common\"]\n if self.options.with_avif:\n self.cpp_info.components[\"sail-codecs\"].requires.append(\"libavif::libavif\")\n if self.options.with_gif:\n self.cpp_info.components[\"sail-codecs\"].requires.append(\"giflib::giflib\")\n if self.options.with_jpeg2000:\n self.cpp_info.components[\"sail-codecs\"].requires.append(\"jasper::jasper\")\n if self.options.with_jpeg:\n self.cpp_info.components[\"sail-codecs\"].requires.append(\"{0}::{0}\".format(self.options.with_jpeg))\n if self.options.with_png:\n self.cpp_info.components[\"sail-codecs\"].requires.append(\"libpng::libpng\")\n if self.options.with_tiff:\n self.cpp_info.components[\"sail-codecs\"].requires.append(\"libtiff::libtiff\")\n if self.options.with_webp:\n self.cpp_info.components[\"sail-codecs\"].requires.append(\"libwebp::libwebp\")\n\n self.cpp_info.components[\"libsail\"].set_property(\"cmake_target_name\", \"SAIL::Sail\")\n self.cpp_info.components[\"libsail\"].set_property(\"pkg_config_name\", \"libsail\")\n self.cpp_info.components[\"libsail\"].names[\"cmake_find_package\"] = \"Sail\"\n self.cpp_info.components[\"libsail\"].names[\"cmake_find_package_multi\"] = \"Sail\"\n self.cpp_info.components[\"libsail\"].libs = [\"sail\"]\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self.cpp_info.components[\"libsail\"].system_libs.append(\"dl\")\n if self.options.thread_safe:\n self.cpp_info.components[\"libsail\"].system_libs.append(\"pthread\")\n self.cpp_info.components[\"libsail\"].requires = [\"sail-common\", \"sail-codecs\"]\n\n self.cpp_info.components[\"sail-manip\"].set_property(\"cmake_target_name\", \"SAIL::SailManip\")\n self.cpp_info.components[\"sail-manip\"].set_property(\"pkg_config_name\", \"libsail-manip\")\n self.cpp_info.components[\"sail-manip\"].names[\"cmake_find_package\"] = \"SailManip\"\n self.cpp_info.components[\"sail-manip\"].names[\"cmake_find_package_multi\"] = \"SailManip\"\n self.cpp_info.components[\"sail-manip\"].libs = [\"sail-manip\"]\n self.cpp_info.components[\"sail-manip\"].requires = [\"sail-common\"]\n\n self.cpp_info.components[\"sail-c++\"].set_property(\"cmake_target_name\", \"SAIL::SailC++\")\n self.cpp_info.components[\"sail-c++\"].set_property(\"pkg_config_name\", \"libsail-c++\")\n 
self.cpp_info.components[\"sail-c++\"].names[\"cmake_find_package\"] = \"SailC++\"\n self.cpp_info.components[\"sail-c++\"].names[\"cmake_find_package_multi\"] = \"SailC++\"\n self.cpp_info.components[\"sail-c++\"].libs = [\"sail-c++\"]\n self.cpp_info.components[\"sail-c++\"].requires = [\"libsail\", \"sail-manip\"]\n", "path": "recipes/sail/all/conanfile.py"}]}
num_tokens: 3,196
num_tokens_diff: 225
problem_id: gh_patches_debug_32168
source: rasdani/github-patches
task_type: git_diff
in_source_id: cloud-custodian__cloud-custodian-4544
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- azure - event hub resources Add event hub resource & implement firewall filter --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/c7n_azure/c7n_azure/resources/event_hub.py` Content: ``` 1 # Copyright 2019 Microsoft Corporation 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from c7n_azure.provider import resources 16 from c7n_azure.resources.arm import ArmResourceManager 17 18 19 @resources.register('eventhub') 20 class EventHub(ArmResourceManager): 21 """Event Hub Resource 22 23 :example: 24 25 Finds all Event Hub resources in the subscription. 26 27 .. code-block:: yaml 28 29 policies: 30 - name: find-all-eventhubs 31 resource: azure.eventhub 32 33 """ 34 35 class resource_type(ArmResourceManager.resource_type): 36 doc_groups = ['Events'] 37 38 service = 'azure.mgmt.eventhub' 39 client = 'EventHubManagementClient' 40 enum_spec = ('namespaces', 'list', None) 41 default_report_fields = ( 42 'name', 43 'location', 44 'resourceGroup', 45 'sku.name', 46 'properties.isAutoInflateEnabled' 47 ) 48 resource_type = 'Microsoft.EventHub/namespaces' 49 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/c7n_azure/c7n_azure/resources/event_hub.py b/tools/c7n_azure/c7n_azure/resources/event_hub.py --- a/tools/c7n_azure/c7n_azure/resources/event_hub.py +++ b/tools/c7n_azure/c7n_azure/resources/event_hub.py @@ -12,8 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging + +from c7n_azure.filters import FirewallRulesFilter from c7n_azure.provider import resources from c7n_azure.resources.arm import ArmResourceManager +from netaddr import IPSet @resources.register('eventhub') @@ -22,13 +26,17 @@ :example: - Finds all Event Hub resources in the subscription. + This policy will find all Event Hubs allowing traffic from 1.2.2.128/25 CIDR. .. code-block:: yaml policies: - - name: find-all-eventhubs - resource: azure.eventhub + - name: find-event-hub-allowing-subnet + resource: azure.eventhub + filters: + - type: firewall-rules + include: + - '1.2.2.128/25' """ @@ -46,3 +54,29 @@ 'properties.isAutoInflateEnabled' ) resource_type = 'Microsoft.EventHub/namespaces' + + [email protected]_registry.register('firewall-rules') +class EventHubFirewallRulesFilter(FirewallRulesFilter): + + def __init__(self, data, manager=None): + super(EventHubFirewallRulesFilter, self).__init__(data, manager) + self._log = logging.getLogger('custodian.azure.eventhub') + self.client = None + + @property + def log(self): + return self._log + + def process(self, resources, event=None): + self.client = self.manager.get_client() + return super(EventHubFirewallRulesFilter, self).process(resources, event) + + def _query_rules(self, resource): + query = self.client.namespaces.get_network_rule_set( + resource['resourceGroup'], + resource['name']) + + resource_rules = IPSet([r.ip_mask for r in query.ip_rules]) + + return resource_rules
{"golden_diff": "diff --git a/tools/c7n_azure/c7n_azure/resources/event_hub.py b/tools/c7n_azure/c7n_azure/resources/event_hub.py\n--- a/tools/c7n_azure/c7n_azure/resources/event_hub.py\n+++ b/tools/c7n_azure/c7n_azure/resources/event_hub.py\n@@ -12,8 +12,12 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import logging\n+\n+from c7n_azure.filters import FirewallRulesFilter\n from c7n_azure.provider import resources\n from c7n_azure.resources.arm import ArmResourceManager\n+from netaddr import IPSet\n \n \n @resources.register('eventhub')\n@@ -22,13 +26,17 @@\n \n :example:\n \n- Finds all Event Hub resources in the subscription.\n+ This policy will find all Event Hubs allowing traffic from 1.2.2.128/25 CIDR.\n \n .. code-block:: yaml\n \n policies:\n- - name: find-all-eventhubs\n- resource: azure.eventhub\n+ - name: find-event-hub-allowing-subnet\n+ resource: azure.eventhub\n+ filters:\n+ - type: firewall-rules\n+ include:\n+ - '1.2.2.128/25'\n \n \"\"\"\n \n@@ -46,3 +54,29 @@\n 'properties.isAutoInflateEnabled'\n )\n resource_type = 'Microsoft.EventHub/namespaces'\n+\n+\[email protected]_registry.register('firewall-rules')\n+class EventHubFirewallRulesFilter(FirewallRulesFilter):\n+\n+ def __init__(self, data, manager=None):\n+ super(EventHubFirewallRulesFilter, self).__init__(data, manager)\n+ self._log = logging.getLogger('custodian.azure.eventhub')\n+ self.client = None\n+\n+ @property\n+ def log(self):\n+ return self._log\n+\n+ def process(self, resources, event=None):\n+ self.client = self.manager.get_client()\n+ return super(EventHubFirewallRulesFilter, self).process(resources, event)\n+\n+ def _query_rules(self, resource):\n+ query = self.client.namespaces.get_network_rule_set(\n+ resource['resourceGroup'],\n+ resource['name'])\n+\n+ resource_rules = IPSet([r.ip_mask for r in query.ip_rules])\n+\n+ return resource_rules\n", "issue": "azure - event hub resources\nAdd event hub resource & implement firewall filter\n", "before_files": [{"content": "# Copyright 2019 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom c7n_azure.provider import resources\nfrom c7n_azure.resources.arm import ArmResourceManager\n\n\[email protected]('eventhub')\nclass EventHub(ArmResourceManager):\n \"\"\"Event Hub Resource\n\n :example:\n\n Finds all Event Hub resources in the subscription.\n\n .. 
code-block:: yaml\n\n policies:\n - name: find-all-eventhubs\n resource: azure.eventhub\n\n \"\"\"\n\n class resource_type(ArmResourceManager.resource_type):\n doc_groups = ['Events']\n\n service = 'azure.mgmt.eventhub'\n client = 'EventHubManagementClient'\n enum_spec = ('namespaces', 'list', None)\n default_report_fields = (\n 'name',\n 'location',\n 'resourceGroup',\n 'sku.name',\n 'properties.isAutoInflateEnabled'\n )\n resource_type = 'Microsoft.EventHub/namespaces'\n", "path": "tools/c7n_azure/c7n_azure/resources/event_hub.py"}], "after_files": [{"content": "# Copyright 2019 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nfrom c7n_azure.filters import FirewallRulesFilter\nfrom c7n_azure.provider import resources\nfrom c7n_azure.resources.arm import ArmResourceManager\nfrom netaddr import IPSet\n\n\[email protected]('eventhub')\nclass EventHub(ArmResourceManager):\n \"\"\"Event Hub Resource\n\n :example:\n\n This policy will find all Event Hubs allowing traffic from 1.2.2.128/25 CIDR.\n\n .. code-block:: yaml\n\n policies:\n - name: find-event-hub-allowing-subnet\n resource: azure.eventhub\n filters:\n - type: firewall-rules\n include:\n - '1.2.2.128/25'\n\n \"\"\"\n\n class resource_type(ArmResourceManager.resource_type):\n doc_groups = ['Events']\n\n service = 'azure.mgmt.eventhub'\n client = 'EventHubManagementClient'\n enum_spec = ('namespaces', 'list', None)\n default_report_fields = (\n 'name',\n 'location',\n 'resourceGroup',\n 'sku.name',\n 'properties.isAutoInflateEnabled'\n )\n resource_type = 'Microsoft.EventHub/namespaces'\n\n\[email protected]_registry.register('firewall-rules')\nclass EventHubFirewallRulesFilter(FirewallRulesFilter):\n\n def __init__(self, data, manager=None):\n super(EventHubFirewallRulesFilter, self).__init__(data, manager)\n self._log = logging.getLogger('custodian.azure.eventhub')\n self.client = None\n\n @property\n def log(self):\n return self._log\n\n def process(self, resources, event=None):\n self.client = self.manager.get_client()\n return super(EventHubFirewallRulesFilter, self).process(resources, event)\n\n def _query_rules(self, resource):\n query = self.client.namespaces.get_network_rule_set(\n resource['resourceGroup'],\n resource['name'])\n\n resource_rules = IPSet([r.ip_mask for r in query.ip_rules])\n\n return resource_rules\n", "path": "tools/c7n_azure/c7n_azure/resources/event_hub.py"}]}
num_tokens: 693
num_tokens_diff: 552
problem_id: gh_patches_debug_20098
source: rasdani/github-patches
task_type: git_diff
in_source_id: akvo__akvo-rsr-3495
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Project list widget fails due to a date vs. datetime formatting error --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `akvo/rsr/templatetags/rsr_filters.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """ 3 Akvo RSR is covered by the GNU Affero General Public License. 4 5 See more details in the license.txt file located at the root folder of the Akvo RSR module. 6 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 7 """ 8 9 import datetime 10 import time 11 12 from django import template 13 from django.conf import settings 14 from decimal import Decimal, ROUND_HALF_UP 15 16 register = template.Library() 17 18 DECIMAL_PLACES = getattr(settings, 'DECIMALS_DECIMAL_PLACES', 2) 19 20 21 @register.filter 22 def get_item(dictionary, key): 23 """Enable lookup in dicts.""" 24 return dictionary.get(key) 25 26 27 @register.filter 28 def string_to_date(value): 29 try: 30 time_format = "%Y-%m-%d %H:%M:%S" 31 fmt_time = time.strptime(value, time_format) 32 return datetime.datetime(*fmt_time[:6]) 33 except: 34 return value 35 36 # http://stackoverflow.com/questions/250357/smart-truncate-in-python 37 38 39 @register.filter("smart_truncate") 40 def smart_truncate(content, length=100, suffix='...'): 41 if len(content) <= length: 42 return content 43 else: 44 return content[:length].rsplit(' ', 1)[0] + suffix 45 46 47 @register.filter 48 def round(value, decimal_places=DECIMAL_PLACES): 49 try: 50 value = Decimal(str(value)) 51 except: 52 return u'' 53 if settings.DECIMALS_DEBUG: 54 decimal_result = value.quantize(Decimal(10) ** -decimal_places) 55 return decimal_result 56 else: 57 decimal_result = value.quantize(Decimal(10), ROUND_HALF_UP) 58 return 0 if decimal_result <= 0 else decimal_result 59 round.is_safe = True 60 61 62 @register.filter 63 def countries_list(obj): 64 """ return a list of the countries of all locations of an object. 65 currently works for Project and Organisation """ 66 return obj.locations.values_list('country__name', flat=True) 67 68 69 @register.filter 70 def continents_list(obj): 71 """return a list of the continents of all locations of an object" 72 currently works for Project and Organisation """ 73 return obj.locations.values_list('country__continent', flat=True) 74 75 76 @register.filter 77 def rsr_sorted_set(iterable): 78 """ create a set of the iterable to eliminate duplicates 79 then make a list of the set and sort it 80 used with countries_list and continents_list 81 """ 82 set_list = list(frozenset(iterable)) 83 set_list.sort() 84 return set_list 85 86 87 @register.filter 88 def load_partnerships_and_orgs(project): 89 return project.partnerships.prefetch_related('organisation').all() 90 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/akvo/rsr/templatetags/rsr_filters.py b/akvo/rsr/templatetags/rsr_filters.py --- a/akvo/rsr/templatetags/rsr_filters.py +++ b/akvo/rsr/templatetags/rsr_filters.py @@ -6,9 +6,6 @@ For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. """ -import datetime -import time - from django import template from django.conf import settings from decimal import Decimal, ROUND_HALF_UP @@ -24,18 +21,7 @@ return dictionary.get(key) [email protected] -def string_to_date(value): - try: - time_format = "%Y-%m-%d %H:%M:%S" - fmt_time = time.strptime(value, time_format) - return datetime.datetime(*fmt_time[:6]) - except: - return value - # http://stackoverflow.com/questions/250357/smart-truncate-in-python - - @register.filter("smart_truncate") def smart_truncate(content, length=100, suffix='...'): if len(content) <= length:
{"golden_diff": "diff --git a/akvo/rsr/templatetags/rsr_filters.py b/akvo/rsr/templatetags/rsr_filters.py\n--- a/akvo/rsr/templatetags/rsr_filters.py\n+++ b/akvo/rsr/templatetags/rsr_filters.py\n@@ -6,9 +6,6 @@\n For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n \"\"\"\n \n-import datetime\n-import time\n-\n from django import template\n from django.conf import settings\n from decimal import Decimal, ROUND_HALF_UP\n@@ -24,18 +21,7 @@\n return dictionary.get(key)\n \n \[email protected]\n-def string_to_date(value):\n- try:\n- time_format = \"%Y-%m-%d %H:%M:%S\"\n- fmt_time = time.strptime(value, time_format)\n- return datetime.datetime(*fmt_time[:6])\n- except:\n- return value\n-\n # http://stackoverflow.com/questions/250357/smart-truncate-in-python\n-\n-\n @register.filter(\"smart_truncate\")\n def smart_truncate(content, length=100, suffix='...'):\n if len(content) <= length:\n", "issue": "Project list widget fails due to a date vs. datetime formatting error\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nAkvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport datetime\nimport time\n\nfrom django import template\nfrom django.conf import settings\nfrom decimal import Decimal, ROUND_HALF_UP\n\nregister = template.Library()\n\nDECIMAL_PLACES = getattr(settings, 'DECIMALS_DECIMAL_PLACES', 2)\n\n\[email protected]\ndef get_item(dictionary, key):\n \"\"\"Enable lookup in dicts.\"\"\"\n return dictionary.get(key)\n\n\[email protected]\ndef string_to_date(value):\n try:\n time_format = \"%Y-%m-%d %H:%M:%S\"\n fmt_time = time.strptime(value, time_format)\n return datetime.datetime(*fmt_time[:6])\n except:\n return value\n\n# http://stackoverflow.com/questions/250357/smart-truncate-in-python\n\n\[email protected](\"smart_truncate\")\ndef smart_truncate(content, length=100, suffix='...'):\n if len(content) <= length:\n return content\n else:\n return content[:length].rsplit(' ', 1)[0] + suffix\n\n\[email protected]\ndef round(value, decimal_places=DECIMAL_PLACES):\n try:\n value = Decimal(str(value))\n except:\n return u''\n if settings.DECIMALS_DEBUG:\n decimal_result = value.quantize(Decimal(10) ** -decimal_places)\n return decimal_result\n else:\n decimal_result = value.quantize(Decimal(10), ROUND_HALF_UP)\n return 0 if decimal_result <= 0 else decimal_result\nround.is_safe = True\n\n\[email protected]\ndef countries_list(obj):\n \"\"\" return a list of the countries of all locations of an object.\n currently works for Project and Organisation \"\"\"\n return obj.locations.values_list('country__name', flat=True)\n\n\[email protected]\ndef continents_list(obj):\n \"\"\"return a list of the continents of all locations of an object\"\n currently works for Project and Organisation \"\"\"\n return obj.locations.values_list('country__continent', flat=True)\n\n\[email protected]\ndef rsr_sorted_set(iterable):\n \"\"\" create a set of the iterable to eliminate duplicates\n then make a list of the set and sort it\n used with countries_list and continents_list\n \"\"\"\n set_list = list(frozenset(iterable))\n set_list.sort()\n return set_list\n\n\[email protected]\ndef load_partnerships_and_orgs(project):\n return project.partnerships.prefetch_related('organisation').all()\n", "path": "akvo/rsr/templatetags/rsr_filters.py"}], 
"after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nAkvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django import template\nfrom django.conf import settings\nfrom decimal import Decimal, ROUND_HALF_UP\n\nregister = template.Library()\n\nDECIMAL_PLACES = getattr(settings, 'DECIMALS_DECIMAL_PLACES', 2)\n\n\[email protected]\ndef get_item(dictionary, key):\n \"\"\"Enable lookup in dicts.\"\"\"\n return dictionary.get(key)\n\n\n# http://stackoverflow.com/questions/250357/smart-truncate-in-python\[email protected](\"smart_truncate\")\ndef smart_truncate(content, length=100, suffix='...'):\n if len(content) <= length:\n return content\n else:\n return content[:length].rsplit(' ', 1)[0] + suffix\n\n\[email protected]\ndef round(value, decimal_places=DECIMAL_PLACES):\n try:\n value = Decimal(str(value))\n except:\n return u''\n if settings.DECIMALS_DEBUG:\n decimal_result = value.quantize(Decimal(10) ** -decimal_places)\n return decimal_result\n else:\n decimal_result = value.quantize(Decimal(10), ROUND_HALF_UP)\n return 0 if decimal_result <= 0 else decimal_result\nround.is_safe = True\n\n\[email protected]\ndef countries_list(obj):\n \"\"\" return a list of the countries of all locations of an object.\n currently works for Project and Organisation \"\"\"\n return obj.locations.values_list('country__name', flat=True)\n\n\[email protected]\ndef continents_list(obj):\n \"\"\"return a list of the continents of all locations of an object\"\n currently works for Project and Organisation \"\"\"\n return obj.locations.values_list('country__continent', flat=True)\n\n\[email protected]\ndef rsr_sorted_set(iterable):\n \"\"\" create a set of the iterable to eliminate duplicates\n then make a list of the set and sort it\n used with countries_list and continents_list\n \"\"\"\n set_list = list(frozenset(iterable))\n set_list.sort()\n return set_list\n\n\[email protected]\ndef load_partnerships_and_orgs(project):\n return project.partnerships.prefetch_related('organisation').all()\n", "path": "akvo/rsr/templatetags/rsr_filters.py"}]}
num_tokens: 1,044
num_tokens_diff: 268
problem_id: gh_patches_debug_3430
source: rasdani/github-patches
task_type: git_diff
in_source_id: open-mmlab__mmdetection-1781
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cannot reproduce the results of SSD-300 on WIDER_FACE Hi @sovrasov, Recently, I'm reproducing the results of SSD-300 on WIDER_FACE. I use the provided config based on mmdetection and get Recall 36.9, Precision 0.023 and AP 27.3. The results are worse than Recall 43.4, Precision 0.029 and AP 34.7 (shown in https://github.com/open-mmlab/mmdetection/pull/765#issuecomment-502579220). Generally, the models provided by mmdetection are trained on 8 gpus. So, is the SSD-300 on WIDER_FACE also trained on 8 gpus? If so, maybe I will change the learning rate because I trained the model on 4 gpus. Moreover, any other advice? Thanks a lot. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `configs/wider_face/ssd300_wider_face.py` Content: ``` 1 # model settings 2 input_size = 300 3 model = dict( 4 type='SingleStageDetector', 5 pretrained='open-mmlab://vgg16_caffe', 6 backbone=dict( 7 type='SSDVGG', 8 input_size=input_size, 9 depth=16, 10 with_last_pool=False, 11 ceil_mode=True, 12 out_indices=(3, 4), 13 out_feature_indices=(22, 34), 14 l2_norm_scale=20), 15 neck=None, 16 bbox_head=dict( 17 type='SSDHead', 18 input_size=input_size, 19 in_channels=(512, 1024, 512, 256, 256, 256), 20 num_classes=2, 21 anchor_strides=(8, 16, 32, 64, 100, 300), 22 basesize_ratio_range=(0.15, 0.9), 23 anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), 24 target_means=(.0, .0, .0, .0), 25 target_stds=(0.1, 0.1, 0.2, 0.2))) 26 # model training and testing settings 27 cudnn_benchmark = True 28 train_cfg = dict( 29 assigner=dict( 30 type='MaxIoUAssigner', 31 pos_iou_thr=0.5, 32 neg_iou_thr=0.5, 33 min_pos_iou=0., 34 ignore_iof_thr=-1, 35 gt_max_assign_all=False), 36 smoothl1_beta=1., 37 allowed_border=-1, 38 pos_weight=-1, 39 neg_pos_ratio=3, 40 debug=False) 41 test_cfg = dict( 42 nms=dict(type='nms', iou_thr=0.45), 43 min_bbox_size=0, 44 score_thr=0.02, 45 max_per_img=200) 46 # dataset settings 47 dataset_type = 'WIDERFaceDataset' 48 data_root = 'data/WIDERFace/' 49 img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) 50 train_pipeline = [ 51 dict(type='LoadImageFromFile', to_float32=True), 52 dict(type='LoadAnnotations', with_bbox=True), 53 dict( 54 type='PhotoMetricDistortion', 55 brightness_delta=32, 56 contrast_range=(0.5, 1.5), 57 saturation_range=(0.5, 1.5), 58 hue_delta=18), 59 dict( 60 type='Expand', 61 mean=img_norm_cfg['mean'], 62 to_rgb=img_norm_cfg['to_rgb'], 63 ratio_range=(1, 4)), 64 dict( 65 type='MinIoURandomCrop', 66 min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), 67 min_crop_size=0.3), 68 dict(type='Resize', img_scale=(300, 300), keep_ratio=False), 69 dict(type='Normalize', **img_norm_cfg), 70 dict(type='RandomFlip', flip_ratio=0.5), 71 dict(type='DefaultFormatBundle'), 72 dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), 73 ] 74 test_pipeline = [ 75 dict(type='LoadImageFromFile'), 76 dict( 77 type='MultiScaleFlipAug', 78 img_scale=(300, 300), 79 flip=False, 80 transforms=[ 81 dict(type='Resize', keep_ratio=False), 82 dict(type='Normalize', **img_norm_cfg), 83 dict(type='ImageToTensor', keys=['img']), 84 dict(type='Collect', keys=['img']), 85 ]) 86 ] 87 data = dict( 88 imgs_per_gpu=60, 89 workers_per_gpu=2, 90 train=dict( 91 type='RepeatDataset', 92 times=2, 93 dataset=dict( 94 type=dataset_type, 95 ann_file=data_root + 'train.txt', 96 img_prefix=data_root + 'WIDER_train/', 
97 min_size=17, 98 pipeline=train_pipeline)), 99 val=dict( 100 type=dataset_type, 101 ann_file=data_root + 'val.txt', 102 img_prefix=data_root + 'WIDER_val/', 103 pipeline=test_pipeline), 104 test=dict( 105 type=dataset_type, 106 ann_file=data_root + 'val.txt', 107 img_prefix=data_root + 'WIDER_val/', 108 pipeline=test_pipeline)) 109 # optimizer 110 optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4) 111 optimizer_config = dict() 112 # learning policy 113 lr_config = dict( 114 policy='step', 115 warmup='linear', 116 warmup_iters=1000, 117 warmup_ratio=1.0 / 3, 118 step=[16, 20]) 119 checkpoint_config = dict(interval=1) 120 # yapf:disable 121 log_config = dict( 122 interval=1, 123 hooks=[ 124 dict(type='TextLoggerHook'), 125 # dict(type='TensorboardLoggerHook') 126 ]) 127 # yapf:enable 128 # runtime settings 129 total_epochs = 24 130 dist_params = dict(backend='nccl') 131 log_level = 'INFO' 132 work_dir = './work_dirs/ssd300_wider' 133 load_from = None 134 resume_from = None 135 workflow = [('train', 1)] 136 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/configs/wider_face/ssd300_wider_face.py b/configs/wider_face/ssd300_wider_face.py --- a/configs/wider_face/ssd300_wider_face.py +++ b/configs/wider_face/ssd300_wider_face.py @@ -107,7 +107,7 @@ img_prefix=data_root + 'WIDER_val/', pipeline=test_pipeline)) # optimizer -optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4) +optimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4) optimizer_config = dict() # learning policy lr_config = dict(
{"golden_diff": "diff --git a/configs/wider_face/ssd300_wider_face.py b/configs/wider_face/ssd300_wider_face.py\n--- a/configs/wider_face/ssd300_wider_face.py\n+++ b/configs/wider_face/ssd300_wider_face.py\n@@ -107,7 +107,7 @@\n img_prefix=data_root + 'WIDER_val/',\n pipeline=test_pipeline))\n # optimizer\n-optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)\n+optimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4)\n optimizer_config = dict()\n # learning policy\n lr_config = dict(\n", "issue": "Cannot reproduce the results of SSD-300 on WIDER_FACE\nHi @sovrasov,\r\nRecently, I'm reproducing the results of SSD-300 on WIDER_FACE.\r\nI use the provided config based on mmdetection and get Recall 36.9, Precision 0.023 and AP 27.3.\r\nThe results are worse than Recall 43.4, Precision 0.029 and AP 34.7 (shown in https://github.com/open-mmlab/mmdetection/pull/765#issuecomment-502579220).\r\n\r\nGenerally, the models provided by mmdetection are trained on 8 gpus. So, is the SSD-300 on WIDER_FACE also trained on 8 gpus? If so, maybe I will change the learning rate because I trained the model on 4 gpus.\r\n\r\nMoreover, any other advice?\r\n\r\nThanks a lot.\r\n\n", "before_files": [{"content": "# model settings\ninput_size = 300\nmodel = dict(\n type='SingleStageDetector',\n pretrained='open-mmlab://vgg16_caffe',\n backbone=dict(\n type='SSDVGG',\n input_size=input_size,\n depth=16,\n with_last_pool=False,\n ceil_mode=True,\n out_indices=(3, 4),\n out_feature_indices=(22, 34),\n l2_norm_scale=20),\n neck=None,\n bbox_head=dict(\n type='SSDHead',\n input_size=input_size,\n in_channels=(512, 1024, 512, 256, 256, 256),\n num_classes=2,\n anchor_strides=(8, 16, 32, 64, 100, 300),\n basesize_ratio_range=(0.15, 0.9),\n anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),\n target_means=(.0, .0, .0, .0),\n target_stds=(0.1, 0.1, 0.2, 0.2)))\n# model training and testing settings\ncudnn_benchmark = True\ntrain_cfg = dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.,\n ignore_iof_thr=-1,\n gt_max_assign_all=False),\n smoothl1_beta=1.,\n allowed_border=-1,\n pos_weight=-1,\n neg_pos_ratio=3,\n debug=False)\ntest_cfg = dict(\n nms=dict(type='nms', iou_thr=0.45),\n min_bbox_size=0,\n score_thr=0.02,\n max_per_img=200)\n# dataset settings\ndataset_type = 'WIDERFaceDataset'\ndata_root = 'data/WIDERFace/'\nimg_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)\ntrain_pipeline = [\n dict(type='LoadImageFromFile', to_float32=True),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='PhotoMetricDistortion',\n brightness_delta=32,\n contrast_range=(0.5, 1.5),\n saturation_range=(0.5, 1.5),\n hue_delta=18),\n dict(\n type='Expand',\n mean=img_norm_cfg['mean'],\n to_rgb=img_norm_cfg['to_rgb'],\n ratio_range=(1, 4)),\n dict(\n type='MinIoURandomCrop',\n min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),\n min_crop_size=0.3),\n dict(type='Resize', img_scale=(300, 300), keep_ratio=False),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(300, 300),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=False),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\ndata = dict(\n 
imgs_per_gpu=60,\n workers_per_gpu=2,\n train=dict(\n type='RepeatDataset',\n times=2,\n dataset=dict(\n type=dataset_type,\n ann_file=data_root + 'train.txt',\n img_prefix=data_root + 'WIDER_train/',\n min_size=17,\n pipeline=train_pipeline)),\n val=dict(\n type=dataset_type,\n ann_file=data_root + 'val.txt',\n img_prefix=data_root + 'WIDER_val/',\n pipeline=test_pipeline),\n test=dict(\n type=dataset_type,\n ann_file=data_root + 'val.txt',\n img_prefix=data_root + 'WIDER_val/',\n pipeline=test_pipeline))\n# optimizer\noptimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)\noptimizer_config = dict()\n# learning policy\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=1000,\n warmup_ratio=1.0 / 3,\n step=[16, 20])\ncheckpoint_config = dict(interval=1)\n# yapf:disable\nlog_config = dict(\n interval=1,\n hooks=[\n dict(type='TextLoggerHook'),\n # dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\n# runtime settings\ntotal_epochs = 24\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = './work_dirs/ssd300_wider'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\n", "path": "configs/wider_face/ssd300_wider_face.py"}], "after_files": [{"content": "# model settings\ninput_size = 300\nmodel = dict(\n type='SingleStageDetector',\n pretrained='open-mmlab://vgg16_caffe',\n backbone=dict(\n type='SSDVGG',\n input_size=input_size,\n depth=16,\n with_last_pool=False,\n ceil_mode=True,\n out_indices=(3, 4),\n out_feature_indices=(22, 34),\n l2_norm_scale=20),\n neck=None,\n bbox_head=dict(\n type='SSDHead',\n input_size=input_size,\n in_channels=(512, 1024, 512, 256, 256, 256),\n num_classes=2,\n anchor_strides=(8, 16, 32, 64, 100, 300),\n basesize_ratio_range=(0.15, 0.9),\n anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),\n target_means=(.0, .0, .0, .0),\n target_stds=(0.1, 0.1, 0.2, 0.2)))\n# model training and testing settings\ncudnn_benchmark = True\ntrain_cfg = dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.,\n ignore_iof_thr=-1,\n gt_max_assign_all=False),\n smoothl1_beta=1.,\n allowed_border=-1,\n pos_weight=-1,\n neg_pos_ratio=3,\n debug=False)\ntest_cfg = dict(\n nms=dict(type='nms', iou_thr=0.45),\n min_bbox_size=0,\n score_thr=0.02,\n max_per_img=200)\n# dataset settings\ndataset_type = 'WIDERFaceDataset'\ndata_root = 'data/WIDERFace/'\nimg_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)\ntrain_pipeline = [\n dict(type='LoadImageFromFile', to_float32=True),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='PhotoMetricDistortion',\n brightness_delta=32,\n contrast_range=(0.5, 1.5),\n saturation_range=(0.5, 1.5),\n hue_delta=18),\n dict(\n type='Expand',\n mean=img_norm_cfg['mean'],\n to_rgb=img_norm_cfg['to_rgb'],\n ratio_range=(1, 4)),\n dict(\n type='MinIoURandomCrop',\n min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),\n min_crop_size=0.3),\n dict(type='Resize', img_scale=(300, 300), keep_ratio=False),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(300, 300),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=False),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\ndata = dict(\n imgs_per_gpu=60,\n 
workers_per_gpu=2,\n train=dict(\n type='RepeatDataset',\n times=2,\n dataset=dict(\n type=dataset_type,\n ann_file=data_root + 'train.txt',\n img_prefix=data_root + 'WIDER_train/',\n min_size=17,\n pipeline=train_pipeline)),\n val=dict(\n type=dataset_type,\n ann_file=data_root + 'val.txt',\n img_prefix=data_root + 'WIDER_val/',\n pipeline=test_pipeline),\n test=dict(\n type=dataset_type,\n ann_file=data_root + 'val.txt',\n img_prefix=data_root + 'WIDER_val/',\n pipeline=test_pipeline))\n# optimizer\noptimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4)\noptimizer_config = dict()\n# learning policy\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=1000,\n warmup_ratio=1.0 / 3,\n step=[16, 20])\ncheckpoint_config = dict(interval=1)\n# yapf:disable\nlog_config = dict(\n interval=1,\n hooks=[\n dict(type='TextLoggerHook'),\n # dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\n# runtime settings\ntotal_epochs = 24\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = './work_dirs/ssd300_wider'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\n", "path": "configs/wider_face/ssd300_wider_face.py"}]}
1,955
176
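Aside on the record above: the golden diff bumps the SGD learning rate from 1e-3 to 0.012, which matches the issue's observation that the reference model was trained on 8 GPUs while the reporter used 4. The exact value 0.012 looks empirically tuned; the record does not state a formula. As a rough sketch only, the common linear-scaling heuristic (lr proportional to total batch size; the base batch size and the strictly linear rule are assumptions, not taken from the record) would be applied like this:

```python
# Sketch of the linear-scaling heuristic for SGD learning rates.
# This is NOT the exact rule used to pick 0.012 in the patch above;
# it only illustrates how a 4-GPU run would rescale an 8-GPU lr.

def scaled_lr(base_lr: float, base_total_batch: int,
              imgs_per_gpu: int, num_gpus: int) -> float:
    """Scale the learning rate proportionally to the total batch size."""
    total_batch = imgs_per_gpu * num_gpus
    return base_lr * total_batch / base_total_batch

# With imgs_per_gpu=60 (from the config above), moving from 8 GPUs to 4 GPUs
# halves the total batch, so the same heuristic suggests halving the lr:
lr_8_gpu = 0.012
lr_4_gpu = scaled_lr(lr_8_gpu, base_total_batch=60 * 8,
                     imgs_per_gpu=60, num_gpus=4)
print(lr_4_gpu)  # 0.006
```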
gh_patches_debug_60370
rasdani/github-patches
git_diff
Lightning-Universe__lightning-flash-597
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Confusing KerError message for flash registry ## 🐛 Bug <!-- A clear and concise description of what the bug is. --> ### To Reproduce Steps to reproduce the behavior: ``` from flash.image import ImageClassificationData, ImageClassifier print(ImageClassifier.backbones.get('abcd')) ``` #### Code sample <!-- Ideally attach a minimal code sample to reproduce the decried issue. Minimal means having the shortest code but still preserving the bug. --> ### Expected behavior It should throw a keyerror. ### Environment - PyTorch Version (e.g., 1.0): - OS (e.g., Linux): - How you installed PyTorch (`conda`, `pip`, source): - Build command you used (if compiling from source): - Python version: - CUDA/cuDNN version: - GPU models and configuration: - Any other relevant information: ### Additional context Sending in PR. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `flash/core/registry.py` Content: ``` 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from functools import partial 15 from types import FunctionType 16 from typing import Any, Callable, Dict, List, Optional, Union 17 18 from pytorch_lightning.utilities import rank_zero_info 19 from pytorch_lightning.utilities.exceptions import MisconfigurationException 20 21 _REGISTERED_FUNCTION = Dict[str, Any] 22 23 24 class FlashRegistry: 25 """This class is used to register function or :class:`functools.partial` class to a registry.""" 26 27 def __init__(self, name: str, verbose: bool = False) -> None: 28 self.name = name 29 self.functions: List[_REGISTERED_FUNCTION] = [] 30 self._verbose = verbose 31 32 def __len__(self) -> int: 33 return len(self.functions) 34 35 def __contains__(self, key) -> bool: 36 return any(key == e["name"] for e in self.functions) 37 38 def __repr__(self) -> str: 39 return f'{self.__class__.__name__}(name={self.name}, functions={self.functions})' 40 41 def get( 42 self, 43 key: str, 44 with_metadata: bool = False, 45 strict: bool = True, 46 **metadata, 47 ) -> Union[Callable, _REGISTERED_FUNCTION, List[_REGISTERED_FUNCTION], List[Callable]]: 48 """ 49 This function is used to gather matches from the registry: 50 51 Args: 52 key: Name of the registered function. 53 with_metadata: Whether to include the associated metadata in the return value. 54 strict: Whether to return all matches or just one. 55 metadata: Metadata used to filter against existing registry item's metadata. 56 """ 57 matches = [e for e in self.functions if key == e["name"]] 58 if not matches: 59 raise KeyError(f"Key: {key} is not in {repr(self)}") 60 61 if metadata: 62 matches = [m for m in matches if metadata.items() <= m["metadata"].items()] 63 if not matches: 64 raise KeyError("Found no matches that fit your metadata criteria. 
Try removing some metadata") 65 66 matches = [e if with_metadata else e["fn"] for e in matches] 67 return matches[0] if strict else matches 68 69 def remove(self, key: str) -> None: 70 self.functions = [f for f in self.functions if f["name"] != key] 71 72 def _register_function( 73 self, 74 fn: Callable, 75 name: Optional[str] = None, 76 override: bool = False, 77 metadata: Optional[Dict[str, Any]] = None 78 ): 79 if not isinstance(fn, FunctionType) and not isinstance(fn, partial): 80 raise MisconfigurationException(f"You can only register a function, found: {fn}") 81 82 name = name or fn.__name__ 83 84 if self._verbose: 85 rank_zero_info(f"Registering: {fn.__name__} function with name: {name} and metadata: {metadata}") 86 87 item = {"fn": fn, "name": name, "metadata": metadata or {}} 88 89 matching_index = self._find_matching_index(item) 90 if override and matching_index is not None: 91 self.functions[matching_index] = item 92 else: 93 if matching_index is not None: 94 raise MisconfigurationException( 95 f"Function with name: {name} and metadata: {metadata} is already present within {self}." 96 " HINT: Use `override=True`." 97 ) 98 self.functions.append(item) 99 100 def _find_matching_index(self, item: _REGISTERED_FUNCTION) -> Optional[int]: 101 for idx, fn in enumerate(self.functions): 102 if all(fn[k] == item[k] for k in ("fn", "name", "metadata")): 103 return idx 104 105 def __call__( 106 self, 107 fn: Optional[Callable[..., Any]] = None, 108 name: Optional[str] = None, 109 override: bool = False, 110 **metadata 111 ) -> Callable: 112 """ 113 This function is used to register new functions to the registry along their metadata. 114 115 Functions can be filtered using metadata using the ``get`` function. 116 117 """ 118 if fn is not None: 119 self._register_function(fn=fn, name=name, override=override, metadata=metadata) 120 return fn 121 122 # raise the error ahead of time 123 if not (name is None or isinstance(name, str)): 124 raise TypeError(f'`name` must be a str, found {name}') 125 126 def _register(cls): 127 self._register_function(fn=cls, name=name, override=override, metadata=metadata) 128 return cls 129 130 return _register 131 132 def available_keys(self) -> List[str]: 133 return sorted(v["name"] for v in self.functions) 134 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flash/core/registry.py b/flash/core/registry.py --- a/flash/core/registry.py +++ b/flash/core/registry.py @@ -56,7 +56,7 @@ """ matches = [e for e in self.functions if key == e["name"]] if not matches: - raise KeyError(f"Key: {key} is not in {repr(self)}") + raise KeyError(f"Key: {key} is not in {type(self).__name__}") if metadata: matches = [m for m in matches if metadata.items() <= m["metadata"].items()]
{"golden_diff": "diff --git a/flash/core/registry.py b/flash/core/registry.py\n--- a/flash/core/registry.py\n+++ b/flash/core/registry.py\n@@ -56,7 +56,7 @@\n \"\"\"\n matches = [e for e in self.functions if key == e[\"name\"]]\n if not matches:\n- raise KeyError(f\"Key: {key} is not in {repr(self)}\")\n+ raise KeyError(f\"Key: {key} is not in {type(self).__name__}\")\n \n if metadata:\n matches = [m for m in matches if metadata.items() <= m[\"metadata\"].items()]\n", "issue": "Confusing KerError message for flash registry\n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n```\r\nfrom flash.image import ImageClassificationData, ImageClassifier\r\n\r\nprint(ImageClassifier.backbones.get('abcd'))\r\n```\r\n\r\n#### Code sample\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n### Expected behavior\r\n\r\nIt should throw a keyerror.\r\n\r\n### Environment\r\n\r\n - PyTorch Version (e.g., 1.0):\r\n - OS (e.g., Linux):\r\n - How you installed PyTorch (`conda`, `pip`, source):\r\n - Build command you used (if compiling from source):\r\n - Python version:\r\n - CUDA/cuDNN version:\r\n - GPU models and configuration:\r\n - Any other relevant information:\r\n\r\n### Additional context\r\n\r\nSending in PR.\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\nfrom types import FunctionType\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nfrom pytorch_lightning.utilities import rank_zero_info\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n_REGISTERED_FUNCTION = Dict[str, Any]\n\n\nclass FlashRegistry:\n \"\"\"This class is used to register function or :class:`functools.partial` class to a registry.\"\"\"\n\n def __init__(self, name: str, verbose: bool = False) -> None:\n self.name = name\n self.functions: List[_REGISTERED_FUNCTION] = []\n self._verbose = verbose\n\n def __len__(self) -> int:\n return len(self.functions)\n\n def __contains__(self, key) -> bool:\n return any(key == e[\"name\"] for e in self.functions)\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(name={self.name}, functions={self.functions})'\n\n def get(\n self,\n key: str,\n with_metadata: bool = False,\n strict: bool = True,\n **metadata,\n ) -> Union[Callable, _REGISTERED_FUNCTION, List[_REGISTERED_FUNCTION], List[Callable]]:\n \"\"\"\n This function is used to gather matches from the registry:\n\n Args:\n key: Name of the registered function.\n with_metadata: Whether to include the associated metadata in the return value.\n strict: Whether to return all matches or just one.\n metadata: Metadata used to filter against existing registry item's metadata.\n \"\"\"\n matches = [e for e in self.functions if key == e[\"name\"]]\n if not matches:\n 
raise KeyError(f\"Key: {key} is not in {repr(self)}\")\n\n if metadata:\n matches = [m for m in matches if metadata.items() <= m[\"metadata\"].items()]\n if not matches:\n raise KeyError(\"Found no matches that fit your metadata criteria. Try removing some metadata\")\n\n matches = [e if with_metadata else e[\"fn\"] for e in matches]\n return matches[0] if strict else matches\n\n def remove(self, key: str) -> None:\n self.functions = [f for f in self.functions if f[\"name\"] != key]\n\n def _register_function(\n self,\n fn: Callable,\n name: Optional[str] = None,\n override: bool = False,\n metadata: Optional[Dict[str, Any]] = None\n ):\n if not isinstance(fn, FunctionType) and not isinstance(fn, partial):\n raise MisconfigurationException(f\"You can only register a function, found: {fn}\")\n\n name = name or fn.__name__\n\n if self._verbose:\n rank_zero_info(f\"Registering: {fn.__name__} function with name: {name} and metadata: {metadata}\")\n\n item = {\"fn\": fn, \"name\": name, \"metadata\": metadata or {}}\n\n matching_index = self._find_matching_index(item)\n if override and matching_index is not None:\n self.functions[matching_index] = item\n else:\n if matching_index is not None:\n raise MisconfigurationException(\n f\"Function with name: {name} and metadata: {metadata} is already present within {self}.\"\n \" HINT: Use `override=True`.\"\n )\n self.functions.append(item)\n\n def _find_matching_index(self, item: _REGISTERED_FUNCTION) -> Optional[int]:\n for idx, fn in enumerate(self.functions):\n if all(fn[k] == item[k] for k in (\"fn\", \"name\", \"metadata\")):\n return idx\n\n def __call__(\n self,\n fn: Optional[Callable[..., Any]] = None,\n name: Optional[str] = None,\n override: bool = False,\n **metadata\n ) -> Callable:\n \"\"\"\n This function is used to register new functions to the registry along their metadata.\n\n Functions can be filtered using metadata using the ``get`` function.\n\n \"\"\"\n if fn is not None:\n self._register_function(fn=fn, name=name, override=override, metadata=metadata)\n return fn\n\n # raise the error ahead of time\n if not (name is None or isinstance(name, str)):\n raise TypeError(f'`name` must be a str, found {name}')\n\n def _register(cls):\n self._register_function(fn=cls, name=name, override=override, metadata=metadata)\n return cls\n\n return _register\n\n def available_keys(self) -> List[str]:\n return sorted(v[\"name\"] for v in self.functions)\n", "path": "flash/core/registry.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\nfrom types import FunctionType\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nfrom pytorch_lightning.utilities import rank_zero_info\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n_REGISTERED_FUNCTION = Dict[str, Any]\n\n\nclass FlashRegistry:\n \"\"\"This class is used to register function or :class:`functools.partial` class to a registry.\"\"\"\n\n def 
__init__(self, name: str, verbose: bool = False) -> None:\n self.name = name\n self.functions: List[_REGISTERED_FUNCTION] = []\n self._verbose = verbose\n\n def __len__(self) -> int:\n return len(self.functions)\n\n def __contains__(self, key) -> bool:\n return any(key == e[\"name\"] for e in self.functions)\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(name={self.name}, functions={self.functions})'\n\n def get(\n self,\n key: str,\n with_metadata: bool = False,\n strict: bool = True,\n **metadata,\n ) -> Union[Callable, _REGISTERED_FUNCTION, List[_REGISTERED_FUNCTION], List[Callable]]:\n \"\"\"\n This function is used to gather matches from the registry:\n\n Args:\n key: Name of the registered function.\n with_metadata: Whether to include the associated metadata in the return value.\n strict: Whether to return all matches or just one.\n metadata: Metadata used to filter against existing registry item's metadata.\n \"\"\"\n matches = [e for e in self.functions if key == e[\"name\"]]\n if not matches:\n raise KeyError(f\"Key: {key} is not in {type(self).__name__}\")\n\n if metadata:\n matches = [m for m in matches if metadata.items() <= m[\"metadata\"].items()]\n if not matches:\n raise KeyError(\"Found no matches that fit your metadata criteria. Try removing some metadata\")\n\n matches = [e if with_metadata else e[\"fn\"] for e in matches]\n return matches[0] if strict else matches\n\n def remove(self, key: str) -> None:\n self.functions = [f for f in self.functions if f[\"name\"] != key]\n\n def _register_function(\n self,\n fn: Callable,\n name: Optional[str] = None,\n override: bool = False,\n metadata: Optional[Dict[str, Any]] = None\n ):\n if not isinstance(fn, FunctionType) and not isinstance(fn, partial):\n raise MisconfigurationException(f\"You can only register a function, found: {fn}\")\n\n name = name or fn.__name__\n\n if self._verbose:\n rank_zero_info(f\"Registering: {fn.__name__} function with name: {name} and metadata: {metadata}\")\n\n item = {\"fn\": fn, \"name\": name, \"metadata\": metadata or {}}\n\n matching_index = self._find_matching_index(item)\n if override and matching_index is not None:\n self.functions[matching_index] = item\n else:\n if matching_index is not None:\n raise MisconfigurationException(\n f\"Function with name: {name} and metadata: {metadata} is already present within {self}.\"\n \" HINT: Use `override=True`.\"\n )\n self.functions.append(item)\n\n def _find_matching_index(self, item: _REGISTERED_FUNCTION) -> Optional[int]:\n for idx, fn in enumerate(self.functions):\n if all(fn[k] == item[k] for k in (\"fn\", \"name\", \"metadata\")):\n return idx\n\n def __call__(\n self,\n fn: Optional[Callable[..., Any]] = None,\n name: Optional[str] = None,\n override: bool = False,\n **metadata\n ) -> Callable:\n \"\"\"\n This function is used to register new functions to the registry along their metadata.\n\n Functions can be filtered using metadata using the ``get`` function.\n\n \"\"\"\n if fn is not None:\n self._register_function(fn=fn, name=name, override=override, metadata=metadata)\n return fn\n\n # raise the error ahead of time\n if not (name is None or isinstance(name, str)):\n raise TypeError(f'`name` must be a str, found {name}')\n\n def _register(cls):\n self._register_function(fn=cls, name=name, override=override, metadata=metadata)\n return cls\n\n return _register\n\n def available_keys(self) -> List[str]:\n return sorted(v[\"name\"] for v in self.functions)\n", "path": "flash/core/registry.py"}]}
1,869
137
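Aside on the record above: the golden diff only changes the text of the KeyError raised by `FlashRegistry.get`, replacing `repr(self)` (which dumps every registered function) with `type(self).__name__`. A minimal usage sketch of the patched behaviour, based solely on the `flash/core/registry.py` listing in this record (the exact message may differ across flash releases):

```python
# Illustration of the patched error message; requires a flash version
# that contains the change shown in the golden diff above.
from flash.core.registry import FlashRegistry

backbones = FlashRegistry("backbones")

try:
    backbones.get("abcd")
except KeyError as err:
    # Before the patch this embedded the full repr of the registry
    # (name plus every registered function); after the patch it reads
    # simply: "Key: abcd is not in FlashRegistry".
    print(err)
```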